From f4f1855b4c487eac80af60e0aa2c2eb464eefd66 Mon Sep 17 00:00:00 2001 From: Diogo Behrens Date: Thu, 11 Dec 2025 16:45:58 +0100 Subject: [PATCH] Squashed 'vatomic/' changes from fedc65e..035bfc5 035bfc5 doc: Improve comments in memory model file 8f8bb39 doc: Add manpages in sections 3 and 7 72a3416 doc: Document the minimal compatibility version with and C and C++ 58134f9 refactor: Move hpp headers to atomic directory f2a5f05 refactor: Rename internal/config as remap to match intented use (#16) 3975131 doc: Point to CONTRIBUTING and VERIFICATION documentation e74c0f7 doc: Add CONTRIBUTING file 4cc6533 doc: Add doxygen and markdown targets independently bdedb1e refactor: Flatten template directories cb1ea5d scripts: Tidy up verification scripts 18b0963 doc: Improve documentation in verify 03398af cicd: Fine tuning of file filtering cffe266 cicd: Add conditions to start verification 77b2b73 tmplr: Update tmplr to v1.4 2bff2f9 ARM-V8 and RISC-V verification (#21) 7b4b175 Update actions (#22) e7228b3 Generate documentation of vatomic (#20) 18253b8 Support the use of `volatile vsync::atomic`. (#19) 062b917 Add support for `vsync::atomic` (#18) 854481c Add std:atomic like interface (#14) f0ae3d7 Fix `inttypes.h` not available on `macos-latest` runner. (#13) 6536b9a Drop out-dated and unused script ef7b876 Drop doc.h this file does not belong here a6eda56 Remove unnecessary condition from cmake (#11) ea3e244 Drop `compat.h` (#10) 1a9e9ed Drop unused file `atomic_rlx.h` (#9) git-subtree-dir: vatomic git-subtree-split: 035bfc563dea3c735cc53fe73c9bac6874d1c4f7 --- .clang-format | 1 + .github/Dockerfile.boogie | 45 +- .github/toolchains/armeb.cmake | 3 +- .github/toolchains/armeb8.cmake | 1 - .github/workflows/actions.yml | 210 +- .github/workflows/docker.yml | 63 + .gitignore | 1 + CHANGELOG.md | 9 + CMakeLists.txt | 24 +- README.md | 80 +- cmake/doc.cmake | 65 + cmake/tmplr.cmake | 28 + doc/CONTRIBUTING.md | 132 + doc/Doxyfile.in | 90 + doc/VERIFICATION.md | 176 + doc/api/vsync/README.md | 23 + doc/api/vsync/atomic.h.md | 14 + doc/api/vsync/atomic/README.md | 123 + doc/api/vsync/atomic/await.h.md | 72 + doc/api/vsync/atomic/await_ptr.h.md | 349 ++ doc/api/vsync/atomic/await_u32.h.md | 2109 +++++++ doc/api/vsync/atomic/await_u64.h.md | 2109 +++++++ doc/api/vsync/atomic/config.h.md | 77 + doc/api/vsync/atomic/core.h.md | 160 + doc/api/vsync/atomic/core_ptr.h.md | 342 ++ doc/api/vsync/atomic/core_sz.h.md | 2208 ++++++++ doc/api/vsync/atomic/core_u16.h.md | 2208 ++++++++ doc/api/vsync/atomic/core_u32.h.md | 2208 ++++++++ doc/api/vsync/atomic/core_u64.h.md | 2208 ++++++++ doc/api/vsync/atomic/core_u8.h.md | 2208 ++++++++ doc/api/vsync/atomic/dispatch.h.md | 4845 +++++++++++++++++ doc/man/CMakeLists.txt | 17 + doc/man/vatomic.7 | 89 + doc/man/vatomic_add.3 | 1 + doc/man/vatomic_and.3 | 1 + doc/man/vatomic_arith.3 | 54 + doc/man/vatomic_await.3 | 60 + doc/man/vatomic_await_op.3 | 53 + doc/man/vatomic_basic.3 | 57 + doc/man/vatomic_bitwise.3 | 47 + doc/man/vatomic_cmpxchg.3 | 54 + doc/man/vatomic_dec.3 | 1 + doc/man/vatomic_fence.3 | 24 + doc/man/vatomic_inc.3 | 1 + doc/man/vatomic_init.3 | 1 + doc/man/vatomic_max.3 | 1 + doc/man/vatomic_or.3 | 1 + doc/man/vatomic_read.3 | 1 + doc/man/vatomic_sub.3 | 1 + doc/man/vatomic_write.3 | 1 + doc/man/vatomic_xchg.3 | 30 + doc/man/vatomic_xor.3 | 1 + examples/eg_core.c | 30 + include/vsync/atomic.hpp | 61 + include/vsync/atomic/compat.h | 2095 ------- include/vsync/atomic/config.h | 4 +- include/vsync/atomic/core.h | 32 +- include/vsync/atomic/core_bool.hpp | 259 + 
include/vsync/atomic/core_ptr.hpp | 480 ++ include/vsync/atomic/core_s16.hpp | 279 + include/vsync/atomic/core_s32.hpp | 279 + include/vsync/atomic/core_s64.hpp | 279 + include/vsync/atomic/core_s8.hpp | 279 + include/vsync/atomic/core_sz.hpp | 536 ++ include/vsync/atomic/core_u16.hpp | 536 ++ include/vsync/atomic/core_u32.hpp | 536 ++ include/vsync/atomic/core_u64.hpp | 536 ++ include/vsync/atomic/core_u8.hpp | 535 ++ include/vsync/atomic/doc.h | 3 +- include/vsync/atomic/internal/atomic_rlx.h | 928 ---- .../{config/fnc_rlx.h => remap_fnc_rlx.h} | 2 +- .../{config/fnc_sc.h => remap_fnc_sc.h} | 2 +- .../{config/ptr_rlx.h => remap_ptr_rlx.h} | 2 +- .../{config/ptr_sc.h => remap_ptr_sc.h} | 2 +- .../{config/sz_rlx.h => remap_sz_rlx.h} | 2 +- .../{config/sz_sc.h => remap_sz_sc.h} | 2 +- .../{config/u16_rlx.h => remap_u16_rlx.h} | 2 +- .../{config/u16_sc.h => remap_u16_sc.h} | 2 +- .../{config/u32_rlx.h => remap_u32_rlx.h} | 2 +- .../{config/u32_sc.h => remap_u32_sc.h} | 2 +- .../{config/u64_rlx.h => remap_u64_rlx.h} | 2 +- .../{config/u64_sc.h => remap_u64_sc.h} | 2 +- .../{config/u8_rlx.h => remap_u8_rlx.h} | 2 +- .../{config/u8_sc.h => remap_u8_sc.h} | 2 +- include/vsync/doc.h | 109 +- include/vsync/vtypes.h | 30 +- scripts/ensure-cmd.sh | 304 ++ scripts/license-check.sh | 20 +- template/CMakeLists.txt | 11 +- .../{include/vsync => }/atomic/CMakeLists.txt | 5 +- .../{include/vsync => }/atomic/await.h.in | 0 .../{include/vsync => }/atomic/await_TY.h.in | 0 template/{include/vsync => }/atomic/core.h.in | 6 +- .../{include/vsync => }/atomic/core_TY.h.in | 0 .../{include/vsync => }/atomic/dispatch.h.in | 0 .../vsync => }/atomic/internal/CMakeLists.txt | 5 +- .../vsync => }/atomic/internal/arm32_v7.h.in | 0 .../vsync => }/atomic/internal/arm32_v8.h.in | 0 .../vsync => }/atomic/internal/arm64.h.in | 0 .../atomic/internal/arm64_llsc.h.in | 0 .../vsync => }/atomic/internal/arm64_lse.h.in | 0 .../vsync => }/atomic/internal/arm64_lxe.h.in | 0 .../vsync => }/atomic/internal/builtins.h.in | 0 .../vsync => }/atomic/internal/fallback.h.in | 0 .../internal/remap}/CMakeLists.txt | 7 +- template/atomic/internal/remap/TY_rlx.h.in | 163 + .../internal/remap}/TY_sc.h.in | 106 +- template/{include/vsync => }/atomic/tmplr.h | 0 template/cpp-atomic/CMakeLists.txt | 26 + template/cpp-atomic/signed_core_TY.hpp.in | 150 + template/cpp-atomic/unsigned_core_TY.hpp.in | 395 ++ template/include/CMakeLists.txt | 3 - template/include/vsync/CMakeLists.txt | 3 - .../vsync/atomic/internal/config/TY_rlx.h.in | 143 - .../test-atomic}/CMakeLists.txt | 11 +- .../test-atomic}/call_FUNC.c.in | 0 .../test-atomic}/mt_test_await_TY.c.in | 0 .../test-atomic}/mt_test_rmw_TY.c.in | 0 .../test-atomic}/ut_test_TY.c.in | 0 .../test-atomic}/ut_test_await_TY.c.in | 0 .../{include/vsync/atomic => }/vatomic.rules | 38 +- test/CMakeLists.txt | 1 + test/atomics/CMakeLists.txt | 59 +- test/atomics/unit.sh | 230 - test/atomics_basic/CMakeLists.txt | 43 +- test/atomics_cxx/CMakeLists.txt | 14 +- test/atomics_cxx/mt.cpp | 243 + test/atomics_cxx/ut_int_types.cpp | 399 ++ test/atomics_cxx/ut_ptr.cpp | 235 + test/atomics_cxx/vatomic_empty.cpp | 4 + test/atomics_gen/CMakeLists.txt | 4 - test/sanity/CMakeLists.txt | 13 + test/sanity/ut_vtypes_format.c | 19 + tmplr/CMakeLists.txt | 6 - tmplr/include/tmplr/macros.h | 84 - tmplr/test/test-drop.in | 6 - tmplr/test/test-skip.in | 12 - tmplr/test/test1.in | 3 - tmplr/test/test2.in | 6 - tmplr/test/test3.in | 9 - tmplr/test/test4.in | 6 - tmplr/test/test5.in | 5 - tmplr/test/test6.in | 7 - tmplr/test/test7.in | 
6 - tmplr/tmplr.c | 765 --- verify/ASMModel.hs | 207 - verify/ASMParsers.hs | 395 -- verify/BoogieTranslator.hs | 106 - verify/CMakeLists.txt | 239 +- verify/Cargo.lock | 633 +++ verify/Cargo.toml | 18 + verify/Main.hs | 290 - verify/README.md | 1 + verify/armv8/library.bpl | 304 ++ verify/await.bpl | 32 - verify/boogie/auxiliary.bpl | 274 + verify/boogie/correctness.bpl | 55 + verify/boogie/templates/await.bpl | 12 + verify/boogie/templates/fence.bpl | 16 + verify/boogie/templates/must_store.bpl | 24 + verify/boogie/templates/read.bpl | 25 + verify/boogie/templates/read_only.bpl | 12 + verify/boogie/templates/registers.bpl | 5 + verify/boogie/templates/rmw.bpl | 37 + verify/boogie/templates/write.bpl | 20 + verify/cleaner.sh | 26 + verify/generate.sh | 50 + verify/library.bpl | 714 --- verify/lists/vatomic_await.txt | 90 + .../vatomic_core.txt} | 139 +- verify/lists/vatomic_ptr.txt | 28 + verify/read.bpl | 23 - verify/riscv/library.bpl | 279 + verify/rmw.bpl | 131 - verify/src/arm/mod.rs | 482 ++ verify/src/arm/parser.rs | 1221 +++++ verify/src/arm/transform.rs | 187 + verify/src/generate.rs | 70 + verify/src/lib.rs | 398 ++ verify/src/loops.rs | 376 ++ verify/src/main.rs | 231 + verify/src/riscv/mod.rs | 563 ++ verify/src/riscv/parser.rs | 1506 +++++ verify/src/riscv/transform.rs | 153 + verify/verify.sh | 39 + verify/write.bpl | 22 - verify/xchg.bpl | 88 - vmm.cat | 215 +- 188 files changed, 36861 insertions(+), 6991 deletions(-) create mode 100644 .github/workflows/docker.yml create mode 100644 cmake/doc.cmake create mode 100644 cmake/tmplr.cmake create mode 100644 doc/CONTRIBUTING.md create mode 100644 doc/Doxyfile.in create mode 100644 doc/VERIFICATION.md create mode 100644 doc/api/vsync/README.md create mode 100644 doc/api/vsync/atomic.h.md create mode 100644 doc/api/vsync/atomic/README.md create mode 100644 doc/api/vsync/atomic/await.h.md create mode 100644 doc/api/vsync/atomic/await_ptr.h.md create mode 100644 doc/api/vsync/atomic/await_u32.h.md create mode 100644 doc/api/vsync/atomic/await_u64.h.md create mode 100644 doc/api/vsync/atomic/config.h.md create mode 100644 doc/api/vsync/atomic/core.h.md create mode 100644 doc/api/vsync/atomic/core_ptr.h.md create mode 100644 doc/api/vsync/atomic/core_sz.h.md create mode 100644 doc/api/vsync/atomic/core_u16.h.md create mode 100644 doc/api/vsync/atomic/core_u32.h.md create mode 100644 doc/api/vsync/atomic/core_u64.h.md create mode 100644 doc/api/vsync/atomic/core_u8.h.md create mode 100644 doc/api/vsync/atomic/dispatch.h.md create mode 100644 doc/man/CMakeLists.txt create mode 100644 doc/man/vatomic.7 create mode 100644 doc/man/vatomic_add.3 create mode 100644 doc/man/vatomic_and.3 create mode 100644 doc/man/vatomic_arith.3 create mode 100644 doc/man/vatomic_await.3 create mode 100644 doc/man/vatomic_await_op.3 create mode 100644 doc/man/vatomic_basic.3 create mode 100644 doc/man/vatomic_bitwise.3 create mode 100644 doc/man/vatomic_cmpxchg.3 create mode 100644 doc/man/vatomic_dec.3 create mode 100644 doc/man/vatomic_fence.3 create mode 100644 doc/man/vatomic_inc.3 create mode 100644 doc/man/vatomic_init.3 create mode 100644 doc/man/vatomic_max.3 create mode 100644 doc/man/vatomic_or.3 create mode 100644 doc/man/vatomic_read.3 create mode 100644 doc/man/vatomic_sub.3 create mode 100644 doc/man/vatomic_write.3 create mode 100644 doc/man/vatomic_xchg.3 create mode 100644 doc/man/vatomic_xor.3 create mode 100644 examples/eg_core.c create mode 100644 include/vsync/atomic.hpp delete mode 100644 include/vsync/atomic/compat.h create mode 100644 
include/vsync/atomic/core_bool.hpp create mode 100644 include/vsync/atomic/core_ptr.hpp create mode 100644 include/vsync/atomic/core_s16.hpp create mode 100644 include/vsync/atomic/core_s32.hpp create mode 100644 include/vsync/atomic/core_s64.hpp create mode 100644 include/vsync/atomic/core_s8.hpp create mode 100644 include/vsync/atomic/core_sz.hpp create mode 100644 include/vsync/atomic/core_u16.hpp create mode 100644 include/vsync/atomic/core_u32.hpp create mode 100644 include/vsync/atomic/core_u64.hpp create mode 100644 include/vsync/atomic/core_u8.hpp delete mode 100644 include/vsync/atomic/internal/atomic_rlx.h rename include/vsync/atomic/internal/{config/fnc_rlx.h => remap_fnc_rlx.h} (89%) rename include/vsync/atomic/internal/{config/fnc_sc.h => remap_fnc_sc.h} (89%) rename include/vsync/atomic/internal/{config/ptr_rlx.h => remap_ptr_rlx.h} (98%) rename include/vsync/atomic/internal/{config/ptr_sc.h => remap_ptr_sc.h} (98%) rename include/vsync/atomic/internal/{config/sz_rlx.h => remap_sz_rlx.h} (99%) rename include/vsync/atomic/internal/{config/sz_sc.h => remap_sz_sc.h} (99%) rename include/vsync/atomic/internal/{config/u16_rlx.h => remap_u16_rlx.h} (99%) rename include/vsync/atomic/internal/{config/u16_sc.h => remap_u16_sc.h} (99%) rename include/vsync/atomic/internal/{config/u32_rlx.h => remap_u32_rlx.h} (99%) rename include/vsync/atomic/internal/{config/u32_sc.h => remap_u32_sc.h} (99%) rename include/vsync/atomic/internal/{config/u64_rlx.h => remap_u64_rlx.h} (99%) rename include/vsync/atomic/internal/{config/u64_sc.h => remap_u64_sc.h} (99%) rename include/vsync/atomic/internal/{config/u8_rlx.h => remap_u8_rlx.h} (99%) rename include/vsync/atomic/internal/{config/u8_sc.h => remap_u8_sc.h} (99%) create mode 100755 scripts/ensure-cmd.sh rename template/{include/vsync => }/atomic/CMakeLists.txt (85%) rename template/{include/vsync => }/atomic/await.h.in (100%) rename template/{include/vsync => }/atomic/await_TY.h.in (100%) rename template/{include/vsync => }/atomic/core.h.in (98%) rename template/{include/vsync => }/atomic/core_TY.h.in (100%) rename template/{include/vsync => }/atomic/dispatch.h.in (100%) rename template/{include/vsync => }/atomic/internal/CMakeLists.txt (82%) rename template/{include/vsync => }/atomic/internal/arm32_v7.h.in (100%) rename template/{include/vsync => }/atomic/internal/arm32_v8.h.in (100%) rename template/{include/vsync => }/atomic/internal/arm64.h.in (100%) rename template/{include/vsync => }/atomic/internal/arm64_llsc.h.in (100%) rename template/{include/vsync => }/atomic/internal/arm64_lse.h.in (100%) rename template/{include/vsync => }/atomic/internal/arm64_lxe.h.in (100%) rename template/{include/vsync => }/atomic/internal/builtins.h.in (100%) rename template/{include/vsync => }/atomic/internal/fallback.h.in (100%) rename template/{include/vsync/atomic/internal/config => atomic/internal/remap}/CMakeLists.txt (86%) create mode 100644 template/atomic/internal/remap/TY_rlx.h.in rename template/{include/vsync/atomic/internal/config => atomic/internal/remap}/TY_sc.h.in (50%) rename template/{include/vsync => }/atomic/tmplr.h (100%) create mode 100644 template/cpp-atomic/CMakeLists.txt create mode 100644 template/cpp-atomic/signed_core_TY.hpp.in create mode 100644 template/cpp-atomic/unsigned_core_TY.hpp.in delete mode 100644 template/include/CMakeLists.txt delete mode 100644 template/include/vsync/CMakeLists.txt delete mode 100644 template/include/vsync/atomic/internal/config/TY_rlx.h.in rename {test/atomics_gen/templates => 
template/test-atomic}/CMakeLists.txt (83%) rename {test/atomics_gen/templates => template/test-atomic}/call_FUNC.c.in (100%) rename {test/atomics_gen/templates => template/test-atomic}/mt_test_await_TY.c.in (100%) rename {test/atomics_gen/templates => template/test-atomic}/mt_test_rmw_TY.c.in (100%) rename {test/atomics_gen/templates => template/test-atomic}/ut_test_TY.c.in (100%) rename {test/atomics_gen/templates => template/test-atomic}/ut_test_await_TY.c.in (100%) rename template/{include/vsync/atomic => }/vatomic.rules (82%) delete mode 100644 test/atomics/unit.sh create mode 100644 test/atomics_cxx/mt.cpp create mode 100644 test/atomics_cxx/ut_int_types.cpp create mode 100644 test/atomics_cxx/ut_ptr.cpp create mode 100644 test/sanity/CMakeLists.txt create mode 100644 test/sanity/ut_vtypes_format.c delete mode 100644 tmplr/CMakeLists.txt delete mode 100644 tmplr/include/tmplr/macros.h delete mode 100644 tmplr/test/test-drop.in delete mode 100644 tmplr/test/test-skip.in delete mode 100644 tmplr/test/test1.in delete mode 100644 tmplr/test/test2.in delete mode 100644 tmplr/test/test3.in delete mode 100644 tmplr/test/test4.in delete mode 100644 tmplr/test/test5.in delete mode 100644 tmplr/test/test6.in delete mode 100644 tmplr/test/test7.in delete mode 100644 tmplr/tmplr.c delete mode 100644 verify/ASMModel.hs delete mode 100644 verify/ASMParsers.hs delete mode 100644 verify/BoogieTranslator.hs create mode 100644 verify/Cargo.lock create mode 100644 verify/Cargo.toml delete mode 100644 verify/Main.hs create mode 100644 verify/README.md create mode 100644 verify/armv8/library.bpl delete mode 100644 verify/await.bpl create mode 100644 verify/boogie/auxiliary.bpl create mode 100644 verify/boogie/correctness.bpl create mode 100644 verify/boogie/templates/await.bpl create mode 100644 verify/boogie/templates/fence.bpl create mode 100644 verify/boogie/templates/must_store.bpl create mode 100644 verify/boogie/templates/read.bpl create mode 100644 verify/boogie/templates/read_only.bpl create mode 100644 verify/boogie/templates/registers.bpl create mode 100644 verify/boogie/templates/rmw.bpl create mode 100644 verify/boogie/templates/write.bpl create mode 100755 verify/cleaner.sh create mode 100644 verify/generate.sh delete mode 100644 verify/library.bpl create mode 100644 verify/lists/vatomic_await.txt rename verify/{atomics_list.txt => lists/vatomic_core.txt} (50%) create mode 100644 verify/lists/vatomic_ptr.txt delete mode 100644 verify/read.bpl create mode 100644 verify/riscv/library.bpl delete mode 100644 verify/rmw.bpl create mode 100644 verify/src/arm/mod.rs create mode 100644 verify/src/arm/parser.rs create mode 100644 verify/src/arm/transform.rs create mode 100644 verify/src/generate.rs create mode 100644 verify/src/lib.rs create mode 100644 verify/src/loops.rs create mode 100644 verify/src/main.rs create mode 100644 verify/src/riscv/mod.rs create mode 100644 verify/src/riscv/parser.rs create mode 100644 verify/src/riscv/transform.rs create mode 100755 verify/verify.sh delete mode 100644 verify/write.bpl delete mode 100644 verify/xchg.bpl diff --git a/.clang-format b/.clang-format index e708ddb5..6d006b00 100644 --- a/.clang-format +++ b/.clang-format @@ -52,6 +52,7 @@ SpacesInParentheses : false SpacesInSquareBrackets : false TabWidth : 4 UseTab : Never +NamespaceIndentation : All ForEachMacros: - await_while - await_do diff --git a/.github/Dockerfile.boogie b/.github/Dockerfile.boogie index 94b82f8f..140eafcc 100644 --- a/.github/Dockerfile.boogie +++ b/.github/Dockerfile.boogie @@ -1,17 
+1,38 @@ -FROM ubuntu:22.04 +FROM ubuntu:24.04 -# Install useful package +ENV DEBIAN_FRONTEND=noninteractive \ + RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH=/usr/local/cargo/bin:/opt/z3:/opt/boogie:$PATH + +# Install dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ - build-essential \ - ca-certificates \ - cmake \ - ninja-build \ - g++-aarch64-linux-gnu \ - gcc-aarch64-linux-gnu \ - dotnet-sdk-6.0 \ - z3 \ - ghc \ + curl \ + wget \ + unzip \ + git \ + build-essential \ + ca-certificates \ + cmake \ + ninja-build \ + g++-aarch64-linux-gnu \ + gcc-aarch64-linux-gnu \ + gcc-riscv64-linux-gnu \ + libc6-dev-riscv64-cross \ + python3 \ + python3-pip \ + dotnet-sdk-8.0 \ && rm -rf /var/lib/apt/lists/* -RUN dotnet tool install --tool-path /usr/local/bin boogie --version 3.4.3 +# Set default encoding to UTF-8 +ENV LANG=C.UTF-8 +ENV LC_ALL=C.UTF-8 + +#Install z3 via pip +RUN pip install z3-solver --break-system-packages + +# Install Rust +RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y +# Install boogie via dotnet +RUN dotnet tool install --tool-path /usr/local/bin boogie --version 3.5.5 diff --git a/.github/toolchains/armeb.cmake b/.github/toolchains/armeb.cmake index 1a7ccd5c..4672b805 100644 --- a/.github/toolchains/armeb.cmake +++ b/.github/toolchains/armeb.cmake @@ -6,10 +6,9 @@ set(CMAKE_SYSTEM_PROCESSOR arm) set(ARMEB_PATH /tmp/gcc-linaro-7.5.0-2019.12-x86_64_armeb-linux-gnueabi/bin) set(CMAKE_C_COMPILER ${ARMEB_PATH}/armeb-linux-gnueabi-gcc) set(CMAKE_CXX_COMPILER ${ARMEB_PATH}/armeb-linux-gnueabi-g++) -set(CMAKE_CROSSCOMPILING_EMULATOR qemu-armeb -L /usr/armeb-linux-gnueabi) +set(CMAKE_CROSSCOMPILING_EMULATOR qemu-armeb -L /usr/armeb-linux-gnueabi/libc) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mbig-endian") -set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -static") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv7-a -marm") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mno-thumb-interwork -mfpu=vfp -msoft-float") diff --git a/.github/toolchains/armeb8.cmake b/.github/toolchains/armeb8.cmake index 7df80594..08f010d0 100644 --- a/.github/toolchains/armeb8.cmake +++ b/.github/toolchains/armeb8.cmake @@ -9,7 +9,6 @@ set(CMAKE_CXX_COMPILER ${ARMEB_PATH}/armeb-linux-gnueabi-g++) set(CMAKE_CROSSCOMPILING_EMULATOR qemu-armeb -L /usr/armeb-linux-gnueabi/libc) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mbig-endian") -set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -static") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a -marm") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mno-thumb-interwork -mfpu=vfp -msoft-float") diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml index cdba9261..9f42cfb9 100644 --- a/.github/workflows/actions.yml +++ b/.github/workflows/actions.yml @@ -1,9 +1,52 @@ name: vatomic tests and verification -on: [push] +on: + - push + - pull_request + env: REGISTRY: ghcr.io jobs: + changed: + name: Detect changed files + runs-on: ubuntu-latest + outputs: + workflows: ${{ steps.workflows.outputs.changed }} + include: ${{ steps.include.outputs.changed }} + verify: ${{ steps.verify.outputs.changed }} + test: ${{ steps.test.outputs.changed }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: dorny/paths-filter@v3 + id: include + with: + filters: | + changed: + - 'include/**' + - uses: dorny/paths-filter@v3 + id: workflows + with: + filters: | + changed: + - '.github/**' + - uses: dorny/paths-filter@v3 + id: verify + with: + filters: | + changed: + - 'verify/**.bpl' + - 'verify/**.rs' + - 'verify/**.sh' + - 
'verify/**.txt' + - uses: dorny/paths-filter@v3 + id: test + with: + filters: | + changed: + - 'test/**' + test-install: strategy: matrix: @@ -32,8 +75,19 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - - name: Configure Testing - run: cmake -S. -Bbuild + - name: Prep Env for macos + if: matrix.os == 'macos-latest' + run: | + sudo xcode-select --switch /Applications/Xcode.app/Contents/Developer + - name: Configure Testing Ubuntu + run: | + cmake -S. -Bbuild + if: matrix.os != 'macos-latest' + - name: Configure Testing Mac + run: | + export SDK_ROOT=$(xcrun --sdk macosx --show-sdk-path) + cmake -S. -Bbuild -DCMAKE_C_FLAGS="-isysroot $SDK_ROOT" -DCMAKE_CXX_FLAGS="-isysroot $SDK_ROOT" + if: matrix.os == 'macos-latest' - name: Build Tests run: cmake --build build - name: Run Tests @@ -53,44 +107,35 @@ jobs: usesh: true prepare: /usr/sbin/pkg_add curl cmake run: | + set -eux rm -rf /tmp/target - cmake -S. -Bbuild - cmake --build build -DCMAKE_INSTALL_PREFIX=/tmp/target + cmake -S. -Bbuild -DCMAKE_INSTALL_PREFIX=/tmp/target + cmake --build build ctest --test-dir build --output-on-failure cmake --install build cmake -Stest/project -Bbuild2 -DCMAKE_PREFIX_PATH=/tmp/target cmake --build build2 - check-expectations: runs-on: ubuntu-22.04 strategy: + fail-fast: false matrix: target: - - "clang-format-apply" - - "vatomic-generate" - - "vatomic-test-generate" - steps: - - name: Check out repository code - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Configure - run: cmake -S. -Bbuild - - name: Run ${{ matrix.target }} - run: cmake --build build --target ${{ matrix.target }} - - name: Check things match expectation - run: cmake --build build --target diff-check || - (echo "Run 'make ${{ matrix.target }}' and commit" && false) - - check-cmake-format: - runs-on: ubuntu-latest - strategy: - matrix: - target: - - "cmake-format-apply" + - clang-format-apply + - cmake-format-apply + - markdown + - vatomic-generate + - vatomic-test-generate steps: - - name: Install cmake-format + - name: Download and Install doxygen & mdox for markdown check + if: matrix.target == 'markdown' + run: | + sudo apt update && sudo apt install -y doxygen curl + sudo curl -L -o /usr/local/bin/mdox https://github.com/db7/mdox/releases/download/v0.1/mdox-v0.1-linux-amd64 + sudo chmod +x /usr/local/bin/mdox + - name: Install cmake-format for cmake-format-check + if: matrix.target == 'cmake-format-apply' run: pip install cmakelang - name: Check out repository code uses: actions/checkout@v4 @@ -104,68 +149,15 @@ jobs: run: cmake --build build --target diff-check || (echo "Run 'make ${{ matrix.target }}' and commit" && false) - prepare-docker: - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - attestations: write - id-token: write - strategy: - fail-fast: true - max-parallel: 1 - matrix: - container: - - name: "qemu-ci" - path: ".github/Dockerfile.qemu" - - name: "boogie-ci" - path: ".github/Dockerfile.boogie" - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Log in to the Container registry - uses: docker/login-action@v3.0.0 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: ghcr.io/open-s4c/vatomic/${{ matrix.container.name }} - - tags: | - type=edge,branch=dev - type=raw,latest - type=sha,format=long - - - name: Setup docker buildx - uses: docker/setup-buildx-action@v3 - - - name: 
Build and push Docker image - uses: docker/build-push-action@v5.1.0 - with: - context: .github/docker-context - push: true - file: ${{ matrix.container.path }} - cache-from: type=gha,scope=${{ matrix.container }} - cache-to: type=gha,mode=max,scope=${{ matrix.container }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - test-qemu-other: - needs: [ prepare-docker ] + needs: changed + if: ${{ (needs.changed.outputs.include == 'true') || (needs.changed.outputs.test == 'true') }} strategy: matrix: optimization_level: [ "O1", "O2", "O3" ] toolchain: [ "x86_64", "riscv" ] runs-on: ubuntu-latest - container: ghcr.io/open-s4c/vatomic/qemu-ci:sha-${{ github.sha }} + container: ghcr.io/${{ github.repository }}/qemu-ci:latest steps: - name: Check out to run with the tests uses: actions/checkout@v4 @@ -175,20 +167,22 @@ jobs: run: cmake -S . -Bbuild -DCMAKE_TOOLCHAIN_FILE=".github/toolchains/${{ matrix.toolchain }}.cmake" -DCMAKE_C_FLAGS="-${{ matrix.optimization_level }} ${{ matrix.flags }}" + -DVATOMIC_DEV=OFF - name: Build run: cmake --build build - name: Test run: ctest --test-dir build --output-on-failure test-qemu-arm32: - needs: [ prepare-docker ] + needs: changed + if: ${{ (needs.changed.outputs.include == 'true') || (needs.changed.outputs.test == 'true') }} strategy: matrix: optimization_level: [ "O1", "O2", "O3" ] toolchain: [ "armel", "armel8", "armeb", "armeb8" ] flags: [ "-DVSYNC_BUILTINS=ON", "-DVSYNC_BUILTINS=OFF" ] runs-on: ubuntu-latest - container: ghcr.io/open-s4c/vatomic/qemu-ci:sha-${{ github.sha }} + container: ghcr.io/${{ github.repository }}/qemu-ci:latest steps: - name: Check out to run with the tests uses: actions/checkout@v4 @@ -198,13 +192,15 @@ jobs: run: cmake -S . -Bbuild -DCMAKE_TOOLCHAIN_FILE=".github/toolchains/${{ matrix.toolchain }}.cmake" -DCMAKE_C_FLAGS="-${{ matrix.optimization_level }} ${{ matrix.flags }}" + -DVATOMIC_DEV=OFF - name: Build run: cmake --build build - name: Test run: ctest --test-dir build --output-on-failure test-qemu-arm64: - needs: [ prepare-docker ] + needs: changed + if: ${{ (needs.changed.outputs.include == 'true') || (needs.changed.outputs.test == 'true') }} strategy: matrix: optimization_level: [ "O1", "O2", "O3" ] @@ -216,7 +212,7 @@ jobs: - toolchain: "arm64_lse" flags: "-DVATOMIC_ENABLE_ARM64_LXE=OFF" runs-on: ubuntu-latest - container: ghcr.io/open-s4c/vatomic/qemu-ci:sha-${{ github.sha }} + container: ghcr.io/${{ github.repository }}/qemu-ci:latest steps: - name: Check out to run with the tests uses: actions/checkout@v4 @@ -226,28 +222,50 @@ jobs: run: cmake -S . -Bbuild -DCMAKE_TOOLCHAIN_FILE=".github/toolchains/${{ matrix.options.toolchain }}.cmake" -DCMAKE_C_FLAGS="-${{ matrix.optimization_level }} ${{ matrix.options.flags }}" + -DVATOMIC_DEV=OFF - name: Build run: cmake --build build - name: Test run: ctest --test-dir build --output-on-failure - boogie-verification: - needs: [ prepare-docker ] + verify-armv8: + needs: changed + if: ${{ (needs.changed.outputs.include == 'true') || (needs.changed.outputs.verify == 'true') }} strategy: matrix: - target: [ lse, llsc, lxe, no_polite_await ] + target: [ builtin, llsc, lse, lxe ] + group: [ vatomic8, vatomic16, vatomic32, vatomic64, vatomicsz, vatomicptr, vatomic_fence ] runs-on: ubuntu-latest - container: ghcr.io/open-s4c/vatomic/boogie-ci:sha-${{ github.sha }} + container: ghcr.io/${{ github.repository }}/boogie-ci:latest steps: - name: Check out repository code uses: actions/checkout@v4 with: fetch-depth: 0 - name: Configure - run: cmake -S. 
-Bbuild -DLIBVSYNC_ATOMICS_VERIFICATION=ON + run: cmake -S. -Bbuild -DVATOMIC_DEV=OFF - name: Build - run: cmake --build build --target build_boogie_${{ matrix.target }} + run: cmake --build build --target build_boogie_armv8_${{ matrix.target }}_${{ matrix.group }} - name: Verify - run: ctest -R ${{ matrix.target }} --test-dir build/verify + run: ctest -j 4 -L armv8_${{ matrix.target }}_${{ matrix.group }} --test-dir build/verify + verify-riscv: + needs: changed + if: ${{ (needs.changed.outputs.include == 'true') || (needs.changed.outputs.verify == 'true') }} + strategy: + matrix: + group: [ vatomic8, vatomic16, vatomic32, vatomic64, vatomicsz, vatomicptr, vatomic_fence ] + runs-on: ubuntu-latest + container: ghcr.io/${{ github.repository }}/boogie-ci:latest + steps: + - name: Check out repository code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Configure + run: cmake -S. -Bbuild -DVATOMIC_DEV=OFF + - name: Build + run: cmake --build build --target build_boogie_riscv_builtin_${{ matrix.group }} + - name: Verify + run: ctest -j 4 -L riscv_builtin_${{ matrix.group }} --test-dir build/verify diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 00000000..a38952a2 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,63 @@ +name: vatomic dockers build +on: + push: + paths: + - '.github/Dockerfile*' + pull_request: + paths: + - '.github/Dockerfile*' +env: + REGISTRY: ghcr.io +jobs: + prepare-docker: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + attestations: write + id-token: write + strategy: + fail-fast: true + max-parallel: 1 + matrix: + container: + - name: "qemu-ci" + path: ".github/Dockerfile.qemu" + - name: "boogie-ci" + path: ".github/Dockerfile.boogie" + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Log in to the Container registry + uses: docker/login-action@v3.0.0 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ghcr.io/${{ github.repository }}/${{ matrix.container.name }} + + tags: | + type=edge,branch=dev + type=raw,latest + type=sha,format=long + + - name: Setup docker buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push Docker image + uses: docker/build-push-action@v5.1.0 + with: + context: .github/docker-context + push: true + file: ${{ matrix.container.path }} + cache-from: type=gha,scope=${{ matrix.container }} + cache-to: type=gha,mode=max,scope=${{ matrix.container }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.gitignore b/.gitignore index eabfa7b0..04641eb5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ *build*/ .kakrc +.vscode \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 6099fcca..affd3d88 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,15 @@ This project partially comply with [Semantic Versioning][]. 
[Semantic Versioning]: https://semver.org/spec/v2.0.0.html +## Unreleased + +- Introduced vsync::atomic for C++ (vsync/atomic.hpp) +- Added new verification pipeline in `verify/` +- Introduced RISCV verification +- Improved documentation on verification and contributing to the project +- Reorganization of template files +- Minor refactoring of internal headers (not user facing) + ## [2.3.1] - Added verification of AArch64 atomic implementation diff --git a/CMakeLists.txt b/CMakeLists.txt index 00c39939..116cd822 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,6 +8,10 @@ project( VERSION 2.3.1 DESCRIPTION "VSync atomics") +set(CMAKE_C_STANDARD 99) +set(CMAKE_C_STANDARD_REQUIRED ON) +set(CMAKE_C_EXTENSIONS OFF) + include(GNUInstallDirs) include(CheckSymbolExists) include(cmake/export.cmake) @@ -22,20 +26,32 @@ target_include_directories( install(DIRECTORY include/vsync DESTINATION include) install(FILES vmm.cat DESTINATION share/vsync/) install(TARGETS vatomic EXPORT ${PROJECT_TARGETS}) +add_subdirectory(doc/man) -# Enable development targets only if this is the top level project +# Enable development and test targets only if this is the top level project if(CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME) set(VATOMIC_DEV_DEFAULT ON) + set(VATOMIC_TESTS_DEFAULT ON) else() set(VATOMIC_DEV_DEFAULT OFF) + set(VATOMIC_TESTS_DEFAULT OFF) endif() -option(VATOMIC_DEV "Enable development targets" ${VATOMIC_DEV_DEFAULT}) +option(VATOMIC_DEV + "Enable generating vatomic headers from templates with tmplr" + ${VATOMIC_DEV_DEFAULT}) +option(VATOMIC_TESTS "Enable test and verification targets" + ${VATOMIC_TESTS_DEFAULT}) if(VATOMIC_DEV) # Atomic templating + include(cmake/tmplr.cmake) + set(TMPLR_PROGRAM ${TMPLR_PROGRAM} -P_tmpl -b 1000) add_subdirectory(template) - add_subdirectory(tmplr) + + # Documentation generation + include(cmake/doc.cmake) + add_doc_targets() # Basic format sanitization add_custom_target( @@ -61,7 +77,9 @@ if(VATOMIC_DEV) # General diff check for pipeline add_custom_target(diff-check COMMAND git --no-pager diff --exit-code) +endif() +if(VATOMIC_TESTS) include(CTest) include(ProcessorCount) include(cmake/v_add_test.cmake) diff --git a/README.md b/README.md index 20b99a68..105a079e 100644 --- a/README.md +++ b/README.md @@ -10,14 +10,86 @@ with a model checker such as [Dartagnan][] or [vsyncer][]. The atomics implementations are being gradually verified to comply with VMM. At the moment, we have completed the verification of ARMv8 64-bits with and -without LSE instructions. +without LSE instructions, and RISC-V using compiler builtins. -This project is a spinoff of the VSync project and a key component in -[libvsync][]. +The C implementation is C99-compatible: the build system enforces `-std=c99` +with compiler extensions disabled, so every shipped header and test builds +cleanly in that dialect out of the box. The C++ bindings and tests are likewise +compiled as C++11 (no compiler extensions), which we exercise in the same +comprehensive build. -Refer to our ASPLOS'21 [publication][paper] describing part of the +This project is a spinoff of the VSync project and a key component in +[libvsync][]. Refer to our ASPLOS'21 [publication][paper] describing part of the research effort put into this library. +## Getting started + +In order to build and install `vatomic` run: + +```sh +cmake -S . 
-B build
+cmake --build build
+ctest --test-dir build --output-on-failure
+cmake --install build --prefix /desired/prefix
+
+```
+
+`vatomic` installs headers under `include/vsync/` and a CMake package file,
+allowing downstream projects to simply `find_package(vatomic CONFIG REQUIRED)`
+and link against the `vatomic::vatomic` interface target.
+
+### Using the C API
+
+```c
+#include <vsync/atomic.h>
+
+void example(void)
+{
+    vatomic32_t counter;
+    vatomic32_write_rlx(&counter, 0);
+    vuint32_t old = vatomic32_add(&counter, 2);
+    (void)old;
+}
+```
+
+### Using the C++ API
+
+```c++
+#include <vsync/atomic.hpp>
+
+void example()
+{
+    vsync::atomic<vuint32_t> counter{};
+    counter.store(42, vsync::memory_order_relaxed);
+    auto prev = counter.fetch_add(1);
+    (void)prev;
+}
+```
+
+## Testing and verification
+
+Besides the unit tests (`ctest`), the repository ships a Boogie-based
+verification harness. To run it locally:
+
+```sh
+cmake -S . -B build -DVATOMIC_DEV=ON
+cmake --build build --target build_boogie_lse
+ctest --test-dir build/verify -R lse --output-on-failure
+```
+
+Targets exist for `lse`, `llsc`, `lxe`, and `no_polite_await`. Refer to
+[`doc/VERIFICATION.md`](doc/VERIFICATION.md) for dependency details.
+
+## Releases and support
+
+Changes between versions are tracked in [CHANGELOG.md](CHANGELOG.md). Issues
+and pull requests are welcome in this repository; please include details about
+the architecture, compiler, and toolchain you are using.
+
+## Contributing
+
+See [doc/CONTRIBUTING.md](doc/CONTRIBUTING.md) for guidelines on filing issues,
+running the test and verification suites, and preparing pull requests.
+

## Acknowledgements

This project was initially developed under the support of
diff --git a/cmake/doc.cmake b/cmake/doc.cmake
new file mode 100644 index 00000000..40c37b88 --- /dev/null +++ b/cmake/doc.cmake @@ -0,0 +1,65 @@
+# Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+# SPDX-License-Identifier: MIT
+
+# ##############################################################################
+# Adds cmake targets that help generate the markdown documentation:
+# make doxygen, make markdown
+# ##############################################################################
+function(add_doc_targets)
+    find_program(MDOX_INSTALLED mdox DOC "mdox")
+    find_program(DOXYGEN_INSTALLED doxygen DOC "doxygen")
+    set(MARKDOWN_TARGET "markdown")
+    set(DOXYGEN_TARGET "doxygen")
+
+    # ##########################################################################
+    # Add doxygen target. Run with: make doxygen
+    # ##########################################################################
+    if(NOT DOXYGEN_INSTALLED)
+        message(
+            WARNING
+                "Target `${DOXYGEN_TARGET}` disabled! Please install https://doxygen.nl/download.html"
+        )
+        return()
+    endif()
+
+    set(DOXYGEN_DOCKER "")
+    set(DOXYGEN_OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/doxygen")
+    set(DOXYGEN_FILE_IN "${PROJECT_SOURCE_DIR}/doc/Doxyfile.in")
+    set(DOXYGEN_FILE "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile")
+    set(DOXYGEN_INPUT "${PROJECT_SOURCE_DIR}")
+    # Note that this will substitute @VAR@ with the value of VAR in
+    # ${DOXYGEN_FILE_IN} and write the output to ${DOXYGEN_FILE}
+    configure_file(${DOXYGEN_FILE_IN} ${DOXYGEN_FILE})
+    # create output directory
+    file(MAKE_DIRECTORY "${DOXYGEN_OUTPUT}")
+    add_custom_target(
+        ${DOXYGEN_TARGET}
+        COMMAND ${DOXYGEN_DOCKER} doxygen ${DOXYGEN_FILE}
+        WORKING_DIRECTORY "${DOXYGEN_INPUT}"
+        COMMENT "Generating Doxygen documentation for include folder."
+ VERBATIM) + + # ########################################################################## + # Add markdown target Run with: make markdown + # ########################################################################## + + if(NOT MDOX_INSTALLED) + message( + WARNING + "Target `${MARKDOWN_TARGET}` disabled! Please install https://github.com/db7/mdox" + ) + return() + endif() + + set(DOC_OUTPUT "${PROJECT_SOURCE_DIR}/doc/api") + set(MDOX mdox) + add_custom_target( + "${MARKDOWN_TARGET}" + COMMAND "${CMAKE_COMMAND}" -E rm -rf "${DOC_OUTPUT}" + COMMAND "${CMAKE_COMMAND}" -E make_directory "${DOC_OUTPUT}" + COMMAND ${DOXYGEN_DOCKER} ${MDOX} -i ${DOXYGEN_OUTPUT}/xml -o + "${DOC_OUTPUT}" + COMMENT "Generating Markdown documentation for ${DOXYGEN_INPUT}" + VERBATIM + DEPENDS "${DOXYGEN_TARGET}") +endfunction() diff --git a/cmake/tmplr.cmake b/cmake/tmplr.cmake new file mode 100644 index 00000000..2cc28e6c --- /dev/null +++ b/cmake/tmplr.cmake @@ -0,0 +1,28 @@ +# Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. +# SPDX-License-Identifier: MIT + +set(OPENS4C_URL "https://github.com/open-s4c") +set(TMPLR_VERSION "1.4") +set(TMPLR_URL "${OPENS4C_URL}/tmplr/archive/refs/tags/v${TMPLR_VERSION}.tar.gz") +set(TMPLR_SHA256 + "ab6b67cd9894afbd8f262a7739598902c873c89007bcddb818afe65b405294ea") + +if(DEFINED TMPLR_PROGRAM AND EXISTS "${TMPLR_PROGRAM}") + # Cached values are valid; nothing further needed. + return() +endif() + +execute_process( + COMMAND + "${CMAKE_SOURCE_DIR}/scripts/ensure-cmd.sh" -q # + --workdir "${CMAKE_BINARY_DIR}" # + --url "${TMPLR_URL}" # + --sha256 "${TMPLR_SHA256}" # + "tmplr" "${TMPLR_VERSION}" + OUTPUT_VARIABLE TMPLR_PROGRAM + OUTPUT_STRIP_TRAILING_WHITESPACE + RESULT_VARIABLE ENSURE_TMPLR_RC) +if(NOT ENSURE_TMPLR_RC EQUAL 0) + message(FATAL_ERROR "Failed to ensure tmplr ${TMPLR_VERSION} is available") +endif() +message(STATUS "tmplr v${TMPLR_VERSION}: ${TMPLR_PROGRAM}") diff --git a/doc/CONTRIBUTING.md b/doc/CONTRIBUTING.md new file mode 100644 index 00000000..e5554513 --- /dev/null +++ b/doc/CONTRIBUTING.md @@ -0,0 +1,132 @@ +# Contributing to vatomic + +This document collects the expectations for bug reports, patches, and +documentation changes so reviews can stay focused on the substance of your +contribution. + +## Before you start + +- Review the [README](../README.md) to understand the project scope, supported + architectures, and the current verification status. +- Open a GitHub issue or discussion before investing significant effort, + especially when proposing a new API or architecture backend. Please describe + the motivating use case, target architecture, compiler/toolchain versions, and + any memory-model considerations. +- For bug reports include (if possible) a minimal reproducer, the architecture + target, and the compiler (including version and flags) you are using. + +## Development workflow + +1. Fork the repository and create a topic branch; keep pull requests scoped to a + single logical change. +2. Keep commits focused and descriptive. Reference the GitHub issue (if any) in + the commit message body. +3. Update or add documentation/tests alongside code changes so the behavior + stays discoverable. +4. Run the relevant test suites (see below) before submitting your pull request. + +## Building and testing + +`vatomic` is a header-only library, but tests are compiled programs. The +build process is orchestrated by CMake >= 3.16. 
+Ensure the following two flags are set when configuring the project:
+
+- `VATOMIC_TESTS=ON` - enables build of unit tests,
+- `VATOMIC_DEV=ON` - enables template file expansion.
+
+Here is an initial sequence of commands to compile and run all tests:
+
+```sh
+cmake -S . -B build -DVATOMIC_DEV=ON -DVATOMIC_TESTS=ON
+cmake --build build
+ctest --test-dir build --output-on-failure
+```
+
+### Template files
+
+The `vatomic` library only consists of the files in `include/`. These are
+not written by hand, but rather expanded from a series of template files,
+which can be found in `template/`.
+
+When setting `VATOMIC_DEV`, the build system will trigger the expansion of
+the templates whenever they are modified, overwriting the files in `include/`.
+In this process, two tools are used:
+
+- [`tmplr`][] - reads the `.h.in` files and expands them with the additional
+  rules in `template/vatomic.rules`.
+- `clang-format` - reformats the overwritten header files in `include`
+  to maintain a coherent and readable output.
+
+If not installed, `tmplr` is automatically downloaded (with curl) and built
+with your standard C compiler. `clang-format` is expected to be preinstalled.
+
+Most test cases in `test/` are also expanded templates. The following commands
+expand the template files:
+
+```sh
+cmake --build build --target vatomic-generate
+cmake --build build --target vatomic-test-generate
+```
+
+### Verification
+
+Enabling `VATOMIC_TESTS` exposes the verification targets under `verify/`
+if the required tools (Boogie, Z3, etc.) are installed. When you touch code
+that is formally verified, please run the relevant harness (for example
+`build_boogie_lse` followed by `ctest --test-dir build/verify -R lse
+--output-on-failure`). Refer to [`doc/VERIFICATION.md`](VERIFICATION.md)
+for the full list of targets and dependencies.
+
+### API documentation
+
+A Markdown version of the API documentation is kept in `doc/api`. The process
+to extract the documentation of the code is performed in two steps:
+
+- [Doxygen][] parses the header files in `include` and generates a set of
+  XML files.
+- [mdox][] parses the XML files and writes the new Markdown files into
+  `doc/api`.
+
+The user has to manually install these tools to be able to update the API
+documentation. The following command creates the up-to-date documentation:
+
+```sh
+cmake --build build --target markdown
+```
+
+## Coding guidelines
+
+- Follow the existing file layout under `include/vsync/`. Keep
+  architecture-specific code isolated under the matching directory.
+- Run `clang-format` (configuration in `.clang-format`) on all C/C++ changes
+  and `cmake-format` on CMake files. The build system has two targets to
+  help apply the format over the whole codebase: `clang-format-apply` and
+  `cmake-format-apply`. The CI enforces these styles with `clang-format` 14.
+- Favor small, documented helpers over macro-heavy solutions when possible.
+  When macros are unavoidable, prefer well-scoped names.
+- Maintain a consistent memory-ordering story: document the expected ordering
+  whenever you add or update APIs, and add litmus/Boogie coverage when
+  applicable.
+- Public APIs must compile cleanly as both C and C++ (see
+  `include/vsync/atomic.h` and `include/vsync/atomic.hpp` for patterns
+  to follow).
+
+## Documentation
+
+- Update the Doxygen comments in `include/` and the manuals under `doc/man/`
+  when you add or modify APIs.
+- If your change affects build, testing, or verification flows, update + `README.md`, `doc/VERIFICATION.md`, or add a new document under `doc/` so the + information is easy to find. + +## Pull request checklist + +- [ ] Tests and relevant verification targets pass locally. +- [ ] `clang-format` / `cmake-format` ran on touched files. +- [ ] Documentation reflects the change. +- [ ] Each commit message explains **why** the change is needed. +- [ ] New public APIs include usage examples or tests. + +Thanks again for contributing! Maintaining a clear testing story and +documentation trail keeps the project approachable for the broader concurrency +community. diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in new file mode 100644 index 00000000..c23c1d27 --- /dev/null +++ b/doc/Doxyfile.in @@ -0,0 +1,90 @@ +DOXYFILE_ENCODING = UTF-8 +PROJECT_NAME = @PROJECT_NAME@ +PROJECT_NUMBER = @PROJECT_VERSION@ +PROJECT_BRIEF = +OUTPUT_DIRECTORY = @DOXYGEN_OUTPUT@ +FULL_PATH_NAMES = YES +STRIP_FROM_PATH = @DOXYGEN_INPUT@/include +STRIP_FROM_INC_PATH = @DOXYGEN_INPUT@/include +EXAMPLE_PATH = @PROJECT_SOURCE_DIR@/examples +EXAMPLE_PATTERNS = *.c +TAB_SIZE = 4 +OPTIMIZE_OUTPUT_FOR_C = YES +MARKDOWN_SUPPORT = YES + +GROUP_NESTED_COMPOUNDS = YES + +EXTRACT_ALL = NO +EXTRACT_STATIC = YES +SHOW_USED_FILES = YES +SHOW_FILES = YES +USE_MDFILE_AS_MAINPAGE = README.md +INPUT += @DOXYGEN_INPUT@/include/vsync +FILE_PATTERNS = *.c *.h *.md +RECURSIVE = YES + +ALIASES += "types=^^**Atomic types**: " +ALIASES += "modes=^^**Barrier modes**: " +ALIASES += "memord=^^\n**Memory ordering**: " +ALIASES += "usage=^^**Usage example**: " +ALIASES += "note=^^\> **Note:** " +# Adding new lines with doxygen is tricky follow the pattern with example to add newlines before and after the header tag +ALIASES += "example=^^\n\n\### Example:\n\n^^" +ALIASES += "cite=^^\n\n\### References:\n\n^^" +# we use three newlines to allow for itemizedlist right after the tag + +PREDEFINED = DOCUMENTATION DOC + +JAVADOC_BANNER = YES +# first line is considered brief +JAVADOC_AUTOBRIEF = YES + +##### +INLINE_SIMPLE_STRUCTS = YES +#DISABLE_INDEX = YES +#### + +INCLUDE_PATH = @DOXYGEN_INPUT@/include/ + +REFERENCED_BY_RELATION = NO +REFERENCES_RELATION = NO +REFERENCES_LINK_SOURCE = NO + +SOURCE_TOOLTIPS = NO +SOURCE_BROWSER = NO +VERBATIM_HEADERS = YES +CLANG_ASSISTED_PARSING = NO +GENERATE_HTML = NO +GENERATE_LATEX = NO +GENERATE_MAN = YES +GENERATE_XML = YES +XML_OUTPUT = xml + +HIDE_UNDOC_RELATIONS = YES + +CLASS_DIAGRAMS = NO +HAVE_DOT = NO +DOT_TRANSPARENT = NO +DOT_MULTI_TARGETS = NO +GENERATE_LEGEND = NO +DOT_CLEANUP = NO +CLASS_GRAPH = NO +COLLABORATION_GRAPH = NO +GROUP_GRAPHS = NO +UML_LOOK = NO +TEMPLATE_RELATIONS = NO +INCLUDE_GRAPH = NO +INCLUDED_BY_GRAPH = NO +CALL_GRAPH = NO +CALLER_GRAPH = NO +GRAPHICAL_HIERARCHY = NO +DIRECTORY_GRAPH = NO +INTERACTIVE_SVG = NO +DOT_PATH = +DOTFILE_DIRS = +MSCFILE_DIRS = +DIAFILE_DIRS = +PLANTUML_JAR_PATH = +PLANTUML_CFG_FILE = +PLANTUML_INCLUDE_PATH = +DIA_PATH = diff --git a/doc/VERIFICATION.md b/doc/VERIFICATION.md new file mode 100644 index 00000000..17a02398 --- /dev/null +++ b/doc/VERIFICATION.md @@ -0,0 +1,176 @@ +# Verification Guide + +`vatomic` ships a Boogie-based pipeline that checks each atomic primitive +against the [VSync Memory Model][]. The verification assets live under `verify/` +and power the `build_boogie_*` targets exposed by CMake. 
+
+[VSync Memory Model]: ../vmm.cat
+[VMM]: ../vmm.cat
+
+## Dependencies
+
+The verification targets are available when configuring the project with
+`-DVATOMIC_TESTS=ON` (the default for top-level builds). Locally you will need:
+
+- CMake (>= 3.16) and a build backend (Ninja or Make)
+- A Rust toolchain (`cargo`) to build the assembly parser/generator in
+  `verify/src`
+- Boogie CLI (`boogie`) and its SMT solver dependency (Z3)
+- Cross compilers: `aarch64-linux-gnu-gcc` (ARMv8) and
+  `riscv64-linux-gnu-gcc` (RISC-V)
+
+### Details
+
+- Boogie support for irreducible control flow (needed for several atomics)
+  only landed in release [**3.5.5**][] (see our [contribution to
+  upstream][]). Stick to that version or newer when running the pipeline
+  outside the CI image.
+- Z3 results varied wildly with old releases. Use at least the version baked
+  into `.github/Dockerfile.boogie` (currently **z3-solver 4.13.0.0** from
+  PyPI) to match CI behaviour.
+- The cross-compilers shipped with Ubuntu 24.04 (GCC **13.2** series, as
+  installed in `.github/Dockerfile.boogie`) are the minimum known-good
+  versions. Older Ubuntu toolchains could not inline the builtin assembly
+  that powers the verification flow.
+
+[contribution to upstream]: https://github.com/boogie-org/boogie/pull/1032
+[**3.5.5**]: https://github.com/boogie-org/boogie/releases/tag/v3.5.5
+
+### Using the CICD container
+
+CI uses the container image `ghcr.io/open-s4c/vatomic/boogie-ci:latest`,
+which bundles all of the above. You can pull it locally if you prefer a
+hermetic environment.
+
+```sh
+docker pull ghcr.io/open-s4c/vatomic/boogie-ci:latest
+```
+
+Then you can start the container, mounting the vatomic directory, as follows:
+
+```sh
+cd vatomic
+docker run -it --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u):$(id -g) \
+    ghcr.io/open-s4c/vatomic/boogie-ci:latest
+```
+
+Your current `vatomic` directory is mounted over the exact same path in the
+container (`-v` flag), and you start with the workdir set to that directory
+(`-w` flag). The user and group IDs also match those of the host (`-u` flag),
+so any files you create keep the right ownership after you leave the
+container.
+
+Inside the container, you can follow the instructions in the next section.
+
+---
+
+## Running the verification suite
+
+1. Configure the project and enable `VATOMIC_TESTS` targets:
+
+   ```sh
+   cmake -S . -B build -DVATOMIC_TESTS=ON
+   ```
+
+2. Build one (or all) Boogie targets. Available options are `lse`, `llsc`,
+   `lxe`, and `no_polite_await`.
+
+   ```sh
+   cmake --build build --target build_boogie_lse
+   ```
+
+   Each target:
+
+   - Preprocesses `vsync/atomic.h` with the chosen compiler flags to obtain
+     `atomic.s` (and optionally sanitises it via `cleaner.sh`).
+   - Runs the Rust generator (`cargo run` via `generate.sh`) to parse the
+     assembly and emit Boogie (`.bpl`) files.
+   - Instantiates the templates under `verify/boogie/templates`.
+   - Executes Boogie to prove each atomic operation (driven by `verify.sh`).
+
+3. Inspect results via CTest or directly in the generated logs:
+
+   ```sh
+   ctest --test-dir build/verify -R lse --output-on-failure
+   # Logs: build/verify/lse/*.log
+   ```
+
+### Customising the toolchain
+
+- Pass `-DARMV8_CMAKE_C_COMPILER=/path/to/aarch64-linux-gnu-gcc` or
+  `-DRISCV_CMAKE_C_COMPILER=/path/to/riscv64-linux-gnu-gcc` to override the
+  default cross compilers.
+- Set `BPL`/`Z3_EXE` or adjust `PATH` so Boogie and Z3 are discoverable.
+- Export `CARGO` to point at a specific `cargo` binary when multiple toolchains
+  coexist on the system.
+
+---
+
+## Maintaining the function list
+
+The lists of verified entry points live in `verify/lists`. When adding new
+atomic primitives, update the corresponding list so the verification targets
+cover the new functions. Refer to the [Boogie templates](#boogie-templates)
+section for the available templates and how they map to atomic behaviour.
+
+---
+
+## Boogie templates
+
+Each `.bpl` template describes some aspect of correctness for the supported
+atomics. For each atomic, one or more templates are instantiated to fully
+specify the proof obligations. The templates are:
+
+- `read_only` — describes atomics that never write, e.g., `read`, `await`
+  (non-RMW)
+- `read` — describes atomics that perform a read, e.g., `RMW`, `read`, `await`,
+  `await-RMW`
+- `write` — describes `write` atomics
+- `await` — describes `await` and `await-RMW` atomics
+- `rmw` — describes atomics performing a read-modify-write operation, e.g.,
+  `RMW` and `await-RMW`
+
+### Template parameters
+
+Each template uses one or more of the following parameters, written as `#NAME`:
+
+- `#registers` — comma separated identifiers specifying all registers used in
+  the function, e.g., `a0, a1, a2, x5, x6`.
+- `#address` — identifier specifying the register that originally holds the
+  address, e.g., `a0`.
+- `#input1` — identifier for the register holding the first function argument,
+  e.g., for `write(a, v)` the value of `v`, or for `cmpxchg(a, e, v)` the value
+  of `e`. Could be `a1`.
+- `#input2` — second function argument. Could be `a2`.
+- `#output` — register holding the output value at the end of the function,
+  e.g., `a0`.
+- `#implementation` — body of the function, including assumes for all procedure
+  parameters, code, and two basic loop invariants.
+- `#state` — comma separated list of identifiers for all additional state
+  introduced by the architecture, e.g., `local_monitor, monitor_exclusive`.
+
+### Template matrix
+
+List of templates and their functions:
+
+| template     | write number | return value | store order | load order |
+|--------------|--------------|--------------|-------------|------------|
+| `read_only`  | 0            | output       | –           | –          |
+| `read`       | –            | ret          | –           | yes        |
+| `write`      | ≤1           | –            | yes         | –          |
+| `await`      | –            | await cond   | –           | –          |
+| `must_store` | 1 (+value)   | –            | yes         | –          |
+| `rmw`        | (+op)        | –            | yes         | –          |
+
+List of templates used by each atomic:
+
+| template     | read | write | RMW | await | await RMW |
+|--------------|------|-------|-----|-------|-----------|
+| `read_only`  | x    |       |     | x     |           |
+| `read`       | x    |       | x   | x     | x         |
+| `write`      |      | x     | x   |       | x         |
+| `await`      |      |       |     | x     | x         |
+| `must_store` |      | x     |     |       |           |
+| `rmw`        |      |       | x   |       | x         |
+
+---
diff --git a/doc/api/vsync/README.md b/doc/api/vsync/README.md
new file mode 100644 index 00000000..6b3ba024 --- /dev/null +++ b/doc/api/vsync/README.md @@ -0,0 +1,23 @@
+# vsync
+_vatomic is a header library of atomic operations, supporting mainstream architectures: ARMv7, ARMv8 (AArch32 and AArch64), RISC-V, and x86_64._
+
+The memory ordering guarantees provided by the atomic interface are formally described in the VSync Memory Model (VMM) file.
+
+---
+## File Index
+
+| File|Description|
+| --- | --- |
+| [vsync/atomic.h](atomic.h.md)|Rich interface of atomic operations and fences. |
+
+---
+## Directory Index
+
+| Directory|Description|
+| --- | --- |
+| [vsync/atomic](atomic/README.md)|Rich interface of atomic operations and fences. |
+
+---
diff --git a/doc/api/vsync/atomic.h.md b/doc/api/vsync/atomic.h.md
new file mode 100644 index 00000000..753d6aed --- /dev/null +++ b/doc/api/vsync/atomic.h.md @@ -0,0 +1,14 @@
+# [vsync](README.md) / atomic.h
+_Rich interface of atomic operations and fences._
+
+Atomics are implemented in custom assembly or compiler builtins.
+
+- check [core.h](atomic/core.h.md) for the documentation of the core interface.
+- check [await.h](atomic/await.h.md) for the documentation of the await interface.
+- check [config.h](atomic/config.h.md) to learn about the available libvsync configurations.
+- check [atomic/dispatch.h](atomic/dispatch.h.md) for an additional dispatch layer that allows for a more flexible use of the interface.
+
+---
diff --git a/doc/api/vsync/atomic/README.md b/doc/api/vsync/atomic/README.md
new file mode 100644 index 00000000..8a72fde0 --- /dev/null +++ b/doc/api/vsync/atomic/README.md @@ -0,0 +1,123 @@
+# [vsync](../README.md) / atomic
+_Rich interface of atomic operations and fences._
+
+VSync atomics (VAtomics) implements a rich interface of atomic operations and fences. It employs efficient custom assembly for supported architectures, and falls back to compiler builtins otherwise.
+
+### Atomic types
+
+[core.h](core.h.md) implements the following atomic types:
+
+| Atomic type (A) |Related type (T) |
+| --- | --- |
+| vatomic8_t |vuint8_t |
+| vatomic16_t |vuint16_t |
+| vatomic32_t |vuint32_t |
+| vatomic64_t |vuint64_t |
+| vatomicptr_t |void* |
+
+Functions are always prefixed with the atomic type, e.g., [vatomic32_read()](core_u32.h.md#function-vatomic32_read), [vatomic64_read()](core_u64.h.md#function-vatomic64_read), [vatomicptr_read()](core_ptr.h.md#function-vatomicptr_read). Arguments or return values typically are of related types, e.g., [`vatomic64_write(vatomic64_t *a, vuint64_t v)`](core_u64.h.md#function-vatomic64_write).
+
+The types `vuint32_t` and `vuint64_t` map to equivalent types from `<stdint.h>` unless VSYNC_ENABLE_FREESTANDING is defined.
+
+Note that a few functions are not defined for vatomicptr_t, e.g., add, sub, etc.
+
+### Memory orders
+
+By default, atomic functions are _seq_cst_ (sequentially consistent). To specify another memory order, add the corresponding suffix to the function name:
+
+| Mode |Suffix |
+| --- | --- |
+| acquire |`_acq` |
+| release |`_rel` |
+| relaxed |`_rlx` |
+
+> **Note:** Not all functions support every memory order. See the function documentation for more information.
+
+### Components
+
+VSync atomics are divided into several components.
+
+For the declaration and definition of the core atomic operations and fences, include [core.h](core.h.md) (as in `#include <`[`vsync/atomic/core.h`](core.h.md)`>`).
+
+For a set of advanced atomic operations optimized for politely waiting (spinning), include [await.h](await.h.md).
+
+For readability and ease of use, include [dispatch.h](dispatch.h.md). It introduces several macro dispatchers prefixed with vatomic_. These dispatchers map to the call to the respective vatomic8_, vatomic16_, vatomic32_, or vatomic64_ functions depending on the type/size of the arguments. For example, [vatomic_read(a)](dispatch.h.md#macro-vatomic_read) is the same as vatomic32_read(a) if the type of `a` is vatomic32_t.
+
+More advanced stamped and marked atomic pointers are available in atomicptr_stamped.h and atomicptr_markable.h.
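The dispatch layer is easiest to see in code. The sketch below is an editor-added illustration (not part of the generated documentation); it assumes only the names introduced above, i.e., the `vatomic_read`/`vatomic_write` dispatchers from dispatch.h and the `_acq`/`_rel` suffixed variants from core.h:

```c
#include <vsync/atomic.h>
#include <vsync/atomic/dispatch.h>

vatomic32_t a32;
vatomic64_t a64;

void
dispatch_example(void)
{
    /* The same macro name dispatches on the argument type. */
    vatomic_write(&a32, 1U); /* expands to vatomic32_write(&a32, 1U) */
    vatomic_write(&a64, 1U); /* expands to vatomic64_write(&a64, 1U) */
    vuint32_t v = vatomic_read(&a32); /* same as vatomic32_read(&a32) */

    /* Memory-order suffixes select a weaker barrier than the
     * seq_cst default. */
    vatomic32_write_rel(&a32, v + 1);
    (void)vatomic64_read_acq(&a64);
}
```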
+More advanced stamped and marked atomic pointers are available in atomicptr_stamped.h and atomicptr_markable.h.
+
+### Implementation variants
+
+Optimized atomic implementations are available for ARMv7 (32 bits) and ARMv8 (32 and 64 bits). For all other architectures, including x86_64, the fallback is `__atomic` compiler builtins. To force the use of compiler builtins, define VATOMIC_BUILTINS.
+
+For further configuration flags, please refer to [config.h](config.h.md).
+
+
+### Example:
+
+
+
+```c
+#include <vsync/atomic.h>
+#include <assert.h>
+#include <stdio.h>
+
+vatomic32_t var;
+vatomicptr_t ptr;
+int x;
+
+void
+foo(void)
+{
+    vatomic32_write(&var, 1000);
+    vatomic32_add(&var, 10);
+    vuint32_t val = vatomic32_read(&var);
+    assert(val == 1010);
+    assert(vatomic32_cmpxchg(&var, val, 0) == val);
+
+    x = 123;
+    vatomicptr_write(&ptr, &x);
+    int *y = vatomicptr_read(&ptr);
+    (*y)++;
+    assert(*y == x);
+
+    printf("passed\n");
+}
+
+int
+main(void)
+{
+    foo();
+}
+```
+
+
+
+---
+## File Index
+
+
+| File|Description|
+| --- | --- |
+| [vsync/atomic/await.h](await.h.md)|Atomic await functions. |
+| [vsync/atomic/await_ptr.h](await_ptr.h.md)|Atomic await functions for vatomicptr_t variables. |
+| [vsync/atomic/await_u32.h](await_u32.h.md)|Atomic await functions for vatomic32_t variables. |
+| [vsync/atomic/await_u64.h](await_u64.h.md)|Atomic await functions for vatomic64_t variables. |
+| [vsync/atomic/config.h](config.h.md)|Global configuration of vatomics. |
+| [vsync/atomic/core.h](core.h.md)|Atomic types, core atomic operations and fences. |
+| [vsync/atomic/core_ptr.h](core_ptr.h.md)|Atomic functions for vatomicptr_t variables. |
+| [vsync/atomic/core_sz.h](core_sz.h.md)|Atomic functions for vatomicsz_t variables. |
+| [vsync/atomic/core_u16.h](core_u16.h.md)|Atomic functions for vatomic16_t variables. |
+| [vsync/atomic/core_u32.h](core_u32.h.md)|Atomic functions for vatomic32_t variables. |
+| [vsync/atomic/core_u64.h](core_u64.h.md)|Atomic functions for vatomic64_t variables. |
+| [vsync/atomic/core_u8.h](core_u8.h.md)|Atomic functions for vatomic8_t variables. |
+| [vsync/atomic/dispatch.h](dispatch.h.md)|Set of macros to dispatch atomic functions. |
+
+
+---
diff --git a/doc/api/vsync/atomic/await.h.md b/doc/api/vsync/atomic/await.h.md
new file mode 100644
index 00000000..c6882d3a
--- /dev/null
+++ b/doc/api/vsync/atomic/await.h.md
@@ -0,0 +1,72 @@
+# [vsync](../README.md) / [atomic](README.md) / await.h
+_Atomic await functions._
+
+Await functions are used to politely await a condition on an atomic variable. Moreover, some await functions also perform a write to memory at the end of the spinning. Functions of the form `await_COND` only await a condition, and functions of the form `await_COND_OP` await and perform a write operation.
+
+### Supported conditions:
+
+
+
+| COND |Condition |Atomic types |
+| --- | --- | --- |
+| lt |(*a < v) |vatomic32_t, vatomic64_t |
+| le |(*a <= v) |vatomic32_t, vatomic64_t |
+| gt |(*a > v) |vatomic32_t, vatomic64_t |
+| ge |(*a >= v) |vatomic32_t, vatomic64_t |
+| eq |(*a == v) |vatomic32_t, vatomic64_t, vatomicptr_t |
+| neq |(*a != v) |vatomic32_t, vatomic64_t, vatomicptr_t |
+
+
+
+The following example waits for the pointer me->next to be different than NULL. The variable next contains the value that satisfied the condition. The operation has an acquire barrier.
+
+
+
+```c
+node_t *next = vatomicptr_await_neq_acq(&me->next, NULL);
+```
+
+
+
+### Supported operations:
+
+
+
+| OP |Operation |Atomic types |
+| --- | --- | --- |
+| add |*a += v |vatomic32_t, vatomic64_t |
+| sub |*a -= v |vatomic32_t, vatomic64_t |
+| set |*a = v |vatomic32_t, vatomic64_t, vatomicptr_t |
+
+
+
+The following example waits for the pointer me->next to be equal to pred. Once the condition is met, it writes NULL to me->next. The variable next contains the value that satisfied the condition. The operation has a release barrier.
+
+
+
+```c
+node_t *next = vatomicptr_await_eq_set_rel(&me->next, pred, NULL);
+```
+
+
+
+### Return value
+
+The return value is (typically) the first read value that satisfies the condition. The exception is functions with the `eq` condition: those return the last read value different from the expected value (if any). This twist can be used to identify whether the operation had to spin at all.
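+
+As a brief illustration of this idiom, the following sketch spins on a
+hypothetical vatomic32_t flag that another thread eventually sets to 1. It
+assumes that when no spinning was necessary the returned value equals the
+expected one:
+
+```c
+vatomic32_t flag;
+
+void
+wait_for_flag(void)
+{
+    /* for eq, the return value is the last value read before the
+     * condition held */
+    vuint32_t prev = vatomic32_await_eq_acq(&flag, 1);
+    if (prev != 1) {
+        /* at least one read observed a value != 1, i.e., we spun */
+    }
+}
+```
+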
+### Detailed documentation:
+
+The declaration and documentation of all await operations is split into files based on the atomic type operated on by the function as follows:
+
+
+
+| File |Type |
+| --- | --- |
+| [await_u32.h](await_u32.h.md) |vatomic32_t and vuint32_t |
+| [await_u64.h](await_u64.h.md) |vatomic64_t and vuint64_t |
+| [await_ptr.h](await_ptr.h.md) |vatomicptr_t and void * |
+
+
+
+
+---
diff --git a/doc/api/vsync/atomic/await_ptr.h.md b/doc/api/vsync/atomic/await_ptr.h.md
new file mode 100644
index 00000000..85e91a15
--- /dev/null
+++ b/doc/api/vsync/atomic/await_ptr.h.md
@@ -0,0 +1,349 @@
+# [vsync](../README.md) / [atomic](README.md) / await_ptr.h
+_Atomic await functions for vatomicptr_t variables._
+
+This file declares and documents the atomic await functions operating on vatomicptr_t variables.
+
+Please do not include this file directly, instead use:
+
+```c
+#include <vsync/atomic/await.h>
+```
+
+
+
+---
+# Functions
+
+| Function | Description |
+|---|---|
+| [vatomicptr_await_neq](await_ptr.h.md#function-vatomicptr_await_neq) | Politely awaits the value pointed by a to be different than v. |
+| [vatomicptr_await_neq_acq](await_ptr.h.md#function-vatomicptr_await_neq_acq) | Politely awaits the value pointed by a to be different than v. |
+| [vatomicptr_await_neq_rlx](await_ptr.h.md#function-vatomicptr_await_neq_rlx) | Politely awaits the value pointed by a to be different than v. |
+| [vatomicptr_await_eq](await_ptr.h.md#function-vatomicptr_await_eq) | Politely awaits the value pointed by a to be equal to v. |
+| [vatomicptr_await_eq_acq](await_ptr.h.md#function-vatomicptr_await_eq_acq) | Politely awaits the value pointed by a to be equal to v. |
+| [vatomicptr_await_eq_rlx](await_ptr.h.md#function-vatomicptr_await_eq_rlx) | Politely awaits the value pointed by a to be equal to v. |
+| [vatomicptr_await_eq_set](await_ptr.h.md#function-vatomicptr_await_eq_set) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. |
+| [vatomicptr_await_eq_set_acq](await_ptr.h.md#function-vatomicptr_await_eq_set_acq) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. |
+| [vatomicptr_await_eq_set_rel](await_ptr.h.md#function-vatomicptr_await_eq_set_rel) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. |
+| [vatomicptr_await_eq_set_rlx](await_ptr.h.md#function-vatomicptr_await_eq_set_rlx) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. |
+| [vatomicptr_await_neq_set](await_ptr.h.md#function-vatomicptr_await_neq_set) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. |
+| [vatomicptr_await_neq_set_acq](await_ptr.h.md#function-vatomicptr_await_neq_set_acq) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. |
+| [vatomicptr_await_neq_set_rel](await_ptr.h.md#function-vatomicptr_await_neq_set_rel) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. |
+| [vatomicptr_await_neq_set_rlx](await_ptr.h.md#function-vatomicptr_await_neq_set_rlx) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. |
+
+## Function `vatomicptr_await_neq`
+
+```c
+static void* vatomicptr_await_neq(const vatomicptr_t *a, void *v)
+```
+_Politely awaits the value pointed by a to be different than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomicptr_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomicptr_await_neq_acq`
+
+```c
+static void* vatomicptr_await_neq_acq(const vatomicptr_t *a, void *v)
+```
+_Politely awaits the value pointed by a to be different than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomicptr_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomicptr_await_neq_rlx`
+
+```c
+static void* vatomicptr_await_neq_rlx(const vatomicptr_t *a, void *v)
+```
+_Politely awaits the value pointed by a to be different than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomicptr_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomicptr_await_eq`
+
+```c
+static void* vatomicptr_await_eq(const vatomicptr_t *a, void *v)
+```
+_Politely awaits the value pointed by a to be equal to v._
+
+
+The return value is the last value read before the condition is satisfied, as if read with vatomicptr_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** last value before satisfying condition
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomicptr_await_eq_acq`
+
+```c
+static void* vatomicptr_await_eq_acq(const vatomicptr_t *a, void *v)
+```
+_Politely awaits the value pointed by a to be equal to v._
+
+
+The return value is the last value read before the condition is satisfied, as if read with vatomicptr_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** last value before satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomicptr_await_eq_rlx`
+
+```c
+static void* vatomicptr_await_eq_rlx(const vatomicptr_t *a, void *v)
+```
+_Politely awaits the value pointed by a to be equal to v._
+
+
+The return value is the last value read before the condition is satisfied, as if read with vatomicptr_read(a).
+ + + +**Parameters:** + +- `a`: atomic variable +- `v`: value to compare + + +**Returns:** last value before satisfying condition + +**Memory ordering**: relaxed + + +## Function `vatomicptr_await_eq_set` + +```c +static void* vatomicptr_await_eq_set(vatomicptr_t *a, void *c, void *v) +``` +_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomicptr_await_eq_set_acq` + +```c +static void* vatomicptr_await_eq_set_acq(vatomicptr_t *a, void *c, void *v) +``` +_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomicptr_await_eq_set_rel` + +```c +static void* vatomicptr_await_eq_set_rel(vatomicptr_t *a, void *c, void *v) +``` +_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomicptr_await_eq_set_rlx` + +```c +static void* vatomicptr_await_eq_set_rlx(vatomicptr_t *a, void *c, void *v) +``` +_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomicptr_await_neq_set` + +```c +static void* vatomicptr_await_neq_set(vatomicptr_t *a, void *c, void *v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomicptr_await_neq_set_acq` + +```c +static void* vatomicptr_await_neq_set_acq(vatomicptr_t *a, void *c, void *v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomicptr_await_neq_set_rel` + +```c +static void* vatomicptr_await_neq_set_rel(vatomicptr_t *a, void *c, void *v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomicptr_await_neq_set_rlx` + +```c +static void* vatomicptr_await_neq_set_rlx(vatomicptr_t *a, void *c, void *v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + + +--- diff --git 
a/doc/api/vsync/atomic/await_u32.h.md b/doc/api/vsync/atomic/await_u32.h.md
new file mode 100644
index 00000000..ddac30be
--- /dev/null
+++ b/doc/api/vsync/atomic/await_u32.h.md
@@ -0,0 +1,2109 @@
+# [vsync](../README.md) / [atomic](README.md) / await_u32.h
+_Atomic await functions for vatomic32_t variables._
+
+This file declares and documents the atomic await functions operating on vatomic32_t variables.
+
+Please do not include this file directly, instead use:
+
+```c
+#include <vsync/atomic/await.h>
+```
+
+
+
+---
+# Functions
+
+| Function | Description |
+|---|---|
+| [vatomic32_await_lt](await_u32.h.md#function-vatomic32_await_lt) | Politely awaits the value pointed by a to be less than v. |
+| [vatomic32_await_lt_acq](await_u32.h.md#function-vatomic32_await_lt_acq) | Politely awaits the value pointed by a to be less than v. |
+| [vatomic32_await_lt_rlx](await_u32.h.md#function-vatomic32_await_lt_rlx) | Politely awaits the value pointed by a to be less than v. |
+| [vatomic32_await_le](await_u32.h.md#function-vatomic32_await_le) | Politely awaits the value pointed by a to be less than or equal to v. |
+| [vatomic32_await_le_acq](await_u32.h.md#function-vatomic32_await_le_acq) | Politely awaits the value pointed by a to be less than or equal to v. |
+| [vatomic32_await_le_rlx](await_u32.h.md#function-vatomic32_await_le_rlx) | Politely awaits the value pointed by a to be less than or equal to v. |
+| [vatomic32_await_gt](await_u32.h.md#function-vatomic32_await_gt) | Politely awaits the value pointed by a to be greater than v. |
+| [vatomic32_await_gt_acq](await_u32.h.md#function-vatomic32_await_gt_acq) | Politely awaits the value pointed by a to be greater than v. |
+| [vatomic32_await_gt_rlx](await_u32.h.md#function-vatomic32_await_gt_rlx) | Politely awaits the value pointed by a to be greater than v. |
+| [vatomic32_await_ge](await_u32.h.md#function-vatomic32_await_ge) | Politely awaits the value pointed by a to be greater than or equal to v. |
+| [vatomic32_await_ge_acq](await_u32.h.md#function-vatomic32_await_ge_acq) | Politely awaits the value pointed by a to be greater than or equal to v. |
+| [vatomic32_await_ge_rlx](await_u32.h.md#function-vatomic32_await_ge_rlx) | Politely awaits the value pointed by a to be greater than or equal to v. |
+| [vatomic32_await_neq](await_u32.h.md#function-vatomic32_await_neq) | Politely awaits the value pointed by a to be different than v. |
+| [vatomic32_await_neq_acq](await_u32.h.md#function-vatomic32_await_neq_acq) | Politely awaits the value pointed by a to be different than v. |
+| [vatomic32_await_neq_rlx](await_u32.h.md#function-vatomic32_await_neq_rlx) | Politely awaits the value pointed by a to be different than v. |
+| [vatomic32_await_eq](await_u32.h.md#function-vatomic32_await_eq) | Politely awaits the value pointed by a to be equal to v. |
+| [vatomic32_await_eq_acq](await_u32.h.md#function-vatomic32_await_eq_acq) | Politely awaits the value pointed by a to be equal to v. |
+| [vatomic32_await_eq_rlx](await_u32.h.md#function-vatomic32_await_eq_rlx) | Politely awaits the value pointed by a to be equal to v. |
+| [vatomic32_await_eq_add](await_u32.h.md#function-vatomic32_await_eq_add) | Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied. |
+| [vatomic32_await_eq_add_acq](await_u32.h.md#function-vatomic32_await_eq_add_acq) | Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied. 
| +| [vatomic32_await_eq_add_rel](await_u32.h.md#function-vatomic32_await_eq_add_rel) | Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied. | +| [vatomic32_await_eq_add_rlx](await_u32.h.md#function-vatomic32_await_eq_add_rlx) | Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied. | +| [vatomic32_await_eq_sub](await_u32.h.md#function-vatomic32_await_eq_sub) | Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_eq_sub_acq](await_u32.h.md#function-vatomic32_await_eq_sub_acq) | Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_eq_sub_rel](await_u32.h.md#function-vatomic32_await_eq_sub_rel) | Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_eq_sub_rlx](await_u32.h.md#function-vatomic32_await_eq_sub_rlx) | Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_eq_set](await_u32.h.md#function-vatomic32_await_eq_set) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. | +| [vatomic32_await_eq_set_acq](await_u32.h.md#function-vatomic32_await_eq_set_acq) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. | +| [vatomic32_await_eq_set_rel](await_u32.h.md#function-vatomic32_await_eq_set_rel) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. | +| [vatomic32_await_eq_set_rlx](await_u32.h.md#function-vatomic32_await_eq_set_rlx) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. | +| [vatomic32_await_neq_add](await_u32.h.md#function-vatomic32_await_neq_add) | Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_neq_add_acq](await_u32.h.md#function-vatomic32_await_neq_add_acq) | Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_neq_add_rel](await_u32.h.md#function-vatomic32_await_neq_add_rel) | Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_neq_add_rlx](await_u32.h.md#function-vatomic32_await_neq_add_rlx) | Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_neq_sub](await_u32.h.md#function-vatomic32_await_neq_sub) | Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_neq_sub_acq](await_u32.h.md#function-vatomic32_await_neq_sub_acq) | Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_neq_sub_rel](await_u32.h.md#function-vatomic32_await_neq_sub_rel) | Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_neq_sub_rlx](await_u32.h.md#function-vatomic32_await_neq_sub_rlx) | Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied. 
| +| [vatomic32_await_neq_set](await_u32.h.md#function-vatomic32_await_neq_set) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_neq_set_acq](await_u32.h.md#function-vatomic32_await_neq_set_acq) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_neq_set_rel](await_u32.h.md#function-vatomic32_await_neq_set_rel) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_neq_set_rlx](await_u32.h.md#function-vatomic32_await_neq_set_rlx) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_lt_add](await_u32.h.md#function-vatomic32_await_lt_add) | Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_lt_add_acq](await_u32.h.md#function-vatomic32_await_lt_add_acq) | Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_lt_add_rel](await_u32.h.md#function-vatomic32_await_lt_add_rel) | Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_lt_add_rlx](await_u32.h.md#function-vatomic32_await_lt_add_rlx) | Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_lt_sub](await_u32.h.md#function-vatomic32_await_lt_sub) | Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_lt_sub_acq](await_u32.h.md#function-vatomic32_await_lt_sub_acq) | Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_lt_sub_rel](await_u32.h.md#function-vatomic32_await_lt_sub_rel) | Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_lt_sub_rlx](await_u32.h.md#function-vatomic32_await_lt_sub_rlx) | Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_lt_set](await_u32.h.md#function-vatomic32_await_lt_set) | Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_lt_set_acq](await_u32.h.md#function-vatomic32_await_lt_set_acq) | Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_lt_set_rel](await_u32.h.md#function-vatomic32_await_lt_set_rel) | Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_lt_set_rlx](await_u32.h.md#function-vatomic32_await_lt_set_rlx) | Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_le_add](await_u32.h.md#function-vatomic32_await_le_add) | Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic32_await_le_add_acq](await_u32.h.md#function-vatomic32_await_le_add_acq) | Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied. 
| +| [vatomic32_await_le_add_rel](await_u32.h.md#function-vatomic32_await_le_add_rel) | Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic32_await_le_add_rlx](await_u32.h.md#function-vatomic32_await_le_add_rlx) | Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic32_await_le_sub](await_u32.h.md#function-vatomic32_await_le_sub) | Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_le_sub_acq](await_u32.h.md#function-vatomic32_await_le_sub_acq) | Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_le_sub_rel](await_u32.h.md#function-vatomic32_await_le_sub_rel) | Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_le_sub_rlx](await_u32.h.md#function-vatomic32_await_le_sub_rlx) | Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_le_set](await_u32.h.md#function-vatomic32_await_le_set) | Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied. | +| [vatomic32_await_le_set_acq](await_u32.h.md#function-vatomic32_await_le_set_acq) | Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied. | +| [vatomic32_await_le_set_rel](await_u32.h.md#function-vatomic32_await_le_set_rel) | Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied. | +| [vatomic32_await_le_set_rlx](await_u32.h.md#function-vatomic32_await_le_set_rlx) | Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied. | +| [vatomic32_await_gt_add](await_u32.h.md#function-vatomic32_await_gt_add) | Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_gt_add_acq](await_u32.h.md#function-vatomic32_await_gt_add_acq) | Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_gt_add_rel](await_u32.h.md#function-vatomic32_await_gt_add_rel) | Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_gt_add_rlx](await_u32.h.md#function-vatomic32_await_gt_add_rlx) | Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied. | +| [vatomic32_await_gt_sub](await_u32.h.md#function-vatomic32_await_gt_sub) | Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_gt_sub_acq](await_u32.h.md#function-vatomic32_await_gt_sub_acq) | Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_gt_sub_rel](await_u32.h.md#function-vatomic32_await_gt_sub_rel) | Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied. 
| +| [vatomic32_await_gt_sub_rlx](await_u32.h.md#function-vatomic32_await_gt_sub_rlx) | Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_gt_set](await_u32.h.md#function-vatomic32_await_gt_set) | Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_gt_set_acq](await_u32.h.md#function-vatomic32_await_gt_set_acq) | Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_gt_set_rel](await_u32.h.md#function-vatomic32_await_gt_set_rel) | Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_gt_set_rlx](await_u32.h.md#function-vatomic32_await_gt_set_rlx) | Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied. | +| [vatomic32_await_ge_add](await_u32.h.md#function-vatomic32_await_ge_add) | Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic32_await_ge_add_acq](await_u32.h.md#function-vatomic32_await_ge_add_acq) | Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic32_await_ge_add_rel](await_u32.h.md#function-vatomic32_await_ge_add_rel) | Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic32_await_ge_add_rlx](await_u32.h.md#function-vatomic32_await_ge_add_rlx) | Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic32_await_ge_sub](await_u32.h.md#function-vatomic32_await_ge_sub) | Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_ge_sub_acq](await_u32.h.md#function-vatomic32_await_ge_sub_acq) | Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_ge_sub_rel](await_u32.h.md#function-vatomic32_await_ge_sub_rel) | Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_ge_sub_rlx](await_u32.h.md#function-vatomic32_await_ge_sub_rlx) | Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic32_await_ge_set](await_u32.h.md#function-vatomic32_await_ge_set) | Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied. | +| [vatomic32_await_ge_set_acq](await_u32.h.md#function-vatomic32_await_ge_set_acq) | Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied. | +| [vatomic32_await_ge_set_rel](await_u32.h.md#function-vatomic32_await_ge_set_rel) | Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied. | +| [vatomic32_await_ge_set_rlx](await_u32.h.md#function-vatomic32_await_ge_set_rlx) | Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied. 
|
+
+## Function `vatomic32_await_lt`
+
+```c
+static vuint32_t vatomic32_await_lt(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be less than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic32_await_lt_acq`
+
+```c
+static vuint32_t vatomic32_await_lt_acq(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be less than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic32_await_lt_rlx`
+
+```c
+static vuint32_t vatomic32_await_lt_rlx(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be less than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic32_await_le`
+
+```c
+static vuint32_t vatomic32_await_le(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be less than or equal to v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic32_await_le_acq`
+
+```c
+static vuint32_t vatomic32_await_le_acq(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be less than or equal to v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic32_await_le_rlx`
+
+```c
+static vuint32_t vatomic32_await_le_rlx(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be less than or equal to v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic32_await_gt`
+
+```c
+static vuint32_t vatomic32_await_gt(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be greater than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic32_await_gt_acq`
+
+```c
+static vuint32_t vatomic32_await_gt_acq(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be greater than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic32_await_gt_rlx`
+
+```c
+static vuint32_t vatomic32_await_gt_rlx(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be greater than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic32_await_ge`
+
+```c
+static vuint32_t vatomic32_await_ge(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be greater than or equal to v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic32_await_ge_acq`
+
+```c
+static vuint32_t vatomic32_await_ge_acq(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be greater than or equal to v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic32_await_ge_rlx`
+
+```c
+static vuint32_t vatomic32_await_ge_rlx(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be greater than or equal to v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic32_await_neq`
+
+```c
+static vuint32_t vatomic32_await_neq(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be different than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic32_await_neq_acq`
+
+```c
+static vuint32_t vatomic32_await_neq_acq(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be different than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic32_await_neq_rlx`
+
+```c
+static vuint32_t vatomic32_await_neq_rlx(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be different than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic32_await_eq`
+
+```c
+static vuint32_t vatomic32_await_eq(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be equal to v._
+
+
+The return value is the last value read before the condition is satisfied, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** last value before satisfying condition
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic32_await_eq_acq`
+
+```c
+static vuint32_t vatomic32_await_eq_acq(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be equal to v._
+
+
+The return value is the last value read before the condition is satisfied, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** last value before satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic32_await_eq_rlx`
+
+```c
+static vuint32_t vatomic32_await_eq_rlx(const vatomic32_t *a, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be equal to v._
+
+
+The return value is the last value read before the condition is satisfied, as if read with vatomic32_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** last value before satisfying condition
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic32_await_eq_add`
+
+```c
+static vuint32_t vatomic32_await_eq_add(vatomic32_t *a, vuint32_t c, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic32_await_eq_add_acq`
+
+```c
+static vuint32_t vatomic32_await_eq_add_acq(vatomic32_t *a, vuint32_t c, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic32_await_eq_add_rel`
+
+```c
+static vuint32_t vatomic32_await_eq_add_rel(vatomic32_t *a, vuint32_t c, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: release
+
+
+## Function `vatomic32_await_eq_add_rlx`
+
+```c
+static vuint32_t vatomic32_await_eq_add_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic32_await_eq_sub`
+
+```c
+static vuint32_t vatomic32_await_eq_sub(vatomic32_t *a, vuint32_t c, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to 
compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_eq_sub_acq` + +```c +static vuint32_t vatomic32_await_eq_sub_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_eq_sub_rel` + +```c +static vuint32_t vatomic32_await_eq_sub_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_eq_sub_rlx` + +```c +static vuint32_t vatomic32_await_eq_sub_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_eq_set` + +```c +static vuint32_t vatomic32_await_eq_set(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_eq_set_acq` + +```c +static vuint32_t vatomic32_await_eq_set_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_eq_set_rel` + +```c +static vuint32_t vatomic32_await_eq_set_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_eq_set_rlx` + +```c +static vuint32_t vatomic32_await_eq_set_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_neq_add` + +```c +static vuint32_t vatomic32_await_neq_add(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function 
`vatomic32_await_neq_add_acq` + +```c +static vuint32_t vatomic32_await_neq_add_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_neq_add_rel` + +```c +static vuint32_t vatomic32_await_neq_add_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_neq_add_rlx` + +```c +static vuint32_t vatomic32_await_neq_add_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_neq_sub` + +```c +static vuint32_t vatomic32_await_neq_sub(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_neq_sub_acq` + +```c +static vuint32_t vatomic32_await_neq_sub_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_neq_sub_rel` + +```c +static vuint32_t vatomic32_await_neq_sub_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_neq_sub_rlx` + +```c +static vuint32_t vatomic32_await_neq_sub_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_neq_set` + +```c +static vuint32_t vatomic32_await_neq_set(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_neq_set_acq` + +```c +static vuint32_t 
vatomic32_await_neq_set_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_neq_set_rel` + +```c +static vuint32_t vatomic32_await_neq_set_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_neq_set_rlx` + +```c +static vuint32_t vatomic32_await_neq_set_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_lt_add` + +```c +static vuint32_t vatomic32_await_lt_add(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_lt_add_acq` + +```c +static vuint32_t vatomic32_await_lt_add_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_lt_add_rel` + +```c +static vuint32_t vatomic32_await_lt_add_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_lt_add_rlx` + +```c +static vuint32_t vatomic32_await_lt_add_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_lt_sub` + +```c +static vuint32_t vatomic32_await_lt_sub(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_lt_sub_acq` + +```c +static vuint32_t vatomic32_await_lt_sub_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less 
than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_lt_sub_rel` + +```c +static vuint32_t vatomic32_await_lt_sub_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_lt_sub_rlx` + +```c +static vuint32_t vatomic32_await_lt_sub_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_lt_set` + +```c +static vuint32_t vatomic32_await_lt_set(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_lt_set_acq` + +```c +static vuint32_t vatomic32_await_lt_set_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_lt_set_rel` + +```c +static vuint32_t vatomic32_await_lt_set_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_lt_set_rlx` + +```c +static vuint32_t vatomic32_await_lt_set_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_le_add` + +```c +static vuint32_t vatomic32_await_le_add(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_le_add_acq` + +```c +static vuint32_t vatomic32_await_le_add_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- 
`c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_le_add_rel` + +```c +static vuint32_t vatomic32_await_le_add_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_le_add_rlx` + +```c +static vuint32_t vatomic32_await_le_add_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_le_sub` + +```c +static vuint32_t vatomic32_await_le_sub(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_le_sub_acq` + +```c +static vuint32_t vatomic32_await_le_sub_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_le_sub_rel` + +```c +static vuint32_t vatomic32_await_le_sub_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_le_sub_rlx` + +```c +static vuint32_t vatomic32_await_le_sub_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_le_set` + +```c +static vuint32_t vatomic32_await_le_set(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_le_set_acq` + +```c +static vuint32_t vatomic32_await_le_set_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value 
to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_le_set_rel` + +```c +static vuint32_t vatomic32_await_le_set_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_le_set_rlx` + +```c +static vuint32_t vatomic32_await_le_set_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_gt_add` + +```c +static vuint32_t vatomic32_await_gt_add(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_gt_add_acq` + +```c +static vuint32_t vatomic32_await_gt_add_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_gt_add_rel` + +```c +static vuint32_t vatomic32_await_gt_add_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_gt_add_rlx` + +```c +static vuint32_t vatomic32_await_gt_add_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_gt_sub` + +```c +static vuint32_t vatomic32_await_gt_sub(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_gt_sub_acq` + +```c +static vuint32_t vatomic32_await_gt_sub_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function 
`vatomic32_await_gt_sub_rel` + +```c +static vuint32_t vatomic32_await_gt_sub_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_gt_sub_rlx` + +```c +static vuint32_t vatomic32_await_gt_sub_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_gt_set` + +```c +static vuint32_t vatomic32_await_gt_set(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_gt_set_acq` + +```c +static vuint32_t vatomic32_await_gt_set_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_gt_set_rel` + +```c +static vuint32_t vatomic32_await_gt_set_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_gt_set_rlx` + +```c +static vuint32_t vatomic32_await_gt_set_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_ge_add` + +```c +static vuint32_t vatomic32_await_ge_add(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_ge_add_acq` + +```c +static vuint32_t vatomic32_await_ge_add_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_ge_add_rel` + +```c +static vuint32_t 
vatomic32_await_ge_add_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_ge_add_rlx` + +```c +static vuint32_t vatomic32_await_ge_add_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_ge_sub` + +```c +static vuint32_t vatomic32_await_ge_sub(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_ge_sub_acq` + +```c +static vuint32_t vatomic32_await_ge_sub_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_ge_sub_rel` + +```c +static vuint32_t vatomic32_await_ge_sub_rel(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_await_ge_sub_rlx` + +```c +static vuint32_t vatomic32_await_ge_sub_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_await_ge_set` + +```c +static vuint32_t vatomic32_await_ge_set(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_await_ge_set_acq` + +```c +static vuint32_t vatomic32_await_ge_set_acq(vatomic32_t *a, vuint32_t c, vuint32_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_await_ge_set_rel` + +```c +static vuint32_t 
vatomic32_await_ge_set_rel(vatomic32_t *a, vuint32_t c, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: release
+
+
+## Function `vatomic32_await_ge_set_rlx`
+
+```c
+static vuint32_t vatomic32_await_ge_set_rlx(vatomic32_t *a, vuint32_t c, vuint32_t v)
+```
+_Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: relaxed
+
+
+
+---
diff --git a/doc/api/vsync/atomic/await_u64.h.md b/doc/api/vsync/atomic/await_u64.h.md
new file mode 100644
index 00000000..83c48f91
--- /dev/null
+++ b/doc/api/vsync/atomic/await_u64.h.md
@@ -0,0 +1,2109 @@
+# [vsync](../README.md) / [atomic](README.md) / await_u64.h
+_Atomic await functions for vatomic64_t variables._
+
+This file declares and documents the atomic await functions operating on vatomic64_t variables.
+
+Please do not include this file directly, instead use:
+
+```c
+#include <vsync/atomic.h>
+```
+
+
+
+---
+# Functions
+
+| Function | Description |
+|---|---|
+| [vatomic64_await_lt](await_u64.h.md#function-vatomic64_await_lt) | Politely awaits the value pointed by a to be less than v. |
+| [vatomic64_await_lt_acq](await_u64.h.md#function-vatomic64_await_lt_acq) | Politely awaits the value pointed by a to be less than v. |
+| [vatomic64_await_lt_rlx](await_u64.h.md#function-vatomic64_await_lt_rlx) | Politely awaits the value pointed by a to be less than v. |
+| [vatomic64_await_le](await_u64.h.md#function-vatomic64_await_le) | Politely awaits the value pointed by a to be less than or equal to v. |
+| [vatomic64_await_le_acq](await_u64.h.md#function-vatomic64_await_le_acq) | Politely awaits the value pointed by a to be less than or equal to v. |
+| [vatomic64_await_le_rlx](await_u64.h.md#function-vatomic64_await_le_rlx) | Politely awaits the value pointed by a to be less than or equal to v. |
+| [vatomic64_await_gt](await_u64.h.md#function-vatomic64_await_gt) | Politely awaits the value pointed by a to be greater than v. |
+| [vatomic64_await_gt_acq](await_u64.h.md#function-vatomic64_await_gt_acq) | Politely awaits the value pointed by a to be greater than v. |
+| [vatomic64_await_gt_rlx](await_u64.h.md#function-vatomic64_await_gt_rlx) | Politely awaits the value pointed by a to be greater than v. |
+| [vatomic64_await_ge](await_u64.h.md#function-vatomic64_await_ge) | Politely awaits the value pointed by a to be greater than or equal to v. |
+| [vatomic64_await_ge_acq](await_u64.h.md#function-vatomic64_await_ge_acq) | Politely awaits the value pointed by a to be greater than or equal to v. |
+| [vatomic64_await_ge_rlx](await_u64.h.md#function-vatomic64_await_ge_rlx) | Politely awaits the value pointed by a to be greater than or equal to v. |
+| [vatomic64_await_neq](await_u64.h.md#function-vatomic64_await_neq) | Politely awaits the value pointed by a to be different than v. |
+| [vatomic64_await_neq_acq](await_u64.h.md#function-vatomic64_await_neq_acq) | Politely awaits the value pointed by a to be different than v. |
+| [vatomic64_await_neq_rlx](await_u64.h.md#function-vatomic64_await_neq_rlx) | Politely awaits the value pointed by a to be different than v. 
| +| [vatomic64_await_eq](await_u64.h.md#function-vatomic64_await_eq) | Politely awaits the value pointed by a to be equal to v. | +| [vatomic64_await_eq_acq](await_u64.h.md#function-vatomic64_await_eq_acq) | Politely awaits the value pointed by a to be equal to v. | +| [vatomic64_await_eq_rlx](await_u64.h.md#function-vatomic64_await_eq_rlx) | Politely awaits the value pointed by a to be equal to v. | +| [vatomic64_await_eq_add](await_u64.h.md#function-vatomic64_await_eq_add) | Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_eq_add_acq](await_u64.h.md#function-vatomic64_await_eq_add_acq) | Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_eq_add_rel](await_u64.h.md#function-vatomic64_await_eq_add_rel) | Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_eq_add_rlx](await_u64.h.md#function-vatomic64_await_eq_add_rlx) | Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_eq_sub](await_u64.h.md#function-vatomic64_await_eq_sub) | Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_eq_sub_acq](await_u64.h.md#function-vatomic64_await_eq_sub_acq) | Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_eq_sub_rel](await_u64.h.md#function-vatomic64_await_eq_sub_rel) | Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_eq_sub_rlx](await_u64.h.md#function-vatomic64_await_eq_sub_rlx) | Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_eq_set](await_u64.h.md#function-vatomic64_await_eq_set) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. | +| [vatomic64_await_eq_set_acq](await_u64.h.md#function-vatomic64_await_eq_set_acq) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. | +| [vatomic64_await_eq_set_rel](await_u64.h.md#function-vatomic64_await_eq_set_rel) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. | +| [vatomic64_await_eq_set_rlx](await_u64.h.md#function-vatomic64_await_eq_set_rlx) | Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied. | +| [vatomic64_await_neq_add](await_u64.h.md#function-vatomic64_await_neq_add) | Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied. | +| [vatomic64_await_neq_add_acq](await_u64.h.md#function-vatomic64_await_neq_add_acq) | Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied. | +| [vatomic64_await_neq_add_rel](await_u64.h.md#function-vatomic64_await_neq_add_rel) | Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied. | +| [vatomic64_await_neq_add_rlx](await_u64.h.md#function-vatomic64_await_neq_add_rlx) | Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied. 
| +| [vatomic64_await_neq_sub](await_u64.h.md#function-vatomic64_await_neq_sub) | Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_neq_sub_acq](await_u64.h.md#function-vatomic64_await_neq_sub_acq) | Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_neq_sub_rel](await_u64.h.md#function-vatomic64_await_neq_sub_rel) | Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_neq_sub_rlx](await_u64.h.md#function-vatomic64_await_neq_sub_rlx) | Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_neq_set](await_u64.h.md#function-vatomic64_await_neq_set) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. | +| [vatomic64_await_neq_set_acq](await_u64.h.md#function-vatomic64_await_neq_set_acq) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. | +| [vatomic64_await_neq_set_rel](await_u64.h.md#function-vatomic64_await_neq_set_rel) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. | +| [vatomic64_await_neq_set_rlx](await_u64.h.md#function-vatomic64_await_neq_set_rlx) | Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied. | +| [vatomic64_await_lt_add](await_u64.h.md#function-vatomic64_await_lt_add) | Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied. | +| [vatomic64_await_lt_add_acq](await_u64.h.md#function-vatomic64_await_lt_add_acq) | Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied. | +| [vatomic64_await_lt_add_rel](await_u64.h.md#function-vatomic64_await_lt_add_rel) | Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied. | +| [vatomic64_await_lt_add_rlx](await_u64.h.md#function-vatomic64_await_lt_add_rlx) | Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied. | +| [vatomic64_await_lt_sub](await_u64.h.md#function-vatomic64_await_lt_sub) | Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_lt_sub_acq](await_u64.h.md#function-vatomic64_await_lt_sub_acq) | Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_lt_sub_rel](await_u64.h.md#function-vatomic64_await_lt_sub_rel) | Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_lt_sub_rlx](await_u64.h.md#function-vatomic64_await_lt_sub_rlx) | Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_lt_set](await_u64.h.md#function-vatomic64_await_lt_set) | Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied. | +| [vatomic64_await_lt_set_acq](await_u64.h.md#function-vatomic64_await_lt_set_acq) | Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied. 
| +| [vatomic64_await_lt_set_rel](await_u64.h.md#function-vatomic64_await_lt_set_rel) | Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied. | +| [vatomic64_await_lt_set_rlx](await_u64.h.md#function-vatomic64_await_lt_set_rlx) | Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied. | +| [vatomic64_await_le_add](await_u64.h.md#function-vatomic64_await_le_add) | Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_le_add_acq](await_u64.h.md#function-vatomic64_await_le_add_acq) | Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_le_add_rel](await_u64.h.md#function-vatomic64_await_le_add_rel) | Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_le_add_rlx](await_u64.h.md#function-vatomic64_await_le_add_rlx) | Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_le_sub](await_u64.h.md#function-vatomic64_await_le_sub) | Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_le_sub_acq](await_u64.h.md#function-vatomic64_await_le_sub_acq) | Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_le_sub_rel](await_u64.h.md#function-vatomic64_await_le_sub_rel) | Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_le_sub_rlx](await_u64.h.md#function-vatomic64_await_le_sub_rlx) | Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_le_set](await_u64.h.md#function-vatomic64_await_le_set) | Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied. | +| [vatomic64_await_le_set_acq](await_u64.h.md#function-vatomic64_await_le_set_acq) | Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied. | +| [vatomic64_await_le_set_rel](await_u64.h.md#function-vatomic64_await_le_set_rel) | Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied. | +| [vatomic64_await_le_set_rlx](await_u64.h.md#function-vatomic64_await_le_set_rlx) | Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied. | +| [vatomic64_await_gt_add](await_u64.h.md#function-vatomic64_await_gt_add) | Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied. | +| [vatomic64_await_gt_add_acq](await_u64.h.md#function-vatomic64_await_gt_add_acq) | Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied. | +| [vatomic64_await_gt_add_rel](await_u64.h.md#function-vatomic64_await_gt_add_rel) | Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied. 
| +| [vatomic64_await_gt_add_rlx](await_u64.h.md#function-vatomic64_await_gt_add_rlx) | Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied. | +| [vatomic64_await_gt_sub](await_u64.h.md#function-vatomic64_await_gt_sub) | Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_gt_sub_acq](await_u64.h.md#function-vatomic64_await_gt_sub_acq) | Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_gt_sub_rel](await_u64.h.md#function-vatomic64_await_gt_sub_rel) | Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_gt_sub_rlx](await_u64.h.md#function-vatomic64_await_gt_sub_rlx) | Politely awaits the value pointed by a to be greater than c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_gt_set](await_u64.h.md#function-vatomic64_await_gt_set) | Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied. | +| [vatomic64_await_gt_set_acq](await_u64.h.md#function-vatomic64_await_gt_set_acq) | Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied. | +| [vatomic64_await_gt_set_rel](await_u64.h.md#function-vatomic64_await_gt_set_rel) | Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied. | +| [vatomic64_await_gt_set_rlx](await_u64.h.md#function-vatomic64_await_gt_set_rlx) | Politely awaits the value pointed by a to be greater than c, setting v to a once the condition is satisfied. | +| [vatomic64_await_ge_add](await_u64.h.md#function-vatomic64_await_ge_add) | Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_ge_add_acq](await_u64.h.md#function-vatomic64_await_ge_add_acq) | Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_ge_add_rel](await_u64.h.md#function-vatomic64_await_ge_add_rel) | Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_ge_add_rlx](await_u64.h.md#function-vatomic64_await_ge_add_rlx) | Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied. | +| [vatomic64_await_ge_sub](await_u64.h.md#function-vatomic64_await_ge_sub) | Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_ge_sub_acq](await_u64.h.md#function-vatomic64_await_ge_sub_acq) | Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_ge_sub_rel](await_u64.h.md#function-vatomic64_await_ge_sub_rel) | Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied. | +| [vatomic64_await_ge_sub_rlx](await_u64.h.md#function-vatomic64_await_ge_sub_rlx) | Politely awaits the value pointed by a to be greater than or equal to c, subtracting v to a once the condition is satisfied. 
|
+| [vatomic64_await_ge_set](await_u64.h.md#function-vatomic64_await_ge_set) | Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied. |
+| [vatomic64_await_ge_set_acq](await_u64.h.md#function-vatomic64_await_ge_set_acq) | Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied. |
+| [vatomic64_await_ge_set_rel](await_u64.h.md#function-vatomic64_await_ge_set_rel) | Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied. |
+| [vatomic64_await_ge_set_rlx](await_u64.h.md#function-vatomic64_await_ge_set_rlx) | Politely awaits the value pointed by a to be greater than or equal to c, setting v to a once the condition is satisfied. |
+
+## Function `vatomic64_await_lt`
+
+```c
+static vuint64_t vatomic64_await_lt(const vatomic64_t *a, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be less than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic64_await_lt_acq`
+
+```c
+static vuint64_t vatomic64_await_lt_acq(const vatomic64_t *a, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be less than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic64_await_lt_rlx`
+
+```c
+static vuint64_t vatomic64_await_lt_rlx(const vatomic64_t *a, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be less than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic64_await_le`
+
+```c
+static vuint64_t vatomic64_await_le(const vatomic64_t *a, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be less than or equal to v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic64_await_le_acq`
+
+```c
+static vuint64_t vatomic64_await_le_acq(const vatomic64_t *a, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be less than or equal to v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic64_await_le_rlx`
+
+```c
+static vuint64_t vatomic64_await_le_rlx(const vatomic64_t *a, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be less than or equal to v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: relaxed
+
+
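+**Example:** A minimal usage sketch for the plain await functions above; the `pending` counter and its meaning are illustrative assumptions, not part of the library:
+
+```c
+#include <vsync/atomic.h>
+
+vatomic64_t pending; /* decremented by worker threads elsewhere */
+
+void
+wait_until_drained(void)
+{
+    /* spin politely until pending <= 1; the returned value is the
+     * first read that satisfied the condition */
+    vuint64_t seen = vatomic64_await_le(&pending, 1);
+    (void)seen;
+}
+```
+
+The `_acq` and `_rlx` suffixes select acquire or relaxed ordering for the read, as documented above.
+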
+ + + +**Parameters:** + +- `a`: atomic variable +- `v`: value to compare + + +**Returns:** value satisfying conditon + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_gt` + +```c +static vuint64_t vatomic64_await_gt(const vatomic64_t *a, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than v._ + + +The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a). + + + +**Parameters:** + +- `a`: atomic variable +- `v`: value to compare + + +**Returns:** value satisfying conditon + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_gt_acq` + +```c +static vuint64_t vatomic64_await_gt_acq(const vatomic64_t *a, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than v._ + + +The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a). + + + +**Parameters:** + +- `a`: atomic variable +- `v`: value to compare + + +**Returns:** value satisfying conditon + +**Memory ordering**: acquire + + +## Function `vatomic64_await_gt_rlx` + +```c +static vuint64_t vatomic64_await_gt_rlx(const vatomic64_t *a, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than v._ + + +The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a). + + + +**Parameters:** + +- `a`: atomic variable +- `v`: value to compare + + +**Returns:** value satisfying conditon + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_ge` + +```c +static vuint64_t vatomic64_await_ge(const vatomic64_t *a, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to v._ + + +The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a). + + + +**Parameters:** + +- `a`: atomic variable +- `v`: value to compare + + +**Returns:** value satisfying conditon + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_ge_acq` + +```c +static vuint64_t vatomic64_await_ge_acq(const vatomic64_t *a, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to v._ + + +The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a). + + + +**Parameters:** + +- `a`: atomic variable +- `v`: value to compare + + +**Returns:** value satisfying conditon + +**Memory ordering**: acquire + + +## Function `vatomic64_await_ge_rlx` + +```c +static vuint64_t vatomic64_await_ge_rlx(const vatomic64_t *a, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to v._ + + +The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a). + + + +**Parameters:** + +- `a`: atomic variable +- `v`: value to compare + + +**Returns:** value satisfying conditon + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_neq` + +```c +static vuint64_t vatomic64_await_neq(const vatomic64_t *a, vuint64_t v) +``` +_Politely awaits the value pointed by a to be different than v._ + + +The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a). 
+## Function `vatomic64_await_neq_acq`
+
+```c
+static vuint64_t vatomic64_await_neq_acq(const vatomic64_t *a, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be different than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic64_await_neq_rlx`
+
+```c
+static vuint64_t vatomic64_await_neq_rlx(const vatomic64_t *a, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be different than v._
+
+
+The return value is the first read value that satisfies the condition, as if read with vatomic64_read(a).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic64_await_eq`
+
+```c
+static vuint64_t vatomic64_await_eq(const vatomic64_t *a, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to v._
+
+
+The return value is the last value read before the condition is satisfied.
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** last value before satisfying condition
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic64_await_eq_acq`
+
+```c
+static vuint64_t vatomic64_await_eq_acq(const vatomic64_t *a, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to v._
+
+
+The return value is the last value read before the condition is satisfied.
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** last value before satisfying condition
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic64_await_eq_rlx`
+
+```c
+static vuint64_t vatomic64_await_eq_rlx(const vatomic64_t *a, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to v._
+
+
+The return value is the last value read before the condition is satisfied.
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: value to compare
+
+
+**Returns:** last value before satisfying condition
+
+**Memory ordering**: relaxed
+
+
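+**Example:** A sketch of `vatomic64_await_eq` in a ticket-style handoff; `turn` and `my_ticket` are illustrative assumptions:
+
+```c
+#include <vsync/atomic.h>
+
+vatomic64_t turn; /* ticket currently being served */
+
+void
+await_my_turn(vuint64_t my_ticket)
+{
+    /* spin politely until turn == my_ticket; the return value is the
+     * last value read before the condition held, so it is ignored */
+    (void)vatomic64_await_eq(&turn, my_ticket);
+}
+```
+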
+## Function `vatomic64_await_eq_add`
+
+```c
+static vuint64_t vatomic64_await_eq_add(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic64_await_eq_add_acq`
+
+```c
+static vuint64_t vatomic64_await_eq_add_acq(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic64_await_eq_add_rel`
+
+```c
+static vuint64_t vatomic64_await_eq_add_rel(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: release
+
+
+## Function `vatomic64_await_eq_add_rlx`
+
+```c
+static vuint64_t vatomic64_await_eq_add_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic64_await_eq_sub`
+
+```c
+static vuint64_t vatomic64_await_eq_sub(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic64_await_eq_sub_acq`
+
+```c
+static vuint64_t vatomic64_await_eq_sub_acq(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic64_await_eq_sub_rel`
+
+```c
+static vuint64_t vatomic64_await_eq_sub_rel(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: release
+
+
+## Function `vatomic64_await_eq_sub_rlx`
+
+```c
+static vuint64_t vatomic64_await_eq_sub_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, subtracting v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: relaxed
+
+
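+**Example:** The await-and-modify variants combine the wait and the follow-up update into one atomic step. A sketch using `vatomic64_await_eq_add` as a turnstile; `turn` and `my_ticket` are illustrative assumptions:
+
+```c
+#include <vsync/atomic.h>
+
+vatomic64_t turn; /* e.g., initialized with vatomic64_init(&turn, 0) */
+
+void
+pass_turnstile(vuint64_t my_ticket)
+{
+    /* wait until turn == my_ticket, then atomically add 1 so that
+     * the next ticket holder may pass; returns the old value */
+    vuint64_t old = vatomic64_await_eq_add(&turn, my_ticket, 1);
+    (void)old; /* old == my_ticket */
+}
+```
+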
+## Function `vatomic64_await_eq_set`
+
+```c
+static vuint64_t vatomic64_await_eq_set(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic64_await_eq_set_acq`
+
+```c
+static vuint64_t vatomic64_await_eq_set_acq(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic64_await_eq_set_rel`
+
+```c
+static vuint64_t vatomic64_await_eq_set_rel(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: release
+
+
+## Function `vatomic64_await_eq_set_rlx`
+
+```c
+static vuint64_t vatomic64_await_eq_set_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be equal to c, setting v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic64_await_neq_add`
+
+```c
+static vuint64_t vatomic64_await_neq_add(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic64_await_neq_add_acq`
+
+```c
+static vuint64_t vatomic64_await_neq_add_acq(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic64_await_neq_add_rel`
+
+```c
+static vuint64_t vatomic64_await_neq_add_rel(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: release
+
+
+## Function `vatomic64_await_neq_add_rlx`
+
+```c
+static vuint64_t vatomic64_await_neq_add_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v)
+```
+_Politely awaits the value pointed by a to be different than c, adding v to a once the condition is satisfied._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: value to compare
+- `v`: value to apply the operation
+
+
+**Returns:** old value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic64_await_neq_sub`
+
+```c
+static vuint64_t vatomic64_await_neq_sub(vatomic64_t 
*a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_neq_sub_acq` + +```c +static vuint64_t vatomic64_await_neq_sub_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_neq_sub_rel` + +```c +static vuint64_t vatomic64_await_neq_sub_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_neq_sub_rlx` + +```c +static vuint64_t vatomic64_await_neq_sub_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be different than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_neq_set` + +```c +static vuint64_t vatomic64_await_neq_set(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_neq_set_acq` + +```c +static vuint64_t vatomic64_await_neq_set_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_neq_set_rel` + +```c +static vuint64_t vatomic64_await_neq_set_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_neq_set_rlx` + +```c +static vuint64_t vatomic64_await_neq_set_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be different than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_lt_add` + +```c +static vuint64_t vatomic64_await_lt_add(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a 
to be less than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_lt_add_acq` + +```c +static vuint64_t vatomic64_await_lt_add_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_lt_add_rel` + +```c +static vuint64_t vatomic64_await_lt_add_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_lt_add_rlx` + +```c +static vuint64_t vatomic64_await_lt_add_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_lt_sub` + +```c +static vuint64_t vatomic64_await_lt_sub(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_lt_sub_acq` + +```c +static vuint64_t vatomic64_await_lt_sub_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_lt_sub_rel` + +```c +static vuint64_t vatomic64_await_lt_sub_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_lt_sub_rlx` + +```c +static vuint64_t vatomic64_await_lt_sub_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_lt_set` + +```c +static vuint64_t vatomic64_await_lt_set(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value 
to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_lt_set_acq` + +```c +static vuint64_t vatomic64_await_lt_set_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_lt_set_rel` + +```c +static vuint64_t vatomic64_await_lt_set_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_lt_set_rlx` + +```c +static vuint64_t vatomic64_await_lt_set_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_le_add` + +```c +static vuint64_t vatomic64_await_le_add(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_le_add_acq` + +```c +static vuint64_t vatomic64_await_le_add_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_le_add_rel` + +```c +static vuint64_t vatomic64_await_le_add_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_le_add_rlx` + +```c +static vuint64_t vatomic64_await_le_add_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_le_sub` + +```c +static vuint64_t vatomic64_await_le_sub(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + 
+**Memory ordering**: seq_cst + + +## Function `vatomic64_await_le_sub_acq` + +```c +static vuint64_t vatomic64_await_le_sub_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_le_sub_rel` + +```c +static vuint64_t vatomic64_await_le_sub_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_le_sub_rlx` + +```c +static vuint64_t vatomic64_await_le_sub_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, subtracting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_le_set` + +```c +static vuint64_t vatomic64_await_le_set(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_le_set_acq` + +```c +static vuint64_t vatomic64_await_le_set_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_le_set_rel` + +```c +static vuint64_t vatomic64_await_le_set_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_le_set_rlx` + +```c +static vuint64_t vatomic64_await_le_set_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be less than or equal to c, setting v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_gt_add` + +```c +static vuint64_t vatomic64_await_gt_add(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function 
`vatomic64_await_gt_add_acq` + +```c +static vuint64_t vatomic64_await_gt_add_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_gt_add_rel` + +```c +static vuint64_t vatomic64_await_gt_add_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_gt_add_rlx` + +```c +static vuint64_t vatomic64_await_gt_add_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_gt_sub` + +```c +static vuint64_t vatomic64_await_gt_sub(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, subtracting v from a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_gt_sub_acq` + +```c +static vuint64_t vatomic64_await_gt_sub_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, subtracting v from a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_gt_sub_rel` + +```c +static vuint64_t vatomic64_await_gt_sub_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, subtracting v from a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_gt_sub_rlx` + +```c +static vuint64_t vatomic64_await_gt_sub_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, subtracting v from a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_gt_set` + +```c +static vuint64_t vatomic64_await_gt_set(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, writing v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_gt_set_acq` + +```c +static vuint64_t vatomic64_await_gt_set_acq(vatomic64_t *a, vuint64_t 
c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, writing v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_gt_set_rel` + +```c +static vuint64_t vatomic64_await_gt_set_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, writing v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_gt_set_rlx` + +```c +static vuint64_t vatomic64_await_gt_set_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than c, writing v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_ge_add` + +```c +static vuint64_t vatomic64_await_ge_add(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_ge_add_acq` + +```c +static vuint64_t vatomic64_await_ge_add_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_ge_add_rel` + +```c +static vuint64_t vatomic64_await_ge_add_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_ge_add_rlx` + +```c +static vuint64_t vatomic64_await_ge_add_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, adding v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_ge_sub` + +```c +static vuint64_t vatomic64_await_ge_sub(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, subtracting v from a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_ge_sub_acq` + +```c +static vuint64_t vatomic64_await_ge_sub_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a 
to be greater than or equal to c, subtracting v from a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_ge_sub_rel` + +```c +static vuint64_t vatomic64_await_ge_sub_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, subtracting v from a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_ge_sub_rlx` + +```c +static vuint64_t vatomic64_await_ge_sub_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, subtracting v from a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_await_ge_set` + +```c +static vuint64_t vatomic64_await_ge_set(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, writing v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_await_ge_set_acq` + +```c +static vuint64_t vatomic64_await_ge_set_acq(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, writing v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_await_ge_set_rel` + +```c +static vuint64_t vatomic64_await_ge_set_rel(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, writing v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_await_ge_set_rlx` + +```c +static vuint64_t vatomic64_await_ge_set_rlx(vatomic64_t *a, vuint64_t c, vuint64_t v) +``` +_Politely awaits the value pointed by a to be greater than or equal to c, writing v to a once the condition is satisfied._ + + + + +**Parameters:** + +- `a`: atomic variable +- `c`: value to compare +- `v`: value to apply the operation + + +**Returns:** old value + +**Memory ordering**: relaxed + + + +--- diff --git a/doc/api/vsync/atomic/config.h.md b/doc/api/vsync/atomic/config.h.md new file mode 100644 index 00000000..d7ab06fe --- /dev/null +++ b/doc/api/vsync/atomic/config.h.md @@ -0,0 +1,77 @@ +# [vsync](../README.md) / [atomic](README.md) / config.h +_Global configuration of vatomics._ + +The configuration defined in this file is typically applied to the whole project using vatomic. + +--- +# Macros + +| Macro | Description | +|---|---| +| [VATOMIC_ENABLE_FENCE_RLX_NOP](config.h.md#macro-vatomic_enable_fence_rlx_nop) | maps `vatomic_fence_rlx` to a NOP instruction, otherwise to nothing. 
| +| [VATOMIC_DISABLE_POLITE_AWAIT](config.h.md#macro-vatomic_disable_polite_await) | disables polite await in x86_64 and aarch64. | +| [VATOMIC_BUILTINS](config.h.md#macro-vatomic_builtins) | uses `__atomic` compiler builtins to implement atomic operations. | +| [VATOMIC_ENABLE_ATOMIC_SC](config.h.md#macro-vatomic_enable_atomic_sc) | turns the barrier mode of all atomic operations and fences to seq_cst. | +| [VATOMIC_ENABLE_ATOMIC_RLX](config.h.md#macro-vatomic_enable_atomic_rlx) | turns the barrier mode of all atomic operations and fences to relaxed. | +| [VATOMIC_DISABLE_ARM64_LSE](config.h.md#macro-vatomic_disable_arm64_lse) | Do not use ARM64 LSE instructions for atomic operations. | +| [VATOMIC_ENABLE_ARM64_LXE](config.h.md#macro-vatomic_enable_arm64_lxe) | use ARM64 LSE instructions in the slow path of LLSC for atomic operations. | + +## Macro `VATOMIC_ENABLE_FENCE_RLX_NOP` + + +_maps_ `vatomic_fence_rlx` _to a NOP instruction, otherwise to nothing._ + + + +## Macro `VATOMIC_DISABLE_POLITE_AWAIT` + + +_disables polite await in x86_64 and aarch64._ + + +By default, `vatomic_await_` functions use polite await strategies with `PAUSE` or `WFE` instructions in x86_64 and arm64, respectively. Define VATOMIC_DISABLE_POLITE_AWAIT to busy-loop without such instructions. + + +## Macro `VATOMIC_BUILTINS` + + +_uses_ `__atomic` _compiler builtins to implement atomic operations._ + + +Unless VATOMIC_BUILTINS is defined, arm32 and aarch64 architectures employ custom assembly as the atomics implementation. Other architectures, including x86_64, always employ compiler builtins. + + +## Macro `VATOMIC_ENABLE_ATOMIC_SC` + + +_turns the barrier mode of all atomic operations and fences to seq_cst._ + + + +## Macro `VATOMIC_ENABLE_ATOMIC_RLX` + + +_turns the barrier mode of all atomic operations and fences to relaxed._ + + + +## Macro `VATOMIC_DISABLE_ARM64_LSE` + + +_Do not use ARM64 LSE instructions for atomic operations._ + + +If the compiler is configured to emit LSE instructions with some flag such as -march=armv8-a+lse, vatomic uses LSE instructions. Defining this macro disables the use of these instructions. + + +## Macro `VATOMIC_ENABLE_ARM64_LXE` + + +_use ARM64 LSE instructions in the slow path of LLSC for atomic operations._ + + +To use this option, the compiler must be configured to emit LSE instructions with some flag such as -march=armv8-a+lse. + + + +--- diff --git a/doc/api/vsync/atomic/core.h.md b/doc/api/vsync/atomic/core.h.md new file mode 100644 index 00000000..ddc851eb --- /dev/null +++ b/doc/api/vsync/atomic/core.h.md @@ -0,0 +1,160 @@ +# [vsync](../README.md) / [atomic](README.md) / core.h +_Atomic types, core atomic operations and fences._ + +This file is the main entry point of VSync atomics. You can include it with: + + + +```c +#include <vsync/atomic/core.h> +``` + + + +The declaration and documentation of most atomic operations are split into files based on the atomic type the functions operate on, as follows: + + + +| File |Type | +| --- | --- | +| [core_u8.h](core_u8.h.md) |vatomic8_t and vuint8_t | +| [core_u16.h](core_u16.h.md) |vatomic16_t and vuint16_t | +| [core_u32.h](core_u32.h.md) |vatomic32_t and vuint32_t | +| [core_u64.h](core_u64.h.md) |vatomic64_t and vuint64_t | +| [core_sz.h](core_sz.h.md) |vatomicsz_t and vsize_t | +| [core_ptr.h](core_ptr.h.md) |vatomicptr_t and void * | + + + +Major configuration options are described in [config.h](config.h.md). + +--- +# Macros + +| Macro | Description | +|---|---| +| [VATOMIC_INIT](core.h.md#macro-vatomic_init) | Initializes an atomic variable with value v. 
| +| [vatomicptr](core.h.md#macro-vatomicptr) | Declares an atomic pointer type. | +| [vatomic_cpu_pause](core.h.md#macro-vatomic_cpu_pause) | Calls the CPU pause instruction if available, e.g., `PAUSE` in x86. | +| [VATOMIC_DISABLE_MACRO_UNDEF](core.h.md#macro-vatomic_disable_macro_undef) | Disables undefines of non-exported macros. | + +## Macro `VATOMIC_INIT` + +```c +VATOMIC_INIT(v) +``` + + +_Initializes an atomic variable with value v._ + + + +## Macro `vatomicptr` + +```c +vatomicptr(T) +``` + + +_Declares an atomic pointer type._ + + +When declaring an atomic pointer it may be useful to annotate it with the original type. Use the [vatomicptr(T)](core.h.md#macro-vatomicptr) macro for that. + + +### Example: + + + +```c +typedef struct mcs_node_s { + vatomicptr(struct mcs_node_s*) next; +} mcs_node_t; +``` + + + + +## Macro `vatomic_cpu_pause` + +```c +vatomic_cpu_pause() +``` + + +_Calls the CPU pause instruction if available, e.g.,_ `PAUSE` _in x86._ + + +Tight spinloops often overuse the memory subsystem. This macro calls an architecture-dependent instruction to slow down spinloops (`PAUSE` in x86 and `YIELD` in aarch64). Define `VSYNC_DISABLE_POLITE_AWAIT` to disable the effect of this macro. + + +### Example: + + + +```c +while (!vatomic_read(&flag)) vatomic_cpu_pause(); +``` + + + + +## Macro `VATOMIC_DISABLE_MACRO_UNDEF` + + +_Disables undefines of non-exported macros._ + + +By default, all non-exported macros are undefined at the end of [atomic.h](../atomic.h.md). When VATOMIC_DISABLE_MACRO_UNDEF is defined, no macro is undefined. This is useful for testing. + + +--- +# Functions + +| Function | Description | +|---|---| +| [vatomic_fence](core.h.md#function-vatomic_fence) | Executes an atomic fence with seq_cst memory order. | +| [vatomic_fence_acq](core.h.md#function-vatomic_fence_acq) | Executes an atomic fence with acquire memory order. | +| [vatomic_fence_rel](core.h.md#function-vatomic_fence_rel) | Executes an atomic fence with release memory order. | +| [vatomic_fence_rlx](core.h.md#function-vatomic_fence_rlx) | Executes an atomic fence with relaxed memory order. | + +## Function `vatomic_fence` + +```c +static void vatomic_fence(void) +``` +_Executes an atomic fence with seq_cst memory order._ + + + +## Function `vatomic_fence_acq` + +```c +static void vatomic_fence_acq(void) +``` +_Executes an atomic fence with acquire memory order._ + + + +## Function `vatomic_fence_rel` + +```c +static void vatomic_fence_rel(void) +``` +_Executes an atomic fence with release memory order._ + + + +## Function `vatomic_fence_rlx` + +```c +static void vatomic_fence_rlx(void) +``` +_Executes an atomic fence with relaxed memory order._ + + +> **Note:** Technically, there is no fence_rlx; it compiles to a NOP. + + + +--- diff --git a/doc/api/vsync/atomic/core_ptr.h.md b/doc/api/vsync/atomic/core_ptr.h.md new file mode 100644 index 00000000..d2d80843 --- /dev/null +++ b/doc/api/vsync/atomic/core_ptr.h.md @@ -0,0 +1,342 @@ +# [vsync](../README.md) / [atomic](README.md) / core_ptr.h +_Atomic functions for vatomicptr_t variables._ + +This file declares and documents the core atomic functions operating on vatomicptr_t variables. + +Please do not include this file directly; instead use: + +```c +#include <vsync/atomic/core.h> +``` + + + +--- +# Functions + +| Function | Description | +|---|---| +| [vatomicptr_init](core_ptr.h.md#function-vatomicptr_init) | Initializes the atomic variable a with value v. | +| [vatomicptr_read](core_ptr.h.md#function-vatomicptr_read) | Returns the value of the atomic variable pointed by a. 
| +| [vatomicptr_read_acq](core_ptr.h.md#function-vatomicptr_read_acq) | Returns the value of the atomic variable pointed by a. | +| [vatomicptr_read_rlx](core_ptr.h.md#function-vatomicptr_read_rlx) | Returns the value of the atomic variable pointed by a. | +| [vatomicptr_write](core_ptr.h.md#function-vatomicptr_write) | Writes value v in the atomic variable pointed by a. | +| [vatomicptr_write_rel](core_ptr.h.md#function-vatomicptr_write_rel) | Writes value v in the atomic variable pointed by a. | +| [vatomicptr_write_rlx](core_ptr.h.md#function-vatomicptr_write_rlx) | Writes value v in the atomic variable pointed by a. | +| [vatomicptr_xchg](core_ptr.h.md#function-vatomicptr_xchg) | Writes v in a and returns old value. | +| [vatomicptr_xchg_acq](core_ptr.h.md#function-vatomicptr_xchg_acq) | Writes v in a and returns old value. | +| [vatomicptr_xchg_rel](core_ptr.h.md#function-vatomicptr_xchg_rel) | Writes v in a and returns old value. | +| [vatomicptr_xchg_rlx](core_ptr.h.md#function-vatomicptr_xchg_rlx) | Writes v in a and returns old value. | +| [vatomicptr_cmpxchg](core_ptr.h.md#function-vatomicptr_cmpxchg) | Writes value v in a if e is the current value. | +| [vatomicptr_cmpxchg_acq](core_ptr.h.md#function-vatomicptr_cmpxchg_acq) | Writes value v in a if e is the current value. | +| [vatomicptr_cmpxchg_rel](core_ptr.h.md#function-vatomicptr_cmpxchg_rel) | Writes value v in a if e is the current value. | +| [vatomicptr_cmpxchg_rlx](core_ptr.h.md#function-vatomicptr_cmpxchg_rlx) | Writes value v in a if e is the current value. | + +## Function `vatomicptr_init` + +```c +static void vatomicptr_init(vatomicptr_t *a, void *v) +``` +_Initializes the atomic variable a with value v._ + + +The initialization is equivalent to a [vatomicptr_write()](core_ptr.h.md#function-vatomicptr_write). 
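+ +### Example: + +The following sketch is illustrative only and not part of the generated reference; the `node_t` type and `node_init` helper are assumptions. It initializes an atomic pointer field before the structure is shared with other threads. + + + +```c +#include <vsync/atomic/core.h> + +typedef struct node_s { + vatomicptr(struct node_s *) next; /* annotated atomic pointer */ +} node_t; + +void node_init(node_t *n) +{ + /* Single-threaded setup; equivalent to vatomicptr_write(&n->next, NULL). */ + vatomicptr_init(&n->next, NULL); +} +``` 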
+ + + +**Parameters:** + +- `a`: atomic variable +- `v`: initial value + + +**Memory ordering**: seq_cst + + +## Function `vatomicptr_read` + +```c +static void* vatomicptr_read(const vatomicptr_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: seq_cst + + +## Function `vatomicptr_read_acq` + +```c +static void* vatomicptr_read_acq(const vatomicptr_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: acquire + + +## Function `vatomicptr_read_rlx` + +```c +static void* vatomicptr_read_rlx(const vatomicptr_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: relaxed + + +## Function `vatomicptr_write` + +```c +static void vatomicptr_write(vatomicptr_t *a, void *v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: seq_cst + + +## Function `vatomicptr_write_rel` + +```c +static void vatomicptr_write_rel(vatomicptr_t *a, void *v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: release + + +## Function `vatomicptr_write_rlx` + +```c +static void vatomicptr_write_rlx(vatomicptr_t *a, void *v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: relaxed + + +## Function `vatomicptr_xchg` + +```c +static void* vatomicptr_xchg(vatomicptr_t *a, void *v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomicptr_xchg_acq` + +```c +static void* vatomicptr_xchg_acq(vatomicptr_t *a, void *v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomicptr_xchg_rel` + +```c +static void* vatomicptr_xchg_rel(vatomicptr_t *a, void *v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomicptr_xchg_rlx` + +```c +static void* vatomicptr_xchg_rlx(vatomicptr_t *a, void *v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomicptr_cmpxchg` + +```c +static void* vatomicptr_cmpxchg(vatomicptr_t *a, void *e, void *v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomicptr_cmpxchg_acq` + +```c +static void* vatomicptr_cmpxchg_acq(vatomicptr_t *a, void *e, void *v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomicptr_cmpxchg_rel` + +```c +static void* vatomicptr_cmpxchg_rel(vatomicptr_t 
*a, void *e, void *v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomicptr_cmpxchg_rlx` + +```c +static void* vatomicptr_cmpxchg_rlx(vatomicptr_t *a, void *e, void *v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + + +--- diff --git a/doc/api/vsync/atomic/core_sz.h.md b/doc/api/vsync/atomic/core_sz.h.md new file mode 100644 index 00000000..0edd1398 --- /dev/null +++ b/doc/api/vsync/atomic/core_sz.h.md @@ -0,0 +1,2208 @@ +# [vsync](../README.md) / [atomic](README.md) / core_sz.h +_Atomic functions for vatomicsz_t variables._ + +This file declares and documents the core atomic functions operating on vatomicsz_t variables. + +Please do not include this file directly; instead use: + +```c +#include <vsync/atomic/core.h> +``` + + + +--- +# Functions + +| Function | Description | +|---|---| +| [vatomicsz_init](core_sz.h.md#function-vatomicsz_init) | Initializes the atomic variable a with value v. | +| [vatomicsz_read](core_sz.h.md#function-vatomicsz_read) | Returns the value of the atomic variable pointed by a. | +| [vatomicsz_read_acq](core_sz.h.md#function-vatomicsz_read_acq) | Returns the value of the atomic variable pointed by a. | +| [vatomicsz_read_rlx](core_sz.h.md#function-vatomicsz_read_rlx) | Returns the value of the atomic variable pointed by a. | +| [vatomicsz_write](core_sz.h.md#function-vatomicsz_write) | Writes value v in the atomic variable pointed by a. | +| [vatomicsz_write_rel](core_sz.h.md#function-vatomicsz_write_rel) | Writes value v in the atomic variable pointed by a. | +| [vatomicsz_write_rlx](core_sz.h.md#function-vatomicsz_write_rlx) | Writes value v in the atomic variable pointed by a. | +| [vatomicsz_xchg](core_sz.h.md#function-vatomicsz_xchg) | Writes v in a and returns old value. | +| [vatomicsz_xchg_acq](core_sz.h.md#function-vatomicsz_xchg_acq) | Writes v in a and returns old value. | +| [vatomicsz_xchg_rel](core_sz.h.md#function-vatomicsz_xchg_rel) | Writes v in a and returns old value. | +| [vatomicsz_xchg_rlx](core_sz.h.md#function-vatomicsz_xchg_rlx) | Writes v in a and returns old value. | +| [vatomicsz_cmpxchg](core_sz.h.md#function-vatomicsz_cmpxchg) | Writes value v in a if e is the current value. | +| [vatomicsz_cmpxchg_acq](core_sz.h.md#function-vatomicsz_cmpxchg_acq) | Writes value v in a if e is the current value. | +| [vatomicsz_cmpxchg_rel](core_sz.h.md#function-vatomicsz_cmpxchg_rel) | Writes value v in a if e is the current value. | +| [vatomicsz_cmpxchg_rlx](core_sz.h.md#function-vatomicsz_cmpxchg_rlx) | Writes value v in a if e is the current value. | +| [vatomicsz_get_max](core_sz.h.md#function-vatomicsz_get_max) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomicsz_get_max_acq](core_sz.h.md#function-vatomicsz_get_max_acq) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomicsz_get_max_rel](core_sz.h.md#function-vatomicsz_get_max_rel) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomicsz_get_max_rlx](core_sz.h.md#function-vatomicsz_get_max_rlx) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomicsz_max_get](core_sz.h.md#function-vatomicsz_max_get) | Writes v to a if v is greater than *a and returns the new value. 
| +| [vatomicsz_max_get_acq](core_sz.h.md#function-vatomicsz_max_get_acq) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomicsz_max_get_rel](core_sz.h.md#function-vatomicsz_max_get_rel) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomicsz_max_get_rlx](core_sz.h.md#function-vatomicsz_max_get_rlx) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomicsz_max](core_sz.h.md#function-vatomicsz_max) | Writes v to a if v is greater than *a. | +| [vatomicsz_max_rel](core_sz.h.md#function-vatomicsz_max_rel) | Writes v to a if v is greater than *a. | +| [vatomicsz_max_rlx](core_sz.h.md#function-vatomicsz_max_rlx) | Writes v to a if v is greater than *a. | +| [vatomicsz_get_and](core_sz.h.md#function-vatomicsz_get_and) | Applies bitwise and to the value of a and returns the old value. | +| [vatomicsz_get_and_acq](core_sz.h.md#function-vatomicsz_get_and_acq) | Applies bitwise and to the value of a and returns the old value. | +| [vatomicsz_get_and_rel](core_sz.h.md#function-vatomicsz_get_and_rel) | Applies bitwise and to the value of a and returns the old value. | +| [vatomicsz_get_and_rlx](core_sz.h.md#function-vatomicsz_get_and_rlx) | Applies bitwise and to the value of a and returns the old value. | +| [vatomicsz_and_get](core_sz.h.md#function-vatomicsz_and_get) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomicsz_and_get_acq](core_sz.h.md#function-vatomicsz_and_get_acq) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomicsz_and_get_rel](core_sz.h.md#function-vatomicsz_and_get_rel) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomicsz_and_get_rlx](core_sz.h.md#function-vatomicsz_and_get_rlx) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomicsz_and](core_sz.h.md#function-vatomicsz_and) | Applies bitwise and operation with v to the value of a. | +| [vatomicsz_and_rel](core_sz.h.md#function-vatomicsz_and_rel) | Applies bitwise and operation with v to the value of a. | +| [vatomicsz_and_rlx](core_sz.h.md#function-vatomicsz_and_rlx) | Applies bitwise and operation with v to the value of a. | +| [vatomicsz_get_or](core_sz.h.md#function-vatomicsz_get_or) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomicsz_get_or_acq](core_sz.h.md#function-vatomicsz_get_or_acq) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomicsz_get_or_rel](core_sz.h.md#function-vatomicsz_get_or_rel) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomicsz_get_or_rlx](core_sz.h.md#function-vatomicsz_get_or_rlx) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomicsz_or_get](core_sz.h.md#function-vatomicsz_or_get) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomicsz_or_get_acq](core_sz.h.md#function-vatomicsz_or_get_acq) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomicsz_or_get_rel](core_sz.h.md#function-vatomicsz_or_get_rel) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomicsz_or_get_rlx](core_sz.h.md#function-vatomicsz_or_get_rlx) | Applies bitwise or operation with v to the value of a and returns the new value. 
| +| [vatomicsz_or](core_sz.h.md#function-vatomicsz_or) | Applies bitwise or operation with v to the value of a. | +| [vatomicsz_or_rel](core_sz.h.md#function-vatomicsz_or_rel) | Applies bitwise or operation with v to the value of a. | +| [vatomicsz_or_rlx](core_sz.h.md#function-vatomicsz_or_rlx) | Applies bitwise or operation with v to the value of a. | +| [vatomicsz_get_xor](core_sz.h.md#function-vatomicsz_get_xor) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomicsz_get_xor_acq](core_sz.h.md#function-vatomicsz_get_xor_acq) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomicsz_get_xor_rel](core_sz.h.md#function-vatomicsz_get_xor_rel) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomicsz_get_xor_rlx](core_sz.h.md#function-vatomicsz_get_xor_rlx) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomicsz_xor_get](core_sz.h.md#function-vatomicsz_xor_get) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomicsz_xor_get_acq](core_sz.h.md#function-vatomicsz_xor_get_acq) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomicsz_xor_get_rel](core_sz.h.md#function-vatomicsz_xor_get_rel) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomicsz_xor_get_rlx](core_sz.h.md#function-vatomicsz_xor_get_rlx) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomicsz_xor](core_sz.h.md#function-vatomicsz_xor) | Applies bitwise xor operation with v to the value of a. | +| [vatomicsz_xor_rel](core_sz.h.md#function-vatomicsz_xor_rel) | Applies bitwise xor operation with v to the value of a. | +| [vatomicsz_xor_rlx](core_sz.h.md#function-vatomicsz_xor_rlx) | Applies bitwise xor operation with v to the value of a. | +| [vatomicsz_get_add](core_sz.h.md#function-vatomicsz_get_add) | Adds v to the value of a and returns the old value. | +| [vatomicsz_get_add_acq](core_sz.h.md#function-vatomicsz_get_add_acq) | Adds v to the value of a and returns the old value. | +| [vatomicsz_get_add_rel](core_sz.h.md#function-vatomicsz_get_add_rel) | Adds v to the value of a and returns the old value. | +| [vatomicsz_get_add_rlx](core_sz.h.md#function-vatomicsz_get_add_rlx) | Adds v to the value of a and returns the old value. | +| [vatomicsz_add_get](core_sz.h.md#function-vatomicsz_add_get) | Adds v to the value of a and returns the new value. | +| [vatomicsz_add_get_acq](core_sz.h.md#function-vatomicsz_add_get_acq) | Adds v to the value of a and returns the new value. | +| [vatomicsz_add_get_rel](core_sz.h.md#function-vatomicsz_add_get_rel) | Adds v to the value of a and returns the new value. | +| [vatomicsz_add_get_rlx](core_sz.h.md#function-vatomicsz_add_get_rlx) | Adds v to the value of a and returns the new value. | +| [vatomicsz_add](core_sz.h.md#function-vatomicsz_add) | Adds v to the value of a. | +| [vatomicsz_add_rel](core_sz.h.md#function-vatomicsz_add_rel) | Adds v to the value of a. | +| [vatomicsz_add_rlx](core_sz.h.md#function-vatomicsz_add_rlx) | Adds v to the value of a. | +| [vatomicsz_get_inc](core_sz.h.md#function-vatomicsz_get_inc) | Increments the value of a and returns the old value. | +| [vatomicsz_get_inc_acq](core_sz.h.md#function-vatomicsz_get_inc_acq) | Increments the value of a and returns the old value. 
| +| [vatomicsz_get_inc_rel](core_sz.h.md#function-vatomicsz_get_inc_rel) | Increments the value of a and returns the old value. | +| [vatomicsz_get_inc_rlx](core_sz.h.md#function-vatomicsz_get_inc_rlx) | Increments the value of a and returns the old value. | +| [vatomicsz_inc_get](core_sz.h.md#function-vatomicsz_inc_get) | Increments the value of a and returns the new value. | +| [vatomicsz_inc_get_acq](core_sz.h.md#function-vatomicsz_inc_get_acq) | Increments the value of a and returns the new value. | +| [vatomicsz_inc_get_rel](core_sz.h.md#function-vatomicsz_inc_get_rel) | Increments the value of a and returns the new value. | +| [vatomicsz_inc_get_rlx](core_sz.h.md#function-vatomicsz_inc_get_rlx) | Increments the value of a and returns the new value. | +| [vatomicsz_inc](core_sz.h.md#function-vatomicsz_inc) | Increments the value of a. | +| [vatomicsz_inc_rel](core_sz.h.md#function-vatomicsz_inc_rel) | Increments the value of a. | +| [vatomicsz_inc_rlx](core_sz.h.md#function-vatomicsz_inc_rlx) | Increments the value of a. | +| [vatomicsz_get_sub](core_sz.h.md#function-vatomicsz_get_sub) | Subtracts v from a and returns the old value. | +| [vatomicsz_get_sub_acq](core_sz.h.md#function-vatomicsz_get_sub_acq) | Subtracts v from a and returns the old value. | +| [vatomicsz_get_sub_rel](core_sz.h.md#function-vatomicsz_get_sub_rel) | Subtracts v from a and returns the old value. | +| [vatomicsz_get_sub_rlx](core_sz.h.md#function-vatomicsz_get_sub_rlx) | Subtracts v from a and returns the old value. | +| [vatomicsz_sub_get](core_sz.h.md#function-vatomicsz_sub_get) | Subtracts v from a and returns the new value. | +| [vatomicsz_sub_get_acq](core_sz.h.md#function-vatomicsz_sub_get_acq) | Subtracts v from a and returns the new value. | +| [vatomicsz_sub_get_rel](core_sz.h.md#function-vatomicsz_sub_get_rel) | Subtracts v from a and returns the new value. | +| [vatomicsz_sub_get_rlx](core_sz.h.md#function-vatomicsz_sub_get_rlx) | Subtracts v from a and returns the new value. | +| [vatomicsz_sub](core_sz.h.md#function-vatomicsz_sub) | Subtracts v from a. | +| [vatomicsz_sub_rel](core_sz.h.md#function-vatomicsz_sub_rel) | Subtracts v from a. | +| [vatomicsz_sub_rlx](core_sz.h.md#function-vatomicsz_sub_rlx) | Subtracts v from a. | +| [vatomicsz_get_dec](core_sz.h.md#function-vatomicsz_get_dec) | Decrements the value of a and returns the old value. | +| [vatomicsz_get_dec_acq](core_sz.h.md#function-vatomicsz_get_dec_acq) | Decrements the value of a and returns the old value. | +| [vatomicsz_get_dec_rel](core_sz.h.md#function-vatomicsz_get_dec_rel) | Decrements the value of a and returns the old value. | +| [vatomicsz_get_dec_rlx](core_sz.h.md#function-vatomicsz_get_dec_rlx) | Decrements the value of a and returns the old value. | +| [vatomicsz_dec_get](core_sz.h.md#function-vatomicsz_dec_get) | Decrements the value of a and returns the new value. | +| [vatomicsz_dec_get_acq](core_sz.h.md#function-vatomicsz_dec_get_acq) | Decrements the value of a and returns the new value. | +| [vatomicsz_dec_get_rel](core_sz.h.md#function-vatomicsz_dec_get_rel) | Decrements the value of a and returns the new value. | +| [vatomicsz_dec_get_rlx](core_sz.h.md#function-vatomicsz_dec_get_rlx) | Decrements the value of a and returns the new value. | +| [vatomicsz_dec](core_sz.h.md#function-vatomicsz_dec) | Decrements the value of a. | +| [vatomicsz_dec_rel](core_sz.h.md#function-vatomicsz_dec_rel) | Decrements the value of a. | +| [vatomicsz_dec_rlx](core_sz.h.md#function-vatomicsz_dec_rlx) | Decrements the value of a. 
| + +## Function `vatomicsz_init` + +```c +static void vatomicsz_init(vatomicsz_t *a, vsize_t v) +``` +_Initializes the atomic variable a with value v._ + + +The initialization is equivalent to a [vatomicsz_write()](core_sz.h.md#function-vatomicsz_write). + + + +**Parameters:** + +- `a`: atomic variable +- `v`: initial value + + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_read` + +```c +static vsize_t vatomicsz_read(const vatomicsz_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_read_acq` + +```c +static vsize_t vatomicsz_read_acq(const vatomicsz_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: acquire + + +## Function `vatomicsz_read_rlx` + +```c +static vsize_t vatomicsz_read_rlx(const vatomicsz_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_write` + +```c +static void vatomicsz_write(vatomicsz_t *a, vsize_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_write_rel` + +```c +static void vatomicsz_write_rel(vatomicsz_t *a, vsize_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: release + + +## Function `vatomicsz_write_rlx` + +```c +static void vatomicsz_write_rlx(vatomicsz_t *a, vsize_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: relaxed + + +## Function `vatomicsz_xchg` + +```c +static vsize_t vatomicsz_xchg(vatomicsz_t *a, vsize_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_xchg_acq` + +```c +static vsize_t vatomicsz_xchg_acq(vatomicsz_t *a, vsize_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomicsz_xchg_rel` + +```c +static vsize_t vatomicsz_xchg_rel(vatomicsz_t *a, vsize_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomicsz_xchg_rlx` + +```c +static vsize_t vatomicsz_xchg_rlx(vatomicsz_t *a, vsize_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_cmpxchg` + +```c +static vsize_t vatomicsz_cmpxchg(vatomicsz_t *a, vsize_t e, vsize_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomicsz_cmpxchg_acq` + +```c +static vsize_t vatomicsz_cmpxchg_acq(vatomicsz_t *a, vsize_t e, vsize_t v) +``` +_Writes 
value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomicsz_cmpxchg_rel` + +```c +static vsize_t vatomicsz_cmpxchg_rel(vatomicsz_t *a, vsize_t e, vsize_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomicsz_cmpxchg_rlx` + +```c +static vsize_t vatomicsz_cmpxchg_rlx(vatomicsz_t *a, vsize_t e, vsize_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomicsz_get_max` + +```c +static vsize_t vatomicsz_get_max(vatomicsz_t *a, vsize_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_get_max_acq` + +```c +static vsize_t vatomicsz_get_max_acq(vatomicsz_t *a, vsize_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: acquire + + +## Function `vatomicsz_get_max_rel` + +```c +static vsize_t vatomicsz_get_max_rel(vatomicsz_t *a, vsize_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: release + + +## Function `vatomicsz_get_max_rlx` + +```c +static vsize_t vatomicsz_get_max_rlx(vatomicsz_t *a, vsize_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_max_get` + +```c +static vsize_t vatomicsz_max_get(vatomicsz_t *a, vsize_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_max_get_acq` + +```c +static vsize_t vatomicsz_max_get_acq(vatomicsz_t *a, vsize_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: acquire + + +## Function `vatomicsz_max_get_rel` + +```c +static vsize_t vatomicsz_max_get_rel(vatomicsz_t *a, vsize_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: release + + +## Function `vatomicsz_max_get_rlx` + +```c +static vsize_t vatomicsz_max_get_rlx(vatomicsz_t *a, vsize_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_max` + +```c +static void vatomicsz_max(vatomicsz_t *a, vsize_t v) +``` +_Writes v to a if v is greater than *a._ + + + + 
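+ +### Example: + +As an illustration (the `max_request` variable and `record_request` helper are assumed names, not part of vatomic), a typical use is keeping a high-water mark that many threads update concurrently: + + + +```c +vatomicsz_t max_request; + +void record_request(vsize_t n) +{ + /* Atomically: if (n > max_request) max_request = n; */ + vatomicsz_max(&max_request, n); +} +``` 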
+**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_max_rel` + +```c +static void vatomicsz_max_rel(vatomicsz_t *a, vsize_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: release + + +## Function `vatomicsz_max_rlx` + +```c +static void vatomicsz_max_rlx(vatomicsz_t *a, vsize_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: relaxed + + +## Function `vatomicsz_get_and` + +```c +static vsize_t vatomicsz_get_and(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_get_and_acq` + +```c +static vsize_t vatomicsz_get_and_acq(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomicsz_get_and_rel` + +```c +static vsize_t vatomicsz_get_and_rel(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomicsz_get_and_rlx` + +```c +static vsize_t vatomicsz_get_and_rlx(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_and_get` + +```c +static vsize_t vatomicsz_and_get(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_and_get_acq` + +```c +static vsize_t vatomicsz_and_get_acq(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomicsz_and_get_rel` + +```c +static vsize_t vatomicsz_and_get_rel(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomicsz_and_get_rlx` + +```c +static vsize_t vatomicsz_and_get_rlx(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_and` + +```c +static void vatomicsz_and(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_and_rel` + +```c +static void 
vatomicsz_and_rel(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomicsz_and_rlx` + +```c +static void vatomicsz_and_rlx(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomicsz_get_or` + +```c +static vsize_t vatomicsz_get_or(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_get_or_acq` + +```c +static vsize_t vatomicsz_get_or_acq(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomicsz_get_or_rel` + +```c +static vsize_t vatomicsz_get_or_rel(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomicsz_get_or_rlx` + +```c +static vsize_t vatomicsz_get_or_rlx(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_or_get` + +```c +static vsize_t vatomicsz_or_get(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_or_get_acq` + +```c +static vsize_t vatomicsz_or_get_acq(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomicsz_or_get_rel` + +```c +static vsize_t vatomicsz_or_get_rel(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomicsz_or_get_rlx` + +```c +static vsize_t vatomicsz_or_get_rlx(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_or` + +```c +static void vatomicsz_or(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_or_rel` + +```c +static void vatomicsz_or_rel(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + 
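+ +### Example: + +An illustrative sketch (the flag layout, `flags`, and `payload` are assumptions, not part of the header): the release ordering orders the payload write before the flag update, so a reader that observes READY with an acquire read also sees the payload. + + + +```c +#define READY ((vsize_t)1U) + +vatomicsz_t flags; +int payload; + +void publish(int value) +{ + payload = value; + /* Release: the payload write cannot be reordered after the flag update. */ + vatomicsz_or_rel(&flags, READY); +} +``` 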
+**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomicsz_or_rlx` + +```c +static void vatomicsz_or_rlx(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomicsz_get_xor` + +```c +static vsize_t vatomicsz_get_xor(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_get_xor_acq` + +```c +static vsize_t vatomicsz_get_xor_acq(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomicsz_get_xor_rel` + +```c +static vsize_t vatomicsz_get_xor_rel(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomicsz_get_xor_rlx` + +```c +static vsize_t vatomicsz_get_xor_rlx(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_xor_get` + +```c +static vsize_t vatomicsz_xor_get(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_xor_get_acq` + +```c +static vsize_t vatomicsz_xor_get_acq(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomicsz_xor_get_rel` + +```c +static vsize_t vatomicsz_xor_get_rel(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomicsz_xor_get_rlx` + +```c +static vsize_t vatomicsz_xor_get_rlx(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_xor` + +```c +static void vatomicsz_xor(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_xor_rel` + +```c +static void vatomicsz_xor_rel(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release +
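+ +### Example: + +A small illustrative sketch (the `LED` bit assignment and `status` variable are assumptions): xor flips exactly the bits set in the mask, so toggling twice restores the original value. + + + +```c +#define LED ((vsize_t)1U << 3) + +vatomicsz_t status; + +void led_toggle(void) +{ + /* Flip the LED bit atomically with release ordering. */ + vatomicsz_xor_rel(&status, LED); +} +``` 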
+ +## Function `vatomicsz_xor_rlx` + +```c +static void vatomicsz_xor_rlx(vatomicsz_t *a, vsize_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomicsz_get_add` + +```c +static vsize_t vatomicsz_get_add(vatomicsz_t *a, vsize_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_get_add_acq` + +```c +static vsize_t vatomicsz_get_add_acq(vatomicsz_t *a, vsize_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomicsz_get_add_rel` + +```c +static vsize_t vatomicsz_get_add_rel(vatomicsz_t *a, vsize_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomicsz_get_add_rlx` + +```c +static vsize_t vatomicsz_get_add_rlx(vatomicsz_t *a, vsize_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_add_get` + +```c +static vsize_t vatomicsz_add_get(vatomicsz_t *a, vsize_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_add_get_acq` + +```c +static vsize_t vatomicsz_add_get_acq(vatomicsz_t *a, vsize_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomicsz_add_get_rel` + +```c +static vsize_t vatomicsz_add_get_rel(vatomicsz_t *a, vsize_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomicsz_add_get_rlx` + +```c +static vsize_t vatomicsz_add_get_rlx(vatomicsz_t *a, vsize_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_add` + +```c +static void vatomicsz_add(vatomicsz_t *a, vsize_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_add_rel` + +```c +static void vatomicsz_add_rel(vatomicsz_t *a, vsize_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: release + + +## Function `vatomicsz_add_rlx` + +```c +static void vatomicsz_add_rlx(vatomicsz_t *a, vsize_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: relaxed + + +## Function `vatomicsz_get_inc` + +```c +static vsize_t vatomicsz_get_inc(vatomicsz_t *a) +``` +_Increments the value of a and 
returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_get_inc_acq` + +```c +static vsize_t vatomicsz_get_inc_acq(vatomicsz_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomicsz_get_inc_rel` + +```c +static vsize_t vatomicsz_get_inc_rel(vatomicsz_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomicsz_get_inc_rlx` + +```c +static vsize_t vatomicsz_get_inc_rlx(vatomicsz_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_inc_get` + +```c +static vsize_t vatomicsz_inc_get(vatomicsz_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_inc_get_acq` + +```c +static vsize_t vatomicsz_inc_get_acq(vatomicsz_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomicsz_inc_get_rel` + +```c +static vsize_t vatomicsz_inc_get_rel(vatomicsz_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomicsz_inc_get_rlx` + +```c +static vsize_t vatomicsz_inc_get_rlx(vatomicsz_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_inc` + +```c +static void vatomicsz_inc(vatomicsz_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_inc_rel` + +```c +static void vatomicsz_inc_rel(vatomicsz_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: release + + +## Function `vatomicsz_inc_rlx` + +```c +static void vatomicsz_inc_rlx(vatomicsz_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: relaxed + + +## Function `vatomicsz_get_sub` + +```c +static vsize_t vatomicsz_get_sub(vatomicsz_t *a, vsize_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_get_sub_acq` + +```c +static vsize_t vatomicsz_get_sub_acq(vatomicsz_t *a, vsize_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomicsz_get_sub_rel` + +```c +static vsize_t vatomicsz_get_sub_rel(vatomicsz_t *a, vsize_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + 
+**Memory ordering**: release + + +## Function `vatomicsz_get_sub_rlx` + +```c +static vsize_t vatomicsz_get_sub_rlx(vatomicsz_t *a, vsize_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_sub_get` + +```c +static vsize_t vatomicsz_sub_get(vatomicsz_t *a, vsize_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_sub_get_acq` + +```c +static vsize_t vatomicsz_sub_get_acq(vatomicsz_t *a, vsize_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomicsz_sub_get_rel` + +```c +static vsize_t vatomicsz_sub_get_rel(vatomicsz_t *a, vsize_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomicsz_sub_get_rlx` + +```c +static vsize_t vatomicsz_sub_get_rlx(vatomicsz_t *a, vsize_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_sub` + +```c +static void vatomicsz_sub(vatomicsz_t *a, vsize_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_sub_rel` + +```c +static void vatomicsz_sub_rel(vatomicsz_t *a, vsize_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Memory ordering**: release + + +## Function `vatomicsz_sub_rlx` + +```c +static void vatomicsz_sub_rlx(vatomicsz_t *a, vsize_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Memory ordering**: relaxed + + +## Function `vatomicsz_get_dec` + +```c +static vsize_t vatomicsz_get_dec(vatomicsz_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomicsz_get_dec_acq` + +```c +static vsize_t vatomicsz_get_dec_acq(vatomicsz_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomicsz_get_dec_rel` + +```c +static vsize_t vatomicsz_get_dec_rel(vatomicsz_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomicsz_get_dec_rlx` + +```c +static vsize_t vatomicsz_get_dec_rlx(vatomicsz_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomicsz_dec_get` + +```c +static vsize_t vatomicsz_dec_get(vatomicsz_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic 
variable
+
+
+**Returns:** new value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomicsz_dec_get_acq`
+
+```c
+static vsize_t vatomicsz_dec_get_acq(vatomicsz_t *a)
+```
+_Decrements the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** new value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomicsz_dec_get_rel`
+
+```c
+static vsize_t vatomicsz_dec_get_rel(vatomicsz_t *a)
+```
+_Decrements the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** new value
+
+**Memory ordering**: release
+
+
+## Function `vatomicsz_dec_get_rlx`
+
+```c
+static vsize_t vatomicsz_dec_get_rlx(vatomicsz_t *a)
+```
+_Decrements the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** new value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomicsz_dec`
+
+```c
+static void vatomicsz_dec(vatomicsz_t *a)
+```
+_Decrements the value of a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomicsz_dec_rel`
+
+```c
+static void vatomicsz_dec_rel(vatomicsz_t *a)
+```
+_Decrements the value of a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Memory ordering**: release
+
+
+## Function `vatomicsz_dec_rlx`
+
+```c
+static void vatomicsz_dec_rlx(vatomicsz_t *a)
+```
+_Decrements the value of a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Memory ordering**: relaxed
+
+
+
+---
diff --git a/doc/api/vsync/atomic/core_u16.h.md b/doc/api/vsync/atomic/core_u16.h.md
new file mode 100644
index 00000000..a7f3252a
--- /dev/null
+++ b/doc/api/vsync/atomic/core_u16.h.md
@@ -0,0 +1,2208 @@
+# [vsync](../README.md) / [atomic](README.md) / core_u16.h
+_Atomic functions for vatomic16_t variables._
+
+This file declares and documents the core atomic functions operating on vatomic16_t variables.
+
+Please do not include this file directly, instead use:
+
+```c
+#include <vsync/atomic/core.h>
+```
+
+
+
+---
+# Functions
+
+| Function | Description |
+|---|---|
+| [vatomic16_init](core_u16.h.md#function-vatomic16_init) | Initializes the atomic variable a with value v. |
+| [vatomic16_read](core_u16.h.md#function-vatomic16_read) | Returns the value of the atomic variable pointed by a. |
+| [vatomic16_read_acq](core_u16.h.md#function-vatomic16_read_acq) | Returns the value of the atomic variable pointed by a. |
+| [vatomic16_read_rlx](core_u16.h.md#function-vatomic16_read_rlx) | Returns the value of the atomic variable pointed by a. |
+| [vatomic16_write](core_u16.h.md#function-vatomic16_write) | Writes value v in the atomic variable pointed by a. |
+| [vatomic16_write_rel](core_u16.h.md#function-vatomic16_write_rel) | Writes value v in the atomic variable pointed by a. |
+| [vatomic16_write_rlx](core_u16.h.md#function-vatomic16_write_rlx) | Writes value v in the atomic variable pointed by a. |
+| [vatomic16_xchg](core_u16.h.md#function-vatomic16_xchg) | Writes v in a and returns old value. |
+| [vatomic16_xchg_acq](core_u16.h.md#function-vatomic16_xchg_acq) | Writes v in a and returns old value. |
+| [vatomic16_xchg_rel](core_u16.h.md#function-vatomic16_xchg_rel) | Writes v in a and returns old value. |
+| [vatomic16_xchg_rlx](core_u16.h.md#function-vatomic16_xchg_rlx) | Writes v in a and returns old value. |
+| [vatomic16_cmpxchg](core_u16.h.md#function-vatomic16_cmpxchg) | Writes value v in a if e is the current value. 
| +| [vatomic16_cmpxchg_acq](core_u16.h.md#function-vatomic16_cmpxchg_acq) | Writes value v in a if e is the current value. | +| [vatomic16_cmpxchg_rel](core_u16.h.md#function-vatomic16_cmpxchg_rel) | Writes value v in a if e is the current value. | +| [vatomic16_cmpxchg_rlx](core_u16.h.md#function-vatomic16_cmpxchg_rlx) | Writes value v in a if e is the current value. | +| [vatomic16_get_max](core_u16.h.md#function-vatomic16_get_max) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic16_get_max_acq](core_u16.h.md#function-vatomic16_get_max_acq) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic16_get_max_rel](core_u16.h.md#function-vatomic16_get_max_rel) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic16_get_max_rlx](core_u16.h.md#function-vatomic16_get_max_rlx) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic16_max_get](core_u16.h.md#function-vatomic16_max_get) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic16_max_get_acq](core_u16.h.md#function-vatomic16_max_get_acq) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic16_max_get_rel](core_u16.h.md#function-vatomic16_max_get_rel) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic16_max_get_rlx](core_u16.h.md#function-vatomic16_max_get_rlx) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic16_max](core_u16.h.md#function-vatomic16_max) | Writes v to a if v is greater than *a. | +| [vatomic16_max_rel](core_u16.h.md#function-vatomic16_max_rel) | Writes v to a if v is greater than *a. | +| [vatomic16_max_rlx](core_u16.h.md#function-vatomic16_max_rlx) | Writes v to a if v is greater than *a. | +| [vatomic16_get_and](core_u16.h.md#function-vatomic16_get_and) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic16_get_and_acq](core_u16.h.md#function-vatomic16_get_and_acq) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic16_get_and_rel](core_u16.h.md#function-vatomic16_get_and_rel) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic16_get_and_rlx](core_u16.h.md#function-vatomic16_get_and_rlx) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic16_and_get](core_u16.h.md#function-vatomic16_and_get) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic16_and_get_acq](core_u16.h.md#function-vatomic16_and_get_acq) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic16_and_get_rel](core_u16.h.md#function-vatomic16_and_get_rel) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic16_and_get_rlx](core_u16.h.md#function-vatomic16_and_get_rlx) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic16_and](core_u16.h.md#function-vatomic16_and) | Applies bitwise and operation with v to the value of a. | +| [vatomic16_and_rel](core_u16.h.md#function-vatomic16_and_rel) | Applies bitwise and operation with v to the value of a. | +| [vatomic16_and_rlx](core_u16.h.md#function-vatomic16_and_rlx) | Applies bitwise and operation with v to the value of a. | +| [vatomic16_get_or](core_u16.h.md#function-vatomic16_get_or) | Applies bitwise or operation with v to the value of a and returns the old value. 
|
+| [vatomic16_get_or_acq](core_u16.h.md#function-vatomic16_get_or_acq) | Applies bitwise or operation with v to the value of a and returns the old value. |
+| [vatomic16_get_or_rel](core_u16.h.md#function-vatomic16_get_or_rel) | Applies bitwise or operation with v to the value of a and returns the old value. |
+| [vatomic16_get_or_rlx](core_u16.h.md#function-vatomic16_get_or_rlx) | Applies bitwise or operation with v to the value of a and returns the old value. |
+| [vatomic16_or_get](core_u16.h.md#function-vatomic16_or_get) | Applies bitwise or operation with v to the value of a and returns the new value. |
+| [vatomic16_or_get_acq](core_u16.h.md#function-vatomic16_or_get_acq) | Applies bitwise or operation with v to the value of a and returns the new value. |
+| [vatomic16_or_get_rel](core_u16.h.md#function-vatomic16_or_get_rel) | Applies bitwise or operation with v to the value of a and returns the new value. |
+| [vatomic16_or_get_rlx](core_u16.h.md#function-vatomic16_or_get_rlx) | Applies bitwise or operation with v to the value of a and returns the new value. |
+| [vatomic16_or](core_u16.h.md#function-vatomic16_or) | Applies bitwise or operation with v to the value of a. |
+| [vatomic16_or_rel](core_u16.h.md#function-vatomic16_or_rel) | Applies bitwise or operation with v to the value of a. |
+| [vatomic16_or_rlx](core_u16.h.md#function-vatomic16_or_rlx) | Applies bitwise or operation with v to the value of a. |
+| [vatomic16_get_xor](core_u16.h.md#function-vatomic16_get_xor) | Applies bitwise xor operation with v to the value of a and returns the old value. |
+| [vatomic16_get_xor_acq](core_u16.h.md#function-vatomic16_get_xor_acq) | Applies bitwise xor operation with v to the value of a and returns the old value. |
+| [vatomic16_get_xor_rel](core_u16.h.md#function-vatomic16_get_xor_rel) | Applies bitwise xor operation with v to the value of a and returns the old value. |
+| [vatomic16_get_xor_rlx](core_u16.h.md#function-vatomic16_get_xor_rlx) | Applies bitwise xor operation with v to the value of a and returns the old value. |
+| [vatomic16_xor_get](core_u16.h.md#function-vatomic16_xor_get) | Applies bitwise xor operation with v to the value of a and returns the new value. |
+| [vatomic16_xor_get_acq](core_u16.h.md#function-vatomic16_xor_get_acq) | Applies bitwise xor operation with v to the value of a and returns the new value. |
+| [vatomic16_xor_get_rel](core_u16.h.md#function-vatomic16_xor_get_rel) | Applies bitwise xor operation with v to the value of a and returns the new value. |
+| [vatomic16_xor_get_rlx](core_u16.h.md#function-vatomic16_xor_get_rlx) | Applies bitwise xor operation with v to the value of a and returns the new value. |
+| [vatomic16_xor](core_u16.h.md#function-vatomic16_xor) | Applies bitwise xor operation with v to the value of a. |
+| [vatomic16_xor_rel](core_u16.h.md#function-vatomic16_xor_rel) | Applies bitwise xor operation with v to the value of a. |
+| [vatomic16_xor_rlx](core_u16.h.md#function-vatomic16_xor_rlx) | Applies bitwise xor operation with v to the value of a. |
+| [vatomic16_get_add](core_u16.h.md#function-vatomic16_get_add) | Adds v to the value of a and returns the old value. |
+| [vatomic16_get_add_acq](core_u16.h.md#function-vatomic16_get_add_acq) | Adds v to the value of a and returns the old value. |
+| [vatomic16_get_add_rel](core_u16.h.md#function-vatomic16_get_add_rel) | Adds v to the value of a and returns the old value. 
| +| [vatomic16_get_add_rlx](core_u16.h.md#function-vatomic16_get_add_rlx) | Adds v to the value of a and returns the old value. | +| [vatomic16_add_get](core_u16.h.md#function-vatomic16_add_get) | Adds v to the value of a and returns the new value. | +| [vatomic16_add_get_acq](core_u16.h.md#function-vatomic16_add_get_acq) | Adds v to the value of a and returns the new value. | +| [vatomic16_add_get_rel](core_u16.h.md#function-vatomic16_add_get_rel) | Adds v to the value of a and returns the new value. | +| [vatomic16_add_get_rlx](core_u16.h.md#function-vatomic16_add_get_rlx) | Adds v to the value of a and returns the new value. | +| [vatomic16_add](core_u16.h.md#function-vatomic16_add) | Adds v to the value of a. | +| [vatomic16_add_rel](core_u16.h.md#function-vatomic16_add_rel) | Adds v to the value of a. | +| [vatomic16_add_rlx](core_u16.h.md#function-vatomic16_add_rlx) | Adds v to the value of a. | +| [vatomic16_get_inc](core_u16.h.md#function-vatomic16_get_inc) | Increments the value of a and returns the old value. | +| [vatomic16_get_inc_acq](core_u16.h.md#function-vatomic16_get_inc_acq) | Increments the value of a and returns the old value. | +| [vatomic16_get_inc_rel](core_u16.h.md#function-vatomic16_get_inc_rel) | Increments the value of a and returns the old value. | +| [vatomic16_get_inc_rlx](core_u16.h.md#function-vatomic16_get_inc_rlx) | Increments the value of a and returns the old value. | +| [vatomic16_inc_get](core_u16.h.md#function-vatomic16_inc_get) | Increments the value of a and returns the new value. | +| [vatomic16_inc_get_acq](core_u16.h.md#function-vatomic16_inc_get_acq) | Increments the value of a and returns the new value. | +| [vatomic16_inc_get_rel](core_u16.h.md#function-vatomic16_inc_get_rel) | Increments the value of a and returns the new value. | +| [vatomic16_inc_get_rlx](core_u16.h.md#function-vatomic16_inc_get_rlx) | Increments the value of a and returns the new value. | +| [vatomic16_inc](core_u16.h.md#function-vatomic16_inc) | Increments the value of a. | +| [vatomic16_inc_rel](core_u16.h.md#function-vatomic16_inc_rel) | Increments the value of a. | +| [vatomic16_inc_rlx](core_u16.h.md#function-vatomic16_inc_rlx) | Increments the value of a. | +| [vatomic16_get_sub](core_u16.h.md#function-vatomic16_get_sub) | Subtracts v from a and returns the old value. | +| [vatomic16_get_sub_acq](core_u16.h.md#function-vatomic16_get_sub_acq) | Subtracts v from a and returns the old value. | +| [vatomic16_get_sub_rel](core_u16.h.md#function-vatomic16_get_sub_rel) | Subtracts v from a and returns the old value. | +| [vatomic16_get_sub_rlx](core_u16.h.md#function-vatomic16_get_sub_rlx) | Subtracts v from a and returns the old value. | +| [vatomic16_sub_get](core_u16.h.md#function-vatomic16_sub_get) | Subtracts v from a and returns the new value. | +| [vatomic16_sub_get_acq](core_u16.h.md#function-vatomic16_sub_get_acq) | Subtracts v from a and returns the new value. | +| [vatomic16_sub_get_rel](core_u16.h.md#function-vatomic16_sub_get_rel) | Subtracts v from a and returns the new value. | +| [vatomic16_sub_get_rlx](core_u16.h.md#function-vatomic16_sub_get_rlx) | Subtracts v from a and returns the new value. | +| [vatomic16_sub](core_u16.h.md#function-vatomic16_sub) | Subtracts v from a. | +| [vatomic16_sub_rel](core_u16.h.md#function-vatomic16_sub_rel) | Subtracts v from a. | +| [vatomic16_sub_rlx](core_u16.h.md#function-vatomic16_sub_rlx) | Subtracts v from a. 
|
+| [vatomic16_get_dec](core_u16.h.md#function-vatomic16_get_dec) | Decrements the value of a and returns the old value. |
+| [vatomic16_get_dec_acq](core_u16.h.md#function-vatomic16_get_dec_acq) | Decrements the value of a and returns the old value. |
+| [vatomic16_get_dec_rel](core_u16.h.md#function-vatomic16_get_dec_rel) | Decrements the value of a and returns the old value. |
+| [vatomic16_get_dec_rlx](core_u16.h.md#function-vatomic16_get_dec_rlx) | Decrements the value of a and returns the old value. |
+| [vatomic16_dec_get](core_u16.h.md#function-vatomic16_dec_get) | Decrements the value of a and returns the new value. |
+| [vatomic16_dec_get_acq](core_u16.h.md#function-vatomic16_dec_get_acq) | Decrements the value of a and returns the new value. |
+| [vatomic16_dec_get_rel](core_u16.h.md#function-vatomic16_dec_get_rel) | Decrements the value of a and returns the new value. |
+| [vatomic16_dec_get_rlx](core_u16.h.md#function-vatomic16_dec_get_rlx) | Decrements the value of a and returns the new value. |
+| [vatomic16_dec](core_u16.h.md#function-vatomic16_dec) | Decrements the value of a. |
+| [vatomic16_dec_rel](core_u16.h.md#function-vatomic16_dec_rel) | Decrements the value of a. |
+| [vatomic16_dec_rlx](core_u16.h.md#function-vatomic16_dec_rlx) | Decrements the value of a. |
+
+## Function `vatomic16_init`
+
+```c
+static void vatomic16_init(vatomic16_t *a, vuint16_t v)
+```
+_Initializes the atomic variable a with value v._
+
+
+The initialization is equivalent to a [vatomic16_write()](core_u16.h.md#function-vatomic16_write).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: initial value
+
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic16_read`
+
+```c
+static vuint16_t vatomic16_read(const vatomic16_t *a)
+```
+_Returns the value of the atomic variable pointed by a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** current value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic16_read_acq`
+
+```c
+static vuint16_t vatomic16_read_acq(const vatomic16_t *a)
+```
+_Returns the value of the atomic variable pointed by a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** current value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic16_read_rlx`
+
+```c
+static vuint16_t vatomic16_read_rlx(const vatomic16_t *a)
+```
+_Returns the value of the atomic variable pointed by a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** current value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic16_write`
+
+```c
+static void vatomic16_write(vatomic16_t *a, vuint16_t v)
+```
+_Writes value v in the atomic variable pointed by a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: new value
+
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic16_write_rel`
+
+```c
+static void vatomic16_write_rel(vatomic16_t *a, vuint16_t v)
+```
+_Writes value v in the atomic variable pointed by a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: new value
+
+
+**Memory ordering**: release
+
+
+## Function `vatomic16_write_rlx`
+
+```c
+static void vatomic16_write_rlx(vatomic16_t *a, vuint16_t v)
+```
+_Writes value v in the atomic variable pointed by a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: new value
+
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic16_xchg`
+
+```c
+static vuint16_t vatomic16_xchg(vatomic16_t *a, vuint16_t v)
+```
+_Writes v in a and returns old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: new value
+
+
+**Returns:** 
old value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic16_xchg_acq`
+
+```c
+static vuint16_t vatomic16_xchg_acq(vatomic16_t *a, vuint16_t v)
+```
+_Writes v in a and returns old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: new value
+
+
+**Returns:** old value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic16_xchg_rel`
+
+```c
+static vuint16_t vatomic16_xchg_rel(vatomic16_t *a, vuint16_t v)
+```
+_Writes v in a and returns old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: new value
+
+
+**Returns:** old value
+
+**Memory ordering**: release
+
+
+## Function `vatomic16_xchg_rlx`
+
+```c
+static vuint16_t vatomic16_xchg_rlx(vatomic16_t *a, vuint16_t v)
+```
+_Writes v in a and returns old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: new value
+
+
+**Returns:** old value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic16_cmpxchg`
+
+```c
+static vuint16_t vatomic16_cmpxchg(vatomic16_t *a, vuint16_t e, vuint16_t v)
+```
+_Writes value v in a if e is the current value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `e`: expected value
+- `v`: new value
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic16_cmpxchg_acq`
+
+```c
+static vuint16_t vatomic16_cmpxchg_acq(vatomic16_t *a, vuint16_t e, vuint16_t v)
+```
+_Writes value v in a if e is the current value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `e`: expected value
+- `v`: new value
+
+
+**Returns:** old value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic16_cmpxchg_rel`
+
+```c
+static vuint16_t vatomic16_cmpxchg_rel(vatomic16_t *a, vuint16_t e, vuint16_t v)
+```
+_Writes value v in a if e is the current value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `e`: expected value
+- `v`: new value
+
+
+**Returns:** old value
+
+**Memory ordering**: release
+
+
+## Function `vatomic16_cmpxchg_rlx`
+
+```c
+static vuint16_t vatomic16_cmpxchg_rlx(vatomic16_t *a, vuint16_t e, vuint16_t v)
+```
+_Writes value v in a if e is the current value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `e`: expected value
+- `v`: new value
+
+
+**Returns:** old value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic16_get_max`
+
+```c
+static vuint16_t vatomic16_get_max(vatomic16_t *a, vuint16_t v)
+```
+_Writes v to a if v is greater than *a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: potential max value
+
+
+**Returns:** old max value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic16_get_max_acq`
+
+```c
+static vuint16_t vatomic16_get_max_acq(vatomic16_t *a, vuint16_t v)
+```
+_Writes v to a if v is greater than *a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: potential max value
+
+
+**Returns:** old max value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic16_get_max_rel`
+
+```c
+static vuint16_t vatomic16_get_max_rel(vatomic16_t *a, vuint16_t v)
+```
+_Writes v to a if v is greater than *a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: potential max value
+
+
+**Returns:** old max value
+
+**Memory ordering**: release
+
+
+## Function `vatomic16_get_max_rlx`
+
+```c
+static vuint16_t vatomic16_get_max_rlx(vatomic16_t *a, vuint16_t v)
+```
+_Writes v to a if v is greater than *a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: potential max value
+
+
+**Returns:** old max value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic16_max_get`
+
+```c
+static vuint16_t vatomic16_max_get(vatomic16_t *a, 
vuint16_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_max_get_acq` + +```c +static vuint16_t vatomic16_max_get_acq(vatomic16_t *a, vuint16_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: acquire + + +## Function `vatomic16_max_get_rel` + +```c +static vuint16_t vatomic16_max_get_rel(vatomic16_t *a, vuint16_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: release + + +## Function `vatomic16_max_get_rlx` + +```c +static vuint16_t vatomic16_max_get_rlx(vatomic16_t *a, vuint16_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: relaxed + + +## Function `vatomic16_max` + +```c +static void vatomic16_max(vatomic16_t *a, vuint16_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: seq_cst + + +## Function `vatomic16_max_rel` + +```c +static void vatomic16_max_rel(vatomic16_t *a, vuint16_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: release + + +## Function `vatomic16_max_rlx` + +```c +static void vatomic16_max_rlx(vatomic16_t *a, vuint16_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: relaxed + + +## Function `vatomic16_get_and` + +```c +static vuint16_t vatomic16_get_and(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_get_and_acq` + +```c +static vuint16_t vatomic16_get_and_acq(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic16_get_and_rel` + +```c +static vuint16_t vatomic16_get_and_rel(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic16_get_and_rlx` + +```c +static vuint16_t vatomic16_get_and_rlx(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic16_and_get` + +```c +static vuint16_t vatomic16_and_get(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: 
mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_and_get_acq` + +```c +static vuint16_t vatomic16_and_get_acq(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic16_and_get_rel` + +```c +static vuint16_t vatomic16_and_get_rel(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic16_and_get_rlx` + +```c +static vuint16_t vatomic16_and_get_rlx(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic16_and` + +```c +static void vatomic16_and(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomic16_and_rel` + +```c +static void vatomic16_and_rel(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomic16_and_rlx` + +```c +static void vatomic16_and_rlx(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomic16_get_or` + +```c +static vuint16_t vatomic16_get_or(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_get_or_acq` + +```c +static vuint16_t vatomic16_get_or_acq(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic16_get_or_rel` + +```c +static vuint16_t vatomic16_get_or_rel(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic16_get_or_rlx` + +```c +static vuint16_t vatomic16_get_or_rlx(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic16_or_get` + +```c +static vuint16_t vatomic16_or_get(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst 
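+
+As an illustrative aside (not part of the generated reference; the helper
+name and mask layout are assumptions), the `_get` variants are handy when
+the caller needs the mask as it looks after the update, e.g. with
+`vatomic16_or_get`:
+
+```c
+#include <vsync/atomic.h>
+
+vatomic16_t eg_mask; /* shared bit mask (illustrative) */
+
+/* Sets bit and reports whether all bits in required are now present. */
+int
+eg_set_and_check(vuint16_t bit, vuint16_t required)
+{
+    /* seq_cst bitwise or; returns the new value of the mask */
+    vuint16_t now = vatomic16_or_get(&eg_mask, bit);
+    return (now & required) == required;
+}
+```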
+ + +## Function `vatomic16_or_get_acq` + +```c +static vuint16_t vatomic16_or_get_acq(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic16_or_get_rel` + +```c +static vuint16_t vatomic16_or_get_rel(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic16_or_get_rlx` + +```c +static vuint16_t vatomic16_or_get_rlx(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic16_or` + +```c +static void vatomic16_or(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomic16_or_rel` + +```c +static void vatomic16_or_rel(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomic16_or_rlx` + +```c +static void vatomic16_or_rlx(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomic16_get_xor` + +```c +static vuint16_t vatomic16_get_xor(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_get_xor_acq` + +```c +static vuint16_t vatomic16_get_xor_acq(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic16_get_xor_rel` + +```c +static vuint16_t vatomic16_get_xor_rel(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic16_get_xor_rlx` + +```c +static vuint16_t vatomic16_get_xor_rlx(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic16_xor_get` + +```c +static vuint16_t vatomic16_xor_get(vatomic16_t *a, vuint16_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_xor_get_acq` + +```c +static vuint16_t 
vatomic16_xor_get_acq(vatomic16_t *a, vuint16_t v)
+```
+_Applies bitwise xor operation with v to the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: mask value
+
+
+**Returns:** new value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic16_xor_get_rel`
+
+```c
+static vuint16_t vatomic16_xor_get_rel(vatomic16_t *a, vuint16_t v)
+```
+_Applies bitwise xor operation with v to the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: mask value
+
+
+**Returns:** new value
+
+**Memory ordering**: release
+
+
+## Function `vatomic16_xor_get_rlx`
+
+```c
+static vuint16_t vatomic16_xor_get_rlx(vatomic16_t *a, vuint16_t v)
+```
+_Applies bitwise xor operation with v to the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: mask value
+
+
+**Returns:** new value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic16_xor`
+
+```c
+static void vatomic16_xor(vatomic16_t *a, vuint16_t v)
+```
+_Applies bitwise xor operation with v to the value of a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: mask value
+
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic16_xor_rel`
+
+```c
+static void vatomic16_xor_rel(vatomic16_t *a, vuint16_t v)
+```
+_Applies bitwise xor operation with v to the value of a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: mask value
+
+
+**Memory ordering**: release
+
+
+## Function `vatomic16_xor_rlx`
+
+```c
+static void vatomic16_xor_rlx(vatomic16_t *a, vuint16_t v)
+```
+_Applies bitwise xor operation with v to the value of a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: mask value
+
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic16_get_add`
+
+```c
+static vuint16_t vatomic16_get_add(vatomic16_t *a, vuint16_t v)
+```
+_Adds v to the value of a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: addend value
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic16_get_add_acq`
+
+```c
+static vuint16_t vatomic16_get_add_acq(vatomic16_t *a, vuint16_t v)
+```
+_Adds v to the value of a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: addend value
+
+
+**Returns:** old value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic16_get_add_rel`
+
+```c
+static vuint16_t vatomic16_get_add_rel(vatomic16_t *a, vuint16_t v)
+```
+_Adds v to the value of a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: addend value
+
+
+**Returns:** old value
+
+**Memory ordering**: release
+
+
+## Function `vatomic16_get_add_rlx`
+
+```c
+static vuint16_t vatomic16_get_add_rlx(vatomic16_t *a, vuint16_t v)
+```
+_Adds v to the value of a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: addend value
+
+
+**Returns:** old value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic16_add_get`
+
+```c
+static vuint16_t vatomic16_add_get(vatomic16_t *a, vuint16_t v)
+```
+_Adds v to the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: addend value
+
+
+**Returns:** new value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic16_add_get_acq`
+
+```c
+static vuint16_t vatomic16_add_get_acq(vatomic16_t *a, vuint16_t v)
+```
+_Adds v to the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: addend value
+
+
+**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic16_add_get_rel` + +```c +static vuint16_t vatomic16_add_get_rel(vatomic16_t *a, vuint16_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic16_add_get_rlx` + +```c +static vuint16_t vatomic16_add_get_rlx(vatomic16_t *a, vuint16_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic16_add` + +```c +static void vatomic16_add(vatomic16_t *a, vuint16_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: seq_cst + + +## Function `vatomic16_add_rel` + +```c +static void vatomic16_add_rel(vatomic16_t *a, vuint16_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: release + + +## Function `vatomic16_add_rlx` + +```c +static void vatomic16_add_rlx(vatomic16_t *a, vuint16_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: relaxed + + +## Function `vatomic16_get_inc` + +```c +static vuint16_t vatomic16_get_inc(vatomic16_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_get_inc_acq` + +```c +static vuint16_t vatomic16_get_inc_acq(vatomic16_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic16_get_inc_rel` + +```c +static vuint16_t vatomic16_get_inc_rel(vatomic16_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic16_get_inc_rlx` + +```c +static vuint16_t vatomic16_get_inc_rlx(vatomic16_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic16_inc_get` + +```c +static vuint16_t vatomic16_inc_get(vatomic16_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_inc_get_acq` + +```c +static vuint16_t vatomic16_inc_get_acq(vatomic16_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic16_inc_get_rel` + +```c +static vuint16_t vatomic16_inc_get_rel(vatomic16_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic16_inc_get_rlx` + +```c +static vuint16_t vatomic16_inc_get_rlx(vatomic16_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + 
+**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic16_inc` + +```c +static void vatomic16_inc(vatomic16_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: seq_cst + + +## Function `vatomic16_inc_rel` + +```c +static void vatomic16_inc_rel(vatomic16_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: release + + +## Function `vatomic16_inc_rlx` + +```c +static void vatomic16_inc_rlx(vatomic16_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: relaxed + + +## Function `vatomic16_get_sub` + +```c +static vuint16_t vatomic16_get_sub(vatomic16_t *a, vuint16_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_get_sub_acq` + +```c +static vuint16_t vatomic16_get_sub_acq(vatomic16_t *a, vuint16_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic16_get_sub_rel` + +```c +static vuint16_t vatomic16_get_sub_rel(vatomic16_t *a, vuint16_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic16_get_sub_rlx` + +```c +static vuint16_t vatomic16_get_sub_rlx(vatomic16_t *a, vuint16_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic16_sub_get` + +```c +static vuint16_t vatomic16_sub_get(vatomic16_t *a, vuint16_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_sub_get_acq` + +```c +static vuint16_t vatomic16_sub_get_acq(vatomic16_t *a, vuint16_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic16_sub_get_rel` + +```c +static vuint16_t vatomic16_sub_get_rel(vatomic16_t *a, vuint16_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic16_sub_get_rlx` + +```c +static vuint16_t vatomic16_sub_get_rlx(vatomic16_t *a, vuint16_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic16_sub` + +```c +static void vatomic16_sub(vatomic16_t *a, vuint16_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Memory ordering**: seq_cst + + +## Function `vatomic16_sub_rel` + +```c +static void vatomic16_sub_rel(vatomic16_t *a, vuint16_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- 
`v`: subtrahend value + + +**Memory ordering**: release + + +## Function `vatomic16_sub_rlx` + +```c +static void vatomic16_sub_rlx(vatomic16_t *a, vuint16_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Memory ordering**: relaxed + + +## Function `vatomic16_get_dec` + +```c +static vuint16_t vatomic16_get_dec(vatomic16_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_get_dec_acq` + +```c +static vuint16_t vatomic16_get_dec_acq(vatomic16_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic16_get_dec_rel` + +```c +static vuint16_t vatomic16_get_dec_rel(vatomic16_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic16_get_dec_rlx` + +```c +static vuint16_t vatomic16_get_dec_rlx(vatomic16_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic16_dec_get` + +```c +static vuint16_t vatomic16_dec_get(vatomic16_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic16_dec_get_acq` + +```c +static vuint16_t vatomic16_dec_get_acq(vatomic16_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic16_dec_get_rel` + +```c +static vuint16_t vatomic16_dec_get_rel(vatomic16_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic16_dec_get_rlx` + +```c +static vuint16_t vatomic16_dec_get_rlx(vatomic16_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic16_dec` + +```c +static void vatomic16_dec(vatomic16_t *a) +``` +_Decrements the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: seq_cst + + +## Function `vatomic16_dec_rel` + +```c +static void vatomic16_dec_rel(vatomic16_t *a) +``` +_Decrements the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: release + + +## Function `vatomic16_dec_rlx` + +```c +static void vatomic16_dec_rlx(vatomic16_t *a) +``` +_Decrements the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: relaxed + + + +--- diff --git a/doc/api/vsync/atomic/core_u32.h.md b/doc/api/vsync/atomic/core_u32.h.md new file mode 100644 index 00000000..7f0b944e --- /dev/null +++ b/doc/api/vsync/atomic/core_u32.h.md @@ -0,0 +1,2208 @@ +# [vsync](../README.md) / [atomic](README.md) / core_u32.h +_Atomic functions for vatomic32_t variables._ + +This file declares and documents the core atomic functions operating on vatomic32_t variables. 
+
+Please do not include this file directly, instead use:
+
+```c
+#include <vsync/atomic/core.h>
+```
+
+
+
+---
+# Functions
+
+| Function | Description |
+|---|---|
+| [vatomic32_init](core_u32.h.md#function-vatomic32_init) | Initializes the atomic variable a with value v. |
+| [vatomic32_read](core_u32.h.md#function-vatomic32_read) | Returns the value of the atomic variable pointed by a. |
+| [vatomic32_read_acq](core_u32.h.md#function-vatomic32_read_acq) | Returns the value of the atomic variable pointed by a. |
+| [vatomic32_read_rlx](core_u32.h.md#function-vatomic32_read_rlx) | Returns the value of the atomic variable pointed by a. |
+| [vatomic32_write](core_u32.h.md#function-vatomic32_write) | Writes value v in the atomic variable pointed by a. |
+| [vatomic32_write_rel](core_u32.h.md#function-vatomic32_write_rel) | Writes value v in the atomic variable pointed by a. |
+| [vatomic32_write_rlx](core_u32.h.md#function-vatomic32_write_rlx) | Writes value v in the atomic variable pointed by a. |
+| [vatomic32_xchg](core_u32.h.md#function-vatomic32_xchg) | Writes v in a and returns old value. |
+| [vatomic32_xchg_acq](core_u32.h.md#function-vatomic32_xchg_acq) | Writes v in a and returns old value. |
+| [vatomic32_xchg_rel](core_u32.h.md#function-vatomic32_xchg_rel) | Writes v in a and returns old value. |
+| [vatomic32_xchg_rlx](core_u32.h.md#function-vatomic32_xchg_rlx) | Writes v in a and returns old value. |
+| [vatomic32_cmpxchg](core_u32.h.md#function-vatomic32_cmpxchg) | Writes value v in a if e is the current value. |
+| [vatomic32_cmpxchg_acq](core_u32.h.md#function-vatomic32_cmpxchg_acq) | Writes value v in a if e is the current value. |
+| [vatomic32_cmpxchg_rel](core_u32.h.md#function-vatomic32_cmpxchg_rel) | Writes value v in a if e is the current value. |
+| [vatomic32_cmpxchg_rlx](core_u32.h.md#function-vatomic32_cmpxchg_rlx) | Writes value v in a if e is the current value. |
+| [vatomic32_get_max](core_u32.h.md#function-vatomic32_get_max) | Writes v to a if v is greater than *a and returns the old value. |
+| [vatomic32_get_max_acq](core_u32.h.md#function-vatomic32_get_max_acq) | Writes v to a if v is greater than *a and returns the old value. |
+| [vatomic32_get_max_rel](core_u32.h.md#function-vatomic32_get_max_rel) | Writes v to a if v is greater than *a and returns the old value. |
+| [vatomic32_get_max_rlx](core_u32.h.md#function-vatomic32_get_max_rlx) | Writes v to a if v is greater than *a and returns the old value. |
+| [vatomic32_max_get](core_u32.h.md#function-vatomic32_max_get) | Writes v to a if v is greater than *a and returns the new value. |
+| [vatomic32_max_get_acq](core_u32.h.md#function-vatomic32_max_get_acq) | Writes v to a if v is greater than *a and returns the new value. |
+| [vatomic32_max_get_rel](core_u32.h.md#function-vatomic32_max_get_rel) | Writes v to a if v is greater than *a and returns the new value. |
+| [vatomic32_max_get_rlx](core_u32.h.md#function-vatomic32_max_get_rlx) | Writes v to a if v is greater than *a and returns the new value. |
+| [vatomic32_max](core_u32.h.md#function-vatomic32_max) | Writes v to a if v is greater than *a. |
+| [vatomic32_max_rel](core_u32.h.md#function-vatomic32_max_rel) | Writes v to a if v is greater than *a. |
+| [vatomic32_max_rlx](core_u32.h.md#function-vatomic32_max_rlx) | Writes v to a if v is greater than *a. |
+| [vatomic32_get_and](core_u32.h.md#function-vatomic32_get_and) | Applies bitwise and to the value of a and returns the old value. 
| +| [vatomic32_get_and_acq](core_u32.h.md#function-vatomic32_get_and_acq) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic32_get_and_rel](core_u32.h.md#function-vatomic32_get_and_rel) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic32_get_and_rlx](core_u32.h.md#function-vatomic32_get_and_rlx) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic32_and_get](core_u32.h.md#function-vatomic32_and_get) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic32_and_get_acq](core_u32.h.md#function-vatomic32_and_get_acq) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic32_and_get_rel](core_u32.h.md#function-vatomic32_and_get_rel) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic32_and_get_rlx](core_u32.h.md#function-vatomic32_and_get_rlx) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic32_and](core_u32.h.md#function-vatomic32_and) | Applies bitwise and operation with v to the value of a. | +| [vatomic32_and_rel](core_u32.h.md#function-vatomic32_and_rel) | Applies bitwise and operation with v to the value of a. | +| [vatomic32_and_rlx](core_u32.h.md#function-vatomic32_and_rlx) | Applies bitwise and operation with v to the value of a. | +| [vatomic32_get_or](core_u32.h.md#function-vatomic32_get_or) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomic32_get_or_acq](core_u32.h.md#function-vatomic32_get_or_acq) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomic32_get_or_rel](core_u32.h.md#function-vatomic32_get_or_rel) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomic32_get_or_rlx](core_u32.h.md#function-vatomic32_get_or_rlx) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomic32_or_get](core_u32.h.md#function-vatomic32_or_get) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic32_or_get_acq](core_u32.h.md#function-vatomic32_or_get_acq) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic32_or_get_rel](core_u32.h.md#function-vatomic32_or_get_rel) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic32_or_get_rlx](core_u32.h.md#function-vatomic32_or_get_rlx) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic32_or](core_u32.h.md#function-vatomic32_or) | Applies bitwise or operation with v to the value of a. | +| [vatomic32_or_rel](core_u32.h.md#function-vatomic32_or_rel) | Applies bitwise or operation with v to the value of a. | +| [vatomic32_or_rlx](core_u32.h.md#function-vatomic32_or_rlx) | Applies bitwise or operation with v to the value of a. | +| [vatomic32_get_xor](core_u32.h.md#function-vatomic32_get_xor) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomic32_get_xor_acq](core_u32.h.md#function-vatomic32_get_xor_acq) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomic32_get_xor_rel](core_u32.h.md#function-vatomic32_get_xor_rel) | Applies bitwise xor operation with v to the value of a and returns the old value. 
| +| [vatomic32_get_xor_rlx](core_u32.h.md#function-vatomic32_get_xor_rlx) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomic32_xor_get](core_u32.h.md#function-vatomic32_xor_get) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic32_xor_get_acq](core_u32.h.md#function-vatomic32_xor_get_acq) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic32_xor_get_rel](core_u32.h.md#function-vatomic32_xor_get_rel) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic32_xor_get_rlx](core_u32.h.md#function-vatomic32_xor_get_rlx) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic32_xor](core_u32.h.md#function-vatomic32_xor) | Applies bitwise xor operation with v to the value of a. | +| [vatomic32_xor_rel](core_u32.h.md#function-vatomic32_xor_rel) | Applies bitwise xor operation with v to the value of a. | +| [vatomic32_xor_rlx](core_u32.h.md#function-vatomic32_xor_rlx) | Applies bitwise xor operation with v to the value of a. | +| [vatomic32_get_add](core_u32.h.md#function-vatomic32_get_add) | Adds v to the value of a and returns the old value. | +| [vatomic32_get_add_acq](core_u32.h.md#function-vatomic32_get_add_acq) | Adds v to the value of a and returns the old value. | +| [vatomic32_get_add_rel](core_u32.h.md#function-vatomic32_get_add_rel) | Adds v to the value of a and returns the old value. | +| [vatomic32_get_add_rlx](core_u32.h.md#function-vatomic32_get_add_rlx) | Adds v to the value of a and returns the old value. | +| [vatomic32_add_get](core_u32.h.md#function-vatomic32_add_get) | Adds v to the value of a and returns the new value. | +| [vatomic32_add_get_acq](core_u32.h.md#function-vatomic32_add_get_acq) | Adds v to the value of a and returns the new value. | +| [vatomic32_add_get_rel](core_u32.h.md#function-vatomic32_add_get_rel) | Adds v to the value of a and returns the new value. | +| [vatomic32_add_get_rlx](core_u32.h.md#function-vatomic32_add_get_rlx) | Adds v to the value of a and returns the new value. | +| [vatomic32_add](core_u32.h.md#function-vatomic32_add) | Adds v to the value of a. | +| [vatomic32_add_rel](core_u32.h.md#function-vatomic32_add_rel) | Adds v to the value of a. | +| [vatomic32_add_rlx](core_u32.h.md#function-vatomic32_add_rlx) | Adds v to the value of a. | +| [vatomic32_get_inc](core_u32.h.md#function-vatomic32_get_inc) | Increments the value of a and returns the old value. | +| [vatomic32_get_inc_acq](core_u32.h.md#function-vatomic32_get_inc_acq) | Increments the value of a and returns the old value. | +| [vatomic32_get_inc_rel](core_u32.h.md#function-vatomic32_get_inc_rel) | Increments the value of a and returns the old value. | +| [vatomic32_get_inc_rlx](core_u32.h.md#function-vatomic32_get_inc_rlx) | Increments the value of a and returns the old value. | +| [vatomic32_inc_get](core_u32.h.md#function-vatomic32_inc_get) | Increments the value of a and returns the new value. | +| [vatomic32_inc_get_acq](core_u32.h.md#function-vatomic32_inc_get_acq) | Increments the value of a and returns the new value. | +| [vatomic32_inc_get_rel](core_u32.h.md#function-vatomic32_inc_get_rel) | Increments the value of a and returns the new value. | +| [vatomic32_inc_get_rlx](core_u32.h.md#function-vatomic32_inc_get_rlx) | Increments the value of a and returns the new value.
| +| [vatomic32_inc](core_u32.h.md#function-vatomic32_inc) | Increments the value of a. | +| [vatomic32_inc_rel](core_u32.h.md#function-vatomic32_inc_rel) | Increments the value of a. | +| [vatomic32_inc_rlx](core_u32.h.md#function-vatomic32_inc_rlx) | Increments the value of a. | +| [vatomic32_get_sub](core_u32.h.md#function-vatomic32_get_sub) | Subtracts v from a and returns the old value. | +| [vatomic32_get_sub_acq](core_u32.h.md#function-vatomic32_get_sub_acq) | Subtracts v from a and returns the old value. | +| [vatomic32_get_sub_rel](core_u32.h.md#function-vatomic32_get_sub_rel) | Subtracts v from a and returns the old value. | +| [vatomic32_get_sub_rlx](core_u32.h.md#function-vatomic32_get_sub_rlx) | Subtracts v from a and returns the old value. | +| [vatomic32_sub_get](core_u32.h.md#function-vatomic32_sub_get) | Subtracts v from a and returns the new value. | +| [vatomic32_sub_get_acq](core_u32.h.md#function-vatomic32_sub_get_acq) | Subtracts v from a and returns the new value. | +| [vatomic32_sub_get_rel](core_u32.h.md#function-vatomic32_sub_get_rel) | Subtracts v from a and returns the new value. | +| [vatomic32_sub_get_rlx](core_u32.h.md#function-vatomic32_sub_get_rlx) | Subtracts v from a and returns the new value. | +| [vatomic32_sub](core_u32.h.md#function-vatomic32_sub) | Subtracts v from a. | +| [vatomic32_sub_rel](core_u32.h.md#function-vatomic32_sub_rel) | Subtracts v from a. | +| [vatomic32_sub_rlx](core_u32.h.md#function-vatomic32_sub_rlx) | Subtracts v from a. | +| [vatomic32_get_dec](core_u32.h.md#function-vatomic32_get_dec) | Decrements the value of a and returns the old value. | +| [vatomic32_get_dec_acq](core_u32.h.md#function-vatomic32_get_dec_acq) | Decrements the value of a and returns the old value. | +| [vatomic32_get_dec_rel](core_u32.h.md#function-vatomic32_get_dec_rel) | Decrements the value of a and returns the old value. | +| [vatomic32_get_dec_rlx](core_u32.h.md#function-vatomic32_get_dec_rlx) | Decrements the value of a and returns the old value. | +| [vatomic32_dec_get](core_u32.h.md#function-vatomic32_dec_get) | Decrements the value of a and returns the new value. | +| [vatomic32_dec_get_acq](core_u32.h.md#function-vatomic32_dec_get_acq) | Decrements the value of a and returns the new value. | +| [vatomic32_dec_get_rel](core_u32.h.md#function-vatomic32_dec_get_rel) | Decrements the value of a and returns the new value. | +| [vatomic32_dec_get_rlx](core_u32.h.md#function-vatomic32_dec_get_rlx) | Decrements the value of a and returns the new value. | +| [vatomic32_dec](core_u32.h.md#function-vatomic32_dec) | Decrements the value of a. | +| [vatomic32_dec_rel](core_u32.h.md#function-vatomic32_dec_rel) | Decrements the value of a. | +| [vatomic32_dec_rlx](core_u32.h.md#function-vatomic32_dec_rlx) | Decrements the value of a. | + +## Function `vatomic32_init` + +```c +static void vatomic32_init(vatomic32_t *a, vuint32_t v) +``` +_Initializes the atomic variable a with value v._ + + +The initialization is equivalent to a [vatomic32_write()](core_u32.h.md#function-vatomic32_write).
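+
+A minimal usage sketch follows (illustrative only; the `counter` variable and
+`counter_setup` function are hypothetical, not part of this header):
+
+```c
+#include <vsync/atomic/core.h>
+
+vatomic32_t counter;
+
+void
+counter_setup(void)
+{
+    /* equivalent to vatomic32_write(&counter, 0) */
+    vatomic32_init(&counter, 0);
+}
+```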
+ + + +**Parameters:** + +- `a`: atomic variable +- `v`: initial value + + +**Memory ordering**: seq_cst + + +## Function `vatomic32_read` + +```c +static vuint32_t vatomic32_read(const vatomic32_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_read_acq` + +```c +static vuint32_t vatomic32_read_acq(const vatomic32_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: acquire + + +## Function `vatomic32_read_rlx` + +```c +static vuint32_t vatomic32_read_rlx(const vatomic32_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: relaxed + + +## Function `vatomic32_write` + +```c +static void vatomic32_write(vatomic32_t *a, vuint32_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: seq_cst + + +## Function `vatomic32_write_rel` + +```c +static void vatomic32_write_rel(vatomic32_t *a, vuint32_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: release + + +## Function `vatomic32_write_rlx` + +```c +static void vatomic32_write_rlx(vatomic32_t *a, vuint32_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: relaxed + + +## Function `vatomic32_xchg` + +```c +static vuint32_t vatomic32_xchg(vatomic32_t *a, vuint32_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_xchg_acq` + +```c +static vuint32_t vatomic32_xchg_acq(vatomic32_t *a, vuint32_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_xchg_rel` + +```c +static vuint32_t vatomic32_xchg_rel(vatomic32_t *a, vuint32_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_xchg_rlx` + +```c +static vuint32_t vatomic32_xchg_rlx(vatomic32_t *a, vuint32_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_cmpxchg` + +```c +static vuint32_t vatomic32_cmpxchg(vatomic32_t *a, vuint32_t e, vuint32_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic32_cmpxchg_acq` + +```c +static vuint32_t vatomic32_cmpxchg_acq(vatomic32_t *a, vuint32_t e, vuint32_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic32_cmpxchg_rel` + +```c +static 
vuint32_t vatomic32_cmpxchg_rel(vatomic32_t *a, vuint32_t e, vuint32_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic32_cmpxchg_rlx` + +```c +static vuint32_t vatomic32_cmpxchg_rlx(vatomic32_t *a, vuint32_t e, vuint32_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic32_get_max` + +```c +static vuint32_t vatomic32_get_max(vatomic32_t *a, vuint32_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_get_max_acq` + +```c +static vuint32_t vatomic32_get_max_acq(vatomic32_t *a, vuint32_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: acquire + + +## Function `vatomic32_get_max_rel` + +```c +static vuint32_t vatomic32_get_max_rel(vatomic32_t *a, vuint32_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: release + + +## Function `vatomic32_get_max_rlx` + +```c +static vuint32_t vatomic32_get_max_rlx(vatomic32_t *a, vuint32_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: relaxed + + +## Function `vatomic32_max_get` + +```c +static vuint32_t vatomic32_max_get(vatomic32_t *a, vuint32_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_max_get_acq` + +```c +static vuint32_t vatomic32_max_get_acq(vatomic32_t *a, vuint32_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: acquire + + +## Function `vatomic32_max_get_rel` + +```c +static vuint32_t vatomic32_max_get_rel(vatomic32_t *a, vuint32_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: release + + +## Function `vatomic32_max_get_rlx` + +```c +static vuint32_t vatomic32_max_get_rlx(vatomic32_t *a, vuint32_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: relaxed + + +## Function `vatomic32_max` + +```c +static void vatomic32_max(vatomic32_t *a, vuint32_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: seq_cst + + +## Function `vatomic32_max_rel` + +```c +static void 
vatomic32_max_rel(vatomic32_t *a, vuint32_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: release + + +## Function `vatomic32_max_rlx` + +```c +static void vatomic32_max_rlx(vatomic32_t *a, vuint32_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: relaxed + + +## Function `vatomic32_get_and` + +```c +static vuint32_t vatomic32_get_and(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_get_and_acq` + +```c +static vuint32_t vatomic32_get_and_acq(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_get_and_rel` + +```c +static vuint32_t vatomic32_get_and_rel(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_get_and_rlx` + +```c +static vuint32_t vatomic32_get_and_rlx(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_and_get` + +```c +static vuint32_t vatomic32_and_get(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_and_get_acq` + +```c +static vuint32_t vatomic32_and_get_acq(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic32_and_get_rel` + +```c +static vuint32_t vatomic32_and_get_rel(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic32_and_get_rlx` + +```c +static vuint32_t vatomic32_and_get_rlx(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic32_and` + +```c +static void vatomic32_and(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomic32_and_rel` + +```c +static void vatomic32_and_rel(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + 
+- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomic32_and_rlx` + +```c +static void vatomic32_and_rlx(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomic32_get_or` + +```c +static vuint32_t vatomic32_get_or(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_get_or_acq` + +```c +static vuint32_t vatomic32_get_or_acq(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_get_or_rel` + +```c +static vuint32_t vatomic32_get_or_rel(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_get_or_rlx` + +```c +static vuint32_t vatomic32_get_or_rlx(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_or_get` + +```c +static vuint32_t vatomic32_or_get(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_or_get_acq` + +```c +static vuint32_t vatomic32_or_get_acq(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic32_or_get_rel` + +```c +static vuint32_t vatomic32_or_get_rel(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic32_or_get_rlx` + +```c +static vuint32_t vatomic32_or_get_rlx(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic32_or` + +```c +static void vatomic32_or(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomic32_or_rel` + +```c +static void vatomic32_or_rel(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## 
Function `vatomic32_or_rlx` + +```c +static void vatomic32_or_rlx(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomic32_get_xor` + +```c +static vuint32_t vatomic32_get_xor(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_get_xor_acq` + +```c +static vuint32_t vatomic32_get_xor_acq(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_get_xor_rel` + +```c +static vuint32_t vatomic32_get_xor_rel(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_get_xor_rlx` + +```c +static vuint32_t vatomic32_get_xor_rlx(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_xor_get` + +```c +static vuint32_t vatomic32_xor_get(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_xor_get_acq` + +```c +static vuint32_t vatomic32_xor_get_acq(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic32_xor_get_rel` + +```c +static vuint32_t vatomic32_xor_get_rel(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic32_xor_get_rlx` + +```c +static vuint32_t vatomic32_xor_get_rlx(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic32_xor` + +```c +static void vatomic32_xor(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomic32_xor_rel` + +```c +static void vatomic32_xor_rel(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomic32_xor_rlx` + +```c +static void 
vatomic32_xor_rlx(vatomic32_t *a, vuint32_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomic32_get_add` + +```c +static vuint32_t vatomic32_get_add(vatomic32_t *a, vuint32_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_get_add_acq` + +```c +static vuint32_t vatomic32_get_add_acq(vatomic32_t *a, vuint32_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_get_add_rel` + +```c +static vuint32_t vatomic32_get_add_rel(vatomic32_t *a, vuint32_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_get_add_rlx` + +```c +static vuint32_t vatomic32_get_add_rlx(vatomic32_t *a, vuint32_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_add_get` + +```c +static vuint32_t vatomic32_add_get(vatomic32_t *a, vuint32_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_add_get_acq` + +```c +static vuint32_t vatomic32_add_get_acq(vatomic32_t *a, vuint32_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic32_add_get_rel` + +```c +static vuint32_t vatomic32_add_get_rel(vatomic32_t *a, vuint32_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic32_add_get_rlx` + +```c +static vuint32_t vatomic32_add_get_rlx(vatomic32_t *a, vuint32_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic32_add` + +```c +static void vatomic32_add(vatomic32_t *a, vuint32_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: seq_cst + + +## Function `vatomic32_add_rel` + +```c +static void vatomic32_add_rel(vatomic32_t *a, vuint32_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: release + + +## Function `vatomic32_add_rlx` + +```c +static void vatomic32_add_rlx(vatomic32_t *a, vuint32_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: relaxed + + +## Function `vatomic32_get_inc` + +```c +static vuint32_t vatomic32_get_inc(vatomic32_t *a) +``` +_Increments the value of a and returns the old 
value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_get_inc_acq` + +```c +static vuint32_t vatomic32_get_inc_acq(vatomic32_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_get_inc_rel` + +```c +static vuint32_t vatomic32_get_inc_rel(vatomic32_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_get_inc_rlx` + +```c +static vuint32_t vatomic32_get_inc_rlx(vatomic32_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_inc_get` + +```c +static vuint32_t vatomic32_inc_get(vatomic32_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_inc_get_acq` + +```c +static vuint32_t vatomic32_inc_get_acq(vatomic32_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic32_inc_get_rel` + +```c +static vuint32_t vatomic32_inc_get_rel(vatomic32_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic32_inc_get_rlx` + +```c +static vuint32_t vatomic32_inc_get_rlx(vatomic32_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic32_inc` + +```c +static void vatomic32_inc(vatomic32_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: seq_cst + + +## Function `vatomic32_inc_rel` + +```c +static void vatomic32_inc_rel(vatomic32_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: release + + +## Function `vatomic32_inc_rlx` + +```c +static void vatomic32_inc_rlx(vatomic32_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: relaxed + + +## Function `vatomic32_get_sub` + +```c +static vuint32_t vatomic32_get_sub(vatomic32_t *a, vuint32_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_get_sub_acq` + +```c +static vuint32_t vatomic32_get_sub_acq(vatomic32_t *a, vuint32_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_get_sub_rel` + +```c +static vuint32_t vatomic32_get_sub_rel(vatomic32_t *a, vuint32_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** 
old value + +**Memory ordering**: release + + +## Function `vatomic32_get_sub_rlx` + +```c +static vuint32_t vatomic32_get_sub_rlx(vatomic32_t *a, vuint32_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_sub_get` + +```c +static vuint32_t vatomic32_sub_get(vatomic32_t *a, vuint32_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_sub_get_acq` + +```c +static vuint32_t vatomic32_sub_get_acq(vatomic32_t *a, vuint32_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic32_sub_get_rel` + +```c +static vuint32_t vatomic32_sub_get_rel(vatomic32_t *a, vuint32_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic32_sub_get_rlx` + +```c +static vuint32_t vatomic32_sub_get_rlx(vatomic32_t *a, vuint32_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic32_sub` + +```c +static void vatomic32_sub(vatomic32_t *a, vuint32_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Memory ordering**: seq_cst + + +## Function `vatomic32_sub_rel` + +```c +static void vatomic32_sub_rel(vatomic32_t *a, vuint32_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Memory ordering**: release + + +## Function `vatomic32_sub_rlx` + +```c +static void vatomic32_sub_rlx(vatomic32_t *a, vuint32_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Memory ordering**: relaxed + + +## Function `vatomic32_get_dec` + +```c +static vuint32_t vatomic32_get_dec(vatomic32_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_get_dec_acq` + +```c +static vuint32_t vatomic32_get_dec_acq(vatomic32_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic32_get_dec_rel` + +```c +static vuint32_t vatomic32_get_dec_rel(vatomic32_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic32_get_dec_rlx` + +```c +static vuint32_t vatomic32_get_dec_rlx(vatomic32_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic32_dec_get` + +```c +static vuint32_t vatomic32_dec_get(vatomic32_t *a) +``` +_Decrements the value of a and returns the new value._ + 
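+A typical use of the returned value is a reference-count release path, as in
+this sketch (the `obj_t` type and `obj_free` function are hypothetical):
+
+```c
+#include <vsync/atomic/core.h>
+
+typedef struct obj_s {
+    vatomic32_t refcnt;
+} obj_t;
+
+void obj_free(obj_t *o); /* hypothetical destructor */
+
+void
+obj_put(obj_t *o)
+{
+    /* the caller that decrements the count to zero frees the object */
+    if (vatomic32_dec_get(&o->refcnt) == 0U) {
+        obj_free(o);
+    }
+}
+```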
+ + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic32_dec_get_acq` + +```c +static vuint32_t vatomic32_dec_get_acq(vatomic32_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic32_dec_get_rel` + +```c +static vuint32_t vatomic32_dec_get_rel(vatomic32_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic32_dec_get_rlx` + +```c +static vuint32_t vatomic32_dec_get_rlx(vatomic32_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic32_dec` + +```c +static void vatomic32_dec(vatomic32_t *a) +``` +_Decrements the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: seq_cst + + +## Function `vatomic32_dec_rel` + +```c +static void vatomic32_dec_rel(vatomic32_t *a) +``` +_Decrements the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: release + + +## Function `vatomic32_dec_rlx` + +```c +static void vatomic32_dec_rlx(vatomic32_t *a) +``` +_Decrements the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: relaxed + + + +--- diff --git a/doc/api/vsync/atomic/core_u64.h.md b/doc/api/vsync/atomic/core_u64.h.md new file mode 100644 index 00000000..65939de9 --- /dev/null +++ b/doc/api/vsync/atomic/core_u64.h.md @@ -0,0 +1,2208 @@ +# [vsync](../README.md) / [atomic](README.md) / core_u64.h +_Atomic functions for vatomic64_t variables._ + +This file declares and documents the core atomic functions operating on vatomic64_t variables. + +Please do not include this file directly, instead use: + +```c +#include <vsync/atomic/core.h> +``` + + + +--- +# Functions + +| Function | Description | +|---|---| +| [vatomic64_init](core_u64.h.md#function-vatomic64_init) | Initializes the atomic variable a with value v. | +| [vatomic64_read](core_u64.h.md#function-vatomic64_read) | Returns the value of the atomic variable pointed by a. | +| [vatomic64_read_acq](core_u64.h.md#function-vatomic64_read_acq) | Returns the value of the atomic variable pointed by a. | +| [vatomic64_read_rlx](core_u64.h.md#function-vatomic64_read_rlx) | Returns the value of the atomic variable pointed by a. | +| [vatomic64_write](core_u64.h.md#function-vatomic64_write) | Writes value v in the atomic variable pointed by a. | +| [vatomic64_write_rel](core_u64.h.md#function-vatomic64_write_rel) | Writes value v in the atomic variable pointed by a. | +| [vatomic64_write_rlx](core_u64.h.md#function-vatomic64_write_rlx) | Writes value v in the atomic variable pointed by a. | +| [vatomic64_xchg](core_u64.h.md#function-vatomic64_xchg) | Writes v in a and returns old value. | +| [vatomic64_xchg_acq](core_u64.h.md#function-vatomic64_xchg_acq) | Writes v in a and returns old value. | +| [vatomic64_xchg_rel](core_u64.h.md#function-vatomic64_xchg_rel) | Writes v in a and returns old value. | +| [vatomic64_xchg_rlx](core_u64.h.md#function-vatomic64_xchg_rlx) | Writes v in a and returns old value. | +| [vatomic64_cmpxchg](core_u64.h.md#function-vatomic64_cmpxchg) | Writes value v in a if e is the current value.
| +| [vatomic64_cmpxchg_acq](core_u64.h.md#function-vatomic64_cmpxchg_acq) | Writes value v in a if e is the current value. | +| [vatomic64_cmpxchg_rel](core_u64.h.md#function-vatomic64_cmpxchg_rel) | Writes value v in a if e is the current value. | +| [vatomic64_cmpxchg_rlx](core_u64.h.md#function-vatomic64_cmpxchg_rlx) | Writes value v in a if e is the current value. | +| [vatomic64_get_max](core_u64.h.md#function-vatomic64_get_max) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic64_get_max_acq](core_u64.h.md#function-vatomic64_get_max_acq) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic64_get_max_rel](core_u64.h.md#function-vatomic64_get_max_rel) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic64_get_max_rlx](core_u64.h.md#function-vatomic64_get_max_rlx) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic64_max_get](core_u64.h.md#function-vatomic64_max_get) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic64_max_get_acq](core_u64.h.md#function-vatomic64_max_get_acq) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic64_max_get_rel](core_u64.h.md#function-vatomic64_max_get_rel) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic64_max_get_rlx](core_u64.h.md#function-vatomic64_max_get_rlx) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic64_max](core_u64.h.md#function-vatomic64_max) | Writes v to a if v is greater than *a. | +| [vatomic64_max_rel](core_u64.h.md#function-vatomic64_max_rel) | Writes v to a if v is greater than *a. | +| [vatomic64_max_rlx](core_u64.h.md#function-vatomic64_max_rlx) | Writes v to a if v is greater than *a. | +| [vatomic64_get_and](core_u64.h.md#function-vatomic64_get_and) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic64_get_and_acq](core_u64.h.md#function-vatomic64_get_and_acq) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic64_get_and_rel](core_u64.h.md#function-vatomic64_get_and_rel) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic64_get_and_rlx](core_u64.h.md#function-vatomic64_get_and_rlx) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic64_and_get](core_u64.h.md#function-vatomic64_and_get) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic64_and_get_acq](core_u64.h.md#function-vatomic64_and_get_acq) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic64_and_get_rel](core_u64.h.md#function-vatomic64_and_get_rel) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic64_and_get_rlx](core_u64.h.md#function-vatomic64_and_get_rlx) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic64_and](core_u64.h.md#function-vatomic64_and) | Applies bitwise and operation with v to the value of a. | +| [vatomic64_and_rel](core_u64.h.md#function-vatomic64_and_rel) | Applies bitwise and operation with v to the value of a. | +| [vatomic64_and_rlx](core_u64.h.md#function-vatomic64_and_rlx) | Applies bitwise and operation with v to the value of a. | +| [vatomic64_get_or](core_u64.h.md#function-vatomic64_get_or) | Applies bitwise or operation with v to the value of a and returns the old value. 
| +| [vatomic64_get_or_acq](core_u64.h.md#function-vatomic64_get_or_acq) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomic64_get_or_rel](core_u64.h.md#function-vatomic64_get_or_rel) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomic64_get_or_rlx](core_u64.h.md#function-vatomic64_get_or_rlx) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomic64_or_get](core_u64.h.md#function-vatomic64_or_get) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic64_or_get_acq](core_u64.h.md#function-vatomic64_or_get_acq) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic64_or_get_rel](core_u64.h.md#function-vatomic64_or_get_rel) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic64_or_get_rlx](core_u64.h.md#function-vatomic64_or_get_rlx) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic64_or](core_u64.h.md#function-vatomic64_or) | Applies bitwise or operation with v to the value of a. | +| [vatomic64_or_rel](core_u64.h.md#function-vatomic64_or_rel) | Applies bitwise or operation with v to the value of a. | +| [vatomic64_or_rlx](core_u64.h.md#function-vatomic64_or_rlx) | Applies bitwise or operation with v to the value of a. | +| [vatomic64_get_xor](core_u64.h.md#function-vatomic64_get_xor) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomic64_get_xor_acq](core_u64.h.md#function-vatomic64_get_xor_acq) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomic64_get_xor_rel](core_u64.h.md#function-vatomic64_get_xor_rel) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomic64_get_xor_rlx](core_u64.h.md#function-vatomic64_get_xor_rlx) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomic64_xor_get](core_u64.h.md#function-vatomic64_xor_get) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic64_xor_get_acq](core_u64.h.md#function-vatomic64_xor_get_acq) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic64_xor_get_rel](core_u64.h.md#function-vatomic64_xor_get_rel) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic64_xor_get_rlx](core_u64.h.md#function-vatomic64_xor_get_rlx) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic64_xor](core_u64.h.md#function-vatomic64_xor) | Applies bitwise xor operation with v to the value of a. | +| [vatomic64_xor_rel](core_u64.h.md#function-vatomic64_xor_rel) | Applies bitwise xor operation with v to the value of a. | +| [vatomic64_xor_rlx](core_u64.h.md#function-vatomic64_xor_rlx) | Applies bitwise xor operation with v to the value of a. | +| [vatomic64_get_add](core_u64.h.md#function-vatomic64_get_add) | Adds v to the value of a and returns the old value. | +| [vatomic64_get_add_acq](core_u64.h.md#function-vatomic64_get_add_acq) | Adds v to the value of a and returns the old value. | +| [vatomic64_get_add_rel](core_u64.h.md#function-vatomic64_get_add_rel) | Adds v to the value of a and returns the old value.
| +| [vatomic64_get_add_rlx](core_u64.h.md#function-vatomic64_get_add_rlx) | Adds v to the value of a and returns the old value. | +| [vatomic64_add_get](core_u64.h.md#function-vatomic64_add_get) | Adds v to the value of a and returns the new value. | +| [vatomic64_add_get_acq](core_u64.h.md#function-vatomic64_add_get_acq) | Adds v to the value of a and returns the new value. | +| [vatomic64_add_get_rel](core_u64.h.md#function-vatomic64_add_get_rel) | Adds v to the value of a and returns the new value. | +| [vatomic64_add_get_rlx](core_u64.h.md#function-vatomic64_add_get_rlx) | Adds v to the value of a and returns the new value. | +| [vatomic64_add](core_u64.h.md#function-vatomic64_add) | Adds v to the value of a. | +| [vatomic64_add_rel](core_u64.h.md#function-vatomic64_add_rel) | Adds v to the value of a. | +| [vatomic64_add_rlx](core_u64.h.md#function-vatomic64_add_rlx) | Adds v to the value of a. | +| [vatomic64_get_inc](core_u64.h.md#function-vatomic64_get_inc) | Increments the value of a and returns the old value. | +| [vatomic64_get_inc_acq](core_u64.h.md#function-vatomic64_get_inc_acq) | Increments the value of a and returns the old value. | +| [vatomic64_get_inc_rel](core_u64.h.md#function-vatomic64_get_inc_rel) | Increments the value of a and returns the old value. | +| [vatomic64_get_inc_rlx](core_u64.h.md#function-vatomic64_get_inc_rlx) | Increments the value of a and returns the old value. | +| [vatomic64_inc_get](core_u64.h.md#function-vatomic64_inc_get) | Increments the value of a and returns the new value. | +| [vatomic64_inc_get_acq](core_u64.h.md#function-vatomic64_inc_get_acq) | Increments the value of a and returns the new value. | +| [vatomic64_inc_get_rel](core_u64.h.md#function-vatomic64_inc_get_rel) | Increments the value of a and returns the new value. | +| [vatomic64_inc_get_rlx](core_u64.h.md#function-vatomic64_inc_get_rlx) | Increments the value of a and returns the new value. | +| [vatomic64_inc](core_u64.h.md#function-vatomic64_inc) | Increments the value of a. | +| [vatomic64_inc_rel](core_u64.h.md#function-vatomic64_inc_rel) | Increments the value of a. | +| [vatomic64_inc_rlx](core_u64.h.md#function-vatomic64_inc_rlx) | Increments the value of a. | +| [vatomic64_get_sub](core_u64.h.md#function-vatomic64_get_sub) | Subtracts v from a and returns the old value. | +| [vatomic64_get_sub_acq](core_u64.h.md#function-vatomic64_get_sub_acq) | Subtracts v from a and returns the old value. | +| [vatomic64_get_sub_rel](core_u64.h.md#function-vatomic64_get_sub_rel) | Subtracts v from a and returns the old value. | +| [vatomic64_get_sub_rlx](core_u64.h.md#function-vatomic64_get_sub_rlx) | Subtracts v from a and returns the old value. | +| [vatomic64_sub_get](core_u64.h.md#function-vatomic64_sub_get) | Subtracts v from a and returns the new value. | +| [vatomic64_sub_get_acq](core_u64.h.md#function-vatomic64_sub_get_acq) | Subtracts v from a and returns the new value. | +| [vatomic64_sub_get_rel](core_u64.h.md#function-vatomic64_sub_get_rel) | Subtracts v from a and returns the new value. | +| [vatomic64_sub_get_rlx](core_u64.h.md#function-vatomic64_sub_get_rlx) | Subtracts v from a and returns the new value. | +| [vatomic64_sub](core_u64.h.md#function-vatomic64_sub) | Subtracts v from a. | +| [vatomic64_sub_rel](core_u64.h.md#function-vatomic64_sub_rel) | Subtracts v from a. | +| [vatomic64_sub_rlx](core_u64.h.md#function-vatomic64_sub_rlx) | Subtracts v from a. 
| +| [vatomic64_get_dec](core_u64.h.md#function-vatomic64_get_dec) | Decrements the value of a and returns the old value. | +| [vatomic64_get_dec_acq](core_u64.h.md#function-vatomic64_get_dec_acq) | Decrements the value of a and returns the old value. | +| [vatomic64_get_dec_rel](core_u64.h.md#function-vatomic64_get_dec_rel) | Decrements the value of a and returns the old value. | +| [vatomic64_get_dec_rlx](core_u64.h.md#function-vatomic64_get_dec_rlx) | Decrements the value of a and returns the old value. | +| [vatomic64_dec_get](core_u64.h.md#function-vatomic64_dec_get) | Decrements the value of a and returns the new value. | +| [vatomic64_dec_get_acq](core_u64.h.md#function-vatomic64_dec_get_acq) | Decrements the value of a and returns the new value. | +| [vatomic64_dec_get_rel](core_u64.h.md#function-vatomic64_dec_get_rel) | Decrements the value of a and returns the new value. | +| [vatomic64_dec_get_rlx](core_u64.h.md#function-vatomic64_dec_get_rlx) | Decrements the value of a and returns the new value. | +| [vatomic64_dec](core_u64.h.md#function-vatomic64_dec) | Decrements the value of a. | +| [vatomic64_dec_rel](core_u64.h.md#function-vatomic64_dec_rel) | Decrements the value of a. | +| [vatomic64_dec_rlx](core_u64.h.md#function-vatomic64_dec_rlx) | Decrements the value of a. | + +## Function `vatomic64_init` + +```c +static void vatomic64_init(vatomic64_t *a, vuint64_t v) +``` +_Initializes the atomic variable a with value v._ + + +The initialization is equivalent to a [vatomic64_write()](core_u64.h.md#function-vatomic64_write). + + + +**Parameters:** + +- `a`: atomic variable +- `v`: initial value + + +**Memory ordering**: seq_cst + + +## Function `vatomic64_read` + +```c +static vuint64_t vatomic64_read(const vatomic64_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_read_acq` + +```c +static vuint64_t vatomic64_read_acq(const vatomic64_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: acquire + + +## Function `vatomic64_read_rlx` + +```c +static vuint64_t vatomic64_read_rlx(const vatomic64_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: relaxed + + +## Function `vatomic64_write` + +```c +static void vatomic64_write(vatomic64_t *a, vuint64_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: seq_cst + + +## Function `vatomic64_write_rel` + +```c +static void vatomic64_write_rel(vatomic64_t *a, vuint64_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: release + + +## Function `vatomic64_write_rlx` + +```c +static void vatomic64_write_rlx(vatomic64_t *a, vuint64_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: relaxed + + +## Function `vatomic64_xchg` + +```c +static vuint64_t vatomic64_xchg(vatomic64_t *a, vuint64_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** 
old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_xchg_acq` + +```c +static vuint64_t vatomic64_xchg_acq(vatomic64_t *a, vuint64_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_xchg_rel` + +```c +static vuint64_t vatomic64_xchg_rel(vatomic64_t *a, vuint64_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_xchg_rlx` + +```c +static vuint64_t vatomic64_xchg_rlx(vatomic64_t *a, vuint64_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_cmpxchg` + +```c +static vuint64_t vatomic64_cmpxchg(vatomic64_t *a, vuint64_t e, vuint64_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic64_cmpxchg_acq` + +```c +static vuint64_t vatomic64_cmpxchg_acq(vatomic64_t *a, vuint64_t e, vuint64_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic64_cmpxchg_rel` + +```c +static vuint64_t vatomic64_cmpxchg_rel(vatomic64_t *a, vuint64_t e, vuint64_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic64_cmpxchg_rlx` + +```c +static vuint64_t vatomic64_cmpxchg_rlx(vatomic64_t *a, vuint64_t e, vuint64_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic64_get_max` + +```c +static vuint64_t vatomic64_get_max(vatomic64_t *a, vuint64_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_get_max_acq` + +```c +static vuint64_t vatomic64_get_max_acq(vatomic64_t *a, vuint64_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: acquire + + +## Function `vatomic64_get_max_rel` + +```c +static vuint64_t vatomic64_get_max_rel(vatomic64_t *a, vuint64_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: release + + +## Function `vatomic64_get_max_rlx` + +```c +static vuint64_t vatomic64_get_max_rlx(vatomic64_t *a, vuint64_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: relaxed + + +## Function `vatomic64_max_get` + +```c +static vuint64_t vatomic64_max_get(vatomic64_t *a, 
vuint64_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_max_get_acq` + +```c +static vuint64_t vatomic64_max_get_acq(vatomic64_t *a, vuint64_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: acquire + + +## Function `vatomic64_max_get_rel` + +```c +static vuint64_t vatomic64_max_get_rel(vatomic64_t *a, vuint64_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: release + + +## Function `vatomic64_max_get_rlx` + +```c +static vuint64_t vatomic64_max_get_rlx(vatomic64_t *a, vuint64_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: relaxed + + +## Function `vatomic64_max` + +```c +static void vatomic64_max(vatomic64_t *a, vuint64_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: seq_cst + + +## Function `vatomic64_max_rel` + +```c +static void vatomic64_max_rel(vatomic64_t *a, vuint64_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: release + + +## Function `vatomic64_max_rlx` + +```c +static void vatomic64_max_rlx(vatomic64_t *a, vuint64_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: relaxed + + +## Function `vatomic64_get_and` + +```c +static vuint64_t vatomic64_get_and(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_get_and_acq` + +```c +static vuint64_t vatomic64_get_and_acq(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_get_and_rel` + +```c +static vuint64_t vatomic64_get_and_rel(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_get_and_rlx` + +```c +static vuint64_t vatomic64_get_and_rlx(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_and_get` + +```c +static vuint64_t vatomic64_and_get(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: 
mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_and_get_acq` + +```c +static vuint64_t vatomic64_and_get_acq(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic64_and_get_rel` + +```c +static vuint64_t vatomic64_and_get_rel(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic64_and_get_rlx` + +```c +static vuint64_t vatomic64_and_get_rlx(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic64_and` + +```c +static void vatomic64_and(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomic64_and_rel` + +```c +static void vatomic64_and_rel(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomic64_and_rlx` + +```c +static void vatomic64_and_rlx(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomic64_get_or` + +```c +static vuint64_t vatomic64_get_or(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_get_or_acq` + +```c +static vuint64_t vatomic64_get_or_acq(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_get_or_rel` + +```c +static vuint64_t vatomic64_get_or_rel(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_get_or_rlx` + +```c +static vuint64_t vatomic64_get_or_rlx(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_or_get` + +```c +static vuint64_t vatomic64_or_get(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst 
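+The `get_or`/`or_get` pairs above differ only in which value they hand back: the `get_` prefix returns the value *before* the update, while the `_get` suffix returns the value *after* it. A minimal sketch (assuming, as for the other core headers, that the declarations are pulled in via `<vsync/atomic/core.h>`; `or_example` is just an illustrative name):
+
+```c
+#include <vsync/atomic/core.h>
+
+void
+or_example(void)
+{
+    vatomic64_t flags;
+    vatomic64_init(&flags, 0x1);
+
+    /* flags becomes 0x3; get_or returns the previous value, 0x1 */
+    vuint64_t before = vatomic64_get_or(&flags, 0x2);
+
+    /* flags becomes 0x7; or_get returns the updated value, 0x7 */
+    vuint64_t after = vatomic64_or_get(&flags, 0x4);
+
+    (void)before;
+    (void)after;
+}
+```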
+ + +## Function `vatomic64_or_get_acq` + +```c +static vuint64_t vatomic64_or_get_acq(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic64_or_get_rel` + +```c +static vuint64_t vatomic64_or_get_rel(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic64_or_get_rlx` + +```c +static vuint64_t vatomic64_or_get_rlx(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic64_or` + +```c +static void vatomic64_or(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomic64_or_rel` + +```c +static void vatomic64_or_rel(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomic64_or_rlx` + +```c +static void vatomic64_or_rlx(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomic64_get_xor` + +```c +static vuint64_t vatomic64_get_xor(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_get_xor_acq` + +```c +static vuint64_t vatomic64_get_xor_acq(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_get_xor_rel` + +```c +static vuint64_t vatomic64_get_xor_rel(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_get_xor_rlx` + +```c +static vuint64_t vatomic64_get_xor_rlx(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_xor_get` + +```c +static vuint64_t vatomic64_xor_get(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_xor_get_acq` + +```c +static vuint64_t 
vatomic64_xor_get_acq(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic64_xor_get_rel` + +```c +static vuint64_t vatomic64_xor_get_rel(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic64_xor_get_rlx` + +```c +static vuint64_t vatomic64_xor_get_rlx(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic64_xor` + +```c +static void vatomic64_xor(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomic64_xor_rel` + +```c +static void vatomic64_xor_rel(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomic64_xor_rlx` + +```c +static void vatomic64_xor_rlx(vatomic64_t *a, vuint64_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomic64_get_add` + +```c +static vuint64_t vatomic64_get_add(vatomic64_t *a, vuint64_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_get_add_acq` + +```c +static vuint64_t vatomic64_get_add_acq(vatomic64_t *a, vuint64_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_get_add_rel` + +```c +static vuint64_t vatomic64_get_add_rel(vatomic64_t *a, vuint64_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_get_add_rlx` + +```c +static vuint64_t vatomic64_get_add_rlx(vatomic64_t *a, vuint64_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_add_get` + +```c +static vuint64_t vatomic64_add_get(vatomic64_t *a, vuint64_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_add_get_acq` + +```c +static vuint64_t vatomic64_add_get_acq(vatomic64_t *a, vuint64_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + +
+**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic64_add_get_rel` + +```c +static vuint64_t vatomic64_add_get_rel(vatomic64_t *a, vuint64_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic64_add_get_rlx` + +```c +static vuint64_t vatomic64_add_get_rlx(vatomic64_t *a, vuint64_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic64_add` + +```c +static void vatomic64_add(vatomic64_t *a, vuint64_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: seq_cst + + +## Function `vatomic64_add_rel` + +```c +static void vatomic64_add_rel(vatomic64_t *a, vuint64_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: release + + +## Function `vatomic64_add_rlx` + +```c +static void vatomic64_add_rlx(vatomic64_t *a, vuint64_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: relaxed + + +## Function `vatomic64_get_inc` + +```c +static vuint64_t vatomic64_get_inc(vatomic64_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_get_inc_acq` + +```c +static vuint64_t vatomic64_get_inc_acq(vatomic64_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_get_inc_rel` + +```c +static vuint64_t vatomic64_get_inc_rel(vatomic64_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_get_inc_rlx` + +```c +static vuint64_t vatomic64_get_inc_rlx(vatomic64_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_inc_get` + +```c +static vuint64_t vatomic64_inc_get(vatomic64_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_inc_get_acq` + +```c +static vuint64_t vatomic64_inc_get_acq(vatomic64_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic64_inc_get_rel` + +```c +static vuint64_t vatomic64_inc_get_rel(vatomic64_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic64_inc_get_rlx` + +```c +static vuint64_t vatomic64_inc_get_rlx(vatomic64_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + 
+**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic64_inc` + +```c +static void vatomic64_inc(vatomic64_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: seq_cst + + +## Function `vatomic64_inc_rel` + +```c +static void vatomic64_inc_rel(vatomic64_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: release + + +## Function `vatomic64_inc_rlx` + +```c +static void vatomic64_inc_rlx(vatomic64_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: relaxed + + +## Function `vatomic64_get_sub` + +```c +static vuint64_t vatomic64_get_sub(vatomic64_t *a, vuint64_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_get_sub_acq` + +```c +static vuint64_t vatomic64_get_sub_acq(vatomic64_t *a, vuint64_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_get_sub_rel` + +```c +static vuint64_t vatomic64_get_sub_rel(vatomic64_t *a, vuint64_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_get_sub_rlx` + +```c +static vuint64_t vatomic64_get_sub_rlx(vatomic64_t *a, vuint64_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_sub_get` + +```c +static vuint64_t vatomic64_sub_get(vatomic64_t *a, vuint64_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_sub_get_acq` + +```c +static vuint64_t vatomic64_sub_get_acq(vatomic64_t *a, vuint64_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic64_sub_get_rel` + +```c +static vuint64_t vatomic64_sub_get_rel(vatomic64_t *a, vuint64_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic64_sub_get_rlx` + +```c +static vuint64_t vatomic64_sub_get_rlx(vatomic64_t *a, vuint64_t v) +``` +_Subtracts v from a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic64_sub` + +```c +static void vatomic64_sub(vatomic64_t *a, vuint64_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Memory ordering**: seq_cst + + +## Function `vatomic64_sub_rel` + +```c +static void vatomic64_sub_rel(vatomic64_t *a, vuint64_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- 
`v`: subtrahend value + + +**Memory ordering**: release + + +## Function `vatomic64_sub_rlx` + +```c +static void vatomic64_sub_rlx(vatomic64_t *a, vuint64_t v) +``` +_Subtracts v from a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Memory ordering**: relaxed + + +## Function `vatomic64_get_dec` + +```c +static vuint64_t vatomic64_get_dec(vatomic64_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_get_dec_acq` + +```c +static vuint64_t vatomic64_get_dec_acq(vatomic64_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic64_get_dec_rel` + +```c +static vuint64_t vatomic64_get_dec_rel(vatomic64_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic64_get_dec_rlx` + +```c +static vuint64_t vatomic64_get_dec_rlx(vatomic64_t *a) +``` +_Decrements the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic64_dec_get` + +```c +static vuint64_t vatomic64_dec_get(vatomic64_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic64_dec_get_acq` + +```c +static vuint64_t vatomic64_dec_get_acq(vatomic64_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic64_dec_get_rel` + +```c +static vuint64_t vatomic64_dec_get_rel(vatomic64_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic64_dec_get_rlx` + +```c +static vuint64_t vatomic64_dec_get_rlx(vatomic64_t *a) +``` +_Decrements the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic64_dec` + +```c +static void vatomic64_dec(vatomic64_t *a) +``` +_Decrements the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: seq_cst + + +## Function `vatomic64_dec_rel` + +```c +static void vatomic64_dec_rel(vatomic64_t *a) +``` +_Decrements the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: release + + +## Function `vatomic64_dec_rlx` + +```c +static void vatomic64_dec_rlx(vatomic64_t *a) +``` +_Decrements the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: relaxed + + + +--- diff --git a/doc/api/vsync/atomic/core_u8.h.md b/doc/api/vsync/atomic/core_u8.h.md new file mode 100644 index 00000000..4f834854 --- /dev/null +++ b/doc/api/vsync/atomic/core_u8.h.md @@ -0,0 +1,2208 @@ +# [vsync](../README.md) / [atomic](README.md) / core_u8.h +_Atomic functions for vatomic8_t variables._ + +This file declares and documents the core atomic functions operating on vatomic8_t variables. 
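+As a quick illustration of how the functions documented below compose, consider a hypothetical saturating counter: [vatomic8_cmpxchg](core_u8.h.md#function-vatomic8_cmpxchg) returns the old value, so a swap took effect exactly when the returned value equals the expected one. This is a sketch only; the include line follows the convention given in the next paragraph.
+
+```c
+#include <vsync/atomic/core.h>
+
+/* Hypothetical helper: increments *a unless it already holds 255,
+ * the maximum vuint8_t value. Returns the value observed before
+ * the increment. */
+static vuint8_t
+sat_inc8(vatomic8_t *a)
+{
+    vuint8_t cur = vatomic8_read(a);
+    for (;;) {
+        if (cur == 255U) {
+            return cur; /* saturated: nothing to do */
+        }
+        vuint8_t old = vatomic8_cmpxchg(a, cur, (vuint8_t)(cur + 1U));
+        if (old == cur) {
+            return old; /* our increment took effect */
+        }
+        cur = old; /* another thread raced us: retry with fresh value */
+    }
+}
+```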
+ +Please do not include this file directly, instead use: + +```c +#include <vsync/atomic/core.h> + +``` + + + +--- +# Functions + +| Function | Description | +|---|---| +| [vatomic8_init](core_u8.h.md#function-vatomic8_init) | Initializes the atomic variable a with value v. | +| [vatomic8_read](core_u8.h.md#function-vatomic8_read) | Returns the value of the atomic variable pointed by a. | +| [vatomic8_read_acq](core_u8.h.md#function-vatomic8_read_acq) | Returns the value of the atomic variable pointed by a. | +| [vatomic8_read_rlx](core_u8.h.md#function-vatomic8_read_rlx) | Returns the value of the atomic variable pointed by a. | +| [vatomic8_write](core_u8.h.md#function-vatomic8_write) | Writes value v in the atomic variable pointed by a. | +| [vatomic8_write_rel](core_u8.h.md#function-vatomic8_write_rel) | Writes value v in the atomic variable pointed by a. | +| [vatomic8_write_rlx](core_u8.h.md#function-vatomic8_write_rlx) | Writes value v in the atomic variable pointed by a. | +| [vatomic8_xchg](core_u8.h.md#function-vatomic8_xchg) | Writes v in a and returns old value. | +| [vatomic8_xchg_acq](core_u8.h.md#function-vatomic8_xchg_acq) | Writes v in a and returns old value. | +| [vatomic8_xchg_rel](core_u8.h.md#function-vatomic8_xchg_rel) | Writes v in a and returns old value. | +| [vatomic8_xchg_rlx](core_u8.h.md#function-vatomic8_xchg_rlx) | Writes v in a and returns old value. | +| [vatomic8_cmpxchg](core_u8.h.md#function-vatomic8_cmpxchg) | Writes value v in a if e is the current value. | +| [vatomic8_cmpxchg_acq](core_u8.h.md#function-vatomic8_cmpxchg_acq) | Writes value v in a if e is the current value. | +| [vatomic8_cmpxchg_rel](core_u8.h.md#function-vatomic8_cmpxchg_rel) | Writes value v in a if e is the current value. | +| [vatomic8_cmpxchg_rlx](core_u8.h.md#function-vatomic8_cmpxchg_rlx) | Writes value v in a if e is the current value. | +| [vatomic8_get_max](core_u8.h.md#function-vatomic8_get_max) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic8_get_max_acq](core_u8.h.md#function-vatomic8_get_max_acq) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic8_get_max_rel](core_u8.h.md#function-vatomic8_get_max_rel) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic8_get_max_rlx](core_u8.h.md#function-vatomic8_get_max_rlx) | Writes v to a if v is greater than *a and returns the old value. | +| [vatomic8_max_get](core_u8.h.md#function-vatomic8_max_get) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic8_max_get_acq](core_u8.h.md#function-vatomic8_max_get_acq) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic8_max_get_rel](core_u8.h.md#function-vatomic8_max_get_rel) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic8_max_get_rlx](core_u8.h.md#function-vatomic8_max_get_rlx) | Writes v to a if v is greater than *a and returns the new value. | +| [vatomic8_max](core_u8.h.md#function-vatomic8_max) | Writes v to a if v is greater than *a. | +| [vatomic8_max_rel](core_u8.h.md#function-vatomic8_max_rel) | Writes v to a if v is greater than *a. | +| [vatomic8_max_rlx](core_u8.h.md#function-vatomic8_max_rlx) | Writes v to a if v is greater than *a. | +| [vatomic8_get_and](core_u8.h.md#function-vatomic8_get_and) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic8_get_and_acq](core_u8.h.md#function-vatomic8_get_and_acq) | Applies bitwise and to the value of a and returns the old value.
| +| [vatomic8_get_and_rel](core_u8.h.md#function-vatomic8_get_and_rel) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic8_get_and_rlx](core_u8.h.md#function-vatomic8_get_and_rlx) | Applies bitwise and to the value of a and returns the old value. | +| [vatomic8_and_get](core_u8.h.md#function-vatomic8_and_get) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic8_and_get_acq](core_u8.h.md#function-vatomic8_and_get_acq) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic8_and_get_rel](core_u8.h.md#function-vatomic8_and_get_rel) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic8_and_get_rlx](core_u8.h.md#function-vatomic8_and_get_rlx) | Applies bitwise and operation with v to the value of a and returns the new value. | +| [vatomic8_and](core_u8.h.md#function-vatomic8_and) | Applies bitwise and operation with v to the value of a. | +| [vatomic8_and_rel](core_u8.h.md#function-vatomic8_and_rel) | Applies bitwise and operation with v to the value of a. | +| [vatomic8_and_rlx](core_u8.h.md#function-vatomic8_and_rlx) | Applies bitwise and operation with v to the value of a. | +| [vatomic8_get_or](core_u8.h.md#function-vatomic8_get_or) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomic8_get_or_acq](core_u8.h.md#function-vatomic8_get_or_acq) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomic8_get_or_rel](core_u8.h.md#function-vatomic8_get_or_rel) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomic8_get_or_rlx](core_u8.h.md#function-vatomic8_get_or_rlx) | Applies bitwise or operation with v to the value of a and returns the old value. | +| [vatomic8_or_get](core_u8.h.md#function-vatomic8_or_get) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic8_or_get_acq](core_u8.h.md#function-vatomic8_or_get_acq) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic8_or_get_rel](core_u8.h.md#function-vatomic8_or_get_rel) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic8_or_get_rlx](core_u8.h.md#function-vatomic8_or_get_rlx) | Applies bitwise or operation with v to the value of a and returns the new value. | +| [vatomic8_or](core_u8.h.md#function-vatomic8_or) | Applies bitwise or operation with v to the value of a. | +| [vatomic8_or_rel](core_u8.h.md#function-vatomic8_or_rel) | Applies bitwise or operation with v to the value of a. | +| [vatomic8_or_rlx](core_u8.h.md#function-vatomic8_or_rlx) | Applies bitwise or operation with v to the value of a. | +| [vatomic8_get_xor](core_u8.h.md#function-vatomic8_get_xor) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomic8_get_xor_acq](core_u8.h.md#function-vatomic8_get_xor_acq) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomic8_get_xor_rel](core_u8.h.md#function-vatomic8_get_xor_rel) | Applies bitwise xor operation with v to the value of a and returns the old value. | +| [vatomic8_get_xor_rlx](core_u8.h.md#function-vatomic8_get_xor_rlx) | Applies bitwise xor operation with v to the value of a and returns the old value. 
| +| [vatomic8_xor_get](core_u8.h.md#function-vatomic8_xor_get) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic8_xor_get_acq](core_u8.h.md#function-vatomic8_xor_get_acq) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic8_xor_get_rel](core_u8.h.md#function-vatomic8_xor_get_rel) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic8_xor_get_rlx](core_u8.h.md#function-vatomic8_xor_get_rlx) | Applies bitwise xor operation with v to the value of a and returns the new value. | +| [vatomic8_xor](core_u8.h.md#function-vatomic8_xor) | Applies bitwise xor operation with v to the value of a. | +| [vatomic8_xor_rel](core_u8.h.md#function-vatomic8_xor_rel) | Applies bitwise xor operation with v to the value of a. | +| [vatomic8_xor_rlx](core_u8.h.md#function-vatomic8_xor_rlx) | Applies bitwise xor operation with v to the value of a. | +| [vatomic8_get_add](core_u8.h.md#function-vatomic8_get_add) | Adds v to the value of a and returns the old value. | +| [vatomic8_get_add_acq](core_u8.h.md#function-vatomic8_get_add_acq) | Adds v to the value of a and returns the old value. | +| [vatomic8_get_add_rel](core_u8.h.md#function-vatomic8_get_add_rel) | Adds v to the value of a and returns the old value. | +| [vatomic8_get_add_rlx](core_u8.h.md#function-vatomic8_get_add_rlx) | Adds v to the value of a and returns the old value. | +| [vatomic8_add_get](core_u8.h.md#function-vatomic8_add_get) | Adds v to the value of a and returns the new value. | +| [vatomic8_add_get_acq](core_u8.h.md#function-vatomic8_add_get_acq) | Adds v to the value of a and returns the new value. | +| [vatomic8_add_get_rel](core_u8.h.md#function-vatomic8_add_get_rel) | Adds v to the value of a and returns the new value. | +| [vatomic8_add_get_rlx](core_u8.h.md#function-vatomic8_add_get_rlx) | Adds v to the value of a and returns the new value. | +| [vatomic8_add](core_u8.h.md#function-vatomic8_add) | Adds v to the value of a. | +| [vatomic8_add_rel](core_u8.h.md#function-vatomic8_add_rel) | Adds v to the value of a. | +| [vatomic8_add_rlx](core_u8.h.md#function-vatomic8_add_rlx) | Adds v to the value of a. | +| [vatomic8_get_inc](core_u8.h.md#function-vatomic8_get_inc) | Increments the value of a and returns the old value. | +| [vatomic8_get_inc_acq](core_u8.h.md#function-vatomic8_get_inc_acq) | Increments the value of a and returns the old value. | +| [vatomic8_get_inc_rel](core_u8.h.md#function-vatomic8_get_inc_rel) | Increments the value of a and returns the old value. | +| [vatomic8_get_inc_rlx](core_u8.h.md#function-vatomic8_get_inc_rlx) | Increments the value of a and returns the old value. | +| [vatomic8_inc_get](core_u8.h.md#function-vatomic8_inc_get) | Increments the value of a and returns the new value. | +| [vatomic8_inc_get_acq](core_u8.h.md#function-vatomic8_inc_get_acq) | Increments the value of a and returns the new value. | +| [vatomic8_inc_get_rel](core_u8.h.md#function-vatomic8_inc_get_rel) | Increments the value of a and returns the new value. | +| [vatomic8_inc_get_rlx](core_u8.h.md#function-vatomic8_inc_get_rlx) | Increments the value of a and returns the new value. | +| [vatomic8_inc](core_u8.h.md#function-vatomic8_inc) | Increments the value of a. | +| [vatomic8_inc_rel](core_u8.h.md#function-vatomic8_inc_rel) | Increments the value of a. | +| [vatomic8_inc_rlx](core_u8.h.md#function-vatomic8_inc_rlx) | Increments the value of a.
| +| [vatomic8_get_sub](core_u8.h.md#function-vatomic8_get_sub) | Subtracts v from a and returns the old value. | +| [vatomic8_get_sub_acq](core_u8.h.md#function-vatomic8_get_sub_acq) | Subtracts v from a and returns the old value. | +| [vatomic8_get_sub_rel](core_u8.h.md#function-vatomic8_get_sub_rel) | Subtracts v from a and returns the old value. | +| [vatomic8_get_sub_rlx](core_u8.h.md#function-vatomic8_get_sub_rlx) | Subtracts v from a and returns the old value. | +| [vatomic8_sub_get](core_u8.h.md#function-vatomic8_sub_get) | Subtracts v from a and returns the new value. | +| [vatomic8_sub_get_acq](core_u8.h.md#function-vatomic8_sub_get_acq) | Subtracts v from a and returns the new value. | +| [vatomic8_sub_get_rel](core_u8.h.md#function-vatomic8_sub_get_rel) | Subtracts v from a and returns the new value. | +| [vatomic8_sub_get_rlx](core_u8.h.md#function-vatomic8_sub_get_rlx) | Subtracts v from a and returns the new value. | +| [vatomic8_sub](core_u8.h.md#function-vatomic8_sub) | Subtracts v from a. | +| [vatomic8_sub_rel](core_u8.h.md#function-vatomic8_sub_rel) | Subtracts v from a. | +| [vatomic8_sub_rlx](core_u8.h.md#function-vatomic8_sub_rlx) | Subtracts v from a. | +| [vatomic8_get_dec](core_u8.h.md#function-vatomic8_get_dec) | Decrements the value of a and returns the old value. | +| [vatomic8_get_dec_acq](core_u8.h.md#function-vatomic8_get_dec_acq) | Decrements the value of a and returns the old value. | +| [vatomic8_get_dec_rel](core_u8.h.md#function-vatomic8_get_dec_rel) | Decrements the value of a and returns the old value. | +| [vatomic8_get_dec_rlx](core_u8.h.md#function-vatomic8_get_dec_rlx) | Decrements the value of a and returns the old value. | +| [vatomic8_dec_get](core_u8.h.md#function-vatomic8_dec_get) | Decrements the value of a and returns the new value. | +| [vatomic8_dec_get_acq](core_u8.h.md#function-vatomic8_dec_get_acq) | Decrements the value of a and returns the new value. | +| [vatomic8_dec_get_rel](core_u8.h.md#function-vatomic8_dec_get_rel) | Decrements the value of a and returns the new value. | +| [vatomic8_dec_get_rlx](core_u8.h.md#function-vatomic8_dec_get_rlx) | Decrements the value of a and returns the new value. | +| [vatomic8_dec](core_u8.h.md#function-vatomic8_dec) | Decrements the value of a. | +| [vatomic8_dec_rel](core_u8.h.md#function-vatomic8_dec_rel) | Decrements the value of a. | +| [vatomic8_dec_rlx](core_u8.h.md#function-vatomic8_dec_rlx) | Decrements the value of a. | + +## Function `vatomic8_init` + +```c +static void vatomic8_init(vatomic8_t *a, vuint8_t v) +``` +_Initializes the atomic variable a with value v._ + + +The initialization is equivalent to a [vatomic8_write()](core_u8.h.md#function-vatomic8_write).
+ + + +**Parameters:** + +- `a`: atomic variable +- `v`: initial value + + +**Memory ordering**: seq_cst + + +## Function `vatomic8_read` + +```c +static vuint8_t vatomic8_read(const vatomic8_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_read_acq` + +```c +static vuint8_t vatomic8_read_acq(const vatomic8_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: acquire + + +## Function `vatomic8_read_rlx` + +```c +static vuint8_t vatomic8_read_rlx(const vatomic8_t *a) +``` +_Returns the value of the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: relaxed + + +## Function `vatomic8_write` + +```c +static void vatomic8_write(vatomic8_t *a, vuint8_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: seq_cst + + +## Function `vatomic8_write_rel` + +```c +static void vatomic8_write_rel(vatomic8_t *a, vuint8_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: release + + +## Function `vatomic8_write_rlx` + +```c +static void vatomic8_write_rlx(vatomic8_t *a, vuint8_t v) +``` +_Writes value v in the atomic variable pointed by a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: relaxed + + +## Function `vatomic8_xchg` + +```c +static vuint8_t vatomic8_xchg(vatomic8_t *a, vuint8_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_xchg_acq` + +```c +static vuint8_t vatomic8_xchg_acq(vatomic8_t *a, vuint8_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic8_xchg_rel` + +```c +static vuint8_t vatomic8_xchg_rel(vatomic8_t *a, vuint8_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic8_xchg_rlx` + +```c +static vuint8_t vatomic8_xchg_rlx(vatomic8_t *a, vuint8_t v) +``` +_Writes v in a and returns old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic8_cmpxchg` + +```c +static vuint8_t vatomic8_cmpxchg(vatomic8_t *a, vuint8_t e, vuint8_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic8_cmpxchg_acq` + +```c +static vuint8_t vatomic8_cmpxchg_acq(vatomic8_t *a, vuint8_t e, vuint8_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic8_cmpxchg_rel` + +```c +static vuint8_t vatomic8_cmpxchg_rel(vatomic8_t *a, vuint8_t e, 
vuint8_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic8_cmpxchg_rlx` + +```c +static vuint8_t vatomic8_cmpxchg_rlx(vatomic8_t *a, vuint8_t e, vuint8_t v) +``` +_Writes value v in a if e is the current value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `e`: expected value +- `v`: new value + + +**Returns:** old value + + + +## Function `vatomic8_get_max` + +```c +static vuint8_t vatomic8_get_max(vatomic8_t *a, vuint8_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_get_max_acq` + +```c +static vuint8_t vatomic8_get_max_acq(vatomic8_t *a, vuint8_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: acquire + + +## Function `vatomic8_get_max_rel` + +```c +static vuint8_t vatomic8_get_max_rel(vatomic8_t *a, vuint8_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: release + + +## Function `vatomic8_get_max_rlx` + +```c +static vuint8_t vatomic8_get_max_rlx(vatomic8_t *a, vuint8_t v) +``` +_Writes v to a if v is greater than *a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** old max value + +**Memory ordering**: relaxed + + +## Function `vatomic8_max_get` + +```c +static vuint8_t vatomic8_max_get(vatomic8_t *a, vuint8_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_max_get_acq` + +```c +static vuint8_t vatomic8_max_get_acq(vatomic8_t *a, vuint8_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: acquire + + +## Function `vatomic8_max_get_rel` + +```c +static vuint8_t vatomic8_max_get_rel(vatomic8_t *a, vuint8_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: release + + +## Function `vatomic8_max_get_rlx` + +```c +static vuint8_t vatomic8_max_get_rlx(vatomic8_t *a, vuint8_t v) +``` +_Writes v to a if v is greater than *a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Returns:** new max value + +**Memory ordering**: relaxed + + +## Function `vatomic8_max` + +```c +static void vatomic8_max(vatomic8_t *a, vuint8_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: seq_cst + + +## Function `vatomic8_max_rel` + +```c +static void vatomic8_max_rel(vatomic8_t *a, vuint8_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic 
variable +- `v`: potential max value + + +**Memory ordering**: release + + +## Function `vatomic8_max_rlx` + +```c +static void vatomic8_max_rlx(vatomic8_t *a, vuint8_t v) +``` +_Writes v to a if v is greater than *a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: potential max value + + +**Memory ordering**: relaxed + + +## Function `vatomic8_get_and` + +```c +static vuint8_t vatomic8_get_and(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_get_and_acq` + +```c +static vuint8_t vatomic8_get_and_acq(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic8_get_and_rel` + +```c +static vuint8_t vatomic8_get_and_rel(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic8_get_and_rlx` + +```c +static vuint8_t vatomic8_get_and_rlx(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise and to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic8_and_get` + +```c +static vuint8_t vatomic8_and_get(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_and_get_acq` + +```c +static vuint8_t vatomic8_and_get_acq(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic8_and_get_rel` + +```c +static vuint8_t vatomic8_and_get_rel(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic8_and_get_rlx` + +```c +static vuint8_t vatomic8_and_get_rlx(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise and operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic8_and` + +```c +static void vatomic8_and(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomic8_and_rel` + +```c +static void vatomic8_and_rel(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomic8_and_rlx` + +```c +static void vatomic8_and_rlx(vatomic8_t *a, vuint8_t v) +``` 
+_Applies bitwise and operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomic8_get_or` + +```c +static vuint8_t vatomic8_get_or(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_get_or_acq` + +```c +static vuint8_t vatomic8_get_or_acq(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic8_get_or_rel` + +```c +static vuint8_t vatomic8_get_or_rel(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic8_get_or_rlx` + +```c +static vuint8_t vatomic8_get_or_rlx(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic8_or_get` + +```c +static vuint8_t vatomic8_or_get(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_or_get_acq` + +```c +static vuint8_t vatomic8_or_get_acq(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic8_or_get_rel` + +```c +static vuint8_t vatomic8_or_get_rel(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic8_or_get_rlx` + +```c +static vuint8_t vatomic8_or_get_rlx(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise or operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic8_or` + +```c +static void vatomic8_or(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomic8_or_rel` + +```c +static void vatomic8_or_rel(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomic8_or_rlx` + +```c +static void vatomic8_or_rlx(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise or operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + 
+**Memory ordering**: relaxed + + +## Function `vatomic8_get_xor` + +```c +static vuint8_t vatomic8_get_xor(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_get_xor_acq` + +```c +static vuint8_t vatomic8_get_xor_acq(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic8_get_xor_rel` + +```c +static vuint8_t vatomic8_get_xor_rel(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic8_get_xor_rlx` + +```c +static vuint8_t vatomic8_get_xor_rlx(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic8_xor_get` + +```c +static vuint8_t vatomic8_xor_get(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_xor_get_acq` + +```c +static vuint8_t vatomic8_xor_get_acq(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic8_xor_get_rel` + +```c +static vuint8_t vatomic8_xor_get_rel(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic8_xor_get_rlx` + +```c +static vuint8_t vatomic8_xor_get_rlx(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise xor operation with v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic8_xor` + +```c +static void vatomic8_xor(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: seq_cst + + +## Function `vatomic8_xor_rel` + +```c +static void vatomic8_xor_rel(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: release + + +## Function `vatomic8_xor_rlx` + +```c +static void vatomic8_xor_rlx(vatomic8_t *a, vuint8_t v) +``` +_Applies bitwise xor operation with v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: mask value + + +**Memory ordering**: relaxed + + +## Function `vatomic8_get_add` + +```c +static vuint8_t
vatomic8_get_add(vatomic8_t *a, vuint8_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_get_add_acq` + +```c +static vuint8_t vatomic8_get_add_acq(vatomic8_t *a, vuint8_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic8_get_add_rel` + +```c +static vuint8_t vatomic8_get_add_rel(vatomic8_t *a, vuint8_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic8_get_add_rlx` + +```c +static vuint8_t vatomic8_get_add_rlx(vatomic8_t *a, vuint8_t v) +``` +_Adds v to the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic8_add_get` + +```c +static vuint8_t vatomic8_add_get(vatomic8_t *a, vuint8_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_add_get_acq` + +```c +static vuint8_t vatomic8_add_get_acq(vatomic8_t *a, vuint8_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic8_add_get_rel` + +```c +static vuint8_t vatomic8_add_get_rel(vatomic8_t *a, vuint8_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic8_add_get_rlx` + +```c +static vuint8_t vatomic8_add_get_rlx(vatomic8_t *a, vuint8_t v) +``` +_Adds v to the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic8_add` + +```c +static void vatomic8_add(vatomic8_t *a, vuint8_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: seq_cst + + +## Function `vatomic8_add_rel` + +```c +static void vatomic8_add_rel(vatomic8_t *a, vuint8_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: release + + +## Function `vatomic8_add_rlx` + +```c +static void vatomic8_add_rlx(vatomic8_t *a, vuint8_t v) +``` +_Adds v to the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: addend value + + +**Memory ordering**: relaxed + + +## Function `vatomic8_get_inc` + +```c +static vuint8_t vatomic8_get_inc(vatomic8_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_get_inc_acq` + +```c +static vuint8_t vatomic8_get_inc_acq(vatomic8_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic 
variable + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic8_get_inc_rel` + +```c +static vuint8_t vatomic8_get_inc_rel(vatomic8_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic8_get_inc_rlx` + +```c +static vuint8_t vatomic8_get_inc_rlx(vatomic8_t *a) +``` +_Increments the value of a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function `vatomic8_inc_get` + +```c +static vuint8_t vatomic8_inc_get(vatomic8_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_inc_get_acq` + +```c +static vuint8_t vatomic8_inc_get_acq(vatomic8_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Function `vatomic8_inc_get_rel` + +```c +static vuint8_t vatomic8_inc_get_rel(vatomic8_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: release + + +## Function `vatomic8_inc_get_rlx` + +```c +static vuint8_t vatomic8_inc_get_rlx(vatomic8_t *a) +``` +_Increments the value of a and returns the new value._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Function `vatomic8_inc` + +```c +static void vatomic8_inc(vatomic8_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: seq_cst + + +## Function `vatomic8_inc_rel` + +```c +static void vatomic8_inc_rel(vatomic8_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: release + + +## Function `vatomic8_inc_rlx` + +```c +static void vatomic8_inc_rlx(vatomic8_t *a) +``` +_Increments the value of a._ + + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: relaxed + + +## Function `vatomic8_get_sub` + +```c +static vuint8_t vatomic8_get_sub(vatomic8_t *a, vuint8_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Function `vatomic8_get_sub_acq` + +```c +static vuint8_t vatomic8_get_sub_acq(vatomic8_t *a, vuint8_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Function `vatomic8_get_sub_rel` + +```c +static vuint8_t vatomic8_get_sub_rel(vatomic8_t *a, vuint8_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: release + + +## Function `vatomic8_get_sub_rlx` + +```c +static vuint8_t vatomic8_get_sub_rlx(vatomic8_t *a, vuint8_t v) +``` +_Subtracts v from a and returns the old value._ + + + + +**Parameters:** + +- `a`: atomic variable +- `v`: subtrahend value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Function 
`vatomic8_sub_get`
+
+```c
+static vuint8_t vatomic8_sub_get(vatomic8_t *a, vuint8_t v)
+```
+_Subtracts v from a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: subtrahend value
+
+
+**Returns:** new value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic8_sub_get_acq`
+
+```c
+static vuint8_t vatomic8_sub_get_acq(vatomic8_t *a, vuint8_t v)
+```
+_Subtracts v from a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: subtrahend value
+
+
+**Returns:** new value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic8_sub_get_rel`
+
+```c
+static vuint8_t vatomic8_sub_get_rel(vatomic8_t *a, vuint8_t v)
+```
+_Subtracts v from a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: subtrahend value
+
+
+**Returns:** new value
+
+**Memory ordering**: release
+
+
+## Function `vatomic8_sub_get_rlx`
+
+```c
+static vuint8_t vatomic8_sub_get_rlx(vatomic8_t *a, vuint8_t v)
+```
+_Subtracts v from a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: subtrahend value
+
+
+**Returns:** new value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic8_sub`
+
+```c
+static void vatomic8_sub(vatomic8_t *a, vuint8_t v)
+```
+_Subtracts v from a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: subtrahend value
+
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic8_sub_rel`
+
+```c
+static void vatomic8_sub_rel(vatomic8_t *a, vuint8_t v)
+```
+_Subtracts v from a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: subtrahend value
+
+
+**Memory ordering**: release
+
+
+## Function `vatomic8_sub_rlx`
+
+```c
+static void vatomic8_sub_rlx(vatomic8_t *a, vuint8_t v)
+```
+_Subtracts v from a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: subtrahend value
+
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic8_get_dec`
+
+```c
+static vuint8_t vatomic8_get_dec(vatomic8_t *a)
+```
+_Decrements the value of a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic8_get_dec_acq`
+
+```c
+static vuint8_t vatomic8_get_dec_acq(vatomic8_t *a)
+```
+_Decrements the value of a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** old value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic8_get_dec_rel`
+
+```c
+static vuint8_t vatomic8_get_dec_rel(vatomic8_t *a)
+```
+_Decrements the value of a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** old value
+
+**Memory ordering**: release
+
+
+## Function `vatomic8_get_dec_rlx`
+
+```c
+static vuint8_t vatomic8_get_dec_rlx(vatomic8_t *a)
+```
+_Decrements the value of a and returns the old value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** old value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic8_dec_get`
+
+```c
+static vuint8_t vatomic8_dec_get(vatomic8_t *a)
+```
+_Decrements the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** new value
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic8_dec_get_acq`
+
+```c
+static vuint8_t vatomic8_dec_get_acq(vatomic8_t *a)
+```
+_Decrements the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** new value
+
+**Memory ordering**: acquire
+
+
+## Function `vatomic8_dec_get_rel`
+
+```c
+static vuint8_t vatomic8_dec_get_rel(vatomic8_t *a)
+```
+_Decrements the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** new value
+
+**Memory ordering**: release
+
+
+## Function `vatomic8_dec_get_rlx`
+
+```c
+static vuint8_t vatomic8_dec_get_rlx(vatomic8_t *a)
+```
+_Decrements the value of a and returns the new value._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Returns:** new value
+
+**Memory ordering**: relaxed
+
+
+## Function `vatomic8_dec`
+
+```c
+static void vatomic8_dec(vatomic8_t *a)
+```
+_Decrements the value of a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Memory ordering**: seq_cst
+
+
+## Function `vatomic8_dec_rel`
+
+```c
+static void vatomic8_dec_rel(vatomic8_t *a)
+```
+_Decrements the value of a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Memory ordering**: release
+
+
+## Function `vatomic8_dec_rlx`
+
+```c
+static void vatomic8_dec_rlx(vatomic8_t *a)
+```
+_Decrements the value of a._
+
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+
+
+**Memory ordering**: relaxed
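+
+
+As a brief usage sketch (the `example` function is hypothetical, and `vatomic8_init` is assumed to come from this header): the `get_`-prefixed functions return the value *before* the modification, while the `_get`-suffixed functions return the value *after* it.
+
+```c
+#include <vsync/atomic.h>
+
+vatomic8_t cnt;
+
+void
+example(void)
+{
+    vatomic8_init(&cnt, 10U);
+    vuint8_t old = vatomic8_get_add(&cnt, 5U); /* old == 10, cnt == 15 */
+    vuint8_t cur = vatomic8_add_get(&cnt, 5U); /* cur == 20, cnt == 20 */
+    vatomic8_dec(&cnt);                        /* cnt == 19, no return value */
+    (void)old;
+    (void)cur;
+}
+```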
+
+
+
+---
diff --git a/doc/api/vsync/atomic/dispatch.h.md b/doc/api/vsync/atomic/dispatch.h.md
new file mode 100644
index 00000000..6c04c9d3
--- /dev/null
+++ b/doc/api/vsync/atomic/dispatch.h.md
@@ -0,0 +1,4845 @@
+# [vsync](../README.md) / [atomic](README.md) / dispatch.h
+_Set of macros to dispatch atomic functions._
+
+These macros select the atomic function variant based on the argument type; for example, [vatomic_read(a)](dispatch.h.md#macro-vatomic_read) calls `vatomic32_read(a)` if `a` is a `vatomic32_t`, and `vatomic64_read(a)` if `a` is a `vatomic64_t`. `vatomicptr_t` is mapped to the 32-bit or 64-bit functions, depending on the architecture.
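+
+For illustration, a minimal sketch of this dispatch behavior (the `example` function is hypothetical; the umbrella header `vsync/atomic.h` is assumed):
+
+```c
+#include <vsync/atomic.h>
+
+vatomic32_t flag;    /* 32-bit atomic */
+vatomic64_t counter; /* 64-bit atomic */
+
+void
+example(void)
+{
+    vatomic_init(&flag, 0U);    /* dispatches to vatomic32_init */
+    vatomic_init(&counter, 0U); /* dispatches to vatomic64_init */
+
+    vatomic_inc(&counter);                /* 64-bit increment */
+    vuint32_t f = vatomic_read(&flag);    /* calls vatomic32_read */
+    vuint64_t c = vatomic_read(&counter); /* calls vatomic64_read */
+    (void)f;
+    (void)c;
+}
+```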
+
+---
+# Macros
+
+| Macro | Description |
+|---|---|
+| [vatomic_init](dispatch.h.md#macro-vatomic_init) | Dispatches init calls with configurable memory order. |
+| [vatomic_read](dispatch.h.md#macro-vatomic_read) | Dispatches read calls with seq_cst memory order. |
+| [vatomic_read_acq](dispatch.h.md#macro-vatomic_read_acq) | Dispatches read calls with acquire memory order. |
+| [vatomic_read_rlx](dispatch.h.md#macro-vatomic_read_rlx) | Dispatches read calls with relaxed memory order. |
+| [vatomic_write](dispatch.h.md#macro-vatomic_write) | Dispatches write calls with seq_cst memory order. |
+| [vatomic_write_rel](dispatch.h.md#macro-vatomic_write_rel) | Dispatches write calls with release memory order. |
+| [vatomic_write_rlx](dispatch.h.md#macro-vatomic_write_rlx) | Dispatches write calls with relaxed memory order. |
+| [vatomic_xchg](dispatch.h.md#macro-vatomic_xchg) | Dispatches xchg calls with seq_cst memory order. |
+| [vatomic_xchg_acq](dispatch.h.md#macro-vatomic_xchg_acq) | Dispatches xchg calls with acquire memory order. |
+| [vatomic_xchg_rel](dispatch.h.md#macro-vatomic_xchg_rel) | Dispatches xchg calls with release memory order. |
+| [vatomic_xchg_rlx](dispatch.h.md#macro-vatomic_xchg_rlx) | Dispatches xchg calls with relaxed memory order. |
+| [vatomic_cmpxchg](dispatch.h.md#macro-vatomic_cmpxchg) | Dispatches cmpxchg calls with seq_cst memory order. |
+| [vatomic_cmpxchg_acq](dispatch.h.md#macro-vatomic_cmpxchg_acq) | Dispatches cmpxchg calls with acquire memory order. |
+| [vatomic_cmpxchg_rel](dispatch.h.md#macro-vatomic_cmpxchg_rel) | Dispatches cmpxchg calls with release memory order. |
+| [vatomic_cmpxchg_rlx](dispatch.h.md#macro-vatomic_cmpxchg_rlx) | Dispatches cmpxchg calls with relaxed memory order. |
+| [vatomic_inc](dispatch.h.md#macro-vatomic_inc) | Dispatches inc calls with seq_cst memory order. |
+| [vatomic_inc_rel](dispatch.h.md#macro-vatomic_inc_rel) | Dispatches inc calls with release memory order. |
+| [vatomic_inc_rlx](dispatch.h.md#macro-vatomic_inc_rlx) | Dispatches inc calls with relaxed memory order. |
+| [vatomic_dec](dispatch.h.md#macro-vatomic_dec) | Dispatches dec calls with seq_cst memory order. |
+| [vatomic_dec_rel](dispatch.h.md#macro-vatomic_dec_rel) | Dispatches dec calls with release memory order. |
+| [vatomic_dec_rlx](dispatch.h.md#macro-vatomic_dec_rlx) | Dispatches dec calls with relaxed memory order. |
+| [vatomic_inc_get](dispatch.h.md#macro-vatomic_inc_get) | Dispatches inc_get calls with seq_cst memory order. |
+| [vatomic_inc_get_acq](dispatch.h.md#macro-vatomic_inc_get_acq) | Dispatches inc_get calls with acquire memory order. |
+| [vatomic_inc_get_rel](dispatch.h.md#macro-vatomic_inc_get_rel) | Dispatches inc_get calls with release memory order. |
+| [vatomic_inc_get_rlx](dispatch.h.md#macro-vatomic_inc_get_rlx) | Dispatches inc_get calls with relaxed memory order. |
+| [vatomic_dec_get](dispatch.h.md#macro-vatomic_dec_get) | Dispatches dec_get calls with seq_cst memory order. |
+| [vatomic_dec_get_acq](dispatch.h.md#macro-vatomic_dec_get_acq) | Dispatches dec_get calls with acquire memory order. |
+| [vatomic_dec_get_rel](dispatch.h.md#macro-vatomic_dec_get_rel) | Dispatches dec_get calls with release memory order. |
+| [vatomic_dec_get_rlx](dispatch.h.md#macro-vatomic_dec_get_rlx) | Dispatches dec_get calls with relaxed memory order. |
+| [vatomic_get_inc](dispatch.h.md#macro-vatomic_get_inc) | Dispatches get_inc calls with seq_cst memory order. |
+| [vatomic_get_inc_acq](dispatch.h.md#macro-vatomic_get_inc_acq) | Dispatches get_inc calls with acquire memory order. |
+| [vatomic_get_inc_rel](dispatch.h.md#macro-vatomic_get_inc_rel) | Dispatches get_inc calls with release memory order. |
+| [vatomic_get_inc_rlx](dispatch.h.md#macro-vatomic_get_inc_rlx) | Dispatches get_inc calls with relaxed memory order. |
+| [vatomic_get_dec](dispatch.h.md#macro-vatomic_get_dec) | Dispatches get_dec calls with seq_cst memory order. |
+| [vatomic_get_dec_acq](dispatch.h.md#macro-vatomic_get_dec_acq) | Dispatches get_dec calls with acquire memory order. |
+| [vatomic_get_dec_rel](dispatch.h.md#macro-vatomic_get_dec_rel) | Dispatches get_dec calls with release memory order. |
+| [vatomic_get_dec_rlx](dispatch.h.md#macro-vatomic_get_dec_rlx) | Dispatches get_dec calls with relaxed memory order. |
+| [vatomic_add](dispatch.h.md#macro-vatomic_add) | Dispatches add calls with seq_cst memory order. |
+| [vatomic_add_rel](dispatch.h.md#macro-vatomic_add_rel) | Dispatches add calls with release memory order. |
+| [vatomic_add_rlx](dispatch.h.md#macro-vatomic_add_rlx) | Dispatches add calls with relaxed memory order. |
+| [vatomic_sub](dispatch.h.md#macro-vatomic_sub) | Dispatches sub calls with seq_cst memory order. |
+| [vatomic_sub_rel](dispatch.h.md#macro-vatomic_sub_rel) | Dispatches sub calls with release memory order. |
+| [vatomic_sub_rlx](dispatch.h.md#macro-vatomic_sub_rlx) | Dispatches sub calls with relaxed memory order. |
+| [vatomic_and](dispatch.h.md#macro-vatomic_and) | Dispatches and calls with seq_cst memory order. |
+| [vatomic_and_rel](dispatch.h.md#macro-vatomic_and_rel) | Dispatches and calls with release memory order. 
| +| [vatomic_and_rlx](dispatch.h.md#macro-vatomic_and_rlx) | Dispatches and calls with relaxed memory order. | +| [vatomic_xor](dispatch.h.md#macro-vatomic_xor) | Dispatches xor calls with seq_cst memory order. | +| [vatomic_xor_rel](dispatch.h.md#macro-vatomic_xor_rel) | Dispatches xor calls with release memory order. | +| [vatomic_xor_rlx](dispatch.h.md#macro-vatomic_xor_rlx) | Dispatches xor calls with relaxed memory order. | +| [vatomic_or](dispatch.h.md#macro-vatomic_or) | Dispatches or calls with seq_cst memory order. | +| [vatomic_or_rel](dispatch.h.md#macro-vatomic_or_rel) | Dispatches or calls with release memory order. | +| [vatomic_or_rlx](dispatch.h.md#macro-vatomic_or_rlx) | Dispatches or calls with relaxed memory order. | +| [vatomic_max](dispatch.h.md#macro-vatomic_max) | Dispatches max calls with seq_cst memory order. | +| [vatomic_max_rel](dispatch.h.md#macro-vatomic_max_rel) | Dispatches max calls with release memory order. | +| [vatomic_max_rlx](dispatch.h.md#macro-vatomic_max_rlx) | Dispatches max calls with relaxed memory order. | +| [vatomic_add_get](dispatch.h.md#macro-vatomic_add_get) | Dispatches add_get calls with seq_cst memory order. | +| [vatomic_add_get_acq](dispatch.h.md#macro-vatomic_add_get_acq) | Dispatches add_get calls with acquire memory order. | +| [vatomic_add_get_rel](dispatch.h.md#macro-vatomic_add_get_rel) | Dispatches add_get calls with release memory order. | +| [vatomic_add_get_rlx](dispatch.h.md#macro-vatomic_add_get_rlx) | Dispatches add_get calls with relaxed memory order. | +| [vatomic_sub_get](dispatch.h.md#macro-vatomic_sub_get) | Dispatches sub_get calls with seq_cst memory order. | +| [vatomic_sub_get_acq](dispatch.h.md#macro-vatomic_sub_get_acq) | Dispatches sub_get calls with acquire memory order. | +| [vatomic_sub_get_rel](dispatch.h.md#macro-vatomic_sub_get_rel) | Dispatches sub_get calls with release memory order. | +| [vatomic_sub_get_rlx](dispatch.h.md#macro-vatomic_sub_get_rlx) | Dispatches sub_get calls with relaxed memory order. | +| [vatomic_and_get](dispatch.h.md#macro-vatomic_and_get) | Dispatches and_get calls with seq_cst memory order. | +| [vatomic_and_get_acq](dispatch.h.md#macro-vatomic_and_get_acq) | Dispatches and_get calls with acquire memory order. | +| [vatomic_and_get_rel](dispatch.h.md#macro-vatomic_and_get_rel) | Dispatches and_get calls with release memory order. | +| [vatomic_and_get_rlx](dispatch.h.md#macro-vatomic_and_get_rlx) | Dispatches and_get calls with relaxed memory order. | +| [vatomic_xor_get](dispatch.h.md#macro-vatomic_xor_get) | Dispatches xor_get calls with seq_cst memory order. | +| [vatomic_xor_get_acq](dispatch.h.md#macro-vatomic_xor_get_acq) | Dispatches xor_get calls with acquire memory order. | +| [vatomic_xor_get_rel](dispatch.h.md#macro-vatomic_xor_get_rel) | Dispatches xor_get calls with release memory order. | +| [vatomic_xor_get_rlx](dispatch.h.md#macro-vatomic_xor_get_rlx) | Dispatches xor_get calls with relaxed memory order. | +| [vatomic_or_get](dispatch.h.md#macro-vatomic_or_get) | Dispatches or_get calls with seq_cst memory order. | +| [vatomic_or_get_acq](dispatch.h.md#macro-vatomic_or_get_acq) | Dispatches or_get calls with acquire memory order. | +| [vatomic_or_get_rel](dispatch.h.md#macro-vatomic_or_get_rel) | Dispatches or_get calls with release memory order. | +| [vatomic_or_get_rlx](dispatch.h.md#macro-vatomic_or_get_rlx) | Dispatches or_get calls with relaxed memory order. 
| +| [vatomic_max_get](dispatch.h.md#macro-vatomic_max_get) | Dispatches max_get calls with seq_cst memory order. | +| [vatomic_max_get_acq](dispatch.h.md#macro-vatomic_max_get_acq) | Dispatches max_get calls with acquire memory order. | +| [vatomic_max_get_rel](dispatch.h.md#macro-vatomic_max_get_rel) | Dispatches max_get calls with release memory order. | +| [vatomic_max_get_rlx](dispatch.h.md#macro-vatomic_max_get_rlx) | Dispatches max_get calls with relaxed memory order. | +| [vatomic_get_add](dispatch.h.md#macro-vatomic_get_add) | Dispatches get_add calls with seq_cst memory order. | +| [vatomic_get_sub](dispatch.h.md#macro-vatomic_get_sub) | Dispatches get_sub calls with seq_cst memory order. | +| [vatomic_get_and](dispatch.h.md#macro-vatomic_get_and) | Dispatches get_and calls with seq_cst memory order. | +| [vatomic_get_xor](dispatch.h.md#macro-vatomic_get_xor) | Dispatches get_xor calls with seq_cst memory order. | +| [vatomic_get_or](dispatch.h.md#macro-vatomic_get_or) | Dispatches get_or calls with seq_cst memory order. | +| [vatomic_get_max](dispatch.h.md#macro-vatomic_get_max) | Dispatches get_max calls with seq_cst memory order. | +| [vatomic_await_eq](dispatch.h.md#macro-vatomic_await_eq) | Dispatches await_eq calls with seq_cst memory order. | +| [vatomic_await_eq_acq](dispatch.h.md#macro-vatomic_await_eq_acq) | Dispatches await_eq calls with acquire memory order. | +| [vatomic_await_eq_rlx](dispatch.h.md#macro-vatomic_await_eq_rlx) | Dispatches await_eq calls with relaxed memory order. | +| [vatomic_await_neq](dispatch.h.md#macro-vatomic_await_neq) | Dispatches await_neq calls with seq_cst memory order. | +| [vatomic_await_neq_acq](dispatch.h.md#macro-vatomic_await_neq_acq) | Dispatches await_neq calls with acquire memory order. | +| [vatomic_await_neq_rlx](dispatch.h.md#macro-vatomic_await_neq_rlx) | Dispatches await_neq calls with relaxed memory order. | +| [vatomic_await_le](dispatch.h.md#macro-vatomic_await_le) | Dispatches await_le calls with seq_cst memory order. | +| [vatomic_await_le_acq](dispatch.h.md#macro-vatomic_await_le_acq) | Dispatches await_le calls with acquire memory order. | +| [vatomic_await_le_rlx](dispatch.h.md#macro-vatomic_await_le_rlx) | Dispatches await_le calls with relaxed memory order. | +| [vatomic_await_lt](dispatch.h.md#macro-vatomic_await_lt) | Dispatches await_lt calls with seq_cst memory order. | +| [vatomic_await_lt_acq](dispatch.h.md#macro-vatomic_await_lt_acq) | Dispatches await_lt calls with acquire memory order. | +| [vatomic_await_lt_rlx](dispatch.h.md#macro-vatomic_await_lt_rlx) | Dispatches await_lt calls with relaxed memory order. | +| [vatomic_await_gt](dispatch.h.md#macro-vatomic_await_gt) | Dispatches await_gt calls with seq_cst memory order. | +| [vatomic_await_gt_acq](dispatch.h.md#macro-vatomic_await_gt_acq) | Dispatches await_gt calls with acquire memory order. | +| [vatomic_await_gt_rlx](dispatch.h.md#macro-vatomic_await_gt_rlx) | Dispatches await_gt calls with relaxed memory order. | +| [vatomic_await_ge](dispatch.h.md#macro-vatomic_await_ge) | Dispatches await_ge calls with seq_cst memory order. | +| [vatomic_await_ge_acq](dispatch.h.md#macro-vatomic_await_ge_acq) | Dispatches await_ge calls with acquire memory order. | +| [vatomic_await_ge_rlx](dispatch.h.md#macro-vatomic_await_ge_rlx) | Dispatches await_ge calls with relaxed memory order. | +| [vatomic_await_eq_set](dispatch.h.md#macro-vatomic_await_eq_set) | Dispatches await_eq_set calls with seq_cst memory order. 
| +| [vatomic_await_eq_set_acq](dispatch.h.md#macro-vatomic_await_eq_set_acq) | Dispatches await_eq_set calls with acquire memory order. | +| [vatomic_await_eq_set_rel](dispatch.h.md#macro-vatomic_await_eq_set_rel) | Dispatches await_eq_set calls with release memory order. | +| [vatomic_await_eq_set_rlx](dispatch.h.md#macro-vatomic_await_eq_set_rlx) | Dispatches await_eq_set calls with relaxed memory order. | +| [vatomic_await_eq_sub](dispatch.h.md#macro-vatomic_await_eq_sub) | Dispatches await_eq_sub calls with seq_cst memory order. | +| [vatomic_await_eq_sub_acq](dispatch.h.md#macro-vatomic_await_eq_sub_acq) | Dispatches await_eq_sub calls with acquire memory order. | +| [vatomic_await_eq_sub_rel](dispatch.h.md#macro-vatomic_await_eq_sub_rel) | Dispatches await_eq_sub calls with release memory order. | +| [vatomic_await_eq_sub_rlx](dispatch.h.md#macro-vatomic_await_eq_sub_rlx) | Dispatches await_eq_sub calls with relaxed memory order. | +| [vatomic_await_eq_add](dispatch.h.md#macro-vatomic_await_eq_add) | Dispatches await_eq_add calls with seq_cst memory order. | +| [vatomic_await_eq_add_acq](dispatch.h.md#macro-vatomic_await_eq_add_acq) | Dispatches await_eq_add calls with acquire memory order. | +| [vatomic_await_eq_add_rel](dispatch.h.md#macro-vatomic_await_eq_add_rel) | Dispatches await_eq_add calls with release memory order. | +| [vatomic_await_eq_add_rlx](dispatch.h.md#macro-vatomic_await_eq_add_rlx) | Dispatches await_eq_add calls with relaxed memory order. | +| [vatomic_await_neq_set](dispatch.h.md#macro-vatomic_await_neq_set) | Dispatches await_neq_set calls with seq_cst memory order. | +| [vatomic_await_neq_set_acq](dispatch.h.md#macro-vatomic_await_neq_set_acq) | Dispatches await_neq_set calls with acquire memory order. | +| [vatomic_await_neq_set_rel](dispatch.h.md#macro-vatomic_await_neq_set_rel) | Dispatches await_neq_set calls with release memory order. | +| [vatomic_await_neq_set_rlx](dispatch.h.md#macro-vatomic_await_neq_set_rlx) | Dispatches await_neq_set calls with relaxed memory order. | +| [vatomic_await_neq_sub](dispatch.h.md#macro-vatomic_await_neq_sub) | Dispatches await_neq_sub calls with seq_cst memory order. | +| [vatomic_await_neq_sub_acq](dispatch.h.md#macro-vatomic_await_neq_sub_acq) | Dispatches await_neq_sub calls with acquire memory order. | +| [vatomic_await_neq_sub_rel](dispatch.h.md#macro-vatomic_await_neq_sub_rel) | Dispatches await_neq_sub calls with release memory order. | +| [vatomic_await_neq_sub_rlx](dispatch.h.md#macro-vatomic_await_neq_sub_rlx) | Dispatches await_neq_sub calls with relaxed memory order. | +| [vatomic_await_neq_add](dispatch.h.md#macro-vatomic_await_neq_add) | Dispatches await_neq_add calls with seq_cst memory order. | +| [vatomic_await_neq_add_acq](dispatch.h.md#macro-vatomic_await_neq_add_acq) | Dispatches await_neq_add calls with acquire memory order. | +| [vatomic_await_neq_add_rel](dispatch.h.md#macro-vatomic_await_neq_add_rel) | Dispatches await_neq_add calls with release memory order. | +| [vatomic_await_neq_add_rlx](dispatch.h.md#macro-vatomic_await_neq_add_rlx) | Dispatches await_neq_add calls with relaxed memory order. | +| [vatomic_await_le_set](dispatch.h.md#macro-vatomic_await_le_set) | Dispatches await_le_set calls with seq_cst memory order. | +| [vatomic_await_le_set_acq](dispatch.h.md#macro-vatomic_await_le_set_acq) | Dispatches await_le_set calls with acquire memory order. | +| [vatomic_await_le_set_rel](dispatch.h.md#macro-vatomic_await_le_set_rel) | Dispatches await_le_set calls with release memory order. 
| +| [vatomic_await_le_set_rlx](dispatch.h.md#macro-vatomic_await_le_set_rlx) | Dispatches await_le_set calls with relaxed memory order. | +| [vatomic_await_le_sub](dispatch.h.md#macro-vatomic_await_le_sub) | Dispatches await_le_sub calls with seq_cst memory order. | +| [vatomic_await_le_sub_acq](dispatch.h.md#macro-vatomic_await_le_sub_acq) | Dispatches await_le_sub calls with acquire memory order. | +| [vatomic_await_le_sub_rel](dispatch.h.md#macro-vatomic_await_le_sub_rel) | Dispatches await_le_sub calls with release memory order. | +| [vatomic_await_le_sub_rlx](dispatch.h.md#macro-vatomic_await_le_sub_rlx) | Dispatches await_le_sub calls with relaxed memory order. | +| [vatomic_await_le_add](dispatch.h.md#macro-vatomic_await_le_add) | Dispatches await_le_add calls with seq_cst memory order. | +| [vatomic_await_le_add_acq](dispatch.h.md#macro-vatomic_await_le_add_acq) | Dispatches await_le_add calls with acquire memory order. | +| [vatomic_await_le_add_rel](dispatch.h.md#macro-vatomic_await_le_add_rel) | Dispatches await_le_add calls with release memory order. | +| [vatomic_await_le_add_rlx](dispatch.h.md#macro-vatomic_await_le_add_rlx) | Dispatches await_le_add calls with relaxed memory order. | +| [vatomic_await_lt_set](dispatch.h.md#macro-vatomic_await_lt_set) | Dispatches await_lt_set calls with seq_cst memory order. | +| [vatomic_await_lt_set_acq](dispatch.h.md#macro-vatomic_await_lt_set_acq) | Dispatches await_lt_set calls with acquire memory order. | +| [vatomic_await_lt_set_rel](dispatch.h.md#macro-vatomic_await_lt_set_rel) | Dispatches await_lt_set calls with release memory order. | +| [vatomic_await_lt_set_rlx](dispatch.h.md#macro-vatomic_await_lt_set_rlx) | Dispatches await_lt_set calls with relaxed memory order. | +| [vatomic_await_lt_sub](dispatch.h.md#macro-vatomic_await_lt_sub) | Dispatches await_lt_sub calls with seq_cst memory order. | +| [vatomic_await_lt_sub_acq](dispatch.h.md#macro-vatomic_await_lt_sub_acq) | Dispatches await_lt_sub calls with acquire memory order. | +| [vatomic_await_lt_sub_rel](dispatch.h.md#macro-vatomic_await_lt_sub_rel) | Dispatches await_lt_sub calls with release memory order. | +| [vatomic_await_lt_sub_rlx](dispatch.h.md#macro-vatomic_await_lt_sub_rlx) | Dispatches await_lt_sub calls with relaxed memory order. | +| [vatomic_await_lt_add](dispatch.h.md#macro-vatomic_await_lt_add) | Dispatches await_lt_add calls with seq_cst memory order. | +| [vatomic_await_lt_add_acq](dispatch.h.md#macro-vatomic_await_lt_add_acq) | Dispatches await_lt_add calls with acquire memory order. | +| [vatomic_await_lt_add_rel](dispatch.h.md#macro-vatomic_await_lt_add_rel) | Dispatches await_lt_add calls with release memory order. | +| [vatomic_await_lt_add_rlx](dispatch.h.md#macro-vatomic_await_lt_add_rlx) | Dispatches await_lt_add calls with relaxed memory order. | +| [vatomic_await_gt_set](dispatch.h.md#macro-vatomic_await_gt_set) | Dispatches await_gt_set calls with seq_cst memory order. | +| [vatomic_await_gt_set_acq](dispatch.h.md#macro-vatomic_await_gt_set_acq) | Dispatches await_gt_set calls with acquire memory order. | +| [vatomic_await_gt_set_rel](dispatch.h.md#macro-vatomic_await_gt_set_rel) | Dispatches await_gt_set calls with release memory order. | +| [vatomic_await_gt_set_rlx](dispatch.h.md#macro-vatomic_await_gt_set_rlx) | Dispatches await_gt_set calls with relaxed memory order. | +| [vatomic_await_gt_sub](dispatch.h.md#macro-vatomic_await_gt_sub) | Dispatches await_gt_sub calls with seq_cst memory order. 
| +| [vatomic_await_gt_sub_acq](dispatch.h.md#macro-vatomic_await_gt_sub_acq) | Dispatches await_gt_sub calls with acquire memory order. | +| [vatomic_await_gt_sub_rel](dispatch.h.md#macro-vatomic_await_gt_sub_rel) | Dispatches await_gt_sub calls with release memory order. | +| [vatomic_await_gt_sub_rlx](dispatch.h.md#macro-vatomic_await_gt_sub_rlx) | Dispatches await_gt_sub calls with relaxed memory order. | +| [vatomic_await_gt_add](dispatch.h.md#macro-vatomic_await_gt_add) | Dispatches await_gt_add calls with seq_cst memory order. | +| [vatomic_await_gt_add_acq](dispatch.h.md#macro-vatomic_await_gt_add_acq) | Dispatches await_gt_add calls with acquire memory order. | +| [vatomic_await_gt_add_rel](dispatch.h.md#macro-vatomic_await_gt_add_rel) | Dispatches await_gt_add calls with release memory order. | +| [vatomic_await_gt_add_rlx](dispatch.h.md#macro-vatomic_await_gt_add_rlx) | Dispatches await_gt_add calls with relaxed memory order. | +| [vatomic_await_ge_set](dispatch.h.md#macro-vatomic_await_ge_set) | Dispatches await_ge_set calls with seq_cst memory order. | +| [vatomic_await_ge_set_acq](dispatch.h.md#macro-vatomic_await_ge_set_acq) | Dispatches await_ge_set calls with acquire memory order. | +| [vatomic_await_ge_set_rel](dispatch.h.md#macro-vatomic_await_ge_set_rel) | Dispatches await_ge_set calls with release memory order. | +| [vatomic_await_ge_set_rlx](dispatch.h.md#macro-vatomic_await_ge_set_rlx) | Dispatches await_ge_set calls with relaxed memory order. | +| [vatomic_await_ge_sub](dispatch.h.md#macro-vatomic_await_ge_sub) | Dispatches await_ge_sub calls with seq_cst memory order. | +| [vatomic_await_ge_sub_acq](dispatch.h.md#macro-vatomic_await_ge_sub_acq) | Dispatches await_ge_sub calls with acquire memory order. | +| [vatomic_await_ge_sub_rel](dispatch.h.md#macro-vatomic_await_ge_sub_rel) | Dispatches await_ge_sub calls with release memory order. | +| [vatomic_await_ge_sub_rlx](dispatch.h.md#macro-vatomic_await_ge_sub_rlx) | Dispatches await_ge_sub calls with relaxed memory order. | +| [vatomic_await_ge_add](dispatch.h.md#macro-vatomic_await_ge_add) | Dispatches await_ge_add calls with seq_cst memory order. | +| [vatomic_await_ge_add_acq](dispatch.h.md#macro-vatomic_await_ge_add_acq) | Dispatches await_ge_add calls with acquire memory order. | +| [vatomic_await_ge_add_rel](dispatch.h.md#macro-vatomic_await_ge_add_rel) | Dispatches await_ge_add calls with release memory order. | +| [vatomic_await_ge_add_rlx](dispatch.h.md#macro-vatomic_await_ge_add_rlx) | Dispatches await_ge_add calls with relaxed memory order. | + +## Macro `vatomic_init` + +```c +vatomic_init(a,v) +``` + + +_Dispatches init calls with configurable memory order._ + + +See [vatomic32_init](core_u32.h.md#function-vatomic32_init) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: initial value + + + + +## Macro `vatomic_read` + +```c +vatomic_read(a) +``` + + +_Dispatches read calls with seq_cst memory order._ + + +See [vatomic32_read](core_u32.h.md#function-vatomic32_read) for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_read_acq` + +```c +vatomic_read_acq(a) +``` + + +_Dispatches read calls with acquire memory order._ + + +See [vatomic32_read_acq](core_u32.h.md#function-vatomic32_read_acq) for details. 
+ + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: acquire + + +## Macro `vatomic_read_rlx` + +```c +vatomic_read_rlx(a) +``` + + +_Dispatches read calls with relaxed memory order._ + + +See [vatomic32_read_rlx](core_u32.h.md#function-vatomic32_read_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** current value + +**Memory ordering**: relaxed + + +## Macro `vatomic_write` + +```c +vatomic_write(a,v) +``` + + +_Dispatches write calls with seq_cst memory order._ + + +See [vatomic32_write](core_u32.h.md#function-vatomic32_write) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: seq_cst + + +## Macro `vatomic_write_rel` + +```c +vatomic_write_rel(a,v) +``` + + +_Dispatches write calls with release memory order._ + + +See [vatomic32_write_rel](core_u32.h.md#function-vatomic32_write_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: release + + +## Macro `vatomic_write_rlx` + +```c +vatomic_write_rlx(a,v) +``` + + +_Dispatches write calls with relaxed memory order._ + + +See [vatomic32_write_rlx](core_u32.h.md#function-vatomic32_write_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Memory ordering**: relaxed + + +## Macro `vatomic_xchg` + +```c +vatomic_xchg(a,v) +``` + + +_Dispatches xchg calls with seq_cst memory order._ + + +See [vatomic32_xchg](core_u32.h.md#function-vatomic32_xchg) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_xchg_acq` + +```c +vatomic_xchg_acq(a,v) +``` + + +_Dispatches xchg calls with acquire memory order._ + + +See [vatomic32_xchg_acq](core_u32.h.md#function-vatomic32_xchg_acq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Macro `vatomic_xchg_rel` + +```c +vatomic_xchg_rel(a,v) +``` + + +_Dispatches xchg calls with release memory order._ + + +See [vatomic32_xchg_rel](core_u32.h.md#function-vatomic32_xchg_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: release + + +## Macro `vatomic_xchg_rlx` + +```c +vatomic_xchg_rlx(a,v) +``` + + +_Dispatches xchg calls with relaxed memory order._ + + +See [vatomic32_xchg_rlx](core_u32.h.md#function-vatomic32_xchg_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Macro `vatomic_cmpxchg` + +```c +vatomic_cmpxchg(a,c,v) +``` + + +_Dispatches cmpxchg calls with seq_cst memory order._ + + +See [vatomic32_cmpxchg](core_u32.h.md#function-vatomic32_cmpxchg) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: expected value +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_cmpxchg_acq` + +```c +vatomic_cmpxchg_acq(a,c,v) +``` + + +_Dispatches cmpxchg calls with acquire memory order._ + + +See [vatomic32_cmpxchg_acq](core_u32.h.md#function-vatomic32_cmpxchg_acq) for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `c`: expected value +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Macro `vatomic_cmpxchg_rel` + +```c +vatomic_cmpxchg_rel(a,c,v) +``` + + +_Dispatches cmpxchg calls with release memory order._ + + +See [vatomic32_cmpxchg_rel](core_u32.h.md#function-vatomic32_cmpxchg_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: expected value +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: release + + +## Macro `vatomic_cmpxchg_rlx` + +```c +vatomic_cmpxchg_rlx(a,c,v) +``` + + +_Dispatches cmpxchg calls with relaxed memory order._ + + +See [vatomic32_cmpxchg_rlx](core_u32.h.md#function-vatomic32_cmpxchg_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: expected value +- `v`: new value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Macro `vatomic_inc` + +```c +vatomic_inc(a) +``` + + +_Dispatches inc calls with seq_cst memory order._ + + +See [vatomic32_inc](core_u32.h.md#function-vatomic32_inc) for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: seq_cst + + +## Macro `vatomic_inc_rel` + +```c +vatomic_inc_rel(a) +``` + + +_Dispatches inc calls with release memory order._ + + +See [vatomic32_inc_rel](core_u32.h.md#function-vatomic32_inc_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: release + + +## Macro `vatomic_inc_rlx` + +```c +vatomic_inc_rlx(a) +``` + + +_Dispatches inc calls with relaxed memory order._ + + +See [vatomic32_inc_rlx](core_u32.h.md#function-vatomic32_inc_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: relaxed + + +## Macro `vatomic_dec` + +```c +vatomic_dec(a) +``` + + +_Dispatches dec calls with seq_cst memory order._ + + +See [vatomic32_dec](core_u32.h.md#function-vatomic32_dec) for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: seq_cst + + +## Macro `vatomic_dec_rel` + +```c +vatomic_dec_rel(a) +``` + + +_Dispatches dec calls with release memory order._ + + +See [vatomic32_dec_rel](core_u32.h.md#function-vatomic32_dec_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: release + + +## Macro `vatomic_dec_rlx` + +```c +vatomic_dec_rlx(a) +``` + + +_Dispatches dec calls with relaxed memory order._ + + +See [vatomic32_dec_rlx](core_u32.h.md#function-vatomic32_dec_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Memory ordering**: relaxed + + +## Macro `vatomic_inc_get` + +```c +vatomic_inc_get(a) +``` + + +_Dispatches inc_get calls with seq_cst memory order._ + + +See vatomic32_inc_get for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_inc_get_acq` + +```c +vatomic_inc_get_acq(a) +``` + + +_Dispatches inc_get calls with acquire memory order._ + + +See vatomic32_inc_get_acq for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Macro `vatomic_inc_get_rel` + +```c +vatomic_inc_get_rel(a) +``` + + +_Dispatches inc_get calls with release memory order._ + + +See vatomic32_inc_get_rel for details. 
+ + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: release + + +## Macro `vatomic_inc_get_rlx` + +```c +vatomic_inc_get_rlx(a) +``` + + +_Dispatches inc_get calls with relaxed memory order._ + + +See vatomic32_inc_get_rlx for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Macro `vatomic_dec_get` + +```c +vatomic_dec_get(a) +``` + + +_Dispatches dec_get calls with seq_cst memory order._ + + +See vatomic32_dec_get for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_dec_get_acq` + +```c +vatomic_dec_get_acq(a) +``` + + +_Dispatches dec_get calls with acquire memory order._ + + +See vatomic32_dec_get_acq for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Macro `vatomic_dec_get_rel` + +```c +vatomic_dec_get_rel(a) +``` + + +_Dispatches dec_get calls with release memory order._ + + +See vatomic32_dec_get_rel for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: release + + +## Macro `vatomic_dec_get_rlx` + +```c +vatomic_dec_get_rlx(a) +``` + + +_Dispatches dec_get calls with relaxed memory order._ + + +See vatomic32_dec_get_rlx for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Macro `vatomic_get_inc` + +```c +vatomic_get_inc(a) +``` + + +_Dispatches get_inc calls with seq_cst memory order._ + + +See vatomic32_get_inc for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_get_inc_acq` + +```c +vatomic_get_inc_acq(a) +``` + + +_Dispatches get_inc calls with acquire memory order._ + + +See vatomic32_get_inc_acq for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Macro `vatomic_get_inc_rel` + +```c +vatomic_get_inc_rel(a) +``` + + +_Dispatches get_inc calls with release memory order._ + + +See vatomic32_get_inc_rel for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: release + + +## Macro `vatomic_get_inc_rlx` + +```c +vatomic_get_inc_rlx(a) +``` + + +_Dispatches get_inc calls with relaxed memory order._ + + +See vatomic32_get_inc_rlx for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Macro `vatomic_get_dec` + +```c +vatomic_get_dec(a) +``` + + +_Dispatches get_dec calls with seq_cst memory order._ + + +See vatomic32_get_dec for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_get_dec_acq` + +```c +vatomic_get_dec_acq(a) +``` + + +_Dispatches get_dec calls with acquire memory order._ + + +See vatomic32_get_dec_acq for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: acquire + + +## Macro `vatomic_get_dec_rel` + +```c +vatomic_get_dec_rel(a) +``` + + +_Dispatches get_dec calls with release memory order._ + + +See vatomic32_get_dec_rel for details. 
+ + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: release + + +## Macro `vatomic_get_dec_rlx` + +```c +vatomic_get_dec_rlx(a) +``` + + +_Dispatches get_dec calls with relaxed memory order._ + + +See vatomic32_get_dec_rlx for details. + + + +**Parameters:** + +- `a`: atomic variable + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Macro `vatomic_add` + +```c +vatomic_add(a,v) +``` + + +_Dispatches add calls with seq_cst memory order._ + + +See [vatomic32_add](core_u32.h.md#function-vatomic32_add) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: seq_cst + + +## Macro `vatomic_add_rel` + +```c +vatomic_add_rel(a,v) +``` + + +_Dispatches add calls with release memory order._ + + +See [vatomic32_add_rel](core_u32.h.md#function-vatomic32_add_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: release + + +## Macro `vatomic_add_rlx` + +```c +vatomic_add_rlx(a,v) +``` + + +_Dispatches add calls with relaxed memory order._ + + +See [vatomic32_add_rlx](core_u32.h.md#function-vatomic32_add_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: relaxed + + +## Macro `vatomic_sub` + +```c +vatomic_sub(a,v) +``` + + +_Dispatches sub calls with seq_cst memory order._ + + +See [vatomic32_sub](core_u32.h.md#function-vatomic32_sub) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: seq_cst + + +## Macro `vatomic_sub_rel` + +```c +vatomic_sub_rel(a,v) +``` + + +_Dispatches sub calls with release memory order._ + + +See [vatomic32_sub_rel](core_u32.h.md#function-vatomic32_sub_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: release + + +## Macro `vatomic_sub_rlx` + +```c +vatomic_sub_rlx(a,v) +``` + + +_Dispatches sub calls with relaxed memory order._ + + +See [vatomic32_sub_rlx](core_u32.h.md#function-vatomic32_sub_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: relaxed + + +## Macro `vatomic_and` + +```c +vatomic_and(a,v) +``` + + +_Dispatches and calls with seq_cst memory order._ + + +See [vatomic32_and](core_u32.h.md#function-vatomic32_and) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: seq_cst + + +## Macro `vatomic_and_rel` + +```c +vatomic_and_rel(a,v) +``` + + +_Dispatches and calls with release memory order._ + + +See [vatomic32_and_rel](core_u32.h.md#function-vatomic32_and_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: release + + +## Macro `vatomic_and_rlx` + +```c +vatomic_and_rlx(a,v) +``` + + +_Dispatches and calls with relaxed memory order._ + + +See [vatomic32_and_rlx](core_u32.h.md#function-vatomic32_and_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: relaxed + + +## Macro `vatomic_xor` + +```c +vatomic_xor(a,v) +``` + + +_Dispatches xor calls with seq_cst memory order._ + + +See [vatomic32_xor](core_u32.h.md#function-vatomic32_xor) for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: seq_cst + + +## Macro `vatomic_xor_rel` + +```c +vatomic_xor_rel(a,v) +``` + + +_Dispatches xor calls with release memory order._ + + +See [vatomic32_xor_rel](core_u32.h.md#function-vatomic32_xor_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: release + + +## Macro `vatomic_xor_rlx` + +```c +vatomic_xor_rlx(a,v) +``` + + +_Dispatches xor calls with relaxed memory order._ + + +See [vatomic32_xor_rlx](core_u32.h.md#function-vatomic32_xor_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: relaxed + + +## Macro `vatomic_or` + +```c +vatomic_or(a,v) +``` + + +_Dispatches or calls with seq_cst memory order._ + + +See [vatomic32_or](core_u32.h.md#function-vatomic32_or) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: seq_cst + + +## Macro `vatomic_or_rel` + +```c +vatomic_or_rel(a,v) +``` + + +_Dispatches or calls with release memory order._ + + +See [vatomic32_or_rel](core_u32.h.md#function-vatomic32_or_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: release + + +## Macro `vatomic_or_rlx` + +```c +vatomic_or_rlx(a,v) +``` + + +_Dispatches or calls with relaxed memory order._ + + +See [vatomic32_or_rlx](core_u32.h.md#function-vatomic32_or_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: relaxed + + +## Macro `vatomic_max` + +```c +vatomic_max(a,v) +``` + + +_Dispatches max calls with seq_cst memory order._ + + +See [vatomic32_max](core_u32.h.md#function-vatomic32_max) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: seq_cst + + +## Macro `vatomic_max_rel` + +```c +vatomic_max_rel(a,v) +``` + + +_Dispatches max calls with release memory order._ + + +See [vatomic32_max_rel](core_u32.h.md#function-vatomic32_max_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: release + + +## Macro `vatomic_max_rlx` + +```c +vatomic_max_rlx(a,v) +``` + + +_Dispatches max calls with relaxed memory order._ + + +See [vatomic32_max_rlx](core_u32.h.md#function-vatomic32_max_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Memory ordering**: relaxed + + +## Macro `vatomic_add_get` + +```c +vatomic_add_get(a,v) +``` + + +_Dispatches add_get calls with seq_cst memory order._ + + +See [vatomic32_add_get](core_u32.h.md#function-vatomic32_add_get) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_add_get_acq` + +```c +vatomic_add_get_acq(a,v) +``` + + +_Dispatches add_get calls with acquire memory order._ + + +See [vatomic32_add_get_acq](core_u32.h.md#function-vatomic32_add_get_acq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Macro `vatomic_add_get_rel` + +```c +vatomic_add_get_rel(a,v) +``` + + +_Dispatches add_get calls with release memory order._ + + +See [vatomic32_add_get_rel](core_u32.h.md#function-vatomic32_add_get_rel) for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: release + + +## Macro `vatomic_add_get_rlx` + +```c +vatomic_add_get_rlx(a,v) +``` + + +_Dispatches add_get calls with relaxed memory order._ + + +See [vatomic32_add_get_rlx](core_u32.h.md#function-vatomic32_add_get_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Macro `vatomic_sub_get` + +```c +vatomic_sub_get(a,v) +``` + + +_Dispatches sub_get calls with seq_cst memory order._ + + +See [vatomic32_sub_get](core_u32.h.md#function-vatomic32_sub_get) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_sub_get_acq` + +```c +vatomic_sub_get_acq(a,v) +``` + + +_Dispatches sub_get calls with acquire memory order._ + + +See [vatomic32_sub_get_acq](core_u32.h.md#function-vatomic32_sub_get_acq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Macro `vatomic_sub_get_rel` + +```c +vatomic_sub_get_rel(a,v) +``` + + +_Dispatches sub_get calls with release memory order._ + + +See [vatomic32_sub_get_rel](core_u32.h.md#function-vatomic32_sub_get_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: release + + +## Macro `vatomic_sub_get_rlx` + +```c +vatomic_sub_get_rlx(a,v) +``` + + +_Dispatches sub_get calls with relaxed memory order._ + + +See [vatomic32_sub_get_rlx](core_u32.h.md#function-vatomic32_sub_get_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Macro `vatomic_and_get` + +```c +vatomic_and_get(a,v) +``` + + +_Dispatches and_get calls with seq_cst memory order._ + + +See [vatomic32_and_get](core_u32.h.md#function-vatomic32_and_get) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_and_get_acq` + +```c +vatomic_and_get_acq(a,v) +``` + + +_Dispatches and_get calls with acquire memory order._ + + +See [vatomic32_and_get_acq](core_u32.h.md#function-vatomic32_and_get_acq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Macro `vatomic_and_get_rel` + +```c +vatomic_and_get_rel(a,v) +``` + + +_Dispatches and_get calls with release memory order._ + + +See [vatomic32_and_get_rel](core_u32.h.md#function-vatomic32_and_get_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: release + + +## Macro `vatomic_and_get_rlx` + +```c +vatomic_and_get_rlx(a,v) +``` + + +_Dispatches and_get calls with relaxed memory order._ + + +See [vatomic32_and_get_rlx](core_u32.h.md#function-vatomic32_and_get_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Macro `vatomic_xor_get` + +```c +vatomic_xor_get(a,v) +``` + + +_Dispatches xor_get calls with seq_cst memory order._ + + +See [vatomic32_xor_get](core_u32.h.md#function-vatomic32_xor_get) for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_xor_get_acq` + +```c +vatomic_xor_get_acq(a,v) +``` + + +_Dispatches xor_get calls with acquire memory order._ + + +See [vatomic32_xor_get_acq](core_u32.h.md#function-vatomic32_xor_get_acq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Macro `vatomic_xor_get_rel` + +```c +vatomic_xor_get_rel(a,v) +``` + + +_Dispatches xor_get calls with release memory order._ + + +See [vatomic32_xor_get_rel](core_u32.h.md#function-vatomic32_xor_get_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: release + + +## Macro `vatomic_xor_get_rlx` + +```c +vatomic_xor_get_rlx(a,v) +``` + + +_Dispatches xor_get calls with relaxed memory order._ + + +See [vatomic32_xor_get_rlx](core_u32.h.md#function-vatomic32_xor_get_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Macro `vatomic_or_get` + +```c +vatomic_or_get(a,v) +``` + + +_Dispatches or_get calls with seq_cst memory order._ + + +See [vatomic32_or_get](core_u32.h.md#function-vatomic32_or_get) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_or_get_acq` + +```c +vatomic_or_get_acq(a,v) +``` + + +_Dispatches or_get calls with acquire memory order._ + + +See [vatomic32_or_get_acq](core_u32.h.md#function-vatomic32_or_get_acq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Macro `vatomic_or_get_rel` + +```c +vatomic_or_get_rel(a,v) +``` + + +_Dispatches or_get calls with release memory order._ + + +See [vatomic32_or_get_rel](core_u32.h.md#function-vatomic32_or_get_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: release + + +## Macro `vatomic_or_get_rlx` + +```c +vatomic_or_get_rlx(a,v) +``` + + +_Dispatches or_get calls with relaxed memory order._ + + +See [vatomic32_or_get_rlx](core_u32.h.md#function-vatomic32_or_get_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: relaxed + + +## Macro `vatomic_max_get` + +```c +vatomic_max_get(a,v) +``` + + +_Dispatches max_get calls with seq_cst memory order._ + + +See [vatomic32_max_get](core_u32.h.md#function-vatomic32_max_get) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_max_get_acq` + +```c +vatomic_max_get_acq(a,v) +``` + + +_Dispatches max_get calls with acquire memory order._ + + +See [vatomic32_max_get_acq](core_u32.h.md#function-vatomic32_max_get_acq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** new value + +**Memory ordering**: acquire + + +## Macro `vatomic_max_get_rel` + +```c +vatomic_max_get_rel(a,v) +``` + + +_Dispatches max_get calls with release memory order._ + + +See [vatomic32_max_get_rel](core_u32.h.md#function-vatomic32_max_get_rel) for details. 
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: operand value
+
+
+**Returns:** new value
+
+**Memory ordering**: release
+
+
+## Macro `vatomic_max_get_rlx`
+
+```c
+vatomic_max_get_rlx(a,v)
+```
+
+
+_Dispatches max_get calls with relaxed memory order._
+
+
+See [vatomic32_max_get_rlx](core_u32.h.md#function-vatomic32_max_get_rlx) for details.
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: operand value
+
+
+**Returns:** new value
+
+**Memory ordering**: relaxed
+
+
+## Macro `vatomic_get_add`
+
+```c
+vatomic_get_add(a,v)
+```
+
+
+_Dispatches get_add calls with seq_cst memory order._
+
+
+See [vatomic32_get_add](core_u32.h.md#function-vatomic32_get_add) for details. For the acquire, release, and relaxed variants, see [vatomic32_get_add_acq](core_u32.h.md#function-vatomic32_get_add_acq), [vatomic32_get_add_rel](core_u32.h.md#function-vatomic32_get_add_rel), and [vatomic32_get_add_rlx](core_u32.h.md#function-vatomic32_get_add_rlx).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: operand value
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Macro `vatomic_get_sub`
+
+```c
+vatomic_get_sub(a,v)
+```
+
+
+_Dispatches get_sub calls with seq_cst memory order._
+
+
+See [vatomic32_get_sub](core_u32.h.md#function-vatomic32_get_sub) for details. For the acquire, release, and relaxed variants, see [vatomic32_get_sub_acq](core_u32.h.md#function-vatomic32_get_sub_acq), [vatomic32_get_sub_rel](core_u32.h.md#function-vatomic32_get_sub_rel), and [vatomic32_get_sub_rlx](core_u32.h.md#function-vatomic32_get_sub_rlx).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: operand value
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
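+
+
+For example, because the `get_` variants return the value *before* the modification, `vatomic_get_add` can hand out unique indices to concurrent callers. A minimal sketch (the `claim_slot` helper is hypothetical):
+
+```c
+#include <vsync/atomic.h>
+
+vatomic64_t next_slot;
+
+/* Each caller obtains a distinct slot index, even under contention. */
+vuint64_t
+claim_slot(void)
+{
+    /* returns the pre-increment value: 0, 1, 2, ... */
+    return vatomic_get_add(&next_slot, 1U);
+}
+```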
+
+
+## Macro `vatomic_get_and`
+
+```c
+vatomic_get_and(a,v)
+```
+
+
+_Dispatches get_and calls with seq_cst memory order._
+
+
+See [vatomic32_get_and](core_u32.h.md#function-vatomic32_get_and) for details. For the acquire, release, and relaxed variants, see [vatomic32_get_and_acq](core_u32.h.md#function-vatomic32_get_and_acq), [vatomic32_get_and_rel](core_u32.h.md#function-vatomic32_get_and_rel), and [vatomic32_get_and_rlx](core_u32.h.md#function-vatomic32_get_and_rlx).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: operand value
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Macro `vatomic_get_xor`
+
+```c
+vatomic_get_xor(a,v)
+```
+
+
+_Dispatches get_xor calls with seq_cst memory order._
+
+
+See [vatomic32_get_xor](core_u32.h.md#function-vatomic32_get_xor) for details. For the acquire, release, and relaxed variants, see [vatomic32_get_xor_acq](core_u32.h.md#function-vatomic32_get_xor_acq), [vatomic32_get_xor_rel](core_u32.h.md#function-vatomic32_get_xor_rel), and [vatomic32_get_xor_rlx](core_u32.h.md#function-vatomic32_get_xor_rlx).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: operand value
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Macro `vatomic_get_or`
+
+```c
+vatomic_get_or(a,v)
+```
+
+
+_Dispatches get_or calls with seq_cst memory order._
+
+
+See [vatomic32_get_or](core_u32.h.md#function-vatomic32_get_or) for details. For the acquire, release, and relaxed variants, see [vatomic32_get_or_acq](core_u32.h.md#function-vatomic32_get_or_acq), [vatomic32_get_or_rel](core_u32.h.md#function-vatomic32_get_or_rel), and [vatomic32_get_or_rlx](core_u32.h.md#function-vatomic32_get_or_rlx).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: operand value
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
+
+
+## Macro `vatomic_get_max`
+
+```c
+vatomic_get_max(a,v)
+```
+
+
+_Dispatches get_max calls with seq_cst memory order._
+
+
+See [vatomic32_get_max](core_u32.h.md#function-vatomic32_get_max) for details. For the acquire, release, and relaxed variants, see [vatomic32_get_max_acq](core_u32.h.md#function-vatomic32_get_max_acq), [vatomic32_get_max_rel](core_u32.h.md#function-vatomic32_get_max_rel), and [vatomic32_get_max_rlx](core_u32.h.md#function-vatomic32_get_max_rlx).
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `v`: operand value
+
+
+**Returns:** old value
+
+**Memory ordering**: seq_cst
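+
+
+As an illustration of the max family (a hypothetical high-water-mark tracker; this assumes `vatomic_max` stores the maximum of the current and the given value, as documented in [core_u32.h](core_u32.h.md)):
+
+```c
+#include <vsync/atomic.h>
+
+vatomic32_t high_water;
+
+void
+record_depth(vuint32_t depth)
+{
+    /* atomically keeps the largest depth observed so far */
+    vatomic_max(&high_water, depth);
+}
+```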
+ + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** old value + +**Memory ordering**: acquire + +See [vatomic32_get_max_rel](core_u32.h.md#function-vatomic32_get_max_rel) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** old value + +**Memory ordering**: release + +See [vatomic32_get_max_rlx](core_u32.h.md#function-vatomic32_get_max_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `v`: operand value + + +**Returns:** old value + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_eq` + +```c +vatomic_await_eq(a,c) +``` + + +_Dispatches await_eq calls with seq_cst memory order._ + + +See [vatomic32_await_eq](await_u32.h.md#function-vatomic32_await_eq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** previously read value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_eq_acq` + +```c +vatomic_await_eq_acq(a,c) +``` + + +_Dispatches await_eq calls with acquire memory order._ + + +See [vatomic32_await_eq_acq](await_u32.h.md#function-vatomic32_await_eq_acq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** previously read value + +**Memory ordering**: acquire + + +## Macro `vatomic_await_eq_rlx` + +```c +vatomic_await_eq_rlx(a,c) +``` + + +_Dispatches await_eq calls with relaxed memory order._ + + +See [vatomic32_await_eq_rlx](await_u32.h.md#function-vatomic32_await_eq_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** previously read value + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_neq` + +```c +vatomic_await_neq(a,c) +``` + + +_Dispatches await_neq calls with seq_cst memory order._ + + +See [vatomic32_await_neq](await_u32.h.md#function-vatomic32_await_neq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_neq_acq` + +```c +vatomic_await_neq_acq(a,c) +``` + + +_Dispatches await_neq calls with acquire memory order._ + + +See [vatomic32_await_neq_acq](await_u32.h.md#function-vatomic32_await_neq_acq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_neq_rlx` + +```c +vatomic_await_neq_rlx(a,c) +``` + + +_Dispatches await_neq calls with relaxed memory order._ + + +See [vatomic32_await_neq_rlx](await_u32.h.md#function-vatomic32_await_neq_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_le` + +```c +vatomic_await_le(a,c) +``` + + +_Dispatches await_le calls with seq_cst memory order._ + + +See [vatomic32_await_le](await_u32.h.md#function-vatomic32_await_le) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_le_acq` + +```c +vatomic_await_le_acq(a,c) +``` + + +_Dispatches await_le calls with acquire memory order._ + + +See [vatomic32_await_le_acq](await_u32.h.md#function-vatomic32_await_le_acq) for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_le_rlx` + +```c +vatomic_await_le_rlx(a,c) +``` + + +_Dispatches await_le calls with relaxed memory order._ + + +See [vatomic32_await_le_rlx](await_u32.h.md#function-vatomic32_await_le_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_lt` + +```c +vatomic_await_lt(a,c) +``` + + +_Dispatches await_lt calls with seq_cst memory order._ + + +See [vatomic32_await_lt](await_u32.h.md#function-vatomic32_await_lt) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_lt_acq` + +```c +vatomic_await_lt_acq(a,c) +``` + + +_Dispatches await_lt calls with acquire memory order._ + + +See [vatomic32_await_lt_acq](await_u32.h.md#function-vatomic32_await_lt_acq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_lt_rlx` + +```c +vatomic_await_lt_rlx(a,c) +``` + + +_Dispatches await_lt calls with relaxed memory order._ + + +See [vatomic32_await_lt_rlx](await_u32.h.md#function-vatomic32_await_lt_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_gt` + +```c +vatomic_await_gt(a,c) +``` + + +_Dispatches await_gt calls with seq_cst memory order._ + + +See [vatomic32_await_gt](await_u32.h.md#function-vatomic32_await_gt) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_gt_acq` + +```c +vatomic_await_gt_acq(a,c) +``` + + +_Dispatches await_gt calls with acquire memory order._ + + +See [vatomic32_await_gt_acq](await_u32.h.md#function-vatomic32_await_gt_acq) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_gt_rlx` + +```c +vatomic_await_gt_rlx(a,c) +``` + + +_Dispatches await_gt calls with relaxed memory order._ + + +See [vatomic32_await_gt_rlx](await_u32.h.md#function-vatomic32_await_gt_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_ge` + +```c +vatomic_await_ge(a,c) +``` + + +_Dispatches await_ge calls with seq_cst memory order._ + + +See [vatomic32_await_ge](await_u32.h.md#function-vatomic32_await_ge) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_ge_acq` + +```c +vatomic_await_ge_acq(a,c) +``` + + +_Dispatches await_ge calls with acquire memory order._ + + +See [vatomic32_await_ge_acq](await_u32.h.md#function-vatomic32_await_ge_acq) for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_ge_rlx` + +```c +vatomic_await_ge_rlx(a,c) +``` + + +_Dispatches await_ge calls with relaxed memory order._ + + +See [vatomic32_await_ge_rlx](await_u32.h.md#function-vatomic32_await_ge_rlx) for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_eq_set` + +```c +vatomic_await_eq_set(a,c,v) +``` + + +_Dispatches await_eq_set calls with seq_cst memory order._ + + +See vatomic32_await_eq_set_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_eq_set_acq` + +```c +vatomic_await_eq_set_acq(a,c,v) +``` + + +_Dispatches await_eq_set calls with acquire memory order._ + + +See vatomic32_await_eq_set_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: acquire + + +## Macro `vatomic_await_eq_set_rel` + +```c +vatomic_await_eq_set_rel(a,c,v) +``` + + +_Dispatches await_eq_set calls with release memory order._ + + +See vatomic32_await_eq_set_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: release + + +## Macro `vatomic_await_eq_set_rlx` + +```c +vatomic_await_eq_set_rlx(a,c,v) +``` + + +_Dispatches await_eq_set calls with relaxed memory order._ + + +See vatomic32_await_eq_set_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_eq_sub` + +```c +vatomic_await_eq_sub(a,c,v) +``` + + +_Dispatches await_eq_sub calls with seq_cst memory order._ + + +See vatomic32_await_eq_sub_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_eq_sub_acq` + +```c +vatomic_await_eq_sub_acq(a,c,v) +``` + + +_Dispatches await_eq_sub calls with acquire memory order._ + + +See vatomic32_await_eq_sub_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: acquire + + +## Macro `vatomic_await_eq_sub_rel` + +```c +vatomic_await_eq_sub_rel(a,c,v) +``` + + +_Dispatches await_eq_sub calls with release memory order._ + + +See vatomic32_await_eq_sub_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: release + + +## Macro `vatomic_await_eq_sub_rlx` + +```c +vatomic_await_eq_sub_rlx(a,c,v) +``` + + +_Dispatches await_eq_sub calls with relaxed memory order._ + + +See vatomic32_await_eq_sub_relaxed for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_eq_add` + +```c +vatomic_await_eq_add(a,c,v) +``` + + +_Dispatches await_eq_add calls with seq_cst memory order._ + + +See vatomic32_await_eq_add_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_eq_add_acq` + +```c +vatomic_await_eq_add_acq(a,c,v) +``` + + +_Dispatches await_eq_add calls with acquire memory order._ + + +See vatomic32_await_eq_add_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: acquire + + +## Macro `vatomic_await_eq_add_rel` + +```c +vatomic_await_eq_add_rel(a,c,v) +``` + + +_Dispatches await_eq_add calls with release memory order._ + + +See vatomic32_await_eq_add_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: release + + +## Macro `vatomic_await_eq_add_rlx` + +```c +vatomic_await_eq_add_rlx(a,c,v) +``` + + +_Dispatches await_eq_add calls with relaxed memory order._ + + +See vatomic32_await_eq_add_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** previously read value + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_neq_set` + +```c +vatomic_await_neq_set(a,c,v) +``` + + +_Dispatches await_neq_set calls with seq_cst memory order._ + + +See vatomic32_await_neq_set_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_neq_set_acq` + +```c +vatomic_await_neq_set_acq(a,c,v) +``` + + +_Dispatches await_neq_set calls with acquire memory order._ + + +See vatomic32_await_neq_set_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_neq_set_rel` + +```c +vatomic_await_neq_set_rel(a,c,v) +``` + + +_Dispatches await_neq_set calls with release memory order._ + + +See vatomic32_await_neq_set_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_neq_set_rlx` + +```c +vatomic_await_neq_set_rlx(a,c,v) +``` + + +_Dispatches await_neq_set calls with relaxed memory order._ + + +See vatomic32_await_neq_set_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_neq_sub` + +```c +vatomic_await_neq_sub(a,c,v) +``` + + +_Dispatches await_neq_sub calls with seq_cst memory order._ + + +See vatomic32_await_neq_sub_seq_cst for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_neq_sub_acq` + +```c +vatomic_await_neq_sub_acq(a,c,v) +``` + + +_Dispatches await_neq_sub calls with acquire memory order._ + + +See vatomic32_await_neq_sub_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_neq_sub_rel` + +```c +vatomic_await_neq_sub_rel(a,c,v) +``` + + +_Dispatches await_neq_sub calls with release memory order._ + + +See vatomic32_await_neq_sub_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_neq_sub_rlx` + +```c +vatomic_await_neq_sub_rlx(a,c,v) +``` + + +_Dispatches await_neq_sub calls with relaxed memory order._ + + +See vatomic32_await_neq_sub_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_neq_add` + +```c +vatomic_await_neq_add(a,c,v) +``` + + +_Dispatches await_neq_add calls with seq_cst memory order._ + + +See vatomic32_await_neq_add_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_neq_add_acq` + +```c +vatomic_await_neq_add_acq(a,c,v) +``` + + +_Dispatches await_neq_add calls with acquire memory order._ + + +See vatomic32_await_neq_add_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_neq_add_rel` + +```c +vatomic_await_neq_add_rel(a,c,v) +``` + + +_Dispatches await_neq_add calls with release memory order._ + + +See vatomic32_await_neq_add_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_neq_add_rlx` + +```c +vatomic_await_neq_add_rlx(a,c,v) +``` + + +_Dispatches await_neq_add calls with relaxed memory order._ + + +See vatomic32_await_neq_add_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_le_set` + +```c +vatomic_await_le_set(a,c,v) +``` + + +_Dispatches await_le_set calls with seq_cst memory order._ + + +See vatomic32_await_le_set_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_le_set_acq` + +```c +vatomic_await_le_set_acq(a,c,v) +``` + + +_Dispatches await_le_set calls with acquire memory order._ + + +See vatomic32_await_le_set_acquire for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_le_set_rel` + +```c +vatomic_await_le_set_rel(a,c,v) +``` + + +_Dispatches await_le_set calls with release memory order._ + + +See vatomic32_await_le_set_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_le_set_rlx` + +```c +vatomic_await_le_set_rlx(a,c,v) +``` + + +_Dispatches await_le_set calls with relaxed memory order._ + + +See vatomic32_await_le_set_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_le_sub` + +```c +vatomic_await_le_sub(a,c,v) +``` + + +_Dispatches await_le_sub calls with seq_cst memory order._ + + +See vatomic32_await_le_sub_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_le_sub_acq` + +```c +vatomic_await_le_sub_acq(a,c,v) +``` + + +_Dispatches await_le_sub calls with acquire memory order._ + + +See vatomic32_await_le_sub_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_le_sub_rel` + +```c +vatomic_await_le_sub_rel(a,c,v) +``` + + +_Dispatches await_le_sub calls with release memory order._ + + +See vatomic32_await_le_sub_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_le_sub_rlx` + +```c +vatomic_await_le_sub_rlx(a,c,v) +``` + + +_Dispatches await_le_sub calls with relaxed memory order._ + + +See vatomic32_await_le_sub_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_le_add` + +```c +vatomic_await_le_add(a,c,v) +``` + + +_Dispatches await_le_add calls with seq_cst memory order._ + + +See vatomic32_await_le_add_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_le_add_acq` + +```c +vatomic_await_le_add_acq(a,c,v) +``` + + +_Dispatches await_le_add calls with acquire memory order._ + + +See vatomic32_await_le_add_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_le_add_rel` + +```c +vatomic_await_le_add_rel(a,c,v) +``` + + +_Dispatches await_le_add calls with release memory order._ + + +See vatomic32_await_le_add_release for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_le_add_rlx` + +```c +vatomic_await_le_add_rlx(a,c,v) +``` + + +_Dispatches await_le_add calls with relaxed memory order._ + + +See vatomic32_await_le_add_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_lt_set` + +```c +vatomic_await_lt_set(a,c,v) +``` + + +_Dispatches await_lt_set calls with seq_cst memory order._ + + +See vatomic32_await_lt_set_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_lt_set_acq` + +```c +vatomic_await_lt_set_acq(a,c,v) +``` + + +_Dispatches await_lt_set calls with acquire memory order._ + + +See vatomic32_await_lt_set_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_lt_set_rel` + +```c +vatomic_await_lt_set_rel(a,c,v) +``` + + +_Dispatches await_lt_set calls with release memory order._ + + +See vatomic32_await_lt_set_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_lt_set_rlx` + +```c +vatomic_await_lt_set_rlx(a,c,v) +``` + + +_Dispatches await_lt_set calls with relaxed memory order._ + + +See vatomic32_await_lt_set_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_lt_sub` + +```c +vatomic_await_lt_sub(a,c,v) +``` + + +_Dispatches await_lt_sub calls with seq_cst memory order._ + + +See vatomic32_await_lt_sub_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_lt_sub_acq` + +```c +vatomic_await_lt_sub_acq(a,c,v) +``` + + +_Dispatches await_lt_sub calls with acquire memory order._ + + +See vatomic32_await_lt_sub_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_lt_sub_rel` + +```c +vatomic_await_lt_sub_rel(a,c,v) +``` + + +_Dispatches await_lt_sub calls with release memory order._ + + +See vatomic32_await_lt_sub_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_lt_sub_rlx` + +```c +vatomic_await_lt_sub_rlx(a,c,v) +``` + + +_Dispatches await_lt_sub calls with relaxed memory order._ + + +See vatomic32_await_lt_sub_relaxed for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_lt_add` + +```c +vatomic_await_lt_add(a,c,v) +``` + + +_Dispatches await_lt_add calls with seq_cst memory order._ + + +See vatomic32_await_lt_add_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_lt_add_acq` + +```c +vatomic_await_lt_add_acq(a,c,v) +``` + + +_Dispatches await_lt_add calls with acquire memory order._ + + +See vatomic32_await_lt_add_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_lt_add_rel` + +```c +vatomic_await_lt_add_rel(a,c,v) +``` + + +_Dispatches await_lt_add calls with release memory order._ + + +See vatomic32_await_lt_add_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_lt_add_rlx` + +```c +vatomic_await_lt_add_rlx(a,c,v) +``` + + +_Dispatches await_lt_add calls with relaxed memory order._ + + +See vatomic32_await_lt_add_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_gt_set` + +```c +vatomic_await_gt_set(a,c,v) +``` + + +_Dispatches await_gt_set calls with seq_cst memory order._ + + +See vatomic32_await_gt_set_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_gt_set_acq` + +```c +vatomic_await_gt_set_acq(a,c,v) +``` + + +_Dispatches await_gt_set calls with acquire memory order._ + + +See vatomic32_await_gt_set_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_gt_set_rel` + +```c +vatomic_await_gt_set_rel(a,c,v) +``` + + +_Dispatches await_gt_set calls with release memory order._ + + +See vatomic32_await_gt_set_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_gt_set_rlx` + +```c +vatomic_await_gt_set_rlx(a,c,v) +``` + + +_Dispatches await_gt_set calls with relaxed memory order._ + + +See vatomic32_await_gt_set_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_gt_sub` + +```c +vatomic_await_gt_sub(a,c,v) +``` + + +_Dispatches await_gt_sub calls with seq_cst memory order._ + + +See vatomic32_await_gt_sub_seq_cst for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_gt_sub_acq` + +```c +vatomic_await_gt_sub_acq(a,c,v) +``` + + +_Dispatches await_gt_sub calls with acquire memory order._ + + +See vatomic32_await_gt_sub_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_gt_sub_rel` + +```c +vatomic_await_gt_sub_rel(a,c,v) +``` + + +_Dispatches await_gt_sub calls with release memory order._ + + +See vatomic32_await_gt_sub_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_gt_sub_rlx` + +```c +vatomic_await_gt_sub_rlx(a,c,v) +``` + + +_Dispatches await_gt_sub calls with relaxed memory order._ + + +See vatomic32_await_gt_sub_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_gt_add` + +```c +vatomic_await_gt_add(a,c,v) +``` + + +_Dispatches await_gt_add calls with seq_cst memory order._ + + +See vatomic32_await_gt_add_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_gt_add_acq` + +```c +vatomic_await_gt_add_acq(a,c,v) +``` + + +_Dispatches await_gt_add calls with acquire memory order._ + + +See vatomic32_await_gt_add_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_gt_add_rel` + +```c +vatomic_await_gt_add_rel(a,c,v) +``` + + +_Dispatches await_gt_add calls with release memory order._ + + +See vatomic32_await_gt_add_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_gt_add_rlx` + +```c +vatomic_await_gt_add_rlx(a,c,v) +``` + + +_Dispatches await_gt_add calls with relaxed memory order._ + + +See vatomic32_await_gt_add_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_ge_set` + +```c +vatomic_await_ge_set(a,c,v) +``` + + +_Dispatches await_ge_set calls with seq_cst memory order._ + + +See vatomic32_await_ge_set_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_ge_set_acq` + +```c +vatomic_await_ge_set_acq(a,c,v) +``` + + +_Dispatches await_ge_set calls with acquire memory order._ + + +See vatomic32_await_ge_set_acquire for details. 
+ + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_ge_set_rel` + +```c +vatomic_await_ge_set_rel(a,c,v) +``` + + +_Dispatches await_ge_set calls with release memory order._ + + +See vatomic32_await_ge_set_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_ge_set_rlx` + +```c +vatomic_await_ge_set_rlx(a,c,v) +``` + + +_Dispatches await_ge_set calls with relaxed memory order._ + + +See vatomic32_await_ge_set_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_ge_sub` + +```c +vatomic_await_ge_sub(a,c,v) +``` + + +_Dispatches await_ge_sub calls with seq_cst memory order._ + + +See vatomic32_await_ge_sub_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_ge_sub_acq` + +```c +vatomic_await_ge_sub_acq(a,c,v) +``` + + +_Dispatches await_ge_sub calls with acquire memory order._ + + +See vatomic32_await_ge_sub_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_ge_sub_rel` + +```c +vatomic_await_ge_sub_rel(a,c,v) +``` + + +_Dispatches await_ge_sub calls with release memory order._ + + +See vatomic32_await_ge_sub_release for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: release + + +## Macro `vatomic_await_ge_sub_rlx` + +```c +vatomic_await_ge_sub_rlx(a,c,v) +``` + + +_Dispatches await_ge_sub calls with relaxed memory order._ + + +See vatomic32_await_ge_sub_relaxed for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: relaxed + + +## Macro `vatomic_await_ge_add` + +```c +vatomic_await_ge_add(a,c,v) +``` + + +_Dispatches await_ge_add calls with seq_cst memory order._ + + +See vatomic32_await_ge_add_seq_cst for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: seq_cst + + +## Macro `vatomic_await_ge_add_acq` + +```c +vatomic_await_ge_add_acq(a,c,v) +``` + + +_Dispatches await_ge_add calls with acquire memory order._ + + +See vatomic32_await_ge_add_acquire for details. + + + +**Parameters:** + +- `a`: atomic variable +- `c`: condition value +- `v`: operand value + + +**Returns:** value satisfying condition + +**Memory ordering**: acquire + + +## Macro `vatomic_await_ge_add_rel` + +```c +vatomic_await_ge_add_rel(a,c,v) +``` + + +_Dispatches await_ge_add calls with release memory order._ + + +See vatomic32_await_ge_add_release for details. 
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: condition value
+- `v`: operand value
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: release
+
+
+## Macro `vatomic_await_ge_add_rlx`
+
+```c
+vatomic_await_ge_add_rlx(a,c,v)
+```
+
+
+_Dispatches await_ge_add calls with relaxed memory order._
+
+
+See vatomic32_await_ge_add_relaxed for details.
+
+
+
+**Parameters:**
+
+- `a`: atomic variable
+- `c`: condition value
+- `v`: operand value
+
+
+**Returns:** value satisfying condition
+
+**Memory ordering**: relaxed
+
+
+
+---
diff --git a/doc/man/CMakeLists.txt b/doc/man/CMakeLists.txt
new file mode 100644
index 00000000..6b16893e
--- /dev/null
+++ b/doc/man/CMakeLists.txt
@@ -0,0 +1,17 @@
+# Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+# SPDX-License-Identifier: MIT
+
+set(__DATE__ "Dec 2025")
+set(__VERSION__ ${PROJECT_VERSION})
+
+configure_file(vatomic.7 ${CMAKE_CURRENT_BINARY_DIR}/vatomic.7)
+install(FILES ${CMAKE_CURRENT_BINARY_DIR}/vatomic.7
+    DESTINATION ${CMAKE_INSTALL_MANDIR}/man7)
+
+file(GLOB MAN3 *.3)
+foreach(file ${MAN3})
+    get_filename_component(target ${file} NAME)
+    configure_file(${file} ${CMAKE_CURRENT_BINARY_DIR}/${target})
+    install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${target}
+        DESTINATION ${CMAKE_INSTALL_MANDIR}/man3)
+endforeach()
diff --git a/doc/man/vatomic.7 b/doc/man/vatomic.7
new file mode 100644
index 00000000..2ab71bf2
--- /dev/null
+++ b/doc/man/vatomic.7
@@ -0,0 +1,89 @@
+.\" SPDX-License-Identifier: MIT
+.TH VATOMIC 7 "@__DATE__@" "vatomic @__VERSION__@" "Miscellaneous Information Manual"
+.SH NAME
+vatomic \- formally-verified atomic operations library
+.SH DESCRIPTION
+.PP
+vatomic is a header-only library that provides a rich set of atomic operations
+for mainstream architectures, including ARMv7, ARMv8 (AArch32 and AArch64),
+RISC-V, and x86_64.
+Its interface matches the guarantees described by the VSync Memory Model
+(VMM, specified in \fBvmm.cat\fP), allowing developers to reason about memory
+ordering using the same formal description employed by the library itself.
+.PP
+The library exposes both a low-level core interface and higher-level await and
+dispatch helpers, all implemented in portable C with architecture-specific
+backends when required.
+Because the headers adhere to VMM, programs that rely on vatomic can be
+verified with the Dartagnan model checker by reusing the provided cat
+specification.
+.PP
+All operations have the form \fBprefix_\fPoperation\fB_suffix\fP, where
+\fBprefix_\fP indicates the \fIatomic type\fP and \fB_suffix\fP indicates
+the \fImemory ordering\fP as described below.
+.SH ATOMIC TYPES
+.PP
+vatomic defines fixed-width atomic types matching their unsigned integral
+counterparts: \fBvatomic8_t\fP, \fBvatomic16_t\fP, \fBvatomic32_t\fP,
+\fBvatomic64_t\fP, and \fBvatomicsz_t\fP for size_t.
+Pointer atomics are represented by \fBvatomicptr_t\fP, while
+\fBvatomicptr(T)\fP documents the pointee type at the declaration site.
+Each type pairs with a non-atomic counterpart
+(\fBvuint8_t\fP, \fBvuint16_t\fP, etc.) to describe the underlying storage used
+by the read/modify/write helpers.
+These types map by default to \fBuint8_t\fP, \fBuint16_t\fP, etc., but can be
+overridden when \fBVSYNC_ENABLE_FREESTANDING\fP is defined.
+.SH MEMORY ORDERING
+The memory ordering suffixes in vatomic are:
+\fB_rlx\fP (relaxed), \fB_acq\fP (acquire), and \fB_rel\fP (release).
+Operations without such a suffix are sequentially consistent.
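+.PP
+As a minimal illustrative sketch (\fIflag\fP is assumed to be a
+\fBvatomic32_t\fP and \fIdata\fP a plain shared variable), a release write
+paired with an acquire read forms the classic message-passing pattern:
+.nf
+    /* producer */
+    data = 42;
+    vatomic32_write_rel(&flag, 1);
+
+    /* consumer */
+    while (vatomic32_read_acq(&flag) == 0)
+        ;
+    /* data is guaranteed to be 42 here */
+.fi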
+.SH CONFIGURATION
+.PP
+The header \fBvsync/atomic/config.h\fP defines project-wide switches that can be
+set before including the core header:
+.TP
+.B VATOMIC_DISABLE_POLITE_AWAIT
+Forces busy-wait loops to spin without issuing PAUSE/WFE instructions on x86_64
+and arm64, overriding the default polite behavior.
+.TP
+.B VATOMIC_BUILTINS
+Builds every atomic primitive using the compiler's \fB__atomic\fP builtins.
+When unset, the library selects hand-written assembly backends for arm32/arm64
+when appropriate.
+.TP
+.B VATOMIC_ENABLE_ATOMIC_SC
+Forces the memory ordering of every operation to sequentially consistent,
+regardless of function suffixes.
+This configuration comes in handy when debugging: developers can use it to
+ensure certain bugs are not due to weak memory orderings. It is also useful to
+measure the overhead of disabling weak memory optimizations.
+.TP
+.B VATOMIC_ENABLE_ATOMIC_RLX
+Forces the memory ordering of every operation to relaxed. This configuration
+is only relevant for benchmarking and can introduce weak memory bugs.
+Use it with care.
+.TP
+.B VATOMIC_DISABLE_ARM64_LSE
+Disables use of ARMv8 LSE instructions even when the toolchain advertises them.
+.TP
+.B VATOMIC_ENABLE_ARM64_LXE
+Enables ARMv8 LSE instructions in the slow path of LLSC operations whenever the
+compiler is configured with \fB-march=armv8-a+lse\fP or equivalent.
+.PP
+vatomic is a component of the broader VSync effort and is also used by
+libvsync.
+Refer to the project documentation and the ASPLOS'21 publication for more
+detail on the research behind the library.
+.SH REFERENCES
+.IP \(bu 2
+Oberhauser et al., "VSync: push-button verification and optimization for
+synchronization primitives on weak memory models," ASPLOS 2021.
+.SH FILES
+.TP
+.B vmm.cat
+VSync Memory Model (VMM) specification referenced throughout the interface.
+.SH SEE ALSO
+\fBvmm.cat\fP,
+\fBvsync/atomic/core.h\fP,
+the project README, and the VSync documentation hosted in the \fBdoc/\fP tree.
diff --git a/doc/man/vatomic_add.3 b/doc/man/vatomic_add.3
new file mode 100644
index 00000000..a639acdc
--- /dev/null
+++ b/doc/man/vatomic_add.3
@@ -0,0 +1 @@
+.so man3/vatomic_arith.3
diff --git a/doc/man/vatomic_and.3 b/doc/man/vatomic_and.3
new file mode 100644
index 00000000..333b1e49
--- /dev/null
+++ b/doc/man/vatomic_and.3
@@ -0,0 +1 @@
+.so man3/vatomic_bitwise.3
diff --git a/doc/man/vatomic_arith.3 b/doc/man/vatomic_arith.3
new file mode 100644
index 00000000..d6406c75
--- /dev/null
+++ b/doc/man/vatomic_arith.3
@@ -0,0 +1,54 @@
+.\" SPDX-License-Identifier: MIT
+.TH VATOMIC_ARITH 3 "@__DATE__@" "vatomic @__VERSION__@" "Library Functions Manual"
+.SH NAME
+vatomic_inc, vatomic_dec, vatomic_add, vatomic_sub, vatomic_max
+\- arithmetic vatomic operations
+.SH SYNOPSIS
+.nf
+.B #include <vsync/atomic.h>
+.PP
+.BI "void vatomic_inc(A *" obj );
+.BI "T vatomic_get_inc(A *" obj );
+.BI "T vatomic_inc_get(A *" obj );
+.PP
+.BI "void vatomic_dec(A *" obj );
+.BI "T vatomic_get_dec(A *" obj );
+.BI "T vatomic_dec_get(A *" obj );
+.PP
+.BI "void vatomic_add(A *" obj ", T " value );
+.BI "T vatomic_get_add(A *" obj ", T " value );
+.BI "T vatomic_add_get(A *" obj ", T " value );
+.PP
+.BI "void vatomic_sub(A *" obj ", T " value );
+.BI "T vatomic_get_sub(A *" obj ", T " value );
+.BI "T vatomic_sub_get(A *" obj ", T " value );
+.PP
+.BI "void vatomic_max(A *" obj ", T " value );
+.BI "T vatomic_get_max(A *" obj ", T " value );
+.BI "T vatomic_max_get(A *" obj ", T " value );
+.fi
+.SH DESCRIPTION
+These helpers perform the common arithmetic read/modify/write sequences on a
+vatomic object \fIobj\fP.
+\fBvatomic_inc\fP/\fBvatomic_dec\fP increment/decrement the value by one,
+\fBvatomic_add\fP/\fBvatomic_sub\fP add/subtract the operand \fIvalue\fP, and
+\fBvatomic_max\fP updates \fIobj\fP to \fIvalue\fP when \fIvalue\fP is
+greater than the current content of \fIobj\fP.
+Each macro dispatches to the width-specific implementation
+(\fBvatomic32_add\fP, \fBvatomic64_inc_get\fP, etc.) based on the atomic pointer
+provided.
+.PP
+Unsuffixed forms use sequential consistency; suffixes `_acq`, `_rel`, and `_rlx`
+offer acquire, release, or relaxed semantics when the variant exists.
+.SH RETURN VALUE
+.PP
+\fBvatomic_inc\fP, \fBvatomic_dec\fP, \fBvatomic_add\fP, \fBvatomic_sub\fP, and
+\fBvatomic_max\fP return no value.
+Their `get_*` forms return the old contents of \fIobj\fP before the update.
+Their `*_get` forms return the value after the update.
+.SH TYPES AND VARIANTS
+.PP
+Refer to \fBvatomic_basic(3)\fP for the \fBA\fP/\fBT\fP notation and guidance on
+bypassing the dispatcher via explicit prefixes such as \fBvatomic32_add_get\fP.
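+.SH EXAMPLES
+.PP
+A minimal sketch of a shared counter (names are illustrative):
+.nf
+    vatomic32_t cnt = VATOMIC_INIT(0);
+
+    vatomic32_inc(&cnt);               /* cnt == 1 */
+    vuint32_t old = vatomic32_get_add(&cnt, 10);
+    /* old == 1, cnt == 11 */
+    vuint32_t cur = vatomic32_max_get(&cnt, 100);
+    /* cur == 100, because 100 > 11 */
+.fi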
+.SH SEE ALSO
+\fBvatomic_basic(3)\fP, \fBvatomic_bitwise(3)\fP, \fBvatomic(7)\fP
diff --git a/doc/man/vatomic_await.3 b/doc/man/vatomic_await.3
new file mode 100644
index 00000000..0484d614
--- /dev/null
+++ b/doc/man/vatomic_await.3
@@ -0,0 +1,60 @@
+.\" SPDX-License-Identifier: MIT
+.TH VATOMIC_AWAIT 3 "@__DATE__@" "vatomic @__VERSION__@" "Library Functions Manual"
+.SH NAME
+vatomic_await_* \- polite wait operations for basic conditions
+.SH SYNOPSIS
+.nf
+.B #include <vsync/atomic.h>
+.PP
+.BI "T vatomic_await_eq (const A *" obj ", T " value );
+.BI "T vatomic_await_neq (const A *" obj ", T " value );
+.BI "T vatomic_await_lt (const A *" obj ", T " value );
+.BI "T vatomic_await_le (const A *" obj ", T " value );
+.BI "T vatomic_await_gt (const A *" obj ", T " value );
+.BI "T vatomic_await_ge (const A *" obj ", T " value );
+.PP
+.BI "T vatomic_await_eq_acq (const A *" obj ", T " value );
+.BI "T vatomic_await_eq_rlx (const A *" obj ", T " value );
+\fI...and analogous *_acq / *_rlx variants for each condition\fP
+.fi
+.SH DESCRIPTION
+Await helpers spin politely (issuing PAUSE/WFE unless
+\fBVATOMIC_DISABLE_POLITE_AWAIT\fP is defined) until the specified condition on
+\fIobj\fP becomes true.
+Each macro family dispatches to the correctly sized implementation
+(\fBvatomic32_await_eq\fP, \fBvatomicptr_await_lt\fP, etc.) based on the atomic
+pointer passed in.
+.PP
+Conditions:
+.IP \(bu 2
+\fB_eq\fP / \fB_neq\fP – wait for equality or inequality with \fIvalue\fP.
+.IP \(bu 2
+\fB_lt\fP / \fB_le\fP – wait until the atomic is strictly / weakly less than
+\fIvalue\fP.
+.IP \(bu 2
+\fB_gt\fP / \fB_ge\fP – wait until the atomic is strictly / weakly greater than
+\fIvalue\fP.
+.PP
+Suffixes `_acq` and `_rlx` request acquire or relaxed semantics; when absent,
+the default is sequential consistency. Pointer-specific convenience macros such
+as \fBvatomicptr_await_eq\fP follow the same contract. Boolean helpers
+(\fBvatomic64_await_true\fP, etc.) are aliases for the respective equality
+variants.
+.SH RETURN VALUE
+Each await function returns the value observed when the condition became
+true except for \fBvatomic_await_eq\fP, which returns the last value read
+before satisfying the condition.
+.SH TYPES AND VARIANTS
+.PP
+\fBA\fP is a vatomic storage type (\fBvatomic32_t\fP, \fBvatomic64_t\fP,
+and \fBvatomicptr_t\fP).
+\fBT\fP is the corresponding plain type (integer or pointer).
+All helpers can be called either through the dispatcher macros
+(\fBvatomic_await_eq\fP) or by explicitly selecting a width-specific version
+such as \fBvatomic32_await_eq\fP.
+.PP
+Note that currently the API only offers await functions for the types
+indicated above (32, 64, and ptr).
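+.SH EXAMPLES
+.PP
+A minimal sketch (\fIflag\fP is assumed to be shared) of one thread waiting
+for another to publish a value:
+.nf
+    vatomic32_t flag = VATOMIC_INIT(0);
+
+    /* thread 1: publish */
+    vatomic32_write_rel(&flag, 1);
+
+    /* thread 2: wait until flag != 0 */
+    vuint32_t seen = vatomic32_await_neq_acq(&flag, 0);
+    /* seen == 1 */
+.fi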
+.SH SEE ALSO
+\fBvatomic_await_op(3)\fP, \fBvatomic_basic(3)\fP, \fBvatomic(7)\fP,
+\fBvsync/atomic/config.h\fP
diff --git a/doc/man/vatomic_await_op.3 b/doc/man/vatomic_await_op.3
new file mode 100644
index 00000000..56abf844
--- /dev/null
+++ b/doc/man/vatomic_await_op.3
@@ -0,0 +1,53 @@
+.\" SPDX-License-Identifier: MIT
+.TH VATOMIC_AWAIT_OP 3 "@__DATE__@" "vatomic @__VERSION__@" "Library Functions Manual"
+.SH NAME
+vatomic_await_*_op \- polite wait operations combined with updates
+.SH SYNOPSIS
+.nf
+.B #include <vsync/atomic.h>
+.PP
+.BI "T vatomic_await_eq_add (A *" obj ", T " cond ", T " value );
+.BI "T vatomic_await_eq_sub (A *" obj ", T " cond ", T " value );
+.BI "T vatomic_await_eq_set (A *" obj ", T " cond ", T " value );
+.PP
+.BI "T vatomic_await_neq_add (A *" obj ", T " cond ", T " value );
+.BI "T vatomic_await_neq_sub (A *" obj ", T " cond ", T " value );
+.BI "T vatomic_await_neq_set (A *" obj ", T " cond ", T " value );
+.PP
+.BI "T vatomic_await_lt_add (A *" obj ", T " cond ", T " value );
+\fI...and analogous lt/le/gt/ge variants with *_add, *_sub, *_set\fP
+.PP
+.BI "T vatomic_await_eq_add_acq (A *" obj ", T " cond ", T " value );
+\fI...acquire / release / relaxed suffixes for each operation\fP
+.fi
+.SH DESCRIPTION
+Await-op helpers combine an await condition with a single update applied once
+the condition succeeds. The condition naming follows \fBvatomic_await(3)\fP
+(\fB_eq\fP, \fB_neq\fP, \fB_lt\fP, \fB_le\fP, \fB_gt\fP, \fB_ge\fP), while the
+operation suffix indicates the action performed after the wait:
+.IP \(bu 2
+\fB_add\fP – add \fIvalue\fP to the atomic object.
+.IP \(bu 2
+\fB_sub\fP – subtract \fIvalue\fP from the atomic object.
+.IP \(bu 2
+\fB_set\fP – write \fIvalue\fP into the atomic object.
+.PP
+Each macro dispatches to the width-specific implementation
+(\fBvatomic32_await_eq_add\fP, \fBvatomicptr_await_neq_set\fP, etc.) based on
+the pointer supplied. Unsuffixed forms are sequentially consistent; `_acq`,
+`_rel`, and `_rlx` request acquire, release, and relaxed variants where
+provided.
+.SH RETURN VALUE
+Returns the value observed while checking the condition (i.e., before the
+update is applied). This matches the behavior of the underlying vatomic await
+routines and allows callers to inspect the original contents.
+.SH TYPES AND VARIANTS
+.PP
+\fBA\fP/\fBT\fP follow the same conventions as in \fBvatomic_basic(3)\fP.
+Typed entry points exist for every supported width, for example
+\fBvatomic64_await_eq_sub_rel\fP and \fBvatomicptr_await_eq_set_acq\fP.
+.PP
+Note that currently the API only offers await functions for the types
+indicated above (32, 64, and ptr).
+.SH SEE ALSO
+\fBvatomic_await(3)\fP, \fBvatomic_arith(3)\fP, \fBvatomic(7)\fP
diff --git a/doc/man/vatomic_basic.3 b/doc/man/vatomic_basic.3
new file mode 100644
index 00000000..d7549707
--- /dev/null
+++ b/doc/man/vatomic_basic.3
@@ -0,0 +1,57 @@
+.\" SPDX-License-Identifier: MIT
+.TH VATOMIC_BASIC 3 "@__DATE__@" "vatomic @__VERSION__@" "Library Functions Manual"
+.SH NAME
+vatomic_read, vatomic_write, vatomic_init \- basic vatomic operations
+.SH SYNOPSIS
+.nf
+.B #include <vsync/atomic.h>
+.PP
+.BI "T vatomic_read(A *" obj );
+.BI "T vatomic_read_acq(A *" obj );
+.BI "T vatomic_read_rlx(A *" obj );
+.PP
+.BI "void vatomic_write(A *" obj ", T " value );
+.BI "void vatomic_write_rel(A *" obj ", T " value );
+.BI "void vatomic_write_rlx(A *" obj ", T " value );
+.PP
+.BI "void vatomic_init(A *" obj ", T " value );
+.fi
+.SH DESCRIPTION
+.SS vatomic_read()
+Reads the current value stored in \fIobj\fP and returns it.
+The unsuffixed form uses sequentially consistent ordering; the `_acq` and `_rlx`
+variants apply acquire and relaxed orderings respectively.
+Each macro invocation dispatches to the concrete typed function
+(\fBvatomic8_read\fP, \fBvatomic16_read\fP, etc.) based on the size of
+\fIobj\fP, so callers only need to pass the appropriate atomic type.
+.SS vatomic_write()
+Writes \fIvalue\fP into \fIobj\fP.
+The default (unsuffixed) form uses a sequentially consistent ordering, while
+the `_rel` and `_rlx` suffixes restrict the ordering to release or relaxed,
+respectively.
+.SS vatomic_init()
+Initializes \fIobj\fP with \fIvalue\fP.
+It is an alias for \fBvatomic_write\fP and is convenient when highlighting
+initialization sites (including static helpers such as \fBVATOMIC_INIT\fP).
+.SH RETURN VALUE
+.PP
+\fBvatomic_read\fP returns the current value stored in \fIobj\fP.
+\fBvatomic_write\fP and \fBvatomic_init\fP return no value.
+.SH TYPES AND VARIANTS
+.PP
+\fBA\fP denotes one of the vatomic storage types (\fBvatomic8_t\fP,
+\fBvatomic16_t\fP, \fBvatomic32_t\fP, \fBvatomic64_t\fP, \fBvatomicsz_t\fP, or
+\fBvatomicptr_t\fP), while \fBT\fP is the corresponding non-atomic type
+(\fBvuint8_t\fP, \fBvuint16_t\fP, \fBvuint32_t\fP, \fBvuint64_t\fP,
+\fBvsize_t\fP, or \fBvoid *\fP).
+The dispatcher macros select the appropriate typed implementation based on the
+pointer you pass.
+.PP
+When you need to lock a call to a specific width (for example, inside a public
+header) include \fBvsync/atomic/core.h\fP and use the concrete prefix,
+e.g. \fBvatomic32_read_acq\fP, \fBvatomic64_write_rel\fP, or
+\fBvatomicptr_init\fP.
+These variants bypass the dispatcher in \fBvsync/atomic/dispatch.h\fP and can
+help catch mismatched types during compilation.
+.SH SEE ALSO
+\fBvatomic(7)\fP, \fBvsync/atomic/core.h\fP, \fBvsync/atomic/dispatch.h\fP
diff --git a/doc/man/vatomic_bitwise.3 b/doc/man/vatomic_bitwise.3
new file mode 100644
index 00000000..6d245f87
--- /dev/null
+++ b/doc/man/vatomic_bitwise.3
@@ -0,0 +1,47 @@
+.\" SPDX-License-Identifier: MIT
+.TH VATOMIC_BITWISE 3 "@__DATE__@" "vatomic @__VERSION__@" "Library Functions Manual"
+.SH NAME
+vatomic_xor, vatomic_and, vatomic_or \- bitwise vatomic operations
+.SH SYNOPSIS
+.nf
+.B #include <vsync/atomic.h>
+.PP
+.BI "void vatomic_xor(A *" obj ", T " mask );
+.BI "T vatomic_get_xor(A *" obj ", T " mask );
+.BI "T vatomic_xor_get(A *" obj ", T " mask );
+.PP
+.BI "void vatomic_and(A *" obj ", T " mask );
+.BI "T vatomic_get_and(A *" obj ", T " mask );
+.BI "T vatomic_and_get(A *" obj ", T " mask );
+.PP
+.BI "void vatomic_or(A *" obj ", T " mask );
+.BI "T vatomic_get_or(A *" obj ", T " mask );
+.BI "T vatomic_or_get(A *" obj ", T " mask );
+.fi
+.SH DESCRIPTION
+These macros provide bitwise read/modify/write operations.
+\fBvatomic_xor\fP, \fBvatomic_and\fP, and \fBvatomic_or\fP apply the specified
+mask to \fIobj\fP.
+As with other vatomic helpers, each macro dispatches to the size-specific
+implementation (\fBvatomic32_get_and\fP, \fBvatomic64_or\fP, etc.) based on the
+pointer type.
+.PP
+Sequential consistency is the default ordering; acquire, release, and relaxed
+variants are available via the `_acq`, `_rel`, and `_rlx` suffixes (see the
+corresponding macros in \fBvsync/atomic/dispatch.h\fP).
+`get_*` versions return the previous value, while `*_get` versions return the
+value after applying the operation.
+.SH RETURN VALUE
+.PP
+The plain \fBvatomic_xor\fP/\fB_and\fP/\fB_or\fP functions return no value.
+`get_*` variants return the prior contents of \fIobj\fP and `*_get` variants
+return the updated contents.
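+.SH EXAMPLES
+.PP
+A minimal sketch of maintaining a flag word (mask values are illustrative):
+.nf
+    vatomic32_t flags = VATOMIC_INIT(0);
+
+    vatomic32_or(&flags, 0x1U);        /* set bit 0 */
+    vatomic32_xor(&flags, 0x2U);       /* toggle bit 1 */
+    vuint32_t prev = vatomic32_get_and(&flags, 0x1U);
+    /* prev == 0x3, flags == 0x1 */
+.fi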
+.SH TYPES AND VARIANTS
+.PP
+See \fBvatomic_basic(3)\fP for the notation of \fBA\fP/\fBT\fP and for guidance
+on calling the explicit typed functions, e.g. \fBvatomic64_or_get\fP.
+.PP
+For the \fBmax\fP family, refer to \fBvatomic_arith(3)\fP.
+.SH SEE ALSO
+\fBvatomic_arith(3)\fP, \fBvatomic(7)\fP
diff --git a/doc/man/vatomic_cmpxchg.3 b/doc/man/vatomic_cmpxchg.3
new file mode 100644
index 00000000..fd095132
--- /dev/null
+++ b/doc/man/vatomic_cmpxchg.3
@@ -0,0 +1,54 @@
+.\" SPDX-License-Identifier: MIT
+.TH VATOMIC_CMPXCHG 3 "@__DATE__@" "vatomic @__VERSION__@" "Library Functions Manual"
+.SH NAME
+vatomic_cmpxchg \- vatomic compare-and-exchange operations
+.SH SYNOPSIS
+.nf
+.B #include <vsync/atomic.h>
+.PP
+.BI "T vatomic_cmpxchg(A *" obj ", T " expect ", T " value );
+.BI "T vatomic_cmpxchg_acq(A *" obj ", T " expect ", T " value );
+.BI "T vatomic_cmpxchg_rel(A *" obj ", T " expect ", T " value );
+.BI "T vatomic_cmpxchg_rlx(A *" obj ", T " expect ", T " value );
+.fi
+.SH DESCRIPTION
+Reads \fIobj\fP and compares it against \fIexpect\fP.
+If they match, writes \fIvalue\fP into \fIobj\fP.
+Either way, the previous contents are returned so callers can detect whether
+the operation succeeded.
+The unsuffixed form uses sequential consistency; `_acq`, `_rel`, and `_rlx`
+provide acquire, release, and relaxed semantics respectively.
+Each dispatcher macro invokes the appropriate typed implementation
+(\fBvatomic32_cmpxchg\fP, \fBvatomic64_cmpxchg\fP, etc.) according to the
+atomic pointer.
+.SH RETURN VALUE
+Returns the old contents of \fIobj\fP before the comparison.
+If the return value equals \fIexpect\fP, the exchange succeeded.
+.SH TYPES AND VARIANTS
+.PP
+See \fBvatomic_basic(3)\fP for the \fBA\fP/\fBT\fP notation and for explicit
+typed alternatives such as \fBvatomic32_cmpxchg_acq\fP.
+.SH EXAMPLES
+.PP
+Here are some typical usage patterns.
+.PP
+Checking for success:
+.nf
+    if (vatomic_cmpxchg(&obj, exp, new) == exp) {
+        // success
+    } else {
+        // failure
+    }
+.fi
+.PP
+Retry until the compare-exchange succeeds:
+.nf
+    vatomic32_t obj = VATOMIC_INIT(0);
+    vuint32_t exp = 0;
+    vuint32_t old = 0;
+    vuint32_t new = 123;
+    do {
+        exp = old;
+        old = vatomic_cmpxchg(&obj, exp, new);
+    } while (old != exp);
+.fi
+.SH SEE ALSO
+\fBvatomic_basic(3)\fP, \fBvatomic_xchg(3)\fP, \fBvatomic(7)\fP
diff --git a/doc/man/vatomic_dec.3 b/doc/man/vatomic_dec.3
new file mode 100644
index 00000000..a639acdc
--- /dev/null
+++ b/doc/man/vatomic_dec.3
@@ -0,0 +1 @@
+.so man3/vatomic_arith.3
diff --git a/doc/man/vatomic_fence.3 b/doc/man/vatomic_fence.3
new file mode 100644
index 00000000..e58897aa
--- /dev/null
+++ b/doc/man/vatomic_fence.3
@@ -0,0 +1,24 @@
+.\" SPDX-License-Identifier: MIT
+.TH VATOMIC_FENCE 3 "@__DATE__@" "vatomic @__VERSION__@" "Library Functions Manual"
+.SH NAME
+vatomic_fence, vatomic_fence_acq, vatomic_fence_rel, vatomic_fence_rlx
+\- VSync atomic fences
+.SH SYNOPSIS
+.nf
+.B #include <vsync/atomic.h>
+.PP
+.BI "void vatomic_fence(void);"
+.BI "void vatomic_fence_acq(void);"
+.BI "void vatomic_fence_rel(void);"
+.BI "void vatomic_fence_rlx(void);"
+.fi
+.SH DESCRIPTION
+Issues a memory fence with the requested ordering.
+The default \fBvatomic_fence\fP emits a sequentially consistent barrier.
+\fBvatomic_fence_acq\fP and \fBvatomic_fence_rel\fP implement acquire and
+release semantics, while \fBvatomic_fence_rlx\fP expands to a relaxed fence
+(typically a compiler barrier or NOP depending on configuration).
+.SH RETURN VALUE
+None.
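+.SH EXAMPLES
+.PP
+A minimal sketch (\fIdata\fP and \fIflag\fP are assumed to be shared
+\fBvatomic32_t\fP variables) pairing relaxed accesses with explicit fences:
+.nf
+    /* producer */
+    vatomic32_write_rlx(&data, 42);
+    vatomic_fence_rel();  /* order the data write before the flag write */
+    vatomic32_write_rlx(&flag, 1);
+
+    /* consumer */
+    while (vatomic32_read_rlx(&flag) == 0)
+        ;
+    vatomic_fence_acq();  /* order the flag read before the data read */
+    vuint32_t v = vatomic32_read_rlx(&data); /* v == 42 */
+.fi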
+.SH SEE ALSO
+\fBvatomic_read(3)\fP, \fBvatomic(7)\fP, \fBvsync/atomic/config.h\fP
diff --git a/doc/man/vatomic_inc.3 b/doc/man/vatomic_inc.3
new file mode 100644
index 00000000..a639acdc
--- /dev/null
+++ b/doc/man/vatomic_inc.3
@@ -0,0 +1 @@
+.so man3/vatomic_arith.3
diff --git a/doc/man/vatomic_init.3 b/doc/man/vatomic_init.3
new file mode 100644
index 00000000..c2bd4b87
--- /dev/null
+++ b/doc/man/vatomic_init.3
@@ -0,0 +1 @@
+.so man3/vatomic_basic.3
diff --git a/doc/man/vatomic_max.3 b/doc/man/vatomic_max.3
new file mode 100644
index 00000000..a639acdc
--- /dev/null
+++ b/doc/man/vatomic_max.3
@@ -0,0 +1 @@
+.so man3/vatomic_arith.3
diff --git a/doc/man/vatomic_or.3 b/doc/man/vatomic_or.3
new file mode 100644
index 00000000..333b1e49
--- /dev/null
+++ b/doc/man/vatomic_or.3
@@ -0,0 +1 @@
+.so man3/vatomic_bitwise.3
diff --git a/doc/man/vatomic_read.3 b/doc/man/vatomic_read.3
new file mode 100644
index 00000000..c2bd4b87
--- /dev/null
+++ b/doc/man/vatomic_read.3
@@ -0,0 +1 @@
+.so man3/vatomic_basic.3
diff --git a/doc/man/vatomic_sub.3 b/doc/man/vatomic_sub.3
new file mode 100644
index 00000000..a639acdc
--- /dev/null
+++ b/doc/man/vatomic_sub.3
@@ -0,0 +1 @@
+.so man3/vatomic_arith.3
diff --git a/doc/man/vatomic_write.3 b/doc/man/vatomic_write.3
new file mode 100644
index 00000000..c2bd4b87
--- /dev/null
+++ b/doc/man/vatomic_write.3
@@ -0,0 +1 @@
+.so man3/vatomic_basic.3
diff --git a/doc/man/vatomic_xchg.3 b/doc/man/vatomic_xchg.3
new file mode 100644
index 00000000..48ff3494
--- /dev/null
+++ b/doc/man/vatomic_xchg.3
@@ -0,0 +1,30 @@
+.\" SPDX-License-Identifier: MIT
+.TH VATOMIC_XCHG 3 "@__DATE__@" "vatomic @__VERSION__@" "Library Functions Manual"
+.SH NAME
+vatomic_xchg \- vatomic exchange operations
+.SH SYNOPSIS
+.nf
+.B #include <vsync/atomic.h>
+.PP
+.BI "T vatomic_xchg(A *" obj ", T " value );
+.BI "T vatomic_xchg_acq(A *" obj ", T " value );
+.BI "T vatomic_xchg_rel(A *" obj ", T " value );
+.BI "T vatomic_xchg_rlx(A *" obj ", T " value );
+.fi
+.SH DESCRIPTION
+Atomically writes \fIvalue\fP into \fIobj\fP and returns the previous contents.
+The unsuffixed form enforces sequential consistency;
+`_acq`, `_rel`, and `_rlx` restrict the exchange to acquire, release, or relaxed
+orderings respectively.
+Each macro dispatches to the width-specific function
+(\fBvatomic8_xchg\fP, \fBvatomic16_xchg\fP, etc.) based on the atomic pointer
+you pass.
+.SH RETURN VALUE
+Returns the value that was stored in \fIobj\fP immediately before the exchange.
+.SH TYPES AND VARIANTS
+.PP
+See \fBvatomic_basic(3)\fP for the notation of \fBA\fP and \fBT\fP and guidance
+on bypassing the dispatcher with explicit prefixes (for example,
+\fBvatomic32_xchg_rel\fP).
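+.SH EXAMPLES
+.PP
+A minimal sketch of a test-and-set spinlock built on exchange (the locking
+protocol is illustrative, not part of the library):
+.nf
+    vatomic32_t lock = VATOMIC_INIT(0);
+
+    /* acquire: spin until the previous value was 0 */
+    while (vatomic32_xchg_acq(&lock, 1) != 0)
+        ;
+    /* ... critical section ... */
+    vatomic32_write_rel(&lock, 0);     /* release */
+.fi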
+.SH SEE ALSO
+\fBvatomic_basic(3)\fP, \fBvatomic_cmpxchg(3)\fP, \fBvatomic(7)\fP
diff --git a/doc/man/vatomic_xor.3 b/doc/man/vatomic_xor.3
new file mode 100644
index 00000000..333b1e49
--- /dev/null
+++ b/doc/man/vatomic_xor.3
@@ -0,0 +1 @@
+.so man3/vatomic_bitwise.3
diff --git a/examples/eg_core.c b/examples/eg_core.c
new file mode 100644
index 00000000..f3582957
--- /dev/null
+++ b/examples/eg_core.c
@@ -0,0 +1,30 @@
+#include <vsync/atomic.h>
+#include <assert.h>
+#include <stdio.h>
+
+vatomic32_t var;
+vatomicptr_t ptr;
+int x;
+
+void
+foo(void)
+{
+    vatomic32_write(&var, 1000);
+    vatomic32_add(&var, 10);
+    vuint32_t val = vatomic32_read(&var);
+    assert(val == 1010);
+    assert(vatomic32_cmpxchg(&var, val, 0) == val);
+
+    x = 123;
+    vatomicptr_write(&ptr, &x);
+    int *y = vatomicptr_read(&ptr);
+    (*y)++;
+    assert(*y == x);
+
+    printf("passed\n");
+}
+int
+main(void)
+{
+    foo();
+}
diff --git a/include/vsync/atomic.hpp b/include/vsync/atomic.hpp
new file mode 100644
index 00000000..92ea1393
--- /dev/null
+++ b/include/vsync/atomic.hpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef VSYNC_ATOMIC_HPP
+#define VSYNC_ATOMIC_HPP
+
+#include <cassert>
+extern "C" {
+#include <vsync/atomic/core.h>
+}
+namespace vsync
+{
+    typedef enum memory_order {
+        memory_order_relaxed,
+        memory_order_consume,
+        memory_order_acquire,
+        memory_order_release,
+        memory_order_acq_rel,
+        memory_order_seq_cst
+    } memory_order;
+
+    template <typename T> struct atomic;
+
+    inline void atomic_thread_fence(vsync::memory_order order)
+    {
+        switch (order) {
+            case memory_order_consume:
+            case memory_order_acquire:
+                vatomic_fence_acq();
+                break;
+            case memory_order_relaxed:
+                vatomic_fence_rlx();
+                break;
+            case memory_order_release:
+                vatomic_fence_rel();
+                break;
+            case memory_order_acq_rel:
+            case memory_order_seq_cst:
+                vatomic_fence();
+                break;
+            default:
+                assert(0 && "Order is not supported");
+        }
+    }
+} // namespace vsync
+
+#include "atomic/core_u8.hpp"
+#include "atomic/core_bool.hpp"
+#include "atomic/core_u16.hpp"
+#include "atomic/core_u32.hpp"
+#if defined(__APPLE__)
+    #include "atomic/core_sz.hpp"
+#endif
+#include "atomic/core_u64.hpp"
+#include "atomic/core_ptr.hpp"
+#include "atomic/core_s8.hpp"
+#include "atomic/core_s16.hpp"
+#include "atomic/core_s32.hpp"
+#include "atomic/core_s64.hpp"
+#endif
diff --git a/include/vsync/atomic/compat.h b/include/vsync/atomic/compat.h
deleted file mode 100644
index 259432e0..00000000
--- a/include/vsync/atomic/compat.h
+++ /dev/null
@@ -1,2095 +0,0 @@
-/*
- * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
- * SPDX-License-Identifier: MIT
- * Author: Huawei Dresden Research Center
- */
-#ifndef VATOMIC_COMPAT_H
-#define VATOMIC_COMPAT_H
-/*******************************************************************************
- * @file compat.h
- * @brief Set of functions backward compatible with libvsync 1.0
- *
- * These functions are deprecated. Please, use the new vatomic interface.
- ******************************************************************************/
-/* !!!Warning: File generated by tmpl; DO NOT EDIT.!!!
*/ - -#include - -/* atomic initializer */ -#define ATOMIC_INIT(v) VATOMIC_INIT(v) - -/* type mapping */ -typedef vatomic32_t V_DEPRECATED atomic_t; -typedef vatomic32_t V_DEPRECATED atomic32_t; -typedef vatomic64_t V_DEPRECATED atomic64_t; -typedef vatomicptr_t V_DEPRECATED atomicptr_t; - -/******************************************************************************* - * read/write/init functions - ******************************************************************************/ - -/** - * Deprecated: See vatomic32_read - */ -static inline vuint32_t V_DEPRECATED -atomic_read(atomic_t *a) -{ - return vatomic32_read(a); -} - -/** - * Deprecated: See vatomic32_read_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_read_acq(atomic_t *a) -{ - return vatomic32_read_acq(a); -} - -/** - * Deprecated: See vatomic32_read_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_read_rlx(atomic_t *a) -{ - return vatomic32_read_rlx(a); -} - -/** - * Deprecated: See vatomic32_read - */ -static inline vuint32_t V_DEPRECATED -atomic32_read(atomic32_t *a) -{ - return vatomic32_read(a); -} - -/** - * Deprecated: See vatomic32_read_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_read_acq(atomic32_t *a) -{ - return vatomic32_read_acq(a); -} - -/** - * Deprecated: See vatomic32_read_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_read_rlx(atomic32_t *a) -{ - return vatomic32_read_rlx(a); -} - -/** - * Deprecated: See vatomic64_read - */ -static inline vuint64_t V_DEPRECATED -atomic64_read(atomic64_t *a) -{ - return vatomic64_read(a); -} - -/** - * Deprecated: See vatomic64_read_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_read_acq(atomic64_t *a) -{ - return vatomic64_read_acq(a); -} - -/** - * Deprecated: See vatomic64_read_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_read_rlx(atomic64_t *a) -{ - return vatomic64_read_rlx(a); -} - -/** - * Deprecated: See vatomicptr_read - */ -static inline void *V_DEPRECATED -atomicptr_read(atomicptr_t *a) -{ - return vatomicptr_read(a); -} - -/** - * Deprecated: See vatomicptr_read_acq - */ -static inline void *V_DEPRECATED -atomicptr_read_acq(atomicptr_t *a) -{ - return vatomicptr_read_acq(a); -} - -/** - * Deprecated: See vatomicptr_read_rlx - */ -static inline void *V_DEPRECATED -atomicptr_read_rlx(atomicptr_t *a) -{ - return vatomicptr_read_rlx(a); -} - -/** - * Deprecated: See vatomic32_write - */ -static inline void V_DEPRECATED -atomic_write(atomic_t *a, vuint32_t v) -{ - vatomic32_write(a, v); -} - -/** - * Deprecated: See vatomic32_write_rel - */ -static inline void V_DEPRECATED -atomic_write_rel(atomic_t *a, vuint32_t v) -{ - vatomic32_write_rel(a, v); -} - -/** - * Deprecated: See vatomic32_write_rlx - */ -static inline void V_DEPRECATED -atomic_write_rlx(atomic_t *a, vuint32_t v) -{ - vatomic32_write_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_write - */ -static inline void V_DEPRECATED -atomic32_write(atomic32_t *a, vuint32_t v) -{ - vatomic32_write(a, v); -} - -/** - * Deprecated: See vatomic32_write_rel - */ -static inline void V_DEPRECATED -atomic32_write_rel(atomic32_t *a, vuint32_t v) -{ - vatomic32_write_rel(a, v); -} - -/** - * Deprecated: See vatomic32_write_rlx - */ -static inline void V_DEPRECATED -atomic32_write_rlx(atomic32_t *a, vuint32_t v) -{ - vatomic32_write_rlx(a, v); -} - -/** - * Deprecated: See vatomic64_write - */ -static inline void V_DEPRECATED -atomic64_write(atomic64_t *a, vuint64_t v) -{ - vatomic64_write(a, v); -} - -/** - * Deprecated: See vatomic64_write_rel - */ -static inline 
void V_DEPRECATED -atomic64_write_rel(atomic64_t *a, vuint64_t v) -{ - vatomic64_write_rel(a, v); -} - -/** - * Deprecated: See vatomic64_write_rlx - */ -static inline void V_DEPRECATED -atomic64_write_rlx(atomic64_t *a, vuint64_t v) -{ - vatomic64_write_rlx(a, v); -} - -/** - * Deprecated: See vatomicptr_write - */ -static inline void V_DEPRECATED -atomicptr_write(atomicptr_t *a, void *v) -{ - vatomicptr_write(a, v); -} - -/** - * Deprecated: See vatomicptr_write_rel - */ -static inline void V_DEPRECATED -atomicptr_write_rel(atomicptr_t *a, void *v) -{ - vatomicptr_write_rel(a, v); -} - -/** - * Deprecated: See vatomicptr_write_rlx - */ -static inline void V_DEPRECATED -atomicptr_write_rlx(atomicptr_t *a, void *v) -{ - vatomicptr_write_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_init - */ -static inline void V_DEPRECATED -atomic_init(atomic_t *a, vuint32_t v) -{ - vatomic32_init(a, v); -} - -/** - * Deprecated: See vatomic32_init - */ -static inline void V_DEPRECATED -atomic32_init(atomic32_t *a, vuint32_t v) -{ - vatomic32_init(a, v); -} - -/** - * Deprecated: See vatomic64_init - */ -static inline void V_DEPRECATED -atomic64_init(atomic64_t *a, vuint64_t v) -{ - vatomic64_init(a, v); -} - -/** - * Deprecated: See vatomicptr_init - */ -static inline void V_DEPRECATED -atomicptr_init(atomicptr_t *a, void *v) -{ - vatomicptr_init(a, v); -} - -/******************************************************************************* - * cmpxchg/xchg functions - ******************************************************************************/ - -/** - * Deprecated: See vatomic32_xchg - */ -static inline vuint32_t V_DEPRECATED -atomic_xchg(atomic_t *a, vuint32_t v) -{ - return vatomic32_xchg(a, v); -} - -/** - * Deprecated: See vatomic32_cmpxchg - */ -static inline vuint32_t V_DEPRECATED -atomic_cmpxchg(atomic_t *a, vuint32_t c, vuint32_t v) -{ - return vatomic32_cmpxchg(a, c, v); -} - -/** - * Deprecated: See vatomic32_xchg_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_xchg_acq(atomic_t *a, vuint32_t v) -{ - return vatomic32_xchg_acq(a, v); -} - -/** - * Deprecated: See vatomic32_cmpxchg_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_cmpxchg_acq(atomic_t *a, vuint32_t c, vuint32_t v) -{ - return vatomic32_cmpxchg_acq(a, c, v); -} - -/** - * Deprecated: See vatomic32_xchg_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_xchg_rel(atomic_t *a, vuint32_t v) -{ - return vatomic32_xchg_rel(a, v); -} - -/** - * Deprecated: See vatomic32_cmpxchg_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_cmpxchg_rel(atomic_t *a, vuint32_t c, vuint32_t v) -{ - return vatomic32_cmpxchg_rel(a, c, v); -} - -/** - * Deprecated: See vatomic32_xchg_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_xchg_rlx(atomic_t *a, vuint32_t v) -{ - return vatomic32_xchg_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_cmpxchg_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_cmpxchg_rlx(atomic_t *a, vuint32_t c, vuint32_t v) -{ - return vatomic32_cmpxchg_rlx(a, c, v); -} - -/** - * Deprecated: See vatomic32_xchg - */ -static inline vuint32_t V_DEPRECATED -atomic32_xchg(atomic32_t *a, vuint32_t v) -{ - return vatomic32_xchg(a, v); -} - -/** - * Deprecated: See vatomic32_cmpxchg - */ -static inline vuint32_t V_DEPRECATED -atomic32_cmpxchg(atomic32_t *a, vuint32_t c, vuint32_t v) -{ - return vatomic32_cmpxchg(a, c, v); -} - -/** - * Deprecated: See vatomic32_xchg_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_xchg_acq(atomic32_t *a, vuint32_t v) -{ - return vatomic32_xchg_acq(a, v); -} 
- -/** - * Deprecated: See vatomic32_cmpxchg_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_cmpxchg_acq(atomic32_t *a, vuint32_t c, vuint32_t v) -{ - return vatomic32_cmpxchg_acq(a, c, v); -} - -/** - * Deprecated: See vatomic32_xchg_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_xchg_rel(atomic32_t *a, vuint32_t v) -{ - return vatomic32_xchg_rel(a, v); -} - -/** - * Deprecated: See vatomic32_cmpxchg_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_cmpxchg_rel(atomic32_t *a, vuint32_t c, vuint32_t v) -{ - return vatomic32_cmpxchg_rel(a, c, v); -} - -/** - * Deprecated: See vatomic32_xchg_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_xchg_rlx(atomic32_t *a, vuint32_t v) -{ - return vatomic32_xchg_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_cmpxchg_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_cmpxchg_rlx(atomic32_t *a, vuint32_t c, vuint32_t v) -{ - return vatomic32_cmpxchg_rlx(a, c, v); -} - -/** - * Deprecated: See vatomic64_xchg - */ -static inline vuint64_t V_DEPRECATED -atomic64_xchg(atomic64_t *a, vuint64_t v) -{ - return vatomic64_xchg(a, v); -} - -/** - * Deprecated: See vatomic64_cmpxchg - */ -static inline vuint64_t V_DEPRECATED -atomic64_cmpxchg(atomic64_t *a, vuint64_t c, vuint64_t v) -{ - return vatomic64_cmpxchg(a, c, v); -} - -/** - * Deprecated: See vatomic64_xchg_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_xchg_acq(atomic64_t *a, vuint64_t v) -{ - return vatomic64_xchg_acq(a, v); -} - -/** - * Deprecated: See vatomic64_cmpxchg_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_cmpxchg_acq(atomic64_t *a, vuint64_t c, vuint64_t v) -{ - return vatomic64_cmpxchg_acq(a, c, v); -} - -/** - * Deprecated: See vatomic64_xchg_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_xchg_rel(atomic64_t *a, vuint64_t v) -{ - return vatomic64_xchg_rel(a, v); -} - -/** - * Deprecated: See vatomic64_cmpxchg_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_cmpxchg_rel(atomic64_t *a, vuint64_t c, vuint64_t v) -{ - return vatomic64_cmpxchg_rel(a, c, v); -} - -/** - * Deprecated: See vatomic64_xchg_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_xchg_rlx(atomic64_t *a, vuint64_t v) -{ - return vatomic64_xchg_rlx(a, v); -} - -/** - * Deprecated: See vatomic64_cmpxchg_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_cmpxchg_rlx(atomic64_t *a, vuint64_t c, vuint64_t v) -{ - return vatomic64_cmpxchg_rlx(a, c, v); -} - -/** - * Deprecated: See vatomicptr_xchg - */ -static inline void *V_DEPRECATED -atomicptr_xchg(atomicptr_t *a, void *v) -{ - return vatomicptr_xchg(a, v); -} - -/** - * Deprecated: See vatomicptr_cmpxchg - */ -static inline void *V_DEPRECATED -atomicptr_cmpxchg(atomicptr_t *a, void *c, void *v) -{ - return vatomicptr_cmpxchg(a, c, v); -} - -/** - * Deprecated: See vatomicptr_xchg_acq - */ -static inline void *V_DEPRECATED -atomicptr_xchg_acq(atomicptr_t *a, void *v) -{ - return vatomicptr_xchg_acq(a, v); -} - -/** - * Deprecated: See vatomicptr_cmpxchg_acq - */ -static inline void *V_DEPRECATED -atomicptr_cmpxchg_acq(atomicptr_t *a, void *c, void *v) -{ - return vatomicptr_cmpxchg_acq(a, c, v); -} - -/** - * Deprecated: See vatomicptr_xchg_rel - */ -static inline void *V_DEPRECATED -atomicptr_xchg_rel(atomicptr_t *a, void *v) -{ - return vatomicptr_xchg_rel(a, v); -} - -/** - * Deprecated: See vatomicptr_cmpxchg_rel - */ -static inline void *V_DEPRECATED -atomicptr_cmpxchg_rel(atomicptr_t *a, void *c, void *v) -{ - return vatomicptr_cmpxchg_rel(a, c, v); -} - -/** - * Deprecated: 
See vatomicptr_xchg_rlx - */ -static inline void *V_DEPRECATED -atomicptr_xchg_rlx(atomicptr_t *a, void *v) -{ - return vatomicptr_xchg_rlx(a, v); -} - -/** - * Deprecated: See vatomicptr_cmpxchg_rlx - */ -static inline void *V_DEPRECATED -atomicptr_cmpxchg_rlx(atomicptr_t *a, void *c, void *v) -{ - return vatomicptr_cmpxchg_rlx(a, c, v); -} - -/******************************************************************************* - * add, sub, or, and, xor - ******************************************************************************/ - -/** - * Deprecated: See vatomic32_add_get - */ -static inline vuint32_t V_DEPRECATED -atomic_add(atomic_t *a, vuint32_t v) -{ - return vatomic32_add_get(a, v); -} - -/** - * Deprecated: See vatomic32_get_add - */ -static inline vuint32_t V_DEPRECATED -atomic_get_add(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_add(a, v); -} - -/** - * Deprecated: See vatomic32_add_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_add_acq(atomic_t *a, vuint32_t v) -{ - return vatomic32_add_get_acq(a, v); -} - -/** - * Deprecated: See vatomic32_get_add_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_get_add_acq(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_add_acq(a, v); -} - -/** - * Deprecated: See vatomic32_add_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_add_rel(atomic_t *a, vuint32_t v) -{ - return vatomic32_add_get_rel(a, v); -} - -/** - * Deprecated: See vatomic32_get_add_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_get_add_rel(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_add_rel(a, v); -} - -/** - * Deprecated: See vatomic32_add_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_add_rlx(atomic_t *a, vuint32_t v) -{ - return vatomic32_add_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_get_add_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_get_add_rlx(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_add_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_sub_get - */ -static inline vuint32_t V_DEPRECATED -atomic_sub(atomic_t *a, vuint32_t v) -{ - return vatomic32_sub_get(a, v); -} - -/** - * Deprecated: See vatomic32_get_sub - */ -static inline vuint32_t V_DEPRECATED -atomic_get_sub(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_sub(a, v); -} - -/** - * Deprecated: See vatomic32_sub_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_sub_acq(atomic_t *a, vuint32_t v) -{ - return vatomic32_sub_get_acq(a, v); -} - -/** - * Deprecated: See vatomic32_get_sub_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_get_sub_acq(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_sub_acq(a, v); -} - -/** - * Deprecated: See vatomic32_sub_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_sub_rel(atomic_t *a, vuint32_t v) -{ - return vatomic32_sub_get_rel(a, v); -} - -/** - * Deprecated: See vatomic32_get_sub_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_get_sub_rel(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_sub_rel(a, v); -} - -/** - * Deprecated: See vatomic32_sub_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_sub_rlx(atomic_t *a, vuint32_t v) -{ - return vatomic32_sub_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_get_sub_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_get_sub_rlx(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_sub_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_xor_get - */ -static inline vuint32_t V_DEPRECATED -atomic_xor(atomic_t *a, vuint32_t v) -{ - return vatomic32_xor_get(a, v); -} - -/** - * 
Deprecated: See vatomic32_get_xor - */ -static inline vuint32_t V_DEPRECATED -atomic_get_xor(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_xor(a, v); -} - -/** - * Deprecated: See vatomic32_xor_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_xor_acq(atomic_t *a, vuint32_t v) -{ - return vatomic32_xor_get_acq(a, v); -} - -/** - * Deprecated: See vatomic32_get_xor_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_get_xor_acq(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_xor_acq(a, v); -} - -/** - * Deprecated: See vatomic32_xor_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_xor_rel(atomic_t *a, vuint32_t v) -{ - return vatomic32_xor_get_rel(a, v); -} - -/** - * Deprecated: See vatomic32_get_xor_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_get_xor_rel(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_xor_rel(a, v); -} - -/** - * Deprecated: See vatomic32_xor_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_xor_rlx(atomic_t *a, vuint32_t v) -{ - return vatomic32_xor_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_get_xor_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_get_xor_rlx(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_xor_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_and_get - */ -static inline vuint32_t V_DEPRECATED -atomic_and(atomic_t *a, vuint32_t v) -{ - return vatomic32_and_get(a, v); -} - -/** - * Deprecated: See vatomic32_get_and - */ -static inline vuint32_t V_DEPRECATED -atomic_get_and(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_and(a, v); -} - -/** - * Deprecated: See vatomic32_and_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_and_acq(atomic_t *a, vuint32_t v) -{ - return vatomic32_and_get_acq(a, v); -} - -/** - * Deprecated: See vatomic32_get_and_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_get_and_acq(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_and_acq(a, v); -} - -/** - * Deprecated: See vatomic32_and_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_and_rel(atomic_t *a, vuint32_t v) -{ - return vatomic32_and_get_rel(a, v); -} - -/** - * Deprecated: See vatomic32_get_and_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_get_and_rel(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_and_rel(a, v); -} - -/** - * Deprecated: See vatomic32_and_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_and_rlx(atomic_t *a, vuint32_t v) -{ - return vatomic32_and_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_get_and_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_get_and_rlx(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_and_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_or_get - */ -static inline vuint32_t V_DEPRECATED -atomic_or(atomic_t *a, vuint32_t v) -{ - return vatomic32_or_get(a, v); -} - -/** - * Deprecated: See vatomic32_get_or - */ -static inline vuint32_t V_DEPRECATED -atomic_get_or(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_or(a, v); -} - -/** - * Deprecated: See vatomic32_or_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_or_acq(atomic_t *a, vuint32_t v) -{ - return vatomic32_or_get_acq(a, v); -} - -/** - * Deprecated: See vatomic32_get_or_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_get_or_acq(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_or_acq(a, v); -} - -/** - * Deprecated: See vatomic32_or_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_or_rel(atomic_t *a, vuint32_t v) -{ - return vatomic32_or_get_rel(a, v); -} - -/** - * Deprecated: See 
vatomic32_get_or_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_get_or_rel(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_or_rel(a, v); -} - -/** - * Deprecated: See vatomic32_or_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_or_rlx(atomic_t *a, vuint32_t v) -{ - return vatomic32_or_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_get_or_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_get_or_rlx(atomic_t *a, vuint32_t v) -{ - return vatomic32_get_or_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_add_get - */ -static inline vuint32_t V_DEPRECATED -atomic32_add(atomic32_t *a, vuint32_t v) -{ - return vatomic32_add_get(a, v); -} - -/** - * Deprecated: See vatomic32_get_add - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_add(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_add(a, v); -} - -/** - * Deprecated: See vatomic32_add_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_add_acq(atomic32_t *a, vuint32_t v) -{ - return vatomic32_add_get_acq(a, v); -} - -/** - * Deprecated: See vatomic32_get_add_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_add_acq(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_add_acq(a, v); -} - -/** - * Deprecated: See vatomic32_add_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_add_rel(atomic32_t *a, vuint32_t v) -{ - return vatomic32_add_get_rel(a, v); -} - -/** - * Deprecated: See vatomic32_get_add_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_add_rel(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_add_rel(a, v); -} - -/** - * Deprecated: See vatomic32_add_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_add_rlx(atomic32_t *a, vuint32_t v) -{ - return vatomic32_add_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_get_add_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_add_rlx(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_add_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_sub_get - */ -static inline vuint32_t V_DEPRECATED -atomic32_sub(atomic32_t *a, vuint32_t v) -{ - return vatomic32_sub_get(a, v); -} - -/** - * Deprecated: See vatomic32_get_sub - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_sub(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_sub(a, v); -} - -/** - * Deprecated: See vatomic32_sub_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_sub_acq(atomic32_t *a, vuint32_t v) -{ - return vatomic32_sub_get_acq(a, v); -} - -/** - * Deprecated: See vatomic32_get_sub_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_sub_acq(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_sub_acq(a, v); -} - -/** - * Deprecated: See vatomic32_sub_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_sub_rel(atomic32_t *a, vuint32_t v) -{ - return vatomic32_sub_get_rel(a, v); -} - -/** - * Deprecated: See vatomic32_get_sub_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_sub_rel(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_sub_rel(a, v); -} - -/** - * Deprecated: See vatomic32_sub_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_sub_rlx(atomic32_t *a, vuint32_t v) -{ - return vatomic32_sub_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_get_sub_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_sub_rlx(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_sub_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_xor_get - */ -static inline vuint32_t V_DEPRECATED -atomic32_xor(atomic32_t *a, vuint32_t v) -{ - return 
vatomic32_xor_get(a, v); -} - -/** - * Deprecated: See vatomic32_get_xor - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_xor(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_xor(a, v); -} - -/** - * Deprecated: See vatomic32_xor_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_xor_acq(atomic32_t *a, vuint32_t v) -{ - return vatomic32_xor_get_acq(a, v); -} - -/** - * Deprecated: See vatomic32_get_xor_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_xor_acq(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_xor_acq(a, v); -} - -/** - * Deprecated: See vatomic32_xor_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_xor_rel(atomic32_t *a, vuint32_t v) -{ - return vatomic32_xor_get_rel(a, v); -} - -/** - * Deprecated: See vatomic32_get_xor_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_xor_rel(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_xor_rel(a, v); -} - -/** - * Deprecated: See vatomic32_xor_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_xor_rlx(atomic32_t *a, vuint32_t v) -{ - return vatomic32_xor_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_get_xor_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_xor_rlx(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_xor_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_and_get - */ -static inline vuint32_t V_DEPRECATED -atomic32_and(atomic32_t *a, vuint32_t v) -{ - return vatomic32_and_get(a, v); -} - -/** - * Deprecated: See vatomic32_get_and - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_and(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_and(a, v); -} - -/** - * Deprecated: See vatomic32_and_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_and_acq(atomic32_t *a, vuint32_t v) -{ - return vatomic32_and_get_acq(a, v); -} - -/** - * Deprecated: See vatomic32_get_and_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_and_acq(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_and_acq(a, v); -} - -/** - * Deprecated: See vatomic32_and_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_and_rel(atomic32_t *a, vuint32_t v) -{ - return vatomic32_and_get_rel(a, v); -} - -/** - * Deprecated: See vatomic32_get_and_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_and_rel(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_and_rel(a, v); -} - -/** - * Deprecated: See vatomic32_and_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_and_rlx(atomic32_t *a, vuint32_t v) -{ - return vatomic32_and_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_get_and_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_and_rlx(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_and_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_or_get - */ -static inline vuint32_t V_DEPRECATED -atomic32_or(atomic32_t *a, vuint32_t v) -{ - return vatomic32_or_get(a, v); -} - -/** - * Deprecated: See vatomic32_get_or - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_or(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_or(a, v); -} - -/** - * Deprecated: See vatomic32_or_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_or_acq(atomic32_t *a, vuint32_t v) -{ - return vatomic32_or_get_acq(a, v); -} - -/** - * Deprecated: See vatomic32_get_or_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_or_acq(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_or_acq(a, v); -} - -/** - * Deprecated: See vatomic32_or_get_rel - */ -static inline vuint32_t V_DEPRECATED 
-atomic32_or_rel(atomic32_t *a, vuint32_t v) -{ - return vatomic32_or_get_rel(a, v); -} - -/** - * Deprecated: See vatomic32_get_or_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_or_rel(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_or_rel(a, v); -} - -/** - * Deprecated: See vatomic32_or_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_or_rlx(atomic32_t *a, vuint32_t v) -{ - return vatomic32_or_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic32_get_or_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_or_rlx(atomic32_t *a, vuint32_t v) -{ - return vatomic32_get_or_rlx(a, v); -} - -/** - * Deprecated: See vatomic64_add_get - */ -static inline vuint64_t V_DEPRECATED -atomic64_add(atomic64_t *a, vuint64_t v) -{ - return vatomic64_add_get(a, v); -} - -/** - * Deprecated: See vatomic64_get_add - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_add(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_add(a, v); -} - -/** - * Deprecated: See vatomic64_add_get_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_add_acq(atomic64_t *a, vuint64_t v) -{ - return vatomic64_add_get_acq(a, v); -} - -/** - * Deprecated: See vatomic64_get_add_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_add_acq(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_add_acq(a, v); -} - -/** - * Deprecated: See vatomic64_add_get_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_add_rel(atomic64_t *a, vuint64_t v) -{ - return vatomic64_add_get_rel(a, v); -} - -/** - * Deprecated: See vatomic64_get_add_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_add_rel(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_add_rel(a, v); -} - -/** - * Deprecated: See vatomic64_add_get_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_add_rlx(atomic64_t *a, vuint64_t v) -{ - return vatomic64_add_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic64_get_add_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_add_rlx(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_add_rlx(a, v); -} - -/** - * Deprecated: See vatomic64_sub_get - */ -static inline vuint64_t V_DEPRECATED -atomic64_sub(atomic64_t *a, vuint64_t v) -{ - return vatomic64_sub_get(a, v); -} - -/** - * Deprecated: See vatomic64_get_sub - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_sub(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_sub(a, v); -} - -/** - * Deprecated: See vatomic64_sub_get_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_sub_acq(atomic64_t *a, vuint64_t v) -{ - return vatomic64_sub_get_acq(a, v); -} - -/** - * Deprecated: See vatomic64_get_sub_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_sub_acq(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_sub_acq(a, v); -} - -/** - * Deprecated: See vatomic64_sub_get_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_sub_rel(atomic64_t *a, vuint64_t v) -{ - return vatomic64_sub_get_rel(a, v); -} - -/** - * Deprecated: See vatomic64_get_sub_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_sub_rel(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_sub_rel(a, v); -} - -/** - * Deprecated: See vatomic64_sub_get_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_sub_rlx(atomic64_t *a, vuint64_t v) -{ - return vatomic64_sub_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic64_get_sub_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_sub_rlx(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_sub_rlx(a, v); -} - -/** - * Deprecated: 
See vatomic64_xor_get - */ -static inline vuint64_t V_DEPRECATED -atomic64_xor(atomic64_t *a, vuint64_t v) -{ - return vatomic64_xor_get(a, v); -} - -/** - * Deprecated: See vatomic64_get_xor - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_xor(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_xor(a, v); -} - -/** - * Deprecated: See vatomic64_xor_get_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_xor_acq(atomic64_t *a, vuint64_t v) -{ - return vatomic64_xor_get_acq(a, v); -} - -/** - * Deprecated: See vatomic64_get_xor_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_xor_acq(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_xor_acq(a, v); -} - -/** - * Deprecated: See vatomic64_xor_get_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_xor_rel(atomic64_t *a, vuint64_t v) -{ - return vatomic64_xor_get_rel(a, v); -} - -/** - * Deprecated: See vatomic64_get_xor_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_xor_rel(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_xor_rel(a, v); -} - -/** - * Deprecated: See vatomic64_xor_get_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_xor_rlx(atomic64_t *a, vuint64_t v) -{ - return vatomic64_xor_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic64_get_xor_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_xor_rlx(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_xor_rlx(a, v); -} - -/** - * Deprecated: See vatomic64_and_get - */ -static inline vuint64_t V_DEPRECATED -atomic64_and(atomic64_t *a, vuint64_t v) -{ - return vatomic64_and_get(a, v); -} - -/** - * Deprecated: See vatomic64_get_and - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_and(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_and(a, v); -} - -/** - * Deprecated: See vatomic64_and_get_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_and_acq(atomic64_t *a, vuint64_t v) -{ - return vatomic64_and_get_acq(a, v); -} - -/** - * Deprecated: See vatomic64_get_and_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_and_acq(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_and_acq(a, v); -} - -/** - * Deprecated: See vatomic64_and_get_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_and_rel(atomic64_t *a, vuint64_t v) -{ - return vatomic64_and_get_rel(a, v); -} - -/** - * Deprecated: See vatomic64_get_and_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_and_rel(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_and_rel(a, v); -} - -/** - * Deprecated: See vatomic64_and_get_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_and_rlx(atomic64_t *a, vuint64_t v) -{ - return vatomic64_and_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic64_get_and_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_and_rlx(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_and_rlx(a, v); -} - -/** - * Deprecated: See vatomic64_or_get - */ -static inline vuint64_t V_DEPRECATED -atomic64_or(atomic64_t *a, vuint64_t v) -{ - return vatomic64_or_get(a, v); -} - -/** - * Deprecated: See vatomic64_get_or - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_or(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_or(a, v); -} - -/** - * Deprecated: See vatomic64_or_get_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_or_acq(atomic64_t *a, vuint64_t v) -{ - return vatomic64_or_get_acq(a, v); -} - -/** - * Deprecated: See vatomic64_get_or_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_or_acq(atomic64_t *a, vuint64_t v) -{ - return 
vatomic64_get_or_acq(a, v); -} - -/** - * Deprecated: See vatomic64_or_get_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_or_rel(atomic64_t *a, vuint64_t v) -{ - return vatomic64_or_get_rel(a, v); -} - -/** - * Deprecated: See vatomic64_get_or_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_or_rel(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_or_rel(a, v); -} - -/** - * Deprecated: See vatomic64_or_get_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_or_rlx(atomic64_t *a, vuint64_t v) -{ - return vatomic64_or_get_rlx(a, v); -} - -/** - * Deprecated: See vatomic64_get_or_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_or_rlx(atomic64_t *a, vuint64_t v) -{ - return vatomic64_get_or_rlx(a, v); -} - -/******************************************************************************* - * increment/decrement - ******************************************************************************/ - -/** - * Deprecated: See vatomic32_inc_get - */ -static inline vuint32_t V_DEPRECATED -atomic_inc(atomic_t *a) -{ - return vatomic32_inc_get(a); -} - -/** - * Deprecated: See vatomic32_get_inc - */ -static inline vuint32_t V_DEPRECATED -atomic_get_inc(atomic_t *a) -{ - return vatomic32_get_inc(a); -} - -/** - * Deprecated: See vatomic32_inc_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_inc_acq(atomic_t *a) -{ - return vatomic32_inc_get_acq(a); -} - -/** - * Deprecated: See vatomic32_get_inc_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_get_inc_acq(atomic_t *a) -{ - return vatomic32_get_inc_acq(a); -} - -/** - * Deprecated: See vatomic32_inc_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_inc_rel(atomic_t *a) -{ - return vatomic32_inc_get_rel(a); -} - -/** - * Deprecated: See vatomic32_get_inc_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_get_inc_rel(atomic_t *a) -{ - return vatomic32_get_inc_rel(a); -} - -/** - * Deprecated: See vatomic32_inc_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_inc_rlx(atomic_t *a) -{ - return vatomic32_inc_get_rlx(a); -} - -/** - * Deprecated: See vatomic32_get_inc_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_get_inc_rlx(atomic_t *a) -{ - return vatomic32_get_inc_rlx(a); -} - -/** - * Deprecated: See vatomic32_dec_get - */ -static inline vuint32_t V_DEPRECATED -atomic_dec(atomic_t *a) -{ - return vatomic32_dec_get(a); -} - -/** - * Deprecated: See vatomic32_get_dec - */ -static inline vuint32_t V_DEPRECATED -atomic_get_dec(atomic_t *a) -{ - return vatomic32_get_dec(a); -} - -/** - * Deprecated: See vatomic32_dec_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_dec_acq(atomic_t *a) -{ - return vatomic32_dec_get_acq(a); -} - -/** - * Deprecated: See vatomic32_get_dec_acq - */ -static inline vuint32_t V_DEPRECATED -atomic_get_dec_acq(atomic_t *a) -{ - return vatomic32_get_dec_acq(a); -} - -/** - * Deprecated: See vatomic32_dec_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_dec_rel(atomic_t *a) -{ - return vatomic32_dec_get_rel(a); -} - -/** - * Deprecated: See vatomic32_get_dec_rel - */ -static inline vuint32_t V_DEPRECATED -atomic_get_dec_rel(atomic_t *a) -{ - return vatomic32_get_dec_rel(a); -} - -/** - * Deprecated: See vatomic32_dec_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_dec_rlx(atomic_t *a) -{ - return vatomic32_dec_get_rlx(a); -} - -/** - * Deprecated: See vatomic32_get_dec_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic_get_dec_rlx(atomic_t *a) -{ - return vatomic32_get_dec_rlx(a); -} - -/** - * Deprecated: See 
vatomic32_inc_get - */ -static inline vuint32_t V_DEPRECATED -atomic32_inc(atomic32_t *a) -{ - return vatomic32_inc_get(a); -} - -/** - * Deprecated: See vatomic32_get_inc - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_inc(atomic32_t *a) -{ - return vatomic32_get_inc(a); -} - -/** - * Deprecated: See vatomic32_inc_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_inc_acq(atomic32_t *a) -{ - return vatomic32_inc_get_acq(a); -} - -/** - * Deprecated: See vatomic32_get_inc_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_inc_acq(atomic32_t *a) -{ - return vatomic32_get_inc_acq(a); -} - -/** - * Deprecated: See vatomic32_inc_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_inc_rel(atomic32_t *a) -{ - return vatomic32_inc_get_rel(a); -} - -/** - * Deprecated: See vatomic32_get_inc_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_inc_rel(atomic32_t *a) -{ - return vatomic32_get_inc_rel(a); -} - -/** - * Deprecated: See vatomic32_inc_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_inc_rlx(atomic32_t *a) -{ - return vatomic32_inc_get_rlx(a); -} - -/** - * Deprecated: See vatomic32_get_inc_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_inc_rlx(atomic32_t *a) -{ - return vatomic32_get_inc_rlx(a); -} - -/** - * Deprecated: See vatomic32_dec_get - */ -static inline vuint32_t V_DEPRECATED -atomic32_dec(atomic32_t *a) -{ - return vatomic32_dec_get(a); -} - -/** - * Deprecated: See vatomic32_get_dec - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_dec(atomic32_t *a) -{ - return vatomic32_get_dec(a); -} - -/** - * Deprecated: See vatomic32_dec_get_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_dec_acq(atomic32_t *a) -{ - return vatomic32_dec_get_acq(a); -} - -/** - * Deprecated: See vatomic32_get_dec_acq - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_dec_acq(atomic32_t *a) -{ - return vatomic32_get_dec_acq(a); -} - -/** - * Deprecated: See vatomic32_dec_get_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_dec_rel(atomic32_t *a) -{ - return vatomic32_dec_get_rel(a); -} - -/** - * Deprecated: See vatomic32_get_dec_rel - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_dec_rel(atomic32_t *a) -{ - return vatomic32_get_dec_rel(a); -} - -/** - * Deprecated: See vatomic32_dec_get_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_dec_rlx(atomic32_t *a) -{ - return vatomic32_dec_get_rlx(a); -} - -/** - * Deprecated: See vatomic32_get_dec_rlx - */ -static inline vuint32_t V_DEPRECATED -atomic32_get_dec_rlx(atomic32_t *a) -{ - return vatomic32_get_dec_rlx(a); -} - -/** - * Deprecated: See vatomic64_inc_get - */ -static inline vuint64_t V_DEPRECATED -atomic64_inc(atomic64_t *a) -{ - return vatomic64_inc_get(a); -} - -/** - * Deprecated: See vatomic64_get_inc - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_inc(atomic64_t *a) -{ - return vatomic64_get_inc(a); -} - -/** - * Deprecated: See vatomic64_inc_get_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_inc_acq(atomic64_t *a) -{ - return vatomic64_inc_get_acq(a); -} - -/** - * Deprecated: See vatomic64_get_inc_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_inc_acq(atomic64_t *a) -{ - return vatomic64_get_inc_acq(a); -} - -/** - * Deprecated: See vatomic64_inc_get_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_inc_rel(atomic64_t *a) -{ - return vatomic64_inc_get_rel(a); -} - -/** - * Deprecated: See vatomic64_get_inc_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_inc_rel(atomic64_t 
*a) -{ - return vatomic64_get_inc_rel(a); -} - -/** - * Deprecated: See vatomic64_inc_get_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_inc_rlx(atomic64_t *a) -{ - return vatomic64_inc_get_rlx(a); -} - -/** - * Deprecated: See vatomic64_get_inc_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_inc_rlx(atomic64_t *a) -{ - return vatomic64_get_inc_rlx(a); -} - -/** - * Deprecated: See vatomic64_dec_get - */ -static inline vuint64_t V_DEPRECATED -atomic64_dec(atomic64_t *a) -{ - return vatomic64_dec_get(a); -} - -/** - * Deprecated: See vatomic64_get_dec - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_dec(atomic64_t *a) -{ - return vatomic64_get_dec(a); -} - -/** - * Deprecated: See vatomic64_dec_get_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_dec_acq(atomic64_t *a) -{ - return vatomic64_dec_get_acq(a); -} - -/** - * Deprecated: See vatomic64_get_dec_acq - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_dec_acq(atomic64_t *a) -{ - return vatomic64_get_dec_acq(a); -} - -/** - * Deprecated: See vatomic64_dec_get_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_dec_rel(atomic64_t *a) -{ - return vatomic64_dec_get_rel(a); -} - -/** - * Deprecated: See vatomic64_get_dec_rel - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_dec_rel(atomic64_t *a) -{ - return vatomic64_get_dec_rel(a); -} - -/** - * Deprecated: See vatomic64_dec_get_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_dec_rlx(atomic64_t *a) -{ - return vatomic64_dec_get_rlx(a); -} - -/** - * Deprecated: See vatomic64_get_dec_rlx - */ -static inline vuint64_t V_DEPRECATED -atomic64_get_dec_rlx(atomic64_t *a) -{ - return vatomic64_get_dec_rlx(a); -} - -#endif /* VATOMIC_COMPAT_H */ diff --git a/include/vsync/atomic/config.h b/include/vsync/atomic/config.h index 1d7cf53b..9b5eb517 100644 --- a/include/vsync/atomic/config.h +++ b/include/vsync/atomic/config.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ @@ -93,7 +93,7 @@ /******************************************************************************* * @def VATOMIC_ENABLE_ARM64_LXE - * @brief use ARM64 LSE instructions in slow path of LXSX for atomic operations. + * @brief use ARM64 LSE instructions in slow path of LLSC for atomic operations. * * To use this option, the compiler must be configured to emit LSE instructions * with some flag such as -march=armv8-a+lse. diff --git a/include/vsync/atomic/core.h b/include/vsync/atomic/core.h index 60954dde..b4b0adbf 100644 --- a/include/vsync/atomic/core.h +++ b/include/vsync/atomic/core.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
 * SPDX-License-Identifier: MIT
 */
@@ -114,24 +114,24 @@ static inline void vatomic_fence_rlx(void);
 #include
 /* *****************************************************************************
- * config
+ * Barrier remap
 * ****************************************************************************/
 #if defined(VATOMIC_ENABLE_ATOMIC_SC)
-    #include
-    #include
-    #include
-    #include
-    #include
-    #include
-    #include
+    #include
+    #include
+    #include
+    #include
+    #include
+    #include
+    #include
 #elif defined(VATOMIC_ENABLE_ATOMIC_RLX)
-    #include
-    #include
-    #include
-    #include
-    #include
-    #include
-    #include
+    #include
+    #include
+    #include
+    #include
+    #include
+    #include
+    #include
 #endif
 /* *****************************************************************************
 * Select vatomic implementation and include definitions
diff --git a/include/vsync/atomic/core_bool.hpp b/include/vsync/atomic/core_bool.hpp
new file mode 100644
index 00000000..4432bed2
--- /dev/null
+++ b/include/vsync/atomic/core_bool.hpp
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef VATOMIC_CORE_BOOL_HPP
+#define VATOMIC_CORE_BOOL_HPP
+/*!!!Warning: File generated by tmplr; DO NOT EDIT.!!!*/
+extern "C" {
+#include <vsync/atomic/core.h>
+}
+namespace vsync
+{
+    template <> struct atomic<vbool_t> {
+        atomic(const atomic &) = delete;
+        atomic &operator=(const atomic &) = delete;
+        atomic &operator=(const atomic &) volatile = delete;
+
+        atomic()
+        {
+            vatomic8_init(&_v, false);
+        }
+        atomic(vbool_t v)
+        {
+            vatomic8_init(&_v, v);
+        }
+        vbool_t
+        load(memory_order order = memory_order_seq_cst) volatile const noexcept
+        {
+            switch (order) {
+                case memory_order_consume:
+                case memory_order_acquire:
+                    return static_cast<vbool_t>(
+                        vatomic8_read_acq(const_cast<vatomic8_t *>(&_v)));
+                case memory_order_relaxed:
+                    return static_cast<vbool_t>(
+                        vatomic8_read_rlx(const_cast<vatomic8_t *>(&_v)));
+                case memory_order_release:
+                case memory_order_acq_rel:
+                case memory_order_seq_cst:
+                default:
+                    return static_cast<vbool_t>(
+                        vatomic8_read(const_cast<vatomic8_t *>(&_v)));
+            }
+        }
+        void store(vbool_t v,
+                   memory_order order = memory_order_seq_cst) volatile noexcept
+        {
+            switch (order) {
+                case memory_order_release:
+                    vatomic8_write_rel(const_cast<vatomic8_t *>(&_v), v);
+                    break;
+                case memory_order_relaxed:
+                    vatomic8_write_rlx(const_cast<vatomic8_t *>(&_v), v);
+                    break;
+                case memory_order_acquire:
+                case memory_order_acq_rel:
+                case memory_order_consume:
+                case memory_order_seq_cst:
+                default:
+                    return vatomic8_write(const_cast<vatomic8_t *>(&_v), v);
+            }
+        }
+
+        vbool_t operator=(vbool_t v) volatile noexcept
+        {
+            store(v);
+            return v;
+        }
+
+        operator vbool_t() volatile const noexcept
+        {
+            return load();
+        }
+
+        vbool_t
+        exchange(vbool_t v,
+                 memory_order order = memory_order_seq_cst) volatile noexcept
+        {
+            switch (order) {
+                case memory_order_release:
+                    return static_cast<vbool_t>(
+                        vatomic8_xchg_rel(const_cast<vatomic8_t *>(&_v), v));
+                case memory_order_relaxed:
+                    return static_cast<vbool_t>(
+                        vatomic8_xchg_rlx(const_cast<vatomic8_t *>(&_v), v));
+                case memory_order_consume:
+                case memory_order_acquire:
+                    return static_cast<vbool_t>(
+                        vatomic8_xchg_acq(const_cast<vatomic8_t *>(&_v), v));
+                case memory_order_acq_rel:
+                case memory_order_seq_cst:
+                default:
+                    return static_cast<vbool_t>(
+                        vatomic8_xchg(const_cast<vatomic8_t *>(&_v), v));
+            }
+        }
+
+        bool compare_exchange_strong(
+            vbool_t &expected, vbool_t desired,
+            memory_order order   = memory_order_seq_cst,
+            memory_order failure = memory_order_seq_cst) volatile noexcept
+        {
+            vbool_t old = 0;
+            switch (order) {
+                case memory_order_release:
+                    old = static_cast<vbool_t>(vatomic8_cmpxchg_rel(
+                        const_cast<vatomic8_t *>(&_v), expected, desired));
+                    break;
+                case memory_order_relaxed:
+                    old = static_cast<vbool_t>(vatomic8_cmpxchg_rlx(
+                        const_cast<vatomic8_t *>(&_v), expected, desired));
+                    break;
+                case memory_order_consume:
+                case memory_order_acquire:
+                    old = static_cast<vbool_t>(vatomic8_cmpxchg_acq(
+                        const_cast<vatomic8_t *>(&_v), expected, desired));
+                    break;
+                case memory_order_acq_rel:
+                case memory_order_seq_cst:
+                default:
+                    old = static_cast<vbool_t>(vatomic8_cmpxchg(
+                        const_cast<vatomic8_t *>(&_v), expected, desired));
+                    break;
+            }
+            if (old == expected) {
+                return true;
+            } else {
+                expected = old;
+                return false;
+            }
+        }
+        bool compare_exchange_weak(
+            vbool_t &expected, vbool_t desired,
+            memory_order order   = memory_order_seq_cst,
+            memory_order failure = memory_order_seq_cst) volatile noexcept
+        {
+            return compare_exchange_strong(expected, desired, order, failure);
+        }
+
+        vbool_t load(memory_order order = memory_order_seq_cst) const noexcept
+        {
+            switch (order) {
+                case memory_order_consume:
+                case memory_order_acquire:
+                    return static_cast<vbool_t>(vatomic8_read_acq((&_v)));
+                case memory_order_relaxed:
+                    return static_cast<vbool_t>(vatomic8_read_rlx((&_v)));
+                case memory_order_release:
+                case memory_order_acq_rel:
+                case memory_order_seq_cst:
+                default:
+                    return static_cast<vbool_t>(vatomic8_read((&_v)));
+            }
+        }
+        void store(vbool_t v,
+                   memory_order order = memory_order_seq_cst) noexcept
+        {
+            switch (order) {
+                case memory_order_release:
+                    vatomic8_write_rel((&_v), v);
+                    break;
+                case memory_order_relaxed:
+                    vatomic8_write_rlx((&_v), v);
+                    break;
+                case memory_order_acquire:
+                case memory_order_acq_rel:
+                case memory_order_consume:
+                case memory_order_seq_cst:
+                default:
+                    return vatomic8_write((&_v), v);
+            }
+        }
+
+        vbool_t operator=(vbool_t v) noexcept
+        {
+            store(v);
+            return v;
+        }
+
+        operator vbool_t() const noexcept
+        {
+            return load();
+        }
+
+        vbool_t exchange(vbool_t v,
+                         memory_order order = memory_order_seq_cst) noexcept
+        {
+            switch (order) {
+                case memory_order_release:
+                    return static_cast<vbool_t>(vatomic8_xchg_rel((&_v), v));
+                case memory_order_relaxed:
+                    return static_cast<vbool_t>(vatomic8_xchg_rlx((&_v), v));
+                case memory_order_consume:
+                case memory_order_acquire:
+                    return static_cast<vbool_t>(vatomic8_xchg_acq((&_v), v));
+                case memory_order_acq_rel:
+                case memory_order_seq_cst:
+                default:
+                    return static_cast<vbool_t>(vatomic8_xchg((&_v), v));
+            }
+        }
+
+        bool compare_exchange_strong(
+            vbool_t &expected, vbool_t desired,
+            memory_order order   = memory_order_seq_cst,
+            memory_order failure = memory_order_seq_cst) noexcept
+        {
+            vbool_t old = 0;
+            switch (order) {
+                case memory_order_release:
+                    old = static_cast<vbool_t>(
+                        vatomic8_cmpxchg_rel((&_v), expected, desired));
+                    break;
+                case memory_order_relaxed:
+                    old = static_cast<vbool_t>(
+                        vatomic8_cmpxchg_rlx((&_v), expected, desired));
+                    break;
+                case memory_order_consume:
+                case memory_order_acquire:
+                    old = static_cast<vbool_t>(
+                        vatomic8_cmpxchg_acq((&_v), expected, desired));
+                    break;
+                case memory_order_acq_rel:
+                case memory_order_seq_cst:
+                default:
+                    old = static_cast<vbool_t>(
+                        vatomic8_cmpxchg((&_v), expected, desired));
+                    break;
+            }
+            if (old == expected) {
+                return true;
+            } else {
+                expected = old;
+                return false;
+            }
+        }
+        bool compare_exchange_weak(
+            vbool_t &expected, vbool_t desired,
+            memory_order order   = memory_order_seq_cst,
+            memory_order failure = memory_order_seq_cst) noexcept
+        {
+            return compare_exchange_strong(expected, desired, order, failure);
+        }
+
+        bool is_lock_free() volatile const noexcept
+        {
+            return true;
+        }
+        bool is_lock_free() const noexcept
+        {
+            return true;
+        }
+
+      private:
+        vatomic8_t _v;
+    };
+}; // namespace vsync
+
+#endif
--git a/include/vsync/atomic/core_ptr.hpp b/include/vsync/atomic/core_ptr.hpp new file mode 100644 index 00000000..30854996 --- /dev/null +++ b/include/vsync/atomic/core_ptr.hpp @@ -0,0 +1,480 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +#ifndef VATOMIC_CORE_PTR_HPP +#define VATOMIC_CORE_PTR_HPP +/*!!!Warning: File generated by tmplr; DO NOT EDIT.!!!*/ +extern "C" { +#include +} +namespace vsync +{ + template struct atomic { + atomic(const atomic &) = delete; + atomic &operator=(const atomic &) = delete; + atomic &operator=(const atomic &) volatile = delete; + + atomic() + { + vatomicptr_init(&_v, nullptr); + } + atomic(PTR *v) + { + vatomicptr_init(&_v, v); + } + PTR * + load(memory_order order = memory_order_seq_cst) volatile const noexcept + { + switch (order) { + case memory_order_consume: + case memory_order_acquire: + return static_cast( + vatomicptr_read_acq(const_cast(&_v))); + case memory_order_relaxed: + return static_cast( + vatomicptr_read_rlx(const_cast(&_v))); + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return static_cast( + vatomicptr_read(const_cast(&_v))); + } + } + void store(PTR *v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + vatomicptr_write_rel(const_cast(&_v), v); + break; + case memory_order_relaxed: + vatomicptr_write_rlx(const_cast(&_v), v); + break; + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_consume: + case memory_order_seq_cst: + default: + return vatomicptr_write(const_cast(&_v), v); + } + } + + PTR *operator=(PTR *v) volatile noexcept + { + store(v); + return v; + } + + operator PTR *() volatile const noexcept + { + return load(); + } + + PTR * + exchange(PTR *v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return static_cast(vatomicptr_xchg_rel( + const_cast(&_v), v)); + case memory_order_relaxed: + return static_cast(vatomicptr_xchg_rlx( + const_cast(&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return static_cast(vatomicptr_xchg_acq( + const_cast(&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return static_cast( + vatomicptr_xchg(const_cast(&_v), v)); + } + } + + bool compare_exchange_strong( + PTR *&expected, PTR *desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + PTR *old = 0; + switch (order) { + case memory_order_release: + old = static_cast(vatomicptr_cmpxchg_rel( + const_cast(&_v), expected, desired)); + break; + case memory_order_relaxed: + old = static_cast(vatomicptr_cmpxchg_rlx( + const_cast(&_v), expected, desired)); + break; + case memory_order_consume: + case memory_order_acquire: + old = static_cast(vatomicptr_cmpxchg_acq( + const_cast(&_v), expected, desired)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old = static_cast(vatomicptr_cmpxchg( + const_cast(&_v), expected, desired)); + break; + } + if (old == expected) { + return true; + } else { + expected = old; + return false; + } + } + bool compare_exchange_weak( + PTR *&expected, PTR *desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + PTR *load(memory_order order = 
memory_order_seq_cst) const noexcept + { + switch (order) { + case memory_order_consume: + case memory_order_acquire: + return static_cast(vatomicptr_read_acq((&_v))); + case memory_order_relaxed: + return static_cast(vatomicptr_read_rlx((&_v))); + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return static_cast(vatomicptr_read((&_v))); + } + } + void store(PTR *v, memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + vatomicptr_write_rel((&_v), v); + break; + case memory_order_relaxed: + vatomicptr_write_rlx((&_v), v); + break; + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_consume: + case memory_order_seq_cst: + default: + return vatomicptr_write((&_v), v); + } + } + + PTR *operator=(PTR *v) noexcept + { + store(v); + return v; + } + + operator PTR *() const noexcept + { + return load(); + } + + PTR *exchange(PTR *v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return static_cast(vatomicptr_xchg_rel((&_v), v)); + case memory_order_relaxed: + return static_cast(vatomicptr_xchg_rlx((&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return static_cast(vatomicptr_xchg_acq((&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return static_cast(vatomicptr_xchg((&_v), v)); + } + } + + bool compare_exchange_strong( + PTR *&expected, PTR *desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + PTR *old = 0; + switch (order) { + case memory_order_release: + old = static_cast( + vatomicptr_cmpxchg_rel((&_v), expected, desired)); + break; + case memory_order_relaxed: + old = static_cast( + vatomicptr_cmpxchg_rlx((&_v), expected, desired)); + break; + case memory_order_consume: + case memory_order_acquire: + old = static_cast( + vatomicptr_cmpxchg_acq((&_v), expected, desired)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old = static_cast( + vatomicptr_cmpxchg((&_v), expected, desired)); + break; + } + if (old == expected) { + return true; + } else { + expected = old; + return false; + } + } + bool compare_exchange_weak( + PTR *&expected, PTR *desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + PTR * + fetch_add(ptrdiff_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return add_rel(v, true); + case memory_order_relaxed: + return add_rlx(v, true); + case memory_order_consume: + case memory_order_acquire: + return add_acq(v, true); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return add(v, true); + } + } + PTR *operator+=(ptrdiff_t v) volatile noexcept + { + return add(v, true); + } + // ptr++ + PTR *operator++(int) volatile noexcept + { + return add(1, true); + } + // ++ptr + PTR *operator++() volatile noexcept + { + return add(1, false); + } + + PTR * + fetch_sub(ptrdiff_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return fetch_add(-v, order); + } + + PTR *operator-=(ptrdiff_t v) volatile noexcept + { + return add(-v, true); + } + // ptr-- + PTR *operator--(int) volatile noexcept + { + return add(-1, true); + } + // --ptr + PTR *operator--() volatile noexcept + { + return add(-1, false); + } + PTR 
*fetch_add(ptrdiff_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return add_rel(v, true); + case memory_order_relaxed: + return add_rlx(v, true); + case memory_order_consume: + case memory_order_acquire: + return add_acq(v, true); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return add(v, true); + } + } + PTR *operator+=(ptrdiff_t v) noexcept + { + return add(v, true); + } + // ptr++ + PTR *operator++(int) noexcept + { + return add(1, true); + } + // ++ptr + PTR *operator++() noexcept + { + return add(1, false); + } + + PTR *fetch_sub(ptrdiff_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return fetch_add(-v, order); + } + + PTR *operator-=(ptrdiff_t v) noexcept + { + return add(-v, true); + } + // ptr-- + PTR *operator--(int) noexcept + { + return add(-1, true); + } + // --ptr + PTR *operator--() noexcept + { + return add(-1, false); + } + inline PTR *add(ptrdiff_t v, bool return_old) volatile + { + PTR *old = nullptr; + PTR *expected = nullptr; + PTR *desired = nullptr; + old = static_cast( + vatomicptr_read(const_cast(&_v))); + do { + expected = old; + desired = expected + v; + old = static_cast(vatomicptr_cmpxchg( + const_cast(&_v), expected, desired)); + } while (old != expected); + return return_old ? old : desired; + } + inline PTR *add(ptrdiff_t v, bool return_old) + { + PTR *old = nullptr; + PTR *expected = nullptr; + PTR *desired = nullptr; + old = static_cast(vatomicptr_read((&_v))); + do { + expected = old; + desired = expected + v; + old = static_cast( + vatomicptr_cmpxchg((&_v), expected, desired)); + } while (old != expected); + return return_old ? old : desired; + } + inline PTR *add_acq(ptrdiff_t v, bool return_old) volatile + { + PTR *old = nullptr; + PTR *expected = nullptr; + PTR *desired = nullptr; + old = static_cast( + vatomicptr_read(const_cast(&_v))); + do { + expected = old; + desired = expected + v; + old = static_cast(vatomicptr_cmpxchg_acq( + const_cast(&_v), expected, desired)); + } while (old != expected); + return return_old ? old : desired; + } + inline PTR *add_acq(ptrdiff_t v, bool return_old) + { + PTR *old = nullptr; + PTR *expected = nullptr; + PTR *desired = nullptr; + old = static_cast(vatomicptr_read((&_v))); + do { + expected = old; + desired = expected + v; + old = static_cast( + vatomicptr_cmpxchg_acq((&_v), expected, desired)); + } while (old != expected); + return return_old ? old : desired; + } + inline PTR *add_rel(ptrdiff_t v, bool return_old) volatile + { + PTR *old = nullptr; + PTR *expected = nullptr; + PTR *desired = nullptr; + old = static_cast( + vatomicptr_read(const_cast(&_v))); + do { + expected = old; + desired = expected + v; + old = static_cast(vatomicptr_cmpxchg_rel( + const_cast(&_v), expected, desired)); + } while (old != expected); + return return_old ? old : desired; + } + inline PTR *add_rel(ptrdiff_t v, bool return_old) + { + PTR *old = nullptr; + PTR *expected = nullptr; + PTR *desired = nullptr; + old = static_cast(vatomicptr_read((&_v))); + do { + expected = old; + desired = expected + v; + old = static_cast( + vatomicptr_cmpxchg_rel((&_v), expected, desired)); + } while (old != expected); + return return_old ? 
old : desired; + } + inline PTR *add_rlx(ptrdiff_t v, bool return_old) volatile + { + PTR *old = nullptr; + PTR *expected = nullptr; + PTR *desired = nullptr; + old = static_cast( + vatomicptr_read(const_cast(&_v))); + do { + expected = old; + desired = expected + v; + old = static_cast(vatomicptr_cmpxchg_rlx( + const_cast(&_v), expected, desired)); + } while (old != expected); + return return_old ? old : desired; + } + inline PTR *add_rlx(ptrdiff_t v, bool return_old) + { + PTR *old = nullptr; + PTR *expected = nullptr; + PTR *desired = nullptr; + old = static_cast(vatomicptr_read((&_v))); + do { + expected = old; + desired = expected + v; + old = static_cast( + vatomicptr_cmpxchg_rlx((&_v), expected, desired)); + } while (old != expected); + return return_old ? old : desired; + } + bool is_lock_free() volatile const noexcept + { + return true; + } + bool is_lock_free() const noexcept + { + return true; + } + + private: + vatomicptr_t _v; + }; +}; // namespace vsync + +#endif diff --git a/include/vsync/atomic/core_s16.hpp b/include/vsync/atomic/core_s16.hpp new file mode 100644 index 00000000..208bf265 --- /dev/null +++ b/include/vsync/atomic/core_s16.hpp @@ -0,0 +1,279 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +extern "C" { +#include +} +namespace vsync +{ + template <> struct atomic { + atomic(const atomic &) = delete; + atomic &operator=(const atomic &) = delete; + atomic &operator=(const atomic &) volatile = delete; + atomic() : _a() + { + } + atomic(vint16_t v) : _a(static_cast(v)) + { + } + vint16_t + load(memory_order order = memory_order_seq_cst) volatile const noexcept + { + return _a.load(order); + } + void store(vint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + _a.store(static_cast(v), order); + } + + vint16_t operator=(vint16_t v) volatile noexcept + { + store(v); + return v; + } + + operator vint16_t() volatile const noexcept + { + return load(); + } + + vint16_t + exchange(vint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.exchange(static_cast(v), order)); + } + + + bool compare_exchange_strong( + vint16_t &expected, vint16_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + // TODO: find a way to make the cast on expected safe! 
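+            // The C-style cast below reinterprets the vint16_t reference as
+            // a vuint16_t reference. Accessing an object through the
+            // unsigned counterpart of its signed type is one of the aliasing
+            // exceptions in C++, so this is believed to be well defined; the
+            // TODO above is about expressing that intent without a raw cast.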
+ return _a.compare_exchange_strong((vuint16_t &)(expected), + static_cast(desired), + order, failure); + } + bool compare_exchange_weak( + vint16_t &expected, vint16_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vint16_t + fetch_add(vint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_add(static_cast(v), order)); + } + vint16_t operator+=(vint16_t v) volatile noexcept + { + return fetch_add(v); + } + // v++ + vint16_t operator++(int) volatile noexcept + { + return static_cast(_a++); + } + // ++v + vint16_t operator++() volatile noexcept + { + return static_cast(++_a); + } + + vint16_t + fetch_sub(vint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_sub(static_cast(v), order)); + } + vint16_t operator-=(vint16_t v) volatile noexcept + { + return fetch_sub(v); + } + // v-- + vint16_t operator--(int) volatile noexcept + { + return static_cast(_a--); + } + // --v + vint16_t operator--() volatile noexcept + { + return static_cast(--_a); + } + + vint16_t + fetch_and(vint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_and(static_cast(v), order)); + } + + vint16_t operator&=(vint16_t v) volatile noexcept + { + return fetch_and(v); + } + + vint16_t + fetch_or(vint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_or(static_cast(v), order)); + } + + vint16_t operator|=(vint16_t v) volatile noexcept + { + return fetch_or(v); + } + + vint16_t + fetch_xor(vint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_xor(static_cast(v), order)); + } + + vint16_t operator^=(vint16_t v) volatile noexcept + { + return fetch_xor(v); + } + vint16_t load(memory_order order = memory_order_seq_cst) const noexcept + { + return _a.load(order); + } + void store(vint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + _a.store(static_cast(v), order); + } + + vint16_t operator=(vint16_t v) noexcept + { + store(v); + return v; + } + + operator vint16_t() const noexcept + { + return load(); + } + + vint16_t exchange(vint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.exchange(static_cast(v), order)); + } + + + bool compare_exchange_strong( + vint16_t &expected, vint16_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + // TODO: find a way to make the cast on expected safe! 
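+            // A cast-free alternative (illustrative sketch only, not what
+            // this header does) would round-trip through a local unsigned
+            // temporary:
+            //   vuint16_t e = static_cast<vuint16_t>(expected);
+            //   bool ok   = _a.compare_exchange_strong(
+            //       e, static_cast<vuint16_t>(desired), order, failure);
+            //   if (!ok) expected = static_cast<vint16_t>(e);
+            //   return ok;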
+ return _a.compare_exchange_strong((vuint16_t &)(expected), + static_cast(desired), + order, failure); + } + bool compare_exchange_weak( + vint16_t &expected, vint16_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vint16_t fetch_add(vint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_add(static_cast(v), order)); + } + vint16_t operator+=(vint16_t v) noexcept + { + return fetch_add(v); + } + // v++ + vint16_t operator++(int) noexcept + { + return static_cast(_a++); + } + // ++v + vint16_t operator++() noexcept + { + return static_cast(++_a); + } + + vint16_t fetch_sub(vint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_sub(static_cast(v), order)); + } + vint16_t operator-=(vint16_t v) noexcept + { + return fetch_sub(v); + } + // v-- + vint16_t operator--(int) noexcept + { + return static_cast(_a--); + } + // --v + vint16_t operator--() noexcept + { + return static_cast(--_a); + } + + vint16_t fetch_and(vint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_and(static_cast(v), order)); + } + + vint16_t operator&=(vint16_t v) noexcept + { + return fetch_and(v); + } + + vint16_t fetch_or(vint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_or(static_cast(v), order)); + } + + vint16_t operator|=(vint16_t v) noexcept + { + return fetch_or(v); + } + + vint16_t fetch_xor(vint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_xor(static_cast(v), order)); + } + + vint16_t operator^=(vint16_t v) noexcept + { + return fetch_xor(v); + } + + private: + vsync::atomic _a; + }; +} // namespace vsync diff --git a/include/vsync/atomic/core_s32.hpp b/include/vsync/atomic/core_s32.hpp new file mode 100644 index 00000000..29818913 --- /dev/null +++ b/include/vsync/atomic/core_s32.hpp @@ -0,0 +1,279 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +extern "C" { +#include +} +namespace vsync +{ + template <> struct atomic { + atomic(const atomic &) = delete; + atomic &operator=(const atomic &) = delete; + atomic &operator=(const atomic &) volatile = delete; + atomic() : _a() + { + } + atomic(vint32_t v) : _a(static_cast(v)) + { + } + vint32_t + load(memory_order order = memory_order_seq_cst) volatile const noexcept + { + return _a.load(order); + } + void store(vint32_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + _a.store(static_cast(v), order); + } + + vint32_t operator=(vint32_t v) volatile noexcept + { + store(v); + return v; + } + + operator vint32_t() volatile const noexcept + { + return load(); + } + + vint32_t + exchange(vint32_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.exchange(static_cast(v), order)); + } + + + bool compare_exchange_strong( + vint32_t &expected, vint32_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + // TODO: find a way to make the cast on expected safe! 
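+            // The failure ordering parameter is accepted for std::atomic
+            // source compatibility, but the underlying strong cmpxchg is
+            // selected from the success order alone; a caller requesting a
+            // stronger failure order effectively gets the success ordering
+            // on the failure path as well.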
+ return _a.compare_exchange_strong((vuint32_t &)(expected), + static_cast(desired), + order, failure); + } + bool compare_exchange_weak( + vint32_t &expected, vint32_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vint32_t + fetch_add(vint32_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_add(static_cast(v), order)); + } + vint32_t operator+=(vint32_t v) volatile noexcept + { + return fetch_add(v); + } + // v++ + vint32_t operator++(int) volatile noexcept + { + return static_cast(_a++); + } + // ++v + vint32_t operator++() volatile noexcept + { + return static_cast(++_a); + } + + vint32_t + fetch_sub(vint32_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_sub(static_cast(v), order)); + } + vint32_t operator-=(vint32_t v) volatile noexcept + { + return fetch_sub(v); + } + // v-- + vint32_t operator--(int) volatile noexcept + { + return static_cast(_a--); + } + // --v + vint32_t operator--() volatile noexcept + { + return static_cast(--_a); + } + + vint32_t + fetch_and(vint32_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_and(static_cast(v), order)); + } + + vint32_t operator&=(vint32_t v) volatile noexcept + { + return fetch_and(v); + } + + vint32_t + fetch_or(vint32_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_or(static_cast(v), order)); + } + + vint32_t operator|=(vint32_t v) volatile noexcept + { + return fetch_or(v); + } + + vint32_t + fetch_xor(vint32_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_xor(static_cast(v), order)); + } + + vint32_t operator^=(vint32_t v) volatile noexcept + { + return fetch_xor(v); + } + vint32_t load(memory_order order = memory_order_seq_cst) const noexcept + { + return _a.load(order); + } + void store(vint32_t v, + memory_order order = memory_order_seq_cst) noexcept + { + _a.store(static_cast(v), order); + } + + vint32_t operator=(vint32_t v) noexcept + { + store(v); + return v; + } + + operator vint32_t() const noexcept + { + return load(); + } + + vint32_t exchange(vint32_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.exchange(static_cast(v), order)); + } + + + bool compare_exchange_strong( + vint32_t &expected, vint32_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + // TODO: find a way to make the cast on expected safe! 
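+            // compare_exchange_weak (below) forwards to the strong variant:
+            // the vatomic cmpxchg primitives do not fail spuriously, so a
+            // typical weak-CAS loop such as
+            //   while (!a.compare_exchange_weak(e, e + 1)) { }
+            // behaves exactly like its strong counterpart here.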
+ return _a.compare_exchange_strong((vuint32_t &)(expected), + static_cast(desired), + order, failure); + } + bool compare_exchange_weak( + vint32_t &expected, vint32_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vint32_t fetch_add(vint32_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_add(static_cast(v), order)); + } + vint32_t operator+=(vint32_t v) noexcept + { + return fetch_add(v); + } + // v++ + vint32_t operator++(int) noexcept + { + return static_cast(_a++); + } + // ++v + vint32_t operator++() noexcept + { + return static_cast(++_a); + } + + vint32_t fetch_sub(vint32_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_sub(static_cast(v), order)); + } + vint32_t operator-=(vint32_t v) noexcept + { + return fetch_sub(v); + } + // v-- + vint32_t operator--(int) noexcept + { + return static_cast(_a--); + } + // --v + vint32_t operator--() noexcept + { + return static_cast(--_a); + } + + vint32_t fetch_and(vint32_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_and(static_cast(v), order)); + } + + vint32_t operator&=(vint32_t v) noexcept + { + return fetch_and(v); + } + + vint32_t fetch_or(vint32_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_or(static_cast(v), order)); + } + + vint32_t operator|=(vint32_t v) noexcept + { + return fetch_or(v); + } + + vint32_t fetch_xor(vint32_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_xor(static_cast(v), order)); + } + + vint32_t operator^=(vint32_t v) noexcept + { + return fetch_xor(v); + } + + private: + vsync::atomic _a; + }; +} // namespace vsync diff --git a/include/vsync/atomic/core_s64.hpp b/include/vsync/atomic/core_s64.hpp new file mode 100644 index 00000000..89bd6a4a --- /dev/null +++ b/include/vsync/atomic/core_s64.hpp @@ -0,0 +1,279 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +extern "C" { +#include +} +namespace vsync +{ + template <> struct atomic { + atomic(const atomic &) = delete; + atomic &operator=(const atomic &) = delete; + atomic &operator=(const atomic &) volatile = delete; + atomic() : _a() + { + } + atomic(vint64_t v) : _a(static_cast(v)) + { + } + vint64_t + load(memory_order order = memory_order_seq_cst) volatile const noexcept + { + return _a.load(order); + } + void store(vint64_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + _a.store(static_cast(v), order); + } + + vint64_t operator=(vint64_t v) volatile noexcept + { + store(v); + return v; + } + + operator vint64_t() volatile const noexcept + { + return load(); + } + + vint64_t + exchange(vint64_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.exchange(static_cast(v), order)); + } + + + bool compare_exchange_strong( + vint64_t &expected, vint64_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + // TODO: find a way to make the cast on expected safe! 
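+            // Arithmetic on the signed specializations is delegated to the
+            // unsigned atomic (see fetch_add below), so overflow wraps
+            // modulo 2^64 rather than being undefined signed overflow: on
+            // two's-complement targets, fetch_add(1) on INT64_MAX yields
+            // INT64_MIN.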
+ return _a.compare_exchange_strong((vuint64_t &)(expected), + static_cast(desired), + order, failure); + } + bool compare_exchange_weak( + vint64_t &expected, vint64_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vint64_t + fetch_add(vint64_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_add(static_cast(v), order)); + } + vint64_t operator+=(vint64_t v) volatile noexcept + { + return fetch_add(v); + } + // v++ + vint64_t operator++(int) volatile noexcept + { + return static_cast(_a++); + } + // ++v + vint64_t operator++() volatile noexcept + { + return static_cast(++_a); + } + + vint64_t + fetch_sub(vint64_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_sub(static_cast(v), order)); + } + vint64_t operator-=(vint64_t v) volatile noexcept + { + return fetch_sub(v); + } + // v-- + vint64_t operator--(int) volatile noexcept + { + return static_cast(_a--); + } + // --v + vint64_t operator--() volatile noexcept + { + return static_cast(--_a); + } + + vint64_t + fetch_and(vint64_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_and(static_cast(v), order)); + } + + vint64_t operator&=(vint64_t v) volatile noexcept + { + return fetch_and(v); + } + + vint64_t + fetch_or(vint64_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_or(static_cast(v), order)); + } + + vint64_t operator|=(vint64_t v) volatile noexcept + { + return fetch_or(v); + } + + vint64_t + fetch_xor(vint64_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_xor(static_cast(v), order)); + } + + vint64_t operator^=(vint64_t v) volatile noexcept + { + return fetch_xor(v); + } + vint64_t load(memory_order order = memory_order_seq_cst) const noexcept + { + return _a.load(order); + } + void store(vint64_t v, + memory_order order = memory_order_seq_cst) noexcept + { + _a.store(static_cast(v), order); + } + + vint64_t operator=(vint64_t v) noexcept + { + store(v); + return v; + } + + operator vint64_t() const noexcept + { + return load(); + } + + vint64_t exchange(vint64_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.exchange(static_cast(v), order)); + } + + + bool compare_exchange_strong( + vint64_t &expected, vint64_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + // TODO: find a way to make the cast on expected safe! 
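+            // Memory-order dispatch lives in the unsigned core headers this
+            // wrapper delegates to: memory_order_consume is promoted to
+            // acquire, and orders that are invalid for an operation (e.g.
+            // release on a load) fall through to the seq_cst variant.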
+ return _a.compare_exchange_strong((vuint64_t &)(expected), + static_cast(desired), + order, failure); + } + bool compare_exchange_weak( + vint64_t &expected, vint64_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vint64_t fetch_add(vint64_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_add(static_cast(v), order)); + } + vint64_t operator+=(vint64_t v) noexcept + { + return fetch_add(v); + } + // v++ + vint64_t operator++(int) noexcept + { + return static_cast(_a++); + } + // ++v + vint64_t operator++() noexcept + { + return static_cast(++_a); + } + + vint64_t fetch_sub(vint64_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_sub(static_cast(v), order)); + } + vint64_t operator-=(vint64_t v) noexcept + { + return fetch_sub(v); + } + // v-- + vint64_t operator--(int) noexcept + { + return static_cast(_a--); + } + // --v + vint64_t operator--() noexcept + { + return static_cast(--_a); + } + + vint64_t fetch_and(vint64_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_and(static_cast(v), order)); + } + + vint64_t operator&=(vint64_t v) noexcept + { + return fetch_and(v); + } + + vint64_t fetch_or(vint64_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_or(static_cast(v), order)); + } + + vint64_t operator|=(vint64_t v) noexcept + { + return fetch_or(v); + } + + vint64_t fetch_xor(vint64_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_xor(static_cast(v), order)); + } + + vint64_t operator^=(vint64_t v) noexcept + { + return fetch_xor(v); + } + + private: + vsync::atomic _a; + }; +} // namespace vsync diff --git a/include/vsync/atomic/core_s8.hpp b/include/vsync/atomic/core_s8.hpp new file mode 100644 index 00000000..076edec1 --- /dev/null +++ b/include/vsync/atomic/core_s8.hpp @@ -0,0 +1,279 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +extern "C" { +#include +} +namespace vsync +{ + template <> struct atomic { + atomic(const atomic &) = delete; + atomic &operator=(const atomic &) = delete; + atomic &operator=(const atomic &) volatile = delete; + atomic() : _a() + { + } + atomic(vint8_t v) : _a(static_cast(v)) + { + } + vint8_t + load(memory_order order = memory_order_seq_cst) volatile const noexcept + { + return _a.load(order); + } + void store(vint8_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + _a.store(static_cast(v), order); + } + + vint8_t operator=(vint8_t v) volatile noexcept + { + store(v); + return v; + } + + operator vint8_t() volatile const noexcept + { + return load(); + } + + vint8_t + exchange(vint8_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.exchange(static_cast(v), order)); + } + + + bool compare_exchange_strong( + vint8_t &expected, vint8_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + // TODO: find a way to make the cast on expected safe! 
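+            // As with std::atomic, every member function is provided in a
+            // volatile-qualified and a non-volatile flavor, so code holding
+            // a volatile vsync::atomic<vint8_t> can use the same interface.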
+ return _a.compare_exchange_strong((vuint8_t &)(expected), + static_cast(desired), + order, failure); + } + bool compare_exchange_weak( + vint8_t &expected, vint8_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vint8_t + fetch_add(vint8_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_add(static_cast(v), order)); + } + vint8_t operator+=(vint8_t v) volatile noexcept + { + return fetch_add(v); + } + // v++ + vint8_t operator++(int) volatile noexcept + { + return static_cast(_a++); + } + // ++v + vint8_t operator++() volatile noexcept + { + return static_cast(++_a); + } + + vint8_t + fetch_sub(vint8_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_sub(static_cast(v), order)); + } + vint8_t operator-=(vint8_t v) volatile noexcept + { + return fetch_sub(v); + } + // v-- + vint8_t operator--(int) volatile noexcept + { + return static_cast(_a--); + } + // --v + vint8_t operator--() volatile noexcept + { + return static_cast(--_a); + } + + vint8_t + fetch_and(vint8_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_and(static_cast(v), order)); + } + + vint8_t operator&=(vint8_t v) volatile noexcept + { + return fetch_and(v); + } + + vint8_t + fetch_or(vint8_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_or(static_cast(v), order)); + } + + vint8_t operator|=(vint8_t v) volatile noexcept + { + return fetch_or(v); + } + + vint8_t + fetch_xor(vint8_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + return static_cast( + _a.fetch_xor(static_cast(v), order)); + } + + vint8_t operator^=(vint8_t v) volatile noexcept + { + return fetch_xor(v); + } + vint8_t load(memory_order order = memory_order_seq_cst) const noexcept + { + return _a.load(order); + } + void store(vint8_t v, + memory_order order = memory_order_seq_cst) noexcept + { + _a.store(static_cast(v), order); + } + + vint8_t operator=(vint8_t v) noexcept + { + store(v); + return v; + } + + operator vint8_t() const noexcept + { + return load(); + } + + vint8_t exchange(vint8_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.exchange(static_cast(v), order)); + } + + + bool compare_exchange_strong( + vint8_t &expected, vint8_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + // TODO: find a way to make the cast on expected safe! 
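+            // The signed specializations hold an atomic of the same-width
+            // unsigned type as their only data member (_a, declared at the
+            // bottom of the class) and issue no direct vatomic calls; all
+            // atomicity and ordering guarantees come from that member.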
+ return _a.compare_exchange_strong((vuint8_t &)(expected), + static_cast(desired), + order, failure); + } + bool compare_exchange_weak( + vint8_t &expected, vint8_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vint8_t fetch_add(vint8_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_add(static_cast(v), order)); + } + vint8_t operator+=(vint8_t v) noexcept + { + return fetch_add(v); + } + // v++ + vint8_t operator++(int) noexcept + { + return static_cast(_a++); + } + // ++v + vint8_t operator++() noexcept + { + return static_cast(++_a); + } + + vint8_t fetch_sub(vint8_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_sub(static_cast(v), order)); + } + vint8_t operator-=(vint8_t v) noexcept + { + return fetch_sub(v); + } + // v-- + vint8_t operator--(int) noexcept + { + return static_cast(_a--); + } + // --v + vint8_t operator--() noexcept + { + return static_cast(--_a); + } + + vint8_t fetch_and(vint8_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_and(static_cast(v), order)); + } + + vint8_t operator&=(vint8_t v) noexcept + { + return fetch_and(v); + } + + vint8_t fetch_or(vint8_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_or(static_cast(v), order)); + } + + vint8_t operator|=(vint8_t v) noexcept + { + return fetch_or(v); + } + + vint8_t fetch_xor(vint8_t v, + memory_order order = memory_order_seq_cst) noexcept + { + return static_cast( + _a.fetch_xor(static_cast(v), order)); + } + + vint8_t operator^=(vint8_t v) noexcept + { + return fetch_xor(v); + } + + private: + vsync::atomic _a; + }; +} // namespace vsync diff --git a/include/vsync/atomic/core_sz.hpp b/include/vsync/atomic/core_sz.hpp new file mode 100644 index 00000000..5c1d3dbb --- /dev/null +++ b/include/vsync/atomic/core_sz.hpp @@ -0,0 +1,536 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ * SPDX-License-Identifier: MIT + */ +#ifndef VATOMIC_CORE_SZ_HPP +#define VATOMIC_CORE_SZ_HPP +/*!!!Warning: File generated by tmplr; DO NOT EDIT.!!!*/ +extern "C" { +#include +} +namespace vsync +{ + template <> struct atomic { + atomic(const atomic &) = delete; + atomic &operator=(const atomic &) = delete; + atomic &operator=(const atomic &) volatile = delete; + + atomic() + { + vatomicsz_init(&_v, 0U); + } + atomic(vsize_t v) + { + vatomicsz_init(&_v, v); + } + vsize_t + load(memory_order order = memory_order_seq_cst) volatile const noexcept + { + switch (order) { + case memory_order_consume: + case memory_order_acquire: + return (vatomicsz_read_acq(const_cast(&_v))); + case memory_order_relaxed: + return (vatomicsz_read_rlx(const_cast(&_v))); + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomicsz_read(const_cast(&_v))); + } + } + void store(vsize_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + vatomicsz_write_rel(const_cast(&_v), v); + break; + case memory_order_relaxed: + vatomicsz_write_rlx(const_cast(&_v), v); + break; + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_consume: + case memory_order_seq_cst: + default: + return vatomicsz_write(const_cast(&_v), v); + } + } + + vsize_t operator=(vsize_t v) volatile noexcept + { + store(v); + return v; + } + + operator vsize_t() volatile const noexcept + { + return load(); + } + + vsize_t + exchange(vsize_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return ( + vatomicsz_xchg_rel(const_cast(&_v), v)); + case memory_order_relaxed: + return ( + vatomicsz_xchg_rlx(const_cast(&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return ( + vatomicsz_xchg_acq(const_cast(&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomicsz_xchg(const_cast(&_v), v)); + } + } + + bool compare_exchange_strong( + vsize_t &expected, vsize_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + vsize_t old = 0; + switch (order) { + case memory_order_release: + old = (vatomicsz_cmpxchg_rel(const_cast(&_v), + expected, desired)); + break; + case memory_order_relaxed: + old = (vatomicsz_cmpxchg_rlx(const_cast(&_v), + expected, desired)); + break; + case memory_order_consume: + case memory_order_acquire: + old = (vatomicsz_cmpxchg_acq(const_cast(&_v), + expected, desired)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old = (vatomicsz_cmpxchg(const_cast(&_v), + expected, desired)); + break; + } + if (old == expected) { + return true; + } else { + expected = old; + return false; + } + } + bool compare_exchange_weak( + vsize_t &expected, vsize_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vsize_t load(memory_order order = memory_order_seq_cst) const noexcept + { + switch (order) { + case memory_order_consume: + case memory_order_acquire: + return (vatomicsz_read_acq((&_v))); + case memory_order_relaxed: + return (vatomicsz_read_rlx((&_v))); + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomicsz_read((&_v))); + } + } + void store(vsize_t v, + 
memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + vatomicsz_write_rel((&_v), v); + break; + case memory_order_relaxed: + vatomicsz_write_rlx((&_v), v); + break; + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_consume: + case memory_order_seq_cst: + default: + return vatomicsz_write((&_v), v); + } + } + + vsize_t operator=(vsize_t v) noexcept + { + store(v); + return v; + } + + operator vsize_t() const noexcept + { + return load(); + } + + vsize_t exchange(vsize_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return (vatomicsz_xchg_rel((&_v), v)); + case memory_order_relaxed: + return (vatomicsz_xchg_rlx((&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return (vatomicsz_xchg_acq((&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomicsz_xchg((&_v), v)); + } + } + + bool compare_exchange_strong( + vsize_t &expected, vsize_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + vsize_t old = 0; + switch (order) { + case memory_order_release: + old = (vatomicsz_cmpxchg_rel((&_v), expected, desired)); + break; + case memory_order_relaxed: + old = (vatomicsz_cmpxchg_rlx((&_v), expected, desired)); + break; + case memory_order_consume: + case memory_order_acquire: + old = (vatomicsz_cmpxchg_acq((&_v), expected, desired)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old = (vatomicsz_cmpxchg((&_v), expected, desired)); + break; + } + if (old == expected) { + return true; + } else { + expected = old; + return false; + } + } + bool compare_exchange_weak( + vsize_t &expected, vsize_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vsize_t + fetch_add(vsize_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return (vatomicsz_get_add_rel( + const_cast(&_v), v)); + case memory_order_relaxed: + return (vatomicsz_get_add_rlx( + const_cast(&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return (vatomicsz_get_add_acq( + const_cast(&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return ( + vatomicsz_get_add(const_cast(&_v), v)); + } + } + vsize_t operator+=(vsize_t v) volatile noexcept + { + return fetch_add(v); + } + // v++ + vsize_t operator++(int) volatile noexcept + { + return vatomicsz_get_inc(const_cast(&_v)); + } + // ++v + vsize_t operator++() volatile noexcept + { + return vatomicsz_inc_get(const_cast(&_v)); + } + + vsize_t + fetch_sub(vsize_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return vatomicsz_get_sub_rel(const_cast(&_v), + v); + case memory_order_relaxed: + return vatomicsz_get_sub_rlx(const_cast(&_v), + v); + case memory_order_consume: + case memory_order_acquire: + return vatomicsz_get_sub_acq(const_cast(&_v), + v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomicsz_get_sub(const_cast(&_v), v); + } + } + vsize_t operator-=(vsize_t v) volatile noexcept + { + return fetch_sub(v); + } + // v-- + vsize_t operator--(int) volatile noexcept + { + return vatomicsz_get_dec(const_cast(&_v)); + } + // --v + 
vsize_t operator--() volatile noexcept + { + return vatomicsz_dec_get(const_cast(&_v)); + } + + vsize_t + fetch_and(vsize_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return vatomicsz_get_and_rel(const_cast(&_v), + v); + case memory_order_relaxed: + return vatomicsz_get_and_rlx(const_cast(&_v), + v); + case memory_order_consume: + case memory_order_acquire: + return vatomicsz_get_and_acq(const_cast(&_v), + v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomicsz_get_and(const_cast(&_v), v); + } + } + + vsize_t operator&=(vsize_t v) volatile noexcept + { + return fetch_and(v); + } + + vsize_t + fetch_or(vsize_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return vatomicsz_get_or_rel(const_cast(&_v), + v); + case memory_order_relaxed: + return vatomicsz_get_or_rlx(const_cast(&_v), + v); + case memory_order_consume: + case memory_order_acquire: + return vatomicsz_get_or_acq(const_cast(&_v), + v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomicsz_get_or(const_cast(&_v), v); + } + } + + vsize_t operator|=(vsize_t v) volatile noexcept + { + return fetch_or(v); + } + + vsize_t + fetch_xor(vsize_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return vatomicsz_get_xor_rel(const_cast(&_v), + v); + case memory_order_relaxed: + return vatomicsz_get_xor_rlx(const_cast(&_v), + v); + case memory_order_consume: + case memory_order_acquire: + return vatomicsz_get_xor_acq(const_cast(&_v), + v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomicsz_get_xor(const_cast(&_v), v); + } + } + + vsize_t operator^=(vsize_t v) volatile noexcept + { + return fetch_xor(v); + } + vsize_t fetch_add(vsize_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return (vatomicsz_get_add_rel((&_v), v)); + case memory_order_relaxed: + return (vatomicsz_get_add_rlx((&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return (vatomicsz_get_add_acq((&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomicsz_get_add((&_v), v)); + } + } + vsize_t operator+=(vsize_t v) noexcept + { + return fetch_add(v); + } + // v++ + vsize_t operator++(int) noexcept + { + return vatomicsz_get_inc((&_v)); + } + // ++v + vsize_t operator++() noexcept + { + return vatomicsz_inc_get((&_v)); + } + + vsize_t fetch_sub(vsize_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomicsz_get_sub_rel((&_v), v); + case memory_order_relaxed: + return vatomicsz_get_sub_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomicsz_get_sub_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomicsz_get_sub((&_v), v); + } + } + vsize_t operator-=(vsize_t v) noexcept + { + return fetch_sub(v); + } + // v-- + vsize_t operator--(int) noexcept + { + return vatomicsz_get_dec((&_v)); + } + // --v + vsize_t operator--() noexcept + { + return vatomicsz_dec_get((&_v)); + } + + vsize_t fetch_and(vsize_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomicsz_get_and_rel((&_v), v); + case memory_order_relaxed: + return 
vatomicsz_get_and_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomicsz_get_and_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomicsz_get_and((&_v), v); + } + } + + vsize_t operator&=(vsize_t v) noexcept + { + return fetch_and(v); + } + + vsize_t fetch_or(vsize_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomicsz_get_or_rel((&_v), v); + case memory_order_relaxed: + return vatomicsz_get_or_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomicsz_get_or_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomicsz_get_or((&_v), v); + } + } + + vsize_t operator|=(vsize_t v) noexcept + { + return fetch_or(v); + } + + vsize_t fetch_xor(vsize_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomicsz_get_xor_rel((&_v), v); + case memory_order_relaxed: + return vatomicsz_get_xor_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomicsz_get_xor_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomicsz_get_xor((&_v), v); + } + } + + vsize_t operator^=(vsize_t v) noexcept + { + return fetch_xor(v); + } + bool is_lock_free() volatile const noexcept + { + return true; + } + bool is_lock_free() const noexcept + { + return true; + } + + private: + vatomicsz_t _v; + }; +}; // namespace vsync + +#endif diff --git a/include/vsync/atomic/core_u16.hpp b/include/vsync/atomic/core_u16.hpp new file mode 100644 index 00000000..6648752c --- /dev/null +++ b/include/vsync/atomic/core_u16.hpp @@ -0,0 +1,536 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ * SPDX-License-Identifier: MIT + */ +#ifndef VATOMIC_CORE_U16_HPP +#define VATOMIC_CORE_U16_HPP +/*!!!Warning: File generated by tmplr; DO NOT EDIT.!!!*/ +extern "C" { +#include +} +namespace vsync +{ + template <> struct atomic { + atomic(const atomic &) = delete; + atomic &operator=(const atomic &) = delete; + atomic &operator=(const atomic &) volatile = delete; + + atomic() + { + vatomic16_init(&_v, 0U); + } + atomic(vuint16_t v) + { + vatomic16_init(&_v, v); + } + vuint16_t + load(memory_order order = memory_order_seq_cst) volatile const noexcept + { + switch (order) { + case memory_order_consume: + case memory_order_acquire: + return (vatomic16_read_acq(const_cast(&_v))); + case memory_order_relaxed: + return (vatomic16_read_rlx(const_cast(&_v))); + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomic16_read(const_cast(&_v))); + } + } + void store(vuint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + vatomic16_write_rel(const_cast(&_v), v); + break; + case memory_order_relaxed: + vatomic16_write_rlx(const_cast(&_v), v); + break; + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_consume: + case memory_order_seq_cst: + default: + return vatomic16_write(const_cast(&_v), v); + } + } + + vuint16_t operator=(vuint16_t v) volatile noexcept + { + store(v); + return v; + } + + operator vuint16_t() volatile const noexcept + { + return load(); + } + + vuint16_t + exchange(vuint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return ( + vatomic16_xchg_rel(const_cast(&_v), v)); + case memory_order_relaxed: + return ( + vatomic16_xchg_rlx(const_cast(&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return ( + vatomic16_xchg_acq(const_cast(&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomic16_xchg(const_cast(&_v), v)); + } + } + + bool compare_exchange_strong( + vuint16_t &expected, vuint16_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + vuint16_t old = 0; + switch (order) { + case memory_order_release: + old = (vatomic16_cmpxchg_rel(const_cast(&_v), + expected, desired)); + break; + case memory_order_relaxed: + old = (vatomic16_cmpxchg_rlx(const_cast(&_v), + expected, desired)); + break; + case memory_order_consume: + case memory_order_acquire: + old = (vatomic16_cmpxchg_acq(const_cast(&_v), + expected, desired)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old = (vatomic16_cmpxchg(const_cast(&_v), + expected, desired)); + break; + } + if (old == expected) { + return true; + } else { + expected = old; + return false; + } + } + bool compare_exchange_weak( + vuint16_t &expected, vuint16_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vuint16_t load(memory_order order = memory_order_seq_cst) const noexcept + { + switch (order) { + case memory_order_consume: + case memory_order_acquire: + return (vatomic16_read_acq((&_v))); + case memory_order_relaxed: + return (vatomic16_read_rlx((&_v))); + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomic16_read((&_v))); + } + } + void 
store(vuint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + vatomic16_write_rel((&_v), v); + break; + case memory_order_relaxed: + vatomic16_write_rlx((&_v), v); + break; + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_consume: + case memory_order_seq_cst: + default: + return vatomic16_write((&_v), v); + } + } + + vuint16_t operator=(vuint16_t v) noexcept + { + store(v); + return v; + } + + operator vuint16_t() const noexcept + { + return load(); + } + + vuint16_t exchange(vuint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return (vatomic16_xchg_rel((&_v), v)); + case memory_order_relaxed: + return (vatomic16_xchg_rlx((&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return (vatomic16_xchg_acq((&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomic16_xchg((&_v), v)); + } + } + + bool compare_exchange_strong( + vuint16_t &expected, vuint16_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + vuint16_t old = 0; + switch (order) { + case memory_order_release: + old = (vatomic16_cmpxchg_rel((&_v), expected, desired)); + break; + case memory_order_relaxed: + old = (vatomic16_cmpxchg_rlx((&_v), expected, desired)); + break; + case memory_order_consume: + case memory_order_acquire: + old = (vatomic16_cmpxchg_acq((&_v), expected, desired)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old = (vatomic16_cmpxchg((&_v), expected, desired)); + break; + } + if (old == expected) { + return true; + } else { + expected = old; + return false; + } + } + bool compare_exchange_weak( + vuint16_t &expected, vuint16_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vuint16_t + fetch_add(vuint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return (vatomic16_get_add_rel( + const_cast(&_v), v)); + case memory_order_relaxed: + return (vatomic16_get_add_rlx( + const_cast(&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return (vatomic16_get_add_acq( + const_cast(&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return ( + vatomic16_get_add(const_cast(&_v), v)); + } + } + vuint16_t operator+=(vuint16_t v) volatile noexcept + { + return fetch_add(v); + } + // v++ + vuint16_t operator++(int) volatile noexcept + { + return vatomic16_get_inc(const_cast(&_v)); + } + // ++v + vuint16_t operator++() volatile noexcept + { + return vatomic16_inc_get(const_cast(&_v)); + } + + vuint16_t + fetch_sub(vuint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return vatomic16_get_sub_rel(const_cast(&_v), + v); + case memory_order_relaxed: + return vatomic16_get_sub_rlx(const_cast(&_v), + v); + case memory_order_consume: + case memory_order_acquire: + return vatomic16_get_sub_acq(const_cast(&_v), + v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic16_get_sub(const_cast(&_v), v); + } + } + vuint16_t operator-=(vuint16_t v) volatile noexcept + { + return fetch_sub(v); + } + // v-- + vuint16_t operator--(int) volatile noexcept + { + 
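+            // Post-decrement must return the old value, hence get_dec
+            // ("get, then dec"); the pre-decrement operator below uses
+            // dec_get, which returns the freshly decremented value.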
return vatomic16_get_dec(const_cast(&_v)); + } + // --v + vuint16_t operator--() volatile noexcept + { + return vatomic16_dec_get(const_cast(&_v)); + } + + vuint16_t + fetch_and(vuint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return vatomic16_get_and_rel(const_cast(&_v), + v); + case memory_order_relaxed: + return vatomic16_get_and_rlx(const_cast(&_v), + v); + case memory_order_consume: + case memory_order_acquire: + return vatomic16_get_and_acq(const_cast(&_v), + v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic16_get_and(const_cast(&_v), v); + } + } + + vuint16_t operator&=(vuint16_t v) volatile noexcept + { + return fetch_and(v); + } + + vuint16_t + fetch_or(vuint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return vatomic16_get_or_rel(const_cast(&_v), + v); + case memory_order_relaxed: + return vatomic16_get_or_rlx(const_cast(&_v), + v); + case memory_order_consume: + case memory_order_acquire: + return vatomic16_get_or_acq(const_cast(&_v), + v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic16_get_or(const_cast(&_v), v); + } + } + + vuint16_t operator|=(vuint16_t v) volatile noexcept + { + return fetch_or(v); + } + + vuint16_t + fetch_xor(vuint16_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return vatomic16_get_xor_rel(const_cast(&_v), + v); + case memory_order_relaxed: + return vatomic16_get_xor_rlx(const_cast(&_v), + v); + case memory_order_consume: + case memory_order_acquire: + return vatomic16_get_xor_acq(const_cast(&_v), + v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic16_get_xor(const_cast(&_v), v); + } + } + + vuint16_t operator^=(vuint16_t v) volatile noexcept + { + return fetch_xor(v); + } + vuint16_t fetch_add(vuint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return (vatomic16_get_add_rel((&_v), v)); + case memory_order_relaxed: + return (vatomic16_get_add_rlx((&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return (vatomic16_get_add_acq((&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomic16_get_add((&_v), v)); + } + } + vuint16_t operator+=(vuint16_t v) noexcept + { + return fetch_add(v); + } + // v++ + vuint16_t operator++(int) noexcept + { + return vatomic16_get_inc((&_v)); + } + // ++v + vuint16_t operator++() noexcept + { + return vatomic16_inc_get((&_v)); + } + + vuint16_t fetch_sub(vuint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomic16_get_sub_rel((&_v), v); + case memory_order_relaxed: + return vatomic16_get_sub_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic16_get_sub_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic16_get_sub((&_v), v); + } + } + vuint16_t operator-=(vuint16_t v) noexcept + { + return fetch_sub(v); + } + // v-- + vuint16_t operator--(int) noexcept + { + return vatomic16_get_dec((&_v)); + } + // --v + vuint16_t operator--() noexcept + { + return vatomic16_dec_get((&_v)); + } + + vuint16_t fetch_and(vuint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { 
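+            // Each C++ memory order is mapped to the matching vatomic
+            // suffix: _rel for release, _rlx for relaxed, _acq for acquire
+            // (and consume), and the unsuffixed seq_cst call for everything
+            // else, including acq_rel.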
+ case memory_order_release: + return vatomic16_get_and_rel((&_v), v); + case memory_order_relaxed: + return vatomic16_get_and_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic16_get_and_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic16_get_and((&_v), v); + } + } + + vuint16_t operator&=(vuint16_t v) noexcept + { + return fetch_and(v); + } + + vuint16_t fetch_or(vuint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomic16_get_or_rel((&_v), v); + case memory_order_relaxed: + return vatomic16_get_or_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic16_get_or_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic16_get_or((&_v), v); + } + } + + vuint16_t operator|=(vuint16_t v) noexcept + { + return fetch_or(v); + } + + vuint16_t fetch_xor(vuint16_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomic16_get_xor_rel((&_v), v); + case memory_order_relaxed: + return vatomic16_get_xor_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic16_get_xor_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic16_get_xor((&_v), v); + } + } + + vuint16_t operator^=(vuint16_t v) noexcept + { + return fetch_xor(v); + } + bool is_lock_free() volatile const noexcept + { + return true; + } + bool is_lock_free() const noexcept + { + return true; + } + + private: + vatomic16_t _v; + }; +}; // namespace vsync + +#endif diff --git a/include/vsync/atomic/core_u32.hpp b/include/vsync/atomic/core_u32.hpp new file mode 100644 index 00000000..a9fca7d9 --- /dev/null +++ b/include/vsync/atomic/core_u32.hpp @@ -0,0 +1,536 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ * SPDX-License-Identifier: MIT + */ +#ifndef VATOMIC_CORE_U32_HPP +#define VATOMIC_CORE_U32_HPP +/*!!!Warning: File generated by tmplr; DO NOT EDIT.!!!*/ +extern "C" { +#include +} +namespace vsync +{ + template <> struct atomic { + atomic(const atomic &) = delete; + atomic &operator=(const atomic &) = delete; + atomic &operator=(const atomic &) volatile = delete; + + atomic() + { + vatomic32_init(&_v, 0U); + } + atomic(vuint32_t v) + { + vatomic32_init(&_v, v); + } + vuint32_t + load(memory_order order = memory_order_seq_cst) volatile const noexcept + { + switch (order) { + case memory_order_consume: + case memory_order_acquire: + return (vatomic32_read_acq(const_cast(&_v))); + case memory_order_relaxed: + return (vatomic32_read_rlx(const_cast(&_v))); + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomic32_read(const_cast(&_v))); + } + } + void store(vuint32_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + vatomic32_write_rel(const_cast(&_v), v); + break; + case memory_order_relaxed: + vatomic32_write_rlx(const_cast(&_v), v); + break; + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_consume: + case memory_order_seq_cst: + default: + return vatomic32_write(const_cast(&_v), v); + } + } + + vuint32_t operator=(vuint32_t v) volatile noexcept + { + store(v); + return v; + } + + operator vuint32_t() volatile const noexcept + { + return load(); + } + + vuint32_t + exchange(vuint32_t v, + memory_order order = memory_order_seq_cst) volatile noexcept + { + switch (order) { + case memory_order_release: + return ( + vatomic32_xchg_rel(const_cast(&_v), v)); + case memory_order_relaxed: + return ( + vatomic32_xchg_rlx(const_cast(&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return ( + vatomic32_xchg_acq(const_cast(&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomic32_xchg(const_cast(&_v), v)); + } + } + + bool compare_exchange_strong( + vuint32_t &expected, vuint32_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + vuint32_t old = 0; + switch (order) { + case memory_order_release: + old = (vatomic32_cmpxchg_rel(const_cast(&_v), + expected, desired)); + break; + case memory_order_relaxed: + old = (vatomic32_cmpxchg_rlx(const_cast(&_v), + expected, desired)); + break; + case memory_order_consume: + case memory_order_acquire: + old = (vatomic32_cmpxchg_acq(const_cast(&_v), + expected, desired)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old = (vatomic32_cmpxchg(const_cast(&_v), + expected, desired)); + break; + } + if (old == expected) { + return true; + } else { + expected = old; + return false; + } + } + bool compare_exchange_weak( + vuint32_t &expected, vuint32_t desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + vuint32_t load(memory_order order = memory_order_seq_cst) const noexcept + { + switch (order) { + case memory_order_consume: + case memory_order_acquire: + return (vatomic32_read_acq((&_v))); + case memory_order_relaxed: + return (vatomic32_read_rlx((&_v))); + case memory_order_release: + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return (vatomic32_read((&_v))); + } + } + void 
+    vuint32_t load(memory_order order = memory_order_seq_cst) const noexcept
+    {
+        switch (order) {
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic32_read_acq((&_v)));
+        case memory_order_relaxed:
+            return (vatomic32_read_rlx((&_v)));
+        case memory_order_release:
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic32_read((&_v)));
+        }
+    }
+    void store(vuint32_t v, memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            vatomic32_write_rel((&_v), v);
+            break;
+        case memory_order_relaxed:
+            vatomic32_write_rlx((&_v), v);
+            break;
+        case memory_order_acquire:
+        case memory_order_acq_rel:
+        case memory_order_consume:
+        case memory_order_seq_cst:
+        default:
+            return vatomic32_write((&_v), v);
+        }
+    }
+
+    vuint32_t operator=(vuint32_t v) noexcept
+    {
+        store(v);
+        return v;
+    }
+
+    operator vuint32_t() const noexcept
+    {
+        return load();
+    }
+
+    vuint32_t exchange(vuint32_t v,
+                       memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return (vatomic32_xchg_rel((&_v), v));
+        case memory_order_relaxed:
+            return (vatomic32_xchg_rlx((&_v), v));
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic32_xchg_acq((&_v), v));
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic32_xchg((&_v), v));
+        }
+    }
+
+    bool compare_exchange_strong(
+        vuint32_t &expected, vuint32_t desired,
+        memory_order order = memory_order_seq_cst,
+        memory_order failure = memory_order_seq_cst) noexcept
+    {
+        vuint32_t old = 0;
+        switch (order) {
+        case memory_order_release:
+            old = (vatomic32_cmpxchg_rel((&_v), expected, desired));
+            break;
+        case memory_order_relaxed:
+            old = (vatomic32_cmpxchg_rlx((&_v), expected, desired));
+            break;
+        case memory_order_consume:
+        case memory_order_acquire:
+            old = (vatomic32_cmpxchg_acq((&_v), expected, desired));
+            break;
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            old = (vatomic32_cmpxchg((&_v), expected, desired));
+            break;
+        }
+        if (old == expected) {
+            return true;
+        } else {
+            expected = old;
+            return false;
+        }
+    }
+    bool compare_exchange_weak(
+        vuint32_t &expected, vuint32_t desired,
+        memory_order order = memory_order_seq_cst,
+        memory_order failure = memory_order_seq_cst) noexcept
+    {
+        return compare_exchange_strong(expected, desired, order, failure);
+    }
+
+    vuint32_t
+    fetch_add(vuint32_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return (vatomic32_get_add_rel(const_cast<vatomic32_t *>(&_v), v));
+        case memory_order_relaxed:
+            return (vatomic32_get_add_rlx(const_cast<vatomic32_t *>(&_v), v));
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic32_get_add_acq(const_cast<vatomic32_t *>(&_v), v));
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic32_get_add(const_cast<vatomic32_t *>(&_v), v));
+        }
+    }
+    vuint32_t operator+=(vuint32_t v) volatile noexcept
+    {
+        return fetch_add(v);
+    }
+    // v++
+    vuint32_t operator++(int) volatile noexcept
+    {
+        return vatomic32_get_inc(const_cast<vatomic32_t *>(&_v));
+    }
+    // ++v
+    vuint32_t operator++() volatile noexcept
+    {
+        return vatomic32_inc_get(const_cast<vatomic32_t *>(&_v));
+    }
+
+    vuint32_t
+    fetch_sub(vuint32_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic32_get_sub_rel(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic32_get_sub_rlx(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic32_get_sub_acq(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic32_get_sub(const_cast<vatomic32_t *>(&_v), v);
+        }
+    }
+    vuint32_t operator-=(vuint32_t v) volatile noexcept
+    {
+        return fetch_sub(v);
+    }
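+    // Increment/decrement operators map the post-forms onto vatomic32_get_*
+    // (which return the value *before* the update) and the pre-forms onto
+    // vatomic32_*_get (which return the value *after* it).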
+    // v--
+    vuint32_t operator--(int) volatile noexcept
+    {
+        return vatomic32_get_dec(const_cast<vatomic32_t *>(&_v));
+    }
+    // --v
+    vuint32_t operator--() volatile noexcept
+    {
+        return vatomic32_dec_get(const_cast<vatomic32_t *>(&_v));
+    }
+
+    vuint32_t
+    fetch_and(vuint32_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic32_get_and_rel(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic32_get_and_rlx(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic32_get_and_acq(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic32_get_and(const_cast<vatomic32_t *>(&_v), v);
+        }
+    }
+
+    vuint32_t operator&=(vuint32_t v) volatile noexcept
+    {
+        return fetch_and(v);
+    }
+
+    vuint32_t
+    fetch_or(vuint32_t v,
+             memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic32_get_or_rel(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic32_get_or_rlx(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic32_get_or_acq(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic32_get_or(const_cast<vatomic32_t *>(&_v), v);
+        }
+    }
+
+    vuint32_t operator|=(vuint32_t v) volatile noexcept
+    {
+        return fetch_or(v);
+    }
+
+    vuint32_t
+    fetch_xor(vuint32_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic32_get_xor_rel(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic32_get_xor_rlx(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic32_get_xor_acq(const_cast<vatomic32_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic32_get_xor(const_cast<vatomic32_t *>(&_v), v);
+        }
+    }
+
+    vuint32_t operator^=(vuint32_t v) volatile noexcept
+    {
+        return fetch_xor(v);
+    }
+    vuint32_t fetch_add(vuint32_t v,
+                        memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return (vatomic32_get_add_rel((&_v), v));
+        case memory_order_relaxed:
+            return (vatomic32_get_add_rlx((&_v), v));
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic32_get_add_acq((&_v), v));
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic32_get_add((&_v), v));
+        }
+    }
+    vuint32_t operator+=(vuint32_t v) noexcept
+    {
+        return fetch_add(v);
+    }
+    // v++
+    vuint32_t operator++(int) noexcept
+    {
+        return vatomic32_get_inc((&_v));
+    }
+    // ++v
+    vuint32_t operator++() noexcept
+    {
+        return vatomic32_inc_get((&_v));
+    }
+
+    vuint32_t fetch_sub(vuint32_t v,
+                        memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic32_get_sub_rel((&_v), v);
+        case memory_order_relaxed:
+            return vatomic32_get_sub_rlx((&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic32_get_sub_acq((&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic32_get_sub((&_v), v);
+        }
+    }
+    vuint32_t operator-=(vuint32_t v) noexcept
+    {
+        return fetch_sub(v);
+    }
+    // v--
+    vuint32_t operator--(int) noexcept
+    {
+        return vatomic32_get_dec((&_v));
+    }
+    // --v
+    vuint32_t operator--() noexcept
+    {
+        return vatomic32_dec_get((&_v));
+    }
+
+    vuint32_t fetch_and(vuint32_t v,
+                        memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
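+        // memory_order_release/_relaxed/_acquire select the matching vatomic32
+        // variant; acq_rel, seq_cst, and any unknown value fall back to the
+        // seq_cst default, as in every dispatch switch above.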
+ case memory_order_release: + return vatomic32_get_and_rel((&_v), v); + case memory_order_relaxed: + return vatomic32_get_and_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic32_get_and_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic32_get_and((&_v), v); + } + } + + vuint32_t operator&=(vuint32_t v) noexcept + { + return fetch_and(v); + } + + vuint32_t fetch_or(vuint32_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomic32_get_or_rel((&_v), v); + case memory_order_relaxed: + return vatomic32_get_or_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic32_get_or_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic32_get_or((&_v), v); + } + } + + vuint32_t operator|=(vuint32_t v) noexcept + { + return fetch_or(v); + } + + vuint32_t fetch_xor(vuint32_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomic32_get_xor_rel((&_v), v); + case memory_order_relaxed: + return vatomic32_get_xor_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic32_get_xor_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic32_get_xor((&_v), v); + } + } + + vuint32_t operator^=(vuint32_t v) noexcept + { + return fetch_xor(v); + } + bool is_lock_free() volatile const noexcept + { + return true; + } + bool is_lock_free() const noexcept + { + return true; + } + + private: + vatomic32_t _v; + }; +}; // namespace vsync + +#endif diff --git a/include/vsync/atomic/core_u64.hpp b/include/vsync/atomic/core_u64.hpp new file mode 100644 index 00000000..5a1fd6f9 --- /dev/null +++ b/include/vsync/atomic/core_u64.hpp @@ -0,0 +1,536 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef VATOMIC_CORE_U64_HPP
+#define VATOMIC_CORE_U64_HPP
+/*!!!Warning: File generated by tmplr; DO NOT EDIT.!!!*/
+extern "C" {
+#include <vsync/atomic/core.h>
+}
+namespace vsync
+{
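+// Illustrative use (not part of the generated output): assuming the primary
+// template vsync::atomic and the memory_order enumerators are provided by
+// <vsync/atomic.hpp>, the specialization below behaves much like
+// std::atomic<uint64_t>:
+//
+//     vsync::atomic<vuint64_t> counter{0U};
+//     counter.fetch_add(1U, vsync::memory_order_relaxed);
+//     counter++; // post-increment, returns the old value
+//     vuint64_t snap = counter.load(vsync::memory_order_acquire);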
+template <> struct atomic<vuint64_t> {
+    atomic(const atomic &) = delete;
+    atomic &operator=(const atomic &) = delete;
+    atomic &operator=(const atomic &) volatile = delete;
+
+    atomic()
+    {
+        vatomic64_init(&_v, 0U);
+    }
+    atomic(vuint64_t v)
+    {
+        vatomic64_init(&_v, v);
+    }
+    vuint64_t
+    load(memory_order order = memory_order_seq_cst) volatile const noexcept
+    {
+        switch (order) {
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic64_read_acq(const_cast<vatomic64_t *>(&_v)));
+        case memory_order_relaxed:
+            return (vatomic64_read_rlx(const_cast<vatomic64_t *>(&_v)));
+        case memory_order_release:
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic64_read(const_cast<vatomic64_t *>(&_v)));
+        }
+    }
+    void store(vuint64_t v,
+               memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            vatomic64_write_rel(const_cast<vatomic64_t *>(&_v), v);
+            break;
+        case memory_order_relaxed:
+            vatomic64_write_rlx(const_cast<vatomic64_t *>(&_v), v);
+            break;
+        case memory_order_acquire:
+        case memory_order_acq_rel:
+        case memory_order_consume:
+        case memory_order_seq_cst:
+        default:
+            return vatomic64_write(const_cast<vatomic64_t *>(&_v), v);
+        }
+    }
+
+    vuint64_t operator=(vuint64_t v) volatile noexcept
+    {
+        store(v);
+        return v;
+    }
+
+    operator vuint64_t() volatile const noexcept
+    {
+        return load();
+    }
+
+    vuint64_t
+    exchange(vuint64_t v,
+             memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return (vatomic64_xchg_rel(const_cast<vatomic64_t *>(&_v), v));
+        case memory_order_relaxed:
+            return (vatomic64_xchg_rlx(const_cast<vatomic64_t *>(&_v), v));
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic64_xchg_acq(const_cast<vatomic64_t *>(&_v), v));
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic64_xchg(const_cast<vatomic64_t *>(&_v), v));
+        }
+    }
+
+    bool compare_exchange_strong(
+        vuint64_t &expected, vuint64_t desired,
+        memory_order order = memory_order_seq_cst,
+        memory_order failure = memory_order_seq_cst) volatile noexcept
+    {
+        vuint64_t old = 0;
+        switch (order) {
+        case memory_order_release:
+            old = (vatomic64_cmpxchg_rel(const_cast<vatomic64_t *>(&_v),
+                                         expected, desired));
+            break;
+        case memory_order_relaxed:
+            old = (vatomic64_cmpxchg_rlx(const_cast<vatomic64_t *>(&_v),
+                                         expected, desired));
+            break;
+        case memory_order_consume:
+        case memory_order_acquire:
+            old = (vatomic64_cmpxchg_acq(const_cast<vatomic64_t *>(&_v),
+                                         expected, desired));
+            break;
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            old = (vatomic64_cmpxchg(const_cast<vatomic64_t *>(&_v),
+                                     expected, desired));
+            break;
+        }
+        if (old == expected) {
+            return true;
+        } else {
+            expected = old;
+            return false;
+        }
+    }
+    bool compare_exchange_weak(
+        vuint64_t &expected, vuint64_t desired,
+        memory_order order = memory_order_seq_cst,
+        memory_order failure = memory_order_seq_cst) volatile noexcept
+    {
+        return compare_exchange_strong(expected, desired, order, failure);
+    }
+
+    vuint64_t load(memory_order order = memory_order_seq_cst) const noexcept
+    {
+        switch (order) {
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic64_read_acq((&_v)));
+        case memory_order_relaxed:
+            return (vatomic64_read_rlx((&_v)));
+        case memory_order_release:
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic64_read((&_v)));
+        }
+    }
+    void store(vuint64_t v, memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            vatomic64_write_rel((&_v), v);
+            break;
+        case memory_order_relaxed:
+            vatomic64_write_rlx((&_v), v);
+            break;
+        case memory_order_acquire:
+        case memory_order_acq_rel:
+        case memory_order_consume:
+        case memory_order_seq_cst:
+        default:
+            return vatomic64_write((&_v), v);
+        }
+    }
+
+    vuint64_t operator=(vuint64_t v) noexcept
+    {
+        store(v);
+        return v;
+    }
+
+    operator vuint64_t() const noexcept
+    {
+        return load();
+    }
+
+    vuint64_t exchange(vuint64_t v,
+                       memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return (vatomic64_xchg_rel((&_v), v));
+        case memory_order_relaxed:
+            return (vatomic64_xchg_rlx((&_v), v));
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic64_xchg_acq((&_v), v));
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic64_xchg((&_v), v));
+        }
+    }
+
+    bool compare_exchange_strong(
+        vuint64_t &expected, vuint64_t desired,
+        memory_order order = memory_order_seq_cst,
+        memory_order failure = memory_order_seq_cst) noexcept
+    {
+        vuint64_t old = 0;
+        switch (order) {
+        case memory_order_release:
+            old = (vatomic64_cmpxchg_rel((&_v), expected, desired));
+            break;
+        case memory_order_relaxed:
+            old = (vatomic64_cmpxchg_rlx((&_v), expected, desired));
+            break;
+        case memory_order_consume:
+        case memory_order_acquire:
+            old = (vatomic64_cmpxchg_acq((&_v), expected, desired));
+            break;
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            old = (vatomic64_cmpxchg((&_v), expected, desired));
+            break;
+        }
+        if (old == expected) {
+            return true;
+        } else {
+            expected = old;
+            return false;
+        }
+    }
+    bool compare_exchange_weak(
+        vuint64_t &expected, vuint64_t desired,
+        memory_order order = memory_order_seq_cst,
+        memory_order failure = memory_order_seq_cst) noexcept
+    {
+        return compare_exchange_strong(expected, desired, order, failure);
+    }
+
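+    // The volatile-qualified fetch_* overloads below strip cv-qualifiers with
+    // const_cast before dispatching, since the vatomic C API takes plain
+    // vatomic64_t pointers.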
+    vuint64_t
+    fetch_add(vuint64_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return (vatomic64_get_add_rel(const_cast<vatomic64_t *>(&_v), v));
+        case memory_order_relaxed:
+            return (vatomic64_get_add_rlx(const_cast<vatomic64_t *>(&_v), v));
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic64_get_add_acq(const_cast<vatomic64_t *>(&_v), v));
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic64_get_add(const_cast<vatomic64_t *>(&_v), v));
+        }
+    }
+    vuint64_t operator+=(vuint64_t v) volatile noexcept
+    {
+        return fetch_add(v);
+    }
+    // v++
+    vuint64_t operator++(int) volatile noexcept
+    {
+        return vatomic64_get_inc(const_cast<vatomic64_t *>(&_v));
+    }
+    // ++v
+    vuint64_t operator++() volatile noexcept
+    {
+        return vatomic64_inc_get(const_cast<vatomic64_t *>(&_v));
+    }
+
+    vuint64_t
+    fetch_sub(vuint64_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic64_get_sub_rel(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic64_get_sub_rlx(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic64_get_sub_acq(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic64_get_sub(const_cast<vatomic64_t *>(&_v), v);
+        }
+    }
+    vuint64_t operator-=(vuint64_t v) volatile noexcept
+    {
+        return fetch_sub(v);
+    }
+    // v--
+    vuint64_t operator--(int) volatile noexcept
+    {
+        return vatomic64_get_dec(const_cast<vatomic64_t *>(&_v));
+    }
+    // --v
+    vuint64_t operator--() volatile noexcept
+    {
+        return vatomic64_dec_get(const_cast<vatomic64_t *>(&_v));
+    }
+
+    vuint64_t
+    fetch_and(vuint64_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic64_get_and_rel(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic64_get_and_rlx(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic64_get_and_acq(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic64_get_and(const_cast<vatomic64_t *>(&_v), v);
+        }
+    }
+
+    vuint64_t operator&=(vuint64_t v) volatile noexcept
+    {
+        return fetch_and(v);
+    }
+
+    vuint64_t
+    fetch_or(vuint64_t v,
+             memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic64_get_or_rel(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic64_get_or_rlx(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic64_get_or_acq(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic64_get_or(const_cast<vatomic64_t *>(&_v), v);
+        }
+    }
+
+    vuint64_t operator|=(vuint64_t v) volatile noexcept
+    {
+        return fetch_or(v);
+    }
+
+    vuint64_t
+    fetch_xor(vuint64_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic64_get_xor_rel(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic64_get_xor_rlx(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic64_get_xor_acq(const_cast<vatomic64_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic64_get_xor(const_cast<vatomic64_t *>(&_v), v);
+        }
+    }
+
+    vuint64_t operator^=(vuint64_t v) volatile noexcept
+    {
+        return fetch_xor(v);
+    }
+    vuint64_t fetch_add(vuint64_t v,
+                        memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return (vatomic64_get_add_rel((&_v), v));
+        case memory_order_relaxed:
+            return (vatomic64_get_add_rlx((&_v), v));
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic64_get_add_acq((&_v), v));
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic64_get_add((&_v), v));
+        }
+    }
+    vuint64_t operator+=(vuint64_t v) noexcept
+    {
+        return fetch_add(v);
+    }
+    // v++
+    vuint64_t operator++(int) noexcept
+    {
+        return vatomic64_get_inc((&_v));
+    }
+    // ++v
+    vuint64_t operator++() noexcept
+    {
+        return vatomic64_inc_get((&_v));
+    }
+
+    vuint64_t fetch_sub(vuint64_t v,
+                        memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic64_get_sub_rel((&_v), v);
+        case memory_order_relaxed:
+            return vatomic64_get_sub_rlx((&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic64_get_sub_acq((&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic64_get_sub((&_v), v);
+        }
+    }
+    vuint64_t operator-=(vuint64_t v) noexcept
+    {
+        return fetch_sub(v);
+    }
+    // v--
+    vuint64_t operator--(int) noexcept
+    {
+        return vatomic64_get_dec((&_v));
+    }
+    // --v
+    vuint64_t operator--() noexcept
+    {
+        return vatomic64_dec_get((&_v));
+    }
+
+    vuint64_t fetch_and(vuint64_t v,
+                        memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
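+        // Every vatomic64_get_and variant returns the value the object held
+        // *before* the AND, matching std::atomic<T>::fetch_and semantics.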
+ case memory_order_release: + return vatomic64_get_and_rel((&_v), v); + case memory_order_relaxed: + return vatomic64_get_and_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic64_get_and_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic64_get_and((&_v), v); + } + } + + vuint64_t operator&=(vuint64_t v) noexcept + { + return fetch_and(v); + } + + vuint64_t fetch_or(vuint64_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomic64_get_or_rel((&_v), v); + case memory_order_relaxed: + return vatomic64_get_or_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic64_get_or_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic64_get_or((&_v), v); + } + } + + vuint64_t operator|=(vuint64_t v) noexcept + { + return fetch_or(v); + } + + vuint64_t fetch_xor(vuint64_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomic64_get_xor_rel((&_v), v); + case memory_order_relaxed: + return vatomic64_get_xor_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic64_get_xor_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic64_get_xor((&_v), v); + } + } + + vuint64_t operator^=(vuint64_t v) noexcept + { + return fetch_xor(v); + } + bool is_lock_free() volatile const noexcept + { + return true; + } + bool is_lock_free() const noexcept + { + return true; + } + + private: + vatomic64_t _v; + }; +}; // namespace vsync + +#endif diff --git a/include/vsync/atomic/core_u8.hpp b/include/vsync/atomic/core_u8.hpp new file mode 100644 index 00000000..4131c5a7 --- /dev/null +++ b/include/vsync/atomic/core_u8.hpp @@ -0,0 +1,535 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef VATOMIC_CORE_U8_HPP
+#define VATOMIC_CORE_U8_HPP
+/*!!!Warning: File generated by tmplr; DO NOT EDIT.!!!*/
+extern "C" {
+#include <vsync/atomic/core.h>
+}
+namespace vsync
+{
+template <> struct atomic<vuint8_t> {
+    atomic(const atomic &) = delete;
+    atomic &operator=(const atomic &) = delete;
+    atomic &operator=(const atomic &) volatile = delete;
+
+    atomic()
+    {
+        vatomic8_init(&_v, 0U);
+    }
+    atomic(vuint8_t v)
+    {
+        vatomic8_init(&_v, v);
+    }
+    vuint8_t
+    load(memory_order order = memory_order_seq_cst) volatile const noexcept
+    {
+        switch (order) {
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic8_read_acq(const_cast<vatomic8_t *>(&_v)));
+        case memory_order_relaxed:
+            return (vatomic8_read_rlx(const_cast<vatomic8_t *>(&_v)));
+        case memory_order_release:
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic8_read(const_cast<vatomic8_t *>(&_v)));
+        }
+    }
+    void store(vuint8_t v,
+               memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            vatomic8_write_rel(const_cast<vatomic8_t *>(&_v), v);
+            break;
+        case memory_order_relaxed:
+            vatomic8_write_rlx(const_cast<vatomic8_t *>(&_v), v);
+            break;
+        case memory_order_acquire:
+        case memory_order_acq_rel:
+        case memory_order_consume:
+        case memory_order_seq_cst:
+        default:
+            return vatomic8_write(const_cast<vatomic8_t *>(&_v), v);
+        }
+    }
+
+    vuint8_t operator=(vuint8_t v) volatile noexcept
+    {
+        store(v);
+        return v;
+    }
+
+    operator vuint8_t() volatile const noexcept
+    {
+        return load();
+    }
+
+    vuint8_t
+    exchange(vuint8_t v,
+             memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return (vatomic8_xchg_rel(const_cast<vatomic8_t *>(&_v), v));
+        case memory_order_relaxed:
+            return (vatomic8_xchg_rlx(const_cast<vatomic8_t *>(&_v), v));
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic8_xchg_acq(const_cast<vatomic8_t *>(&_v), v));
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic8_xchg(const_cast<vatomic8_t *>(&_v), v));
+        }
+    }
+
+    bool compare_exchange_strong(
+        vuint8_t &expected, vuint8_t desired,
+        memory_order order = memory_order_seq_cst,
+        memory_order failure = memory_order_seq_cst) volatile noexcept
+    {
+        vuint8_t old = 0;
+        switch (order) {
+        case memory_order_release:
+            old = (vatomic8_cmpxchg_rel(const_cast<vatomic8_t *>(&_v),
+                                        expected, desired));
+            break;
+        case memory_order_relaxed:
+            old = (vatomic8_cmpxchg_rlx(const_cast<vatomic8_t *>(&_v),
+                                        expected, desired));
+            break;
+        case memory_order_consume:
+        case memory_order_acquire:
+            old = (vatomic8_cmpxchg_acq(const_cast<vatomic8_t *>(&_v),
+                                        expected, desired));
+            break;
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            old = (vatomic8_cmpxchg(const_cast<vatomic8_t *>(&_v), expected,
+                                    desired));
+            break;
+        }
+        if (old == expected) {
+            return true;
+        } else {
+            expected = old;
+            return false;
+        }
+    }
+    bool compare_exchange_weak(
+        vuint8_t &expected, vuint8_t desired,
+        memory_order order = memory_order_seq_cst,
+        memory_order failure = memory_order_seq_cst) volatile noexcept
+    {
+        return compare_exchange_strong(expected, desired, order, failure);
+    }
+
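+    // Illustrative retry loop (not generated code): compare_exchange_strong
+    // rewrites `expected` with the observed value on failure, so a typical
+    // caller retries until the exchange succeeds, e.g.:
+    //
+    //     vuint8_t cur = flags.load(memory_order_relaxed);
+    //     while (!flags.compare_exchange_strong(cur, cur | 0x1U)) {
+    //         /* cur now holds the latest value; retry */
+    //     }
+    //
+    // where `flags` is assumed to be a vsync::atomic<vuint8_t>.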
+    vuint8_t load(memory_order order = memory_order_seq_cst) const noexcept
+    {
+        switch (order) {
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic8_read_acq((&_v)));
+        case memory_order_relaxed:
+            return (vatomic8_read_rlx((&_v)));
+        case memory_order_release:
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic8_read((&_v)));
+        }
+    }
+    void store(vuint8_t v, memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            vatomic8_write_rel((&_v), v);
+            break;
+        case memory_order_relaxed:
+            vatomic8_write_rlx((&_v), v);
+            break;
+        case memory_order_acquire:
+        case memory_order_acq_rel:
+        case memory_order_consume:
+        case memory_order_seq_cst:
+        default:
+            return vatomic8_write((&_v), v);
+        }
+    }
+
+    vuint8_t operator=(vuint8_t v) noexcept
+    {
+        store(v);
+        return v;
+    }
+
+    operator vuint8_t() const noexcept
+    {
+        return load();
+    }
+
+    vuint8_t exchange(vuint8_t v,
+                      memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return (vatomic8_xchg_rel((&_v), v));
+        case memory_order_relaxed:
+            return (vatomic8_xchg_rlx((&_v), v));
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic8_xchg_acq((&_v), v));
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic8_xchg((&_v), v));
+        }
+    }
+
+    bool compare_exchange_strong(
+        vuint8_t &expected, vuint8_t desired,
+        memory_order order = memory_order_seq_cst,
+        memory_order failure = memory_order_seq_cst) noexcept
+    {
+        vuint8_t old = 0;
+        switch (order) {
+        case memory_order_release:
+            old = (vatomic8_cmpxchg_rel((&_v), expected, desired));
+            break;
+        case memory_order_relaxed:
+            old = (vatomic8_cmpxchg_rlx((&_v), expected, desired));
+            break;
+        case memory_order_consume:
+        case memory_order_acquire:
+            old = (vatomic8_cmpxchg_acq((&_v), expected, desired));
+            break;
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            old = (vatomic8_cmpxchg((&_v), expected, desired));
+            break;
+        }
+        if (old == expected) {
+            return true;
+        } else {
+            expected = old;
+            return false;
+        }
+    }
+    bool compare_exchange_weak(
+        vuint8_t &expected, vuint8_t desired,
+        memory_order order = memory_order_seq_cst,
+        memory_order failure = memory_order_seq_cst) noexcept
+    {
+        return compare_exchange_strong(expected, desired, order, failure);
+    }
+
+    vuint8_t
+    fetch_add(vuint8_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return (vatomic8_get_add_rel(const_cast<vatomic8_t *>(&_v), v));
+        case memory_order_relaxed:
+            return (vatomic8_get_add_rlx(const_cast<vatomic8_t *>(&_v), v));
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic8_get_add_acq(const_cast<vatomic8_t *>(&_v), v));
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic8_get_add(const_cast<vatomic8_t *>(&_v), v));
+        }
+    }
+    vuint8_t operator+=(vuint8_t v) volatile noexcept
+    {
+        return fetch_add(v);
+    }
+    // v++
+    vuint8_t operator++(int) volatile noexcept
+    {
+        return vatomic8_get_inc(const_cast<vatomic8_t *>(&_v));
+    }
+    // ++v
+    vuint8_t operator++() volatile noexcept
+    {
+        return vatomic8_inc_get(const_cast<vatomic8_t *>(&_v));
+    }
+
+    vuint8_t
+    fetch_sub(vuint8_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic8_get_sub_rel(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic8_get_sub_rlx(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic8_get_sub_acq(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic8_get_sub(const_cast<vatomic8_t *>(&_v), v);
+        }
+    }
+    vuint8_t operator-=(vuint8_t v) volatile noexcept
+    {
+        return fetch_sub(v);
+    }
+    // v--
+    vuint8_t operator--(int) volatile noexcept
+    {
+        return vatomic8_get_dec(const_cast<vatomic8_t *>(&_v));
+    }
+    // --v
+    vuint8_t operator--() volatile noexcept
+    {
+        return vatomic8_dec_get(const_cast<vatomic8_t *>(&_v));
+    }
+
+    vuint8_t
+    fetch_and(vuint8_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic8_get_and_rel(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic8_get_and_rlx(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic8_get_and_acq(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic8_get_and(const_cast<vatomic8_t *>(&_v), v);
+        }
+    }
+
+    vuint8_t operator&=(vuint8_t v) volatile noexcept
+    {
+        return fetch_and(v);
+    }
+
+    vuint8_t
+    fetch_or(vuint8_t v,
+             memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic8_get_or_rel(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic8_get_or_rlx(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic8_get_or_acq(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic8_get_or(const_cast<vatomic8_t *>(&_v), v);
+        }
+    }
+
+    vuint8_t operator|=(vuint8_t v) volatile noexcept
+    {
+        return fetch_or(v);
+    }
+
+    vuint8_t
+    fetch_xor(vuint8_t v,
+              memory_order order = memory_order_seq_cst) volatile noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic8_get_xor_rel(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_relaxed:
+            return vatomic8_get_xor_rlx(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic8_get_xor_acq(const_cast<vatomic8_t *>(&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic8_get_xor(const_cast<vatomic8_t *>(&_v), v);
+        }
+    }
+
+    vuint8_t operator^=(vuint8_t v) volatile noexcept
+    {
+        return fetch_xor(v);
+    }
+    vuint8_t fetch_add(vuint8_t v,
+                       memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return (vatomic8_get_add_rel((&_v), v));
+        case memory_order_relaxed:
+            return (vatomic8_get_add_rlx((&_v), v));
+        case memory_order_consume:
+        case memory_order_acquire:
+            return (vatomic8_get_add_acq((&_v), v));
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return (vatomic8_get_add((&_v), v));
+        }
+    }
+    vuint8_t operator+=(vuint8_t v) noexcept
+    {
+        return fetch_add(v);
+    }
+    // v++
+    vuint8_t operator++(int) noexcept
+    {
+        return vatomic8_get_inc((&_v));
+    }
+    // ++v
+    vuint8_t operator++() noexcept
+    {
+        return vatomic8_inc_get((&_v));
+    }
+
+    vuint8_t fetch_sub(vuint8_t v,
+                       memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic8_get_sub_rel((&_v), v);
+        case memory_order_relaxed:
+            return vatomic8_get_sub_rlx((&_v), v);
+        case memory_order_consume:
+        case memory_order_acquire:
+            return vatomic8_get_sub_acq((&_v), v);
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return vatomic8_get_sub((&_v), v);
+        }
+    }
+    vuint8_t operator-=(vuint8_t v) noexcept
+    {
+        return fetch_sub(v);
+    }
+    // v--
+    vuint8_t operator--(int) noexcept
+    {
+        return vatomic8_get_dec((&_v));
+    }
+    // --v
+    vuint8_t operator--() noexcept
+    {
+        return vatomic8_dec_get((&_v));
+    }
+
+    vuint8_t fetch_and(vuint8_t v,
+                       memory_order order = memory_order_seq_cst) noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return vatomic8_get_and_rel((&_v), v);
+        case memory_order_relaxed:
+            return
vatomic8_get_and_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic8_get_and_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic8_get_and((&_v), v); + } + } + + vuint8_t operator&=(vuint8_t v) noexcept + { + return fetch_and(v); + } + + vuint8_t fetch_or(vuint8_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomic8_get_or_rel((&_v), v); + case memory_order_relaxed: + return vatomic8_get_or_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic8_get_or_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic8_get_or((&_v), v); + } + } + + vuint8_t operator|=(vuint8_t v) noexcept + { + return fetch_or(v); + } + + vuint8_t fetch_xor(vuint8_t v, + memory_order order = memory_order_seq_cst) noexcept + { + switch (order) { + case memory_order_release: + return vatomic8_get_xor_rel((&_v), v); + case memory_order_relaxed: + return vatomic8_get_xor_rlx((&_v), v); + case memory_order_consume: + case memory_order_acquire: + return vatomic8_get_xor_acq((&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return vatomic8_get_xor((&_v), v); + } + } + + vuint8_t operator^=(vuint8_t v) noexcept + { + return fetch_xor(v); + } + bool is_lock_free() volatile const noexcept + { + return true; + } + bool is_lock_free() const noexcept + { + return true; + } + + private: + vatomic8_t _v; + }; +}; // namespace vsync + +#endif diff --git a/include/vsync/atomic/doc.h b/include/vsync/atomic/doc.h index 0f19eeaa..af26616c 100644 --- a/include/vsync/atomic/doc.h +++ b/include/vsync/atomic/doc.h @@ -1,7 +1,6 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. * SPDX-License-Identifier: MIT - * Author: Huawei Dresden Research Center */ #ifndef VATOMIC_DOC_H #define VATOMIC_DOC_H diff --git a/include/vsync/atomic/internal/atomic_rlx.h b/include/vsync/atomic/internal/atomic_rlx.h deleted file mode 100644 index f8109c23..00000000 --- a/include/vsync/atomic/internal/atomic_rlx.h +++ /dev/null @@ -1,928 +0,0 @@ -/* - * Copyright (C) Huawei Technologies Co., Ltd. 2024. All rights reserved. - * SPDX-License-Identifier: MIT - */ - -#ifndef VATOMIC_CONFIG_RLX_H -#define VATOMIC_CONFIG_RLX_H - -/* !!!Warning: File generated by tmpl; DO NOT EDIT.!!! 
*/ - -#if defined(VATOMIC_ENABLE_ATOMIC_RLX) - - #define VATOMIC_FENCE - #define vatomic_fence vatomic_fence_rlx - #define VATOMIC_FENCE_ACQ - #define vatomic_fence_acq vatomic_fence_rlx - #define VATOMIC_FENCE_REL - #define vatomic_fence_rel vatomic_fence_rlx - - #define VATOMIC8_READ - #define vatomic8_read vatomic8_read_rlx - #define VATOMIC8_READ_ACQ - #define vatomic8_read_acq vatomic8_read_rlx - #define VATOMIC16_READ - #define vatomic16_read vatomic16_read_rlx - #define VATOMIC16_READ_ACQ - #define vatomic16_read_acq vatomic16_read_rlx - #define VATOMIC32_READ - #define vatomic32_read vatomic32_read_rlx - #define VATOMIC32_READ_ACQ - #define vatomic32_read_acq vatomic32_read_rlx - #define VATOMIC64_READ - #define vatomic64_read vatomic64_read_rlx - #define VATOMIC64_READ_ACQ - #define vatomic64_read_acq vatomic64_read_rlx - #define VATOMICSZ_READ - #define vatomicsz_read vatomicsz_read_rlx - #define VATOMICSZ_READ_ACQ - #define vatomicsz_read_acq vatomicsz_read_rlx - #define VATOMICPTR_READ - #define vatomicptr_read vatomicptr_read_rlx - #define VATOMICPTR_READ_ACQ - #define vatomicptr_read_acq vatomicptr_read_rlx - - #define VATOMIC8_WRITE - #define vatomic8_write vatomic8_write_rlx - #define VATOMIC8_WRITE_REL - #define vatomic8_write_rel vatomic8_write_rlx - #define VATOMIC16_WRITE - #define vatomic16_write vatomic16_write_rlx - #define VATOMIC16_WRITE_REL - #define vatomic16_write_rel vatomic16_write_rlx - #define VATOMIC32_WRITE - #define vatomic32_write vatomic32_write_rlx - #define VATOMIC32_WRITE_REL - #define vatomic32_write_rel vatomic32_write_rlx - #define VATOMIC64_WRITE - #define vatomic64_write vatomic64_write_rlx - #define VATOMIC64_WRITE_REL - #define vatomic64_write_rel vatomic64_write_rlx - #define VATOMICSZ_WRITE - #define vatomicsz_write vatomicsz_write_rlx - #define VATOMICSZ_WRITE_REL - #define vatomicsz_write_rel vatomicsz_write_rlx - #define VATOMICPTR_WRITE - #define vatomicptr_write vatomicptr_write_rlx - #define VATOMICPTR_WRITE_REL - #define vatomicptr_write_rel vatomicptr_write_rlx - - #define VATOMIC8_CMPXCHG - #define vatomic8_cmpxchg vatomic8_cmpxchg_rlx - #define VATOMIC8_XCHG - #define vatomic8_xchg vatomic8_xchg_rlx - #define VATOMIC8_CMPXCHG_ACQ - #define vatomic8_cmpxchg_acq vatomic8_cmpxchg_rlx - #define VATOMIC8_XCHG_ACQ - #define vatomic8_xchg_acq vatomic8_xchg_rlx - #define VATOMIC8_CMPXCHG_REL - #define vatomic8_cmpxchg_rel vatomic8_cmpxchg_rlx - #define VATOMIC8_XCHG_REL - #define vatomic8_xchg_rel vatomic8_xchg_rlx - #define VATOMIC16_CMPXCHG - #define vatomic16_cmpxchg vatomic16_cmpxchg_rlx - #define VATOMIC16_XCHG - #define vatomic16_xchg vatomic16_xchg_rlx - #define VATOMIC16_CMPXCHG_ACQ - #define vatomic16_cmpxchg_acq vatomic16_cmpxchg_rlx - #define VATOMIC16_XCHG_ACQ - #define vatomic16_xchg_acq vatomic16_xchg_rlx - #define VATOMIC16_CMPXCHG_REL - #define vatomic16_cmpxchg_rel vatomic16_cmpxchg_rlx - #define VATOMIC16_XCHG_REL - #define vatomic16_xchg_rel vatomic16_xchg_rlx - #define VATOMIC32_CMPXCHG - #define vatomic32_cmpxchg vatomic32_cmpxchg_rlx - #define VATOMIC32_XCHG - #define vatomic32_xchg vatomic32_xchg_rlx - #define VATOMIC32_CMPXCHG_ACQ - #define vatomic32_cmpxchg_acq vatomic32_cmpxchg_rlx - #define VATOMIC32_XCHG_ACQ - #define vatomic32_xchg_acq vatomic32_xchg_rlx - #define VATOMIC32_CMPXCHG_REL - #define vatomic32_cmpxchg_rel vatomic32_cmpxchg_rlx - #define VATOMIC32_XCHG_REL - #define vatomic32_xchg_rel vatomic32_xchg_rlx - #define VATOMIC64_CMPXCHG - #define vatomic64_cmpxchg vatomic64_cmpxchg_rlx - #define 
VATOMIC64_XCHG - #define vatomic64_xchg vatomic64_xchg_rlx - #define VATOMIC64_CMPXCHG_ACQ - #define vatomic64_cmpxchg_acq vatomic64_cmpxchg_rlx - #define VATOMIC64_XCHG_ACQ - #define vatomic64_xchg_acq vatomic64_xchg_rlx - #define VATOMIC64_CMPXCHG_REL - #define vatomic64_cmpxchg_rel vatomic64_cmpxchg_rlx - #define VATOMIC64_XCHG_REL - #define vatomic64_xchg_rel vatomic64_xchg_rlx - #define VATOMICSZ_CMPXCHG - #define vatomicsz_cmpxchg vatomicsz_cmpxchg_rlx - #define VATOMICSZ_XCHG - #define vatomicsz_xchg vatomicsz_xchg_rlx - #define VATOMICSZ_CMPXCHG_ACQ - #define vatomicsz_cmpxchg_acq vatomicsz_cmpxchg_rlx - #define VATOMICSZ_XCHG_ACQ - #define vatomicsz_xchg_acq vatomicsz_xchg_rlx - #define VATOMICSZ_CMPXCHG_REL - #define vatomicsz_cmpxchg_rel vatomicsz_cmpxchg_rlx - #define VATOMICSZ_XCHG_REL - #define vatomicsz_xchg_rel vatomicsz_xchg_rlx - #define VATOMICPTR_CMPXCHG - #define vatomicptr_cmpxchg vatomicptr_cmpxchg_rlx - #define VATOMICPTR_XCHG - #define vatomicptr_xchg vatomicptr_xchg_rlx - #define VATOMICPTR_CMPXCHG_ACQ - #define vatomicptr_cmpxchg_acq vatomicptr_cmpxchg_rlx - #define VATOMICPTR_XCHG_ACQ - #define vatomicptr_xchg_acq vatomicptr_xchg_rlx - #define VATOMICPTR_CMPXCHG_REL - #define vatomicptr_cmpxchg_rel vatomicptr_cmpxchg_rlx - #define VATOMICPTR_XCHG_REL - #define vatomicptr_xchg_rel vatomicptr_xchg_rlx - - #define VATOMIC8_GET_MAX - #define vatomic8_get_max vatomic8_get_max_rlx - #define VATOMIC8_GET_AND - #define vatomic8_get_and vatomic8_get_and_rlx - #define VATOMIC8_GET_OR - #define vatomic8_get_or vatomic8_get_or_rlx - #define VATOMIC8_GET_XOR - #define vatomic8_get_xor vatomic8_get_xor_rlx - #define VATOMIC8_GET_ADD - #define vatomic8_get_add vatomic8_get_add_rlx - #define VATOMIC8_GET_INC - #define vatomic8_get_inc vatomic8_get_inc_rlx - #define VATOMIC8_GET_SUB - #define vatomic8_get_sub vatomic8_get_sub_rlx - #define VATOMIC8_GET_DEC - #define vatomic8_get_dec vatomic8_get_dec_rlx - #define VATOMIC8_MAX_GET - #define vatomic8_max_get vatomic8_max_get_rlx - #define VATOMIC8_AND_GET - #define vatomic8_and_get vatomic8_and_get_rlx - #define VATOMIC8_OR_GET - #define vatomic8_or_get vatomic8_or_get_rlx - #define VATOMIC8_XOR_GET - #define vatomic8_xor_get vatomic8_xor_get_rlx - #define VATOMIC8_ADD_GET - #define vatomic8_add_get vatomic8_add_get_rlx - #define VATOMIC8_INC_GET - #define vatomic8_inc_get vatomic8_inc_get_rlx - #define VATOMIC8_SUB_GET - #define vatomic8_sub_get vatomic8_sub_get_rlx - #define VATOMIC8_DEC_GET - #define vatomic8_dec_get vatomic8_dec_get_rlx - #define VATOMIC8_GET_MAX_ACQ - #define vatomic8_get_max_acq vatomic8_get_max_rlx - #define VATOMIC8_GET_AND_ACQ - #define vatomic8_get_and_acq vatomic8_get_and_rlx - #define VATOMIC8_GET_OR_ACQ - #define vatomic8_get_or_acq vatomic8_get_or_rlx - #define VATOMIC8_GET_XOR_ACQ - #define vatomic8_get_xor_acq vatomic8_get_xor_rlx - #define VATOMIC8_GET_ADD_ACQ - #define vatomic8_get_add_acq vatomic8_get_add_rlx - #define VATOMIC8_GET_INC_ACQ - #define vatomic8_get_inc_acq vatomic8_get_inc_rlx - #define VATOMIC8_GET_SUB_ACQ - #define vatomic8_get_sub_acq vatomic8_get_sub_rlx - #define VATOMIC8_GET_DEC_ACQ - #define vatomic8_get_dec_acq vatomic8_get_dec_rlx - #define VATOMIC8_MAX_GET_ACQ - #define vatomic8_max_get_acq vatomic8_max_get_rlx - #define VATOMIC8_AND_GET_ACQ - #define vatomic8_and_get_acq vatomic8_and_get_rlx - #define VATOMIC8_OR_GET_ACQ - #define vatomic8_or_get_acq vatomic8_or_get_rlx - #define VATOMIC8_XOR_GET_ACQ - #define vatomic8_xor_get_acq vatomic8_xor_get_rlx - #define 
VATOMIC8_ADD_GET_ACQ - #define vatomic8_add_get_acq vatomic8_add_get_rlx - #define VATOMIC8_INC_GET_ACQ - #define vatomic8_inc_get_acq vatomic8_inc_get_rlx - #define VATOMIC8_SUB_GET_ACQ - #define vatomic8_sub_get_acq vatomic8_sub_get_rlx - #define VATOMIC8_DEC_GET_ACQ - #define vatomic8_dec_get_acq vatomic8_dec_get_rlx - #define VATOMIC8_GET_MAX_REL - #define vatomic8_get_max_rel vatomic8_get_max_rlx - #define VATOMIC8_GET_AND_REL - #define vatomic8_get_and_rel vatomic8_get_and_rlx - #define VATOMIC8_GET_OR_REL - #define vatomic8_get_or_rel vatomic8_get_or_rlx - #define VATOMIC8_GET_XOR_REL - #define vatomic8_get_xor_rel vatomic8_get_xor_rlx - #define VATOMIC8_GET_ADD_REL - #define vatomic8_get_add_rel vatomic8_get_add_rlx - #define VATOMIC8_GET_INC_REL - #define vatomic8_get_inc_rel vatomic8_get_inc_rlx - #define VATOMIC8_GET_SUB_REL - #define vatomic8_get_sub_rel vatomic8_get_sub_rlx - #define VATOMIC8_GET_DEC_REL - #define vatomic8_get_dec_rel vatomic8_get_dec_rlx - #define VATOMIC8_MAX_GET_REL - #define vatomic8_max_get_rel vatomic8_max_get_rlx - #define VATOMIC8_AND_GET_REL - #define vatomic8_and_get_rel vatomic8_and_get_rlx - #define VATOMIC8_OR_GET_REL - #define vatomic8_or_get_rel vatomic8_or_get_rlx - #define VATOMIC8_XOR_GET_REL - #define vatomic8_xor_get_rel vatomic8_xor_get_rlx - #define VATOMIC8_ADD_GET_REL - #define vatomic8_add_get_rel vatomic8_add_get_rlx - #define VATOMIC8_INC_GET_REL - #define vatomic8_inc_get_rel vatomic8_inc_get_rlx - #define VATOMIC8_SUB_GET_REL - #define vatomic8_sub_get_rel vatomic8_sub_get_rlx - #define VATOMIC8_DEC_GET_REL - #define vatomic8_dec_get_rel vatomic8_dec_get_rlx - #define VATOMIC16_GET_MAX - #define vatomic16_get_max vatomic16_get_max_rlx - #define VATOMIC16_GET_AND - #define vatomic16_get_and vatomic16_get_and_rlx - #define VATOMIC16_GET_OR - #define vatomic16_get_or vatomic16_get_or_rlx - #define VATOMIC16_GET_XOR - #define vatomic16_get_xor vatomic16_get_xor_rlx - #define VATOMIC16_GET_ADD - #define vatomic16_get_add vatomic16_get_add_rlx - #define VATOMIC16_GET_INC - #define vatomic16_get_inc vatomic16_get_inc_rlx - #define VATOMIC16_GET_SUB - #define vatomic16_get_sub vatomic16_get_sub_rlx - #define VATOMIC16_GET_DEC - #define vatomic16_get_dec vatomic16_get_dec_rlx - #define VATOMIC16_MAX_GET - #define vatomic16_max_get vatomic16_max_get_rlx - #define VATOMIC16_AND_GET - #define vatomic16_and_get vatomic16_and_get_rlx - #define VATOMIC16_OR_GET - #define vatomic16_or_get vatomic16_or_get_rlx - #define VATOMIC16_XOR_GET - #define vatomic16_xor_get vatomic16_xor_get_rlx - #define VATOMIC16_ADD_GET - #define vatomic16_add_get vatomic16_add_get_rlx - #define VATOMIC16_INC_GET - #define vatomic16_inc_get vatomic16_inc_get_rlx - #define VATOMIC16_SUB_GET - #define vatomic16_sub_get vatomic16_sub_get_rlx - #define VATOMIC16_DEC_GET - #define vatomic16_dec_get vatomic16_dec_get_rlx - #define VATOMIC16_GET_MAX_ACQ - #define vatomic16_get_max_acq vatomic16_get_max_rlx - #define VATOMIC16_GET_AND_ACQ - #define vatomic16_get_and_acq vatomic16_get_and_rlx - #define VATOMIC16_GET_OR_ACQ - #define vatomic16_get_or_acq vatomic16_get_or_rlx - #define VATOMIC16_GET_XOR_ACQ - #define vatomic16_get_xor_acq vatomic16_get_xor_rlx - #define VATOMIC16_GET_ADD_ACQ - #define vatomic16_get_add_acq vatomic16_get_add_rlx - #define VATOMIC16_GET_INC_ACQ - #define vatomic16_get_inc_acq vatomic16_get_inc_rlx - #define VATOMIC16_GET_SUB_ACQ - #define vatomic16_get_sub_acq vatomic16_get_sub_rlx - #define VATOMIC16_GET_DEC_ACQ - #define vatomic16_get_dec_acq 
vatomic16_get_dec_rlx - #define VATOMIC16_MAX_GET_ACQ - #define vatomic16_max_get_acq vatomic16_max_get_rlx - #define VATOMIC16_AND_GET_ACQ - #define vatomic16_and_get_acq vatomic16_and_get_rlx - #define VATOMIC16_OR_GET_ACQ - #define vatomic16_or_get_acq vatomic16_or_get_rlx - #define VATOMIC16_XOR_GET_ACQ - #define vatomic16_xor_get_acq vatomic16_xor_get_rlx - #define VATOMIC16_ADD_GET_ACQ - #define vatomic16_add_get_acq vatomic16_add_get_rlx - #define VATOMIC16_INC_GET_ACQ - #define vatomic16_inc_get_acq vatomic16_inc_get_rlx - #define VATOMIC16_SUB_GET_ACQ - #define vatomic16_sub_get_acq vatomic16_sub_get_rlx - #define VATOMIC16_DEC_GET_ACQ - #define vatomic16_dec_get_acq vatomic16_dec_get_rlx - #define VATOMIC16_GET_MAX_REL - #define vatomic16_get_max_rel vatomic16_get_max_rlx - #define VATOMIC16_GET_AND_REL - #define vatomic16_get_and_rel vatomic16_get_and_rlx - #define VATOMIC16_GET_OR_REL - #define vatomic16_get_or_rel vatomic16_get_or_rlx - #define VATOMIC16_GET_XOR_REL - #define vatomic16_get_xor_rel vatomic16_get_xor_rlx - #define VATOMIC16_GET_ADD_REL - #define vatomic16_get_add_rel vatomic16_get_add_rlx - #define VATOMIC16_GET_INC_REL - #define vatomic16_get_inc_rel vatomic16_get_inc_rlx - #define VATOMIC16_GET_SUB_REL - #define vatomic16_get_sub_rel vatomic16_get_sub_rlx - #define VATOMIC16_GET_DEC_REL - #define vatomic16_get_dec_rel vatomic16_get_dec_rlx - #define VATOMIC16_MAX_GET_REL - #define vatomic16_max_get_rel vatomic16_max_get_rlx - #define VATOMIC16_AND_GET_REL - #define vatomic16_and_get_rel vatomic16_and_get_rlx - #define VATOMIC16_OR_GET_REL - #define vatomic16_or_get_rel vatomic16_or_get_rlx - #define VATOMIC16_XOR_GET_REL - #define vatomic16_xor_get_rel vatomic16_xor_get_rlx - #define VATOMIC16_ADD_GET_REL - #define vatomic16_add_get_rel vatomic16_add_get_rlx - #define VATOMIC16_INC_GET_REL - #define vatomic16_inc_get_rel vatomic16_inc_get_rlx - #define VATOMIC16_SUB_GET_REL - #define vatomic16_sub_get_rel vatomic16_sub_get_rlx - #define VATOMIC16_DEC_GET_REL - #define vatomic16_dec_get_rel vatomic16_dec_get_rlx - #define VATOMIC32_GET_MAX - #define vatomic32_get_max vatomic32_get_max_rlx - #define VATOMIC32_GET_AND - #define vatomic32_get_and vatomic32_get_and_rlx - #define VATOMIC32_GET_OR - #define vatomic32_get_or vatomic32_get_or_rlx - #define VATOMIC32_GET_XOR - #define vatomic32_get_xor vatomic32_get_xor_rlx - #define VATOMIC32_GET_ADD - #define vatomic32_get_add vatomic32_get_add_rlx - #define VATOMIC32_GET_INC - #define vatomic32_get_inc vatomic32_get_inc_rlx - #define VATOMIC32_GET_SUB - #define vatomic32_get_sub vatomic32_get_sub_rlx - #define VATOMIC32_GET_DEC - #define vatomic32_get_dec vatomic32_get_dec_rlx - #define VATOMIC32_MAX_GET - #define vatomic32_max_get vatomic32_max_get_rlx - #define VATOMIC32_AND_GET - #define vatomic32_and_get vatomic32_and_get_rlx - #define VATOMIC32_OR_GET - #define vatomic32_or_get vatomic32_or_get_rlx - #define VATOMIC32_XOR_GET - #define vatomic32_xor_get vatomic32_xor_get_rlx - #define VATOMIC32_ADD_GET - #define vatomic32_add_get vatomic32_add_get_rlx - #define VATOMIC32_INC_GET - #define vatomic32_inc_get vatomic32_inc_get_rlx - #define VATOMIC32_SUB_GET - #define vatomic32_sub_get vatomic32_sub_get_rlx - #define VATOMIC32_DEC_GET - #define vatomic32_dec_get vatomic32_dec_get_rlx - #define VATOMIC32_GET_MAX_ACQ - #define vatomic32_get_max_acq vatomic32_get_max_rlx - #define VATOMIC32_GET_AND_ACQ - #define vatomic32_get_and_acq vatomic32_get_and_rlx - #define VATOMIC32_GET_OR_ACQ - #define vatomic32_get_or_acq 
vatomic32_get_or_rlx - #define VATOMIC32_GET_XOR_ACQ - #define vatomic32_get_xor_acq vatomic32_get_xor_rlx - #define VATOMIC32_GET_ADD_ACQ - #define vatomic32_get_add_acq vatomic32_get_add_rlx - #define VATOMIC32_GET_INC_ACQ - #define vatomic32_get_inc_acq vatomic32_get_inc_rlx - #define VATOMIC32_GET_SUB_ACQ - #define vatomic32_get_sub_acq vatomic32_get_sub_rlx - #define VATOMIC32_GET_DEC_ACQ - #define vatomic32_get_dec_acq vatomic32_get_dec_rlx - #define VATOMIC32_MAX_GET_ACQ - #define vatomic32_max_get_acq vatomic32_max_get_rlx - #define VATOMIC32_AND_GET_ACQ - #define vatomic32_and_get_acq vatomic32_and_get_rlx - #define VATOMIC32_OR_GET_ACQ - #define vatomic32_or_get_acq vatomic32_or_get_rlx - #define VATOMIC32_XOR_GET_ACQ - #define vatomic32_xor_get_acq vatomic32_xor_get_rlx - #define VATOMIC32_ADD_GET_ACQ - #define vatomic32_add_get_acq vatomic32_add_get_rlx - #define VATOMIC32_INC_GET_ACQ - #define vatomic32_inc_get_acq vatomic32_inc_get_rlx - #define VATOMIC32_SUB_GET_ACQ - #define vatomic32_sub_get_acq vatomic32_sub_get_rlx - #define VATOMIC32_DEC_GET_ACQ - #define vatomic32_dec_get_acq vatomic32_dec_get_rlx - #define VATOMIC32_GET_MAX_REL - #define vatomic32_get_max_rel vatomic32_get_max_rlx - #define VATOMIC32_GET_AND_REL - #define vatomic32_get_and_rel vatomic32_get_and_rlx - #define VATOMIC32_GET_OR_REL - #define vatomic32_get_or_rel vatomic32_get_or_rlx - #define VATOMIC32_GET_XOR_REL - #define vatomic32_get_xor_rel vatomic32_get_xor_rlx - #define VATOMIC32_GET_ADD_REL - #define vatomic32_get_add_rel vatomic32_get_add_rlx - #define VATOMIC32_GET_INC_REL - #define vatomic32_get_inc_rel vatomic32_get_inc_rlx - #define VATOMIC32_GET_SUB_REL - #define vatomic32_get_sub_rel vatomic32_get_sub_rlx - #define VATOMIC32_GET_DEC_REL - #define vatomic32_get_dec_rel vatomic32_get_dec_rlx - #define VATOMIC32_MAX_GET_REL - #define vatomic32_max_get_rel vatomic32_max_get_rlx - #define VATOMIC32_AND_GET_REL - #define vatomic32_and_get_rel vatomic32_and_get_rlx - #define VATOMIC32_OR_GET_REL - #define vatomic32_or_get_rel vatomic32_or_get_rlx - #define VATOMIC32_XOR_GET_REL - #define vatomic32_xor_get_rel vatomic32_xor_get_rlx - #define VATOMIC32_ADD_GET_REL - #define vatomic32_add_get_rel vatomic32_add_get_rlx - #define VATOMIC32_INC_GET_REL - #define vatomic32_inc_get_rel vatomic32_inc_get_rlx - #define VATOMIC32_SUB_GET_REL - #define vatomic32_sub_get_rel vatomic32_sub_get_rlx - #define VATOMIC32_DEC_GET_REL - #define vatomic32_dec_get_rel vatomic32_dec_get_rlx - #define VATOMIC64_GET_MAX - #define vatomic64_get_max vatomic64_get_max_rlx - #define VATOMIC64_GET_AND - #define vatomic64_get_and vatomic64_get_and_rlx - #define VATOMIC64_GET_OR - #define vatomic64_get_or vatomic64_get_or_rlx - #define VATOMIC64_GET_XOR - #define vatomic64_get_xor vatomic64_get_xor_rlx - #define VATOMIC64_GET_ADD - #define vatomic64_get_add vatomic64_get_add_rlx - #define VATOMIC64_GET_INC - #define vatomic64_get_inc vatomic64_get_inc_rlx - #define VATOMIC64_GET_SUB - #define vatomic64_get_sub vatomic64_get_sub_rlx - #define VATOMIC64_GET_DEC - #define vatomic64_get_dec vatomic64_get_dec_rlx - #define VATOMIC64_MAX_GET - #define vatomic64_max_get vatomic64_max_get_rlx - #define VATOMIC64_AND_GET - #define vatomic64_and_get vatomic64_and_get_rlx - #define VATOMIC64_OR_GET - #define vatomic64_or_get vatomic64_or_get_rlx - #define VATOMIC64_XOR_GET - #define vatomic64_xor_get vatomic64_xor_get_rlx - #define VATOMIC64_ADD_GET - #define vatomic64_add_get vatomic64_add_get_rlx - #define VATOMIC64_INC_GET - #define 
vatomic64_inc_get vatomic64_inc_get_rlx - #define VATOMIC64_SUB_GET - #define vatomic64_sub_get vatomic64_sub_get_rlx - #define VATOMIC64_DEC_GET - #define vatomic64_dec_get vatomic64_dec_get_rlx - #define VATOMIC64_GET_MAX_ACQ - #define vatomic64_get_max_acq vatomic64_get_max_rlx - #define VATOMIC64_GET_AND_ACQ - #define vatomic64_get_and_acq vatomic64_get_and_rlx - #define VATOMIC64_GET_OR_ACQ - #define vatomic64_get_or_acq vatomic64_get_or_rlx - #define VATOMIC64_GET_XOR_ACQ - #define vatomic64_get_xor_acq vatomic64_get_xor_rlx - #define VATOMIC64_GET_ADD_ACQ - #define vatomic64_get_add_acq vatomic64_get_add_rlx - #define VATOMIC64_GET_INC_ACQ - #define vatomic64_get_inc_acq vatomic64_get_inc_rlx - #define VATOMIC64_GET_SUB_ACQ - #define vatomic64_get_sub_acq vatomic64_get_sub_rlx - #define VATOMIC64_GET_DEC_ACQ - #define vatomic64_get_dec_acq vatomic64_get_dec_rlx - #define VATOMIC64_MAX_GET_ACQ - #define vatomic64_max_get_acq vatomic64_max_get_rlx - #define VATOMIC64_AND_GET_ACQ - #define vatomic64_and_get_acq vatomic64_and_get_rlx - #define VATOMIC64_OR_GET_ACQ - #define vatomic64_or_get_acq vatomic64_or_get_rlx - #define VATOMIC64_XOR_GET_ACQ - #define vatomic64_xor_get_acq vatomic64_xor_get_rlx - #define VATOMIC64_ADD_GET_ACQ - #define vatomic64_add_get_acq vatomic64_add_get_rlx - #define VATOMIC64_INC_GET_ACQ - #define vatomic64_inc_get_acq vatomic64_inc_get_rlx - #define VATOMIC64_SUB_GET_ACQ - #define vatomic64_sub_get_acq vatomic64_sub_get_rlx - #define VATOMIC64_DEC_GET_ACQ - #define vatomic64_dec_get_acq vatomic64_dec_get_rlx - #define VATOMIC64_GET_MAX_REL - #define vatomic64_get_max_rel vatomic64_get_max_rlx - #define VATOMIC64_GET_AND_REL - #define vatomic64_get_and_rel vatomic64_get_and_rlx - #define VATOMIC64_GET_OR_REL - #define vatomic64_get_or_rel vatomic64_get_or_rlx - #define VATOMIC64_GET_XOR_REL - #define vatomic64_get_xor_rel vatomic64_get_xor_rlx - #define VATOMIC64_GET_ADD_REL - #define vatomic64_get_add_rel vatomic64_get_add_rlx - #define VATOMIC64_GET_INC_REL - #define vatomic64_get_inc_rel vatomic64_get_inc_rlx - #define VATOMIC64_GET_SUB_REL - #define vatomic64_get_sub_rel vatomic64_get_sub_rlx - #define VATOMIC64_GET_DEC_REL - #define vatomic64_get_dec_rel vatomic64_get_dec_rlx - #define VATOMIC64_MAX_GET_REL - #define vatomic64_max_get_rel vatomic64_max_get_rlx - #define VATOMIC64_AND_GET_REL - #define vatomic64_and_get_rel vatomic64_and_get_rlx - #define VATOMIC64_OR_GET_REL - #define vatomic64_or_get_rel vatomic64_or_get_rlx - #define VATOMIC64_XOR_GET_REL - #define vatomic64_xor_get_rel vatomic64_xor_get_rlx - #define VATOMIC64_ADD_GET_REL - #define vatomic64_add_get_rel vatomic64_add_get_rlx - #define VATOMIC64_INC_GET_REL - #define vatomic64_inc_get_rel vatomic64_inc_get_rlx - #define VATOMIC64_SUB_GET_REL - #define vatomic64_sub_get_rel vatomic64_sub_get_rlx - #define VATOMIC64_DEC_GET_REL - #define vatomic64_dec_get_rel vatomic64_dec_get_rlx - #define VATOMICSZ_GET_MAX - #define vatomicsz_get_max vatomicsz_get_max_rlx - #define VATOMICSZ_GET_AND - #define vatomicsz_get_and vatomicsz_get_and_rlx - #define VATOMICSZ_GET_OR - #define vatomicsz_get_or vatomicsz_get_or_rlx - #define VATOMICSZ_GET_XOR - #define vatomicsz_get_xor vatomicsz_get_xor_rlx - #define VATOMICSZ_GET_ADD - #define vatomicsz_get_add vatomicsz_get_add_rlx - #define VATOMICSZ_GET_INC - #define vatomicsz_get_inc vatomicsz_get_inc_rlx - #define VATOMICSZ_GET_SUB - #define vatomicsz_get_sub vatomicsz_get_sub_rlx - #define VATOMICSZ_GET_DEC - #define vatomicsz_get_dec 
vatomicsz_get_dec_rlx - #define VATOMICSZ_MAX_GET - #define vatomicsz_max_get vatomicsz_max_get_rlx - #define VATOMICSZ_AND_GET - #define vatomicsz_and_get vatomicsz_and_get_rlx - #define VATOMICSZ_OR_GET - #define vatomicsz_or_get vatomicsz_or_get_rlx - #define VATOMICSZ_XOR_GET - #define vatomicsz_xor_get vatomicsz_xor_get_rlx - #define VATOMICSZ_ADD_GET - #define vatomicsz_add_get vatomicsz_add_get_rlx - #define VATOMICSZ_INC_GET - #define vatomicsz_inc_get vatomicsz_inc_get_rlx - #define VATOMICSZ_SUB_GET - #define vatomicsz_sub_get vatomicsz_sub_get_rlx - #define VATOMICSZ_DEC_GET - #define vatomicsz_dec_get vatomicsz_dec_get_rlx - #define VATOMICSZ_GET_MAX_ACQ - #define vatomicsz_get_max_acq vatomicsz_get_max_rlx - #define VATOMICSZ_GET_AND_ACQ - #define vatomicsz_get_and_acq vatomicsz_get_and_rlx - #define VATOMICSZ_GET_OR_ACQ - #define vatomicsz_get_or_acq vatomicsz_get_or_rlx - #define VATOMICSZ_GET_XOR_ACQ - #define vatomicsz_get_xor_acq vatomicsz_get_xor_rlx - #define VATOMICSZ_GET_ADD_ACQ - #define vatomicsz_get_add_acq vatomicsz_get_add_rlx - #define VATOMICSZ_GET_INC_ACQ - #define vatomicsz_get_inc_acq vatomicsz_get_inc_rlx - #define VATOMICSZ_GET_SUB_ACQ - #define vatomicsz_get_sub_acq vatomicsz_get_sub_rlx - #define VATOMICSZ_GET_DEC_ACQ - #define vatomicsz_get_dec_acq vatomicsz_get_dec_rlx - #define VATOMICSZ_MAX_GET_ACQ - #define vatomicsz_max_get_acq vatomicsz_max_get_rlx - #define VATOMICSZ_AND_GET_ACQ - #define vatomicsz_and_get_acq vatomicsz_and_get_rlx - #define VATOMICSZ_OR_GET_ACQ - #define vatomicsz_or_get_acq vatomicsz_or_get_rlx - #define VATOMICSZ_XOR_GET_ACQ - #define vatomicsz_xor_get_acq vatomicsz_xor_get_rlx - #define VATOMICSZ_ADD_GET_ACQ - #define vatomicsz_add_get_acq vatomicsz_add_get_rlx - #define VATOMICSZ_INC_GET_ACQ - #define vatomicsz_inc_get_acq vatomicsz_inc_get_rlx - #define VATOMICSZ_SUB_GET_ACQ - #define vatomicsz_sub_get_acq vatomicsz_sub_get_rlx - #define VATOMICSZ_DEC_GET_ACQ - #define vatomicsz_dec_get_acq vatomicsz_dec_get_rlx - #define VATOMICSZ_GET_MAX_REL - #define vatomicsz_get_max_rel vatomicsz_get_max_rlx - #define VATOMICSZ_GET_AND_REL - #define vatomicsz_get_and_rel vatomicsz_get_and_rlx - #define VATOMICSZ_GET_OR_REL - #define vatomicsz_get_or_rel vatomicsz_get_or_rlx - #define VATOMICSZ_GET_XOR_REL - #define vatomicsz_get_xor_rel vatomicsz_get_xor_rlx - #define VATOMICSZ_GET_ADD_REL - #define vatomicsz_get_add_rel vatomicsz_get_add_rlx - #define VATOMICSZ_GET_INC_REL - #define vatomicsz_get_inc_rel vatomicsz_get_inc_rlx - #define VATOMICSZ_GET_SUB_REL - #define vatomicsz_get_sub_rel vatomicsz_get_sub_rlx - #define VATOMICSZ_GET_DEC_REL - #define vatomicsz_get_dec_rel vatomicsz_get_dec_rlx - #define VATOMICSZ_MAX_GET_REL - #define vatomicsz_max_get_rel vatomicsz_max_get_rlx - #define VATOMICSZ_AND_GET_REL - #define vatomicsz_and_get_rel vatomicsz_and_get_rlx - #define VATOMICSZ_OR_GET_REL - #define vatomicsz_or_get_rel vatomicsz_or_get_rlx - #define VATOMICSZ_XOR_GET_REL - #define vatomicsz_xor_get_rel vatomicsz_xor_get_rlx - #define VATOMICSZ_ADD_GET_REL - #define vatomicsz_add_get_rel vatomicsz_add_get_rlx - #define VATOMICSZ_INC_GET_REL - #define vatomicsz_inc_get_rel vatomicsz_inc_get_rlx - #define VATOMICSZ_SUB_GET_REL - #define vatomicsz_sub_get_rel vatomicsz_sub_get_rlx - #define VATOMICSZ_DEC_GET_REL - #define vatomicsz_dec_get_rel vatomicsz_dec_get_rlx - - #define VATOMIC8_MAX - #define vatomic8_max vatomic8_max_rlx - #define VATOMIC8_AND - #define vatomic8_and vatomic8_and_rlx - #define VATOMIC8_OR - #define vatomic8_or 
vatomic8_or_rlx - #define VATOMIC8_XOR - #define vatomic8_xor vatomic8_xor_rlx - #define VATOMIC8_ADD - #define vatomic8_add vatomic8_add_rlx - #define VATOMIC8_INC - #define vatomic8_inc vatomic8_inc_rlx - #define VATOMIC8_SUB - #define vatomic8_sub vatomic8_sub_rlx - #define VATOMIC8_DEC - #define vatomic8_dec vatomic8_dec_rlx - #define VATOMIC8_MAX_REL - #define vatomic8_max_rel vatomic8_max_rlx - #define VATOMIC8_AND_REL - #define vatomic8_and_rel vatomic8_and_rlx - #define VATOMIC8_OR_REL - #define vatomic8_or_rel vatomic8_or_rlx - #define VATOMIC8_XOR_REL - #define vatomic8_xor_rel vatomic8_xor_rlx - #define VATOMIC8_ADD_REL - #define vatomic8_add_rel vatomic8_add_rlx - #define VATOMIC8_INC_REL - #define vatomic8_inc_rel vatomic8_inc_rlx - #define VATOMIC8_SUB_REL - #define vatomic8_sub_rel vatomic8_sub_rlx - #define VATOMIC8_DEC_REL - #define vatomic8_dec_rel vatomic8_dec_rlx - #define VATOMIC16_MAX - #define vatomic16_max vatomic16_max_rlx - #define VATOMIC16_AND - #define vatomic16_and vatomic16_and_rlx - #define VATOMIC16_OR - #define vatomic16_or vatomic16_or_rlx - #define VATOMIC16_XOR - #define vatomic16_xor vatomic16_xor_rlx - #define VATOMIC16_ADD - #define vatomic16_add vatomic16_add_rlx - #define VATOMIC16_INC - #define vatomic16_inc vatomic16_inc_rlx - #define VATOMIC16_SUB - #define vatomic16_sub vatomic16_sub_rlx - #define VATOMIC16_DEC - #define vatomic16_dec vatomic16_dec_rlx - #define VATOMIC16_MAX_REL - #define vatomic16_max_rel vatomic16_max_rlx - #define VATOMIC16_AND_REL - #define vatomic16_and_rel vatomic16_and_rlx - #define VATOMIC16_OR_REL - #define vatomic16_or_rel vatomic16_or_rlx - #define VATOMIC16_XOR_REL - #define vatomic16_xor_rel vatomic16_xor_rlx - #define VATOMIC16_ADD_REL - #define vatomic16_add_rel vatomic16_add_rlx - #define VATOMIC16_INC_REL - #define vatomic16_inc_rel vatomic16_inc_rlx - #define VATOMIC16_SUB_REL - #define vatomic16_sub_rel vatomic16_sub_rlx - #define VATOMIC16_DEC_REL - #define vatomic16_dec_rel vatomic16_dec_rlx - #define VATOMIC32_MAX - #define vatomic32_max vatomic32_max_rlx - #define VATOMIC32_AND - #define vatomic32_and vatomic32_and_rlx - #define VATOMIC32_OR - #define vatomic32_or vatomic32_or_rlx - #define VATOMIC32_XOR - #define vatomic32_xor vatomic32_xor_rlx - #define VATOMIC32_ADD - #define vatomic32_add vatomic32_add_rlx - #define VATOMIC32_INC - #define vatomic32_inc vatomic32_inc_rlx - #define VATOMIC32_SUB - #define vatomic32_sub vatomic32_sub_rlx - #define VATOMIC32_DEC - #define vatomic32_dec vatomic32_dec_rlx - #define VATOMIC32_MAX_REL - #define vatomic32_max_rel vatomic32_max_rlx - #define VATOMIC32_AND_REL - #define vatomic32_and_rel vatomic32_and_rlx - #define VATOMIC32_OR_REL - #define vatomic32_or_rel vatomic32_or_rlx - #define VATOMIC32_XOR_REL - #define vatomic32_xor_rel vatomic32_xor_rlx - #define VATOMIC32_ADD_REL - #define vatomic32_add_rel vatomic32_add_rlx - #define VATOMIC32_INC_REL - #define vatomic32_inc_rel vatomic32_inc_rlx - #define VATOMIC32_SUB_REL - #define vatomic32_sub_rel vatomic32_sub_rlx - #define VATOMIC32_DEC_REL - #define vatomic32_dec_rel vatomic32_dec_rlx - #define VATOMIC64_MAX - #define vatomic64_max vatomic64_max_rlx - #define VATOMIC64_AND - #define vatomic64_and vatomic64_and_rlx - #define VATOMIC64_OR - #define vatomic64_or vatomic64_or_rlx - #define VATOMIC64_XOR - #define vatomic64_xor vatomic64_xor_rlx - #define VATOMIC64_ADD - #define vatomic64_add vatomic64_add_rlx - #define VATOMIC64_INC - #define vatomic64_inc vatomic64_inc_rlx - #define VATOMIC64_SUB - #define 
vatomic64_sub vatomic64_sub_rlx - #define VATOMIC64_DEC - #define vatomic64_dec vatomic64_dec_rlx - #define VATOMIC64_MAX_REL - #define vatomic64_max_rel vatomic64_max_rlx - #define VATOMIC64_AND_REL - #define vatomic64_and_rel vatomic64_and_rlx - #define VATOMIC64_OR_REL - #define vatomic64_or_rel vatomic64_or_rlx - #define VATOMIC64_XOR_REL - #define vatomic64_xor_rel vatomic64_xor_rlx - #define VATOMIC64_ADD_REL - #define vatomic64_add_rel vatomic64_add_rlx - #define VATOMIC64_INC_REL - #define vatomic64_inc_rel vatomic64_inc_rlx - #define VATOMIC64_SUB_REL - #define vatomic64_sub_rel vatomic64_sub_rlx - #define VATOMIC64_DEC_REL - #define vatomic64_dec_rel vatomic64_dec_rlx - #define VATOMICSZ_MAX - #define vatomicsz_max vatomicsz_max_rlx - #define VATOMICSZ_AND - #define vatomicsz_and vatomicsz_and_rlx - #define VATOMICSZ_OR - #define vatomicsz_or vatomicsz_or_rlx - #define VATOMICSZ_XOR - #define vatomicsz_xor vatomicsz_xor_rlx - #define VATOMICSZ_ADD - #define vatomicsz_add vatomicsz_add_rlx - #define VATOMICSZ_INC - #define vatomicsz_inc vatomicsz_inc_rlx - #define VATOMICSZ_SUB - #define vatomicsz_sub vatomicsz_sub_rlx - #define VATOMICSZ_DEC - #define vatomicsz_dec vatomicsz_dec_rlx - #define VATOMICSZ_MAX_REL - #define vatomicsz_max_rel vatomicsz_max_rlx - #define VATOMICSZ_AND_REL - #define vatomicsz_and_rel vatomicsz_and_rlx - #define VATOMICSZ_OR_REL - #define vatomicsz_or_rel vatomicsz_or_rlx - #define VATOMICSZ_XOR_REL - #define vatomicsz_xor_rel vatomicsz_xor_rlx - #define VATOMICSZ_ADD_REL - #define vatomicsz_add_rel vatomicsz_add_rlx - #define VATOMICSZ_INC_REL - #define vatomicsz_inc_rel vatomicsz_inc_rlx - #define VATOMICSZ_SUB_REL - #define vatomicsz_sub_rel vatomicsz_sub_rlx - #define VATOMICSZ_DEC_REL - #define vatomicsz_dec_rel vatomicsz_dec_rlx - - #define VATOMIC32_AWAIT_EQ - #define vatomic32_eq vatomic32_await_eq_rlx - #define VATOMIC32_AWAIT_NEQ - #define vatomic32_neq vatomic32_await_neq_rlx - #define VATOMIC32_AWAIT_LT - #define vatomic32_lt vatomic32_await_lt_rlx - #define VATOMIC32_AWAIT_LE - #define vatomic32_le vatomic32_await_le_rlx - #define VATOMIC32_AWAIT_GT - #define vatomic32_gt vatomic32_await_gt_rlx - #define VATOMIC32_AWAIT_GE - #define vatomic32_ge vatomic32_await_ge_rlx - #define VATOMIC32_AWAIT_EQ_ACQ - #define vatomic32_eq_acq vatomic32_await_eq_rlx - #define VATOMIC32_AWAIT_NEQ_ACQ - #define vatomic32_neq_acq vatomic32_await_neq_rlx - #define VATOMIC32_AWAIT_LT_ACQ - #define vatomic32_lt_acq vatomic32_await_lt_rlx - #define VATOMIC32_AWAIT_LE_ACQ - #define vatomic32_le_acq vatomic32_await_le_rlx - #define VATOMIC32_AWAIT_GT_ACQ - #define vatomic32_gt_acq vatomic32_await_gt_rlx - #define VATOMIC32_AWAIT_GE_ACQ - #define vatomic32_ge_acq vatomic32_await_ge_rlx - #define VATOMIC64_AWAIT_EQ - #define vatomic64_eq vatomic64_await_eq_rlx - #define VATOMIC64_AWAIT_NEQ - #define vatomic64_neq vatomic64_await_neq_rlx - #define VATOMIC64_AWAIT_LT - #define vatomic64_lt vatomic64_await_lt_rlx - #define VATOMIC64_AWAIT_LE - #define vatomic64_le vatomic64_await_le_rlx - #define VATOMIC64_AWAIT_GT - #define vatomic64_gt vatomic64_await_gt_rlx - #define VATOMIC64_AWAIT_GE - #define vatomic64_ge vatomic64_await_ge_rlx - #define VATOMIC64_AWAIT_EQ_ACQ - #define vatomic64_eq_acq vatomic64_await_eq_rlx - #define VATOMIC64_AWAIT_NEQ_ACQ - #define vatomic64_neq_acq vatomic64_await_neq_rlx - #define VATOMIC64_AWAIT_LT_ACQ - #define vatomic64_lt_acq vatomic64_await_lt_rlx - #define VATOMIC64_AWAIT_LE_ACQ - #define vatomic64_le_acq vatomic64_await_le_rlx - #define 
VATOMIC64_AWAIT_GT_ACQ - #define vatomic64_gt_acq vatomic64_await_gt_rlx - #define VATOMIC64_AWAIT_GE_ACQ - #define vatomic64_ge_acq vatomic64_await_ge_rlx - #define VATOMICPTR_AWAIT_EQ - #define vatomicptr_eq vatomicptr_await_eq_rlx - #define VATOMICPTR_AWAIT_NEQ - #define vatomicptr_neq vatomicptr_await_neq_rlx - #define VATOMICPTR_AWAIT_EQ_ACQ - #define vatomicptr_eq_acq vatomicptr_await_eq_rlx - #define VATOMICPTR_AWAIT_NEQ_ACQ - #define vatomicptr_neq_acq vatomicptr_await_neq_rlx - - #define VATOMIC32_AWAIT_EQ_OP - #define vatomic32_await_eq_OP vatomic32_await_eq_OP_rlx - #define VATOMIC32_AWAIT_NEQ_OP - #define vatomic32_await_neq_OP vatomic32_await_neq_OP_rlx - #define VATOMIC32_AWAIT_LT_OP - #define vatomic32_await_lt_OP vatomic32_await_lt_OP_rlx - #define VATOMIC32_AWAIT_LE_OP - #define vatomic32_await_le_OP vatomic32_await_le_OP_rlx - #define VATOMIC32_AWAIT_GT_OP - #define vatomic32_await_gt_OP vatomic32_await_gt_OP_rlx - #define VATOMIC32_AWAIT_GE_OP - #define vatomic32_await_ge_OP vatomic32_await_ge_OP_rlx - #define VATOMIC32_AWAIT_EQ_OP_ACQ - #define vatomic32_await_eq_OP_acq vatomic32_await_eq_OP_rlx - #define VATOMIC32_AWAIT_NEQ_OP_ACQ - #define vatomic32_await_neq_OP_acq vatomic32_await_neq_OP_rlx - #define VATOMIC32_AWAIT_LT_OP_ACQ - #define vatomic32_await_lt_OP_acq vatomic32_await_lt_OP_rlx - #define VATOMIC32_AWAIT_LE_OP_ACQ - #define vatomic32_await_le_OP_acq vatomic32_await_le_OP_rlx - #define VATOMIC32_AWAIT_GT_OP_ACQ - #define vatomic32_await_gt_OP_acq vatomic32_await_gt_OP_rlx - #define VATOMIC32_AWAIT_GE_OP_ACQ - #define vatomic32_await_ge_OP_acq vatomic32_await_ge_OP_rlx - #define VATOMIC32_AWAIT_EQ_OP_REL - #define vatomic32_await_eq_OP_rel vatomic32_await_eq_OP_rlx - #define VATOMIC32_AWAIT_NEQ_OP_REL - #define vatomic32_await_neq_OP_rel vatomic32_await_neq_OP_rlx - #define VATOMIC32_AWAIT_LT_OP_REL - #define vatomic32_await_lt_OP_rel vatomic32_await_lt_OP_rlx - #define VATOMIC32_AWAIT_LE_OP_REL - #define vatomic32_await_le_OP_rel vatomic32_await_le_OP_rlx - #define VATOMIC32_AWAIT_GT_OP_REL - #define vatomic32_await_gt_OP_rel vatomic32_await_gt_OP_rlx - #define VATOMIC32_AWAIT_GE_OP_REL - #define vatomic32_await_ge_OP_rel vatomic32_await_ge_OP_rlx - #define VATOMIC64_AWAIT_EQ_OP - #define vatomic64_await_eq_OP vatomic64_await_eq_OP_rlx - #define VATOMIC64_AWAIT_NEQ_OP - #define vatomic64_await_neq_OP vatomic64_await_neq_OP_rlx - #define VATOMIC64_AWAIT_LT_OP - #define vatomic64_await_lt_OP vatomic64_await_lt_OP_rlx - #define VATOMIC64_AWAIT_LE_OP - #define vatomic64_await_le_OP vatomic64_await_le_OP_rlx - #define VATOMIC64_AWAIT_GT_OP - #define vatomic64_await_gt_OP vatomic64_await_gt_OP_rlx - #define VATOMIC64_AWAIT_GE_OP - #define vatomic64_await_ge_OP vatomic64_await_ge_OP_rlx - #define VATOMIC64_AWAIT_EQ_OP_ACQ - #define vatomic64_await_eq_OP_acq vatomic64_await_eq_OP_rlx - #define VATOMIC64_AWAIT_NEQ_OP_ACQ - #define vatomic64_await_neq_OP_acq vatomic64_await_neq_OP_rlx - #define VATOMIC64_AWAIT_LT_OP_ACQ - #define vatomic64_await_lt_OP_acq vatomic64_await_lt_OP_rlx - #define VATOMIC64_AWAIT_LE_OP_ACQ - #define vatomic64_await_le_OP_acq vatomic64_await_le_OP_rlx - #define VATOMIC64_AWAIT_GT_OP_ACQ - #define vatomic64_await_gt_OP_acq vatomic64_await_gt_OP_rlx - #define VATOMIC64_AWAIT_GE_OP_ACQ - #define vatomic64_await_ge_OP_acq vatomic64_await_ge_OP_rlx - #define VATOMIC64_AWAIT_EQ_OP_REL - #define vatomic64_await_eq_OP_rel vatomic64_await_eq_OP_rlx - #define VATOMIC64_AWAIT_NEQ_OP_REL - #define vatomic64_await_neq_OP_rel vatomic64_await_neq_OP_rlx - 
#define VATOMIC64_AWAIT_LT_OP_REL - #define vatomic64_await_lt_OP_rel vatomic64_await_lt_OP_rlx - #define VATOMIC64_AWAIT_LE_OP_REL - #define vatomic64_await_le_OP_rel vatomic64_await_le_OP_rlx - #define VATOMIC64_AWAIT_GT_OP_REL - #define vatomic64_await_gt_OP_rel vatomic64_await_gt_OP_rlx - #define VATOMIC64_AWAIT_GE_OP_REL - #define vatomic64_await_ge_OP_rel vatomic64_await_ge_OP_rlx - #define VATOMICPTR_AWAIT_EQ_OP - #define vatomicptr_await_eq_OP vatomicptr_await_eq_OP_rlx - #define VATOMICPTR_AWAIT_NEQ_OP - #define vatomicptr_await_neq_OP vatomicptr_await_neq_OP_rlx - #define VATOMICPTR_AWAIT_EQ_OP_ACQ - #define vatomicptr_await_eq_OP_acq vatomicptr_await_eq_OP_rlx - #define VATOMICPTR_AWAIT_NEQ_OP_ACQ - #define vatomicptr_await_neq_OP_acq vatomicptr_await_neq_OP_rlx - #define VATOMICPTR_AWAIT_EQ_OP_REL - #define vatomicptr_await_eq_OP_rel vatomicptr_await_eq_OP_rlx - #define VATOMICPTR_AWAIT_NEQ_OP_REL - #define vatomicptr_await_neq_OP_rel vatomicptr_await_neq_OP_rlx - -#endif -#endif diff --git a/include/vsync/atomic/internal/config/fnc_rlx.h b/include/vsync/atomic/internal/remap_fnc_rlx.h similarity index 89% rename from include/vsync/atomic/internal/config/fnc_rlx.h rename to include/vsync/atomic/internal/remap_fnc_rlx.h index 280b9f18..17036506 100644 --- a/include/vsync/atomic/internal/config/fnc_rlx.h +++ b/include/vsync/atomic/internal/remap_fnc_rlx.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/fnc_sc.h b/include/vsync/atomic/internal/remap_fnc_sc.h similarity index 89% rename from include/vsync/atomic/internal/config/fnc_sc.h rename to include/vsync/atomic/internal/remap_fnc_sc.h index ce1a99e4..68782c73 100644 --- a/include/vsync/atomic/internal/config/fnc_sc.h +++ b/include/vsync/atomic/internal/remap_fnc_sc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/ptr_rlx.h b/include/vsync/atomic/internal/remap_ptr_rlx.h similarity index 98% rename from include/vsync/atomic/internal/config/ptr_rlx.h rename to include/vsync/atomic/internal/remap_ptr_rlx.h index 4159bccf..63e92c5e 100644 --- a/include/vsync/atomic/internal/config/ptr_rlx.h +++ b/include/vsync/atomic/internal/remap_ptr_rlx.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/ptr_sc.h b/include/vsync/atomic/internal/remap_ptr_sc.h similarity index 98% rename from include/vsync/atomic/internal/config/ptr_sc.h rename to include/vsync/atomic/internal/remap_ptr_sc.h index 5de39895..1ea8e57e 100644 --- a/include/vsync/atomic/internal/config/ptr_sc.h +++ b/include/vsync/atomic/internal/remap_ptr_sc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
* SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/sz_rlx.h b/include/vsync/atomic/internal/remap_sz_rlx.h similarity index 99% rename from include/vsync/atomic/internal/config/sz_rlx.h rename to include/vsync/atomic/internal/remap_sz_rlx.h index d35ce2a8..6512b09e 100644 --- a/include/vsync/atomic/internal/config/sz_rlx.h +++ b/include/vsync/atomic/internal/remap_sz_rlx.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/sz_sc.h b/include/vsync/atomic/internal/remap_sz_sc.h similarity index 99% rename from include/vsync/atomic/internal/config/sz_sc.h rename to include/vsync/atomic/internal/remap_sz_sc.h index fdc593a9..790330f3 100644 --- a/include/vsync/atomic/internal/config/sz_sc.h +++ b/include/vsync/atomic/internal/remap_sz_sc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/u16_rlx.h b/include/vsync/atomic/internal/remap_u16_rlx.h similarity index 99% rename from include/vsync/atomic/internal/config/u16_rlx.h rename to include/vsync/atomic/internal/remap_u16_rlx.h index a08dd97c..7e99bb9c 100644 --- a/include/vsync/atomic/internal/config/u16_rlx.h +++ b/include/vsync/atomic/internal/remap_u16_rlx.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/u16_sc.h b/include/vsync/atomic/internal/remap_u16_sc.h similarity index 99% rename from include/vsync/atomic/internal/config/u16_sc.h rename to include/vsync/atomic/internal/remap_u16_sc.h index dd5ddbeb..8607765a 100644 --- a/include/vsync/atomic/internal/config/u16_sc.h +++ b/include/vsync/atomic/internal/remap_u16_sc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/u32_rlx.h b/include/vsync/atomic/internal/remap_u32_rlx.h similarity index 99% rename from include/vsync/atomic/internal/config/u32_rlx.h rename to include/vsync/atomic/internal/remap_u32_rlx.h index 88500626..eb3f86d9 100644 --- a/include/vsync/atomic/internal/config/u32_rlx.h +++ b/include/vsync/atomic/internal/remap_u32_rlx.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/u32_sc.h b/include/vsync/atomic/internal/remap_u32_sc.h similarity index 99% rename from include/vsync/atomic/internal/config/u32_sc.h rename to include/vsync/atomic/internal/remap_u32_sc.h index 92817286..aa4a993b 100644 --- a/include/vsync/atomic/internal/config/u32_sc.h +++ b/include/vsync/atomic/internal/remap_u32_sc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
* SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/u64_rlx.h b/include/vsync/atomic/internal/remap_u64_rlx.h similarity index 99% rename from include/vsync/atomic/internal/config/u64_rlx.h rename to include/vsync/atomic/internal/remap_u64_rlx.h index a1dc0713..cba8b386 100644 --- a/include/vsync/atomic/internal/config/u64_rlx.h +++ b/include/vsync/atomic/internal/remap_u64_rlx.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/u64_sc.h b/include/vsync/atomic/internal/remap_u64_sc.h similarity index 99% rename from include/vsync/atomic/internal/config/u64_sc.h rename to include/vsync/atomic/internal/remap_u64_sc.h index fd932121..5585b8c9 100644 --- a/include/vsync/atomic/internal/config/u64_sc.h +++ b/include/vsync/atomic/internal/remap_u64_sc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/u8_rlx.h b/include/vsync/atomic/internal/remap_u8_rlx.h similarity index 99% rename from include/vsync/atomic/internal/config/u8_rlx.h rename to include/vsync/atomic/internal/remap_u8_rlx.h index 70c82b73..2f6fb15f 100644 --- a/include/vsync/atomic/internal/config/u8_rlx.h +++ b/include/vsync/atomic/internal/remap_u8_rlx.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/atomic/internal/config/u8_sc.h b/include/vsync/atomic/internal/remap_u8_sc.h similarity index 99% rename from include/vsync/atomic/internal/config/u8_sc.h rename to include/vsync/atomic/internal/remap_u8_sc.h index 7ca714aa..9df96881 100644 --- a/include/vsync/atomic/internal/config/u8_sc.h +++ b/include/vsync/atomic/internal/remap_u8_sc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ diff --git a/include/vsync/doc.h b/include/vsync/doc.h index 9ece4599..70308a5c 100644 --- a/include/vsync/doc.h +++ b/include/vsync/doc.h @@ -1,113 +1,16 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. * SPDX-License-Identifier: MIT - * Description: VSync API documention - * Author: Huawei Dresden Research Center */ #ifndef VSYNC_DOC_H #define VSYNC_DOC_H /******************************************************************************* * @dir vsync * - * libvsync is library of robust atomics, synchronization primitives, - * concurrent data structures and safe memory reclamation schemes. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/common - * @brief Macros and helpers used throughout libvsync. 
- ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/stack - * @brief Concurrent stacks. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/bitmap - * @brief Bitmap implementations. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/queue - * @brief Queues, priority queues and ringbuffers. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/utils - * @brief Utilities and supporting algorithms. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/spinlock - * @brief Spinlocks for kernel and userspace. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/thread - * @brief Userspace synchronization primitives. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/smr - * @brief Safe Memory Reclamation Schemes. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/alloc - * @brief Memory allocators. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/pool - * @brief Memory pools. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/smr_alloc - * @brief Memory allocators with integrated SMR schemes. - ******************************************************************************/ - -/******************************************************************************* - * @defgroup numa_aware Numa-aware - * Group of numa-aware algorithms. - ******************************************************************************/ - -/******************************************************************************* - * @defgroup lock_free Lock-free - * Group of algorithms with lock-free progress condition. - ******************************************************************************/ - -/******************************************************************************* - * @defgroup requires_smr SMR-required - * Group of algorithms that require safe memory reclamation. - * Users are expected to couple the usage of these algorithms with an SMR - * scheme from vsync/smr e.g. vsync/smr/gdump.h . - * Detached/retired nodes of these algorithms must be retired to the SMR - * and not freed/recycled directly. Operations of these algorithms should - * be called within the critical section of the SMR within enter/exit, unless - * stated otherwise. 
- ******************************************************************************/ - -/******************************************************************************* - * @defgroup linearizable Linearizable - * Group of algorithms linearizable algorithms. - ******************************************************************************/ - -/******************************************************************************* - * @defgroup unbounded_queue Unbounded-Queue - * Group of Unbounded Queues. These queues have no capacity limit and thus - * enqueue operations shall always succeed. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/map - * @brief This is a collection of algos that implement map interface. - ******************************************************************************/ - -/******************************************************************************* - * @dir vsync/thread/mutex - * @brief Different implementations of user-space mutex. + * vatomic is a header-only library of atomic operations, supporting mainstream + * architectures: ARMv7, ARMv8 (AArch32 and AArch64), RISC-V, and x86_64. The + * memory ordering guarantees provided by the atomic interface are formally + * described in the VSync Memory Model (VMM) file. + * ******************************************************************************/ - #endif diff --git a/include/vsync/vtypes.h b/include/vsync/vtypes.h index 7d8b5686..f1ddfdac 100644 --- a/include/vsync/vtypes.h +++ b/include/vsync/vtypes.h @@ -1,5 +1,5 @@ /* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. * SPDX-License-Identifier: MIT */ @@ -29,9 +29,15 @@ #include <stdint.h> #include <stddef.h> #include <stdbool.h> - #if !defined(__APPLE__) - #include <inttypes.h> + #if defined(__APPLE__) && !defined(__STDC_FORMAT_MACROS) + // For GitHub's runner this is not really needed. + // However, there have been cases mentioned on the internet + // where one needs to define this so that e.g. PRIuPTR + // is defined in `inttypes.h`. + // https://stackoverflow.com/questions/26182336/priuptr-preprocessor-bug-in-gcc + #define __STDC_FORMAT_MACROS #endif + #include <inttypes.h> typedef uint8_t vuint8_t; typedef uint16_t vuint16_t; typedef uint32_t vuint32_t; @@ -107,12 +113,30 @@ typedef bool vbool_t; #define VUINTPTR_WIDTH (sizeof(vuintptr_t) * 8) #endif +#if defined(INT8_MAX) + #define VINT8_MAX INT8_MAX +#else + #define VINT8_MAX V_SIGNED_INT_MAX(vint8_t) +#endif + +#if defined(INT16_MAX) + #define VINT16_MAX INT16_MAX +#else + #define VINT16_MAX V_SIGNED_INT_MAX(vint16_t) +#endif + #if defined(INT32_MAX) #define VINT32_MAX INT32_MAX #else #define VINT32_MAX V_SIGNED_INT_MAX(vint32_t) #endif +#if defined(INT64_MAX) + #define VINT64_MAX INT64_MAX +#else + #define VINT64_MAX V_SIGNED_INT_MAX(vint64_t) +#endif + /* Format */ #if !defined(VSYNC_ENABLE_FREESTANDING) #define VUINTPTR_FORMAT PRIuPTR diff --git a/scripts/ensure-cmd.sh b/scripts/ensure-cmd.sh new file mode 100755 index 00000000..c360b264 --- /dev/null +++ b/scripts/ensure-cmd.sh @@ -0,0 +1,304 @@ +#!/bin/sh +# ensure-cmd.sh: Fetch/build a command and print the absolute path to its binary. +# ------------------------------------------------------------------------------ +# Copyright (c) 2025 Diogo Behrens +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted.
+# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. +# ------------------------------------------------------------------------------ +# You can copy this file into another project to ensure a command is installed +# automatically; adapt or remove the license above as needed. +# +# Usage: +# ensure-cmd.sh [OPTIONS] <program> <version> +# +# Run `ensure-cmd.sh -h` for details on the usage. +# +# This snippet can be used in a CMake file to find the program or fetch/build +# it, setting the path to the program in variable PROGRAM_PATH: +# +# execute_process( +# COMMAND "path/to/ensure-cmd.sh" -q +# --workdir "${CMAKE_BINARY_DIR}" +# --url "${URL}" +# --sha256 "${SHA256}" +# "${PROGRAM}" "${VERSION}" +# OUTPUT_VARIABLE PROGRAM_PATH +# OUTPUT_STRIP_TRAILING_WHITESPACE +# RESULT_VARIABLE ENSURE_RC) +# if(NOT ENSURE_RC EQUAL 0) +# message(FATAL_ERROR "Failed to ensure ${VERSION} is available") +# endif() +# +set -eu + +usage() { + cat >&2 <<EOF + +Ensures <program> exists and matches <version>. Without --url only existing +installations are validated. DIR defaults to the current working directory. + +Options: + -q Quiet output, only print path to program + --workdir DIR Directory used to cache/download/build (default: \$PWD) + --url URL Tarball URL for requested version (required to download/build) + --sha256 HASH Optional SHA256 checksum (format: HEX or SHA256=HEX) + -h, --help Show this help text and exit +EOF +} + +log() { + if [ "$QUIET" != true ]; then + printf '%s\n' "$*" >&2 + fi +} + +warn() { + if [ "$QUIET" != true ]; then + printf 'warning: %s\n' "$*" >&2 + fi +} + +output() { + if [ "$QUIET" = true ]; then + cat <&0 > /dev/null + else + cat <&0 + fi +} + +command_exists() { + command -v "$1" >/dev/null 2>&1 +} + +require_command() { + if !
command_exists "$1"; then + log "Required command '$1' not found" + exit 1 + fi +} + +normalize_sha() { + value=$1 + case "$value" in + SHA256=*|sha256=*) + value=${value#*=} + ;; + esac + printf '%s' "$value" | tr 'A-Z' 'a-z' +} + +sha256_of() { + file=$1 + if command_exists shasum; then + shasum -a 256 "$file" | awk '{print $1}' + elif command_exists sha256sum; then + sha256sum "$file" | awk '{print $1}' + elif command_exists openssl; then + openssl dgst -sha256 "$file" | awk '{print $NF}' + else + log "Need shasum, sha256sum, or openssl to verify checksums" + exit 1 + fi +} + +verify_sha256() { + file=$1 + expected=$2 + actual=$(sha256_of "$file" | tr 'A-Z' 'a-z') + if [ "$actual" != "$expected" ]; then + log "Checksum mismatch for $file" + log " expected: $expected" + log " actual: $actual" + exit 1 + fi +} + +version_matches() { + required=$1 + installed=$2 + + [ "$required" = "$installed" ] && return 0 + + case "$installed" in + "$required"|"$required".*) + return 0 + ;; + esac + + wildcard=$(printf '%s' "$required" | tr 'X' '*' | tr 'x' '*') + case "$installed" in + $wildcard|$wildcard.*) + return 0 + ;; + esac + + return 1 +} + +binary_has_version() { + binary=$1 + required=$2 + + [ -x "$binary" ] || return 1 + installed=$("$binary" -V 2>/dev/null) || return 1 + installed=$(printf '%s' "$installed" | tr -d '\r') + [ -n "$installed" ] || return 1 + version_matches "$required" "$installed" +} + +download_archive() { + url=$1 + destination=$2 + + tmp="${destination}.tmp.$$" + if command_exists curl; then + curl -L --fail --silent --show-error -o "$tmp" "$url" | output + elif command_exists wget; then + wget -q -O "$tmp" "$url" | output + else + log "Need curl or wget to download archive" + rm -f "$tmp" + exit 1 + fi + mv "$tmp" "$destination" +} + +extract_archive() { + archive=$1 + workdir=$2 + require_command tar + tar -xzf "$archive" -C "$workdir" | output +} + +build_binary() { + srcdir=$1 + make_bin=${MAKE:-make} + require_command "$make_bin" + ( + cd "$srcdir" + if ! "$make_bin" | output; then + log "Failed to build in $srcdir" + exit 1 + fi + ) +} + +WORKDIR= +URL= +RAW_SHA= +QUIET= + +while [ $# -gt 0 ]; do + case "$1" in + -q) + QUIET=true + shift + ;; + --workdir) + [ $# -ge 2 ] || usage + WORKDIR=$2 + shift 2 + ;; + --url) + [ $# -ge 2 ] || usage + URL=$2 + shift 2 + ;; + --sha256) + [ $# -ge 2 ] || usage + RAW_SHA=$2 + shift 2 + ;; + --help|-h) + usage + exit 0 + ;; + --) + shift + break + ;; + --*) + log "Unknown option: $1" + usage + ;; + *) + break + ;; + esac +done + +[ $# -ge 2 ] || usage +PROGRAM=$1 +VERSION=$2 +shift 2 +[ $# -eq 0 ] || usage + +if [ -n "$WORKDIR" ]; then + mkdir -p "$WORKDIR" + WORKDIR=$(cd "$WORKDIR" && pwd) +else + WORKDIR=$(pwd) +fi + +ARCHIVE="$WORKDIR/${PROGRAM}-${VERSION}.tar.gz" +SRCDIR="$WORKDIR/${PROGRAM}-${VERSION}" +BINARY="$SRCDIR/$PROGRAM" + +if command_exists "$PROGRAM"; then + system_bin=$(command -v "$PROGRAM") + if binary_has_version "$system_bin" "$VERSION"; then + printf '%s\n' "$system_bin" + exit 0 + fi +fi + +if binary_has_version "$BINARY" "$VERSION"; then + printf '%s\n' "$BINARY" + exit 0 +fi + +if [ -z "$URL" ]; then + log "Version $VERSION of $PROGRAM not found and no --url provided" + exit 1 +fi + +if [ -n "$RAW_SHA" ]; then + SHA256=$(normalize_sha "$RAW_SHA") +else + SHA256= + warn "No --sha256 provided; skipping checksum verification" +fi + +if [ ! 
-f "$ARCHIVE" ]; then + log "Downloading $PROGRAM from $URL" + download_archive "$URL" "$ARCHIVE" +fi + +if [ -n "$SHA256" ]; then + verify_sha256 "$ARCHIVE" "$SHA256" +fi + +if [ ! -x "$BINARY" ]; then + log "Extracting into $SRCDIR" + rm -rf "$SRCDIR" + extract_archive "$ARCHIVE" "$WORKDIR" + log "Building $PROGRAM" + build_binary "$SRCDIR" +fi + +if ! binary_has_version "$BINARY" "$VERSION"; then + log "Built $PROGRAM does not match required version $VERSION" + exit 1 +fi + +printf '%s\n' "$BINARY" diff --git a/scripts/license-check.sh b/scripts/license-check.sh index feaaa7b4..623d759f 100755 --- a/scripts/license-check.sh +++ b/scripts/license-check.sh @@ -14,14 +14,18 @@ else fi FILES=$(find $DIR\ -name '*.h' -o \ + -name '*.hpp' -o \ -name '*.c' -o \ + -name '*.cpp' -o \ -name '*.c.in' -o \ + -name '*.cpp.in' -o \ -name '*.h.in' -o \ - -name '*.hs' -o \ + -name '*.hpp.in' -o \ + -name '*.rs' -o \ -name '*.bpl' -o \ -name '*.cmake.in' -o \ -name 'CMake*.txt' -o \ - -name '*.cmake' | grep -v "build") + -name '*.cmake' | grep -v "build" | grep -v "examples") fi # we use GNU sed @@ -36,9 +40,11 @@ if ! sed --version > /dev/null 2>&1; then fi fi -COPYRIGHT_TEXT_HASKELL="\ --- Copyright (C) Huawei Technologies Co., Ltd. . All rights reserved.\n\ --- SPDX-License-Identifier: MIT\n\n" +COPYRIGHT_TEXT_RUST="\ +/*\n\ + * Copyright (C) Huawei Technologies Co., Ltd. . All rights reserved.\n\ + * SPDX-License-Identifier: MIT\n\ + */\n" COPYRIGHT_TEXT_CMAKE="\ \# Copyright (C) Huawei Technologies Co., Ltd. . All rights reserved.\n\ \# SPDX-License-Identifier: MIT\n\n" @@ -60,8 +66,8 @@ for f in ${FILES}; do ext="${fname##*.}" if [ "$ext" = "txt" ] || echo $fname | grep cmake > /dev/null; then COPYRIGHT_TEXT="$COPYRIGHT_TEXT_CMAKE" - elif [ "$ext" = "hs" ]; then - COPYRIGHT_TEXT="$COPYRIGHT_TEXT_HASKELL" + elif [ "$ext" = "rs" ]; then + COPYRIGHT_TEXT="$COPYRIGHT_TEXT_RUST" elif [ "$ext" = "bpl" ]; then COPYRIGHT_TEXT="$COPYRIGHT_TEXT_BOOGIE" else diff --git a/template/CMakeLists.txt b/template/CMakeLists.txt index aa122777..8406ba5c 100644 --- a/template/CMakeLists.txt +++ b/template/CMakeLists.txt @@ -1,6 +1,11 @@ # Copyright (C) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. # SPDX-License-Identifier: MIT +if(NOT VATOMIC_DEV) + return() +endif() + add_custom_target(vatomic-generate) + # after running vatomic-generate, run clang-format-apply, in order to fix format # in autogenerted files. 
add_custom_command( @@ -9,7 +14,11 @@ add_custom_command( COMMAND cmake --build "${CMAKE_BINARY_DIR}" --target clang-format-apply WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) -add_subdirectory(include) +set(VATOMIC_RULES ${CMAKE_CURRENT_SOURCE_DIR}/vatomic.rules) + +add_subdirectory(atomic) +add_subdirectory(cpp-atomic) +add_subdirectory(test-atomic) # The following empty file helps LSP servers (e.g., clangd) to figure out the # symbols in the .h.in template files by updating the compile_commands.json diff --git a/template/include/vsync/atomic/CMakeLists.txt b/template/atomic/CMakeLists.txt similarity index 85% rename from template/include/vsync/atomic/CMakeLists.txt rename to template/atomic/CMakeLists.txt index d6907c95..99ae7e16 100644 --- a/template/include/vsync/atomic/CMakeLists.txt +++ b/template/atomic/CMakeLists.txt @@ -13,7 +13,7 @@ foreach(TARGET ${TARGETS}) add_custom_target( ${TARGET} COMMAND - $<TARGET_FILE:tmplr> ${CMAKE_CURRENT_SOURCE_DIR}/vatomic.rules + ${TMPLR_PROGRAM} ${VATOMIC_RULES} ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.in > ${PROJECT_SOURCE_DIR}/include/vsync/atomic/${TARGET}) add_dependencies(vatomic-generate ${TARGET}) @@ -29,8 +29,7 @@ foreach(TARGET ${TARGETS}) add_custom_target( ${OUT} COMMAND - $<TARGET_FILE:tmplr> -DTY=${TY} - ${CMAKE_CURRENT_SOURCE_DIR}/vatomic.rules + ${TMPLR_PROGRAM} -FTY=${TY} ${VATOMIC_RULES} ${CMAKE_CURRENT_SOURCE_DIR}/${TMPL}.in > ${PROJECT_SOURCE_DIR}/include/vsync/atomic/${OUT}) add_dependencies(vatomic-generate ${OUT}) diff --git a/template/include/vsync/atomic/await.h.in b/template/atomic/await.h.in similarity index 100% rename from template/include/vsync/atomic/await.h.in rename to template/atomic/await.h.in diff --git a/template/include/vsync/atomic/await_TY.h.in b/template/atomic/await_TY.h.in similarity index 100% rename from template/include/vsync/atomic/await_TY.h.in rename to template/atomic/await_TY.h.in diff --git a/template/include/vsync/atomic/core.h.in b/template/atomic/core.h.in similarity index 98% rename from template/include/vsync/atomic/core.h.in rename to template/atomic/core.h.in index 1a13cb7c..63a34be7 100644 --- a/template/include/vsync/atomic/core.h.in +++ b/template/atomic/core.h.in @@ -110,15 +110,15 @@ _tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], h.in = h); _tmpl_end; /* ***************************************************************************** - * config + * Barrier remap * ****************************************************************************/ #if defined(VATOMIC_ENABLE_ATOMIC_SC) _tmpl_begin(TY = [[fnc; u8; u16; u32; u64; sz; ptr]], h.in = h); -#include <vsync/atomic/internal/config/TY_sc.h> +#include <vsync/atomic/internal/remap_TY_sc.h> _tmpl_end; #elif defined(VATOMIC_ENABLE_ATOMIC_RLX) _tmpl_begin(TY = [[fnc; u8; u16; u32; u64; sz; ptr]], h.in = h); -#include <vsync/atomic/internal/config/TY_rlx.h> +#include <vsync/atomic/internal/remap_TY_rlx.h> _tmpl_end; #endif /* ***************************************************************************** diff --git a/template/include/vsync/atomic/core_TY.h.in b/template/atomic/core_TY.h.in similarity index 100% rename from template/include/vsync/atomic/core_TY.h.in rename to template/atomic/core_TY.h.in diff --git a/template/include/vsync/atomic/dispatch.h.in b/template/atomic/dispatch.h.in similarity index 100% rename from template/include/vsync/atomic/dispatch.h.in rename to template/atomic/dispatch.h.in diff --git a/template/include/vsync/atomic/internal/CMakeLists.txt b/template/atomic/internal/CMakeLists.txt similarity index 82% rename from template/include/vsync/atomic/internal/CMakeLists.txt rename to template/atomic/internal/CMakeLists.txt index 2e750131..4ff1ad00 100644 --- a/template/include/vsync/atomic/internal/CMakeLists.txt +++ 
b/template/atomic/internal/CMakeLists.txt @@ -1,6 +1,6 @@ # Copyright (C) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. # SPDX-License-Identifier: MIT -add_subdirectory(config) +add_subdirectory(remap) # grep templates: for each .in, an .h will be generated file(GLOB TARGETS *.in) @@ -11,8 +11,7 @@ foreach(TARGET ${TARGETS}) add_custom_target( ${TARGET} COMMAND - $<TARGET_FILE:tmplr> - ${CMAKE_CURRENT_SOURCE_DIR}/../vatomic.rules + ${TMPLR_PROGRAM} ${VATOMIC_RULES} ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.in > ${PROJECT_SOURCE_DIR}/include/vsync/atomic/internal/${TARGET}) add_dependencies(vatomic-generate ${TARGET}) diff --git a/template/include/vsync/atomic/internal/arm32_v7.h.in b/template/atomic/internal/arm32_v7.h.in similarity index 100% rename from template/include/vsync/atomic/internal/arm32_v7.h.in rename to template/atomic/internal/arm32_v7.h.in diff --git a/template/include/vsync/atomic/internal/arm32_v8.h.in b/template/atomic/internal/arm32_v8.h.in similarity index 100% rename from template/include/vsync/atomic/internal/arm32_v8.h.in rename to template/atomic/internal/arm32_v8.h.in diff --git a/template/include/vsync/atomic/internal/arm64.h.in b/template/atomic/internal/arm64.h.in similarity index 100% rename from template/include/vsync/atomic/internal/arm64.h.in rename to template/atomic/internal/arm64.h.in diff --git a/template/include/vsync/atomic/internal/arm64_llsc.h.in b/template/atomic/internal/arm64_llsc.h.in similarity index 100% rename from template/include/vsync/atomic/internal/arm64_llsc.h.in rename to template/atomic/internal/arm64_llsc.h.in diff --git a/template/include/vsync/atomic/internal/arm64_lse.h.in b/template/atomic/internal/arm64_lse.h.in similarity index 100% rename from template/include/vsync/atomic/internal/arm64_lse.h.in rename to template/atomic/internal/arm64_lse.h.in diff --git a/template/include/vsync/atomic/internal/arm64_lxe.h.in b/template/atomic/internal/arm64_lxe.h.in similarity index 100% rename from template/include/vsync/atomic/internal/arm64_lxe.h.in rename to template/atomic/internal/arm64_lxe.h.in diff --git a/template/include/vsync/atomic/internal/builtins.h.in b/template/atomic/internal/builtins.h.in similarity index 100% rename from template/include/vsync/atomic/internal/builtins.h.in rename to template/atomic/internal/builtins.h.in diff --git a/template/include/vsync/atomic/internal/fallback.h.in b/template/atomic/internal/fallback.h.in similarity index 100% rename from template/include/vsync/atomic/internal/fallback.h.in rename to template/atomic/internal/fallback.h.in diff --git a/template/include/vsync/atomic/internal/config/CMakeLists.txt b/template/atomic/internal/remap/CMakeLists.txt similarity index 86% rename from template/include/vsync/atomic/internal/config/CMakeLists.txt rename to template/atomic/internal/remap/CMakeLists.txt index f56e0ceb..492584f0 100644 --- a/template/include/vsync/atomic/internal/config/CMakeLists.txt +++ b/template/atomic/internal/remap/CMakeLists.txt @@ -14,9 +14,7 @@ set(ALL_TYPES fnc) foreach(TARGET IN ITEMS ${TARGETS}) - get_filename_component(TARGET ${TARGET} NAME_WE) - if(DEFINED ${TARGET}_TYPES) set(TYPES ${${TARGET}_TYPES}) else() @@ -30,10 +28,9 @@ foreach(TARGET IN ITEMS ${TARGETS}) add_custom_target( ${OUT} COMMAND - $<TARGET_FILE:tmplr> -DTY=${TY} - ${CMAKE_CURRENT_SOURCE_DIR}/../../vatomic.rules + ${TMPLR_PROGRAM} -FTY=${TY} ${VATOMIC_RULES} ${CMAKE_CURRENT_SOURCE_DIR}/${TMPL}.in > - ${PROJECT_SOURCE_DIR}/include/vsync/atomic/internal/config/${OUT} + ${PROJECT_SOURCE_DIR}/include/vsync/atomic/internal/remap_${OUT} + )
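To make the -FTY expansion concrete: the TY_rlx.h.in template that follows emits one forwarder per type, operation, and memory order. A hedged sketch of a single expansion for TY=u32 and MO=acq, with vuint32_t and vatomic32_t inferred from vtypes.h and the library's TY naming (illustrative, not the verbatim generated remap_u32_rlx.h):

    /* Under VATOMIC_ENABLE_ATOMIC_RLX, the acquire read simply forwards
     * to the relaxed implementation. */
    static inline vuint32_t
    __vatomic_read_acq(const vatomic32_t *a)
    {
        return __vatomic_read_rlx(a);
    }
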
add_dependencies(vatomic-generate ${OUT}) endforeach() diff --git a/template/atomic/internal/remap/TY_rlx.h.in b/template/atomic/internal/remap/TY_rlx.h.in new file mode 100644 index 00000000..0e374c0a --- /dev/null +++ b/template/atomic/internal/remap/TY_rlx.h.in @@ -0,0 +1,163 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ + +_tmpl_begin(TY = [[fnc; u8; u16; u32; u64; ptr; sz]]); +#ifndef VATOMIC_CONFIG_UPCASE_TY__RLX_H +#define VATOMIC_CONFIG_UPCASE_TY__RLX_H +_tmpl_end; +_tmpl_begin(=); +AUTOGEN +_tmpl_end; + +#include + +#define _tmpl_mute +#include +#define _tmpl_unmute + +#if defined(VATOMIC_ENABLE_ATOMIC_RLX) + +_tmpl_dl; // fence +_tmpl_begin(TY = [[fnc]], MO = [[seq; acq; rel]]); + #define _tmpl_upcase(vatomic_fence_MS) +static inline void +vatomic_fence_MS(void) +{ + vatomic_fence_rlx(); +} +_tmpl_end; + +_tmpl_dl; // read +_tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[seq; acq;]]); + #define _tmpl_upcase(__vatomic_read_MS) +static inline TT +__vatomic_read_MS(const AA *a) +{ + return __vatomic_read_rlx(a); +} +_tmpl_end; + +_tmpl_dl; // write +_tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[seq; rel;]]); + #define _tmpl_upcase(__vatomic_write_MS) +static inline void +__vatomic_write_MS(AA *a, TT v) +{ + __vatomic_write_rlx(a, v); +} +_tmpl_end; + +_tmpl_dl; // xchg +_tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[seq; acq; rel]]); + #define _tmpl_upcase(__vatomic_xchg_MS) +static inline TT +__vatomic_xchg_MS(AA *a, TT v) +{ + return __vatomic_xchg_rlx(a, v); +} +_tmpl_end; + +_tmpl_dl; // cmpxchg +_tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[seq; acq; rel]]); + #define _tmpl_upcase(__vatomic_cmpxchg_MS) +static inline TT +__vatomic_cmpxchg_MS(AA *a, TT e, TT v) +{ + return __vatomic_cmpxchg_rlx(a, e, v); +} +_tmpl_end; + +_tmpl_dl; // get_op/ op_get +_tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[seq; acq; rel]], + FUNC = [[get_max; get_and; get_or; get_xor; get_add; get_sub; max_get; and_get; or_get; xor_get; add_get; sub_get]]); + #define _tmpl_upcase(__vatomic_FUNC_MS) +static inline TT +__vatomic_FUNC_MS(AA *a, TT v) +{ + return __vatomic_FUNC_rlx(a, v); +} +_tmpl_end; + +_tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[seq; acq; rel]], + FUNC = [[get_inc; inc_get; get_dec; dec_get]]); + #define _tmpl_upcase(__vatomic_FUNC_MS) +static inline TT +__vatomic_FUNC_MS(AA *a) +{ + return __vatomic_FUNC_rlx(a); +} +_tmpl_end; + +_tmpl_dl; // op +_tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[seq; rel]], + FUNC = [[max; and; or ; xor ; add; sub]]); + #define _tmpl_upcase(__vatomic_FUNC_MS) +static inline void +__vatomic_FUNC_MS(AA *a, TT v) +{ + __vatomic_FUNC_rlx(a, v); +} +_tmpl_end; + +_tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[seq; rel]], + FUNC = [[inc; dec]]); + #define _tmpl_upcase(__vatomic_FUNC_MS) +static inline void +__vatomic_FUNC_MS(AA *a) +{ + __vatomic_FUNC_rlx(a); +} +_tmpl_end; + +_tmpl_dl; // await_cond +_tmpl_begin(TY = [[u32; u64; ptr]], MO = [[seq; acq]], + COND = [[eq; neq; lt; le; gt; ge]], $F_ptr_eq = BLK_KEEP, + $F_ptr_neq = BLK_KEEP, $F_ptr = BLK_SKIP); +$F_TY_COND; + #define _tmpl_upcase(__vatomic_await_COND_MS) +static inline TT +__vatomic_await_COND_MS(const AA *a, TT v) +{ + return __vatomic_await_COND_rlx(a, v); +} +_tmpl_end; + +_tmpl_begin(TY = [[u32; u64]], COND = [[le; lt; ge; gt]], + OP = [[add; sub; set]], MO = [[seq; acq; rel]], + FUNC = await_COND_OP); + #define _tmpl_upcase(__vatomic_await_COND_OP_MS) 
+static inline TT +__vatomic_await_COND_OP_MS(AA *a, TT c, TT v) +{ + return __vatomic_await_COND_OP_rlx(a, c, v); +} +_tmpl_end; + +_tmpl_begin(TY = [[u32; u64; ptr]], OP = [[add; sub; set]], + MO = [[seq; acq; rel]], FUNC = await_neq_OP, $F_ptr_set = BLK_KEEP, + $F_ptr = BLK_SKIP); +$F_TY_OP; + #define _tmpl_upcase(__vatomic_await_neq_OP_MS) +static inline TT +__vatomic_await_neq_OP_MS(AA *a, TT c, TT v) +{ + return __vatomic_await_neq_OP_rlx(a, c, v); +} +_tmpl_end; + +_tmpl_begin(TY = [[u32; u64; ptr]], OP = [[add; sub; set]], + MO = [[seq; acq; rel]], FUNC = await_eq_OP, $F_ptr_set = BLK_KEEP, + $F_ptr = BLK_SKIP); +$F_TY_OP; + #define _tmpl_upcase(__vatomic_await_eq_OP_MS) +static inline TT +__vatomic_await_eq_OP_MS(AA *a, TT c, TT v) +{ + return __vatomic_await_eq_OP_rlx(a, c, v); +} +_tmpl_end; + +#endif +#endif diff --git a/template/include/vsync/atomic/internal/config/TY_sc.h.in b/template/atomic/internal/remap/TY_sc.h.in similarity index 50% rename from template/include/vsync/atomic/internal/config/TY_sc.h.in rename to template/atomic/internal/remap/TY_sc.h.in index d3cc1b22..df69a956 100644 --- a/template/include/vsync/atomic/internal/config/TY_sc.h.in +++ b/template/atomic/internal/remap/TY_sc.h.in @@ -3,7 +3,7 @@ * SPDX-License-Identifier: MIT */ -_tmpl_begin(TY = [[fnc;u8; u16; u32; u64; ptr; sz]]); +_tmpl_begin(TY = [[fnc; u8; u16; u32; u64; ptr; sz]]); #ifndef VATOMIC_CONFIG_UPCASE_TY__SC_H #define VATOMIC_CONFIG_UPCASE_TY__SC_H _tmpl_end; @@ -21,93 +21,113 @@ _tmpl_end; _tmpl_dl; // fence _tmpl_begin(TY = [[fnc]], MO = [[rlx; acq; rel]]); -#define _tmpl_upcase(vatomic_fence_MS) -static inline void vatomic_fence_MS(void) { - vatomic_fence(); + #define _tmpl_upcase(vatomic_fence_MS) +static inline void +vatomic_fence_MS(void) +{ + vatomic_fence(); } _tmpl_end; _tmpl_dl; // read -_tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[rlx; acq;]]); -#define _tmpl_upcase(__vatomic_read_MS) -static inline TT __vatomic_read_MS(const AA *a) { - return __vatomic_read(a); +_tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[rlx; acq;]]); + #define _tmpl_upcase(__vatomic_read_MS) +static inline TT +__vatomic_read_MS(const AA *a) +{ + return __vatomic_read(a); } _tmpl_end; _tmpl_dl; // write _tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[rlx; rel;]]); -#define _tmpl_upcase(__vatomic_write_MS) -static inline void __vatomic_write_MS(AA *a, TT v) { - __vatomic_write(a, v); + #define _tmpl_upcase(__vatomic_write_MS) +static inline void +__vatomic_write_MS(AA *a, TT v) +{ + __vatomic_write(a, v); } _tmpl_end; _tmpl_dl; // xchg _tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[rlx; acq; rel]]); -#define _tmpl_upcase(__vatomic_xchg_MS) -static inline TT __vatomic_xchg_MS(AA *a, TT v) { - return __vatomic_xchg(a, v); + #define _tmpl_upcase(__vatomic_xchg_MS) +static inline TT +__vatomic_xchg_MS(AA *a, TT v) +{ + return __vatomic_xchg(a, v); } _tmpl_end; _tmpl_dl; // cmpxchg _tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[rlx; acq; rel]]); -#define _tmpl_upcase(__vatomic_cmpxchg_MS) -static inline TT __vatomic_cmpxchg_MS(AA *a, TT e, TT v) { - return __vatomic_cmpxchg(a, e, v); + #define _tmpl_upcase(__vatomic_cmpxchg_MS) +static inline TT +__vatomic_cmpxchg_MS(AA *a, TT e, TT v) +{ + return __vatomic_cmpxchg(a, e, v); } _tmpl_end; _tmpl_dl; // get_op/ op_get _tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[rlx; acq; rel]], - FUNC = [[get_max; get_and; get_or; get_xor; get_add; get_sub; max_get; and_get; or_get; xor_get; add_get; sub_get]]); -#define 
_tmpl_upcase(__vatomic_FUNC_MS) -static inline TT __vatomic_FUNC_MS(AA *a, TT v) { - return __vatomic_FUNC(a, v); + FUNC = [[get_max; get_and; get_or; get_xor; get_add; get_sub; max_get; and_get; or_get; xor_get; add_get; sub_get]]); + #define _tmpl_upcase(__vatomic_FUNC_MS) +static inline TT +__vatomic_FUNC_MS(AA *a, TT v) +{ + return __vatomic_FUNC(a, v); } _tmpl_end; _tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[rlx; acq; rel]], - FUNC = [[get_inc; inc_get; get_dec; dec_get]]); -#define _tmpl_upcase(__vatomic_FUNC_MS) -static inline TT __vatomic_FUNC_MS(AA *a) { - return __vatomic_FUNC(a); + FUNC = [[get_inc; inc_get; get_dec; dec_get]]); + #define _tmpl_upcase(__vatomic_FUNC_MS) +static inline TT +__vatomic_FUNC_MS(AA *a) +{ + return __vatomic_FUNC(a); } _tmpl_end; _tmpl_dl; // op _tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[rlx; rel]], - FUNC = [[max; and; or; xor; add; sub]]); -#define _tmpl_upcase(__vatomic_FUNC_MS) -static inline void __vatomic_FUNC_MS(AA *a, TT v) { - __vatomic_FUNC(a, v); + FUNC = [[max; and; or ; xor ; add; sub]]); + #define _tmpl_upcase(__vatomic_FUNC_MS) +static inline void +__vatomic_FUNC_MS(AA *a, TT v) +{ + __vatomic_FUNC(a, v); } _tmpl_end; _tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[rlx; rel]], - FUNC = [[inc; dec]]); -#define _tmpl_upcase(__vatomic_FUNC_MS) -static inline void __vatomic_FUNC_MS(AA *a) { - __vatomic_FUNC(a); + FUNC = [[inc; dec]]); + #define _tmpl_upcase(__vatomic_FUNC_MS) +static inline void +__vatomic_FUNC_MS(AA *a) +{ + __vatomic_FUNC(a); } _tmpl_end; _tmpl_dl; // await_cond _tmpl_begin(TY = [[u32; u64; ptr]], MO = [[rlx; acq]], - COND = [[eq; neq; lt; le; gt; ge]], - $F_ptr_eq = BLK_KEEP, $F_ptr_neq = BLK_KEEP, $F_ptr = BLK_SKIP); + COND = [[eq; neq; lt; le; gt; ge]], $F_ptr_eq = BLK_KEEP, + $F_ptr_neq = BLK_KEEP, $F_ptr = BLK_SKIP); $F_TY_COND; -#define _tmpl_upcase(__vatomic_await_COND_MS) -static inline TT __vatomic_await_COND_MS(const AA *a, TT v) { - return __vatomic_await_COND(a, v); + #define _tmpl_upcase(__vatomic_await_COND_MS) +static inline TT +__vatomic_await_COND_MS(const AA *a, TT v) +{ + return __vatomic_await_COND(a, v); } _tmpl_end; _tmpl_begin(TY = [[u32; u64]], COND = [[le; lt; ge; gt]], OP = [[add; sub; set]], MO = [[rlx; acq; rel]], FUNC = await_COND_OP); -#define _tmpl_upcase(__vatomic_await_COND_OP_MS) + #define _tmpl_upcase(__vatomic_await_COND_OP_MS) static inline TT __vatomic_await_COND_OP_MS(AA *a, TT c, TT v) { @@ -119,7 +139,7 @@ _tmpl_begin(TY = [[u32; u64; ptr]], OP = [[add; sub; set]], MO = [[rlx; acq; rel]], FUNC = await_neq_OP, $F_ptr_set = BLK_KEEP, $F_ptr = BLK_SKIP); $F_TY_OP; -#define _tmpl_upcase(__vatomic_await_neq_OP_MS) + #define _tmpl_upcase(__vatomic_await_neq_OP_MS) static inline TT __vatomic_await_neq_OP_MS(AA *a, TT c, TT v) { @@ -128,10 +148,10 @@ __vatomic_await_neq_OP_MS(AA *a, TT c, TT v) _tmpl_end; _tmpl_begin(TY = [[u32; u64; ptr]], OP = [[add; sub; set]], - MO = [[rlx; acq; rel]], FUNC = await_eq_OP, - $F_ptr_set = BLK_KEEP, $F_ptr = BLK_SKIP); + MO = [[rlx; acq; rel]], FUNC = await_eq_OP, $F_ptr_set = BLK_KEEP, + $F_ptr = BLK_SKIP); $F_TY_OP; -#define _tmpl_upcase(__vatomic_await_eq_OP_MS) + #define _tmpl_upcase(__vatomic_await_eq_OP_MS) static inline TT __vatomic_await_eq_OP_MS(AA *a, TT c, TT v) { diff --git a/template/include/vsync/atomic/tmplr.h b/template/atomic/tmplr.h similarity index 100% rename from template/include/vsync/atomic/tmplr.h rename to template/atomic/tmplr.h diff --git a/template/cpp-atomic/CMakeLists.txt 
b/template/cpp-atomic/CMakeLists.txt new file mode 100644 index 00000000..aa4bb305 --- /dev/null +++ b/template/cpp-atomic/CMakeLists.txt @@ -0,0 +1,26 @@ +# Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. +# SPDX-License-Identifier: MIT + +set(unsigned_core_TYPES + u8 + u16 + u32 + u64 + sz + ptr + bool) +set(signed_core_TYPES s8 s16 s32 s64) +set(TARGETS signed_core unsigned_core) +foreach(TARGET ${TARGETS}) + set(TMPL ${TARGET}_TY.hpp) + foreach(TY ${${TARGET}_TYPES}) + set(OUT core_${TY}.hpp) + add_custom_target( + ${OUT} + COMMAND + ${TMPLR_PROGRAM} -FTY=${TY} ${VATOMIC_RULES} + ${CMAKE_CURRENT_SOURCE_DIR}/${TMPL}.in > + ${PROJECT_SOURCE_DIR}/include/vsync/atomic/${OUT}) + add_dependencies(vatomic-generate ${OUT}) + endforeach() +endforeach() diff --git a/template/cpp-atomic/signed_core_TY.hpp.in b/template/cpp-atomic/signed_core_TY.hpp.in new file mode 100644 index 00000000..d681d5fb --- /dev/null +++ b/template/cpp-atomic/signed_core_TY.hpp.in @@ -0,0 +1,150 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +#define _tmpl_mute +#include +#define _tmpl_unmute +_tmpl_map(MAP_MIRROR_s8, vuint8_t); +_tmpl_map(MAP_MIRROR_s16, vuint16_t); +_tmpl_map(MAP_MIRROR_s32, vuint32_t); +_tmpl_map(MAP_MIRROR_s64, vuint64_t); +_tmpl_dl; //-------------------------------------- +_tmpl_map(MAP_VOL, volatile); +_tmpl_map(MAP_NON_VOL, ); +_tmpl_begin(TY = [[s8; s16; s32; s64;]]); +extern "C" { +#include +} +namespace vsync +{ + template <> struct atomic<TT> { + atomic(const atomic &) = delete; + atomic &operator=(const atomic &) = delete; + atomic &operator=(const atomic &) volatile = delete; + atomic() : _a() + { + } + atomic(TT v) : _a(static_cast<MAP_MIRROR_TY>(v)) + { + } +_tmpl_end; +_tmpl_begin(TY = [[s8; s16; s32; s64;]], VL =[[VOL; NON_VOL]]); + TT load(memory_order order = memory_order_seq_cst) MAP_VL const noexcept + { + return _a.load(order); + } + void store(TT v, memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + _a.store(static_cast<MAP_MIRROR_TY>(v), order); + } + + TT operator=(TT v) MAP_VL noexcept + { + store(v); + return v; + } + + operator TT() MAP_VL const noexcept + { + return load(); + } + + TT exchange(TT v, memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + return static_cast<TT>( + _a.exchange(static_cast<MAP_MIRROR_TY>(v), order)); + } + + + bool compare_exchange_strong( + TT &expected, TT desired, memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) MAP_VL noexcept + { + // TODO: find a way to make the cast on expected safe!
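+ // Note on the cast below: `expected` is reinterpreted as the unsigned + // mirror type (see the MAP_MIRROR_* maps at the top of this template) so + // that the wrapped vsync::atomic can write the observed value back into + // it on failure; signed and unsigned variants share the same width, which + // is what makes this C-style cast work until the TODO above is resolved.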
+ return _a.compare_exchange_strong( + (MAP_MIRROR_TY &)(expected), + static_cast<MAP_MIRROR_TY>(desired), order, failure); + } + bool compare_exchange_weak( + TT &expected, TT desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) MAP_VL noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + + TT fetch_add(TT v, memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + return static_cast<TT>(_a.fetch_add(static_cast<MAP_MIRROR_TY>(v), order)); + } + TT operator+=(TT v) MAP_VL noexcept + { + return fetch_add(v); + } + // v++ + TT operator++(int) MAP_VL noexcept + { + return static_cast<TT>(_a++); + } + // ++v + TT operator++() MAP_VL noexcept + { + return static_cast<TT>(++_a); + } + + TT fetch_sub(TT v, memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + return static_cast<TT>(_a.fetch_sub(static_cast<MAP_MIRROR_TY>(v), order)); + } + TT operator-=(TT v) MAP_VL noexcept + { + return fetch_sub(v); + } + // v-- + TT operator--(int) MAP_VL noexcept + { + return static_cast<TT>(_a--); + } + // --v + TT operator--() MAP_VL noexcept + { + return static_cast<TT>(--_a); + } + + TT fetch_and(TT v, memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + return static_cast<TT>(_a.fetch_and(static_cast<MAP_MIRROR_TY>(v), order)); + } + + TT operator&=(TT v) MAP_VL noexcept + { + return fetch_and(v); + } + + TT fetch_or(TT v, memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + return static_cast<TT>(_a.fetch_or(static_cast<MAP_MIRROR_TY>(v), order)); + } + + TT operator|=(TT v) MAP_VL noexcept + { + return fetch_or(v); + } + + TT fetch_xor(TT v, memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + return static_cast<TT>(_a.fetch_xor(static_cast<MAP_MIRROR_TY>(v), order)); + } + + TT operator^=(TT v) MAP_VL noexcept + { + return fetch_xor(v); + } +_tmpl_end; +_tmpl_begin(TY = [[s8; s16; s32; s64;]]); + private: + vsync::atomic<MAP_MIRROR_TY> _a; + }; +} // namespace vsync +_tmpl_end; diff --git a/template/cpp-atomic/unsigned_core_TY.hpp.in b/template/cpp-atomic/unsigned_core_TY.hpp.in new file mode 100644 index 00000000..324be4d4 --- /dev/null +++ b/template/cpp-atomic/unsigned_core_TY.hpp.in @@ -0,0 +1,395 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ */
+#define _tmpl_mute
+#include
+#define _tmpl_unmute
+_tmpl_begin(TY = [[u8; u16; u32; u64; sz; bool; ptr]]);
+#ifndef VATOMIC_CORE_UPCASE_TY__HPP
+#define VATOMIC_CORE_UPCASE_TY__HPP
+_tmpl_end;
+_tmpl_dl; //--------------------------------------
+_tmpl_begin(=);
+AUTOGEN
+_tmpl_end;
+_tmpl_dl; //--------------------------------------
+extern "C" {
+    #include
+}
+_tmpl_map(MAP_TMPL_T_ptr, PTR *);
+_tmpl_map(MAP_TMPL_N_ptr, typename PTR);
+_tmpl_map(MAP_CAST_ptr, static_cast<PTR *>);
+_tmpl_map(MAP_INIT_ptr, nullptr);
+_tmpl_dl; //--------------------------------------
+_tmpl_map(MAP_TMPL_T_u8, TT);
+_tmpl_map(MAP_TMPL_N_u8, );
+_tmpl_map(MAP_INIT_u8, 0U);
+_tmpl_map(MAP_CAST_u8, );
+_tmpl_dl; //--------------------------------------
+_tmpl_map(MAP_TMPL_T_bool, TT);
+_tmpl_map(MAP_TMPL_N_bool, );
+_tmpl_map(MAP_INIT_bool, false);
+_tmpl_map(MAP_CAST_bool, static_cast<vbool_t>);
+_tmpl_dl; //--------------------------------------
+_tmpl_map(MAP_TMPL_T_u16, TT);
+_tmpl_map(MAP_TMPL_N_u16, );
+_tmpl_map(MAP_INIT_u16, 0U);
+_tmpl_map(MAP_CAST_u16, );
+_tmpl_dl; //--------------------------------------
+_tmpl_map(MAP_TMPL_T_u32, TT);
+_tmpl_map(MAP_TMPL_N_u32, );
+_tmpl_map(MAP_CAST_u32, );
+_tmpl_map(MAP_INIT_u32, 0U);
+_tmpl_dl; //--------------------------------------
+_tmpl_map(MAP_TMPL_T_u64, TT);
+_tmpl_map(MAP_TMPL_N_u64, );
+_tmpl_map(MAP_CAST_u64, );
+_tmpl_map(MAP_INIT_u64, 0U);
+_tmpl_dl; //--------------------------------------
+_tmpl_map(MAP_TMPL_T_sz, TT);
+_tmpl_map(MAP_TMPL_N_sz, );
+_tmpl_map(MAP_CAST_sz, );
+_tmpl_map(MAP_INIT_sz, 0U);
+_tmpl_dl; //--------------------------------------
+_tmpl_map(MAP_VOL, volatile);
+_tmpl_map(MAP_NON_VOL, );
+_tmpl_map(MAP_CAST_VOL, const_cast<AA *>);
+_tmpl_map(MAP_CAST_NON_VOL, );
+_tmpl_begin(TY = [[u8; u16; u32; u64; sz; bool; ptr]]);
+namespace vsync
+{
+template <MAP_TMPL_N_TY> struct atomic<MAP_TMPL_T_TY> {
+    atomic(const atomic &) = delete;
+    atomic &operator=(const atomic &) = delete;
+    atomic &operator=(const atomic &) volatile = delete;
+
+    atomic()
+    {
+        __vatomic_init(&_v, MAP_INIT_TY);
+    }
+    atomic(MAP_TMPL_T_TY v)
+    {
+        __vatomic_init(&_v, v);
+    }
+_tmpl_end;
+_tmpl_begin(TY = [[u8; u16; u32; u64; sz; bool; ptr]], VL =[[VOL; NON_VOL]]);
+    MAP_TMPL_T_TY load(memory_order order = memory_order_seq_cst) MAP_VL const noexcept
+    {
+        switch (order) {
+        case memory_order_consume:
+        case memory_order_acquire:
+            return MAP_CAST_TY(__vatomic_read_acq(MAP_CAST_VL(&_v)));
+        case memory_order_relaxed:
+            return MAP_CAST_TY(__vatomic_read_rlx(MAP_CAST_VL(&_v)));
+        case memory_order_release:
+        case memory_order_acq_rel:
+        case memory_order_seq_cst:
+        default:
+            return MAP_CAST_TY(__vatomic_read(MAP_CAST_VL(&_v)));
+        }
+    }
+    void store(MAP_TMPL_T_TY v, memory_order order = memory_order_seq_cst) MAP_VL noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            __vatomic_write_rel(MAP_CAST_VL(&_v), v);
+            break;
+        case memory_order_relaxed:
+            __vatomic_write_rlx(MAP_CAST_VL(&_v), v);
+            break;
+        case memory_order_acquire:
+        case memory_order_acq_rel:
+        case memory_order_consume:
+        case memory_order_seq_cst:
+        default:
+            return __vatomic_write(MAP_CAST_VL(&_v), v);
+        }
+    }
+
+    MAP_TMPL_T_TY operator=(MAP_TMPL_T_TY v) MAP_VL noexcept
+    {
+        store(v);
+        return v;
+    }
+
+    operator MAP_TMPL_T_TY() MAP_VL const noexcept
+    {
+        return load();
+    }
+
+    MAP_TMPL_T_TY exchange(MAP_TMPL_T_TY v,
+                           memory_order order = memory_order_seq_cst) MAP_VL noexcept
+    {
+        switch (order) {
+        case memory_order_release:
+            return MAP_CAST_TY(__vatomic_xchg_rel(MAP_CAST_VL(&_v), v));
+        case
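// The switches above fold the C++ memory orders onto the three vatomic
// variants of each operation. The same collapsing for loads, expressed over
// std::atomic for reference (a sketch, not the generated code):
#include <atomic>
#include <cstdint>

static uint32_t load_collapsed(const std::atomic<uint32_t> &a,
                               std::memory_order o)
{
    switch (o) {
    case std::memory_order_consume: // consume is promoted to acquire
    case std::memory_order_acquire:
        return a.load(std::memory_order_acquire);
    case std::memory_order_relaxed:
        return a.load(std::memory_order_relaxed);
    default: // release/acq_rel are not valid load orders; use seq_cst
        return a.load(std::memory_order_seq_cst);
    }
}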
memory_order_relaxed: + return MAP_CAST_TY(__vatomic_xchg_rlx(MAP_CAST_VL(&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return MAP_CAST_TY(__vatomic_xchg_acq(MAP_CAST_VL(&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return MAP_CAST_TY(__vatomic_xchg(MAP_CAST_VL(&_v), v)); + } + } + + bool compare_exchange_strong( + MAP_TMPL_T_TY &expected, MAP_TMPL_T_TY desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) MAP_VL noexcept + { + MAP_TMPL_T_TY old = 0; + switch (order) { + case memory_order_release: + old = MAP_CAST_TY(__vatomic_cmpxchg_rel(MAP_CAST_VL(&_v), expected, desired)); + break; + case memory_order_relaxed: + old = MAP_CAST_TY(__vatomic_cmpxchg_rlx(MAP_CAST_VL(&_v), expected, desired)); + break; + case memory_order_consume: + case memory_order_acquire: + old = MAP_CAST_TY(__vatomic_cmpxchg_acq(MAP_CAST_VL(&_v), expected, desired)); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + old = MAP_CAST_TY(__vatomic_cmpxchg(MAP_CAST_VL(&_v), expected, desired)); + break; + } + if (old == expected) { + return true; + } else { + expected = old; + return false; + } + } + bool + compare_exchange_weak(MAP_TMPL_T_TY &expected, MAP_TMPL_T_TY desired, + memory_order order = memory_order_seq_cst, + memory_order failure = memory_order_seq_cst) MAP_VL noexcept + { + return compare_exchange_strong(expected, desired, order, failure); + } + +_tmpl_end; +_tmpl_begin(TY = [[u8; u16; u32; u64; sz]], VL =[[VOL; NON_VOL]]); + TT fetch_add(TT v, + memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + switch (order) { + case memory_order_release: + return MAP_CAST_TY(__vatomic_get_add_rel(MAP_CAST_VL(&_v), v)); + case memory_order_relaxed: + return MAP_CAST_TY(__vatomic_get_add_rlx(MAP_CAST_VL(&_v), v)); + case memory_order_consume: + case memory_order_acquire: + return MAP_CAST_TY(__vatomic_get_add_acq(MAP_CAST_VL(&_v), v)); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return MAP_CAST_TY(__vatomic_get_add(MAP_CAST_VL(&_v), v)); + } + } + TT operator+=(TT v) MAP_VL noexcept + { + return fetch_add(v); + } + // v++ + TT operator++(int) MAP_VL noexcept + { + return __vatomic_get_inc(MAP_CAST_VL(&_v)); + } + // ++v + TT operator++() MAP_VL noexcept + { + return __vatomic_inc_get(MAP_CAST_VL(&_v)); + } + + TT fetch_sub(TT v, + memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + switch (order) { + case memory_order_release: + return __vatomic_get_sub_rel(MAP_CAST_VL(&_v), v); + case memory_order_relaxed: + return __vatomic_get_sub_rlx(MAP_CAST_VL(&_v), v); + case memory_order_consume: + case memory_order_acquire: + return __vatomic_get_sub_acq(MAP_CAST_VL(&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return __vatomic_get_sub(MAP_CAST_VL(&_v), v); + } + } + TT operator-=(TT v) MAP_VL noexcept + { + return fetch_sub(v); + } + // v-- + TT operator--(int) MAP_VL noexcept + { + return __vatomic_get_dec(MAP_CAST_VL(&_v)); + } + // --v + TT operator--() MAP_VL noexcept + { + return __vatomic_dec_get(MAP_CAST_VL(&_v)); + } + + TT fetch_and(TT v, + memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + switch (order) { + case memory_order_release: + return __vatomic_get_and_rel(MAP_CAST_VL(&_v), v); + case memory_order_relaxed: + return __vatomic_get_and_rlx(MAP_CAST_VL(&_v), v); + case memory_order_consume: + case memory_order_acquire: + return __vatomic_get_and_acq(MAP_CAST_VL(&_v), v); + case 
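// compare_exchange_strong above adapts vatomic's cmpxchg, which returns the
// old value, to C++'s bool-plus-expected-update contract. The adaptation in
// isolation, emulating an old-returning CAS with std::atomic (a sketch):
#include <atomic>
#include <cstdint>

static uint32_t cmpxchg_old(std::atomic<uint32_t> &a, uint32_t e, uint32_t v)
{
    a.compare_exchange_strong(e, v); // on failure, e receives the old value
    return e;                        // on success, e still equals the old value
}

static bool cas_bool(std::atomic<uint32_t> &a, uint32_t &expected,
                     uint32_t desired)
{
    uint32_t old = cmpxchg_old(a, expected, desired);
    if (old == expected) {
        return true;
    }
    expected = old; // report what was actually observed
    return false;
}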
memory_order_acq_rel: + case memory_order_seq_cst: + default: + return __vatomic_get_and(MAP_CAST_VL(&_v), v); + } + } + + TT operator&=(TT v) MAP_VL noexcept + { + return fetch_and(v); + } + + TT fetch_or(TT v, + memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + switch (order) { + case memory_order_release: + return __vatomic_get_or_rel(MAP_CAST_VL(&_v), v); + case memory_order_relaxed: + return __vatomic_get_or_rlx(MAP_CAST_VL(&_v), v); + case memory_order_consume: + case memory_order_acquire: + return __vatomic_get_or_acq(MAP_CAST_VL(&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return __vatomic_get_or(MAP_CAST_VL(&_v), v); + } + } + + TT operator|=(TT v) MAP_VL noexcept + { + return fetch_or(v); + } + + TT fetch_xor(TT v, + memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + switch (order) { + case memory_order_release: + return __vatomic_get_xor_rel(MAP_CAST_VL(&_v), v); + case memory_order_relaxed: + return __vatomic_get_xor_rlx(MAP_CAST_VL(&_v), v); + case memory_order_consume: + case memory_order_acquire: + return __vatomic_get_xor_acq(MAP_CAST_VL(&_v), v); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return __vatomic_get_xor(MAP_CAST_VL(&_v), v); + } + } + + TT operator^=(TT v) MAP_VL noexcept + { + return fetch_xor(v); + } +_tmpl_end; +_tmpl_begin(TY = [[ptr]], VL =[[VOL; NON_VOL]]); + MAP_TMPL_T_TY fetch_add(ptrdiff_t v, + memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + switch (order) { + case memory_order_release: + return add_rel(v, true); + case memory_order_relaxed: + return add_rlx(v, true); + case memory_order_consume: + case memory_order_acquire: + return add_acq(v, true); + case memory_order_acq_rel: + case memory_order_seq_cst: + default: + return add(v, true); + } + } + MAP_TMPL_T_TY operator+=(ptrdiff_t v) MAP_VL noexcept + { + return add(v, true); + } + // ptr++ + MAP_TMPL_T_TY operator++(int) MAP_VL noexcept + { + return add(1, true); + } + // ++ptr + MAP_TMPL_T_TY operator++() MAP_VL noexcept + { + return add(1, false); + } + + MAP_TMPL_T_TY fetch_sub(ptrdiff_t v, + memory_order order = memory_order_seq_cst) MAP_VL noexcept + { + return fetch_add(-v, order); + } + + MAP_TMPL_T_TY operator-=(ptrdiff_t v) MAP_VL noexcept + { + return add(-v, true); + } + // ptr-- + MAP_TMPL_T_TY operator--(int) MAP_VL noexcept + { + return add(-1, true); + } + // --ptr + MAP_TMPL_T_TY operator--() MAP_VL noexcept + { + return add(-1, false); + } +_tmpl_end; +_tmpl_begin(TY = [[ptr]], MO = [[seq; acq; rel; rlx]], VL =[[VOL; NON_VOL]]); +inline MAP_TMPL_T_TY add_MS(ptrdiff_t v, bool return_old) MAP_VL +{ + MAP_TMPL_T_TY old = nullptr; + MAP_TMPL_T_TY expected = nullptr; + MAP_TMPL_T_TY desired = nullptr; + old = MAP_CAST_TY(vatomicptr_read(MAP_CAST_VL(&_v))); + do { + expected = old; + desired = expected + v; + old = MAP_CAST_TY( + __vatomic_cmpxchg_MS(MAP_CAST_VL(&_v), expected, desired)); + } while (old != expected); + return return_old ? old : desired; +} +_tmpl_end; +_tmpl_begin(TY = [[u8; u16; u32; u64; sz; bool; ptr]], VL =[[VOL; NON_VOL]]); + bool is_lock_free() MAP_VL const noexcept + { + return true; + } +_tmpl_end; +_tmpl_begin(TY = [[u8; u16; u32; u64; sz; bool; ptr]]); + private: + AA _v; +}; +_tmpl_end; +}; + +#endif diff --git a/template/include/CMakeLists.txt b/template/include/CMakeLists.txt deleted file mode 100644 index 15276969..00000000 --- a/template/include/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) Huawei Technologies Co., Ltd. 2024-2025. 
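// vatomicptr has no native fetch_add, so the add_MS helper above retries a
// cmpxchg until it lands. The same loop as a standalone sketch over
// std::atomic<T *> (illustrative only):
#include <atomic>
#include <cstddef>

template <typename T>
static T *ptr_fetch_add_sketch(std::atomic<T *> &a, std::ptrdiff_t d,
                               bool return_old)
{
    T *old = a.load(std::memory_order_relaxed);
    T *desired;
    do {
        desired = old + d;
        // On failure, compare_exchange_weak reloads `old` for the retry.
    } while (!a.compare_exchange_weak(old, desired));
    return return_old ? old : desired;
}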
All rights reserved. -# SPDX-License-Identifier: MIT -add_subdirectory(vsync) diff --git a/template/include/vsync/CMakeLists.txt b/template/include/vsync/CMakeLists.txt deleted file mode 100644 index c9b4bb29..00000000 --- a/template/include/vsync/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. -# SPDX-License-Identifier: MIT -add_subdirectory(atomic) diff --git a/template/include/vsync/atomic/internal/config/TY_rlx.h.in b/template/include/vsync/atomic/internal/config/TY_rlx.h.in deleted file mode 100644 index c6394426..00000000 --- a/template/include/vsync/atomic/internal/config/TY_rlx.h.in +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (C) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. - * SPDX-License-Identifier: MIT - */ - -_tmpl_begin(TY = [[fnc;u8; u16; u32; u64; ptr; sz]]); -#ifndef VATOMIC_CONFIG_UPCASE_TY__RLX_H -#define VATOMIC_CONFIG_UPCASE_TY__RLX_H -_tmpl_end; -_tmpl_begin(=); -AUTOGEN -_tmpl_end; - -#include - -#define _tmpl_mute -#include -#define _tmpl_unmute - -#if defined(VATOMIC_ENABLE_ATOMIC_RLX) - -_tmpl_dl; // fence -_tmpl_begin(TY = [[fnc]], MO = [[seq; acq; rel]]); -#define _tmpl_upcase(vatomic_fence_MS) -static inline void vatomic_fence_MS(void) { - vatomic_fence_rlx(); -} -_tmpl_end; - -_tmpl_dl; // read -_tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[seq; acq;]]); -#define _tmpl_upcase(__vatomic_read_MS) -static inline TT __vatomic_read_MS(const AA *a) { - return __vatomic_read_rlx(a); -} -_tmpl_end; - -_tmpl_dl; // write -_tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[seq; rel;]]); -#define _tmpl_upcase(__vatomic_write_MS) -static inline void __vatomic_write_MS(AA *a, TT v) { - __vatomic_write_rlx(a, v); -} -_tmpl_end; - -_tmpl_dl; // xchg -_tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[seq; acq; rel]]); -#define _tmpl_upcase(__vatomic_xchg_MS) -static inline TT __vatomic_xchg_MS(AA *a, TT v) { - return __vatomic_xchg_rlx(a, v); -} -_tmpl_end; - -_tmpl_dl; // cmpxchg -_tmpl_begin(TY = [[u8; u16; u32; u64; sz; ptr]], MO = [[seq; acq; rel]]); -#define _tmpl_upcase(__vatomic_cmpxchg_MS) -static inline TT __vatomic_cmpxchg_MS(AA *a, TT e, TT v) { - return __vatomic_cmpxchg_rlx(a, e, v); -} -_tmpl_end; - -_tmpl_dl; // get_op/ op_get -_tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[seq; acq; rel]], - FUNC = [[get_max; get_and; get_or; get_xor; get_add; get_sub; max_get; and_get; or_get; xor_get; add_get; sub_get]]); -#define _tmpl_upcase(__vatomic_FUNC_MS) -static inline TT __vatomic_FUNC_MS(AA *a, TT v) { - return __vatomic_FUNC_rlx(a, v); -} -_tmpl_end; - -_tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[seq; acq; rel]], - FUNC = [[get_inc; inc_get; get_dec; dec_get]]); -#define _tmpl_upcase(__vatomic_FUNC_MS) -static inline TT __vatomic_FUNC_MS(AA *a) { - return __vatomic_FUNC_rlx(a); -} -_tmpl_end; - -_tmpl_dl; // op -_tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[seq; rel]], - FUNC = [[max; and; or; xor; add; sub]]); -#define _tmpl_upcase(__vatomic_FUNC_MS) -static inline void __vatomic_FUNC_MS(AA *a, TT v) { - __vatomic_FUNC_rlx(a, v); -} -_tmpl_end; - -_tmpl_begin(TY = [[u8; u16; u32; u64; sz;]], MO = [[seq; rel]], - FUNC = [[inc; dec]]); -#define _tmpl_upcase(__vatomic_FUNC_MS) -static inline void __vatomic_FUNC_MS(AA *a) { - __vatomic_FUNC_rlx(a); -} -_tmpl_end; - -_tmpl_dl; // await_cond -_tmpl_begin(TY = [[u32; u64; ptr]], MO = [[seq; acq]], - COND = [[eq; neq; lt; le; gt; ge]], - $F_ptr_eq = BLK_KEEP, $F_ptr_neq = BLK_KEEP, 
$F_ptr = BLK_SKIP); -$F_TY_COND; -#define _tmpl_upcase(__vatomic_await_COND_MS) -static inline TT __vatomic_await_COND_MS(const AA *a, TT v) { - return __vatomic_await_COND_rlx(a, v); -} -_tmpl_end; - -_tmpl_begin(TY = [[u32; u64]], COND = [[le; lt; ge; gt]], - OP = [[add; sub; set]], MO = [[seq; acq; rel]], - FUNC = await_COND_OP); -#define _tmpl_upcase(__vatomic_await_COND_OP_MS) -static inline TT -__vatomic_await_COND_OP_MS(AA *a, TT c, TT v) -{ - return __vatomic_await_COND_OP_rlx(a, c, v); -} -_tmpl_end; - -_tmpl_begin(TY = [[u32; u64; ptr]], OP = [[add; sub; set]], - MO = [[seq; acq; rel]], FUNC = await_neq_OP, $F_ptr_set = BLK_KEEP, - $F_ptr = BLK_SKIP); -$F_TY_OP; -#define _tmpl_upcase(__vatomic_await_neq_OP_MS) -static inline TT -__vatomic_await_neq_OP_MS(AA *a, TT c, TT v) -{ - return __vatomic_await_neq_OP_rlx(a, c, v); -} -_tmpl_end; - -_tmpl_begin(TY = [[u32; u64; ptr]], OP = [[add; sub; set]], - MO = [[seq; acq; rel]], FUNC = await_eq_OP, - $F_ptr_set = BLK_KEEP, $F_ptr = BLK_SKIP); -$F_TY_OP; -#define _tmpl_upcase(__vatomic_await_eq_OP_MS) -static inline TT -__vatomic_await_eq_OP_MS(AA *a, TT c, TT v) -{ - return __vatomic_await_eq_OP_rlx(a, c, v); -} -_tmpl_end; - -#endif -#endif diff --git a/test/atomics_gen/templates/CMakeLists.txt b/template/test-atomic/CMakeLists.txt similarity index 83% rename from test/atomics_gen/templates/CMakeLists.txt rename to template/test-atomic/CMakeLists.txt index 1f5edd73..127ce465 100644 --- a/test/atomics_gen/templates/CMakeLists.txt +++ b/template/test-atomic/CMakeLists.txt @@ -3,7 +3,7 @@ add_custom_target(vatomic-test-generate) file(GLOB TEMPLATES *TY.c.in) -set(GEN_TEST_OUTPUT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/..) +set(GEN_TEST_OUTPUT_DIR ${PROJECT_SOURCE_DIR}/test/atomics_gen) set(ut_test_TY_TYPES u8 u16 u32 u64 sz ptr) set(ut_test_await_TY_TYPES u32 u64 ptr) @@ -16,10 +16,8 @@ foreach(TEMPLATE_FILE IN ITEMS ${TEMPLATES}) string(REPLACE TY ${TY} TEST_FILE ${TEMPLATE_NAME}.c) add_custom_target( ${TEST_FILE} - COMMAND - $ -DTY=${TY} - ${PROJECT_SOURCE_DIR}/template/include/vsync/atomic/vatomic.rules - ${TEMPLATE_FILE} > ${GEN_TEST_OUTPUT_DIR}/${TEST_FILE}) + COMMAND ${TMPLR_PROGRAM} -FTY=${TY} ${VATOMIC_RULES} + ${TEMPLATE_FILE} > ${GEN_TEST_OUTPUT_DIR}/${TEST_FILE}) add_dependencies(vatomic-test-generate ${TEST_FILE}) endforeach() endforeach() @@ -85,8 +83,7 @@ foreach(FUN IN ITEMS ${FUNS}) add_custom_target( ${OUTPUT_FILE} COMMAND - $ -DFUNC=${FUN} - ${PROJECT_SOURCE_DIR}/template/include/vsync/atomic/vatomic.rules + ${TMPLR_PROGRAM} -FFUNC=${FUN} ${VATOMIC_RULES} ${CMAKE_CURRENT_SOURCE_DIR}/call_FUNC.c.in > ${GEN_TEST_OUTPUT_DIR}/call_tests/${OUTPUT_FILE}) add_dependencies(vatomic-test-generate ${OUTPUT_FILE}) diff --git a/test/atomics_gen/templates/call_FUNC.c.in b/template/test-atomic/call_FUNC.c.in similarity index 100% rename from test/atomics_gen/templates/call_FUNC.c.in rename to template/test-atomic/call_FUNC.c.in diff --git a/test/atomics_gen/templates/mt_test_await_TY.c.in b/template/test-atomic/mt_test_await_TY.c.in similarity index 100% rename from test/atomics_gen/templates/mt_test_await_TY.c.in rename to template/test-atomic/mt_test_await_TY.c.in diff --git a/test/atomics_gen/templates/mt_test_rmw_TY.c.in b/template/test-atomic/mt_test_rmw_TY.c.in similarity index 100% rename from test/atomics_gen/templates/mt_test_rmw_TY.c.in rename to template/test-atomic/mt_test_rmw_TY.c.in diff --git a/test/atomics_gen/templates/ut_test_TY.c.in b/template/test-atomic/ut_test_TY.c.in similarity index 100% rename from 
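// In spirit, the deleted *_rlx.h template generated ordered entry points
// that simply forward to their relaxed counterparts whenever
// VATOMIC_ENABLE_ATOMIC_RLX is defined, e.g. (illustrative stand-in over
// std::atomic, not the generated code):
#include <atomic>
#include <cstdint>

static inline uint32_t read_acq_demoted(const std::atomic<uint32_t> &a)
{
    return a.load(std::memory_order_relaxed); // acquire demoted to relaxed
}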
test/atomics_gen/templates/ut_test_TY.c.in rename to template/test-atomic/ut_test_TY.c.in diff --git a/test/atomics_gen/templates/ut_test_await_TY.c.in b/template/test-atomic/ut_test_await_TY.c.in similarity index 100% rename from test/atomics_gen/templates/ut_test_await_TY.c.in rename to template/test-atomic/ut_test_await_TY.c.in diff --git a/template/include/vsync/atomic/vatomic.rules b/template/vatomic.rules similarity index 82% rename from template/include/vsync/atomic/vatomic.rules rename to template/vatomic.rules index ee1d44ca..c6bfb81e 100644 --- a/template/include/vsync/atomic/vatomic.rules +++ b/template/vatomic.rules @@ -2,9 +2,14 @@ _tmpl_dl *********************************************************************** _tmpl_dl *** Prefix mappings _tmpl_dl *********************************************************************** _tmpl_map(MAP_P_u8, vatomic8); +_tmpl_map(MAP_P_bool, vatomic8); _tmpl_map(MAP_P_u16, vatomic16); _tmpl_map(MAP_P_u32, vatomic32); _tmpl_map(MAP_P_u64, vatomic64); +_tmpl_map(MAP_P_s8, vatomic8); +_tmpl_map(MAP_P_s16, vatomic16); +_tmpl_map(MAP_P_s32, vatomic32); +_tmpl_map(MAP_P_s64, vatomic64); _tmpl_map(MAP_P_sz, vatomicsz); _tmpl_map(MAP_P_ptr, vatomicptr); _tmpl_map(MAP_P_va, vatomic); @@ -13,22 +18,32 @@ _tmpl_dl *********************************************************************** _tmpl_dl *** Plain type mappings _tmpl_dl *********************************************************************** _tmpl_map(MAP_T_u8, vuint8_t); +_tmpl_map(MAP_T_bool, vbool_t); _tmpl_map(MAP_T_u16, vuint16_t); _tmpl_map(MAP_T_u32, vuint32_t); _tmpl_map(MAP_T_u64, vuint64_t); _tmpl_map(MAP_T_sz, vsize_t); +_tmpl_map(MAP_T_s8, vint8_t); +_tmpl_map(MAP_T_s16, vint16_t); +_tmpl_map(MAP_T_s32, vint32_t); +_tmpl_map(MAP_T_s64, vint64_t); _tmpl_map(MAP_T_ptr, void *); _tmpl_map(MAP_T_va, T); _tmpl_map(MAP_T_VA, T); _tmpl_dl *********************************************************************** _tmpl_dl *** Atomic mappings *** _tmpl_dl *********************************************************************** -_tmpl_map(MAP_A_u8, PP_t); -_tmpl_map(MAP_A_u16, PP_t); -_tmpl_map(MAP_A_u32, PP_t); -_tmpl_map(MAP_A_u64, PP_t); -_tmpl_map(MAP_A_ptr, PP_t); -_tmpl_map(MAP_A_sz, PP_t); +_tmpl_map(MAP_A_u8, _t); +_tmpl_map(MAP_A_bool, _t); +_tmpl_map(MAP_A_u16, _t); +_tmpl_map(MAP_A_u32, _t); +_tmpl_map(MAP_A_u64, _t); +_tmpl_map(MAP_A_s8, _t); +_tmpl_map(MAP_A_s16, _t); +_tmpl_map(MAP_A_s32, _t); +_tmpl_map(MAP_A_s64, _t); +_tmpl_map(MAP_A_ptr, _t); +_tmpl_map(MAP_A_sz, _t); _tmpl_map(MAP_A_va, A); _tmpl_map(MAP_A_VA, A); _tmpl_dl *********************************************************************** @@ -43,9 +58,14 @@ _tmpl_dl *** Memory ordering extensive _tmpl_dl *********************************************************************** _tmpl_map(MAP_E_va_seq, mo); _tmpl_map(MAP_E_u8, MAP_E); +_tmpl_map(MAP_E_bool, MAP_E); _tmpl_map(MAP_E_u16, MAP_E); _tmpl_map(MAP_E_u32, MAP_E); _tmpl_map(MAP_E_u64, MAP_E); +_tmpl_map(MAP_E_s8, MAP_E); +_tmpl_map(MAP_E_s16, MAP_E); +_tmpl_map(MAP_E_s32, MAP_E); +_tmpl_map(MAP_E_s64, MAP_E); _tmpl_map(MAP_E_sz, MAP_E); _tmpl_map(MAP_E_ptr, MAP_E); _tmpl_map(MAP_E_VA, MAP_E); @@ -57,11 +77,11 @@ _tmpl_dl *********************************************************************** _tmpl_dl *** Additional mappings _tmpl_dl *********************************************************************** _tmpl_map(AA, MAP_A_TY); -_tmpl_map(PP, MAP_P_TY); +_tmpl_map(, MAP_P_TY); _tmpl_map(TT, MAP_T_TY); _tmpl_map(ME, MAP_E_MO); _tmpl_map(_MS, MAP_M_MO); -_tmpl_map(__vatomic, 
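// The rules above route bool and the signed sX types onto the same-width
// unsigned atomics (vatomic8 for bool/s8, vatomic32 for s32, and so on).
// The size assumptions behind that mapping, written out as compile-time
// checks (a hypothetical sanity sketch; they hold on all mainstream ABIs,
// where bool is one byte):
#include <cstdint>

static_assert(sizeof(bool) == sizeof(uint8_t), "bool rides on vatomic8");
static_assert(sizeof(int8_t) == sizeof(uint8_t), "s8 rides on vatomic8");
static_assert(sizeof(int32_t) == sizeof(uint32_t), "s32 rides on vatomic32");
static_assert(sizeof(int64_t) == sizeof(uint64_t), "s64 rides on vatomic64");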
PP); +_tmpl_map(__vatomic, ); _tmpl_dl *********************************************************************** _tmpl_dl *** Additional mappings _tmpl_dl *********************************************************************** @@ -92,7 +112,7 @@ _tmpl_dl *** Requirement: _tmpl_dl *** - FUNC defined in iter_vars of block (lower case is ok) _tmpl_dl *********************************************************************** _tmpl_map(\#, #); -_tmpl_map(FUNUP, UPCASE(PP_FUNC_MS)); +_tmpl_map(FUNUP, UPCASE(_FUNC_MS)); _tmpl_map(IFDEF_FUN, _tmpl_nl\#ifndef FUNUP_tmpl_nl\#define FUNUP_tmpl_nl); _tmpl_map(ENDIF_FUN, _tmpl_nl\#endif /* FUNUP */); _tmpl_dl *********************************************************************** diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 8068ddc6..fc76a9e5 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -11,3 +11,4 @@ add_subdirectory(atomics_basic) add_subdirectory(atomics_cxx) add_subdirectory(atomics_gen) add_subdirectory(atomics) +add_subdirectory(sanity) diff --git a/test/atomics/CMakeLists.txt b/test/atomics/CMakeLists.txt index ff550e85..02402284 100644 --- a/test/atomics/CMakeLists.txt +++ b/test/atomics/CMakeLists.txt @@ -157,35 +157,30 @@ endif() # ############################################################################## separate_arguments(CFLAGS UNIX_COMMAND "${CMAKE_C_FLAGS}") -if(NOT LIBVSYNC_OPEN_DISTRO_TESTING) - add_custom_command( - OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/vatomic_expand.h" - COMMAND - ${CMAKE_C_COMPILER} -E ${CFLAGS} "${VATOMIC_INC}" -include - vsync/atomic.h ${CMAKE_CURRENT_SOURCE_DIR}/dispatcher_test.c > - "${CMAKE_CURRENT_BINARY_DIR}/vatomic_expand.h" - DEPENDS vatomic "${CMAKE_CURRENT_SOURCE_DIR}/dispatcher_test.c" - COMMAND_EXPAND_LISTS VERBATIM) - - add_custom_target(expand ALL - DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/vatomic_expand.h") - - # ########################################################################## - # test vatomic dispatcher - # ########################################################################## - add_executable(dispatcher_test dispatcher_test.c) - target_compile_options(dispatcher_test PUBLIC ${TEST_OPTS}) - target_link_libraries(dispatcher_test vatomic) - v_add_bin_test(NAME dispatcher_test COMMAND dispatcher_test) - - # ########################################################################## - # test freestanding - # ########################################################################## - add_executable(freestanding_test freestanding_test.c) - target_compile_options(freestanding_test PUBLIC ${TEST_OPTS} -nostdinc) - configure_file(include/vfreestanding.h.tpl include/vfreestanding.h) - target_include_directories(freestanding_test - PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/include) - target_link_libraries(freestanding_test vatomic) - v_add_bin_test(NAME freestanding_test COMMAND freestanding_test) -endif() +add_custom_command( + OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/vatomic_expand.h" + COMMAND + ${CMAKE_C_COMPILER} -E ${CFLAGS} "${VATOMIC_INC}" -include + vsync/atomic.h ${CMAKE_CURRENT_SOURCE_DIR}/dispatcher_test.c > + "${CMAKE_CURRENT_BINARY_DIR}/vatomic_expand.h" + DEPENDS vatomic "${CMAKE_CURRENT_SOURCE_DIR}/dispatcher_test.c" + COMMAND_EXPAND_LISTS VERBATIM) +add_custom_target(expand ALL + DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/vatomic_expand.h") +# ############################################################################## +# test vatomic dispatcher +# ############################################################################## +add_executable(dispatcher_test 
dispatcher_test.c) +target_compile_options(dispatcher_test PUBLIC ${TEST_OPTS}) +target_link_libraries(dispatcher_test vatomic) +v_add_bin_test(NAME dispatcher_test COMMAND dispatcher_test) +# ############################################################################## +# test freestanding +# ############################################################################## +add_executable(freestanding_test freestanding_test.c) +target_compile_options(freestanding_test PUBLIC ${TEST_OPTS} -nostdinc) +configure_file(include/vfreestanding.h.tpl include/vfreestanding.h) +target_include_directories(freestanding_test + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/include) +target_link_libraries(freestanding_test vatomic) +v_add_bin_test(NAME freestanding_test COMMAND freestanding_test) diff --git a/test/atomics/unit.sh b/test/atomics/unit.sh deleted file mode 100644 index 5e679a97..00000000 --- a/test/atomics/unit.sh +++ /dev/null @@ -1,230 +0,0 @@ -#!/bin/bash -set -e - -CC_ARM32_BE=$ARMEB_PATH/armeb-linux-gnueabi-gcc -CC_ARM32=arm-linux-gnueabi-gcc -CC_AARCH64=aarch64-linux-gnu-gcc -CC_X86_64=gcc - -QEMU_ARM="qemu-arm -L /usr/arm-linux-gnueabi" -QEMU_ARM_BE="qemu-armeb -L /usr/armeb-linux-gnueabi" -QEMU_X86_64="qemu-x86_64 -L /usr/bin/gcc" -QEMU_ARRCH64="qemu-aarch64 -L /usr/aarch64-linux-gnu" - - -TEST_TEMPLATE="atomics-test-master.c" -TIMEOUT_MSG="TIMEOUT" - - -CFLAGS="-I../include -g -std=c99 -Wall -Wextra -pedantic -ffreestanding -nostdlib -Wl,--defsym,_start=main -Wa,-L -O2" -CFLAGS="${CFLAGS} -DVSYNC_DISABLE_POLITE_AWAIT" -CFLAGS="${CFLAGS} -DVSYNC_ENABLE_ATOMIC_MAPPING" - - -# supported arch names, used in output files' names -declare -a archs -archs=("arm7" \ - "arm8" \ - "aarch64" \ - "aarch64-lse" \ - "aarch64-c11" \ - "aarch64-builtins" \ - "x86-c11" \ - "x86-builtins" \ - "arm7-be" \ - "arm8-be" \ - "armv7-le-builtins" \ - "armv7-be-builtins" \ -) - -arch_len=${#archs[@]} - - -# compilers -declare -a compilers -declare -a run -declare -a defines - -compilers=(\ - $CC_ARM32 \ - $CC_ARM32 \ - $CC_AARCH64 \ - $CC_AARCH64 \ - $CC_AARCH64 \ - $CC_AARCH64 \ - $CC_X86_64 \ - $CC_X86_64 \ - $CC_ARM32_BE \ - $CC_ARM32_BE \ - $CC_ARM32 \ - $CC_ARM32_BE \ -) -run[0]=$QEMU_ARM -run[1]=$QEMU_ARM -run[2]=$QEMU_ARRCH64 -run[3]=$QEMU_ARRCH64 -run[4]=$QEMU_ARRCH64 -run[5]=$QEMU_ARRCH64 -run[6]=$QEMU_X86_64 -run[7]=$QEMU_X86_64 -run[8]=$QEMU_ARM_BE -run[9]=$QEMU_ARM_BE -run[10]=$QEMU_ARM -run[11]=$QEMU_ARM_BE - - -defines=("-DVSYNC_ATOMIC_ARMV7" \ - "-DVSYNC_ATOMIC_ARMV8" \ - "" \ - "-DVSYNC_ATOMIC_AARCH64_LSE" \ - "-DVSYNC_STDATOMIC" \ - "-DVSYNC_BUILTINS" \ - "-DVSYNC_STDATOMIC" \ - "" \ - "-DVSYNC_ATOMIC_ARMV7" \ - "-DVSYNC_ATOMIC_ARMV8" \ - "-DVSYNC_BUILTINS" \ - "-DVSYNC_BUILTINS") -# compiler options for the used arch. 
- - -# -marm option is necessary to not get this error Error: thumb conditional instruction should be in IT block -- `cmpeq r7,fp' -# -static is necessary so that it does not complain about ld-linux.so.3 etc - -declare -a options -options[0]="-O2 -march=armv7-a -mno-thumb-interwork -mfpu=vfp -mcpu=cortex-a9 -pthread" -options[1]="-O2 -march=armv8-a -mno-thumb-interwork -mfpu=vfp -pthread" -options[2]="-O2 -pthread" -options[3]="-O2 -pthread -march=armv8.2-a+lse" -options[4]="-O2 -pthread " -options[5]="-O2 -pthread " -options[6]="-O2 -pthread " -options[7]="-O2 -pthread " -options[8]="-static -O2 -march=armv7-a -marm -mno-thumb-interwork -mfpu=vfp -mcpu=cortex-a9 -msoft-float -pthread -mbig-endian" -options[9]="-static -O2 -march=armv8-a -marm -mno-thumb-interwork -mfpu=vfp -pthread -mbig-endian" -options[10]="-O2 -march=armv7-a -mno-thumb-interwork -mfpu=vfp -pthread -mtune=cortex-a9 -mlittle-endian" -options[11]="-static -O2 -march=armv7-a -marm -mno-thumb-interwork -mfpu=vfp -pthread -mtune=cortex-a9 -mbig-endian" - -case "$1" in - "-clean") - rm -rf build - mkdir build -;; - "-compile") - PRINT_CSV="" - case "$2" in - "-csv") - PRINT_CSV="-DPRINT_CSV" - ;; - esac - rm -rf build - mkdir build - for ((i=0; i< arch_len; i++)); do - for t in atomic32 atomic64 atomicptr - do - echo "Compiling ${archs[$i]}-$t" - #expand - expansion_input_file=$TEST_TEMPLATE - expansion_output_file=build/test-$t-${archs[$i]}.c - expansion_asm_file=build/test-$t-${archs[$i]}.s - - ${compilers[$i]} -E $CFLAGS -DTYPE=$t $PRINT_CSV -DTYPE_IS_$t ${defines[$i]} $expansion_input_file -o $expansion_output_file - #compile - compilation_output_file=build/test-$t-${archs[$i]}.out - ${compilers[$i]} -DTYPE=$t ${options[$i]} $expansion_output_file -o $compilation_output_file - - compilation_output_file=build/test-$t-${archs[$i]}.out - ${compilers[$i]} -DTYPE=$t ${options[$i]} $expansion_output_file -S -o $expansion_asm_file - done - done - chmod +x build/*.out -;; - "-run-all") - NO_NUMA=1 - NO_CPU=12 - NO_ITERATIONS=1000 - OFFSET=0 - NO_THREAD=5 - for thrdno in 1 2 5; do # threads - for i in {0..6}; do # ARCHs - for file in build/*${archs[$i]}.out - do - timeout 10s ${run[$i]} ./$file $thrdno $NO_ITERATIONS $NO_CPU $NO_NUMA $OFFSET $api $mo || echo $TIMEOUT_MSG - - done # files - done # archs - done #threads -;; - "-run-sanity") - for thrdcount in 1 10; do - for iter in 1 15 30 31 32 60 61 64 1000; do - for ((i=0; i< arch_len; i++)); do # ARCHs - for file in build/*${archs[$i]}.out - do - timeout 10s ${run[$i]} ./$file $thrdcount $iter || echo $TIMEOUT_MSG - done - done - done - done -;; - "-run-max") - for ((i=0; i< arch_len; i++)); do # ARCHs - for file in build/*${archs[$i]}.out - do -# # timeout 120s - ${run[$i]} ./$file 500 5 - #|| echo -n $TIMEOUT_MSG - done - done -;; - "-run-min-32") - for ((i=0; i< arch_len; i++)); do # ARCHs - for file in build/test-atomic32-${archs[$i]}.out - do - timeout 10s ${run[$i]} ./$file 1 1 || echo $TIMEOUT_MSG - done - done -;; - "-test-few") - for thrdcount in 1 3; do - for ((i=0; i< arch_len; i++)); do - for file in build/*${archs[$i]}.out - do - err=0 - if ! 
timeout 10s ${run[$i]} ./$file $thrdcount 1000; then - echo $TIMEOUT_MSG file=$file threads=$thrdcount - exit 1 - fi - done - done - done -;; - "-run-sub") - suit=20 - for thrdcount in 10; do - for iter in 1 15 30 31 32 60 61 64 1000; do - for ((i=0; i< arch_len; i++)); do - for file in build/test-atomic64-${archs[$i]}.out - do - timeout 10s ${run[$i]} ./$file $thrdcount $iter 12 1 0 $suit || echo $TIMEOUT_MSG - done - done - done - done -;; - "-run-be") - suit=20 - for thrdcount in 10; do - for iter in 64; do - for i in {6..7}; do - for file in build/test-atomic64-${archs[$i]}.out - do - ${run[$i]} ./$file $thrdcount $iter 12 1 0 $suit - done - done - done - done -;; - *) - echo "Unknown Command! available commands -clean, -compile <-csv>, -run-all, -run-sanity, -run-min-32, -run-sub, -run-be" -esac diff --git a/test/atomics_basic/CMakeLists.txt b/test/atomics_basic/CMakeLists.txt index d39353e1..a8486d41 100644 --- a/test/atomics_basic/CMakeLists.txt +++ b/test/atomics_basic/CMakeLists.txt @@ -71,22 +71,10 @@ set(FATOMIC_xor atomic8 atomic16 atomic32 atomic64 atomicsz) set(FATOMIC_and atomic8 atomic16 atomic32 atomic64 atomicsz) set(FATOMIC_max atomic8 atomic16 atomic32 atomic64 atomicsz) -# interface variants -# -# * core: main interface with vatomic32, vatomic64 and vatomicptr -# * dispatch: core + dispatcher-based vatomic_ interface -# * compat: dispatch + backwards-compatible atomic*_ interface -if(LIBVSYNC_OPEN_DISTRO_TESTING) - set(TC_VARIANTS core) -elseif(LIBVSYNC_DRC_DISTRO_TESTING) - set(TC_VARIANTS core dispatch) -else() - set(TC_VARIANTS core dispatch compat) -endif() +set(TC_VARIANTS core dispatch) set(HEADER_core vsync/atomic/core.h) set(HEADER_dispatch vsync/atomic/dispatch.h) -set(HEADER_compat vsync/atomic/compat.h) foreach(TC_VARIANT ${TC_VARIANTS}) set(TC_ATOMIC_HEADER ${HEADER_${TC_VARIANT}}) @@ -94,22 +82,7 @@ foreach(TC_VARIANT ${TC_VARIANTS}) foreach(TC_FUNC ${TC_FUNCS}) foreach(TC_ATOMIC ${FATOMIC_${TC_FUNC}}) - if(${TC_VARIANT} STREQUAL compat) - if(${TC_ATOMIC} STREQUAL atomic8) - continue() - endif() - if(${TC_ATOMIC} STREQUAL atomic16) - continue() - endif() - if(${TC_ATOMIC} STREQUAL atomicsz) - continue() - endif() - if(${TC_FUNC} STREQUAL max) - continue() - endif() - set(TC_TYPE_PRFX ${TC_ATOMIC}) - set(TC_FVARIANT ${TC_ATOMIC}) - elseif(${TC_VARIANT} STREQUAL core) + if(${TC_VARIANT} STREQUAL core) set(TC_TYPE_PRFX v${TC_ATOMIC}) set(TC_FVARIANT v${TC_ATOMIC}) else() @@ -143,23 +116,11 @@ foreach(TC_VARIANT ${TC_VARIANTS}) set(TC_GETX 1) endif() - if(${TC_VARIANT} STREQUAL compat) - if(${FT} STREQUAL XGET) - continue() - elseif(${FT} STREQUAL X) - set(TC_XGET 1) - endif() - endif() - set(TC ${TC_VARIANT}-v${TC_ATOMIC}_${TC_FUNC_PRFX}${TC_FUNC}${TC_FUNC_SUFX} ) configure_file(vatomic_basic.c.in ${TC}.c) add_executable(${TC} ${TC}.c) - if(${TC_VARIANT} STREQUAL compat) - target_compile_definitions( - ${TC} PRIVATE VSYNC_DISABLE_DEPRECATED_WARNING) - endif() target_link_libraries(${TC} vatomic pthread) v_add_bin_test(NAME ${TC} COMMAND ${TC}) diff --git a/test/atomics_cxx/CMakeLists.txt b/test/atomics_cxx/CMakeLists.txt index 365b8551..c681ab8c 100644 --- a/test/atomics_cxx/CMakeLists.txt +++ b/test/atomics_cxx/CMakeLists.txt @@ -1,10 +1,18 @@ # Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
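// The two interface variants that remain under test, side by side. The
// headers are the ones named in HEADER_core/HEADER_dispatch above; the
// entry-point names are my assumption of the usual vatomic API (typed
// vatomic32_* for core, type-generic vatomic_* for dispatch) and should be
// checked against the installed headers:
extern "C" {
#include <vsync/atomic/core.h>
#include <vsync/atomic/dispatch.h>
}

static void variants_demo(void)
{
    vatomic32_t a;
    vatomic32_init(&a, 0);
    vatomic32_add(&a, 1);   /* core: explicitly typed */
    (void)vatomic_read(&a); /* dispatch: resolves to the 32-bit read */
}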
# SPDX-License-Identifier: MIT
 project(vatomic_cpp LANGUAGES CXX)
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
 # ##############################################################################
 # compilation with C++
 # ##############################################################################
-add_executable(vatomic_compile_cxx vatomic_empty.cpp)
-target_link_libraries(vatomic_compile_cxx vatomic)
-v_add_bin_test(NAME vatomic-compile-cxx COMMAND vatomic_compile_cxx)
+file(GLOB TESTS *.cpp)
+
+foreach(TEST ${TESTS})
+    get_filename_component(TEST_NAME ${TEST} NAME_WE)
+    add_executable(${TEST_NAME} ${TEST})
+    target_link_libraries(${TEST_NAME} vatomic pthread)
+    v_add_bin_test(NAME ${TEST_NAME} COMMAND ${TEST_NAME})
+endforeach()
diff --git a/test/atomics_cxx/mt.cpp b/test/atomics_cxx/mt.cpp
new file mode 100644
index 00000000..0ecc777d
--- /dev/null
+++ b/test/atomics_cxx/mt.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if !defined(__ARM_ARCH) && !defined(__NetBSD__)
+    #define USE_BARRIER
+#endif
+#include
+#if defined(USE_BARRIER)
+    #if defined(__cpp_lib_barrier)
+        #include
+using vsync_thread_barrier = std::barrier<>;
+    #else
+        #include
+        #include
+class vsync_thread_barrier
+{
+  public:
+    explicit vsync_thread_barrier(size_t thread_count)
+        : threshold(thread_count), count(thread_count), generation(0)
+    {
+    }
+
+    void arrive_and_wait()
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        auto gen = generation;
+        if (--count == 0U) {
+            generation++;
+            count = threshold;
+            cv.notify_all();
+        } else {
+            cv.wait(lock, [this, gen] { return generation != gen; });
+        }
+    }
+
+  private:
+    const size_t threshold;
+    size_t count;
+    size_t generation;
+    std::mutex mutex;
+    std::condition_variable cv;
+};
+    #endif
+#endif
+
+template <typename TT, size_t N, size_t IT> class MT_Test
+{
+#if defined(__APPLE__)
+    using time_point = int64_t;
+#else
+    using time_point = std::chrono::_V2::system_clock::time_point;
+#endif
+
+  public:
+    static void run_tests()
+    {
+        static MT_Test ins;
+        ins.mt_store();
+        ins.mt_xchg();
+        ins.mt_inc();
+        ins.mt_add();
+        ins.mt_dec();
+        ins.mt_sub();
+        ins.mt_cmpxchg();
+    }
+
+  private:
+    static constexpr int64_t tolerate = 0;
+    static constexpr TT OP_COUNT = IT * N;
+    void info(std::string name, intptr_t vsync_t, intptr_t std_t)
+    {
+        std::cout << "[" << name << "] vsync: " << vsync_t
+                  << "ms, std: " << std_t << "ms";
+        if (vsync_t <= std_t) {
+            std::cout << " [winner: \033[1;32mvsync_t\033[0m ("
+                      << std_t - vsync_t << ")]";
+        } else {
+            std::cout << " [winner: \033[1;31mstd_t\033[0m(" << vsync_t - std_t
+                      << ")]";
+        }
+        std::cout << " with #threads: " << N << ", #Iter: " << IT << std::endl;
+    }
+    void mt_store()
+    {
+        constexpr TT v = std::numeric_limits<TT>::max();
+        auto vsync_t = test([&] { subject = v; });
+        auto std_t = test([&] { mirror = v; });
+        info(__FUNCTION__, vsync_t, std_t);
+        assert(subject == mirror);
+    }
+
+    void mt_xchg()
+    {
+        constexpr TT v = std::numeric_limits<TT>::max();
+        auto vsync_t = test([&] { subject.exchange(v); });
+        auto std_t = test([&] { mirror.exchange(v); });
+        info(__FUNCTION__, vsync_t, std_t);
+        assert(subject == mirror);
+    }
+
+    void mt_cmpxchg()
+    {
+        constexpr TT init_val = std::numeric_limits<TT>::min();
+
+        mirror = subject = init_val;
+        auto vsync_t = test([&] {
+            TT e = init_val;
+            while (!subject.compare_exchange_strong(e, e + 1)) {}
+        });
+        auto std_t = test([&] {
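// The cmpxchg test leans on compare_exchange updating `e` on failure, which
// turns CAS into a lock-free fetch-increment. The pattern in isolation
// (sketch):
#include <atomic>
#include <cstdint>

static uint32_t cas_increment(std::atomic<uint32_t> &a)
{
    uint32_t e = a.load(std::memory_order_relaxed);
    while (!a.compare_exchange_strong(e, e + 1)) {
        // e now holds the freshly observed value; retry from there.
    }
    return e + 1; // the value this thread installed
}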
+            TT e = init_val;
+            while (!mirror.compare_exchange_strong(e, e + 1)) {}
+        });
+        info(__FUNCTION__, vsync_t, std_t);
+        assert(subject == mirror);
+        assert(mirror == (init_val + OP_COUNT));
+    }
+
+    void mt_inc()
+    {
+        auto constexpr init_val = std::numeric_limits<TT>::min();
+        mirror = subject = init_val;
+        auto vsync_t = test([&] { subject++; });
+        auto std_t = test([&] { mirror++; });
+        info(__FUNCTION__, vsync_t, std_t);
+        assert(subject == mirror);
+        assert(mirror == (init_val + OP_COUNT));
+    }
+
+    void mt_add()
+    {
+        constexpr TT init_val = std::numeric_limits<TT>::min();
+        constexpr TT step = 3;
+        mirror = subject = init_val;
+        auto vsync_t = test([&] { subject += step; });
+        auto std_t = test([&] { mirror += step; });
+        info(__FUNCTION__, vsync_t, std_t);
+        assert(subject == mirror);
+        assert(mirror == (init_val + (OP_COUNT * step)));
+    }
+
+    void mt_dec()
+    {
+        auto constexpr init_val = std::numeric_limits<TT>::max();
+        mirror = subject = init_val;
+        auto vsync_t = test([&] { subject--; });
+        auto std_t = test([&] { mirror--; });
+        info(__FUNCTION__, vsync_t, std_t);
+        assert(subject == mirror);
+        assert(mirror == (init_val - OP_COUNT));
+    }
+
+    void mt_sub()
+    {
+        constexpr TT init_val = std::numeric_limits<TT>::max();
+        constexpr TT step = 3;
+        mirror = subject = init_val;
+        auto vsync_t = test([&] { subject -= step; });
+        auto std_t = test([&] { mirror -= step; });
+        info(__FUNCTION__, vsync_t, std_t);
+        assert(subject == mirror);
+        assert(mirror == (init_val - (OP_COUNT * step)));
+    }
+
+    inline int64_t diff(time_point start, time_point end)
+    {
+        auto duration = end - start;
+#if defined(__APPLE__)
+        return duration;
+#else
+        return std::chrono::duration_cast<std::chrono::milliseconds>(duration)
+            .count();
+#endif
+    }
+    int64_t test(std::function<void()> f)
+    {
+#ifdef USE_BARRIER
+        vsync_thread_barrier barrier(N);
+#endif
+        std::vector<std::thread> threads;
+        auto start = launch(threads, [&] {
+#ifdef USE_BARRIER
+            barrier.arrive_and_wait();
+#endif
+            for (auto i = 0; i < IT; i++) {
+                f();
+            }
+        });
+        auto end = join(threads);
+        return diff(start, end);
+    }
+
+    inline time_point now()
+    {
+#if defined(__APPLE__)
+        return 0;
+#else
+        return std::chrono::high_resolution_clock::now();
+#endif
+    }
+    time_point launch(std::vector<std::thread> &threads,
+                      std::function<void()> f)
+    {
+        auto start = now();
+        for (size_t i = 0; i < N; i++) {
+            threads.push_back(std::thread(f));
+        }
+        return start;
+    }
+    time_point join(std::vector<std::thread> &threads)
+    {
+        for (auto &t : threads) {
+            t.join();
+        }
+        return now();
+    }
+    vsync::atomic<TT> subject;
+    std::atomic<TT> mirror;
+};
+
+int
+main(void)
+{
+    constexpr size_t IT = 1000000;
+    constexpr size_t N = 12;
+    std::cout << "hardware_concurrency: " << std::thread::hardware_concurrency()
+              << " " << std::endl;
+    MT_Test::run_tests();
+    MT_Test::run_tests();
+    return 0;
+}
diff --git a/test/atomics_cxx/ut_int_types.cpp b/test/atomics_cxx/ut_int_types.cpp
new file mode 100644
index 00000000..edca3c71
--- /dev/null
+++ b/test/atomics_cxx/ut_int_types.cpp
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
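// ut_int_types.cpp below follows a differential-testing pattern: every
// operation runs against std::atomic (the reference, "mirror") and
// vsync::atomic (the "subject"), and both the return value and the
// resulting state must agree. The skeleton of one such check (a sketch;
// `Subject` is any type exposing the std::atomic member signatures):
#include <atomic>
#include <cassert>
#include <cstdint>

template <typename Subject>
static void check_fetch_add(Subject &subject, std::atomic<uint32_t> &mirror,
                            uint32_t v)
{
    uint32_t rs = subject.fetch_add(v);
    uint32_t rm = mirror.fetch_add(v);
    assert(rs == rm);                        // same return value
    assert(subject.load() == mirror.load()); // same resulting state
}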
+ * SPDX-License-Identifier: MIT + */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* Success criteria is to match the behavior of std::atomic */ +template struct TestAtomics { + /* we define the subject(vsync::atomic), and the mirror(std::atomic) with + * volatile qualifier or not based on template parameter IsVolatile*/ + using SubjectType = + typename std::conditional, + vsync::atomic>::type; + using MirrorType = + typename std::conditional, + std::atomic>::type; + + void ut_rw() + { + for (TT v : vals) { + mirror = v; + subject = v; + assert(mirror == subject); + } + + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + for (TT v : vals) { + mirror.store(v, static_cast(order)); + subject.store(v, static_cast(order)); + assert(mirror.load(static_cast(order)) == + subject.load(static_cast(order))); + } + } + } + void ut_xchg() + { + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + for (TT v : vals) { + TT mirror_r = + mirror.exchange(v, static_cast(order)); + TT subject_r = subject.exchange( + v, static_cast(order)); + assert(mirror_r == subject_r); + assert(mirror == subject); + } + } + } + void ut_cmpxchg() + { + constexpr vsize_t repeat = 3; + bool r_var = false; + bool r_mirror = false; + TT v_var = 0; + TT v_mirror = 0; + + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + for (TT val : vals) { + for (vsize_t i = 0; i < repeat; i++) { + r_mirror = mirror.compare_exchange_strong( + v_var, val, static_cast(order), + static_cast(order)); + r_var = subject.compare_exchange_strong( + v_mirror, val, static_cast(order), + static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + assert(v_var == v_mirror); + } + } + + for (TT val : vals) { + for (vsize_t i = 0; i < repeat; i++) { + r_mirror = mirror.compare_exchange_weak( + v_var, val, static_cast(order), + static_cast(order)); + r_var = subject.compare_exchange_weak( + v_mirror, val, static_cast(order), + static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + assert(v_var == v_mirror); + } + } + } + } + + /* We do not add arithmetic tests for bool, since arithmetic functions are + * not defined for it */ + template + /* enable only if the type is not bool */ + typename std::enable_if::value>::type + ut_dec() + { + TT r_var = 0; + TT r_mirror = 0; + + for (TT val : vals) { + mirror = subject = val; + r_mirror = mirror--; + r_var = subject--; + assert(mirror == subject); + assert(r_mirror == r_var); + } + + for (TT val : vals) { + mirror = subject = val; + r_mirror = --mirror; + r_var = --subject; + assert(mirror == subject); + assert(r_mirror == r_var); + } + } + template + typename std::enable_if::value>::type + ut_inc() + { + TT r_var = 0; + TT r_mirror = 0; + + for (TT val : vals) { + mirror = subject = val; + r_mirror = mirror++; + r_var = subject++; + assert(mirror == subject); + assert(r_mirror == r_var); + } + + for (TT val : vals) { + mirror = subject = val; + r_mirror = ++mirror; + r_var = ++subject; + assert(mirror == subject); + assert(r_mirror == r_var); + } + } + template + typename std::enable_if::value>::type + ut_fetch_xor() + { + TT r_var = 0; + TT r_mirror = 0; + + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + subject = mirror = max; + for (TT val : vals) { + r_mirror = mirror.fetch_xor( + val, static_cast(order)); + r_var = 
subject.fetch_xor( + val, static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + } + } + } + template + typename std::enable_if::value>::type + ut_fetch_or() + { + TT r_var = 0; + TT r_mirror = 0; + + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + subject = mirror = min; + for (TT val : vals) { + r_mirror = + mirror.fetch_or(val, static_cast(order)); + r_var = subject.fetch_or( + val, static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + } + } + } + template + typename std::enable_if::value>::type + ut_fetch_and() + { + TT r_var = 0; + TT r_mirror = 0; + + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + subject = mirror = max; + // TODO: reverse vals start from end and go to begin + for (TT val : vals) { + r_mirror = mirror.fetch_and( + val, static_cast(order)); + r_var = subject.fetch_and( + val, static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + } + } + } + template + typename std::enable_if::value>::type + ut_add_overload() + { + mirror = subject = min; + for (TT v : vals) { + mirror += v; + subject += v; + assert(mirror == subject); + } + } + template + typename std::enable_if::value>::type + ut_sub_overload() + { + mirror = subject = max; + for (TT v : vals) { + mirror -= v; + subject -= v; + assert(mirror == subject); + } + } + template + typename std::enable_if::value>::type + ut_fetch_add() + { + TT r_var = 0; + TT r_mirror = 0; + + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + mirror = subject = min; + for (TT val : vals) { + r_mirror = mirror.fetch_add( + val, static_cast(order)); + r_var = subject.fetch_add( + val, static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + } + } + } + template + typename std::enable_if::value>::type + ut_fetch_sub() + { + TT r_var = 0; + TT r_mirror = 0; + + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + mirror = subject = max; + for (TT val : vals) { + r_mirror = mirror.fetch_sub( + val, static_cast(order)); + r_var = subject.fetch_sub( + val, static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + } + } + } + template + typename std::enable_if::value>::type + ut_bitwise() + { + mirror = subject = max; + for (TT val : vals) { + mirror ^= val; + subject ^= val; + assert(mirror == subject); + + mirror |= val; + subject |= val; + assert(mirror == subject); + + mirror &= val; + subject &= val; + assert(mirror == subject); + } + } + + template + typename std::enable_if::value>::type + run_arithmetic(TestAtomics &ins) + { + std::cout << "Arithmetic tests are active for the given type." 
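// The guards above use the C++11 enable_if idiom to drop the arithmetic
// tests from the overload set when the tested type is bool. Reduced to its
// core (sketch):
#include <type_traits>

template <typename T> struct Probe {
    template <typename U = T>
    typename std::enable_if<!std::is_same<U, bool>::value>::type
    arithmetic_only()
    {
        // Not callable when T is bool; SFINAE removes this member silently.
    }
};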
+ << std::endl; + ins.ut_inc(); + ins.ut_fetch_add(); + ins.ut_add_overload(); + ins.ut_dec(); + ins.ut_fetch_sub(); + ins.ut_sub_overload(); + ins.ut_fetch_xor(); + ins.ut_fetch_or(); + ins.ut_fetch_and(); + ins.ut_bitwise(); + } + template + typename std::enable_if::value>::type + run_arithmetic_if_supported(TestAtomics &ins) + { + ins.run_arithmetic(ins); + } + + template + typename std::enable_if::value>::type + run_arithmetic_if_supported(TestAtomics &) + { + } + + static void run_tests() + { + static TestAtomics ins; + std::cout << "Testing type [" << typeid(TT).name() << "] Max = " << max + << " Min = " << min << " with size = " << sizeof(TT) + << "byte(s)" << std::endl; + + assert(ins.mirror == ins.subject); + ins.ut_rw(); + ins.ut_xchg(); + ins.ut_cmpxchg(); + ins.run_arithmetic_if_supported(ins); + assert(ins.mirror == ins.subject); + } + + static constexpr TT max = std::numeric_limits::max(); + static constexpr TT min = std::numeric_limits::min(); + + private: + std::vector vals = {min, (max / 4), (max / 2), max}; + MirrorType mirror; + SubjectType subject; +}; + +int +main(void) +{ + /* Run with all c++ primitive types*/ + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); +#if !(defined(__APPLE__) || defined(__ARM_ARCH)) + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); +#endif + // TODO: TestAtomics::run_tests(); + // TODO: TestAtomics::run_tests(); + // TODO: TestAtomics::run_tests(); + // TODO: TestAtomics::run_tests(); + // TODO: TestAtomics::run_tests(); + // TODO: TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + + /* Run with all vatomic types*/ + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + + /* Run with all vatomic types with volatile */ + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); + TestAtomics::run_tests(); +} diff --git a/test/atomics_cxx/ut_ptr.cpp b/test/atomics_cxx/ut_ptr.cpp new file mode 100644 index 00000000..7ca6371c --- /dev/null +++ b/test/atomics_cxx/ut_ptr.cpp @@ -0,0 +1,235 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
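// The order loops in these tests iterate the memory_order enumerators as
// consecutive integers, which holds on all mainstream implementations
// (relaxed through seq_cst) even though the standard does not pin the
// values; pre-C++20, memory_order is an unscoped enum, so the int
// conversion is implicit. The pattern in isolation (sketch):
#include <atomic>

static void for_each_order_sketch()
{
    for (int o = std::memory_order_relaxed; o <= std::memory_order_seq_cst;
         o++) {
        std::memory_order order = static_cast<std::memory_order>(o);
        (void)order; // run the operation under test with `order`
    }
}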
+ * SPDX-License-Identifier: MIT + */ +#include +#include +#include +#include +#include + +struct Student { + static constexpr size_t length = 20; + int id; + int age; + char name[length]; +}; + +template struct TestPtrAtomic { + void ut_rw() + { + mirror = subject = nullptr; + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + for (size_t i = 0; i < length; i++) { + mirror.store(&arr[i], static_cast(order)); + subject.store(&arr[i], static_cast(order)); + + assert(mirror.load(static_cast(order)) == + subject.load(static_cast(order))); + } + } + } + void ut_xchg() + { + TT *r_var = 0; + TT *r_mirror = 0; + subject = mirror = nullptr; + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + for (size_t i = 0; i < length; i++) { + r_mirror = mirror.exchange( + &arr[i], static_cast(order)); + r_var = subject.exchange( + &arr[i], static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + } + } + } + void ut_cmpxchg() + { + constexpr size_t repeat = 3; + bool r_var = false; + bool r_mirror = false; + TT *v_var = nullptr; + TT *v_mirror = nullptr; + + mirror = subject = nullptr; + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + for (size_t i = 0; i < length; i++) { + v_var = v_mirror = nullptr; + for (vsize_t i = 0; i < repeat; i++) { + r_mirror = mirror.compare_exchange_strong( + v_var, &arr[i], static_cast(order), + static_cast(order)); + r_var = subject.compare_exchange_strong( + v_mirror, &arr[i], + static_cast(order), + static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + assert(v_var == v_mirror); + } + } + } + + mirror = subject = nullptr; + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + for (size_t i = 0; i < length; i++) { + v_var = v_mirror = nullptr; + for (vsize_t i = 0; i < repeat; i++) { + r_mirror = mirror.compare_exchange_weak( + v_var, &arr[i], static_cast(order), + static_cast(order)); + r_var = subject.compare_exchange_weak( + v_mirror, &arr[i], + static_cast(order), + static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + assert(v_var == v_mirror); + } + } + } + } + void ut_fetch_add() + { + TT *r_var = 0; + TT *r_mirror = 0; + + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + mirror = subject = &arr[0]; + for (auto val : offsets) { + r_mirror = mirror.fetch_add( + val, static_cast(order)); + r_var = subject.fetch_add( + val, static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + } + } + } + void ut_fetch_sub() + { + TT *r_var = 0; + TT *r_mirror = 0; + + for (int order = vsync::memory_order_relaxed; + order <= vsync::memory_order_seq_cst; order++) { + mirror = subject = &arr[length - 1]; + for (auto val : offsets) { + r_mirror = mirror.fetch_sub( + val, static_cast(order)); + r_var = subject.fetch_sub( + val, static_cast(order)); + assert(subject == mirror); + assert(r_mirror == r_var); + } + } + } + void ut_inc() + { + mirror = subject = &arr[0]; + for (size_t i = 0; i < length; i++) { + auto r_m = mirror++; + auto r_s = subject++; + assert(r_m == r_s); + assert(subject == mirror); + if (i < length - 1) { + assert(mirror == &arr[i + 1]); + } + } + mirror = subject = &arr[0]; + for (size_t i = 0; i < length; i++) { + auto r_m = ++mirror; + auto r_s = ++subject; + assert(r_m == r_s); + assert(subject == mirror); + if (i < length - 1) 
{
+                assert(mirror == &arr[i + 1]);
+            }
+        }
+    }
+    void ut_dec()
+    {
+        mirror = subject = &arr[length - 1];
+        for (size_t i = length - 1; i < length; i--) {
+            auto r_m = mirror--;
+            auto r_s = subject--;
+            assert(r_m == r_s);
+            assert(subject == mirror);
+            if (i > 0) {
+                assert(mirror == &arr[i - 1]);
+            }
+        }
+        mirror = subject = &arr[length - 1];
+        for (size_t i = length - 1; i < length; i--) {
+            auto r_m = --mirror;
+            auto r_s = --subject;
+            assert(r_m == r_s);
+            assert(subject == mirror);
+            if (i > 0) {
+                assert(mirror == &arr[i - 1]);
+            }
+        }
+    }
+    void ut_add_overload()
+    {
+        constexpr size_t step = 2;
+        for (size_t i = 0; i < length; i += step) {
+            mirror = subject = &arr[0];
+            mirror += i;
+            subject += i;
+            assert(subject == mirror);
+            assert(mirror == &arr[i]);
+        }
+    }
+    void ut_sub_overload()
+    {
+        constexpr size_t step = 2;
+        for (size_t i = 0; i < length; i += step) {
+            mirror = subject = &arr[length - 1];
+            mirror -= i;
+            subject -= i;
+            assert(subject == mirror);
+            assert(mirror == &arr[length - 1 - i]);
+        }
+    }
+    static void run_tests()
+    {
+        static TestPtrAtomic ins;
+        std::cout << "Testing atomicptr with type [" << typeid(TT).name()
+                  << "] with size = " << sizeof(TT) << "byte(s)" << std::endl;
+
+        assert(ins.mirror == ins.subject);
+        ins.ut_rw();
+        ins.ut_xchg();
+        ins.ut_inc();
+        ins.ut_fetch_add();
+        ins.ut_add_overload();
+        ins.ut_dec();
+        ins.ut_fetch_sub();
+        ins.ut_sub_overload();
+        ins.ut_cmpxchg();
+        assert(ins.mirror == ins.subject);
+    }
+
+  private:
+    static constexpr size_t length = 1000;
+    std::atomic<TT *> mirror;
+    vsync::atomic<TT *> subject;
+    TT arr[length]{};
+    std::vector<ptrdiff_t> offsets = {0, 1, 2, 4, 7};
+};
+
+int
+main(void)
+{
+    TestPtrAtomic::run_tests();
+    TestPtrAtomic::run_tests();
+    TestPtrAtomic::run_tests();
+    return 0;
+}
diff --git a/test/atomics_cxx/vatomic_empty.cpp b/test/atomics_cxx/vatomic_empty.cpp
index e32a06e6..5f375456 100644
--- a/test/atomics_cxx/vatomic_empty.cpp
+++ b/test/atomics_cxx/vatomic_empty.cpp
@@ -1,3 +1,7 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ */
 extern "C" {
 #include
 }
diff --git a/test/atomics_gen/CMakeLists.txt b/test/atomics_gen/CMakeLists.txt
index 5b742da9..2ce74b45 100644
--- a/test/atomics_gen/CMakeLists.txt
+++ b/test/atomics_gen/CMakeLists.txt
@@ -1,10 +1,6 @@
 # Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
 # SPDX-License-Identifier: MIT
 set(V_TEST_LABELS "CROSS")
-# currently not installed with distros
-if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/templates)
-    add_subdirectory(templates)
-endif()
 
 file(GLOB TESTS *test*.c)
 foreach(test IN ITEMS ${TESTS})
diff --git a/test/sanity/CMakeLists.txt b/test/sanity/CMakeLists.txt
new file mode 100644
index 00000000..84ed3b96
--- /dev/null
+++ b/test/sanity/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+# SPDX-License-Identifier: MIT
+
+file(GLOB TESTS *.c)
+foreach(TST_FILE IN ITEMS ${TESTS})
+    get_filename_component(TEST ${TST_FILE} NAME_WE)
+
+    add_executable(${TEST} ${TST_FILE})
+
+    target_link_libraries(${TEST} vatomic pthread)
+
+    v_add_bin_test(NAME ${TEST} COMMAND ${TEST})
+endforeach()
diff --git a/test/sanity/ut_vtypes_format.c b/test/sanity/ut_vtypes_format.c
new file mode 100644
index 00000000..37e84635
--- /dev/null
+++ b/test/sanity/ut_vtypes_format.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+int
+main(void)
+{
+    vuint32_t v32   = VUINT32_MAX;
+    vuint64_t v64   = VUINT64_MAX;
+    vuintptr_t vptr = VUINTPTR_MAX;
+
+    printf("VUINT32_MAX = %" VUINT32_FORMAT "\n", v32);
+    printf("VUINT64_MAX = %" VUINT64_FORMAT "\n", v64);
+    printf("VUINTPTR_MAX = %" VUINTPTR_FORMAT "\n", vptr);
+    return 0;
+}
diff --git a/tmplr/CMakeLists.txt b/tmplr/CMakeLists.txt
deleted file mode 100644
index 1652a97a..00000000
--- a/tmplr/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
-# SPDX-License-Identifier: MIT
-add_library(tmplr INTERFACE)
-target_include_directories(tmplr INTERFACE include)
-
-add_executable(tmplr.bin tmplr.c)
diff --git a/tmplr/include/tmplr/macros.h b/tmplr/include/tmplr/macros.h
deleted file mode 100644
index 004b868d..00000000
--- a/tmplr/include/tmplr/macros.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
- * SPDX-License-Identifier: MIT
- */
-#ifndef TMPLR_MACROS_H
-#define TMPLR_MACROS_H
-
-/**
- * Marks the beginning of a template block.
- *
- * Takes a comma-separated list of key value pairs, where values can be lists of
- * the form [[val1;val2;val3]] or single values. For example:
- *
- * ```c
- * _tmpl_begin(KEY1 = VALUE1, KEY2 = [[VALUE2; VALUE3]]);
- * KEY1 = KEY2;
- * _tmpl_end;
- * ```
- */
-
-#define _tmpl_begin(...)
-
-/**
- * Marks the end of a template block.
- */
-#define _tmpl_end
-
-/**
- * Adds a string to begin or end hook.
- *
- * @param HOOK either begin or end
- *
- * The string argument may contain commas but no parentheses.
- */
-#define _tmpl_hook(HOOK, ...)
-
-/**
- * Stops tmplr processing and output.
- *
- * Until the matching _tmpl_unmute, all text is discarded and all tmplr
- * commands are ignored. A muted block is useful to add includes that help LSP
- * servers.
- */
-#define _tmpl_mute
-
-/**
- * Restarts tmplr processing and output.
- */
-#define _tmpl_unmute
-
-/**
- * Maps a key K to a value which may contain commas.
- */
-#define _tmpl_map(K, ...)
-
-/**
- * Skips template block iteration.
- *
- * @note This can only be called within _tmpl_begin and _tmpl_end.
- */
-#define _tmpl_skip
-
-/**
- * Deletes the line from the template output.
- */
-#define _tmpl_dl
-
-/**
- * Adds a new line.
- */
-#define _tmpl_nl
-
-/**
- * Aborts tmplr execution and exits with error code 1.
- */
-#define _tmpl_abort
-
-/**
- * Makes content uppercase.
- *
- * @note This can only be called within _tmpl_begin and _tmpl_end.
- */
-#define _tmpl_upcase(...)
- -#endif diff --git a/tmplr/test/test-drop.in b/tmplr/test/test-drop.in deleted file mode 100644 index aeb837d7..00000000 --- a/tmplr/test/test-drop.in +++ /dev/null @@ -1,6 +0,0 @@ -_tmpl_map(FILTER_ptr_lt, DROP!); -_tmpl_begin(TY = [[ptr]], COND = [[neq;lt]], MO = [[seq]], DROP!=_tmpl_skip, FILTER=_tmpl_dl); -FILTER_TY_COND; -static inline TT _vatomic_await_COND_MS(AA *a, TT v); -_tmpl_end; - diff --git a/tmplr/test/test-skip.in b/tmplr/test/test-skip.in deleted file mode 100644 index 126f7e67..00000000 --- a/tmplr/test/test-skip.in +++ /dev/null @@ -1,12 +0,0 @@ -_tmpl_begin(TY = [[u32; ptr]], COND = [[eq; ge]], - OP = [[sub; set]], MO = [[seq]], - FILTER_ptr_eq = KEEP, - FILTER_ptr_set = KEEP, - FILTER_ptr = SKIP, - FILTER = KEEP, - KEEP = _tmpl_dl, - SKIP = _tmpl_skip); -FILTER_TY_COND -FILTER_TY_OP ----> ty = TY, cond = COND, op = OP -_tmpl_end; diff --git a/tmplr/test/test1.in b/tmplr/test/test1.in deleted file mode 100644 index ee8cde85..00000000 --- a/tmplr/test/test1.in +++ /dev/null @@ -1,3 +0,0 @@ -_tmpl_begin(X=[[dog;cat;rat]]); -X is an animal. -_tmpl_end; diff --git a/tmplr/test/test2.in b/tmplr/test/test2.in deleted file mode 100644 index 11e26aaf..00000000 --- a/tmplr/test/test2.in +++ /dev/null @@ -1,6 +0,0 @@ -_tmpl_begin(X=[[dog;cat;rat]], Y=[[hello;hallo;oi]]); -X is an animal. -Say Y! --------- -_tmpl_end; - diff --git a/tmplr/test/test3.in b/tmplr/test/test3.in deleted file mode 100644 index 750126e5..00000000 --- a/tmplr/test/test3.in +++ /dev/null @@ -1,9 +0,0 @@ -_tmpl_begin(X=[[dog;cat;rat]], Y=[[hello;hallo;oi]]); -X is an animal. -Say Y! --------- -_tmpl_end; -HERE IS THE EMPTY LINE -_tmpl_begin(X=[[cat;rat]]); -X is nice. -_tmpl_end; diff --git a/tmplr/test/test4.in b/tmplr/test/test4.in deleted file mode 100644 index dd16349d..00000000 --- a/tmplr/test/test4.in +++ /dev/null @@ -1,6 +0,0 @@ -THIS IS A FILE -_tmpl_map(VAR, value); -_tmpl_begin(Y=[[something]]); ---> Y -_tmpl_end; -AND IT HAS ALREADY ENDED diff --git a/tmplr/test/test5.in b/tmplr/test/test5.in deleted file mode 100644 index 2b813aa1..00000000 --- a/tmplr/test/test5.in +++ /dev/null @@ -1,5 +0,0 @@ -THIS IS A NEW FILE - -_tmpl_begin(X=[[cat;dog]]); -X has some VAR. And Y? -_tmpl_end; diff --git a/tmplr/test/test6.in b/tmplr/test/test6.in deleted file mode 100644 index 4ae195ed..00000000 --- a/tmplr/test/test6.in +++ /dev/null @@ -1,7 +0,0 @@ -_tmpl_dl; this is a comment -_tmpl_map(OP_seq, sequential consistent); -_tmpl_map(OP_acq, acquire); -_tmpl_map(OP_acq, acquired); -_tmpl_begin(mo=[[seq;acq;rel]]); -OP_mo -_tmpl_end; diff --git a/tmplr/test/test7.in b/tmplr/test/test7.in deleted file mode 100644 index ee6708c3..00000000 --- a/tmplr/test/test7.in +++ /dev/null @@ -1,6 +0,0 @@ -_tmpl_map(DROP_IF_cat, _tmpl_skip); -_tmpl_begin(X=[[dog;cat;rat]]); -DROP_IF_X -after drop -X is running. -_tmpl_end; diff --git a/tmplr/tmplr.c b/tmplr/tmplr.c deleted file mode 100644 index 97b6177c..00000000 --- a/tmplr/tmplr.c +++ /dev/null @@ -1,765 +0,0 @@ -//'usr/bin/env' cc -xc -DSCRIPT -o tmplr.bin "$0" && exec ./tmplr.bin "$@" -/* - * Copyright (C) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. 
- * SPDX-License-Identifier: MIT
- * Description: template replacement tool
- * Author: Huawei Dresden Research Center
- */
-#include <assert.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-/*******************************************************************************
- * tmplr - a template replacement tool
- *
- * tmplr is a simple tool to achieve a minimum level of genericity in libvsync
- * without resorting to C preprocessor macros.
- *
- * ## Template blocks
- *
- * tmplr reads input files and replaces mappings in template blocks. Template
- * blocks are marked with TMPL_BEGIN/TMPL_END commands (see "Template commands"
- * below).
- *
- * For example:
- *
- *     TMPL_BEGIN(key=value)
- *     The following word, key, will be replaced by value.
- *     TMPL_END
- *
- * ## Template mappings
- *
- * The mappings given to TMPL_BEGIN are called *template mappings*.
- *
- * Iteration mappings may take a single value as in keyA = value1 or multiple
- * values as in keyA = [[value1; value2]]. The list of values is separated by
- * semicolons and optionally surrounded by [[ ]]. The list of template mappings
- * is separated by commas, for example:
- *
- *     TMPL_BEGIN(keyA=[[val1;val2]], keyB=[[val3;val4]])
- *     ...
- *     TMPL_END
- *
- * ## Block iterations
- *
- * If template mappings contain multiple values, the template block is repeated
- * for each combination of template mappings. Each such instance of template
- * mappings is called *iteration mapping* in the source code.
- *
- * Consider this block example:
- *
- *     TMPL_BEGIN(key=[[val1;val2]])
- *     Key --> key
- *     TMPL_END
- *
- * The template mapping consists of key=[[val1;val2]]. In the first iteration of
- * the block, the iteration mapping is key=val1; in the second iteration, the
- * mapping is key=val2.
- *
- * ## Persistent mappings
- *
- * Beyond template mappings, tmplr also supports persistent mappings. Outside
- * template blocks, the user can use the command TMPL_MAP(key, value) to add
- * persistent mappings to the tmplr state.
- *
- * During the processing of each line in a block iteration, *after* exhausting
- * the application of iteration mappings, tmplr applies any persistent mapping
- * matches.
- *
- * ## Command line and mapping override
- *
- * tmplr is a CLI program and takes as input a list of files. It provides two
- * flags: -v for verbose output and -D to select a single value for an iteration
- * mapping. For example, -DkeyA=value1. Other values will be ignored.
- *
- * ## Valid keys and values
- *
- * tmplr **does not** tokenize the input. Hence, a key "two words" is a
- * perfectly valid key. Characters such as $ can also be used in keys and
- * values.
- *
- * The only restriction is that keys cannot contain
- * - new line: \n
- * - parentheses: ( )
- * - comma: ,
- * - semicolon: ;
- * - nor any tmplr commands
- *
- * Values cannot contain parentheses, commas, nor semicolons.
- *
- * Disclaimer:
- * We are aware of similar, more powerful tools such as Jinja, Mustache and M4.
- * tmplr follows three design principles:
- *
- * - simplicity: as simple and maintainable as possible
- * - dependency freedom: no additional language which will get deprecated
- * - c-syntax transparency: annotations should not interfere with the LSP
- *   servers such as clangd
- ******************************************************************************/
-
-/*******************************************************************************
- * Template commands
- ******************************************************************************/
-
-#define TMPL_MAP    "_tmpl_map"
-#define TMPL_BEGIN  "_tmpl_begin"
-#define TMPL_END    "_tmpl_end"
-#define TMPL_MUTE   "_tmpl_mute"
-#define TMPL_UNMUTE "_tmpl_unmute"
-
-#define TMPL_ABORT  "_tmpl_abort"
-#define TMPL_SKIP   "_tmpl_skip"
-#define TMPL_DL     "_tmpl_dl"
-#define TMPL_NL     "_tmpl_nl"
-#define TMPL_UPCASE "_tmpl_upcase"
-#define TMPL_HOOK   "_tmpl_hook"
-
-/*******************************************************************************
- * Maximum line lengths and buffer sizes
- ******************************************************************************/
-
-/* maximum length of a line */
-#define MAX_SLEN 256
-/* maximum number of lines in a block */
-#define MAX_BLEN 100
-/* maximum number of keys */
-#define MAX_KEYS 1024
-/* maximum length of a value */
-#define MAX_VLEN 256
-/* Buffer to hold the value */
-#define V_BUF_LEN ((MAX_VLEN) + 1)
-/* maximum length of a key */
-#define MAX_KLEN 64
-/* Buffer to hold the key */
-#define K_BUF_LEN ((MAX_KLEN) + 1)
-/* maximum number of replacements per line */
-#define MAX_APPLY 32
-
-/*******************************************************************************
- * Type definitions
- ******************************************************************************/
-
-/* pair_t is a key-value pair. Key and value are 0-terminated char arrays. */
-typedef struct {
-    char key[K_BUF_LEN];
-    char val[V_BUF_LEN];
-} pair_t;
-
-/* err_t represents an error message */
-typedef struct {
-    const char *msg;
-} err_t;
-
-#define NO_ERROR                                                               \
-    (err_t)                                                                    \
-    {                                                                          \
-        0                                                                      \
-    }
-#define ERROR(m)                                                               \
-    (err_t)                                                                    \
-    {                                                                          \
-        .msg = m                                                               \
-    }
-#define IS_ERROR(err) (err).msg != NULL
-
-/*******************************************************************************
- * Logging
- ******************************************************************************/
-
-bool _verbose;
-#define debugf(fmt, ...)                                                       \
-    do {                                                                       \
-        if (_verbose)                                                          \
-            printf("// " fmt, ##__VA_ARGS__);                                  \
-    } while (0)
-
-/*******************************************************************************
- * String functions
- ******************************************************************************/
-
-void
-trim(char *s, char c)
-{
-    assert(s);
-    /* remove trailing space */
-    while (s[strlen(s) - 1] == c)
-        s[strlen(s) - 1] = '\0';
-
-    /* remove leading space */
-    while (s[0] == c)
-        /* use len of s to include \0 */
-        memmove(s, s + 1, strlen(s));
-}
-
-void
-trims(char *s, char *chars)
-{
-    for (char *c = chars; *c; c++)
-        trim(s, *c);
-}
-
-
-/*******************************************************************************
- * Mappings
- ******************************************************************************/
-
-/* template mappings: key -> value lists
- *
- * Passed as arguments to TMPL_BEGIN.
- */
-pair_t template_map[MAX_KEYS];
-
-/* template override mappings : key -> value
- *
- * Given via command line -D option.
These overwrite template mapping values - */ -pair_t override_map[MAX_KEYS]; - -/* iteration mappings: key -> value - * - * These are the single values of the template mappings, potentially overriden - * by override mappings. They are set at each iteration of a template block. - * - * They precede the persistent mappings. - */ -pair_t iteration_map[MAX_KEYS]; - -/* persistent mappings: key -> value - * - * Given via TMPL_MAP(key, value) commands outside of template blocks. - * - * These succeed the iteration mappings. - */ -pair_t persistent_map[MAX_KEYS]; - -/* block hooks */ -pair_t block_hooks[MAX_KEYS]; - -void -remap(pair_t *map, const char *key, const char *val) -{ - if (key == NULL) - return; - for (int i = 0; i < MAX_KEYS; i++) { - pair_t *p = map + i; - if (!p->key[0] || strcmp(p->key, key) == 0) { - memset(p->key, 0, MAX_KLEN); - strcat(p->key, key); - trim(p->key, ' '); - - memset(p->val, 0, MAX_VLEN); - strcat(p->val, val); - trim(p->val, ' '); - debugf("[REMAP] %s = %s\n", p->key, p->val); - return; - } - } -} - -pair_t * -find(pair_t *map, const char *key) -{ - for (int i = 0; i < MAX_KEYS; i++) { - pair_t *p = map + i; - if (strcmp(p->key, key) == 0) - return p; - } - return NULL; -} - -void -unmap(pair_t *map, char *key) -{ - pair_t *p = find(map, key); - if (p == NULL) - return; - - memset(p->key, 0, MAX_KLEN); - memset(p->val, 0, MAX_VLEN); -} - -void -show(pair_t *map, const char *name) -{ - debugf("[SHOW MAP] %s\n", name); - for (int i = 0; i < MAX_KEYS; i++) { - pair_t *p = map + i; - if (p->key[0]) { - debugf("\t%s = %s\n", p->key, p->val); - } - } -} - -void -clean(pair_t *map) -{ - memset(map, 0, sizeof(pair_t) * MAX_KEYS); -} - -/******************************************************************************* - * parse functions - ******************************************************************************/ - -err_t -parse_assign(pair_t *p, char *start, char *end) -{ - char key[K_BUF_LEN] = {0}; - char val[V_BUF_LEN] = {0}; - - char *comma = strstr(start, ","); - if (comma == NULL) - return ERROR("expected ','"); - start++; - strncat(key, start, comma - start); - comma++; - strncat(val, comma, end - comma); - remap(p, key, val); - return NO_ERROR; -} - -err_t -parse_template_map(char *start, char *end) -{ - char *next, *values; - start++; - *end = '\0'; - -again: - next = strstr(start, ","); - if (next) { - *next = '\0'; - next++; - } - values = strstr(start, "="); - if (values == NULL) - return ERROR("expected '='"); - *values = '\0'; - values++; - - char key[K_BUF_LEN] = {0}; - strncat(key, start, MAX_KLEN); - - char val[V_BUF_LEN] = {0}; - strncat(val, values, strlen(values)); - trims(val, " []"); - - remap(template_map, key, val); - - if (next) { - start = next; - goto again; - } - - return NO_ERROR; -} - -/******************************************************************************* - * Line-based processing - ******************************************************************************/ -bool muted = false; - -void -line_add_nl(char *line) -{ - const size_t len = strlen(line); - if (line[len - 1] != '\n') { - assert(len + 1 < MAX_SLEN); - line[len] = '\n'; - line[len + 1] = '\0'; - } -} - -bool -line_apply(char *line, const char *key, const char *val) -{ - char *cur; - - if (!key[0] || (cur = strstr(line, key)) == NULL) - return false; - - const size_t vlen = strlen(val); - const size_t klen = strlen(key); - const size_t slen = strlen(cur); - - const bool is_nl = strcmp(key, TMPL_NL) == 0; - if (!is_nl) - debugf("[APPLY] KEY: %s(%lu) VAL: %s(%lu)\n", key, 
klen, val, vlen); - - /* make space for value */ - if (!is_nl) - debugf("\tBEFORE: %s", line); - memmove(cur + vlen, cur + klen, slen); - memcpy(cur, val, vlen); - if (!is_nl) - debugf("\tAFTER: %s", line); - - return true; -} - -bool -process_block_line(char *line) -{ - bool applied; - char *cur; - - char buf[MAX_SLEN] = {0}; - strcat(buf, line); - line_add_nl(buf); - - debugf("[LINE] %s", buf); - int cnt = 0; -again: - applied = false; - - for (int i = 0; i < MAX_KEYS && cnt < MAX_APPLY; i++) { - /* should delete line? */ - if (strstr(buf, TMPL_DL)) { - strcpy(buf, ""); - goto end; - } - if (strstr(buf, TMPL_SKIP)) - return false; - - const pair_t *pi = iteration_map + i; - const pair_t *pp = persistent_map + i; - if (!line_apply(buf, pi->key, pi->val) && - !line_apply(buf, pp->key, pp->val)) - continue; - applied = true; - cnt++; - - /* if one mapping is applied, restart testing all mappings */ - i = -1; - } - if (applied) - goto again; - -end: - - /* apply UPCASE */ - while ((cur = strstr(buf, TMPL_UPCASE))) { - char *start = cur + strlen(TMPL_UPCASE); - char sep = start[0] == '(' ? ')' : start[0]; - char ssep[] = {sep, 0}; - char *end = strstr(start + 1, ssep); - assert(start && end && end > start); - char *ch = start + 1; - while (ch < end) { - if (*ch >= 'a' && *ch <= 'z') - *ch -= ('a' - 'A'); - ch++; - } - size_t len = (end - start) - 1; - memmove(cur, start + 1, len); - /* include the end of line */ - memmove(cur + len, end + 1, strlen(end)); - } - - /* apply NL */ - while (line_apply(buf, TMPL_NL, "\n")) - ; - - /* output and return */ - printf("%s", buf); - assert(cnt < MAX_APPLY); - return true; -} - -/******************************************************************************* - * Block processing - ******************************************************************************/ - -void -process_begin() -{ - debugf("============================\n"); - debugf("[BLOCK_BEGIN]\n"); - show(persistent_map, "persistent_map"); - show(block_hooks, "block_hooks"); - show(template_map, "template_map"); - debugf("----------------------------\n"); -} - -/******************************************************************************* - * Block buffer - * - * Inside a template block, we buffer the whole block and then output the - * content of hte buffer with mappings applied for each value of the mapping - * iterators. 
- ******************************************************************************/ -char save_block[MAX_BLEN][MAX_SLEN]; -int save_k; - -const char * -sticking(const char *key) -{ - for (int i = 0; i < MAX_KEYS && strlen(override_map[i].key) != 0; i++) - if (strcmp(override_map[i].key, key) == 0) - return override_map[i].val; - return NULL; -} - -void -process_block(int i, const int nvars) -{ - pair_t *hook = NULL; - if (i == nvars) { - if ((hook = find(block_hooks, "begin"))) - if (!process_block_line(hook->val)) - return; - - for (int k = 0; k < save_k && process_block_line(save_block[k]); k++) - ; - - if ((hook = find(block_hooks, "end"))) - (void)process_block_line(hook->val); - - return; - } - pair_t *p = template_map + i; - char val[V_BUF_LEN] = {0}; - strcat(val, p->val); - - const char *sval = sticking(p->key); - const char *sep = ";"; - char *saveptr = NULL; - char *tok = strtok_r(val, sep, &saveptr); - int c = 0; - while (tok) { - trims(tok, " "); - if (sval == NULL || strcmp(sval, tok) == 0) { - (void)c; - remap(iteration_map, p->key, tok); - process_block(i + 1, nvars); - unmap(iteration_map, p->key); - } - tok = strtok_r(0, sep, &saveptr); - } - if (i == 0 && (hook = find(block_hooks, "final"))) { - (void)process_block_line(hook->val); - } -} - -/******************************************************************************* - * File processing - ******************************************************************************/ - -/* processing state */ -enum state { - TEXT, - IGNORE_BLOCK, - BLOCK_BEGIN, - BLOCK_BEGIN_ARGS, - BLOCK_TEXT, - BLOCK_END, - - MAP, - HOOK, -} S = TEXT; - - -err_t -process_line(char *line) -{ - char *cur = NULL; - char *end = NULL; - err_t err; - -again: - switch (S) { - case TEXT: - if (!muted && strstr(line, TMPL_ABORT)) { - fflush(stdout); - abort(); - } - if (!muted && strstr(line, TMPL_BEGIN "(")) { - S = BLOCK_BEGIN; - goto again; - } - if (!muted && strstr(line, TMPL_MAP)) { - S = MAP; - goto again; - } - if (!muted && strstr(line, TMPL_HOOK)) { - S = HOOK; - goto again; - } - if (!muted && strstr(line, TMPL_MUTE)) { - debugf("[OUTPUT] muted\n"); - muted = true; - break; - } - if (strstr(line, TMPL_UNMUTE)) { - debugf("[OUTPUT] unmuted\n"); - muted = false; - break; - } - if (!muted && strstr(line, TMPL_DL) == NULL) - printf("%s", line); - break; - - case BLOCK_BEGIN: - clean(template_map); - cur = strstr(line, "("); - if (cur == NULL) - return ERROR("expected '('"); - S = BLOCK_BEGIN_ARGS; - line = cur; - goto again; - - case BLOCK_BEGIN_ARGS: - if ((end = strstr(line, ")"))) { - err_t err = parse_template_map(line, end); - if (IS_ERROR(err)) - return err; - S = BLOCK_TEXT; - process_begin(); - } else { - if ((end = strstr(line, ",")) == NULL) - return ERROR("expected ','"); - for (char *e = NULL; (e = strstr(end + 1, ","), e && e != end); - end = e) - ; - parse_template_map(line, end); - } - break; - - case BLOCK_TEXT: - cur = strstr(line, TMPL_END); - if (cur != NULL) { - S = BLOCK_END; - goto again; - } - if (save_k >= MAX_BLEN) - return ERROR("block too long"); - - memcpy(save_block[save_k++], line, strlen(line) + 1); - break; - case BLOCK_END: - /* consume */ - { - int nvars = 0; - for (int i = 0; i < MAX_KEYS; i++) - if (template_map[i].key[0]) - nvars++; - process_block(0, nvars); - } - save_k = 0; - S = TEXT; - break; - - case MAP: - if ((cur = strstr(line, "(")) == NULL) - return ERROR("expected '('"); - if ((end = strstr(cur, ")")) == NULL) - return ERROR("expected ')'"); - for (char *e = NULL; (e = strstr(end + 1, ")"), e && e != 
end); - end = e) - ; - err = parse_assign(persistent_map, cur, end); - if (IS_ERROR(err)) - return err; - S = TEXT; - break; - - case HOOK: - if ((cur = strstr(line, "(")) == NULL) - return ERROR("expected '('"); - if ((end = strstr(cur, ")")) == NULL) - return ERROR("expected ')'"); - err = parse_assign(block_hooks, cur, end); - if (IS_ERROR(err)) - return err; - S = TEXT; - break; - - default: - assert(0 && "invalid"); - } - return NO_ERROR; -} - -/******************************************************************************* - * File processing - ******************************************************************************/ - -void -process_file(const char *fn) -{ - FILE *fp = fopen(fn, "r+"); - assert(fp); - - char *line = NULL; - size_t len = 0; - ssize_t read; - err_t err; - int i = 0; - while ((read = getline(&line, &len, fp)) != -1) { - assert(line); - err = process_line(line); - if (IS_ERROR(err)) { - fprintf(stderr, "%s:%d: error: %s\n", fn, i + 1, err.msg); - abort(); - } - if (line) { - free(line); - line = NULL; - } - i++; - } - if (line) - free(line); - - fclose(fp); -} - -/******************************************************************************* - * main function with options - * - * $0 [-v] [FILE]... - ******************************************************************************/ -int -main(int argc, char *argv[]) -{ - debugf("vatomic generator\n"); - int c; - char *k; - while ((c = getopt(argc, argv, "hvD:")) != -1) { - switch (c) { - case 'D': - k = strstr(optarg, "="); - *k++ = '\0'; - remap(override_map, optarg, k); - break; - case 'v': - _verbose = true; - break; - case 'h': - printf("tmplr - a simple templating tool\n\n"); - printf("Usage:\n\ttmplr [FLAGS] [FILE ...]\n\n"); - printf("Flags:\n"); - printf("\t-v verbose\n"); - printf( - "\t-Dkey=value override template map assignement of " - "key\n"); - exit(0); - case '?': - printf("error"); - exit(1); - default: - break; - } - } - for (int i = optind; i < argc; i++) - process_file(argv[i]); - -// if started as script, remove tmplr file -#ifdef SCRIPT - return remove(argv[0]); -#else - return 0; -#endif -} diff --git a/verify/ASMModel.hs b/verify/ASMModel.hs deleted file mode 100644 index 722567e1..00000000 --- a/verify/ASMModel.hs +++ /dev/null @@ -1,207 +0,0 @@ --- Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
--- SPDX-License-Identifier: MIT - - -module ASMModel where - -import Data.Maybe (listToMaybe) -import Data.List -import Data.Tuple - - -newtype Register = Register String - deriving (Show, Eq) - -data Condition = EQ | NE | HS | LO | HI | LS deriving Show -data CompareCondition = Cb_Z | Cb_NZ -instance Show CompareCondition where - show Cb_Z = "z" - show Cb_NZ = "nz" - - -data Atomic = Atomic - { name :: String - , parameters :: [Register] - , inputClobbers :: [Register] - , outputClobbers :: [Register] - , outputNoOverlapClobbers :: [Register] - , locals :: [Register] - , returns :: Maybe Register - , instructions :: InstrSeq - } - -data Instr = Ld Bool Register Register - | St Bool Register Register - | Ldx Bool Register Register - | Stx Bool Register Register Register - - | Swp Bool Bool Register Register Register - | Cas Bool Bool Register Register Register - - | Ldumax Bool Bool Register Register Register - | Ldclr Bool Bool Register Register Register - | Ldset Bool Bool Register Register Register - | Ldeor Bool Bool Register Register Register - | Ldadd Bool Bool Register Register Register - - | Stumax Bool Register Register - | Stclr Bool Register Register - | Stset Bool Register Register - | Steor Bool Register Register - | Stadd Bool Register Register - - | Mvn Register Register - | Neg Register Register - - | B Condition String Char - | Cb CompareCondition Register String Char - | Cmp Register Register - | Add Register Register Register - | Sub Register Register Register - | And Register Register Register - | Eor Register Register Register - | Orr Register Register Register - | Mov Register Register - | Wfe - | Sevl - | Label String - | Ignored String - deriving Show - -newtype InstrSeq = InstrSeq [Instr] deriving Show - -data Method = Method String [String] InstrSeq - -paramTypes = ["vatomic64_t *", "vuint64_t", "vuint32_t", "const vatomic64_t *", "const vuint64_t", "const vuint32_t"] -cTypes = ["vuint64_t", "void", "void *", "vatomicptr_t *"] - -rmwInvariants :: Atomic -> String -rmwInvariants atm = let params = [reg | (Register reg) <- parameters atm] in - let a = head params - v = (head.tail) params in - " assert step >= old(step);\n\ - \ assert states[step]->gpr[" ++ a ++ "] == old(states[step]->gpr[" ++ a ++ "]);\n\ - \ assert states[step]->gpr[" ++ v ++ "] == old(states[step]->gpr[" ++ v ++ "]);\n\ - \ assert no_writes(old(step), step, effects);\n\n" -xchgInvariants :: Atomic -> [Char] -xchgInvariants atm = let params = [reg | (Register reg) <- parameters atm] in - let lst = if isCMPXCHG (name atm) then (head.tail.tail) params else (head.tail) params - in "assert states[step]->gpr[" ++ lst ++ "] == old(states[step]->gpr[" ++ lst ++ "]);\n" ++ rmwInvariants atm - -awaitInvariants atm = let evReg = if hasWFE atm then "assert(event_register[step] || global_monitor_exclusive[step]);\n\n" - else "" - in rmwInvariants atm ++ evReg - -readsWritesFunctions :: [String] -readsWritesFunctions = ["read", "read_acq", "read_rlx", "write", "write_rel", "write_rlx"] - -awaitConditions :: [String] -awaitConditions = ["neq", "eq", "le", "lt", "ge", "gt"] - -rmwFunctions :: [String] -rmwFunctions = getRMWFunctions ++ simpRMWFunctions - -awaitFunctions :: [String] -awaitFunctions = do - cond <- awaitConditions - fence <- ["", "_acq", "_rlx"] - return $ "await_" ++ cond ++ fence - -rmwGetFences = ["", "_acq", "_rel", "_rlx"] -rmwSimpFences = ["", "_rel", "_rlx"] - -modify :: [String] -modify = ["max", "and", "xor", "or", "add", "sub"] - -getRMWFunctions :: [String] -getRMWFunctions = do - op <- 
modify - fence <- rmwGetFences - return $ "get_" ++ op ++ fence - -xchgFunctions :: [String] -xchgFunctions = do - t <- ["xchg", "cmpxchg"] - fence <- rmwGetFences - return $ t ++ fence - -simpRMWFunctions :: [String] -simpRMWFunctions = do - op <- modify - fence <- rmwSimpFences - return $ op ++ fence - -extractCondition :: String -> String -extractCondition a = let res = filter (`isInfixOf` a) awaitConditions in if null res then "" else head res - -isRMW :: String -> Bool -isRMW = flip elem rmwFunctions - -isRead :: String -> Bool -isRead = isInfixOf "read" - -isWrite :: String -> Bool -isWrite = isInfixOf "write" - -isAwait :: String -> Bool -isAwait = flip elem awaitFunctions - -isCMPXCHG :: String -> Bool -isCMPXCHG = isInfixOf "cmpxchg" - -isXCHG :: String -> Bool -isXCHG = flip elem xchgFunctions - -isGet :: String -> Bool -isGet = isInfixOf "get" - -checkLabel :: Instr -> Bool -checkLabel (Label ".1") = True -checkLabel _ = False - -ops :: [String] -ops = modify ++ ["xchg", "cmpxchg"] - -extractOp :: String -> String -extractOp atomicName = let res = filter (`isInfixOf` atomicName) ops in if null res then "" else head res - -assumeCondition :: String -> String -assumeCondition name - | isGet name = loadOrder ++ storeOrder - | isRMW name = storeOrder - | isRead name = loadOrder - | isWrite name = storeOrder - | isAwait name = loadOrder - | otherwise = loadOrder ++ storeOrder -- XCHG - - where - loadOrder = let res = filter (`isInfixOf` name) ["_rel", "_acq", "_rlx"] in - if null res then "assume load_order == order_acq_sc;\n" - else let ord = head res in if ord /= "_rel" then "assume load_order == order" ++ ord ++ ";\n" - else "assume load_order == order_rlx;\n" - - storeOrder = let res = filter (`isInfixOf` name) ["_rel", "_acq", "_rlx"] in - if null res then "assume store_order == order_rel_sc;\n" - else let ord = head res in if ord /= "_acq" then "assume store_order == order" ++ head res ++ ";\n" - else "assume store_order == order_rlx;\n" - -registerSubstitution :: [(Register, Register)] -> [Register] -> [Register] -registerSubstitution table = map (chg (map swap table)) - where chg substitutionTable currRegister = case lookup currRegister substitutionTable of - Just (Register substituteRegister) -> Register substituteRegister - Nothing -> currRegister - -isWFE :: Instr -> Bool -isWFE Wfe = True -isWFE _ = False - -hasWFE :: Atomic -> Bool -hasWFE atm = let (InstrSeq instr) = instructions atm - in foldr (\x currFoundWFE -> currFoundWFE || isWFE x ) False instr - --- 4 - vatomic_fence_(seq|acq|rel|rlx) --- 3 - vatomic64_read_(seq|acq|rlx) --- 3 - vatomic64_write_(seq|rel|rlx) --- 32 = 8*4 - vatomic64_(xchg|cmpxchg|get_(max|and|or|xor|add|sub))_(seq|acq|rel|rlx) --- 18 = 6*3 - vatomic64_(max|and|or|xor|add|sub)_(seq|rel|rlx) --- 18 = 6*3 - vatomic64_await_(eq|neq|le|lt|ge|gt)_(seq|acq|rlx) diff --git a/verify/ASMParsers.hs b/verify/ASMParsers.hs deleted file mode 100644 index b18ff903..00000000 --- a/verify/ASMParsers.hs +++ /dev/null @@ -1,395 +0,0 @@ --- Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
--- SPDX-License-Identifier: MIT - -module ASMParsers where - -import ASMModel -import Text.Parsec -import Text.Parsec.String ( Parser, Parser ) -import Control.Monad (void) -import Data.Foldable (asum) -import Prelude hiding (EQ) - -nextAsmParam :: Parser Register -nextAsmParam = do - many (noneOf "[\n") - char '[' - reg <- many (noneOf "]\n") - char ']' - return $ Register reg - -ignoredParser :: Parser Instr -ignoredParser = do - instr <- string "prfm" <|> string ".align" - rest <- many (noneOf "\n") - spaces - return $ Ignored (instr ++ rest) - -eventParser :: Parser Instr -eventParser = do - instr <- (Sevl <$ try (string "sevl")) <|> (Wfe <$ try (string "wfe")) - many (noneOf "\n") - spaces - return instr - -labelParser :: Parser Instr -labelParser = do - label <- many (noneOf ": \n") - char ':' - spaces - return $ Label ('.' : label) - - -addParser :: Parser Instr -addParser = do - string "add" - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Add reg1 reg2 reg3 - -subParser :: Parser Instr -subParser = do - string "sub" - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Sub reg1 reg2 reg3 - -andParser :: Parser Instr -andParser = do - string "and" - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ And reg1 reg2 reg3 - -eorParser :: Parser Instr -eorParser = do - string "eor" - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Eor reg1 reg2 reg3 - -orrParser :: Parser Instr -orrParser = do - string "orr" - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Orr reg1 reg2 reg3 - -cmpParser :: Parser Instr -cmpParser = do - string "cmp" - reg1 <- nextAsmParam - reg2 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Cmp reg1 reg2 - -movParser :: Parser Instr -movParser = do - string "mov" - reg1 <- nextAsmParam - reg2 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Mov reg1 reg2 - -parseCondition :: Parser Condition -parseCondition = EQ <$ try (string "eq") - <|> NE <$ try (string "ne") - <|> HS <$ try (string "hs") - <|> LO <$ try (string "lo") - <|> HI <$ try (string "hi") - <|> LS <$ try (string "ls") - -swpParser :: Parser Instr -swpParser = do - string "swp" - acq <- option False (char 'a' >> return True) - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Swp acq rel reg1 reg2 reg3 - -casParser :: Parser Instr -casParser = do - string "cas" - acq <- option False (char 'a' >> return True) - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Cas acq rel reg1 reg2 reg3 - -mvnParser :: Parser Instr -mvnParser = do - string "mvn" - reg1 <- nextAsmParam - reg2 <- nextAsmParam - many (noneOf "\n") - spaces - - return $ Mvn reg1 reg2 - -negParser :: Parser Instr -negParser = do - string "neg" - reg1 <- nextAsmParam - reg2 <- nextAsmParam - many (noneOf "\n") - spaces - - return $ Neg reg1 reg2 - -ldumaxParser :: Parser Instr -ldumaxParser = do - string "ldumax" - acq <- option False (char 'a' >> return True) - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Ldumax acq rel 
reg1 reg2 reg3 - - - -ldclrParser :: Parser Instr -ldclrParser = do - string "ldclr" - acq <- option False (char 'a' >> return True) - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Ldclr acq rel reg1 reg2 reg3 - -ldsetParser :: Parser Instr -ldsetParser = do - string "ldset" - acq <- option False (char 'a' >> return True) - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Ldset acq rel reg1 reg2 reg3 - -ldeorParser :: Parser Instr -ldeorParser = do - string "ldeor" - acq <- option False (char 'a' >> return True) - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Ldeor acq rel reg1 reg2 reg3 - -ldaddParser :: Parser Instr -ldaddParser = do - string "ldadd" - acq <- option False (char 'a' >> return True) - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - reg3 <- nextAsmParam - many (noneOf "\n") - spaces - return $ Ldadd acq rel reg1 reg2 reg3 - -stumaxParser :: Parser Instr -stumaxParser = do - string "stumax" - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - many (noneOf("\n")) - spaces - return $ Stumax rel reg1 reg2 - -stclrParser :: Parser Instr -stclrParser = do - string "stclr" - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - many (noneOf("\n")) - spaces - return $ Stclr rel reg1 reg2 - -stsetParser :: Parser Instr -stsetParser = do - string "stset" - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - many (noneOf("\n")) - spaces - return $ Stset rel reg1 reg2 - -steorParser :: Parser Instr -steorParser = do - string "steor" - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - many (noneOf("\n")) - spaces - return $ Steor rel reg1 reg2 - -staddParser :: Parser Instr -staddParser = do - string "stadd" - rel <- option False (char 'l' >> return True) - reg1 <- nextAsmParam - reg2 <- nextAsmParam - many (noneOf("\n")) - spaces - return $ Stadd rel reg1 reg2 - -branchParser :: Parser Instr -branchParser = do - string "b." 
-    cond <- parseCondition
-    spaces
-    label <- many (noneOf "bf\n")
-    direction <- oneOf "bf"
-    spaces
-    return $ B cond label direction
-
-cbParser :: Parser Instr
-cbParser =
-    let parseCond = Cb_NZ <$ string "nz" <|> Cb_Z <$ string "z"
-    in do
-        string "cb"
-        cond <- (parseCond)
-        reg <- nextAsmParam
-        many $ noneOf "1234567890"
-        label <- many (noneOf "bf\n")
-        direction <- oneOf "bf"
-        spaces
-        return $ Cb cond reg label direction
-
-ldParser :: Parser Instr
-ldParser = do
-    string "ld"
-    b <- option False (char 'a' >> return True)
-    param1 <- nextAsmParam
-    param2 <- nextAsmParam
-    spaces
-    return $ Ld b param1 param2
-
-ldxParser :: Parser Instr
-ldxParser = do
-    string "ldx"
-    param1 <- nextAsmParam
-    param2 <- nextAsmParam
-    spaces
-    return $ Ldx False param1 param2
-
-ldaxParser :: Parser Instr
-ldaxParser = do
-    string "ldax"
-    param1 <- nextAsmParam
-    param2 <- nextAsmParam
-    spaces
-    return $ Ldx True param1 param2
-
-stParser :: Parser Instr
-stParser = do
-    string "st"
-    b <- option False (char 'l' >> return True)
-    param1 <- nextAsmParam
-    param2 <- nextAsmParam
-    spaces
-    return $ St b param1 param2
-
-stxParser :: Parser Instr
-stxParser = do
-    string "stx"
-    param1 <- nextAsmParam
-    param2 <- nextAsmParam
-    param3 <- nextAsmParam
-    spaces
-    return $ Stx False param1 param2 param3
-
-stlxParser :: Parser Instr
-stlxParser = do
-    string "stlx"
-    param1 <- nextAsmParam
-    param2 <- nextAsmParam
-    param3 <- nextAsmParam
-    spaces
-    return $ Stx True param1 param2 param3
-
-instrParser :: Parser Instr
-instrParser = spaces *> (
-        try ldumaxParser
-    <|> try ldclrParser
-    <|> try ldsetParser
-    <|> try ldeorParser
-    <|> try ldaddParser
-
-    <|> try stumaxParser
-    <|> try stclrParser
-    <|> try stsetParser
-    <|> try steorParser
-    <|> try staddParser
-
-    <|> try stlxParser
-    <|> try stxParser
-    <|> try stParser
-    <|> try ldaxParser
-    <|> try ldxParser
-    <|> try ldParser
-
-    <|> try negParser
-    <|> try mvnParser
-
-    <|> try swpParser
-    <|> try casParser
-
-    <|> try andParser
-    <|> try addParser
-    <|> try subParser
-    <|> try eorParser
-    <|> try orrParser
-    <|> try cmpParser
-    <|> try movParser
-    <|> try branchParser
-    <|> try cbParser
-    <|> try eventParser
-    <|> try ignoredParser
-    <|> try labelParser
-    ) <?> "Unknown instruction: "
-
-programParser :: Parser InstrSeq
-programParser = InstrSeq <$> (many (instrParser <* spaces) <* eof)
-
-
-
diff --git a/verify/BoogieTranslator.hs b/verify/BoogieTranslator.hs
deleted file mode 100644
index 63424190..00000000
--- a/verify/BoogieTranslator.hs
+++ /dev/null
@@ -1,106 +0,0 @@
-- Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
--- SPDX-License-Identifier: MIT - -module BoogieTranslator where -import ASMModel -import Data.Char (toLower) -import Data.Maybe - -strLower :: String -> String -strLower = map toLower - -instrToBoogie :: Instr -> String -instrToBoogie (Ld b (Register r1) (Register r2)) = "call execute(ld(" ++ strLower (show b) ++ ", " ++ r1 ++ ", " ++ r2 ++ "));\n" -instrToBoogie (Ldx b (Register r1) (Register r2)) = "call execute(ldx(" ++ strLower (show b) ++ ", " ++ r1 ++ ", " ++ r2 ++ "));\n" -instrToBoogie (St b (Register r1) (Register r2)) = "call execute(st(" ++ strLower (show b) ++ ", " ++ r1 ++ ", " ++ r2 ++ "));\n" -instrToBoogie (Stx b (Register r1) (Register r2) (Register r3)) = "call execute(stx(" ++ strLower (show b) ++ ", " ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" - -instrToBoogie (Swp acq rel (Register r1) (Register r2) (Register r3)) = "call execute(swp(" ++ strLower (show acq) ++ ", " ++ strLower (show rel) ++ ", " ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" -instrToBoogie (Cas acq rel (Register r1) (Register r2) (Register r3)) = "call execute(cas(" ++ strLower (show acq) ++ ", " ++ strLower (show rel) ++ ", " ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" - -instrToBoogie (Mvn (Register r1) (Register r2)) = "call execute(mvn(" ++ r1 ++ ", " ++ r2 ++ "));\n" -instrToBoogie (Neg (Register r1) (Register r2)) = "call execute(neg(" ++ r1 ++ ", " ++ r2 ++ "));\n" - -instrToBoogie (Ldumax acq rel (Register r1) (Register r2) (Register r3)) = "call execute(ldumax(" ++ strLower (show acq) ++ ", " ++ strLower (show rel) ++ ", " ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" -instrToBoogie (Ldclr acq rel (Register r1) (Register r2) (Register r3)) = "call execute(ldclr(" ++ strLower (show acq) ++ ", " ++ strLower (show rel) ++ ", " ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" -instrToBoogie (Ldset acq rel (Register r1) (Register r2) (Register r3)) = "call execute(ldset(" ++ strLower (show acq) ++ ", " ++ strLower (show rel) ++ ", " ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" -instrToBoogie (Ldeor acq rel (Register r1) (Register r2) (Register r3)) = "call execute(ldeor(" ++ strLower (show acq) ++ ", " ++ strLower (show rel) ++ ", " ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" -instrToBoogie (Ldadd acq rel (Register r1) (Register r2) (Register r3)) = "call execute(ldadd(" ++ strLower (show acq) ++ ", " ++ strLower (show rel) ++ ", " ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" - -instrToBoogie (Stumax b (Register r1) (Register r2)) = "call execute(stumax(" ++ strLower (show b) ++ ", " ++ r1 ++ ", " ++ r2 ++ "));\n" -instrToBoogie (Stclr b (Register r1) (Register r2)) = "call execute(stclr(" ++ strLower (show b) ++ ", " ++ r1 ++ ", " ++ r2 ++ "));\n" -instrToBoogie (Stset b (Register r1) (Register r2)) = "call execute(stset(" ++ strLower (show b) ++ ", " ++ r1 ++ ", " ++ r2 ++ "));\n" -instrToBoogie (Steor b (Register r1) (Register r2)) = "call execute(steor(" ++ strLower (show b) ++ ", " ++ r1 ++ ", " ++ r2 ++ "));\n" -instrToBoogie (Stadd b (Register r1) (Register r2)) = "call execute(stadd(" ++ strLower (show b) ++ ", " ++ r1 ++ ", " ++ r2 ++ "));\n" - -instrToBoogie (Add (Register r1) (Register r2) (Register r3)) = "call execute(add(" ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" -instrToBoogie (Sub (Register r1) (Register r2) (Register r3)) = "call execute(sub(" ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" -instrToBoogie (And (Register r1) (Register r2) (Register r3)) = "call execute(andd(" ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" -instrToBoogie (Eor (Register r1) (Register r2) (Register r3)) 
= "call execute(eor(" ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" -instrToBoogie (Orr (Register r1) (Register r2) (Register r3)) = "call execute(orr(" ++ r1 ++ ", " ++ r2 ++ ", " ++ r3 ++ "));\n" -instrToBoogie (Mov (Register r1) (Register r2)) = "call execute(mov(" ++ r1 ++ ", " ++ r2 ++ "));\n" -instrToBoogie (Cmp (Register r1) (Register r2)) = "call execute(cmp(" ++ r1 ++ ", " ++ r2 ++ "));\n" -instrToBoogie (Label s) = s ++ ":\n" -instrToBoogie (Ignored s) = "// " ++ s ++ "\n" -instrToBoogie (B cond label _) = "if (branch(" ++ show cond ++ "(), " ++ "states[step]" ++ ")) { goto ." ++ label ++ "; }\n" -instrToBoogie (Cb cond (Register r) label b) = "if (cb" ++ show cond ++ "(" ++ r ++ ", states[step]" ++ ")) { goto ." ++ label ++ "; }\n" -instrToBoogie i = "call execute(" ++ (strLower.show) i ++ "());\n" - -boogieParameters :: [Register] -> String -boogieParameters lreg = init . init $ foldr (\(Register x) l -> l ++ x ++ ", ") "" lreg - -boogieLocals :: [Register] -> String -boogieLocals = foldr (\(Register x) l -> l ++ "var " ++ x ++ ": Register;\n") "" - -boogieMethodHeader :: String -> [Register] -> Maybe Register-> String -boogieMethodHeader name lreg ret = basicHeader name lreg ++ retHeader ret - where - basicHeader name lreg - | isGet name = "implementation {:verboseName \"" ++ name ++ "\"} get_rmw(op : RMWOp, load_order, store_order: OrderRelation, " ++ boogieParameters lreg ++ " : Register)" - | isRMW name = "implementation {:verboseName \"" ++ name ++ "\"} rmw(op : RMWOp, store_order: OrderRelation, " ++ boogieParameters lreg ++ " : Register)" - | isCMPXCHG name = "implementation {:verboseName \"" ++ name ++ "\"} cmpxchg(load_order, store_order: OrderRelation, " ++ boogieParameters lreg ++ " : Register)" - | isRead name = "implementation {:verboseName \"" ++ name ++ "\"} read(load_order: OrderRelation, "++ boogieParameters lreg ++ " : Register)" - | isWrite name = "implementation {:verboseName \"" ++ name ++ "\"} write(store_order: OrderRelation, "++ boogieParameters lreg ++ " : Register)" - | isXCHG name = "implementation {:verboseName \"" ++ name ++ "\"} xchg(load_order, store_order: OrderRelation, " ++ boogieParameters lreg ++ " : Register)" - | otherwise = "implementation {:verboseName \"" ++ name ++ "\"} await(op: AwaitOp, load_order: OrderRelation, " ++ boogieParameters lreg ++ " : Register)" -- Await - retHeader Nothing = "" - retHeader (Just (Register s)) = "returns (" ++ s ++ ": Register)" - - -boogieCode :: Atomic -> String -boogieCode a = - let (InstrSeq l) = instructions a - procName = name a in - boogieLocals (locals a) ++ assumeOp procName ++ assumeCondition procName ++ - assumeDifferentRegisters a ++ do - instr <- l - " " ++ instrToBoogie instr ++ addInvariant procName instr - ++ generateConstInputAssert a - where - addInvariant name instr - | isRMW name && checkLabel instr = rmwInvariants a - | isAwait name && checkLabel instr = awaitInvariants a - | isXCHG name && checkLabel instr = xchgInvariants a - | otherwise = "" - assumeOp name - | isRMW name = "assume (op == " ++ extractOp name ++ ");\n" - | isAwait name = "assume (op == " ++ extractCondition name ++ ");\n" - | otherwise = "" - generateConstInputAssert at = let inp = inputClobbers at - in do - (Register reg) <- inp - "assert " ++ reg ++ " == old(" ++ reg ++ ");\n"; - -boogieAtomic :: Atomic -> String -boogieAtomic a = boogieMethodHeader (name a) (parameters a) (returns a) ++ "{\n" ++ boogieCode a ++ "}\n" - -assumeDifferentRegisters :: Atomic -> String -assumeDifferentRegisters atm = - let inp = 
inputClobbers atm
-        outOv = outputNoOverlapClobbers atm
-        out   = outputClobbers atm
-    in let dif = [r1 ++ " != " ++ r2 ++ " && " | (Register r1) <- inp, (Register r2) <- outOv, r1 /= r2]
-              ++ [r1 ++ " != " ++ r2 ++ " && " | (Register r1) <- out, (Register r2) <- out, r1 /= r2]
-    in case dif of
-        [] -> ""
-        _  -> "assume " ++ (init.init.init.concat) dif ++ ";\n"
diff --git a/verify/CMakeLists.txt b/verify/CMakeLists.txt
index 46c09c44..486ddde5 100644
--- a/verify/CMakeLists.txt
+++ b/verify/CMakeLists.txt
@@ -1,133 +1,150 @@
 # Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
 # SPDX-License-Identifier: MIT
-find_program(GHC ghc DOC "Haskell compiler")
+find_program(CARGO cargo DOC "Rust installation")
 find_program(BPL boogie DOC "Boogie installation")
-if(NOT GHC OR NOT BPL)
+if(NOT BPL OR NOT CARGO)
+    message(STATUS "Skipping verification: boogie or cargo not found")
     return()
 endif()
 
+set(ARMV8_CMAKE_C_COMPILER aarch64-linux-gnu-gcc)
+set(RISCV_CMAKE_C_COMPILER riscv64-linux-gnu-gcc)
+
 set(VSYNC_INCLUDE "-I$<JOIN:$<TARGET_PROPERTY:vatomic,INTERFACE_INCLUDE_DIRECTORIES>,;-I>")
 
+set(ARCHS ARMV8 RISCV)
+
 add_custom_command(
-    OUTPUT parser
-    COMMAND ghc Main.hs -odir ${CMAKE_CURRENT_BINARY_DIR} -hidir
-            ${CMAKE_CURRENT_BINARY_DIR} -o ${CMAKE_CURRENT_BINARY_DIR}/parser
-    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
-    COMMENT "Compiling the parser")
+    OUTPUT BUILTIN_SCRIPTS
+    COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}
+            ${CMAKE_CURRENT_BINARY_DIR}
+    COMMENT "Copy verification scripts for the builtin atomics")
 
-set(CMAKE_C_COMPILER aarch64-linux-gnu-gcc)
-set(TARGETS LSE LLSC LXE NO_POLITE_AWAIT)
+add_custom_command(
+    OUTPUT PERMISSION_SCRIPTS
+    COMMAND
+        chmod +x ${CMAKE_CURRENT_BINARY_DIR}/cleaner.sh && chmod +x
+        ${CMAKE_CURRENT_BINARY_DIR}/verify.sh && chmod +x
+        ${CMAKE_CURRENT_BINARY_DIR}/generate.sh
+    DEPENDS BUILTIN_SCRIPTS
+    COMMENT "Give scripts permissions")
 
-set(LSE_FLAGS -march=armv8-a+lse)
+add_custom_command(
+    OUTPUT ATOMIC_LIST
+    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_BINARY_DIR}/lists/*
+            ${CMAKE_CURRENT_BINARY_DIR}
+    DEPENDS BUILTIN_SCRIPTS
+    COMMENT "Copy the batches to the parent folder")
+
+set(BUILTIN_FLAGS -DVATOMIC_DISABLE_ARM64_LSE -DVATOMIC_BUILTINS)
 set(LLSC_FLAGS -DVATOMIC_DISABLE_ARM64_LSE)
+set(LSE_FLAGS -march=armv8-a+lse)
 set(LXE_FLAGS -march=armv8-a+lse -DVATOMIC_ENABLE_ARM64_LXE)
-set(NO_POLITE_AWAIT_FLAGS -DVATOMIC_DISABLE_POLITE_AWAIT)
 
-# Add tests for the above defined targets
-foreach(TARGET ${TARGETS})
+set(RISCV_TARGETS BUILTIN)
+set(ARMV8_TARGETS LLSC BUILTIN LSE LXE)
+
+set(ARMV8_FLAGS -O2 -x c -fkeep-inline-functions -S -mno-outline-atomics)
+set(RISCV_FLAGS -O2 -x c -fkeep-inline-functions -S)
+
+set(ATOMIC_CORE_GROUPS vatomic8 vatomic16 vatomic32 vatomic64 vatomicsz)
+set(ATOMIC_AWAIT_GROUPS vatomic32 vatomic64)
+set(ATOMIC_PTR_GROUPS vatomicptr)
+set(LISTS CORE AWAIT PTR)
+set(ORDERINGS acq rel rlx "")
+
+set(ALL_ATOMIC_GROUPS
+    vatomic8
+    vatomic16
+    vatomic32
+    vatomic64
+    vatomicsz
+    vatomicptr
+    vatomic_fence)
+
+foreach(GRP ${ALL_ATOMIC_GROUPS})
+    set(${GRP}_ATOMICS "")
+endforeach()
 
-    string(TOLOWER "${TARGET}" SUB_DIR)
-    set(SUFFIX ${SUB_DIR})
-    set(SUB_DIR ${CMAKE_CURRENT_BINARY_DIR}/${SUB_DIR})
+# Fences
+foreach(ORD ${ORDERINGS})
+    list(APPEND vatomic_fence_ATOMICS vatomic_fence_${ORD})
+endforeach()
 
-    # create a subdirectory for each target to isolate the generated files/logs
-    file(MAKE_DIRECTORY ${SUB_DIR})
-
-    set(EXPANDED_FILE ${SUB_DIR}/atomic.s)
-    set(CFLAGS ${${TARGET}_FLAGS})
-
-    # Expand the atomics for atomic.s passing the flags
associated with the - # target - add_custom_command( - OUTPUT ${EXPANDED_FILE} - COMMAND ${CMAKE_C_COMPILER} ${CFLAGS} "${VSYNC_INCLUDE}" -E -include - vsync/atomic.h - < /dev/null > ${EXPANDED_FILE} - COMMAND_EXPAND_LISTS - DEPENDS vatomic ${SUB_DIR} - COMMENT "Preprocessing vsync/atomic.h") - - # copy the parser to the target subdir - add_custom_command( - OUTPUT copy_${SUFFIX} - COMMAND cp parser ${SUB_DIR} - DEPENDS parser ${SUB_DIR} - COMMENT "Copy parser to ${SUB_DIR}") - - # Add test for running the parser on the expanded file - add_test(NAME test_${SUFFIX}_parsing COMMAND ./parser ${EXPANDED_FILE}) - - # Run the parser on the expanded file - add_custom_command( - OUTPUT parsed_${SUFFIX} - COMMAND ./parser ${EXPANDED_FILE} - DEPENDS copy_${SUFFIX} ${EXPANDED_FILE} - WORKING_DIRECTORY ${SUB_DIR} - COMMENT "Running parser on ${EXPANDED_FILE}") - - # read the atomic functions' list from a file and load it into ATOMICS - set(ATOMICS_LIST_FILE "atomics_list.txt") - file(READ ${ATOMICS_LIST_FILE} FILE_CONTENT) - string(STRIP ${FILE_CONTENT} FILE_CONTENT) - string(REPLACE "\n" ";" ATOMICS ${FILE_CONTENT}) - - # define the atomic groups and the files related to them some of these files - # exist in the source dir and some are autogenerated by the parser in the - # target's subdirectory - set(ATOMIC_GROUPS RW AWAIT RMW XCHG) - set(RW_FILES - "${CMAKE_CURRENT_SOURCE_DIR}/read.bpl ${CMAKE_CURRENT_SOURCE_DIR}/write.bpl ${SUB_DIR}/reads_writes.bpl" - ) - set(AWAIT_FILES - "${CMAKE_CURRENT_SOURCE_DIR}/await.bpl ${SUB_DIR}/awaits.bpl") - set(RMW_FILES "${CMAKE_CURRENT_SOURCE_DIR}/rmw.bpl ${SUB_DIR}/rmws.bpl") - set(XCHG_FILES "${CMAKE_CURRENT_SOURCE_DIR}/xchg.bpl ${SUB_DIR}/xchgs.bpl") - set(LIB_FILE "${CMAKE_CURRENT_SOURCE_DIR}/library.bpl") - - # ########################################################################## - # Atomic groups tests - # ########################################################################## - foreach(grp ${ATOMIC_GROUPS}) - string(TOLOWER "${grp}" GRP_NAME) - set(LOG_NAME ${SUB_DIR}/${GRP_NAME}.log) - # generate group log command - add_custom_command( - OUTPUT ${grp}_${SUFFIX} - COMMAND bash -c "boogie ${LIB_FILE} ${${grp}_FILES} >${LOG_NAME}" - DEPENDS parsed_${SUFFIX} - COMMENT - "Run boogie for ${GRP_NAME} on ${SUFFIX} generate ${LOG_NAME}") - - # add test parsing for errors - add_test(NAME test_${GRP_NAME}_${SUFFIX}_file_compiles - COMMAND awk "/Error:/ { exit 1 }" ${LOG_NAME}) - endforeach(grp) - list(TRANSFORM ATOMIC_GROUPS APPEND _${SUFFIX}) - - # ########################################################################## - # Atomic functions tests - # ########################################################################## - foreach(ATOMIC ${ATOMICS}) - - add_custom_command( - OUTPUT ${ATOMIC}_${SUFFIX} - COMMAND - bash -c - "boogie ${LIB_FILE} ${RW_FILES} ${AWAIT_FILES} ${RMW_FILES} ${XCHG_FILES} /proc:${ATOMIC} >${SUB_DIR}/${ATOMIC}.log" - DEPENDS ${ATOMIC_GROUPS} ${SUB_DIR} - COMMENT "Run boogie for function ${ATOMIC}_${SUFFIX}") - - add_test( - NAME check_vatomic_${ATOMIC}_${SUFFIX} - COMMAND - bash -c - "grep -i 'Boogie program verifier finished with' ${SUB_DIR}/${ATOMIC}.log | awk '{if (\$(NF-1) != \"0\") exit 1}'" - ) +# Core, Await, ptr +foreach(LIST ${LISTS}) + foreach(ATM_GRP ${ATOMIC_${LIST}_GROUPS}) + string(TOLOWER "lists/vatomic_${LIST}.txt" LIST_FILE) + file(STRINGS ${LIST_FILE} ATM_LIST) + foreach(ATM ${ATM_LIST}) + set(ATM_PREF ${ATM_GRP}_${ATM}) + list(APPEND ${ATM_GRP}_ATOMICS ${ATM_PREF}) + endforeach() endforeach() - list(TRANSFORM 
ATOMICS APPEND _${SUFFIX}) +endforeach() - # Add build target - add_custom_target(build_boogie_${SUFFIX} DEPENDS ${ATOMICS} ${SUB_DIR}) +foreach(ARCH ${ARCHS}) + string(TOLOWER "${ARCH}" SUB_DIR) + set(SUFFIX ${SUB_DIR}) + set(SUB_DIR ${CMAKE_CURRENT_BINARY_DIR}/${SUB_DIR}) + set(ASM_FILE ${SUB_DIR}/atomics.s) + + foreach(TARGET ${${ARCH}_TARGETS}) + string(TOLOWER "${SUFFIX}_${TARGET}" COMPOSED_TARGET) + + if(${ARCH} STREQUAL "ARMV8") + add_custom_command( + OUTPUT ASM_${COMPOSED_TARGET} + COMMAND + ${${ARCH}_CMAKE_C_COMPILER} ${${TARGET}_FLAGS} + ${${ARCH}_FLAGS} "${VSYNC_INCLUDE}" + ${CMAKE_SOURCE_DIR}/include/vsync/atomic.h -o ${ASM_FILE} && + ${CMAKE_CURRENT_BINARY_DIR}/cleaner.sh ${ASM_FILE} + COMMAND_EXPAND_LISTS + DEPENDS vatomic BUILTIN_SCRIPTS PERMISSION_SCRIPTS + COMMENT + "Generating ${COMPOSED_TARGET} builtin atomics (with cleaner)" + ) + else() + add_custom_command( + OUTPUT ASM_${COMPOSED_TARGET} + COMMAND + ${${ARCH}_CMAKE_C_COMPILER} ${${TARGET}_FLAGS} + ${${ARCH}_FLAGS} "${VSYNC_INCLUDE}" + ${CMAKE_SOURCE_DIR}/include/vsync/atomic.h -o ${ASM_FILE} + COMMAND_EXPAND_LISTS + DEPENDS vatomic BUILTIN_SCRIPTS PERMISSION_SCRIPTS + COMMENT "Generating ${COMPOSED_TARGET} builtin atomics") + endif() + foreach(GRP ${ALL_ATOMIC_GROUPS}) + set(DEPS_VAR "${GRP}_${COMPOSED_TARGET}_DEPS") + set(${DEPS_VAR} "") + + foreach(ATM ${${GRP}_ATOMICS}) + add_test(NAME check_${COMPOSED_TARGET}_${ATM} + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/verify.sh ${ATM} + ${SUFFIX}) + set_tests_properties( + check_${COMPOSED_TARGET}_${ATM} + PROPERTIES LABELS ${COMPOSED_TARGET}_${GRP}) + + add_custom_command( + OUTPUT GEN_${COMPOSED_TARGET}_${ATM} + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/generate.sh ${ATM} + ${SUFFIX} + DEPENDS ASM_${COMPOSED_TARGET} ATOMIC_LIST + COMMENT "Generating Boogie for ${ATM} (${COMPOSED_TARGET})") + + list(APPEND ${DEPS_VAR} GEN_${COMPOSED_TARGET}_${ATM}) + endforeach() + + add_custom_target( + build_boogie_${COMPOSED_TARGET}_${GRP} + DEPENDS ${${DEPS_VAR}} ASM_${COMPOSED_TARGET} + PERMISSION_SCRIPTS) + endforeach() + endforeach() endforeach() diff --git a/verify/Cargo.lock b/verify/Cargo.lock new file mode 100644 index 00000000..33a8c425 --- /dev/null +++ b/verify/Cargo.lock @@ -0,0 +1,633 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "getrandom", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anstream" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" + +[[package]] +name = "anstyle-parse" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +dependencies = [ + "anstyle", + "once_cell", + "windows-sys", +] + +[[package]] +name = "asm2boogie" +version = "0.1.0" +dependencies = [ + "clap", + "env_logger", + "graph-cycles", + "head-tail-iter", + "itertools", + "lazy_static", + "log", + "nohash", + "nom", + "petgraph", + "phf", + "regex", +] + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" + +[[package]] +name = "cfg-if" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "clap" +version = "4.5.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6088f3ae8c3608d19260cd7445411865a485688711b78b5be70d78cd96136f83" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22a7ef7f676155edfb82daa97f99441f3ebf4a58d5e32f295a56259f1b6facc8" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "clap_lex" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" + +[[package]] +name = "colorchoice" +version = "1.0.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "env_filter" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0" +dependencies = [ + "log", + "regex", +] + +[[package]] +name = "env_logger" +version = "0.11.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3716d7a920fb4fac5d84e9d4bce8ceb321e9414b4409da61b07b75c1e3d0697" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "jiff", + "log", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi", +] + +[[package]] +name = "graph-cycles" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27b92c49194cd4f20bad0d9875503c951993f4249c0cfd210a49ed39ef072a0c" +dependencies = [ + "ahash", + "petgraph", +] + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" + +[[package]] +name = "head-tail-iter" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52cd57aecf4800fa23e8501b514d48a31134cc918add657d3f9682c8e3aa62a5" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "indexmap" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "jiff" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d699bc6dfc879fb1bf9bdff0d4c56f0884fc6f0d0eb0fba397a6d00cd9a6b85e" +dependencies = [ + "jiff-static", + "log", + "portable-atomic", + "portable-atomic-util", + "serde", +] + +[[package]] +name = "jiff-static" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8d16e75759ee0aa64c57a56acbf43916987b20c77373cb7e808979e02b93c9f9" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.174" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" + +[[package]] +name = "log" +version = "0.4.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "nohash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0f889fb66f7acdf83442c35775764b51fed3c606ab9cee51500dbde2cf528ca" + +[[package]] +name = "nom" +version = "8.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d75b0bedcc4fe52caa0e03d9f1151a323e4aa5e2d78ba3580400cd3c9e2bc4bc" + +[[package]] +name = "petgraph" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +dependencies = [ + "fixedbitset", + "indexmap", +] + +[[package]] +name = "phf" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +dependencies = [ + "phf_macros", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +dependencies = [ + "phf_shared", + "rand", +] + +[[package]] +name = "phf_macros" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" +dependencies = [ + "phf_generator", + "phf_shared", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "phf_shared" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +dependencies = [ + "siphasher", +] + +[[package]] +name = "portable-atomic" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "proc-macro2" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = 
"1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "syn" +version = "2.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" 
+dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags", +] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/verify/Cargo.toml b/verify/Cargo.toml new file mode 100644 index 00000000..404d8f9a --- /dev/null +++ b/verify/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "asm2boogie" +version = "0.1.0" +edition = "2024" + +[dependencies] +nom = "8.0.0" +clap = { version = "4.5.32", features = ["derive"] } +env_logger = "0.11.7" +log = "0.4.26" +regex = "1.11.1" +lazy_static = "1.5.0" +phf = { version = 
"0.11.3", features = ["macros"] } +petgraph = "0.7.1" +nohash = "0.2.0" +itertools = "0.14.0" +head-tail-iter = "1.0.1" +graph-cycles = "0.3" diff --git a/verify/Main.hs b/verify/Main.hs deleted file mode 100644 index a7c84d02..00000000 --- a/verify/Main.hs +++ /dev/null @@ -1,290 +0,0 @@ --- Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. --- SPDX-License-Identifier: MIT - -import Text.Parsec -import Text.Parsec.String (Parser) -import Control.Monad -import Data.Foldable (asum) -import Data.Maybe (catMaybes, listToMaybe, maybeToList) -import ASMModel -import ASMParsers (programParser) -import BoogieTranslator -import Data.List -import System.IO -import GHC.IO.Handle.Text -import System.IO (hPrint) -import System.Exit -import System.Environment - -funcParser :: [String] -> Parser (String, [Register], [Register], [Register], [Register], [Register], Maybe Register, String) -funcParser retTypes = do - string "static" - skipMany1 space - string "inline" - skipMany1 space - retType <- asum (map (try.string) retTypes) - skipMany1 space - -- currently parsing just vatomic64 - string "vatomic64_" - name <- many (noneOf "(\n") - char '(' - regs <- many $ parameterParser paramTypes - spaces - char '{' - spaces - locals <- many $ localParser paramTypes - string "__asm__ volatile(" - code <- getCode - spaces - char ':' - (outputOperands, outputNoOverlap) <- getOutputOperands - spaces - char ':' - inputOperands <- getOperandList - skipMany $ noneOf ";" - char ';' - spaces - ret <- returnMaybeParser - skipMany $ noneOf "}" - char '}' - spaces - return (name - , registerSubstitution (outputOperands ++ inputOperands) $ reverse regs - , map fst inputOperands - , map fst outputOperands - , outputNoOverlap - , (registerSubstitution (outputOperands ++ inputOperands) . reverse . deleteReg ret) locals - , (listToMaybe . registerSubstitution (outputOperands ++ inputOperands) . 
maybeToList) ret - , code - ) -deleteReg :: Maybe Register -> [Register] -> [Register] -deleteReg Nothing = id -deleteReg (Just x) = delete x - -parseReturn :: Parser Register -parseReturn = do - spaces - string "return" - spaces - reg <- many $ noneOf ";" - char ';' - spaces - return $ Register reg - -returnMaybeParser :: Parser (Maybe Register) -returnMaybeParser = (Just <$> try parseReturn) <|> (spaces >> return Nothing) - -getOperand :: Parser (Maybe (Register, Register)) -getOperand = do - spaces - oneOf "[(" - spaces - lf <- many $ noneOf " ])" - spaces - oneOf "])" - spaces - char '\"' - op <- many $ noneOf "\"" - char '\"' - spaces - oneOf "[(" - spaces - rt <- many $ noneOf " ])" - spaces - oneOf "])" - spaces - optional $ char ',' - spaces - if 'Q' `elem` op then return $ Just (Register lf, Register lf) else return $ Just (Register lf, Register rt) - -getOperandList :: Parser [(Register, Register)] -getOperandList = catMaybes <$> manyTill getOperand (try $ lookAhead colonAtNewLine) - -getOutputOperand :: Parser (Maybe (Register, Register), Maybe Register) -getOutputOperand = do - spaces - oneOf "[(" - spaces - lf <- many $ noneOf " ])" - spaces - oneOf "])" - spaces - char '\"' - op <- many $ noneOf "\"" - char '\"' - spaces - oneOf "[(" - spaces - rt <- many $ noneOf " ])" - spaces - oneOf "])" - spaces - optional $ char ',' - spaces - return (if 'Q' `elem` op then Nothing else Just (Register lf, Register rt), - if '&' `elem` op then Just $ Register lf else Nothing - ) -getOutputOperands :: Parser ([(Register, Register)], [Register]) -getOutputOperands = do - x <- manyTill getOutputOperand (try $ lookAhead colonAtNewLine) - return (catMaybes $ map fst x - , catMaybes $ map snd x) - -parameterParser :: [String] -> Parser Register -parameterParser types = do - spaces - pType <- asum (map (try.string) types) - spaces - name <- many (noneOf ",)\n") - char ',' <|> char ')' - spaces - return $ Register name - -localParser :: [String] -> Parser Register -localParser types = do - spaces - pType <- asum $ map (try.string) types - spaces - name <- many $ noneOf ";\n" - char ';' - spaces - optional $ string "({" - spaces - return $ Register name - -getCode :: Parser String -getCode = do - spaces - result <- manyTill parseQuotedSection (try (lookAhead colonAtNewLine)) - return (concat result) - -parseQuotedSection :: Parser String -parseQuotedSection = do - char '"' - content <- many (noneOf "\"") - char '"' - spaces - return content - -colonAtNewLine :: Parser Char -colonAtNewLine = do - spaces - char ':' - -skipLine :: Parser () -skipLine = void (manyTill anyChar newline) - -lineParser :: [String] -> Parser (Maybe (String, [Register], [Register], [Register], [Register], [Register], Maybe Register, String)) -lineParser retTypes = (Just <$> try (funcParser retTypes)) <|> (skipLine >> return Nothing) - -fileParser :: [String] -> Parser [(String, [Register], [Register], [Register], [Register], [Register], Maybe Register, String)] -fileParser retTypes = catMaybes <$> many (lineParser retTypes) - -parseFile :: [String] -> String -> Either ParseError [(String, [Register], [Register], [Register], [Register], [Register], Maybe Register, String)] -parseFile retTypes = parse (fileParser retTypes) "" - -toAtomic :: [(String, [Register], [Register], [Register], [Register], [Register], Maybe Register, String)] -> Either String [Atomic] -toAtomic = mapM toupleToAtomic - where toupleToAtomic :: (String, [Register], [Register], [Register], [Register], [Register], Maybe Register, String) -> Either String 
Atomic - toupleToAtomic tpl = - let (aname, lreg, inputClobbers, outputClobbers, outputOvClobbers, llocal, ret, program) = tpl - in case parse programParser "" program of - Left err -> Left $ show err ++ "In atomic " ++ aname - Right seq -> Right $ Atomic {name = aname - , parameters = lreg - , outputClobbers = outputClobbers - , inputClobbers = inputClobbers - , outputNoOverlapClobbers = outputOvClobbers - , locals = llocal - , returns = ret - , instructions = seq } - -pruneDoubleBSlash :: String -> String -pruneDoubleBSlash [] = [] -pruneDoubleBSlash ('\\':'n':xs) = '\n' : pruneDoubleBSlash xs -pruneDoubleBSlash (x:xs) = x : pruneDoubleBSlash xs - -checkJumps :: Atomic -> Either String Atomic -checkJumps a = let (InstrSeq instr) = instructions a in - let err = checkForward instr [] ++ checkBackward instr [] in - case err of - "" -> Right a - _ -> Left err - where checkForward :: [Instr] -> [String] -> String - checkForward [] [] = "" - checkForward [] l = "Missused forward jumps: " ++ concatMap (++ ", ") l ++ "\n" - checkForward ((B _ label 'f') : t) l = checkForward t (label : l) - checkForward ((Cb _ _ label 'f') : t) l = checkForward t (label : l) - checkForward ((Label ('.' : s)) : t) l = checkForward t $ filter (/= s) l - checkForward (h : t) l = checkForward t l - - checkBackward :: [Instr] -> [String] -> String - checkBackward [] _ = "" - checkBackward ((B _ label 'b') : t) l - | label `elem` l = checkBackward t l - | otherwise = "Missused backward jump [" ++ name a ++ "]: " ++ label ++ "\n" ++ checkBackward t l - checkBackward ((Cb _ _ label 'b') : t) l - | label `elem` l = checkBackward t l - | otherwise = "Missused backward jump [" ++ name a ++ "]: " ++ label ++ "\n" ++ checkBackward t l - checkBackward ((Label ('.' : s)) : t) l = checkBackward t (s : l) - checkBackward (h : t) l = checkBackward t l -checkAtomic :: Atomic -> Either String (String, Atomic) -checkAtomic a = do - r2 <- checkJumps a - r3 <- getFile r2 - return (r3, r2) - -knownAtomics = sort $ simpRMWFunctions ++ getRMWFunctions ++ xchgFunctions ++ awaitFunctions ++ readsWritesFunctions - -checkIfAll :: [Atomic] -> Either String [Atomic] -checkIfAll l = let names = sort . map name $ l - in if names == knownAtomics - then Right l - else Left "Error: Atomics don't match list of names\n" - -getFile :: Atomic -> Either String String -getFile a - | isRead (name a) || isWrite (name a) = Right "reads_writes.bpl" - | isAwait $ name a = Right "awaits.bpl" - | isRMW $ name a = Right "rmws.bpl" - | isXCHG $ name a = Right "xchgs.bpl" - | otherwise = Left $ "Unknown atomic " ++ name a ++ "\n" - - -printInFiles :: [Atomic] -> IO() -printInFiles = foldr ((>>) . printer . checkAtomic) (return ()) - where printer :: Either String (String, Atomic) -> IO() - printer h = case h of - Left err -> exitWithErrorMessage (show err) (ExitFailure 2) - Right (file, a) -> appendFile file (boogieAtomic a) - -exitWithErrorMessage :: String -> ExitCode -> IO a -exitWithErrorMessage str e = hPutStrLn stderr str >> exitWith e - -printAtomicList :: [Atomic] -> IO() -printAtomicList = foldr ((>>) . 
(\a -> appendFile "atomics_list.txt" (name a ++ "\n"))) (return ()) - -main :: IO () -main = do - args <- getArgs - case args of - (fileName:_) -> do - putStrLn $ "fileName: " ++ fileName - file <- readFile fileName - writeFile "reads_writes.bpl" "" - writeFile "awaits.bpl" "" - writeFile "rmws.bpl" "" - writeFile "xchgs.bpl" "" - writeFile "atomics_list.txt" "" - case parseFile cTypes $ pruneDoubleBSlash file of - Left err -> exitWithErrorMessage (show err) (ExitFailure 2) - Right result -> let atomicList = toAtomic result - in case atomicList of - Left err -> exitWithErrorMessage (show err) (ExitFailure 2) - Right atomicListNoError -> - case checkIfAll atomicListNoError of - Left err -> exitWithErrorMessage (show err) (ExitFailure 2) - Right lat -> do - printInFiles lat - printAtomicList lat - _ -> putStrLn "Insufficient arguments! expecting atomic file" diff --git a/verify/README.md b/verify/README.md new file mode 100644 index 00000000..00c985b9 --- /dev/null +++ b/verify/README.md @@ -0,0 +1 @@ +See `../doc/VERIFICATION.md` for details. diff --git a/verify/armv8/library.bpl b/verify/armv8/library.bpl new file mode 100644 index 00000000..7919d865 --- /dev/null +++ b/verify/armv8/library.bpl @@ -0,0 +1,304 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +datatype Flags { + Flags ( + N: bool, + Z: bool, + C: bool + ) +} + +datatype Ordering { + AcquirePC(), + Acquire(), + Release(), + AcquireRelease(), + Fence(mode : FenceType), + NoOrd() +} + +datatype Monitor { + exclusive(addr: bv64), + open() +} + +var local_monitor: Monitor; +var flags: Flags; +var monitor_exclusive: bool; +var event_register: bool; + +datatype FenceType { + SY(), + LD() +} + +datatype Instruction { + ld(acq: bool, addr: bv64, mask: bv64), + ldx(acq: bool, addr: bv64, mask: bv64), + st(rel: bool, src, write_mask, addr: bv64), + stx(rel: bool, src, write_mask, addr: bv64), + + csel(src1, src2: bv64, cond: bool), + mov(src: bv64), + cmp(opnd1, opnd2: bv64), + add(first, second: bv64), + sub(first, second: bv64), + andd(first, second: bv64), + orr(first, second: bv64), + eor(first, second: bv64), + wfe(), + sevl(), + + dmb(mode : FenceType), + // LSE instructions + + mvn(src: bv64), // bitwise complement (NOT) of src + neg(src: bv64), // arithmetic negation (two's complement) of src + + swp(acq, rel: bool, src, addr: bv64, mask: bv64, write_mask: bv64), // atomically exchanges src with the loaded value + cas(acq, rel: bool, exp, src, addr: bv64, mask: bv64, write_mask: bv64), // compare and swap + + ldumax(acq, rel: bool, src, addr: bv64, mask: bv64, write_mask: bv64), // unsigned maximum of src and the loaded value + ldclr(acq, rel: bool, src, addr: bv64, mask: bv64, write_mask: bv64), // bitwise AND of ~src and the loaded value (clears the bits set in src) + ldset(acq, rel: bool, src, addr: bv64, mask: bv64, write_mask: bv64), // bitwise OR of src and the loaded value + ldeor(acq, rel: bool, src, addr: bv64, mask: bv64, write_mask: bv64), // bitwise XOR of src and the loaded value + ldadd(acq, rel: bool, src, addr: bv64, mask: bv64, write_mask: bv64), // sum of src and the loaded value + + stumax(rel: bool, src, addr: bv64, mask: bv64, write_mask: bv64), // store the unsigned maximum of src and the loaded value + stclr(rel: bool, src, addr: bv64, mask: bv64, write_mask: bv64), // store the AND of ~src and the loaded value + stset(rel: bool, src, addr: bv64, mask: bv64, write_mask: bv64), // store the OR of src and the loaded value + steor(rel: bool, src, addr: bv64, mask: bv64, write_mask: bv64), // store the XOR of src and the loaded value + stadd(rel: bool, src, addr: bv64, mask: bv64, write_mask: bv64) // store the sum of src and the loaded value
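+    // NOTE: the st* variants above are the non-returning forms of the ld* RMW
+    // instructions; visible() below marks their reads as not visible to barriers.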
+} + +function returning_load(instr : Instruction) : bool { + instr is ld + || instr is ldx + || instr is swp + || instr is cas + || instr is ldumax + || instr is ldclr + || instr is ldset + || instr is ldeor + || instr is ldadd + || instr is stumax +} + + + +function visible(instr : Instruction) : bool { + ! (instr is stumax + || instr is stclr + || instr is stset + || instr is steor + || instr is stadd) +} + +function updated_value(instr: Instruction, read_value : bv64) : bv64 { + if instr is cas || instr is swp + then instr->src + else if instr is ldclr || instr is stclr + then and[bit_inv(instr->src), read_value] + else if instr is ldset || instr is stset + then or[instr->src, read_value] + else if instr is ldeor || instr is steor + then xor[instr->src, read_value] + else if instr is ldumax || instr is stumax + then max[instr->src, read_value] + else if instr is ldadd || instr is stadd + then add[instr->src, read_value] + else 0bv64 +} + +function rmw(instr: Instruction) : bool { + instr is swp + || instr is ldumax + || instr is stumax + || instr is ldclr + || instr is stclr + || instr is ldset + || instr is stset + || instr is ldeor + || instr is steor + || instr is ldadd + || instr is stadd +} + +function reads(instr: Instruction) : bool { + rmw(instr) || instr is ld || instr is ldx || instr is cas +} + +function writes(instr: Instruction) : bool { + rmw(instr) || instr is st +} + +procedure assume_requires_execute(instr: Instruction); + modifies step, local_monitor, monitor_exclusive, event_register, last_load, last_store; + ensures (instr is stx ==> local_monitor is exclusive && local_monitor->addr == instr->addr); + ensures instr is wfe ==> event_register || monitor_exclusive; + + +procedure execute_local(instr: Instruction) returns (r : bv64); + modifies flags; + ensures + (r == if instr is mov then instr->src + else if instr is add then bin_add(instr->first, instr->second) + else if instr is sub then bin_sub(instr->first, instr->second) + else if instr is andd then bit_and(instr->first, instr->second) + else if instr is orr then bit_or (instr->first, instr->second) + else if instr is eor then bit_xor(instr->first, instr->second) + else if instr is mvn then bit_inv(instr->src) + else if instr is neg then bin_neg(instr->src) + else if instr is csel then if instr->cond then instr->src1 else instr->src2 + else r) + && + (flags == if instr is cmp + then ( + Flags(ult(instr->opnd1, instr->opnd2), instr->opnd1 == instr->opnd2, uge(instr->opnd1, instr->opnd2)) + ) + else + old(flags) + ); + +procedure execute(instr: Instruction) returns (r : bv64); + modifies step, local_monitor, monitor_exclusive, event_register, last_load, last_store; + ensures step == old(step + 1); + ensures {:msg "state"} ( + var stx_success, cas_success := + old(local_monitor == exclusive(instr->addr) + && monitor_exclusive), + r == instr->exp; + (r == if instr is stx then b2i(! 
stx_success) + else if returning_load(instr) then bit_and(r, instr->mask) + else r) + && + (last_load == if reads(instr) + then + old(step) + else + old(last_load)) + && + (last_store == if writes(instr) || rmw(instr) + || (instr is cas && cas_success) + || (instr is stx && stx_success) + then + old(step) + else + old(last_store)) + && + (local_monitor == if instr is ldx then exclusive(instr->addr) + else if instr is stx + || instr is cas + || reads(instr) || writes(instr) + || instr is wfe + then open() + else old(local_monitor)) + && + (effects[old(step)] == + if rmw(instr) + || (instr is cas && cas_success) + then update(instr->addr, r, visible(instr), updated_value(instr, r), instr->write_mask) + else if writes(instr) || (instr is stx && stx_success) + then write(instr->addr, instr->src, instr->write_mask) + else if reads(instr) + then read(instr->addr, r, visible(instr)) + else no_effect() + ) + && + (ordering[old(step)] == if instr->acq && reads(instr) && (instr->rel && (writes(instr) + || (instr is stx && stx_success) + || (instr is cas && cas_success))) + then AcquireRelease() + else if instr->acq && reads(instr) + then Acquire() + else if instr->rel && (writes(instr) + || (instr is stx && stx_success) + || (instr is cas && cas_success)) + then Release() + else if instr is dmb + then Fence(instr->mode) + else NoOrd()) + && + (atomic[last_load, old(step)] == (rmw(instr) || (instr is stx && stx_success) || (instr is cas && cas_success))) + && + ( // external write can clear monitor at any moment. has to set event register. + (monitor_exclusive == false && event_register == old(monitor_exclusive || event_register)) + || monitor_exclusive == if instr is ldx then true + else if writes(instr) + || instr is stx + || (instr is cas && cas_success) + then false + else old(monitor_exclusive)) + && + /* D1.6.1 The Event Register + The Event Register for a PE is set by any of the following: + • A Send Event instruction, SEV, executed by any PE in the system. + • A Send Event Local instruction, SEVL, executed by the PE. + • An exception return. + • The clearing of the global monitor for the PE. + • An event from a Generic Timer event stream, see Event streams on page D11-5991. + • An event sent by some IMPLEMENTATION DEFINED mechanism. + [ Arm Architecture Reference Manual, version K.a ] + + NOTE: since we only care about proving that the event register is set upon reaching wfe, we just allow it to become set non-deterministically. + But it can be cleared only by wfe. 
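+
+          (In the encoding below this shows up as: once set, the event register
+          stays set until it is consumed by a wfe.)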
+ */ + (old(event_register) ==> (event_register || instr is wfe)) + && + (step == old(step) + 1) + ); + requires {:msg "either event register or global monitor is set for WFE"} + instr is wfe ==> event_register || monitor_exclusive; + requires {:msg "stx is paired to ldx"} + instr is stx ==> local_monitor == exclusive(instr->addr); + + +function cbnz(test: bv64): bool { + test != 0bv64 +} + +function cbz(test: bv64): bool { + test == 0bv64 +} + +// C1.2.4 Condition code +datatype ConditionCode { + EQ(), // Equal + NE(), // Not equal + HS(), // Unsigned higher or same + LO(), // Unsigned lower + HI(), // Unsigned higher + LS() // Unsigned lower or same +} + +function branch(cond: ConditionCode, flags: Flags): bool {( + var N, Z, C := flags->N, flags->Z, flags->C; + if cond is EQ then Z + else if cond is NE then !Z + else if cond is HS then C + else if cond is LO then !C + else if cond is HI then C && !Z + else if cond is LS then !(C && !Z) + else false // Should never be reached +)} + + +function ppo(step1, step2: StateIndex, ordering: [StateIndex] Ordering, effects: [StateIndex] Effect): bool { + step1 < step2 && ( + // Barrier-ordered-before + ordering[step1] is Acquire || ordering[step1] is AcquireRelease || + ordering[step1] is AcquirePC || + ordering[step2] is Release || ordering[step2] is AcquireRelease || + (ordering[step1] is Release && ordering[step2] is Acquire) || + (exists f : StateIndex :: step1 < f && f < step2 && ordering[f] == Fence(SY())) || + (exists f : StateIndex :: step1 < f && f < step2 && ordering[f] == Fence(LD()) + && is_read(effects[step1])) + ) +} + + +function is_sc(order: Ordering): bool { + order is Acquire || order is Release || order is AcquireRelease +} diff --git a/verify/await.bpl b/verify/await.bpl deleted file mode 100644 index 18280c85..00000000 --- a/verify/await.bpl +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. - * SPDX-License-Identifier: MIT - */ -type AwaitOp = [int, int] bool; - -const eq, neq, lt, le, gt, ge: AwaitOp; - -axiom eq == (lambda x, y: int :: x == y); -axiom neq == (lambda x, y: int :: x != y); -axiom lt == (lambda x, y: int :: x < y); -axiom le == (lambda x, y: int :: x <= y); -axiom gt == (lambda x, y: int :: x > y); -axiom ge == (lambda x, y: int :: x >= y); - -procedure await(op : AwaitOp, load_order : OrderRelation, addr, input : Register) returns (val : Register); - modifies step, states, effects, ordering, global_monitor_exclusive, event_register; - ensures {:msg "load happens within function bounds"} ( - var i := states[step]->last_load; - old(step) <= i && i < step - ); - ensures {:msg "val register contains correct value"} - states[step]->gpr[val] == memory[states[step]->last_load, old(states[step]->gpr[addr])]; - ensures {:msg "respect await operation"} - // different for different awaits - op[states[step]->gpr[val], old(states[step]->gpr[input])] == true; - ensures {:msg "load produces read effect to correct address"} - effects[states[step]->last_load][read(old(states[step]->gpr[addr]))]; - ensures {:msg "no write effects"} - no_writes(old(step), step, effects); - ensures {:msg "load ordering"} - load_order[states[step]->last_load, old(step), step, ordering]; \ No newline at end of file diff --git a/verify/boogie/auxiliary.bpl b/verify/boogie/auxiliary.bpl new file mode 100644 index 00000000..8d3e585c --- /dev/null +++ b/verify/boogie/auxiliary.bpl @@ -0,0 +1,274 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ * SPDX-License-Identifier: MIT + */ +/**************************************************************** + + This file offers reusable code across architectures. + + It assumes that each architecture / verification target defines the following: + + datatype Instruction + instruction types + + datatype Ordering + different ordering types, like acquire and release ordering or fences + + function is_sc(order: Ordering) : bool + for RCsc + + function ppo(i, j: StateIndex, ordering: [StateIndex] Ordering, effects: [StateIndex] Effect): bool + defining whether i --ppo-> j with the ordering and effects + + + and that the templated code contains assumptions such as: + + assume sc_impl is ...; + telling which SC implementation is used (TrailingFence, LeadingFence, RCsc) + + as well as function parameters: + + assume load_order == ...; + assume fence_order == ...; + ... + +****************************************************************/ + + + + +// This built-in Boogie function returns a constant array +function {:builtin "MapConst"} ConstArray<T,U>(U): [T]U; + +type StateIndex = int; + + + +datatype Effect { + // read(a,v,vis) == read the value v at address a; vis says whether this read is visible to barriers + read(addr: bv64, read_value: bv64, read_visible: bool), + write(addr: bv64, write_value, write_mask: bv64), + update(addr: bv64, read_value: bv64, read_visible: bool, write_value, write_mask: bv64), + no_effect() +} + + +function is_effect(effect: Effect) : bool { + ! (effect is no_effect) +} +function is_read(effect: Effect) : bool { + effect is read || effect is update +} + +function is_write(effect: Effect) : bool { + effect is write || effect is update +} + +var last_load, last_store: StateIndex; +var step: StateIndex; +// atomic[i,j] == the read at step i and the write at step j form one atomic RMW +var atomic: [StateIndex,StateIndex] bool; +var effects: [StateIndex] Effect; +var ordering: [StateIndex] Ordering; + + + + + +function {:bvbuiltin "bvshl"} shift_left(bv64,bv64) returns(bv64); +function {:bvbuiltin "bvlshr"} shift_right(bv64,bv64) returns(bv64); +function {:bvbuiltin "bvand"} bit_and(bv64,bv64) returns(bv64); +function {:bvbuiltin "bvor"} bit_or(bv64,bv64) returns(bv64); +function {:bvbuiltin "bvxor"} bit_xor(bv64,bv64) returns(bv64); +function {:bvbuiltin "bvnot"} bit_inv(bv64) returns(bv64); +function {:bvbuiltin "bvneg"} bin_neg(bv64) returns(bv64); + +function {:bvbuiltin "bvsub"} bin_sub(bv64,bv64) returns(bv64); +function {:bvbuiltin "bvadd"} bin_add(bv64,bv64) returns(bv64); + +function {:bvbuiltin "bvule"} ule(bv64,bv64) returns(bool); +function {:bvbuiltin "bvult"} ult(bv64,bv64) returns(bool); +function {:bvbuiltin "bvuge"} uge(bv64,bv64) returns(bool); +function {:bvbuiltin "bvugt"} ugt(bv64,bv64) returns(bool); + +function {:bvbuiltin "bvsle"} sle(bv64,bv64) returns(bool); +function {:bvbuiltin "bvslt"} slt(bv64,bv64) returns(bool); +function {:bvbuiltin "bvsge"} sge(bv64,bv64) returns(bool); +function {:bvbuiltin "bvsgt"} sgt(bv64,bv64) returns(bool); + + + +// align_value/extract_value model sub-word accesses on 64-bit words: +// (addr & 3) * 8 is the bit offset of the byte lane, and value_mask selects the access width. +function align_value(address : bv64, value : bv64, old_value : bv64, value_mask: bv64) : bv64 { + bit_or( + shift_left( + bit_and(value, value_mask), + shift_left(bit_and(address, 3bv64), 3bv64)), + bit_and(old_value, bit_inv(shift_left(value_mask, shift_left(bit_and(address, 3bv64), 3bv64))))) +} + +function extract_value(address : bv64, value : bv64) : bv64 { + shift_right(value, + shift_left(bit_and(address, 3bv64), 3bv64)) +} + + +const max: [bv64, bv64] bv64; +axiom max == (lambda x, y: bv64 :: + if uge(x, y) then x else y +); + + +const min: [bv64, bv64] bv64; +axiom min == (lambda x, y: bv64 :: + if
ule(x, y) then x else y +); + + +const add: [bv64, bv64] bv64; +axiom add == (lambda x, y: bv64 :: bin_add(x, y)); + +const sub: [bv64, bv64] bv64; +axiom sub == (lambda x, y: bv64 :: bin_sub(x, y)); + +const and: [bv64, bv64] bv64; +axiom and == (lambda x, y: bv64 :: bit_and(x, y)); + +const or: [bv64, bv64] bv64; +axiom or == (lambda x, y: bv64 :: bit_or(x, y)); + +const xor: [bv64, bv64] bv64; +axiom xor == (lambda x, y: bv64 :: bit_xor(x, y)); + +function i2b(i:bv64) returns (bool) { i != 0bv64 } +function b2i(b:bool) returns (bv64) { if b then 1bv64 else 0bv64 } + + +type OrderRelation = [StateIndex, StateIndex, StateIndex, [StateIndex] Ordering, [StateIndex] Effect] bool; + +const order_rlx: OrderRelation; +axiom order_rlx == (lambda i, entry, exit: StateIndex, ordering: [StateIndex] Ordering, effects: [StateIndex] Effect :: + true +); + +const order_acq: OrderRelation; +axiom order_acq == (lambda load, entry, exit: StateIndex, ordering: [StateIndex] Ordering, effects: [StateIndex] Effect :: + (forall i: StateIndex :: + (i >= exit) && is_effect(effects[i]) ==> ppo(load, i, ordering, effects) + ) +); + +const order_rel: OrderRelation; +axiom order_rel == (lambda store, entry, exit: StateIndex, ordering: [StateIndex] Ordering, effects: [StateIndex] Effect :: + (forall i: StateIndex :: + (i < entry) && is_effect(effects[i]) ==> ppo(i, store, ordering, effects) + ) +); + +datatype SCImplementation { LeadingFence(), TrailingFence(), Mixed(), RCsc() } +const sc_impl: SCImplementation; + +const order_acq_sc: OrderRelation; +axiom order_acq_sc == (lambda load, entry, exit: StateIndex, ordering: [StateIndex] Ordering, effects: [StateIndex] Effect :: + order_acq[load, entry, exit, ordering, effects] && + if sc_impl is LeadingFence then + // ordered with all previous operations + (forall i: StateIndex :: + (i < entry) && is_effect(effects[i]) ==> ppo(i, load, ordering, effects) + ) + else if sc_impl is TrailingFence then + true + else if sc_impl is RCsc then + // ordered with all previous SC operations + is_sc(ordering[load]) + else if sc_impl is Mixed then + is_sc(ordering[load]) + || (forall i: StateIndex :: + (i < entry) && is_effect(effects[i]) ==> ppo(i, load, ordering, effects) + ) + else false +); + +const order_rel_sc: OrderRelation; +axiom order_rel_sc == (lambda store, entry, exit: StateIndex, ordering: [StateIndex] Ordering, effects: [StateIndex] Effect :: + order_rel[store, entry, exit, ordering, effects] && + if sc_impl is LeadingFence then + true + else if sc_impl is TrailingFence then + // ordered with all later operations + (forall i: StateIndex :: + (i >= exit) && is_effect(effects[i]) ==> ppo(store, i, ordering, effects) + ) + else if sc_impl is RCsc then + // ordered with all later SC operations + is_sc(ordering[store]) + else if sc_impl is Mixed then + is_sc(ordering[store]) + || (forall i: StateIndex :: + (i >= exit) && is_effect(effects[i]) ==> ppo(store, i, ordering, effects) + ) + else false +); + + +const order_fence_acq: OrderRelation; +axiom order_fence_acq == (lambda fence, entry, exit: StateIndex, ordering: [StateIndex] Ordering, effects: [StateIndex] Effect :: + (forall i, j: StateIndex :: + (i < entry) && (j >= exit) && (is_read(effects[i]) && effects[i]->read_visible) && is_effect(effects[j]) + ==> ppo(i, j, ordering, effects)) +); + +const order_fence_rel: OrderRelation; +axiom order_fence_rel == (lambda fence, entry, exit: StateIndex, ordering: [StateIndex] Ordering, effects: [StateIndex] Effect :: + (forall i, j: StateIndex :: + (i < entry) && (j >= exit) && 
is_effect(effects[i]) && is_write(effects[j]) + ==> ppo(i, j, ordering, effects)) +); +const order_fence_sc: OrderRelation; +axiom order_fence_sc == (lambda fence, entry, exit: StateIndex, ordering: [StateIndex] Ordering, effects: [StateIndex] Effect :: + (forall i, j: StateIndex :: + (i < entry) && (j >= exit) && is_effect(effects[i]) && is_effect(effects[j]) + ==> ppo(i, j, ordering, effects)) +); + + + +function no_writes(from, to, write: StateIndex): bool { + (write < from || to <= write) +} + +function valid_mask(val, mask : bv64) : bool { + val == bit_and(val, mask) +} + + +type RMWOp = [bv64, bv64, bv64] bv64; + +const cmpset, add_op, sub_op, set_op, min_op, max_op, dec_op, inc_op, ret_old, xor_op, and_op, or_op: RMWOp; + +axiom cmpset == (lambda x, y1, y2 : bv64 :: if x == y1 then y2 else x); +axiom add_op == (lambda x, y, _: bv64 :: bin_add(x, y)); +axiom sub_op == (lambda x, y, _: bv64 :: bin_sub(x, y)); +axiom set_op == (lambda x, y, _: bv64 :: y); +axiom min_op == (lambda x, y, _: bv64 :: min[x, y]); +axiom max_op == (lambda x, y, _: bv64 :: max[x, y]); +axiom dec_op == (lambda x, _1, _2: bv64 :: bin_sub(x, 1bv64)); +axiom inc_op == (lambda x, _1, _2: bv64 :: bin_add(x, 1bv64)); +axiom and_op == (lambda x, y, _ : bv64 :: bit_and(x, y)); +axiom or_op == (lambda x, y, _ : bv64 :: bit_or(x, y)); +axiom xor_op == (lambda x, y, _ : bv64 :: bit_xor(x, y)); + +axiom ret_old == (lambda x, _1, _2 : bv64 :: x); + +const bit8, bit16 : [RMWOp] RMWOp; +axiom bit8 == (lambda op : RMWOp :: (lambda x, y1, y2 : bv64 :: op[x, bit_and(y1,255bv64), bit_and(y2, 255bv64)])); +axiom bit16 == (lambda op : RMWOp :: (lambda x, y1, y2 : bv64 :: op[x, bit_and(y1,65535bv64), bit_and(y2, 65535bv64)])); + +type AwaitOp = [bv64, bv64] bool; + +const eq, neq, lt, le, gt, ge: AwaitOp; + +axiom eq == (lambda x, y: bv64 :: x == y); +axiom neq == (lambda x, y: bv64 :: x != y); +axiom lt == (lambda x, y: bv64 :: ult(x, y)); +axiom le == (lambda x, y: bv64 :: ule(x, y)); +axiom gt == (lambda x, y: bv64 :: ugt(x, y)); +axiom ge == (lambda x, y: bv64 :: uge(x, y)); \ No newline at end of file diff --git a/verify/boogie/correctness.bpl b/verify/boogie/correctness.bpl new file mode 100644 index 00000000..4e498cdf --- /dev/null +++ b/verify/boogie/correctness.bpl @@ -0,0 +1,55 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +/*************************** + + This file contains correctness conditions that need to be proven about the ISA. + +*/ + +/* ensure SC accesses provide ppo between each other. 
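+   Only the RCsc and Mixed implementations rely on this pairwise guarantee;
+   for LeadingFence/TrailingFence the ordering comes from explicit fences,
+   which is why the postcondition below is restricted to RCsc and Mixed
+   (and is vacuously true otherwise).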
*/ +procedure verify_sc() +ensures (sc_impl is RCsc || sc_impl is Mixed) ==> (forall i, j : StateIndex :: + is_effect(effects[i]) && is_effect(effects[j]) && is_sc(ordering[i]) && is_sc(ordering[j]) && i < j ==> ppo(i, j, ordering, effects)); +{ + +} + + +/* Prove meta-properties about execute that are used in the proof */ +procedure verify_execute(instr : Instruction) returns (r : bv64) + #modifies; + ensures {:msg "load return is correct"} ( + forall a, v: bv64, vis : bool :: + effects[step-1] == read(a,v,vis) && returning_load(instr) ==> + r == v + ); + + ensures {:msg "last_load tracked correctly"} ( + (is_read(effects[step-1])) == + (step-1 == last_load) + ); + ensures ( // can define no_writes through last_store + (is_write(effects[step-1])) == + (step-1 == last_store) + ); + ensures last_load < step; + ensures last_store < step; + + ensures (forall i : StateIndex :: + atomic[i, step-1] ==> i == last_load && step-1 == last_store + ); + ensures (forall i, j : StateIndex :: + atomic[i, j] ==> i <= j && j < step); +{ + var last_step : StateIndex; + call assume_requires_execute(instr); + + assume last_load < step; + assume last_store < step; + assume (forall i, j : StateIndex :: atomic[i, j] ==> i <= j && j < step); + last_step := step; + call r := execute(instr); + assert (step == last_step + 1); +} \ No newline at end of file diff --git a/verify/boogie/templates/await.bpl b/verify/boogie/templates/await.bpl new file mode 100644 index 00000000..0e1b0b4f --- /dev/null +++ b/verify/boogie/templates/await.bpl @@ -0,0 +1,12 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +/* + cond - await condition relating the loaded value and the input +*/ +procedure await(cond : AwaitOp) + modifies step, last_load, last_store, #state, #registers; + + ensures {:msg "satisfy await condition"} + cond[effects[last_load]->read_value, old(#input1)]; +{ + #implementation +} diff --git a/verify/boogie/templates/fence.bpl b/verify/boogie/templates/fence.bpl new file mode 100644 index 00000000..b7e67f98 --- /dev/null +++ b/verify/boogie/templates/fence.bpl @@ -0,0 +1,16 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +/* + fence_order - ordering of fence +*/ +procedure fence(fence_order: OrderRelation) + modifies step, effects, ordering, atomic, last_load, last_store, #state, #registers; + ensures {:msg "no writes"} no_writes(old(step), step, last_store); + ensures {:msg "fence ordering"} + fence_order[0, old(step), step, ordering, effects]; +{ + #implementation +} + diff --git a/verify/boogie/templates/must_store.bpl b/verify/boogie/templates/must_store.bpl new file mode 100644 index 00000000..919cc026 --- /dev/null +++ b/verify/boogie/templates/must_store.bpl @@ -0,0 +1,24 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+ * SPDX-License-Identifier: MIT + */ +procedure must_store() + modifies step, last_load, last_store, #state, #registers; + ensures {:msg "store happens within function bounds"} ( + old(step) <= last_store && last_store < step + ); + ensures {:msg "produces a write effect"} + is_write(effects[last_store]); + ensures {:msg "basic write or RMW"} + ((effects[last_store]->write_mask == #value_mask && effects[last_store]->addr == old(#address)) + || atomic[last_load, last_store]) + && bit_and(effects[last_store]->write_value, effects[last_store]->write_mask) == + bit_and(align_value( + bin_sub(old(#address), effects[last_store]->addr), + old(#input1), + effects[last_load]->read_value, #value_mask), + effects[last_store]->write_mask); +{ + #implementation +} + diff --git a/verify/boogie/templates/read.bpl b/verify/boogie/templates/read.bpl new file mode 100644 index 00000000..a8d5e454 --- /dev/null +++ b/verify/boogie/templates/read.bpl @@ -0,0 +1,25 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +/* + ret - computation of return value (either ret_old, for normal read/rmw, or the same as op, for ..._get_... rmw) + load_order - ordering of load +*/ + +procedure read(ret : RMWOp, load_order: OrderRelation) + modifies step, last_load, last_store, #state, #registers; + ensures {:msg "load happens within function bounds"} + old(step) <= last_load && last_load < step; + ensures {:msg "load order"} + load_order[last_load, old(step), step, ordering, effects]; + ensures {:msg "is visible"} + effects[last_load]->read_visible; + ensures {:msg "correct output"} + (var extracted := bit_and(extract_value(bin_sub(old(#address), effects[last_load]->addr), effects[last_load]->read_value), #value_mask); + (var returned := ret[extracted, old(#input1), old(#input2)]; + (bit_and(#output, #value_mask) == bit_and(returned, #value_mask)))); + +{ + #implementation +} \ No newline at end of file diff --git a/verify/boogie/templates/read_only.bpl b/verify/boogie/templates/read_only.bpl new file mode 100644 index 00000000..d56bed5e --- /dev/null +++ b/verify/boogie/templates/read_only.bpl @@ -0,0 +1,12 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +procedure read_only() + modifies step, last_load, last_store, #state, #registers; + ensures no_writes(old(step), step, last_store); + ensures {:msg "produced read effects are correct"} + old(step) <= last_load && last_load < step ==> effects[last_load] == read(old(#address), #output, true); +{ + #implementation +} \ No newline at end of file diff --git a/verify/boogie/templates/registers.bpl b/verify/boogie/templates/registers.bpl new file mode 100644 index 00000000..01842a97 --- /dev/null +++ b/verify/boogie/templates/registers.bpl @@ -0,0 +1,5 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +var #registers: bv64; \ No newline at end of file diff --git a/verify/boogie/templates/rmw.bpl b/verify/boogie/templates/rmw.bpl new file mode 100644 index 00000000..16a98aa0 --- /dev/null +++ b/verify/boogie/templates/rmw.bpl @@ -0,0 +1,37 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ * SPDX-License-Identifier: MIT + */ +/* + op - operation to be performed +*/ + +procedure rmw (op: RMWOp) + modifies step, last_load, last_store, #state, #registers; + + ensures {:msg "if no write happened, the value from memory is already the result of operation"} ( + var address, input1, input2 := old(#address), old(#input1), old(#input2); + no_writes(old(step), step, last_store) ==> + ( + var extracted := bit_and(extract_value(bin_sub(address, effects[last_load]->addr), effects[last_load]->read_value), #value_mask); + extracted == op[extracted, input1, input2] + ) + ); + ensures {:msg "atomicity"} + !no_writes(old(step), step, last_store) ==> ( + atomic[last_load, last_store] + ); + ensures {:msg "store produces write to correct address with correct value"} + !no_writes(old(step), step, last_store) ==> ( + var address, input1, input2 := old(#address), old(#input1), old(#input2); + (var extracted := bit_and(extract_value(bin_sub(address, effects[last_load]->addr), effects[last_load]->read_value), #value_mask); + bit_and(effects[last_store]->write_value, effects[last_store]->write_mask) == + bit_and(align_value(bin_sub(address, effects[last_load]->addr), + bit_and(op[extracted, input1, input2], #value_mask), + effects[last_load]->read_value, + #value_mask), effects[last_store]->write_mask) + ) + ); +{ + #implementation +} \ No newline at end of file diff --git a/verify/boogie/templates/write.bpl b/verify/boogie/templates/write.bpl new file mode 100644 index 00000000..f320c87a --- /dev/null +++ b/verify/boogie/templates/write.bpl @@ -0,0 +1,20 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +/* + store_order - ordering of store +*/ +procedure write(store_order: OrderRelation) + modifies step, last_load, last_store, #state, #registers; + ensures {:msg "no other writes"} + (forall i : StateIndex :: + old(step) <= i && i < step && (exists e : Effect :: effects[i] == e && (is_write(e))) + ==> i == last_store); + ensures {:msg "store ordering"} + !no_writes(old(step), step, last_store) + ==> store_order[last_store, old(step), step, ordering, effects]; +{ + #implementation +} + diff --git a/verify/cleaner.sh b/verify/cleaner.sh new file mode 100755 index 00000000..aaacb34f --- /dev/null +++ b/verify/cleaner.sh @@ -0,0 +1,26 @@ +#!/bin/sh + +if [ -z "$1" ]; then + echo "Usage: $0 filename" + exit 1 +fi + +input="$1" +output="$input" + +# Conditions for conditional branches +conds="eq|ne|cs|cc|mi|pl|vs|vc|hi|ls|ge|lt|gt|le|al|nv|hs|lo" + +tmp=$(mktemp) + +sed -E " + s/^([[:space:]]*)([0-9][a-zA-Z0-9]*:)/\1.\2/ + s/\b(b(\.?($conds))?[[:space:]]+)([0-9][a-zA-Z0-9]*)/\1.\4/ + s/\b(bl)[[:space:]]+([0-9][a-zA-Z0-9]*)/\1 .\2/ + s/\b(cbz|cbnz)[[:space:]]+([^,]+,[[:space:]]*)([0-9][a-zA-Z0-9]*)/\1 \2.\3/ + s/\b(tbz)[[:space:]]+([^,]+,[[:space:]]*[^,]+,[[:space:]]*)([0-9][a-zA-Z0-9]*)/\1 \2.\3/ +" "$input" \ + | grep -vE '^\s*($|#|//)' > "$tmp" + +mv "$tmp" "$output" +echo "Cleaned file saved as $output" diff --git a/verify/generate.sh b/verify/generate.sh new file mode 100644 index 00000000..1c2179e4 --- /dev/null +++ b/verify/generate.sh @@ -0,0 +1,50 @@ +#!/bin/sh +set -eu + +if [ $# -ne 2 ]; then + echo "Usage: $0 FUNCTION_NAME ARCH" + exit 1 +fi + +FUNC="$1" +ARCH="$2" + +case "${ARCH}" in + armv8) + ARCHS="./armv8/atomics.s" + ;; + riscv) + ARCHS="./riscv/atomics.s" + ;; + *) + echo "Unknown architecture: $ARCH" + exit 1 + ;; +esac + +UNROLL= +OUT="out/$ARCH" +OUT_RETRY="out_retry/$ARCH" +TMPFUNCS=$(mktemp) +echo "$FUNC" > 
"$TMPFUNCS" + +compile() { + outdir="$1" + unroll="$2" + if $unroll; then + UNROLL="--unroll" + fi + echo "Generating $FUNC for $ARCH (unroll=$unroll, outdir=$outdir)" + cargo run --quiet -- \ + --input "${ARCHS}" \ + --functions "$TMPFUNCS" \ + --templates ./boogie/templates/ \ + --directory "$outdir" \ + --arch "$ARCH" $UNROLL +} + +# generate for both phases +compile "$OUT" "false" +compile "$OUT_RETRY" "true" + +rm "$TMPFUNCS" diff --git a/verify/library.bpl b/verify/library.bpl deleted file mode 100644 index 5e576c89..00000000 --- a/verify/library.bpl +++ /dev/null @@ -1,714 +0,0 @@ -/* - * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. - * SPDX-License-Identifier: MIT - */ -// This builtin boogie function returns const array -function {:builtin "MapConst"} ConstArray(U): [T]U; - -type Register; -type StateIndex = int; -// type Word = bv64; - -// function {:bvbuiltin "(_ int2bv 64)"} int2bv_64(x: int): bv64; - -// function {:bvbuiltin "bv2int"} bv2int_64(bv: bv64): int; - -// function {:bvbuiltin "bvand"} bvand_64(x: bv64, y: bv64): bv64; - -// function {:bvbuiltin "bvor"} bvor_64(x: bv64, y: bv64): bv64; - -// function {:bvbuiltin "bvxor"} bvxor_64(x: bv64, y: bv64): bv64; - -// No implementation needed -// The variant with implementation makes it run very slow and it gives no extra information -// The version without implementation creates a generic operation for each case -// NOTE: There is an issue on the boogie repo with regards to this -function bit_and(x: int, y: int): int; -function bit_or(x: int, y: int): int; -function bit_xor(x: int, y: int): int; - -function bit_not(x: int): int; - -axiom (forall x: int :: bit_not(bit_not(x)) == x); // double negation on bits -axiom (forall x: int, y: int :: bit_and(x, y) == bit_and(y, x)); -axiom (forall x: int, y: int :: bit_or(x, y) == bit_or(y, x)); -axiom (forall x: int, y: int :: bit_xor(x, y) == bit_xor(y, x)); - -const max: [int, int] int; -axiom max == (lambda x, y: int :: - if x > y then x else y -); - -const add: [int, int] int; -axiom add == (lambda x, y: int :: x + y); - -const sub: [int, int] int; -axiom sub == (lambda x, y: int :: x - y); - -const and: [int, int] int; -axiom and == (lambda x, y: int :: bit_and(x, y)); - -const or: [int, int] int; -axiom or == (lambda x, y: int :: bit_or(x, y)); - -const xor: [int, int] int; -axiom xor == (lambda x, y: int :: bit_xor(x, y)); - -datatype Flags { - Flags ( - N: bool, - Z: bool, - C: bool - ) -} - -datatype Effect { - read(addr: int), - write(addr: int, value: int) -} - -datatype Ordering { - AcquirePC(), - Acquire(), - Release() -} - -datatype Monitor { - exclusive(addr: int, step: StateIndex), - open() -} - -datatype State { - State( - gpr: [Register] int, - flags: Flags, - local_monitor: Monitor, - last_load, last_store: StateIndex - ) -} - -var step: StateIndex; -var states: [StateIndex] State; -var global_monitor_exclusive: [StateIndex] bool; -var event_register: [StateIndex] bool; -var memory: [StateIndex, int] int; -var effects: [StateIndex][Effect] bool; -var ordering: [StateIndex][Ordering] bool; - -var store_clears_local_monitor: bool; -var store_clears_global_monitor: bool; - -datatype Instruction { - ld(acq: bool, dest, addr: Register), - ldx(acq: bool, dest, addr: Register), - st(rel: bool, src, addr: Register), - stx(rel: bool, status, src, addr: Register), - - mov(dest, src: Register), - cmp(opnd1, opnd2: Register), - add(dest, first, second: Register), - sub(dest, first, second: Register), - andd(dest, first, second: Register), - orr(dest, 
first, second: Register), - eor(dest, first, second: Register), - wfe(), - sevl(), - - //LSE instructions - - mvn(dest, src: Register), // complements the bits in result - neg(dest, src: Register), // negates the bits in the result - - swp(acq, rel: bool, src, dest, addr: Register), // exchanges - cas(acq, rel: bool, dest, src, addr: Register), // compare and swap - - ldumax(acq, rel: bool, src, dest, addr: Register), // maximum between src register, and loaded value - ldclr(acq, rel: bool, src, dest, addr: Register), // bitwise and between src and ~loaded value - ldset(acq, rel: bool, src, dest, addr: Register), // bitwise or between src and loaded value - ldeor(acq, rel: bool, src, dest, addr: Register), // bitwise xor between src and loaded value - ldadd(acq, rel: bool, src, dest, addr: Register), // sum of src and loaded value - - stumax(rel: bool, src, addr: Register), // store maximum between src and addr - stclr(rel: bool, src, addr: Register), // store and between src and ~addr - stset(rel: bool, src, addr: Register), // store or - steor(rel: bool, src, addr: Register), // store xor - stadd(rel: bool, src, addr: Register) // store sum -} - - -procedure execute(instr: Instruction) - modifies states, effects, ordering, step, global_monitor_exclusive, event_register; - ensures step == old(step + 1); - ensures {:msg "state"} - states == old(states[ - step + 1 := ( - var gpr, local_monitor := states[step]->gpr, states[step]->local_monitor; ( - var stx_success, cas_success := - local_monitor is exclusive - && local_monitor->addr == gpr[instr->addr] - && global_monitor_exclusive[step], - memory[step, gpr[instr->addr]] == gpr[instr->dest]; - states[step] - ->(gpr := - if instr is ld - || instr is ldx - || instr is swp - || instr is cas - || instr is ldumax - || instr is ldclr - || instr is ldset - || instr is ldeor - || instr is ldadd - then - gpr[instr->dest := memory[step, gpr[instr->addr]]] - else if instr is stx then - gpr[instr->status := if stx_success then 0 else 1] - else if instr is mov then - gpr[instr->dest := gpr[instr->src]] - else if instr is add then - gpr[instr->dest := gpr[instr->first] + gpr[instr->second]] - else if instr is sub then - gpr[instr->dest := gpr[instr->first] - gpr[instr->second]] - else if instr is andd then - gpr[instr->dest := bit_and(gpr[instr->first], gpr[instr->second])] - else if instr is orr then - gpr[instr->dest := bit_or(gpr[instr->first], gpr[instr->second])] - else if instr is eor then - gpr[instr->dest := bit_xor(gpr[instr->first], gpr[instr->second])] - else if instr is mvn then - gpr[instr->dest := bit_not(gpr[instr->src])] - else if instr is neg then - gpr[instr->dest := 0 - gpr[instr->src]] - else - gpr - ) - ->(last_load := - if instr is ld - || instr is ldx - || instr is swp - || instr is cas - || instr is ldumax - || instr is ldclr - || instr is ldset - || instr is ldeor - || instr is ldadd - || instr is stumax - || instr is stclr - || instr is stset - || instr is steor - || instr is stadd - then - step - else - states[step]->last_load - ) - ->(last_store := - if instr is st - || instr is swp - || instr is ldclr - || instr is ldset - || instr is ldeor - || instr is ldadd - || (instr is cas && cas_success) - || (instr is stx && stx_success) - || (instr is ldumax || instr is stumax) - || instr is stclr - || instr is stset - || instr is steor - || instr is stadd - then - step - else - states[step]->last_store - ) - ->(local_monitor := - if instr is ldx then - exclusive(gpr[instr->addr], step) - else - if instr is stx || ( - 
store_clears_local_monitor && ( - instr is st - || instr is swp - || instr is ldset - || instr is ldclr - || instr is ldeor - || instr is ldadd - || instr is stclr - || instr is stset - || instr is steor - || instr is stadd - || (instr is cas && cas_success) - || (instr is ldumax || instr is stumax) - - ) - ) - then - open() - else - local_monitor - ) - ->(flags := - if instr is cmp - then ( - var diff := gpr[instr->opnd1] - gpr[instr->opnd2]; - Flags(diff < 0, diff == 0, diff >= 0) - ) - else - states[step]->flags - ) - )) - ]); - ensures {:msg "effects"} - (!(instr is ldset || instr is stset - || instr is ldeor || instr is steor - || instr is ldadd || instr is stadd - || instr is ldclr || instr is stclr - || instr is ldumax || instr is stumax)) ==> effects == old(effects[ - step := ( - var gpr, local_monitor := states[step]->gpr, states[step]->local_monitor; - ConstArray(false) - [read(gpr[instr->addr]) := - instr is ld - || instr is ldx - || instr is swp - || instr is cas - - - ] - // [ write(gpr[instr->addr], or[gpr[instr->src], memory[step, gpr[instr->addr]]]) := - // instr is ldset] - [write(gpr[instr->addr], gpr[instr->src]) := - instr is st || - instr is swp || - (instr is cas && - memory[step, gpr[instr->addr]] == gpr[instr->dest] - ) - || ( - instr is stx - && local_monitor is exclusive - && local_monitor->addr == gpr[instr->addr] - && global_monitor_exclusive[step] - ) - ] - - ) - ]); - ensures {:msg "ld/stset effects"} - (instr is ldset || instr is stset) ==> effects == old(effects[ - step := ( - var gpr, local_monitor := states[step]->gpr, states[step]->local_monitor; - ConstArray(false) - [read(gpr[instr->addr]) := true] - [write(gpr[instr->addr], or[memory[step, gpr[instr->addr]], gpr[instr->src]]) := true] - ) - ]); - ensures {:msg "ld/steor effects"} - (instr is ldeor || instr is steor) ==> effects == old(effects[ - step := ( - var gpr, local_monitor := states[step]->gpr, states[step]->local_monitor; - ConstArray(false) - [read(gpr[instr->addr]) := true] - [write(gpr[instr->addr], xor[memory[step, gpr[instr->addr]], gpr[instr->src]]) := true] - ) - ]); - ensures {:msg "ld/stumax effects"} - (instr is ldumax || instr is stumax) ==> effects == old(effects[ - step := ( - var gpr, local_monitor := states[step]->gpr, states[step]->local_monitor; - ConstArray(false) - [read(gpr[instr->addr]) := true] - [write(gpr[instr->addr], max[memory[step, gpr[instr->addr]], gpr[instr->src]]) := true] - ) - ]); - ensures {:msg "ld/stadd effects"} - (instr is ldadd || instr is stadd) ==> effects == old(effects[ - step := ( - var gpr, local_monitor := states[step]->gpr, states[step]->local_monitor; - ConstArray(false) - [read(gpr[instr->addr]) := true] - [write(gpr[instr->addr], memory[step, gpr[instr->addr]] + gpr[instr->src]) := true] - ) - ]); - ensures {:msg "ld/stclr effects"} - (instr is ldclr || instr is stclr) ==> effects == old(effects[ - step := ( - var gpr, local_monitor := states[step]->gpr, states[step]->local_monitor; - ConstArray(false) - [read(gpr[instr->addr]) := true] - [write(gpr[instr->addr], and[memory[step, gpr[instr->addr]], bit_not(gpr[instr->src])]) := true] - ) - ]); - ensures {:msg "ordering"} - ordering == old(ordering[ - step := ConstArray(false) - [Acquire() := - instr->acq && ( - instr is ld - || instr is ldx - || instr is swp - || instr is cas - || instr is ldumax - || instr is ldclr - || instr is ldset - || instr is ldeor - || instr is ldadd - ) - ] - [Release() := - instr->rel && ( - instr is st || - instr is swp || - instr is ldclr || - instr is ldset || - 
instr is ldadd || - instr is ldeor || - instr is stclr || - instr is stset || - instr is steor || - instr is stadd || - ( instr is cas && (var gpr := states[step]->gpr; - memory[step, gpr[instr->addr]] == gpr[instr->dest] - )) || (instr is ldumax || instr is stumax) - || - (instr is stx && ( - var gpr, local_monitor := states[step]->gpr, states[step]->local_monitor; - local_monitor is exclusive - && local_monitor->addr == gpr[instr->addr] - && global_monitor_exclusive[step] - )) - ) - ] - ]); - ensures {:msg "global monitor"} - ( - global_monitor_exclusive == old(global_monitor_exclusive[step + 1 := false]) // external write can clear monitor at any moment - && event_register == old(event_register[step + 1 := true]) // it has to set event register - ) - || global_monitor_exclusive == old(global_monitor_exclusive[step + 1 := - if instr is ldx then - true - else - if store_clears_global_monitor && ( - instr is st - || instr is stx - || instr is swp - || instr is ldclr - || instr is ldset - || instr is ldeor - || instr is ldadd - || instr is stclr - || instr is stset - || instr is steor - || instr is stadd - || (instr is cas && ( - var gpr := states[step]->gpr; - memory[step, gpr[instr->addr]] == gpr[instr->dest] - )) - || (instr is ldumax || instr is stumax) - ) - then - false - else - global_monitor_exclusive[step] - ]); - ensures {:msg "memory"} - old(global_monitor_exclusive[step] ==> - memory[step, states[step]->local_monitor->addr] == memory[step - 1, states[step]->local_monitor->addr] - ); - /* D1.6.1 The Event Register - The Event Register for a PE is set by any of the following: - • A Send Event instruction, SEV, executed by any PE in the system. - • A Send Event Local instruction, SEVL, executed by the PE. - • An exception return. - • The clearing of the global monitor for the PE. - • An event from a Generic Timer event stream, see Event streams on page D11-5991. - • An event sent by some IMPLEMENTATION DEFINED mechanism. 
- */ - ensures {:msg "event register"} - event_register == old(event_register[step + 1 := true]) || // extrenal factors can set the event register - event_register == old(event_register)[step := - if instr is wfe then - false - else - instr is sevl || - old(event_register[step]) // preserve event register - ]; - requires {:msg "either event register or global monitor is set for WFE"} - instr is wfe ==> - event_register[step] || global_monitor_exclusive[step]; - requires {:msg "stx is paired to ldx"} - instr is stx ==> ( - var state := states[step]; - state->local_monitor == exclusive(state->gpr[instr->addr], state->last_load) - ); -{ - var stx_succeeds, cas_succeeds, clears_global_monitor, sets_event_register: bool; - - var cmp_diff: int; - - cas_succeeds := - memory[step, states[step]->gpr[instr->addr]] == states[step]->gpr[instr->dest]; - - states[step + 1] := states[step]; - global_monitor_exclusive[step + 1] := global_monitor_exclusive[step]; - event_register[step + 1] := event_register[step]; - effects[step] := ConstArray(false); - ordering[step] := ConstArray(false); - - if (instr is ld - || instr is ldx - || instr is swp - || instr is cas - || instr is ldclr - || instr is ldumax - || instr is ldset - || instr is ldeor - || instr is ldadd) { - states[step + 1]->gpr[instr->dest] := memory[step, states[step]->gpr[instr->addr]]; - states[step + 1]->last_load := step; - effects[step][read(states[step]->gpr[instr->addr])] := true; - if (instr is ldx) { - states[step + 1]->local_monitor := exclusive(states[step]->gpr[instr->addr], step); - global_monitor_exclusive[step + 1] := true; - } - if (instr is ld - || instr is ldx - || instr is swp - || instr is cas - || instr is ldumax - || instr is ldclr - || instr is ldset - || instr is ldeor - || instr is ldadd) { - ordering[step][Acquire()] := instr->acq; - } - } - if (instr is stclr - || instr is stset - || instr is stumax - || instr is steor - || instr is stadd) { - states[step + 1]->last_load := step; - effects[step][read(states[step]->gpr[instr->addr])] := true; - } - if (instr is st || - instr is stx || - instr is swp || - (instr is cas && cas_succeeds) || - instr is ldumax || instr is stumax || - instr is ldclr || instr is stclr || - instr is ldset || instr is stset || - instr is ldeor || instr is steor || - instr is ldadd || instr is stadd) { - stx_succeeds := global_monitor_exclusive[step]; - // stx always clears local monitor - if (instr is stx || store_clears_local_monitor) { - states[step + 1]->local_monitor := open(); - } - if (store_clears_global_monitor) { - global_monitor_exclusive[step + 1] := false; - } - if (instr is stx) { - if (stx_succeeds) { - states[step + 1]->gpr[instr->status] := 0; - - states[step + 1]->last_store := step; - effects[step][write(states[step]->gpr[instr->addr], states[step]->gpr[instr->src])] := true; - ordering[step][Release()] := instr->rel; - } else { - states[step + 1]->gpr[instr->status] := 1; - } - } else if (instr is ldclr || instr is stclr) { - states[step + 1]->last_store := step; - effects[step][write(states[step]->gpr[instr->addr], and[memory[step,states[step]->gpr[instr->addr]], bit_not(states[step]->gpr[instr->src])])] := true; - ordering[step][Release()] := instr->rel; - } else if (instr is ldumax || instr is stumax) { - states[step + 1]->last_store := step; - effects[step][write(states[step]->gpr[instr->addr], max[memory[step,states[step]->gpr[instr->addr]], states[step]->gpr[instr->src]])] := true; - ordering[step][Release()] := instr->rel; - } else if (instr is ldset || instr is stset) 
{ - states[step + 1]->last_store := step; - effects[step][write(states[step]->gpr[instr->addr], or[memory[step,states[step]->gpr[instr->addr]], states[step]->gpr[instr->src]])] := true; - ordering[step][Release()] := instr->rel; - } else if (instr is ldeor || instr is steor) { - states[step + 1]->last_store := step; - effects[step][write(states[step]->gpr[instr->addr], xor[memory[step,states[step]->gpr[instr->addr]], states[step]->gpr[instr->src]])] := true; - ordering[step][Release()] := instr->rel; - } else if (instr is ldadd || instr is stadd) { - states[step + 1]->last_store := step; - effects[step][write(states[step]->gpr[instr->addr], memory[step,states[step]->gpr[instr->addr]] + states[step]->gpr[instr->src])] := true; - ordering[step][Release()] := instr->rel; - } - else { - states[step + 1]->last_store := step; - effects[step][write(states[step]->gpr[instr->addr], states[step]->gpr[instr->src])] := true; - ordering[step][Release()] := instr->rel; - } - - } - - if (instr is mov) { - states[step + 1]->gpr[instr->dest] := states[step]->gpr[instr->src]; - } - if (instr is add) { - states[step + 1]->gpr[instr->dest] := states[step]->gpr[instr->first] + states[step]->gpr[instr->second]; - } - if (instr is sub) { - states[step + 1]->gpr[instr->dest] := states[step]->gpr[instr->first] - states[step]->gpr[instr->second]; - } - if (instr is andd) { - states[step + 1]->gpr[instr->dest] := bit_and(states[step]->gpr[instr->first], states[step]->gpr[instr->second]); - } - if (instr is orr) { - states[step + 1]->gpr[instr->dest] := bit_or(states[step]->gpr[instr->first], states[step]->gpr[instr->second]); - } - if (instr is eor) { - states[step + 1]->gpr[instr->dest] := bit_xor(states[step]->gpr[instr->first], states[step]->gpr[instr->second]); - } - if (instr is mvn) { - states[step + 1]->gpr[instr->dest] := bit_not(states[step]->gpr[instr->src]); - } - if (instr is neg) { - states[step + 1]->gpr[instr->dest] := 0 - states[step]->gpr[instr->src]; - } - if (instr is cmp) { - cmp_diff := states[step]->gpr[instr->opnd1] - states[step]->gpr[instr->opnd2]; - states[step + 1]->flags->N := cmp_diff < 0; - states[step + 1]->flags->Z := cmp_diff == 0; - states[step + 1]->flags->C := cmp_diff >= 0; - } - - if (global_monitor_exclusive[step]) { - assume(memory[step, states[step]->local_monitor->addr] == memory[step - 1, states[step]->local_monitor->addr]); - } - - if (instr is sevl) { - event_register[step + 1] := true; - } - if (instr is wfe) { - event_register[step + 1] := false; - } - if (clears_global_monitor) { - global_monitor_exclusive[step + 1] := false; - } - if (sets_event_register || clears_global_monitor) { - event_register[step + 1] := true; - } - - step := step + 1; -} - -function cbnz(test: Register, state: State): bool { - state->gpr[test] != 0 -} - -function cbz(test: Register, state: State): bool { - state->gpr[test] == 0 -} - -// C1.2.4 Condition code -datatype ConditionCode { - EQ(), // Equal - NE(), // Not equal - HS(), // Unsigned higher or same - LO(), // Unsigned lower - HI(), // Unsigned higher - LS() // Unsigned lower or same -} - -function branch(cond: ConditionCode, state: State): bool {( - var N, Z, C := state->flags->N, state->flags->Z, state->flags->C; - if cond is EQ then Z - else if cond is NE then !Z - else if cond is HS then C - else if cond is LO then !C - else if cond is HI then C && !Z - else if cond is LS then !(C && !Z) - else false // Should never be reached -)} - - - -function no_writes(from, to: StateIndex, effects: [StateIndex][Effect] bool): bool { - (forall i: 
StateIndex, e: Effect :: - from <= i && i < to ==> - !(effects[i][e] && e is write) - ) -} - -function ppo(step1, step2: StateIndex, ordering: [StateIndex][Ordering] bool): bool { - step1 < step2 && ( - // Barrier-ordered-before - ordering[step1][Acquire()] || - ordering[step1][AcquirePC()] || - ordering[step2][Release()] || - (ordering[step1][Release()] && ordering[step2][Acquire()]) - ) -} - -type OrderRelation = [StateIndex, StateIndex, StateIndex, [StateIndex][Ordering] bool] bool; - -const order_rlx: OrderRelation; -axiom order_rlx == (lambda step, entry, exit: StateIndex, ordering: [StateIndex][Ordering] bool :: - true -); - -const order_acq: OrderRelation; -axiom order_acq == (lambda load, entry, exit: StateIndex, ordering: [StateIndex][Ordering] bool :: - (forall step: StateIndex :: - step >= exit ==> ppo(load, step, ordering) - ) -); - -const order_rel: OrderRelation; -axiom order_rel == (lambda store, entry, exit: StateIndex, ordering: [StateIndex][Ordering] bool :: - (forall step: StateIndex :: - step < entry ==> ppo(step, store, ordering) - ) -); - -function is_sc(order: [Ordering] bool): bool { - order[Acquire()] || order[Release()] -} - -datatype SCImplementation { LeadingFence(), TrailingFence(), AcqRel() } -const sc_impl: SCImplementation; -axiom sc_impl is AcqRel; // <- IMPORTANT: put required implemetation here - -const order_acq_sc: OrderRelation; -axiom order_acq_sc == (lambda load, entry, exit: StateIndex, ordering: [StateIndex][Ordering] bool :: - order_acq[load, entry, exit, ordering] && - if sc_impl is LeadingFence then - // ordered with all previous operations - (forall step: StateIndex :: - step < entry ==> ppo(step, load, ordering) - ) - else if sc_impl is TrailingFence then - true - else if sc_impl is AcqRel then - // ordered with all previous SC operations - is_sc(ordering[load]) && - (forall step: StateIndex :: - step < entry ==> is_sc(ordering[step]) ==> ppo(step, load, ordering) - ) - else false -); - -const order_rel_sc: OrderRelation; -axiom order_rel_sc == (lambda store, entry, exit: StateIndex, ordering: [StateIndex][Ordering] bool :: - order_rel[store, entry, exit, ordering] && - if sc_impl is LeadingFence then - true - else if sc_impl is TrailingFence then - // ordered with all later operations - (forall step: StateIndex :: - step >= exit ==> ppo(store, step, ordering) - ) - else if sc_impl is AcqRel then - // ordered with all later SC operations - is_sc(ordering[store]) && - (forall step: StateIndex :: - step >= exit ==> is_sc(ordering[step]) ==> ppo(store, step, ordering) - ) - else false -); diff --git a/verify/lists/vatomic_await.txt b/verify/lists/vatomic_await.txt new file mode 100644 index 00000000..2421d7dd --- /dev/null +++ b/verify/lists/vatomic_await.txt @@ -0,0 +1,90 @@ +await_eq +await_eq_acq +await_eq_add +await_eq_add_acq +await_eq_add_rel +await_eq_add_rlx +await_eq_rlx +await_eq_set +await_eq_set_acq +await_eq_set_rel +await_eq_set_rlx +await_eq_sub +await_eq_sub_acq +await_eq_sub_rel +await_eq_sub_rlx +await_ge +await_ge_acq +await_ge_add +await_ge_add_acq +await_ge_add_rel +await_ge_add_rlx +await_ge_rlx +await_ge_set +await_ge_set_acq +await_ge_set_rel +await_ge_set_rlx +await_ge_sub +await_ge_sub_acq +await_ge_sub_rel +await_ge_sub_rlx +await_gt +await_gt_acq +await_gt_add +await_gt_add_acq +await_gt_add_rel +await_gt_add_rlx +await_gt_rlx +await_gt_set +await_gt_set_acq +await_gt_set_rel +await_gt_set_rlx +await_gt_sub +await_gt_sub_acq +await_gt_sub_rel +await_gt_sub_rlx +await_le +await_le_acq +await_le_add +await_le_add_acq 
+await_le_add_rel +await_le_add_rlx +await_le_rlx +await_le_set +await_le_set_acq +await_le_set_rel +await_le_set_rlx +await_le_sub +await_le_sub_acq +await_le_sub_rel +await_le_sub_rlx +await_lt +await_lt_acq +await_lt_add +await_lt_add_acq +await_lt_add_rel +await_lt_add_rlx +await_lt_rlx +await_lt_set +await_lt_set_acq +await_lt_set_rel +await_lt_set_rlx +await_lt_sub +await_lt_sub_acq +await_lt_sub_rel +await_lt_sub_rlx +await_neq +await_neq_acq +await_neq_add +await_neq_add_acq +await_neq_add_rel +await_neq_add_rlx +await_neq_rlx +await_neq_set +await_neq_set_acq +await_neq_set_rel +await_neq_set_rlx +await_neq_sub +await_neq_sub_acq +await_neq_sub_rel +await_neq_sub_rlx \ No newline at end of file diff --git a/verify/atomics_list.txt b/verify/lists/vatomic_core.txt similarity index 50% rename from verify/atomics_list.txt rename to verify/lists/vatomic_core.txt index 7db421de..25de1e39 100644 --- a/verify/atomics_list.txt +++ b/verify/lists/vatomic_core.txt @@ -1,74 +1,105 @@ -xchg -xchg_acq -xchg_rel -xchg_rlx +add +add_get +add_get_acq +add_get_rel +add_get_rlx +add_rel +add_rlx cmpxchg cmpxchg_acq cmpxchg_rel cmpxchg_rlx +dec +dec_acq +dec_get +dec_get_acq +dec_get_rel +dec_get_rlx +dec_rel +dec_rlx +get_add +get_add_acq +get_add_rel +get_add_rlx +get_dec +get_dec_acq +get_dec_rel +get_dec_rlx +get_inc +get_inc_acq +get_inc_rel +get_inc_rlx get_max get_max_acq get_max_rel get_max_rlx +get_sub +get_sub_acq +get_sub_rel +get_sub_rlx +inc +inc_acq +inc_get +inc_get_acq +inc_get_rel +inc_get_rlx +inc_rel +inc_rlx max +max_get +max_get_acq +max_get_rel +max_get_rlx max_rel max_rlx +read +read_acq +read_rlx +sub +sub_acq +sub_get +sub_get_acq +sub_get_rel +sub_get_rlx +sub_rel +sub_rlx +write +write_rel +write_rlx +xchg +xchg_acq +xchg_rel +xchg_rlx +and +and_get +and_get_acq +and_get_rel +and_get_rlx +and_rel +and_rlx get_and -get_or -get_xor -get_add -get_sub get_and_acq -get_or_acq -get_xor_acq -get_add_acq -get_sub_acq get_and_rel -get_or_rel -get_xor_rel -get_add_rel -get_sub_rel get_and_rlx +get_or +get_or_acq +get_or_rel get_or_rlx +get_xor +get_xor_acq +get_xor_rel get_xor_rlx -get_add_rlx -get_sub_rlx -and or -xor -add -sub -and_rel +or_get +or_get_acq +or_get_rel +or_get_rlx or_rel -xor_rel -add_rel -sub_rel -and_rlx or_rlx -xor_rlx -add_rlx -sub_rlx -read -read_acq -read_rlx -write -write_rel -write_rlx -await_eq -await_neq -await_lt -await_le -await_gt -await_ge -await_eq_acq -await_neq_acq -await_lt_acq -await_le_acq -await_gt_acq -await_ge_acq -await_eq_rlx -await_neq_rlx -await_lt_rlx -await_le_rlx -await_gt_rlx -await_ge_rlx +xor +xor_get +xor_get_acq +xor_get_rel +xor_get_rlx +xor_rel +xor_rlx \ No newline at end of file diff --git a/verify/lists/vatomic_ptr.txt b/verify/lists/vatomic_ptr.txt new file mode 100644 index 00000000..fd6c6168 --- /dev/null +++ b/verify/lists/vatomic_ptr.txt @@ -0,0 +1,28 @@ +await_eq +await_eq_acq +await_eq_rlx +await_eq_set +await_eq_set_acq +await_eq_set_rel +await_eq_set_rlx +await_neq +await_neq_acq +await_neq_rlx +await_neq_set +await_neq_set_acq +await_neq_set_rel +await_neq_set_rlx +cmpxchg +cmpxchg_acq +cmpxchg_rel +cmpxchg_rlx +read +read_acq +read_rlx +write +write_rel +write_rlx +xchg +xchg_acq +xchg_rel +xchg_rlx \ No newline at end of file diff --git a/verify/read.bpl b/verify/read.bpl deleted file mode 100644 index f3bf5888..00000000 --- a/verify/read.bpl +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
- * SPDX-License-Identifier: MIT - */ -procedure read(load_order: OrderRelation, addr: Register) returns (v : Register); - modifies step, states, effects, ordering, global_monitor_exclusive, event_register; - ensures {:msg "load happens within function bounds"} ( - var i := states[step]->last_load; - old(step) <= i && i < step - ); - ensures {:msg "no write effects"} ( - forall i: StateIndex, e: Effect :: - old(step) <= i && i < step ==> - !(effects[i][e] && e is write) - ); - ensures {:msg "read effect produced and correct"} - effects[states[step]->last_load][read(old(states[step]->gpr[addr]))]; - ensures {:msg "order"} - load_order[states[step]->last_load, old(step), step, ordering]; - ensures {:msg "output register contains loaded value"} - states[step]->gpr[v] == memory[states[step]->last_load, old(states[step]->gpr[addr])]; - - diff --git a/verify/riscv/library.bpl b/verify/riscv/library.bpl new file mode 100644 index 00000000..78d40111 --- /dev/null +++ b/verify/riscv/library.bpl @@ -0,0 +1,279 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +datatype Ordering { + Acquire(), + Release(), + AcquirePC(), + ReleasePC(), + AcqRel(), + Fence(ra, wa, rb, wb: bool), + NoOrd() +} + +datatype AtomicType { + AtomicAdd(), + AtomicAnd(), + AtomicMax(), + AtomicMin(), + AtomicOr(), + AtomicXor(), + AtomicSwap() +} + +datatype Monitor { + exclusive(addr: bv64), + open() +} + +var local_monitor: Monitor; +var monitor_exclusive: bool; + +datatype Instruction { + ld(addr: bv64, mask: bv64), + ldu(addr: bv64, mask: bv64), + sd(src, addr: bv64, mask: bv64), + sb(src, addr: bv64, mask: bv64), + lr(acq, rel: bool, addr: bv64, mask: bv64), + sc(acq, rel: bool, src, addr: bv64, mask: bv64), + mv(src: bv64), + atomic(atom: AtomicType, acq, rel: bool, src, addr: bv64, mask: bv64), + + add(first, second: bv64), + addi(first, second: bv64), + sub(first, second: bv64), + neg(src: bv64), + + andd(first, second: bv64), + orr(first, second: bv64), + and(first, second: bv64), + or(first, second: bv64), + xor(first, second: bv64), + + negw(src: bv64), + + andi(first, second: bv64), + slli(first, second: bv64), + sll(first, second: bv64), + li(src: bv64), + not(src: bv64), + + + srli(first, second: bv64), + srl(first, second: bv64), + sra(first, second: bv64), + + sext(src: bv64), + + + fence(ra, wa, rb, wb: bool) +} + + +function updated_value(instr: Instruction, read_value : bv64) : bv64 { + if instr is sc then instr->src + else if instr->atom is AtomicAdd then bin_add(instr->src, read_value) + else if instr->atom is AtomicAnd then and[instr->src, read_value] + else if instr->atom is AtomicMax then max[instr->src, read_value] + else if instr->atom is AtomicMin then min[instr->src, read_value] + else if instr->atom is AtomicOr then or[instr->src, read_value] + else if instr->atom is AtomicXor then xor[instr->src, read_value] + else if instr->atom is AtomicSwap then instr->src + else 0bv64 +} + +function rmw(instr: Instruction) : bool { + instr is atomic +} + +function reads(instr: Instruction) : bool { + rmw(instr) || instr is ld || instr is ldu || instr is lr +} + + +function returning_load(instr : Instruction) : bool { reads(instr) } + +function instruction_mask(instr: Instruction) : bv64 { + instr->mask +} + +function writes(instr: Instruction) : bool { + rmw(instr) || instr is sd || instr is sb +} + + +procedure execute_local(instr: Instruction) returns (r : bv64); + ensures + (r == if instr is mv || instr is sext || instr is li then 
instr->src + else if instr is add || instr is addi then bin_add(instr->first, instr->second) + else if instr is sub then bin_sub(instr->first, instr->second) + else if instr is neg || instr is negw then bin_neg(instr->src) + else if instr is slli || instr is sll then shift_left(instr->first, instr->second) + + /* realistically, sra and srl behave differently - srl on unsigned, sra on signed */ + else if instr is srli || instr is srl || instr is sra then shift_right(instr->first, instr->second) + + else if instr is not then bit_inv(instr->src) + else if instr is andd || instr is and || instr is andi then bit_and(instr->first, instr->second) + else if instr is orr || instr is or then bit_or(instr->first, instr->second) + else if instr is xor then bit_xor(instr->first, instr->second) + else bit_and(r, instruction_mask(instr))); + + +procedure assume_requires_execute(instr: Instruction); + modifies step, local_monitor, monitor_exclusive, last_store, last_load; + ensures (instr is sc ==> local_monitor is exclusive && local_monitor->addr == instr->addr); + +procedure execute(instr: Instruction) returns (r : bv64); + modifies step, local_monitor, monitor_exclusive, last_store, last_load; + requires (instr is sc ==> local_monitor is exclusive && local_monitor->addr == instr->addr); + ensures step == old(step + 1); + ensures ( + var sc_success := + old(local_monitor is exclusive + && (local_monitor->addr == instr->addr) + && monitor_exclusive); + (r == if instr is mv || instr is sext || instr is li then instr->src + else if instr is sc then b2i(!sc_success) + else bit_and(r, instruction_mask(instr))) + && + (last_load == + if reads(instr) + then + old(step) + else + old(last_load)) + && + (last_store == + if writes(instr) || (instr is sc && sc_success) + then + old(step) + else + old(last_store)) + && + (local_monitor == + if instr is lr then + exclusive(instr->addr) + else if writes(instr) || reads(instr) || instr is sc then + open() + else + old(local_monitor)) + && + (effects[old(step)] == if rmw(instr) || (instr is sc && sc_success) + then update(instr->addr, r, true, updated_value(instr, r), instruction_mask(instr)) + else if reads(instr) + then read(instr->addr, r, true) + else if writes(instr) + then write(instr->addr, instr->src, instruction_mask(instr)) + else no_effect()) + && + (ordering[old(step)] == + if instr->acq && instr->rel + && (instr is lr + || (instr is sc && sc_success) + || rmw(instr)) + then AcqRel() + else if instr->acq && reads(instr) + then Acquire() + else if (instr->rel && writes(instr)) + || (instr is sc && sc_success) + then Release() + else if instr is fence + then Fence(instr->ra, instr->wa, instr->rb, instr->wb) + else NoOrd()) + && + (( + monitor_exclusive == false // external write can clear monitor at any moment + ) + || monitor_exclusive == old( + if instr is lr then + true + else if writes(instr) || instr is sc then + false + else + monitor_exclusive + )) + && + (atomic[last_load, old(step)] == (rmw(instr) || (instr is sc && sc_success))) + ); + +function beq(r1: bv64, r2:bv64): bool { + r1 == r2 +} + +function bne(r1: bv64, r2:bv64): bool { + r1 != r2 +} + +function bnez(r: bv64): bool { + r != 0bv64 +} + + +function bgt(r1, r2: bv64): bool { + sgt(r1, r2) +} + +function bgtu(r1, r2: bv64): bool { + ugt(r1, r2) +} + +function ble(r1, r2: bv64): bool { + sle(r1, r2) +} + +function bleu(r1, r2: bv64): bool { + ule(r1, r2) +} + + +function blt(r1, r2: bv64): bool { + slt(r1, r2) +} + +function bltu(r1, r2: bv64): bool { + ult(r1, r2) +} + +function bge(r1, r2: 
bv64): bool { + sge(r1, r2) +} + +function bgeu(r1, r2: bv64): bool { + uge(r1, r2) +} + + +function is_acq(order: Ordering) : bool { + order is Acquire || order is AcqRel || order is AcquirePC +} + +function is_rel(order: Ordering) : bool { + order is Release || order is AcqRel || order is ReleasePC +} + + +function ppo(step1, step2: StateIndex, ordering: [StateIndex] Ordering, effects: [StateIndex] Effect): bool { + step1 < step2 && ( + // Barrier-ordered-before + is_acq(ordering[step1]) || + is_rel(ordering[step2]) || + (ordering[step1] == Release() && ordering[step2] == Acquire()) || + + (exists fenceId: StateIndex, fence: Ordering, e1, e2: Effect :: + fence is Fence && ordering[fenceId] == fence && effects[step1] == e1 && effects[step2] == e2 && + (step1 < fenceId && fenceId < step2) && + ((fence->ra && is_read(e1)) || + (fence->wa && is_write(e1)) + ) && + ((fence->rb && is_read(e2)) || + (fence->wb && is_write(e2)) + ) + ) + ) +} + +function is_sc(order: Ordering): bool { + order is Acquire || order is Release +} \ No newline at end of file diff --git a/verify/rmw.bpl b/verify/rmw.bpl deleted file mode 100644 index a94bd0f3..00000000 --- a/verify/rmw.bpl +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. - * SPDX-License-Identifier: MIT - */ -type RMWOp = [int, int] int; - -/* - op - operation to be performed - load_order - ordering of load - store_order - ordering of store - - input - second operand to op - output - first operand to op; loaded from memory - addr - address of load/store - - result - temporary; holds op(output, input); stored to memory - status - temporary; used for stx -*/ -procedure rmw (op: RMWOp, store_order: OrderRelation, addr, input : Register); - modifies step, states, effects, ordering, global_monitor_exclusive, event_register; - - ensures {:msg "load happens within function bounds"} ( - var load := states[step]->last_load; - old(step) <= load && load < step - ); - - // version 1 - ensures {:msg "store happens within function bounds; produces effect with correct order; no other stores"} ( - var load, store, gpr := states[step]->last_load, states[step]->last_store, old(states[step]->gpr); ( - var load_val := memory[load, gpr[addr]]; ( - var store_val := op[load_val, gpr[input]]; - (load_val == store_val && no_writes(old(step), step, effects)) || - ( - old(step) <= store && store < step - && memory[load, gpr[addr]] == memory[store, gpr[addr]] - && effects[store][write(gpr[addr], store_val)] - && store_order[store, old(step), step, ordering] - && (forall i: StateIndex, e: Effect :: - old(step) <= i && i < step && i != store ==> - !(effects[i][e] && e is write) - ) - ) - ))); - - // version 2 - ensures {:msg "if no write happened, the value from memory is already the result of operation"} - no_writes(old(step), step, effects) ==> ( - var load, gpr := states[step]->last_load, old(states[step]->gpr); ( - var load_val := memory[load, gpr[addr]]; ( - var store_val := op[load_val, gpr[input]]; - load_val == store_val - ))); - ensures {:msg "store happens within function bounds"} - !no_writes(old(step), step, effects) ==> ( - var store := states[step]->last_store; - old(step) <= store && store < step - ); - ensures {:msg "memory is preserved between load and store"} - !no_writes(old(step), step, effects) ==> ( - var load, store, addr := states[step]->last_load, states[step]->last_store, old(states[step]->gpr[addr]); ( - memory[load, addr] == memory[store, addr] - )); - ensures {:msg "store produces write to 
correct address with correct value"} - !no_writes(old(step), step, effects) ==> ( - var load, store, gpr := states[step]->last_load, states[step]->last_store, old(states[step]->gpr); ( - var load_val := memory[load, gpr[addr]]; ( - var store_val := op[load_val, gpr[input]]; - effects[store][write(gpr[addr], store_val)] - ))); - ensures {:msg "no other write effects"} - !no_writes(old(step), step, effects) ==> ( - var store := states[step]->last_store; ( - forall i: StateIndex, e: Effect :: - old(step) <= i && i < step && i != store ==> - !(effects[i][e] && e is write) - )); - ensures {:msg "store ordering"} - !no_writes(old(step), step, effects) ==> ( - var store := states[step]->last_store; - store_order[store, old(step), step, ordering] - ); - -procedure get_rmw(op: RMWOp, load_order, store_order: OrderRelation, addr, input: Register) returns (output: Register); - modifies step, states, effects, ordering, global_monitor_exclusive, event_register; - - ensures {:msg "load happens within function bounds"} ( - var load := states[step]->last_load; - old(step) <= load && load < step - ); - ensures {:msg "load ordering"} - load_order[states[step]->last_load, old(step), step, ordering]; - ensures {:msg "output register contains loaded value"} - states[step]->gpr[output] == memory[states[step]->last_load, old(states[step]->gpr[addr])]; - ensures {:msg "if no write happened, the value from memory is already the result of operation"} - no_writes(old(step), step, effects) ==> ( - var load, gpr := states[step]->last_load, old(states[step]->gpr); ( - var load_val := memory[load, gpr[addr]]; ( - var store_val := op[load_val, gpr[input]]; - load_val == store_val - ))); - ensures {:msg "store happens within function bounds"} - !no_writes(old(step), step, effects) ==> ( - var store := states[step]->last_store; - old(step) <= store && store < step - ); - ensures {:msg "memory is preserved between load and store"} - !no_writes(old(step), step, effects) ==> ( - var load, store, addr := states[step]->last_load, states[step]->last_store, old(states[step]->gpr[addr]); ( - memory[load, addr] == memory[store, addr] - )); - ensures {:msg "store produces write to correct address with correct value"} - !no_writes(old(step), step, effects) ==> ( - var load, store, gpr := states[step]->last_load, states[step]->last_store, old(states[step]->gpr); ( - var load_val := memory[load, gpr[addr]]; ( - var store_val := op[load_val, gpr[input]]; - effects[store][write(gpr[addr], store_val)] - ))); - ensures {:msg "no other write effects"} - !no_writes(old(step), step, effects) ==> ( - var store := states[step]->last_store; ( - forall i: StateIndex, e: Effect :: - old(step) <= i && i < step && i != store ==> - !(effects[i][e] && e is write) - )); - ensures {:msg "store ordering"} - !no_writes(old(step), step, effects) ==> ( - var store := states[step]->last_store; - store_order[store, old(step), step, ordering] - ); - - diff --git a/verify/src/arm/mod.rs b/verify/src/arm/mod.rs new file mode 100644 index 00000000..590b4e83 --- /dev/null +++ b/verify/src/arm/mod.rs @@ -0,0 +1,482 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ */
+mod parser;
+mod transform;
+
+use std::fmt::Display;
+
+pub use parser::parse_arm_assembly;
+pub use transform::{extract_arm_functions, remove_directives, transform_labels};
+
+use crate::{
+    atomic_types, AtomicType, BoogieFunction, BoogieInstruction, SideEffect, ToBoogie, Width, DUMMY_REG
+};
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum RegisterType {
+    X, // 64-bit general purpose register
+    W, // 32-bit general purpose register
+    V, // SIMD/FP register
+    Q, // 128-bit SIMD register
+    D, // 64-bit SIMD register
+    S, // 32-bit SIMD register
+    H, // 16-bit SIMD register
+    B, // 8-bit SIMD register
+    SP,
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct Register {
+    pub reg_type: RegisterType,
+    pub number: u8,
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum AddressingMode {
+    // [Xn]
+    BaseRegister(Register),
+    // [Xn, #imm]
+    BaseRegisterWithOffset(Register, i64),
+    // [Xn, #imm]!
+    PreIndexed(Register, i64),
+    // [Xn], #imm
+    PostIndexed(Register, i64),
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum Operand {
+    Register(Register),
+    ImmediateValue(i64),
+    Memory(AddressingMode),
+    Label(String),
+    ShiftedRegister(Register, String, i64),
+    RegisterList(Vec<Register>),
+    FenceMode(FenceType),
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum ConditionCode {
+    EQ, // Equal
+    NE, // Not equal
+
+    HS, // Higher or same
+    CS, // Carry set
+
+    LO, // Lower
+    CC, // Carry clear
+
+    MI, // Minus, negative
+    PL, // Plus, positive or zero
+    VS, // Overflow
+    VC, // No overflow
+    HI, // Unsigned higher
+    LS, // Unsigned lower or same
+    GE, // Signed greater than or equal
+    LT, // Signed less than
+    GT, // Signed greater than
+    LE, // Signed less than or equal
+    AL, // Always (default)
+    NV, // Never
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum Condition {
+    Code(ConditionCode),
+    Zero(Operand),
+    NotZero(Operand),
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum Directive {
+    CFIStartProc,
+    CFIEndProc,
+    Size(String, String),
+    Align(u32),
+    P2Align(u32, u32, u32),
+    Type(String, String),
+}
+
+#[derive(Debug, Clone, PartialEq, Copy)]
+pub enum ArithmeticOp {
+    Add,
+    Sub,
+    Mul,
+    And,
+    Orr,
+    Eor,
+}
+
+#[derive(Debug, Clone, PartialEq, Copy)]
+pub enum ShiftOp {
+    Lsl,
+    Lsr,
+    Asr,
+}
+
+#[derive(Debug, Clone, PartialEq, Copy)]
+pub enum MemoryOp {
+    Load,
+    Store,
+    Rmw
+}
+
+#[derive(Debug, Clone, PartialEq, Copy)]
+pub enum LSEop {
+    Max,
+    Clr,
+    Set,
+    Eor,
+    Add,
+    Swp,
+    Cas
+}
+
+#[derive(Debug, Clone, PartialEq, Copy)]
+pub enum MemorySize {
+    Byte,   // 8-bit
+    Half,   // 16-bit
+    Word,   // 32-bit
+    Double, // 64-bit
+}
+
+impl MemorySize {
+    pub fn bytes(&self) -> u32 {
+        match self {
+            MemorySize::Byte => 1,
+            MemorySize::Half => 2,
+            MemorySize::Word => 4,
+            MemorySize::Double => 8,
+        }
+    }
+    pub fn mask(&self) -> u64 {
+        (2u64.overflowing_pow(self.bytes() * 8)).0.overflowing_add_signed(-1).0
+    }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct MemoryAttrs {
+    pub size: MemorySize,
+    pub exclusive: bool,
+    pub acquire: bool,
+    pub release: bool,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum MoveOp {
+    Mov,
+    Mvn,
+    Neg
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum FenceType {
+    SY,
+    LD,
+}
+
+impl Display for FenceType {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "{}",
+            match self {
+                Self::SY => "SY()",
+                Self::LD => "LD()",
+            }
+        )
+    }
+}
+
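+// One parsed AArch64 instruction. Anything outside this subset is kept as
+// Unhandled and surfaced as a warning when translating to Boogie.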
+#[derive(Debug, Clone, PartialEq)]
+pub enum ArmInstruction {
+    Arithmetic(ArithmeticOp, Operand, Operand, Option<Operand>),
+
+    Shift(ShiftOp, Operand, Operand),
+
+    Move(MoveOp, Operand, Operand),
+
+    Memory(MemoryOp, MemoryAttrs, Operand, Operand),
+    MemoryLSE(MemoryOp, MemoryAttrs, LSEop, Operand, Operand, Operand),
+    MemoryPair(MemoryOp, Register, Register, AddressingMode),
+    MemoryExclusive(MemoryOp, MemoryAttrs, Operand, Operand, Operand),
+    Cmp(Operand, Operand),
+    Csel(Operand, Operand, Operand, ConditionCode),
+
+    Dmb(FenceType),
+
+    Branch(Option<Condition>, Operand),
+    BranchLink(Operand),
+    BranchLinkRegister(Operand),
+    BranchRegister(Operand),
+    Return(Option<Operand>),
+
+    TestBitBranch(bool, Operand, Operand, Operand),
+
+    Label(String),
+
+    Directive(Directive),
+    Unhandled(String),
+}
+
+#[derive(Debug, Clone)]
+pub struct ArmFunction {
+    pub name: String,
+    pub instructions: Vec<ArmInstruction>,
+}
+
+pub const ARMV8_WIDTH: Width = Width::Wide;
+
+impl ToBoogie for ArmFunction {
+    fn to_boogie(self) -> BoogieFunction {
+        let instructions = self
+            .instructions
+            .iter()
+            .map(|instr| arm_instruction_to_boogie(instr))
+            .collect();
+
+        let atomic_type = atomic_types(&self.name);
+
+        let register_ident = match atomic_type.type_width(ARMV8_WIDTH) {
+            8 => "x",
+            _ => "w",
+        };
+
+        let ptr_ident = match AtomicType::VPTR.type_width(ARMV8_WIDTH) {
+            8 => "x",
+            _ => "w",
+        };
+
+        BoogieFunction {
+            name: self.name.clone(),
+            instructions,
+            address: format!("{}0", ptr_ident),
+            input1: format!("{}1", register_ident),
+            input2: format!("{}2", register_ident),
+            output: format!("{}0", register_ident),
+        }
+    }
+}
+
+pub fn condition_code_to_boogie(code: ConditionCode) -> String {
+    match code {
+        ConditionCode::CS => condition_code_to_boogie(ConditionCode::HS),
+        ConditionCode::CC => condition_code_to_boogie(ConditionCode::LO),
+        _ => format!("{:?}()", code),
+    }
+}
+
+pub fn condition_to_boogie(cond: &Condition) -> String {
+    match cond {
+        Condition::Code(code) => format!("branch({},flags)", condition_code_to_boogie(*code)),
+        Condition::Zero(reg) => format!("cbz({})", operand_to_boogie(&reg)),
+        Condition::NotZero(reg) => format!("cbnz({})", operand_to_boogie(&reg)),
+    }
+}
+
+pub fn arm_instruction_to_boogie(instr: &ArmInstruction) -> BoogieInstruction {
+    match instr {
+        ArmInstruction::Label(name) => BoogieInstruction::Label(name.clone()),
+        ArmInstruction::Branch(cond_opt, target) => match target {
+            Operand::Label(label_name) => {
+                BoogieInstruction::Branch(vec![label_name.to_string()], if let Some(cond) = cond_opt {
+                    condition_to_boogie(cond)
+                } else {
+                    "true".to_string()
+                })
+            }
+            _ => BoogieInstruction::Unhandled(format!(
+                "// Unhandled Branch Type: {:?}, {:?}",
+                cond_opt, target
+            )),
+        },
+        ArmInstruction::Dmb(mode) => BoogieInstruction::Instr(
+            "dmb".to_string(), SideEffect::Global,
+            DUMMY_REG.to_string(),
+            vec![mode.to_string()],
+        ),
+        ArmInstruction::Return(_) => BoogieInstruction::Return,
+        ArmInstruction::Arithmetic(op, dest, src1, src2_opt) => {
+            let op_name = match op {
+                ArithmeticOp::Add => "add",
+                ArithmeticOp::Sub => "sub",
+                ArithmeticOp::Mul => "mul",
+                ArithmeticOp::And => "andd",
+                ArithmeticOp::Orr => "orr",
+                ArithmeticOp::Eor => "eor",
+            };
+            let dest_reg = operand_to_boogie(dest);
+            let src1 = operand_to_boogie(src1);
+
+            let ops = if let Some(src2) = src2_opt {
+                let src2 = operand_to_boogie(src2);
+                vec![src1, src2]
+            } else {
+                vec![src1]
+            };
+            BoogieInstruction::Instr(op_name.to_string(), SideEffect::Local, dest_reg, ops)
+        }
+        ArmInstruction::Move(op, dest, src) => {
+            let op_name = match op {
+                MoveOp::Mov => "mov",
+                MoveOp::Mvn => "mvn",
+                MoveOp::Neg => "neg"
+            };
+
+            let dest_reg = operand_to_boogie(dest);
+            let src_reg = operand_to_boogie(src);
+            BoogieInstruction::Instr(op_name.to_string(), SideEffect::Local, dest_reg, vec![src_reg])
+        }
+        ArmInstruction::Memory(op, attrs, reg1, reg2) => {
+            let (op_name, has_output) = match op {
+                MemoryOp::Load => {
+                    if attrs.exclusive {
+                        ("ldx", true)
+                    } else {
+                        ("ld", true)
+                    }
+                }
+                MemoryOp::Store => ("st", false),
+                MemoryOp::Rmw => unimplemented!()
+            };
+
+            let dest_or_src_reg = operand_to_boogie(reg1);
+            let addr_reg = operand_to_boogie(reg2);
+
+            if has_output {
+                BoogieInstruction::Instr(
+                    op_name.to_string(), SideEffect::Global,
+                    dest_or_src_reg,
+                    vec![attrs.acquire.to_string(), addr_reg, format!("{}bv64", attrs.size.mask())],
+                )
+            } else {
+                BoogieInstruction::Instr(
+                    op_name.to_string(), SideEffect::Global,
+                    DUMMY_REG.to_string(),
+                    vec![attrs.release.to_string(), dest_or_src_reg, format!("{}bv64", attrs.size.mask()), addr_reg],
+                )
+            }
+        }
+        ArmInstruction::MemoryExclusive(op, attrs, dest, src, addr) => {
+            let op_name = match op {
+                MemoryOp::Store => "stx",
+                _ => unimplemented!(),
+            };
+
+            let dest_reg = operand_to_boogie(dest);
+            let addr_reg = operand_to_boogie(addr);
+            let src_reg = operand_to_boogie(src);
+
+            BoogieInstruction::Instr(
+                op_name.to_string(), SideEffect::Global,
+                dest_reg,
+                vec![attrs.release.to_string(), src_reg, format!("{}bv64", attrs.size.mask()), addr_reg],
+            )
+        }
+        ArmInstruction::MemoryLSE(op, attrs, lse_op, src, dest, addr) => {
+            let lse_name = match lse_op {
+                LSEop::Max => "umax",
+                LSEop::Clr => "clr",
+                LSEop::Set => "set",
+                LSEop::Eor => "eor",
+                LSEop::Add => "add",
+                LSEop::Swp => "swp",
+                LSEop::Cas => "cas"
+            };
+            let addr_reg = operand_to_boogie(addr);
+            let src_reg = operand_to_boogie(src);
+            let dest_reg = match lse_op {
+                LSEop::Cas => src_reg.clone(),
+                _ => operand_to_boogie(dest)
+            };
+
+            let ret = match op {
+                MemoryOp::Load =>
+                    BoogieInstruction::Instr(
+                        "ld".to_string() + lse_name, SideEffect::Global,
+                        dest_reg,
+                        vec![attrs.acquire.to_string(), attrs.release.to_string(), src_reg, addr_reg, format!("{}bv64", attrs.size.mask()), format!("{}bv64", attrs.size.mask())]
+                    ),
+                MemoryOp::Store =>
+                    BoogieInstruction::Instr(
+                        "st".to_string() + lse_name, SideEffect::Global,
+                        DUMMY_REG.to_string(),
+                        vec![attrs.release.to_string(), src_reg, addr_reg, format!("{}bv64", attrs.size.mask()), format!("{}bv64", attrs.size.mask())]
+                    ),
+                MemoryOp::Rmw =>
+                    BoogieInstruction::Instr(
+                        lse_name.to_string(), SideEffect::Global,
+                        dest_reg,
+                        match lse_op {
+                            LSEop::Cas
+                                => vec![attrs.acquire.to_string(), attrs.release.to_string(), src_reg, operand_to_boogie(dest), addr_reg, format!("{}bv64", attrs.size.mask()), format!("{}bv64", attrs.size.mask())],
+                            LSEop::Swp
+                                => vec![attrs.acquire.to_string(), attrs.release.to_string(), src_reg, addr_reg, format!("{}bv64", attrs.size.mask()), format!("{}bv64", attrs.size.mask())],
+                            _ => unimplemented!()
+                        }
+                    ),
+            };
+            ret
+        }
+        ArmInstruction::Cmp(op1, op2) => {
+            let op1_reg = operand_to_boogie(op1);
+            let op2_reg = operand_to_boogie(op2);
+
+            BoogieInstruction::Instr(
+                "cmp".to_string(), SideEffect::Local,
+                DUMMY_REG.to_string(),
+                vec![op1_reg, op2_reg],
+            )
+        }
+        ArmInstruction::Csel(dest, op1, op2, ce) => {
+            let dest_reg = operand_to_boogie(dest);
+            let op1_reg = operand_to_boogie(op1);
+            let op2_reg = operand_to_boogie(op2);
+            let cond = condition_to_boogie(&Condition::Code(*ce));
+            BoogieInstruction::Instr("csel".to_string(), SideEffect::Local, dest_reg, vec![op1_reg, op2_reg, cond])
+        }
+        ArmInstruction::Directive(directive) => {
+            BoogieInstruction::Comment(format!("Directive: {:?}", directive))
+        }
+        _ => {
+            log::warn!("Unhandled: {:?}", instr);
+            BoogieInstruction::Unhandled(format!("{:?}", instr))
+        }
+    }
+}
+
+fn register_to_boogie(reg: &Register) -> String {
+    match reg.reg_type {
+        RegisterType::X => format!("x{}", reg.number),
+        RegisterType::W => format!("w{}", reg.number),
+        RegisterType::V => format!("v{}", reg.number),
+        RegisterType::Q => format!("q{}", reg.number),
+        RegisterType::D => format!("d{}", reg.number),
+        RegisterType::S => format!("s{}", reg.number),
+        RegisterType::H => format!("h{}", reg.number),
+        RegisterType::B => format!("b{}", reg.number),
+        RegisterType::SP => "sp".to_string(),
+    }
+}
+
+fn operand_to_boogie(operand: &Operand) -> String {
+    match operand.clone() {
+        Operand::Register(r) => register_to_boogie(&r),
+        Operand::ImmediateValue(val) => {
+            if val != -1 {
+                format!("{}bv64", val)
+            } else {
+                "bit_inv(0bv64)".to_string()
+            }
+        },
+        Operand::Memory(addr_mode) => match addr_mode {
+            AddressingMode::BaseRegister(reg) => register_to_boogie(&reg),
+            AddressingMode::BaseRegisterWithOffset(reg, offset) => {
+                format!("{}+{}", register_to_boogie(&reg), offset)
+            }
+            _ => format!("/* unsupported addressing mode {:?} */", operand),
+        },
+        _ => unimplemented!(),
+    }
+}
diff --git a/verify/src/arm/parser.rs b/verify/src/arm/parser.rs
new file mode 100644
index 00000000..04fe6d97
--- /dev/null
+++ b/verify/src/arm/parser.rs
@@ -0,0 +1,1221 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ */
+use nom::{
+    IResult, Parser,
+    branch::alt,
+    bytes::complete::{tag, take_till, take_while, take_while1},
+    character::complete::{char, digit1, multispace0, space0, space1},
+    combinator::{map, map_res, opt, recognize, value},
+    multi::{many0, separated_list0},
+    sequence::{delimited, preceded, terminated},
+};
+
+use super::*;
+
+fn parse_register_type(input: &str) -> IResult<&str, RegisterType> {
+    alt((
+        value(RegisterType::X, char('x')),
+        value(RegisterType::W, char('w')),
+        value(RegisterType::V, char('v')),
+        value(RegisterType::Q, char('q')),
+        value(RegisterType::D, char('d')),
+        value(RegisterType::S, char('s')),
+        value(RegisterType::H, char('h')),
+        value(RegisterType::B, char('b')),
+    ))
+    .parse(input)
+}
+
+fn parse_register(input: &str) -> IResult<&str, Register> {
+    alt((
+        map(tag("sp"), |_| Register {
+            reg_type: RegisterType::SP,
+            number: 0,
+        }),
+        map(
+            (
+                parse_register_type,
+                map_res(digit1, |s: &str| s.parse::<u8>()),
+            ),
+            |(reg_type, number)| Register { reg_type, number },
+        ),
+    ))
+    .parse(input)
+}
+
+fn parse_immediate(input: &str) -> IResult<&str, i64> {
+    let (input, _) = alt((tag("#"), tag(""))).parse(input)?;
+    let (input, signed) = opt(char('-')).parse(input)?;
+    let (input, value_str) = alt((
+        recognize((
+            alt((tag("0x"), tag("0X"))),
+            take_while1(|c: char| c.is_digit(16)),
+        )),
+        recognize(digit1),
+    ))
+    .parse(input)?;
+
+    let base = if value_str.starts_with("0x") || value_str.starts_with("0X") {
+        16
+    } else {
+        10
+    };
+
+    let value = i64::from_str_radix(
+        if base == 16 {
+            &value_str[2..]
+        } else {
+            value_str
+        },
+        base,
+    )
+    .unwrap();
+
+    Ok((input, if signed.is_some() { -value } else { value }))
+}
+
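+// Labels and identifiers: a leading letter, '_' or '.', followed by
+// alphanumerics, '_', '.' or '$' (covers compiler-generated local labels).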
+fn parse_label(input: &str) -> IResult<&str, String> {
+    map(
+        recognize((
+            take_while1(|c: char| c.is_alphabetic() || c == '_' || c == '.'),
+            take_while(|c: char| c.is_alphanumeric() || c == '_' || c == '.' || c == '$'),
+        )),
+        |s: &str| s.to_string(),
+    )
+    .parse(input)
+}
+
+fn parse_shift_operation(input: &str) -> IResult<&str, String> {
+    map(
+        alt((tag("lsl"), tag("lsr"), tag("asr"), tag("ror"))),
+        |s: &str| s.to_string(),
+    )
+    .parse(input)
+}
+
+fn parse_condition_code(input: &str) -> Option<ConditionCode> {
+    // We strip the dot to support branching like b<cond> and b.<cond>
+    let trimmed = input.strip_prefix('.').unwrap_or(input);
+    match trimmed {
+        "eq" => Some(ConditionCode::EQ),
+        "ne" => Some(ConditionCode::NE),
+        "cs" => Some(ConditionCode::CS),
+        "cc" => Some(ConditionCode::CC),
+        "mi" => Some(ConditionCode::MI),
+        "pl" => Some(ConditionCode::PL),
+        "vs" => Some(ConditionCode::VS),
+        "vc" => Some(ConditionCode::VC),
+        "hi" => Some(ConditionCode::HI),
+        "hs" => Some(ConditionCode::HS),
+        "ls" => Some(ConditionCode::LS),
+        "lo" => Some(ConditionCode::LO),
+        "ge" => Some(ConditionCode::GE),
+        "lt" => Some(ConditionCode::LT),
+        "gt" => Some(ConditionCode::GT),
+        "le" => Some(ConditionCode::LE),
+        "al" => Some(ConditionCode::AL),
+        "nv" => Some(ConditionCode::NV),
+        _ => None,
+    }
+}
+
+fn parse_fence_mode(input: &str) -> IResult<&str, FenceType> {
+    alt((
+        value(FenceType::SY, tag("sy")),
+        value(FenceType::LD, tag("ld")),
+    ))
+    .parse(input)
+}
+
+fn parse_shifted_register(input: &str) -> IResult<&str, Operand> {
+    map(
+        (
+            parse_register,
+            preceded((char(','), space0), parse_shift_operation),
+            preceded((space0, char('#'), space0), parse_immediate),
+        ),
+        |(reg, shift_op, amount)| Operand::ShiftedRegister(reg, shift_op, amount),
+    )
+    .parse(input)
+}
+
+fn parse_register_list(input: &str) -> IResult<&str, Operand> {
+    map(
+        delimited(
+            char('{'),
+            separated_list0((char(','), space0), parse_register),
+            char('}'),
+        ),
+        Operand::RegisterList,
+    )
+    .parse(input)
+}
+
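+// The alternatives are ordered so that pre-indexed '[Xn, #imm]!' is tried
+// before plain '[Xn, #imm]'; otherwise the offset form would match first and
+// leave the trailing '!' unconsumed.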
+fn parse_addressing_mode(input: &str) -> IResult<&str, AddressingMode> {
+    alt((
+        // [Xn] - Base register only
+        map(
+            delimited(char('['), parse_register, char(']')),
+            AddressingMode::BaseRegister,
+        ),
+        // [Xn, #imm]! - Pre-indexed with writeback
+        map(
+            terminated(
+                delimited(
+                    char('['),
+                    (
+                        parse_register,
+                        preceded((char(','), space0), parse_immediate),
+                    ),
+                    char(']'),
+                ),
+                char('!'),
+            ),
+            |(reg, imm)| AddressingMode::PreIndexed(reg, imm),
+        ),
+        // [Xn, #imm] - Base register with offset
+        map(
+            delimited(
+                char('['),
+                (
+                    parse_register,
+                    preceded((char(','), space0), parse_immediate),
+                ),
+                char(']'),
+            ),
+            |(reg, imm)| AddressingMode::BaseRegisterWithOffset(reg, imm),
+        ),
+        // [Xn], #imm
+        map(
+            (
+                delimited(char('['), parse_register, char(']')),
+                preceded((char(','), space0), parse_immediate),
+            ),
+            |(reg, offset)| AddressingMode::PostIndexed(reg, offset),
+        ),
+    ))
+    .parse(input)
+}
+
+fn parse_operand(input: &str) -> IResult<&str, Operand> {
+    alt((
+        map(parse_addressing_mode, Operand::Memory),
+        map(parse_immediate, Operand::ImmediateValue),
+        map(parse_register, Operand::Register),
+        parse_shifted_register,
+        parse_register_list,
+        map(parse_label, Operand::Label),
+        map(parse_fence_mode, Operand::FenceMode),
+    ))
+    .parse(input)
+}
+
+fn parse_operands(input: &str) -> IResult<&str, Vec<Operand>> {
+    separated_list0((char(','), space0), preceded(space0, parse_operand)).parse(input)
+}
+
+fn parse_directive(input: &str) -> IResult<&str, Directive> {
+    preceded(
+        char('.'),
+        alt((
+            value(Directive::CFIStartProc, tag("cfi_startproc")),
+            value(Directive::CFIEndProc, tag("cfi_endproc")),
+            map(
+                (
+                    tag("size"),
+                    space1,
+                    parse_label,
+                    char(','),
+                    space0,
+                    recognize(take_till(|c| c == '\n' || c == '\r')),
+                ),
+                |(_, _, name, _, _, value)| Directive::Size(name, value.to_string()),
+            ),
+            map(
+                (
+                    tag("align"),
+                    space1,
+                    map_res(digit1, |s: &str| s.parse::<u32>()),
+                ),
+                |(_, _, align)| Directive::Align(align),
+            ),
+            map(
+                preceded(
+                    tag("p2align"),
+                    preceded(
+                        space1,
+                        (
+                            map_res(digit1, |s: &str| s.parse::<u32>()),
+                            opt(preceded(
+                                (char(','), space0),
+                                opt(map_res(digit1, |s: &str| s.parse::<u32>())),
+                            )),
+                            opt(preceded(
+                                (char(','), space0),
+                                opt(map_res(digit1, |s: &str| s.parse::<u32>())),
+                            )),
+                        ),
+                    ),
+                ),
+                |(p1, opt_p2, opt_p3)| {
+                    let p2 = opt_p2.flatten().unwrap_or(0);
+                    let p3 = opt_p3.flatten().unwrap_or(0);
+                    Directive::P2Align(p1, p2, p3)
+                },
+            ),
+            map(
+                (
+                    tag("type"),
+                    space1,
+                    parse_label,
+                    char(','),
+                    space0,
+                    recognize(take_till(|c| c == '\n' || c == '\r')),
+                ),
+                |(_, _, name, _, _, value)| Directive::Type(name, value.to_string()),
+            ),
+        )),
+    )
+    .parse(input)
+}
+
+pub fn parse_label_def(input: &str) -> IResult<&str, ArmInstruction> {
+    map(terminated(parse_label, char(':')), |label| {
+        ArmInstruction::Label(label)
+    })
+    .parse(input)
+}
+
+fn parse_memory_size_from_suffix(suffix: &str, operands: &[Operand]) -> MemorySize {
+    match suffix {
+        "b" => MemorySize::Byte,
+        "h" => MemorySize::Half,
+        _ if operands.iter().any(|op| {
+            if let Operand::Register(reg) = op { reg.reg_type == RegisterType::W } else { false }
+        }) => MemorySize::Word,
+        _ => MemorySize::Double,
+    }
+}
+
+fn parse_fence_instruction(
+    instr_name: &str,
+    operands: Vec<Operand>,
+) -> IResult<&str, ArmInstruction> {
+    if instr_name.to_lowercase().as_str() != "dmb" {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            "",
+            nom::error::ErrorKind::Tag,
+        )));
+    }
+
+    let mode = operands
+        .iter()
+        .find_map(|op| match op {
+            Operand::FenceMode(mode) => Some(*mode),
+            _ => None,
+        })
+        .unwrap_or(FenceType::SY);
+
+    Ok(("", ArmInstruction::Dmb(mode)))
+}
+
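+// Accepts both the three-operand form (e.g. add x0, x1, x2) and two-operand
+// aliases; a missing third operand is carried as None in the AST.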
+fn parse_arithmetic_instruction(
+    instr_name: &str,
+    operands: Vec<Operand>,
+) -> IResult<&str, ArmInstruction> {
+    if operands.len() < 2 {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            "",
+            nom::error::ErrorKind::Eof,
+        )));
+    }
+
+    let op = match instr_name.to_lowercase().as_str() {
+        "add" => ArithmeticOp::Add,
+        "sub" => ArithmeticOp::Sub,
+        "mul" => ArithmeticOp::Mul,
+        "and" => ArithmeticOp::And,
+        "orr" => ArithmeticOp::Orr,
+        "eor" => ArithmeticOp::Eor,
+        _ => {
+            return Err(nom::Err::Error(nom::error::Error::new(
+                "",
+                nom::error::ErrorKind::Tag,
+            )));
+        }
+    };
+
+    let third_operand = if operands.len() >= 3 {
+        Some(operands[2].clone())
+    } else {
+        None
+    };
+
+    Ok((
+        "",
+        ArmInstruction::Arithmetic(op, operands[0].clone(), operands[1].clone(), third_operand),
+    ))
+}
+
+fn parse_shift_instruction(
+    instr_name: &str,
+    operands: Vec<Operand>,
+) -> IResult<&str, ArmInstruction> {
+    if operands.len() != 2 {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            "",
+            nom::error::ErrorKind::Count,
+        )));
+    }
+
+    let op = match instr_name.to_lowercase().as_str() {
+        "lsl" => ShiftOp::Lsl,
+        "lsr" => ShiftOp::Lsr,
+        "asr" => ShiftOp::Asr,
+        _ => {
+            return Err(nom::Err::Error(nom::error::Error::new(
+                "",
+                nom::error::ErrorKind::Tag,
+            )));
+        }
+    };
+
+    Ok((
+        "",
+        ArmInstruction::Shift(op, operands[0].clone(), operands[1].clone()),
+    ))
+}
+
+fn parse_move_instruction(
+    instr_name: &str,
+    operands: Vec<Operand>,
+) -> IResult<&str, ArmInstruction> {
+    if operands.len() != 2 {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            "",
+            nom::error::ErrorKind::Count,
+        )));
+    }
+
+    let op = match instr_name.to_lowercase().as_str() {
+        "mov" => MoveOp::Mov,
+        "mvn" => MoveOp::Mvn,
+        "neg" => MoveOp::Neg,
+        _ => {
+            return Err(nom::Err::Error(nom::error::Error::new(
+                "",
+                nom::error::ErrorKind::Tag,
+            )));
+        }
+    };
+
+    Ok((
+        "",
+        ArmInstruction::Move(op, operands[0].clone(), operands[1].clone()),
+    ))
+}
+
+fn parse_memory_attrs(op: MemoryOp, instr_name: &str, operands: &[Operand]) -> MemoryAttrs {
+    let mut exclusive = false;
+    let mut acquire = false;
+    let mut release = false;
+
+    if instr_name.contains("xr") {
+        exclusive = true;
+    }
+
+    let suffix = &instr_name[instr_name.len() - 1..instr_name.len()];
+    let size = parse_memory_size_from_suffix(suffix, if op == MemoryOp::Store && exclusive { &operands[1..] } else { operands });
+
+    if instr_name.contains('a') {
+        acquire = true;
+    }
+
+    if instr_name.contains('l') {
+        release = true;
+    }
+
+    MemoryAttrs {
+        size,
+        exclusive,
+        acquire,
+        release,
+    }
+}
+
+fn parse_memory_instruction(
+    instr_name: &str,
+    operands: Vec<Operand>,
+) -> IResult<&str, ArmInstruction> {
+    if operands.len() < 2 {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            "",
+            nom::error::ErrorKind::Eof,
+        )));
+    }
+
+    let base_op = instr_name.to_lowercase();
+    let op = if base_op.starts_with("ld") || base_op.starts_with("l") {
+        MemoryOp::Load
+    } else if base_op.starts_with("swp") || base_op.starts_with("cas") {
+        MemoryOp::Rmw
+    } else if base_op.starts_with("st") || base_op.starts_with("s") {
+        MemoryOp::Store
+    } else {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            "",
+            nom::error::ErrorKind::Tag,
+        )));
+    };
+
+    let attrs = parse_memory_attrs(op, &base_op, &operands);
+
+    // Exclusive instructions (STXR, STLXR, etc.)
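+    // Operand order is (status, value, [addr]): the status register receives
+    // 0 when the store succeeds and 1 when the exclusive monitor was lost.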
+ if (base_op.contains("stlxr") || base_op.contains("stxr")) && operands.len() >= 3 { + return Ok(( + "", + ArmInstruction::MemoryExclusive( + op, + attrs, + operands[0].clone(), + operands[1].clone(), + operands[2].clone(), + ), + )); + } + + // LSE atomic memory operations (LD*/ST*) + if base_op.starts_with("ldadd") + || base_op.starts_with("ldeor") + || base_op.starts_with("ldclr") + || base_op.starts_with("ldset") + || base_op.starts_with("ldumax") + || base_op.starts_with("stadd") + || base_op.starts_with("steor") + || base_op.starts_with("stclr") + || base_op.starts_with("stset") + || base_op.starts_with("stumax") + || base_op.starts_with("swp") + || base_op.starts_with("cas") + { + let lse_op = if base_op.contains("add") { + LSEop::Add + } else if base_op.contains("eor") { + LSEop::Eor + } else if base_op.contains("clr") { + LSEop::Clr + } else if base_op.contains("set") { + LSEop::Set + } else if base_op.contains("max") || base_op.contains("umax") { + LSEop::Max + } else if base_op.contains("swp") { + LSEop::Swp + } else if base_op.contains("cas") { + LSEop::Cas + } else { + unreachable!() + }; + + if base_op.starts_with("ld") || base_op.starts_with("swp") || base_op.starts_with("cas") { + if operands.len() >= 3 { + return Ok(( + "", + ArmInstruction::MemoryLSE( + op, + attrs, + lse_op, + operands[0].clone(), + operands[1].clone(), + operands[2].clone(), + ), + )); + } else { + return Err(nom::Err::Error(nom::error::Error::new( + "", + nom::error::ErrorKind::Eof, + ))); + } + } else if base_op.starts_with("st") { + if operands.len() >= 2 { + return Ok(( + "", + ArmInstruction::MemoryLSE( + op, + attrs, + lse_op, + operands[0].clone(), // dummy + operands[0].clone(), + operands[1].clone(), + ), + )); + } else { + return Err(nom::Err::Error(nom::error::Error::new( + "", + nom::error::ErrorKind::Eof, + ))); + } + } else if base_op.starts_with("swp") { + + } + } + + // Default: plain memory op + Ok(( + "", + ArmInstruction::Memory(op, attrs, operands[0].clone(), operands[1].clone()), + )) +} + +fn parse_pair_memory_instruction( + instr_name: &str, + operands: Vec, +) -> IResult<&str, ArmInstruction> { + if instr_name != "stp" && instr_name != "ldp" { + return Err(nom::Err::Error(nom::error::Error::new( + "", + nom::error::ErrorKind::Tag, + ))); + } + + if operands.len() != 3 { + return Err(nom::Err::Error(nom::error::Error::new( + "", + nom::error::ErrorKind::Count, + ))); + } + + if let (Operand::Register(reg1), Operand::Register(reg2), Operand::Memory(addr)) = ( + operands[0].clone(), + operands[1].clone(), + operands[2].clone(), + ) { + if reg1.reg_type != reg2.reg_type { + return Err(nom::Err::Error(nom::error::Error::new( + "", + nom::error::ErrorKind::Verify, + ))); + } + + let op = if instr_name == "ldp" { + MemoryOp::Load + } else { + MemoryOp::Store + }; + + Ok(("", ArmInstruction::MemoryPair(op, reg1, reg2, addr))) + } else { + Err(nom::Err::Error(nom::error::Error::new( + "", + nom::error::ErrorKind::Verify, + ))) + } +} + +fn parse_branch_instruction( + instr_name: &str, + operands: Vec, +) -> IResult<&str, ArmInstruction> { + let instr_name = instr_name.to_lowercase(); + + match instr_name.as_str() { + "bl" => { + if operands.len() != 1 { + return Err(nom::Err::Error(nom::error::Error::new( + "", + nom::error::ErrorKind::Count, + ))); + } + return Ok(("", ArmInstruction::BranchLink(operands[0].clone()))); + } + + "br" => { + if operands.len() != 1 { + return Err(nom::Err::Error(nom::error::Error::new( + "", + nom::error::ErrorKind::Count, + ))); + } + return Ok(("", 
+fn parse_branch_instruction(
+    instr_name: &str,
+    operands: Vec<Operand>,
+) -> IResult<&str, ArmInstruction> {
+    let instr_name = instr_name.to_lowercase();
+
+    match instr_name.as_str() {
+        "bl" => {
+            if operands.len() != 1 {
+                return Err(nom::Err::Error(nom::error::Error::new(
+                    "",
+                    nom::error::ErrorKind::Count,
+                )));
+            }
+            return Ok(("", ArmInstruction::BranchLink(operands[0].clone())));
+        }
+
+        "br" => {
+            if operands.len() != 1 {
+                return Err(nom::Err::Error(nom::error::Error::new(
+                    "",
+                    nom::error::ErrorKind::Count,
+                )));
+            }
+            return Ok(("", ArmInstruction::BranchRegister(operands[0].clone())));
+        }
+
+        "blr" => {
+            if operands.len() != 1 {
+                return Err(nom::Err::Error(nom::error::Error::new(
+                    "",
+                    nom::error::ErrorKind::Count,
+                )));
+            }
+            return Ok(("", ArmInstruction::BranchLinkRegister(operands[0].clone())));
+        }
+
+        "ret" => {
+            let op = if operands.is_empty() {
+                None
+            } else {
+                Some(operands[0].clone())
+            };
+            return Ok(("", ArmInstruction::Return(op)));
+        }
+
+        "cbz" => {
+            if operands.len() != 2 {
+                return Err(nom::Err::Error(nom::error::Error::new(
+                    "",
+                    nom::error::ErrorKind::Count,
+                )));
+            }
+            return Ok((
+                "",
+                ArmInstruction::Branch(
+                    Some(Condition::Zero(operands[0].clone())),
+                    operands[1].clone(),
+                ),
+            ));
+        }
+
+        "cbnz" => {
+            if operands.len() != 2 {
+                return Err(nom::Err::Error(nom::error::Error::new(
+                    "",
+                    nom::error::ErrorKind::Count,
+                )));
+            }
+            return Ok((
+                "",
+                ArmInstruction::Branch(
+                    Some(Condition::NotZero(operands[0].clone())),
+                    operands[1].clone(),
+                ),
+            ));
+        }
+
+        "tbz" => {
+            if operands.len() != 3 {
+                return Err(nom::Err::Error(nom::error::Error::new(
+                    "",
+                    nom::error::ErrorKind::Count,
+                )));
+            }
+            return Ok((
+                "",
+                ArmInstruction::TestBitBranch(
+                    false,
+                    operands[0].clone(),
+                    operands[1].clone(),
+                    operands[2].clone(),
+                ),
+            ));
+        }
+
+        "tbnz" => {
+            if operands.len() != 3 {
+                return Err(nom::Err::Error(nom::error::Error::new(
+                    "",
+                    nom::error::ErrorKind::Count,
+                )));
+            }
+            return Ok((
+                "",
+                ArmInstruction::TestBitBranch(
+                    true,
+                    operands[0].clone(),
+                    operands[1].clone(),
+                    operands[2].clone(),
+                ),
+            ));
+        }
+
+        _ => {}
+    }
+
+    if operands.len() != 1 {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            "",
+            nom::error::ErrorKind::Count,
+        )));
+    }
+
+    if instr_name == "b" {
+        return Ok(("", ArmInstruction::Branch(None, operands[0].clone())));
+    }
+
+    if instr_name.starts_with('b') && instr_name.len() > 1 {
+        let cond_str = &instr_name[1..];
+        let condition = parse_condition_code(cond_str);
+
+        if let Some(cond) = condition {
+            return Ok((
+                "",
+                ArmInstruction::Branch(Some(Condition::Code(cond)), operands[0].clone()),
+            ));
+        }
+    }
+
+    Err(nom::Err::Error(nom::error::Error::new(
+        "",
+        nom::error::ErrorKind::Tag,
+    )))
+}
+
+fn parse_comparison_instruction(
+    instr_name: &str,
+    operands: Vec<Operand>,
+) -> IResult<&str, ArmInstruction> {
+    if instr_name.to_lowercase() != "cmp" {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            "",
+            nom::error::ErrorKind::Tag,
+        )));
+    }
+
+    // Guard against a malformed cmp before indexing the operands.
+    if operands.len() < 2 {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            "",
+            nom::error::ErrorKind::Count,
+        )));
+    }
+
+    // @NOTE: we ignore sign extension here and always assume uxtb
+    Ok((
+        "",
+        ArmInstruction::Cmp(operands[0].clone(), operands[1].clone()),
+    ))
+}
+
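+/// Decodes `csel dst, src1, src2, cond`. The condition arrives as a label
+/// operand; unknown condition names conservatively fall back to `AL`.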
+fn parse_conditional_select(
+    instr_name: &str,
+    operands: Vec<Operand>,
+) -> IResult<&str, ArmInstruction> {
+    if instr_name.to_lowercase() != "csel" || operands.len() < 4 {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            "",
+            nom::error::ErrorKind::Tag,
+        )));
+    }
+
+    let condition = if let Operand::Label(condition_str) = &operands[3] {
+        match condition_str.to_lowercase().as_str() {
+            "eq" => ConditionCode::EQ,
+            "ne" => ConditionCode::NE,
+            "cs" => ConditionCode::CS,
+            "cc" => ConditionCode::CC,
+            "mi" => ConditionCode::MI,
+            "pl" => ConditionCode::PL,
+            "vs" => ConditionCode::VS,
+            "vc" => ConditionCode::VC,
+            "hi" => ConditionCode::HI,
+            "ls" => ConditionCode::LS,
+            "ge" => ConditionCode::GE,
+            "lt" => ConditionCode::LT,
+            "gt" => ConditionCode::GT,
+            "le" => ConditionCode::LE,
+            "al" => ConditionCode::AL,
+            "nv" => ConditionCode::NV,
+            _ => ConditionCode::AL,
+        }
+    } else {
+        ConditionCode::AL
+    };
+
+    Ok((
+        "",
+        ArmInstruction::Csel(
+            operands[0].clone(),
+            operands[1].clone(),
+            operands[2].clone(),
+            condition,
+        ),
+    ))
+}
+
+fn parse_instruction(input: &str) -> IResult<&str, ArmInstruction> {
+    let (input, instr_name) = take_while1(|c: char| c.is_alphabetic() || c == '.')(input)?;
+    let (input, operands) = parse_operands(input)?;
+    let (input, _) = take_till(|c| c == '\n')(input)?;
+    let (input, _) = opt(char('\n')).parse(input)?;
+
+    let result = alt((
+        |_: &str| parse_arithmetic_instruction(instr_name, operands.clone()),
+        |_: &str| parse_shift_instruction(instr_name, operands.clone()),
+        |_: &str| parse_move_instruction(instr_name, operands.clone()),
+        |_: &str| parse_pair_memory_instruction(instr_name, operands.clone()),
+        |_: &str| parse_memory_instruction(instr_name, operands.clone()),
+        |_: &str| parse_branch_instruction(instr_name, operands.clone()),
+        |_: &str| parse_comparison_instruction(instr_name, operands.clone()),
+        |_: &str| parse_conditional_select(instr_name, operands.clone()),
+        |_: &str| parse_fence_instruction(instr_name, operands.clone()),
+    ))
+    .parse(input);
+
+    result.map(|(_, instr)| (input, instr))
+}
+
+fn parse_line(input: &str) -> IResult<&str, ArmInstruction> {
+    if input.is_empty() {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            input,
+            nom::error::ErrorKind::Eof,
+        )));
+    }
+    let (input, line) = take_till(|c| c == '\n')(input)?;
+    let (input, _) = opt(char('\n')).parse(input)?;
+    let line_trimmed = line.trim();
+
+    if line_trimmed.is_empty() {
+        return Ok((input, ArmInstruction::Unhandled(String::new())));
+    }
+
+    let result = alt((
+        parse_label_def,
+        map(parse_directive, ArmInstruction::Directive),
+        parse_instruction,
+    ))
+    .parse(line_trimmed);
+
+    match result {
+        Ok((remaining, instr)) if remaining.is_empty() => Ok((input, instr)),
+        _ => Ok((input, ArmInstruction::Unhandled(line.to_string()))),
+    }
+}
+
+pub fn parse_arm_assembly(input: &str) -> IResult<&str, Vec<ArmInstruction>> {
+    let (input, _) = multispace0(input)?;
+    let (input, instructions) = many0(parse_line).parse(input)?;
+    Ok((input, instructions))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parse_register() {
+        let (_, reg) = parse_register("x0").unwrap();
+        assert_eq!(
+            reg,
+            Register {
+                reg_type: RegisterType::X,
+                number: 0
+            }
+        );
+
+        let (_, reg) = parse_register("w12").unwrap();
+        assert_eq!(
+            reg,
+            Register {
+                reg_type: RegisterType::W,
+                number: 12
+            }
+        );
+
+        let (_, reg) = parse_register("v31").unwrap();
+        assert_eq!(
+            reg,
+            Register {
+                reg_type: RegisterType::V,
+                number: 31
+            }
+        );
+    }
+
+    #[test]
+    fn test_parse_immediate() {
+        let (_, imm) = parse_immediate("42").unwrap();
+        assert_eq!(imm, 42);
+
+        let (_, imm) = parse_immediate("#-100").unwrap();
+        assert_eq!(imm, -100);
+
+        let (_, imm) = parse_immediate("#0x1F").unwrap();
+        assert_eq!(imm, 31);
+    }
+
+    #[test]
+    fn test_parse_addressing_mode() {
+        let (_, addr) = parse_addressing_mode("[x0]").unwrap();
+        assert_eq!(
+            addr,
+            AddressingMode::BaseRegister(Register {
+                reg_type: RegisterType::X,
+                number: 0
+            })
+        );
+
+        let (_, addr) = parse_addressing_mode("[x1, #16]").unwrap();
+        assert_eq!(
+            addr,
+            AddressingMode::BaseRegisterWithOffset(
+                Register {
+                    reg_type: RegisterType::X,
+                    number: 1
+                },
+                16
+            )
+        );
+
+        let (_, addr) = parse_addressing_mode("[x2, #16]!").unwrap();
+        assert_eq!(
+            addr,
+            AddressingMode::PreIndexed(
+                Register {
+                    reg_type: RegisterType::X,
+                    number: 2
+                },
+                16
+            )
+        );
+
+        let (_, addr) = parse_addressing_mode("[sp, #16]!").unwrap();
+
assert_eq!( + addr, + AddressingMode::PreIndexed( + Register { + reg_type: RegisterType::SP, + number: 0 + }, + 16 + ) + ); + } + + #[test] + fn test_parse_arithmetic_instruction() { + let operands = vec![ + Operand::Register(Register { + reg_type: RegisterType::X, + number: 0, + }), + Operand::Register(Register { + reg_type: RegisterType::X, + number: 1, + }), + Operand::Register(Register { + reg_type: RegisterType::X, + number: 2, + }), + ]; + let (_, instr) = parse_arithmetic_instruction("add", operands).unwrap(); + assert_eq!( + instr, + ArmInstruction::Arithmetic( + ArithmeticOp::Add, + Operand::Register(Register { + reg_type: RegisterType::X, + number: 0 + }), + Operand::Register(Register { + reg_type: RegisterType::X, + number: 1 + }), + Some(Operand::Register(Register { + reg_type: RegisterType::X, + number: 2 + })) + ) + ); + + let operands = vec![ + Operand::Register(Register { + reg_type: RegisterType::W, + number: 0, + }), + Operand::Register(Register { + reg_type: RegisterType::W, + number: 1, + }), + Operand::ImmediateValue(42), + ]; + let (_, instr) = parse_arithmetic_instruction("sub", operands).unwrap(); + assert_eq!( + instr, + ArmInstruction::Arithmetic( + ArithmeticOp::Sub, + Operand::Register(Register { + reg_type: RegisterType::W, + number: 0 + }), + Operand::Register(Register { + reg_type: RegisterType::W, + number: 1 + }), + Some(Operand::ImmediateValue(42)) + ) + ); + } + + #[test] + fn test_parse_memory_instruction() { + let operands = vec![ + Operand::Register(Register { + reg_type: RegisterType::X, + number: 0, + }), + Operand::Memory(AddressingMode::BaseRegisterWithOffset( + Register { + reg_type: RegisterType::X, + number: 1, + }, + 16, + )), + ]; + let (_, instr) = parse_memory_instruction("ldr", operands).unwrap(); + match instr { + ArmInstruction::Memory(op, attrs, dst, src) => { + assert_eq!(op, MemoryOp::Load); + assert_eq!(attrs.size, MemorySize::Double); + assert_eq!( + dst, + Operand::Register(Register { + reg_type: RegisterType::X, + number: 0 + }) + ); + if let Operand::Memory(AddressingMode::BaseRegisterWithOffset(reg, offset)) = src { + assert_eq!(reg.number, 1); + assert_eq!(offset, 16); + } else { + panic!("Expected memory operand with base register and offset"); + } + } + _ => panic!("Expected memory instruction"), + } + } + + #[test] + fn test_parse_branch_instruction() { + let operands = vec![Operand::Label(String::from("label1"))]; + let (_, instr) = parse_branch_instruction("b", operands).unwrap(); + assert_eq!( + instr, + ArmInstruction::Branch(None, Operand::Label(String::from("label1"))) + ); + + let operands = vec![Operand::Label(String::from("label2"))]; + let (_, instr) = parse_branch_instruction("beq", operands).unwrap(); + assert_eq!( + instr, + ArmInstruction::Branch( + Some(Condition::Code(ConditionCode::EQ)), + Operand::Label(String::from("label2")) + ) + ); + } + + #[test] + fn test_parse_directive() { + let (_, dir) = parse_directive(".align 4").unwrap(); + assert_eq!(dir, Directive::Align(4)); + + let (_, dir) = parse_directive(".size function, 128").unwrap(); + assert_eq!( + dir, + Directive::Size(String::from("function"), String::from("128")) + ); + } + + #[test] + fn test_parse_full_instruction() { + let (_, instr) = parse_instruction("add x0, x1, x2").unwrap(); + match instr { + ArmInstruction::Arithmetic(op, dst, src1, Some(src2)) => { + assert_eq!(op, ArithmeticOp::Add); + assert_eq!( + dst, + Operand::Register(Register { + reg_type: RegisterType::X, + number: 0 + }) + ); + assert_eq!( + src1, + Operand::Register(Register 
{ + reg_type: RegisterType::X, + number: 1 + }) + ); + assert_eq!( + src2, + Operand::Register(Register { + reg_type: RegisterType::X, + number: 2 + }) + ); + } + _ => panic!("Expected arithmetic instruction"), + } + + let (_, instr) = parse_instruction("mov x0, #42").unwrap(); + match instr { + ArmInstruction::Move(op, dst, src) => { + assert_eq!(op, MoveOp::Mov); + assert_eq!( + dst, + Operand::Register(Register { + reg_type: RegisterType::X, + number: 0 + }) + ); + assert_eq!(src, Operand::ImmediateValue(42)); + } + _ => panic!("Expected move instruction"), + } + } + + #[test] + fn test_parse_arm_assembly() { + let input = r#" + .align 4 + .type function, %function + function: + stp x29, x30, [sp, #-16]! + mov x29, sp + add x0, x0, #1 + bl helper_function + ldp x29, x30, [sp], #16 + ret + helper_function: + stp x29, x30, [sp, #-16]! + mov x0, #42 + .block_function: + ldp x29, x30, [sp], #16 + ret"#; + + let (_unparsed, instructions) = parse_arm_assembly(input).unwrap(); + assert_eq!(instructions.len(), 15); + + match &instructions[0] { + ArmInstruction::Directive(Directive::Align(4)) => {} + _ => panic!("Expected .align 4 directive"), + } + + match &instructions[2] { + ArmInstruction::Label(label) => assert_eq!(label, "function"), + _ => panic!("Expected function label"), + } + + match &instructions[6] { + ArmInstruction::BranchLink(Operand::Label(label)) => { + assert_eq!(label, "helper_function") + } + _ => panic!("Expected branch and link instruction"), + } + } + + #[test] + fn test_parse_beginning() { + let input: &str = r#" + .arch armv8-a + .text + .align 2 + .type test_function, %function + test_function: + .LFB0: + .cfi_startproc + mov x0, 0 + ret + .cfi_endproc + .LFE0: + .size test_function, .-test_function"#; + + let (_unparsed, instructions) = parse_arm_assembly(input).unwrap(); + + assert_eq!(instructions.len(), 12); + } +} diff --git a/verify/src/arm/transform.rs b/verify/src/arm/transform.rs new file mode 100644 index 00000000..a794a6c6 --- /dev/null +++ b/verify/src/arm/transform.rs @@ -0,0 +1,187 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ */
+use super::*;
+
+pub fn extract_arm_functions(
+    parsed: Vec<ArmInstruction>,
+    names: Option<&[String]>,
+    valid_prefix: &[&str],
+) -> Vec<ArmFunction> {
+    let mut functions = Vec::new();
+    let mut current_function: Option<(String, Vec<ArmInstruction>)> = None;
+
+    for instr in parsed {
+        match &instr {
+            ArmInstruction::Label(name) if !name.starts_with(".") => {
+                if let Some((prev_name, prev_instrs)) = current_function {
+                    functions.push(ArmFunction {
+                        name: prev_name,
+                        instructions: prev_instrs,
+                    });
+                }
+                current_function = Some((name.clone(), vec![instr.clone()]));
+            }
+            _ => {
+                if let Some((_, ref mut instrs)) = current_function {
+                    instrs.push(instr.clone());
+                }
+            }
+        }
+    }
+
+    if let Some((name, instrs)) = current_function {
+        functions.push(ArmFunction {
+            name,
+            instructions: instrs,
+        });
+    }
+
+    match names {
+        Some(names) => {
+            let valid_ns = valid_prefix;
+
+            functions
+                .into_iter()
+                .filter(|f| {
+                    if f.name.starts_with("vatomic") {
+                        let rest = &f.name[7..]; // Remove "vatomic"
+                        if let Some((n, suffix)) = rest.split_once('_') {
+                            valid_ns.contains(&n) && names.contains(&suffix.to_string())
+                        } else {
+                            false
+                        }
+                    } else {
+                        false
+                    }
+                })
+                .collect()
+        }
+        None => functions,
+    }
+}
+
+pub fn transform_labels(function: &ArmFunction) -> ArmFunction {
+    use std::collections::HashMap;
+
+    let mut label_map: HashMap<String, String> = HashMap::new();
+    let mut forward_map: HashMap<String, String> = HashMap::new();
+    let mut counter = 0usize;
+    let mut new_instructions = Vec::new();
+
+    for instr in &function.instructions {
+        match instr {
+            ArmInstruction::Label(name) => {
+                // If it was a forward ref, bind it and remove from forward_map
+                let new_name = if let Some(forward) = forward_map.remove(name) {
+                    label_map.insert(name.clone(), forward.clone());
+                    forward
+                } else {
+                    let new_label = format!("L{}", counter + 1);
+                    counter += 1;
+                    label_map.insert(name.clone(), new_label.clone());
+                    new_label
+                };
+                new_instructions.push(ArmInstruction::Label(new_name));
+            }
+            ArmInstruction::Branch(cond, Operand::Label(name)) => {
+                // Handle backward/forward shorthand in name if desired
+                let new_name = if name.ends_with('b') {
+                    let base = &name[..name.len() - 1];
+                    label_map.get(base).cloned().unwrap_or_else(|| {
+                        // No backward def yet; create one
+                        let new_label = format!("L{}", counter + 1);
+                        counter += 1;
+                        label_map.insert(base.to_string(), new_label.clone());
+                        new_label
+                    })
+                } else if name.ends_with('f') {
+                    let base = &name[..name.len() - 1];
+                    forward_map.get(base).cloned().unwrap_or_else(|| {
+                        // First forward ref for this base
+                        let new_label = format!("L{}", counter + 1);
+                        counter += 1;
+                        forward_map.insert(base.to_string(), new_label.clone());
+                        new_label
+                    })
+                } else {
+                    // Normal named label
+                    if let Some(lbl) = label_map.get(name) {
+                        lbl.clone()
+                    } else if let Some(lbl) = forward_map.get(name) {
+                        lbl.clone()
+                    } else {
+                        // Treat it like a first forward ref
+                        let new_label = format!("L{}", counter + 1);
+                        counter += 1;
+                        forward_map.insert(name.to_string(), new_label.clone());
+                        new_label
+                    }
+                };
+                new_instructions.push(ArmInstruction::Branch(
+                    cond.clone(),
+                    Operand::Label(new_name),
+                ));
+            }
+            ArmInstruction::BranchLink(Operand::Label(name)) => {
+                let new_name = label_map.get(name)
+                    .cloned()
+                    .or_else(|| {
+                        // Treat as forward if unknown
+                        if !forward_map.contains_key(name) {
+                            let new_label = format!("L{}", counter + 1);
+                            counter += 1;
+                            forward_map.insert(name.clone(), new_label.clone());
+                            Some(new_label)
+                        } else {
+                            forward_map.get(name).cloned()
+                        }
+                    })
+
.unwrap_or_else(|| name.clone()); + new_instructions.push(ArmInstruction::BranchLink(Operand::Label(new_name))); + } + ArmInstruction::TestBitBranch(cond, op1, op2, Operand::Label(name)) => { + let new_name = label_map.get(name) + .cloned() + .or_else(|| { + if !forward_map.contains_key(name) { + let new_label = format!("L{}", counter + 1); + counter += 1; + forward_map.insert(name.clone(), new_label.clone()); + Some(new_label) + } else { + forward_map.get(name).cloned() + } + }) + .unwrap_or_else(|| name.clone()); + new_instructions.push(ArmInstruction::TestBitBranch( + *cond, + op1.clone(), + op2.clone(), + Operand::Label(new_name), + )); + } + _ => new_instructions.push(instr.clone()), + } + } + + ArmFunction { + name: function.name.clone(), + instructions: new_instructions, + } +} + +pub fn remove_directives(function: &ArmFunction) -> ArmFunction { + let new_instructions = function + .instructions + .iter() + .filter(|instruction| !matches!(instruction, ArmInstruction::Directive(_))) + .cloned() + .collect(); + + ArmFunction { + name: function.name.clone(), + instructions: new_instructions, + } +} diff --git a/verify/src/generate.rs b/verify/src/generate.rs new file mode 100644 index 00000000..ad460fd0 --- /dev/null +++ b/verify/src/generate.rs @@ -0,0 +1,70 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +use std::collections::HashSet; + +use crate::{loops::loop_headers, BoogieInstruction, SideEffect}; + +pub fn boogie_to_string(instructions: &[BoogieInstruction]) -> String { + let mut code = String::new(); + + let loop_header_idx = loop_headers(instructions); + let backward_branch_targets: HashSet<_> = loop_header_idx + .iter() + .copied() + .map(|i| match &instructions[i] { + BoogieInstruction::Label(name) => name.clone(), + _ => unreachable!(), + }) + .collect(); + + for instr in instructions { + match instr { + BoogieInstruction::Label(name) => { + code.push_str(&format!("{}:\n", name)); + + if backward_branch_targets.contains(name) { + code.push_str(" assert last_store < old(step);\n"); + code.push_str(" assert step >= old(step);\n"); + code.push_str(" assert (forall i : int, e : Effect :: old(step) <= i && i < step && effects[i] == e ==> ! (is_write(e)));\n\n"); + } + } + BoogieInstruction::Instr(name, side_effects, out, ops) => { + code.push_str(&format!( + " call {} := execute{}({}({}));\n", + out, + match *side_effects { SideEffect::Global => "", SideEffect::Local => "_local" }, + name, + ops.join(",") + )); + } + BoogieInstruction::Branch(target, condition) => { + if condition == "true" { + /* this special case is required to make the control flow explicit to boogie, which otherwise makes an incorrect CFG that may be irreducible. */ + if target.is_empty() { + code.push_str(&format!(" assume false; return;\n")); + } else { + code.push_str(&format!(" goto {};\n", &target.join(","),)); + } + } else { + if target.is_empty() { + code.push_str(&format!(" assume (! 
{});\n", condition)); + } else { + code.push_str(&format!(" if ({}) {{ goto {}; }}\n", condition, &target.join(","),)); + } + } + } + BoogieInstruction::Return => { + code.push_str(" return;\n"); + } + BoogieInstruction::Unhandled(ins) => { + code.push_str(&format!(" // Unhandled: {}\n", ins)); + } + BoogieInstruction::Comment(comment) => { + code.push_str(&format!(" // {}\n", comment)); + } + } + } + code +} diff --git a/verify/src/lib.rs b/verify/src/lib.rs new file mode 100644 index 00000000..45263f13 --- /dev/null +++ b/verify/src/lib.rs @@ -0,0 +1,398 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * SPDX-License-Identifier: MIT + */ +use generate::boogie_to_string; +use loops::unroll; +use phf::phf_map; +use std::{ + fs, io::Write, path::Path +}; + +use lazy_static::lazy_static; +use regex::Regex; + +pub mod arm; +pub mod generate; +pub mod riscv; +pub mod loops; +pub const DUMMY_REG: &str = "dummy"; + +pub struct AssemblyFunction<'a> { + pub name: &'a str, + pub code: Vec<&'a str>, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum FenceConvention { + RCsc, + TrailingFence, + LeadingFence, + Mixed, +} + +pub trait Arch { + fn name(&self) -> String; + fn all_registers(&self) -> Vec; + fn width(&self) -> Width; + fn parse_functions( + &self, + assembly: &str, + ) -> Result, Box>; + fn state(&self) -> String; + fn fence_convention(&self) -> FenceConvention; +} + + +#[derive(Debug, Clone, PartialEq, Copy)] +pub enum SideEffect { + Local, + Global, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum BoogieInstruction { + Label(String), + Instr(String, SideEffect, String, Vec), + Branch(Vec, String), + Unhandled(String), + Comment(String), + Return, +} + +#[derive(Debug, Clone, PartialEq, Copy)] +pub enum ReturnType { + Return, + NoReturn, +} + +#[derive(Debug, Clone, PartialEq, Copy)] +pub enum FunctionClass { + Read, + Write, + Await(ReturnType), + AwaitRmw, + Rmw(ReturnType), + Fence, +} + +static FENCE_ORDERING: phf::Map<&'static str, &'static str> = phf_map! { + "" => "order_fence_sc", + "_acq" => "order_fence_acq", + "_rel" => "order_fence_rel", + "_rlx" => "order_rlx", +}; + +static ORDERING: phf::Map<&'static str, (&'static str, &'static str)> = phf_map! { + "" => ("order_acq_sc","order_rel_sc"), + "_acq" => ("order_acq","order_rlx"), + "_rel" => ("order_rlx","order_rel"), + "_rlx" => ("order_rlx","order_rlx"), +}; + +static AWAIT_OP: phf::Map<&'static str, &'static str> = phf_map! { + "eq" => "eq", + "neq" => "neq", + "lt" => "lt", + "le" => "le", + "gt" => "gt", + "ge" => "ge", +}; + +static RMW_OP: phf::Map<&'static str, &'static str> = phf_map! { + "cmpxchg" => "cmpset", + "add" => "add_op", + "sub" => "sub_op", + "xchg" => "set_op", + "set" => "set_op", + "dec" => "dec_op", + "inc" => "inc_op", + "min" => "min_op", + "max" => "max_op", + "and" => "and_op", + "xor" => "xor_op", + "or" => "or_op" +}; + +static ATOMIC_TYPE: phf::Map<&'static str, AtomicType> = phf_map! { + "64" => AtomicType::V64, + "sz" => AtomicType::VSZ, + "ptr" => AtomicType::VPTR, + "32" => AtomicType::V32, + "16" => AtomicType::V16, + "8" => AtomicType::V8, + "" => AtomicType::VFENCE, +}; + +lazy_static! 
+lazy_static! {
+    static ref RETURNING_RMW : Regex = Regex::new(r"get|cmpxchg|xchg").unwrap();
+    // @TODO: generate automatically from the keys
+    static ref RMW_RE : Regex = Regex::new(r"(?<get_>get_)?(?<type>add|sub|set|cmpxchg|min|max|xchg|dec|inc|and|xor|or)(?<_get>_get)?").unwrap();
+    static ref ORDERING_RE : Regex = Regex::new(r"(_rlx|_acq|_rel|)$").unwrap();
+    static ref AWAIT_RE : Regex = Regex::new(r"await_([^_]+)").unwrap();
+    static ref WIDTH_RE : Regex = Regex::new(r"8|16|32|sz|ptr|64").unwrap();
+}
+
+fn classify_function(name: &str) -> FunctionClass {
+    if name.contains("read") {
+        FunctionClass::Read
+    } else if name.contains("write") {
+        FunctionClass::Write
+    } else if name.contains("await") {
+        if RMW_RE.is_match(name) {
+            FunctionClass::AwaitRmw
+        } else {
+            FunctionClass::Await(if name.contains("eq") {
+                ReturnType::NoReturn
+            } else {
+                ReturnType::Return
+            })
+        }
+    } else if name.contains("fence") {
+        FunctionClass::Fence
+    } else {
+        let ret = if RETURNING_RMW.captures(name).is_some() {
+            ReturnType::Return
+        } else {
+            ReturnType::NoReturn
+        };
+        FunctionClass::Rmw(ret)
+    }
+}
+
+fn get_templates_for_type(func_type: FunctionClass) -> Vec<&'static str> {
+    match func_type {
+        FunctionClass::Read => vec!["read_only.bpl", "read.bpl"],
+        FunctionClass::Write => vec!["write.bpl", "must_store.bpl"],
+        FunctionClass::Await(ReturnType::NoReturn) => vec!["await.bpl"],
+        FunctionClass::Await(ReturnType::Return) => vec!["read_only.bpl", "read.bpl", "await.bpl"],
+        FunctionClass::Rmw(ReturnType::NoReturn) => vec!["write.bpl", "rmw.bpl"],
+        FunctionClass::Rmw(ReturnType::Return) => vec!["read.bpl", "write.bpl", "rmw.bpl"],
+        FunctionClass::AwaitRmw => vec!["read.bpl", "write.bpl", "rmw.bpl", "await.bpl"],
+        FunctionClass::Fence => vec!["fence.bpl"],
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct BoogieFunction {
+    pub name: String,
+    pub address: String,
+    pub input1: String,
+    pub input2: String,
+    pub output: String,
+    pub instructions: Vec<BoogieInstruction>,
+}
+
+pub trait ToBoogie {
+    fn to_boogie(self) -> BoogieFunction;
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum Width {
+    Thin,
+    Wide,
+}
+
+pub fn wide_arch_widths(type_name: AtomicType) -> u32 {
+    match type_name {
+        AtomicType::V8 => 1,
+        AtomicType::V16 => 2,
+        AtomicType::V32 => 4,
+        _ => 8,
+    }
+}
+
+pub fn thin_arch_widths(type_name: AtomicType) -> u32 {
+    match type_name {
+        AtomicType::V8 => 1,
+        AtomicType::V16 => 2,
+        AtomicType::VSZ | AtomicType::VPTR | AtomicType::V32 => 4,
+        _ => 8,
+    }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum AtomicType {
+    V64,
+    VSZ,
+    VPTR,
+    V32,
+    V16,
+    V8,
+    VFENCE,
+}
+
+pub fn atomic_types(function_name: &str) -> AtomicType {
+    WIDTH_RE
+        .captures(function_name)
+        .map(|c| ATOMIC_TYPE[&c[0]])
+        .unwrap_or(AtomicType::VFENCE)
+}
+
+impl AtomicType {
+    pub fn type_width(&self, arch_width: Width) -> u32 {
+        match arch_width {
+            Width::Thin => thin_arch_widths(*self),
+            Width::Wide => wide_arch_widths(*self),
+        }
+    }
+}
+
+fn get_assumptions(
+    func_type: &str,
+    load_order: &str,
+    store_order: &str,
+    rmw_op: &str,
+    ret_op: &str,
+    cond: &str,
+) -> String {
+    match func_type {
+        "fence.bpl" => std::format!(" assume (fence_order == {});\n", load_order),
+        "read.bpl" => std::format!(
+            " assume (load_order == {});\n assume (ret == {});\n",
+            load_order,
+            ret_op
+        ),
+        "write.bpl" => std::format!(" assume (store_order == {});\n", store_order),
+        "await.bpl" => std::format!(" assume (cond == {});\n", cond),
+        "rmw.bpl" => std::format!(" assume (op == {});\n", rmw_op),
+        _ => "".to_string(),
+    }
+}
+
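+/// Instantiates every proof-obligation template that applies to `function`,
+/// splicing in the generated Boogie body plus the per-template assumptions,
+/// and writes the results to `output_dir/<function name>/<template>`.
+///
+/// Illustrative call shape (the paths here are made-up examples):
+/// ```ignore
+/// generate_boogie_file(&func, "out", "templates", &arch, /* unroll */ true)?;
+/// ```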
+pub fn generate_boogie_file( + function: &BoogieFunction, + output_dir: &str, + template_dir: &str, + arch: &dyn Arch, + unroll_loop: bool, +) -> Result<(), std::io::Error> { + let func_type = classify_function(&function.name); + let mut templates = get_templates_for_type(func_type); + templates.push("registers.bpl"); + + let instructions = if unroll_loop { &unroll(&function.instructions ) } else { &function.instructions }; + + let boogie_code = boogie_to_string(&instructions); + + let registers = arch.all_registers(); + + let state = arch.state(); + + let atomic_type = atomic_types(&function.name); + + let target_path = Path::new(output_dir).join(&function.name); + fs::create_dir_all(&target_path)?; + + let mut rmw_op = "".to_string(); + let mut read_ret = "ret_old".to_string(); + if let Some(rmw_name) = RMW_RE.captures(&function.name) { + if let Some(op) = RMW_OP.get(rmw_name.name("type").unwrap().as_str()) { + rmw_op = op.to_string(); + + if rmw_name.name("_get").is_some() { + read_ret = op.to_string(); + } + } + + if func_type == FunctionClass::AwaitRmw { + rmw_op = format!("(lambda x, y1, y2: bv64 :: {}[x, y2, y1])", rmw_op); + } + } + + let mut await_cond = "".to_string(); + if let Some(await_name) = AWAIT_RE.captures(&function.name) { + if let Some(op) = AWAIT_OP.get(&await_name[1]) { + await_cond = op.to_string(); + } + } + + let ordering = ORDERING_RE.captures(&function.name).unwrap(); + let (load_order, store_order) = if func_type == FunctionClass::Fence { + (FENCE_ORDERING[&ordering[0]], "") + } else { + ORDERING[&ordering[0]] + }; + + match atomic_type { + AtomicType::V8 => { + await_cond = format!("bit8[{}]", await_cond); + read_ret = format!("bit8[{}]", read_ret); + rmw_op = format!("bit8[{}]", rmw_op); + } + AtomicType::V16 => { + await_cond = format!("bit16[{}]", await_cond); + read_ret = format!("bit16[{}]", read_ret); + rmw_op = format!("bit16[{}]", rmw_op); + } + _ => {} + } + + let pointer_size = 2u64 + .wrapping_pow(8 * AtomicType::VPTR.type_width(arch.width())) + .wrapping_sub(1); + let register_size = 2u64 + .wrapping_pow(8 * atomic_type.type_width(arch.width())) + .wrapping_sub(1); + + for template in templates { + let template_path = Path::new(template_dir).join(template); + let template_content = fs::read_to_string(&template_path)?; + + let boogie_code_with_assume = format!( + " + assume (last_store < step); + assume (sc_impl is {:?}); + assume (valid_mask({}, {}bv64)); + assume (valid_mask({}, {}bv64)); + assume (valid_mask({}, {}bv64)); + {} + {}", + arch.fence_convention(), + function.address, + pointer_size, + function.input1, + register_size, + function.input2, + register_size, + get_assumptions( + template, + load_order, + store_order, + &rmw_op, + &read_ret, + &await_cond + ), + boogie_code + ); + + let content = template_content + .replace(" #implementation", &boogie_code_with_assume) + .replace("#registers", registers.join(",").as_str()) + .replace("#address", &function.address) + .replace("#state", &state) + .replace("#output", &function.output) + .replace("#input1", &function.input1) + .replace("#input2", &function.input2) + .replace("#value_mask", &format!("{}bv64", register_size)); + + fs::write(&target_path.join(template), content)?; + } + + println!( + "generated verification templates for function {} ({:?})", + function.name, atomic_type + ); + Ok(()) +} + +pub fn generate_debug_file(boogie: &[BoogieFunction], path: &str) -> Result<(), std::io::Error> { + let mut file = fs::File::create(path)?; + for function in boogie { + let boogie_code = 
boogie_to_string(&function.instructions);
+
+        let content = format!("// ---- {} ----\n{}\n", function.name, boogie_code);
+        writeln!(file, "{:#?}", content)?;
+    }
+    Ok(())
+}
diff --git a/verify/src/loops.rs b/verify/src/loops.rs
new file mode 100644
index 00000000..62c326d8
--- /dev/null
+++ b/verify/src/loops.rs
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ */
+
+use std::{collections::{HashMap, HashSet}, hash::BuildHasher, usize};
+
+use petgraph::{algo::tarjan_scc, graphmap::{self, NodeTrait}, prelude::GraphMap, visit::{EdgeRef, GraphBase, IntoEdgeReferences, IntoEdgesDirected, IntoNeighbors, IntoNeighborsDirected, IntoNodeIdentifiers, NodeIndexable}, Directed, EdgeType};
+
+use crate::BoogieInstruction;
+
+fn top_level_loops<G : GraphBase, F : FnMut(HashSet<<G as GraphBase>::NodeId>) -> ()>(cfg: &G, mut callback: F)
+where
+    for <'a> <G as GraphBase>::NodeId : From< <&'a G as GraphBase>::NodeId >,
+    for <'a> &'a G : IntoNeighborsDirected + IntoEdgesDirected + IntoNodeIdentifiers + IntoNeighbors + NodeIndexable + IntoEdgeReferences,
+    for <'a> <&'a G as GraphBase>::NodeId: Eq + std::hash::Hash + Copy + std::fmt::Debug,
+    <G as GraphBase>::NodeId: Eq + std::hash::Hash + Copy,
+{
+    // An SCC is a loop if it has more than one node or a self edge.
+    let loops = tarjan_scc(cfg)
+        .into_iter()
+        .filter_map(|vec|
+            if vec.len() > 1 || cfg.neighbors(vec[0]).any(|j| j == vec[0]) {
+                Some(
+                    vec.into_iter().map(|i| <G as GraphBase>::NodeId::from(i)).collect::<HashSet<_>>())
+            } else {
+                None
+            }
+        );
+
+    for the_loop in loops {
+        callback(the_loop);
+    }
+}
+
+trait Removable<T> {
+    fn remove(&mut self, to_remove : T);
+}
+
+impl<N : NodeTrait, E, Ty : EdgeType> Removable<<GraphMap<N, E, Ty> as GraphBase>::NodeId> for GraphMap<N, E, Ty> {
+    fn remove(&mut self, to_remove : <GraphMap<N, E, Ty> as GraphBase>::NodeId) {
+        let incoming : Vec<_> = self.edges_directed(to_remove, petgraph::Direction::Incoming).map(|edge|
+            (edge.source(), edge.target())
+        ).collect();
+        for (from, to) in incoming {
+            self.remove_edge(from, to);
+        }
+    }
+}
+
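+/// Repeatedly peels loops: the header nodes of every top-level loop are
+/// recorded and removed, and the search reruns on the reduced graph until no
+/// loop remains, so nested loops surface on later rounds. Returns all the
+/// loop headers that were found; note that this function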
+/// destroys the CFG while iterating through it.
+fn all_loops<G : GraphBase, F : FnMut(HashSet<<G as GraphBase>::NodeId>, &G) -> ()>(cfg: &mut G, mut callback: F) -> HashSet< <G as GraphBase>::NodeId >
+where
+    G : Removable< <G as GraphBase>::NodeId >,
+    for <'a> <G as GraphBase>::NodeId : From< <&'a G as GraphBase>::NodeId >,
+    for <'a> <&'a G as GraphBase>::NodeId : From< <G as GraphBase>::NodeId >,
+    for <'a> &'a G : IntoNeighborsDirected + IntoEdgesDirected + IntoNodeIdentifiers + IntoNeighbors + NodeIndexable + IntoEdgeReferences,
+    for <'a> <&'a G as GraphBase>::NodeId: Eq + std::hash::Hash + Copy + std::fmt::Debug,
+    <G as GraphBase>::NodeId: Eq + std::hash::Hash + Copy,
+{
+    let mut loop_headers = HashSet::new();
+    loop {
+        let mut new_headers = Vec::new();
+        top_level_loops(cfg, |the_loop| {
+            for i in the_loop.iter().copied() {
+                let is_entry = cfg.edges_directed(i.into(), petgraph::Direction::Incoming).any(|edge| !the_loop.contains(&edge.source().into()));
+
+                if is_entry {
+                    new_headers.push(i);
+                    loop_headers.insert(i);
+                }
+            }
+            callback(the_loop, cfg);
+        });
+
+        if new_headers.is_empty() {
+            break;
+        }
+
+        for i in new_headers {
+            cfg.remove(i);
+        }
+    }
+    loop_headers
+}
+
+pub fn cfg(code: &[BoogieInstruction]) -> GraphMap<usize, (), Directed> {
+    let mut graph =
+        graphmap::GraphMap::with_capacity(code.len() + 1, 2 * code.len() + 1);
+    let label_idx = all_labels(code.iter());
+
+    // let mut output = String::new();
+
+    // for (key, value) in label_idx {
+    //     output.push_str(&format!("{}: {}\n", key, value));
+    // }
+
+    // panic!("{}", output);
+
+    let mut unreachable = false;
+    for (i, instr) in code.iter().enumerate() {
+        match &instr {
+            BoogieInstruction::Label(_) => {
+                unreachable = false;
+            }
+            _ => {}
+        }
+        if !unreachable {
+            match &instr {
+                BoogieInstruction::Return => {}
+                BoogieInstruction::Branch(targets, cond) => {
+                    for label in targets {
+                        if !label_idx.contains_key(label) {
+                            panic!("Missing label in label_idx: {}", label);
+                        }
+                        graph.add_edge(i, label_idx[label], ());
+                    }
+                    if cond != "true" {
+                        graph.add_edge(i, i + 1, ());
+                    } else {
+                        unreachable = true;
+                    }
+                }
+                _ => {
+                    graph.add_edge(i, i + 1, ());
+                }
+            }
+        }
+    }
+
+    graph
+}
+
+pub fn loop_headers(code: &[BoogieInstruction]) -> HashSet<usize> {
+    let mut graph = cfg(code);
+    all_loops(&mut graph, |_, _| {})
+}
+
+fn all_labels<'a, I : Iterator<Item = &'a BoogieInstruction>>(code: I) -> HashMap<String, usize> {
+    code.enumerate().filter_map(|(i, instr)| match instr {
+        BoogieInstruction::Label(lab) => Some((lab.clone(), i)),
+        _ => None,
+    }).collect()
+}
+
+fn find_exit_name(count: &mut usize, labels: &HashSet<String>) -> String {
+    loop {
+        *count += 1;
+        let name = format!("X{}", count);
+        if !labels.contains(&name) {
+            return name;
+        }
+    }
+}
+
+fn insert_loop_fallthrough_back_edges(code: &mut Vec<BoogieInstruction>) {
+    let loops = collect_loops(code);
+    let graph = cfg(code);
+    let mut fallthrough_back_edges = vec![];
+
+    for the_loop in loops.into_iter() {
+        let loop_headers : HashSet<_> = the_loop.iter().copied().filter(|&i|
+            graph.edges_directed(i, petgraph::Direction::Incoming).any(|edge|
+                !the_loop.contains(&edge.source()))).collect();
+
+        for j in the_loop {
+            if loop_headers.contains(&(j+1)) {
+                let mut may_fallthrough = true;
+
+                let BoogieInstruction::Label(label) = &code[j+1] else { panic!("loop headers must be labels"); };
+
+                if let BoogieInstruction::Branch(targets, condition) = &code[j] {
+                    may_fallthrough = !targets.contains(label) && condition != "true";
+                }
+
+                if may_fallthrough {
+                    fallthrough_back_edges.push((j, label.clone()));
+                }
+            }
+        }
+    }
+
+    fallthrough_back_edges.sort_by(|(j1,_),(j2,_)| j2.cmp(j1));
+
+    for (j, label) in fallthrough_back_edges {
+        code.insert(j+1, BoogieInstruction::Branch(vec![label], "true".to_string()));
+    }
+}
+
+fn insert_loop_exit_labels(code: &mut Vec<BoogieInstruction>) {
+    let mut graph = cfg(code);
+    let mut loop_exits = Vec::new();
+    all_loops(&mut graph, |the_loop, cfg| {
+        for i in the_loop.iter().copied() {
+            for edge in cfg.edges_directed(i, petgraph::Direction::Outgoing) {
+                if !the_loop.contains(&edge.target()) && edge.target() == i+1 {
+                    loop_exits.push(edge.target());
+                }
+            }
+        }
+    });
+
+    let mut count = 0;
+    let labels = all_labels(code.iter()).keys().cloned().collect();
+    loop_exits.sort();
+    loop_exits.dedup();
+    loop_exits.reverse();
+    if let Some(BoogieInstruction::Return) = code.last() {
+        // already ends in a return - nothing to do
+    } else {
+        code.push(BoogieInstruction::Return);
+    }
+
+    for exit in loop_exits {
+        if let BoogieInstruction::Label(_) = &code[exit] {
+            // already a label - nothing to do
+        } else {
+            code.insert(exit, BoogieInstruction::Label(find_exit_name(&mut count, &labels)));
+        }
+    }
+}
+
+fn collect_loops(code : &[BoogieInstruction]) -> Vec<HashSet<usize>> {
+    let mut graph = cfg(code);
+    let mut loops = Vec::new();
+    top_level_loops(&mut graph, |the_loop| {
+        loops.push(the_loop.clone());
+    });
+    loops
+}
+
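+/// Unrolls each loop into a "forced" first iteration (exits blocked off) that
+/// branches into a "final" iteration (back edges blocked off), and then
+/// redirects the original entry edges into the forced copy.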
+fn duplicate_loops(code: &mut Vec<BoogieInstruction>) {
+    /* @TODO
+     * Pretty sure this function only really works for the top level loops - nested loops are not unrolled inside the duplicated code!
+     */
+
+    let loops = collect_loops(code);
+    let graph = cfg(code);
+    let labels = all_labels(code.iter());
+
+    for (loop_id, the_loop) in loops.into_iter().enumerate() {
+        let loop_headers : HashSet<_> = the_loop.iter().copied().filter(|&i|
+            graph.edges_directed(i, petgraph::Direction::Incoming).any(|edge|
+                !the_loop.contains(&edge.source()))).collect();
+
+        let mut sorted_loop : Vec<_> = the_loop.iter().copied().collect();
+        sorted_loop.sort();
+
+        // first step: insert forced loop
+        for &i in sorted_loop.iter() {
+            code.push(transformed(&code[i], |label| transform_label(label, loop_id, LOOP_FORCED_ITER_SUFFIX),
+                |targets| {
+                    targets.iter().flat_map(|label| {
+                        let target = labels[label];
+                        if loop_headers.contains(&target) {
+                            vec![transform_label(label, loop_id, LOOP_FINAL_ITER_SUFFIX)].into_iter()
+                        } else if the_loop.contains(&target) {
+                            vec![transform_label(label, loop_id, LOOP_FORCED_ITER_SUFFIX)].into_iter()
+                        } else {
+                            // loop exits are blocked off -- empty goto will become assume false
+                            vec![].into_iter()
+                        }
+                    }).collect()
+                }));
+
+            if graph.edges_directed(i, petgraph::Direction::Outgoing).any(|edge| edge.target() == i+1 && !the_loop.contains(&(i+1))) {
+                // indirect loop exit
+                // empty jump label to block off exit!
+                code.push(BoogieInstruction::Branch(vec![], "true".to_string()));
+            }
+        }
+
+        // second step: insert final loop
+        for &i in sorted_loop.iter() {
+            code.push(transformed(&code[i], |label| transform_label(label, loop_id, LOOP_FINAL_ITER_SUFFIX),
+                |targets| {
+                    targets.iter().flat_map(|label| {
+                        let target = labels[label];
+                        if loop_headers.contains(&target) {
+                            // back loops are blocked off
+                            vec![].into_iter()
+                        } else if the_loop.contains(&target) {
+                            vec![transform_label(label, loop_id, LOOP_FINAL_ITER_SUFFIX)].into_iter()
+                        } else {
+                            // loop exits are kept
+                            vec![label.clone()].into_iter()
+                        }
+                    }).collect()
+                }));
+
+            if graph.edges_directed(i, petgraph::Direction::Outgoing).any(|edge| edge.target() == i+1 && !the_loop.contains(&(i+1))) {
+                // indirect loop exit
+                assert!(i+1 < code.len());
+                let BoogieInstruction::Label(label) = &code[i+1] else {
+                    panic!("A loop exit point does not have a label. Please call `insert_loop_exit_labels` before using this function.")
+                };
+                code.push(BoogieInstruction::Branch(vec![label.clone()], "true".to_string()));
+            }
+        }
+
+        // third step: transform incoming & internal edges to point to the new forced/final loop
+        for i in loop_headers.iter().copied() {
+            let BoogieInstruction::Label(my_label) = code[i].clone() else { panic!("loop header has entry edge but no label, this should be impossible.") };
+            for edge in graph.edges_directed(i, petgraph::Direction::Incoming) {
+                let j = edge.source();
+                code[j] = transformed(&code[j], |label| label.clone(),
+                    |targets| {
+                        let mut new_targets = Vec::new();
+                        for label in targets {
+                            let same_label = label == &my_label;
+                            if same_label {
+                                new_targets.push(transform_label(label, loop_id, LOOP_FORCED_ITER_SUFFIX));
+
+                                if !the_loop.contains(&j) { // back branches are not kept
+                                    new_targets.push(transform_label(label, loop_id, LOOP_FINAL_ITER_SUFFIX));
+                                }
+                            }
+                            if !same_label {
+                                new_targets.push(label.clone());
+                            }
+                        }
+                        new_targets
+                    });
+            }
+        }
+    }
+}
+
+const LOOP_FORCED_ITER_SUFFIX : &str = "FORCED_ITER";
+const LOOP_FINAL_ITER_SUFFIX : &str = "FINAL_ITER";
+
+fn transform_label(label: &String, loop_id : usize, suffix : &str) -> String {
+    format!("{}_{}_{}", label, loop_id, suffix)
+}
+
+fn transformed<F : Fn(&String) -> String, G : Fn(&[String]) -> Vec<String>>(instr: &BoogieInstruction, label_transformer: F, branch_transformer: G) -> BoogieInstruction {
+    match instr {
+        BoogieInstruction::Branch(targets, cond) => BoogieInstruction::Branch(branch_transformer(targets), cond.clone()),
+        BoogieInstruction::Label(label) => BoogieInstruction::Label(label_transformer(label)),
+        _ => instr.clone(),
+    }
+}
+
+/// Unroll all loops
+pub fn unroll(code: &[BoogieInstruction]) -> Vec<BoogieInstruction> {
+    let mut changed_code = code.into();
+    insert_loop_fallthrough_back_edges(&mut changed_code);
+    insert_loop_exit_labels(&mut changed_code);
+
+    duplicate_loops(&mut changed_code);
+
+    changed_code
+}
\ No newline at end of file
diff --git a/verify/src/main.rs b/verify/src/main.rs
new file mode 100644
index 00000000..ef837afa
--- /dev/null
+++ b/verify/src/main.rs
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ */
+use asm2boogie::arm::{self, extract_arm_functions, parse_arm_assembly};
+use asm2boogie::riscv::{self, extract_riscv_functions, parse_riscv_assembly};
+use asm2boogie::{Arch, ToBoogie, generate_boogie_file, generate_debug_file};
+use asm2boogie::{BoogieFunction, DUMMY_REG, FenceConvention, Width};
+
+use clap::{Parser, ValueEnum};
+use std::path::Path;
+use std::{fs, iter};
+
+enum OutputMode {
+    File(String),
+    Directory(String),
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, ValueEnum)]
+enum ArchSpecifier {
+    #[value(name = "armv8")]
+    ArmV8,
+
+    #[value(name = "riscv")]
+    RiscV,
+}
+
+impl Arch for ArchSpecifier {
+    fn name(&self) -> String {
+        format!("{:?}", self)
+    }
+
+    fn all_registers(&self) -> Vec<String> {
+        match self {
+            ArchSpecifier::ArmV8 => iter::once(DUMMY_REG.to_string())
+                .chain((0..=31).flat_map(|i| [format!("x{}", i), format!("w{}", i)]))
+                .collect(),
+            ArchSpecifier::RiscV => iter::once(DUMMY_REG.to_string())
+                .chain((0..=7).map(|i| format!("a{}", i)))
+                .chain((0..=11).map(|i| format!("s{}", i)))
+                .chain((0..=6).map(|i| format!("t{}", i)))
+                .collect()
+        }
+    }
+
+    fn width(&self) -> asm2boogie::Width {
+        Width::Wide
+    }
+
+    fn parse_functions(
+        &self,
+        assembly: &str,
+    ) -> Result<Vec<BoogieFunction>, Box<dyn std::error::Error>> {
+        /* @TODO intermediate step -- Vec<AsmFunction>.
AsmFunction -> BoogieFunction. Here AsmFunction is a generic type of just name + string defining code. */ + match self { + ArchSpecifier::ArmV8 => { + let (_, parsed_asm) = + parse_arm_assembly(assembly).map_err(|err| err.to_string())?; + let functions = + extract_arm_functions(parsed_asm, None, &["ptr", "32", "64", "sz", "8", ""]) + .into_iter() + .map(|f| arm::transform_labels(&f)); + Ok(functions.into_iter().map(ToBoogie::to_boogie).collect()) + } + ArchSpecifier::RiscV => { + let (_, parsed_asm) = + parse_riscv_assembly(assembly).map_err(|err| err.to_string())?; + let functions = + extract_riscv_functions(parsed_asm, None, &["ptr", "32", "64", "sz", "8", ""]) + .into_iter() + .map(|f| riscv::transform_labels(&f)); + Ok(functions.into_iter().map(ToBoogie::to_boogie).collect()) + } + } + } + + fn state(&self) -> String { + match self { + ArchSpecifier::ArmV8 => { + "local_monitor, monitor_exclusive, flags, event_register".to_string() + } + ArchSpecifier::RiscV => "local_monitor, monitor_exclusive".to_string() + } + } + + fn fence_convention(&self) -> FenceConvention { + match self { + ArchSpecifier::ArmV8 => FenceConvention::RCsc, + ArchSpecifier::RiscV => FenceConvention::Mixed, + } + } +} + +#[derive(Parser, Debug)] +#[clap(author, version, about = "Generate Verifiable Boogie Code from ASM", long_about = None)] +struct Args { + #[clap( + short = 'a', + long, + value_enum, + default_value = "armv8", + help = "Target architecture (armv8 or riscv)" + )] + arch: ArchSpecifier, + + #[clap(short = 'u', long, help = "unroll the main loop to prove at least one iteration of the loop occurs", action)] + unroll: bool, + + #[clap(short = 'i', long, value_name = "FILE", help = "input file")] + input: String, + #[clap(short = 'f', long, value_name = "FILE", help = "function names")] + functions: String, + #[clap( + short = 'o', + long, + value_name = "FILE", + help = "Sets the output file", + conflicts_with = "directory" + )] + output: Option, + #[clap( + short = 'd', + long, + value_name = "DIR", + help = "Sets the output directory", + conflicts_with = "output" + )] + directory: Option, + #[clap(short = 't', long, value_name = "DIR", help = "template directory")] + templates: Option, +} + +fn read_function_names(file_path: &str) -> Result, std::io::Error> { + let content = fs::read_to_string(file_path)?; + Ok(content + .lines() + .map(str::trim) + .filter(|s| !s.is_empty()) + .map(String::from) + .collect()) +} + +fn ensure_directory_exists(path: &str) -> Result<(), std::io::Error> { + if !Path::new(path).exists() { + fs::create_dir_all(path)?; + } + Ok(()) +} + + +fn main() -> Result<(), Box> { + env_logger::init(); + let args = Args::parse(); + + let output_mode = match (&args.output, &args.directory) { + (Some(file), None) => OutputMode::File(file.clone()), + (None, Some(dir)) => { + if args.templates.is_none() { + eprintln!("Error: --templates is required when --directory is specified."); + std::process::exit(1); + } + OutputMode::Directory(dir.clone()) + } + (None, None) => { + eprintln!("Error: Either --output or --directory must be specified."); + std::process::exit(1); + } + (Some(_), Some(_)) => { + unreachable!("Clap should prevent both output and directory from being specified."); + } + }; + + let function_names = read_function_names(&args.functions).unwrap_or_else(|e| { + eprintln!("Error reading functions file '{}': {}", args.functions, e); + std::process::exit(1); + }); + + log::info!("Input file: {}", args.input); + log::info!("Functions file: {}", args.functions); + 
log::info!("Arch: {:?}", args.arch); + + if let Some(output) = &args.output { + log::info!("Output file: {}", output); + } + if let Some(directory) = &args.directory { + log::info!("Output directory: {}", directory); + } + if let Some(templates) = &args.templates { + log::info!("Template directory: {}", templates); + } + + log::info!("Function Extraction Names: {:?}", function_names); + + let input_content = fs::read_to_string(&args.input).unwrap_or_else(|e| { + eprintln!("Error reading input file '{}': {}", args.input, e); + std::process::exit(1); + }); + log::info!("Successfully read input file '{}'", args.input); + + let name_re = regex::Regex::new( + format!("^({})$", function_names.join("|")).as_str() + )?; + + let boogie_functions: Vec<_> = args + .arch + .parse_functions(&input_content)? + .iter() + .filter(|func| name_re.is_match(func.name.as_str())) + .cloned() + .collect(); + + match output_mode { + OutputMode::File(file_path) => { + log::info!("Generating output file: {}", file_path); + generate_debug_file(&boogie_functions, &file_path)?; + } + OutputMode::Directory(dir_path) => { + let template_dir = args.templates.as_ref().unwrap(); + log::info!("Generating output files in directory: {}", dir_path); + ensure_directory_exists(&dir_path)?; + + for function in &boogie_functions { + log::info!("Generating Boogie code for function: {}", function.name); + generate_boogie_file(function, &dir_path, template_dir, &args.arch, args.unroll)?; + } + } + } + + log::info!("Successfully generated Boogie code"); + Ok(()) +} diff --git a/verify/src/riscv/mod.rs b/verify/src/riscv/mod.rs new file mode 100644 index 00000000..0cb975b1 --- /dev/null +++ b/verify/src/riscv/mod.rs @@ -0,0 +1,563 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ * SPDX-License-Identifier: MIT + */ +mod parser; +mod transform; + +// pub use genegrate::{arm_to_boogie_code, get_address_registers, get_used_registers}; +pub use parser::parse_riscv_assembly; +pub use transform::{extract_riscv_functions, remove_directives, transform_labels}; + +use crate::{BoogieFunction, BoogieInstruction, SideEffect, ToBoogie, DUMMY_REG}; + +#[derive(Debug, Clone, PartialEq)] +pub struct MemoryOperand { + pub offset: i64, + pub base: Register, +} + +#[derive(Debug, Clone, PartialEq, Copy)] +pub enum Size { + Byte, // 8-bit + Half, // 16-bit + Word, // 32-bit + Double, // 64-bit +} + +impl Size { + pub fn bytes(&self) -> u32 { + match self { + Size::Byte => 1, + Size::Half => 2, + Size::Word => 4, + Size::Double => 8, + } + } + + pub fn mask(&self) -> u64 { + (2u64.overflowing_pow(self.bytes() * 8)).0.overflowing_add_signed(-1).0 + } +} + +#[derive(Debug, Clone, PartialEq, Copy)] +pub enum AtomicSemantics { + None, + Acquire, + Release, + AcquireRelease, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum Directive { + CFIStartProc, + CFIEndProc, + Size(String, String), + Align(u32), + P2Align(u32, u32, u32), + Type(String, String), +} + +#[derive(Debug, Clone, PartialEq, Copy)] +pub enum FenceMode { + Read, + Write, + ReadWrite, + OutputWrite, + InputOutputReadWrite, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum RegisterType { + A, // Argument registers (a0-a7) + T, // Temporary registers (t0-t6) + S, // Saved registers (s0-s11) + X, // General-purpose (x0-x31) + Special(String), // Special registers like "zero", "ra", "sp" +} + +#[derive(Debug, Clone, PartialEq)] +pub struct Register { + pub reg_type: RegisterType, + pub number: Option, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum Operand { + Register(Register), + Immediate(i64), + Memory(MemoryOperand), + Label(String), + FenceMode(FenceMode), +} +#[derive(Debug, Clone, PartialEq, Copy)] +pub enum AtomicOp { + Swap, + Add, + And, + Or, + Xor, + Max, + Min, +} +#[derive(Debug, Clone, PartialEq, Copy)] +pub enum ArithmeticOp { + Add, + Sub, + Mul, + And, + Or, + Xor, + Sll, // Shift Left Logical + Srl, // Shift Right Logical + Sra, // Shift Right Arithmetic +} + +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum ComparisonOp { + Eq, // Equal (for beq) + Ne, // Not equal (for bne) + Lt, // Less than, signed (for blt) + Ge, // Greater than or equal, signed (for bge) + Ltu, // Less than, unsigned (for bltu) + Geu, // Greater than or equal, unsigned (for bgeu) + Nez, // Not equal zero (for bnez) + Gtu, // + Leu, // +} + +#[derive(Debug, Clone, PartialEq)] +pub enum RiscvInstruction { + Label(String), + Directive(Directive), + Fence { + pred: FenceMode, + succ: FenceMode, + }, + Branch { + op: ComparisonOp, + rs1: Register, + rs2: Register, + label: String, + }, + Load { + size: Size, + dst: Register, + src: MemoryOperand, + }, + UnsignedLoad { + size: Size, + dst: Register, + src: MemoryOperand, + }, + Store { + size: Size, + src: Register, + dst: MemoryOperand, + }, + LoadReserved { + size: Size, + semantics: AtomicSemantics, + rd: Register, + addr: MemoryOperand, + }, + StoreConditional { + size: Size, + semantics: AtomicSemantics, + rd: Register, + rs2: Register, + addr: MemoryOperand, + }, + LoadImmidate { + register: Register, + value: i64, + }, + Atomic { + op: AtomicOp, + size: Size, + semantics: AtomicSemantics, + rd: Register, + rs2: Register, + addr: MemoryOperand, + }, + + Not { + rd: Register, + rs: Register, + }, + Neg { + rd: Register, + rs: Register, + size: Option, + }, + ArithmeticRR { + op: 
ArithmeticOp, + rd: Register, + rs1: Register, + rs2: Register, + size: Option, + }, + ArithmeticRI { + op: ArithmeticOp, + rd: Register, + rs1: Register, + imm: i64, + size: Option, + }, + Move(Register, Register), + SignExtendWord(Register, Register), + Jump { + rd: Register, + label: Option, + }, + Return, + // TODO: add to boogie + Call { + label: String, + }, + // TODO: add to boogie + LoadAddress { + register: Register, + label: String, + }, + Unhandled(String), +} + +#[derive(Debug, Clone)] +pub struct RiscvFunction { + pub name: String, + pub instructions: Vec, +} + +impl ToBoogie for RiscvFunction { + fn to_boogie(self) -> BoogieFunction { + let instructions = self + .instructions + .iter() + .map(|instr| riscv_instruction_to_boogie(instr)) + .collect(); + + BoogieFunction { + name: self.name.clone(), + instructions, + address: "a0".to_string(), + input1: "a1".to_string(), + input2: "a2".to_string(), + output: "a0".to_string(), + } + } +} + +pub fn riscv_instruction_to_boogie(instr: &RiscvInstruction) -> BoogieInstruction { + match riscv_instruction_to_boogie_direct(instr) { + BoogieInstruction::Instr(instr, side_effects, reg, args) if reg.to_lowercase() == "zero" => + BoogieInstruction::Instr(instr, side_effects, DUMMY_REG.to_string(), args), + instr => instr, + } +} + +fn riscv_instruction_to_boogie_direct(instr: &RiscvInstruction) -> BoogieInstruction { + match instr { + RiscvInstruction::Label(name) => BoogieInstruction::Label(name.clone()), + RiscvInstruction::Fence { pred, succ } => { + let (pr, pw) = fence_mode_to_boogie(pred); + let (sr, sw) = fence_mode_to_boogie(succ); + + BoogieInstruction::Instr( + "fence".to_string(), SideEffect::Global, + DUMMY_REG.to_string(), + vec![ + pr.to_string(), + pw.to_string(), + sr.to_string(), + sw.to_string(), + ], + ) + } + RiscvInstruction::Branch { + op, + rs1, + rs2, + label, + } => { + let r1 = register_to_string(rs1); + let r2 = register_to_string(rs2); + BoogieInstruction::Branch( + vec![label.to_string()], + match *op { + ComparisonOp::Nez => format!("bnez({})", r1.to_string()), + _ => format!( + "b{}({}, {})", + format!("{:?}", op).to_lowercase(), + r1.to_string(), + r2.to_string() + ), + }, + ) + } + RiscvInstruction::Load { dst, src, size, .. } => { + let dst_reg = register_to_string(dst); + let src_reg = operand_to_boogie(&Operand::Memory(src.clone())); + + BoogieInstruction::Instr("ld".to_string(), SideEffect::Global, dst_reg, vec![src_reg, format!("{}bv64", size.mask())]) + } + RiscvInstruction::UnsignedLoad { dst, src, size, .. } => { + let dst_reg = register_to_string(dst); + let src_reg = operand_to_boogie(&Operand::Memory(src.clone())); + + BoogieInstruction::Instr("ldu".to_string(), SideEffect::Global, dst_reg, vec![src_reg, format!("{}bv64", size.mask())]) + } + RiscvInstruction::Store { size, dst, src, .. } => { + let src_reg = operand_to_boogie(&Operand::Register(src.clone())); + let dst_reg = operand_to_boogie(&Operand::Memory(dst.clone())); + + BoogieInstruction::Instr( + "sb".to_string(), SideEffect::Global, + DUMMY_REG.to_string(), + vec![src_reg, dst_reg,format!("{}bv64", size.mask()),], + ) + } + RiscvInstruction::LoadReserved { + semantics, + rd, + addr, + size, + .. 
+ } => { + let aq = matches!( + semantics, + AtomicSemantics::Acquire | AtomicSemantics::AcquireRelease + ); + let rl = matches!( + semantics, + AtomicSemantics::Release | AtomicSemantics::AcquireRelease + ); + let dst_reg = register_to_string(rd); + let src_reg = operand_to_boogie(&Operand::Memory(addr.clone())); + + BoogieInstruction::Instr( + "lr".to_string(), SideEffect::Global, + dst_reg, + vec![aq.to_string(), rl.to_string(), src_reg, format!("{}bv64", size.mask())], + ) + } + RiscvInstruction::Call { label } => BoogieInstruction::Instr( + "call".to_string(), + SideEffect::Local, + DUMMY_REG.to_string(), + vec![label.to_string()], + ), + RiscvInstruction::LoadAddress { register, label } => { + let dst_reg = register_to_string(register); + BoogieInstruction::Instr("la".to_string(), SideEffect::Global, dst_reg, vec![label.to_string()]) + } + RiscvInstruction::StoreConditional { + size, + semantics, + rd, + rs2, + addr, + .. + } => { + let aq = matches!( + semantics, + AtomicSemantics::Acquire | AtomicSemantics::AcquireRelease + ); + let rl = matches!( + semantics, + AtomicSemantics::Release | AtomicSemantics::AcquireRelease + ); + + let dst_reg = register_to_string(rd); + let src_reg = operand_to_boogie(&Operand::Register(rs2.clone())); + let addr_op = operand_to_boogie(&Operand::Memory(addr.clone())); + + BoogieInstruction::Instr( + "sc".to_string(), SideEffect::Global, + dst_reg, + vec![aq.to_string(), rl.to_string(), src_reg, addr_op, format!("{}bv64", size.mask())], + ) + } + RiscvInstruction::LoadImmidate { register, value } => { + let dst = register_to_string(register); + let value = operand_to_boogie(&Operand::Immediate(*value)); + + BoogieInstruction::Instr("li".to_string(), SideEffect::Local, dst, vec![value]) + } + RiscvInstruction::Move(dst, src) => { + let dst_reg = register_to_string(dst); + let src_reg = operand_to_boogie(&Operand::Register(src.clone())); + BoogieInstruction::Instr("mv".to_string(), SideEffect::Local, dst_reg, vec![src_reg]) + } + RiscvInstruction::Atomic { + op, + semantics, + rd, + rs2, + addr, + size, + .. 
+ } => { + let atomic_op = format!( + "Atomic{}()", + match op { + AtomicOp::Add => "Add", + AtomicOp::And => "And", + AtomicOp::Max => "Max", + AtomicOp::Min => "Min", + AtomicOp::Or => "Or", + AtomicOp::Xor => "Xor", + AtomicOp::Swap => "Swap", + } + ); + + let aq = matches!( + semantics, + AtomicSemantics::Acquire | AtomicSemantics::AcquireRelease + ); + let rl = matches!( + semantics, + AtomicSemantics::Release | AtomicSemantics::AcquireRelease + ); + + let dst_reg = register_to_string(rd); + let src_reg = operand_to_boogie(&Operand::Register(rs2.clone())); + let addr_op = operand_to_boogie(&Operand::Memory(addr.clone())); + + BoogieInstruction::Instr( + "atomic".to_string(), SideEffect::Global, + dst_reg, + vec![atomic_op, aq.to_string(), rl.to_string(), src_reg, addr_op, format!("{}bv64", size.mask())], + ) + } + RiscvInstruction::ArithmeticRR { + op, + rd, + rs1, + rs2, + size, + } => { + let dst_reg = register_to_string(rd); + let src1_reg = register_to_string(rs1); + let src2_reg = register_to_string(rs2); + + let op_name = match op { + ArithmeticOp::Add => "add", + ArithmeticOp::Sub => "sub", + ArithmeticOp::Mul => "mul", + ArithmeticOp::And => "and", + ArithmeticOp::Or => "or", + ArithmeticOp::Xor => "xor", + ArithmeticOp::Sll => "sll", + ArithmeticOp::Srl => "srl", + ArithmeticOp::Sra => "sra", + }; + + let _op_suffix = if let Some(Size::Word) = size { "w" } else { "" }; + + BoogieInstruction::Instr(format!("{}", op_name), SideEffect::Local, dst_reg, vec![src1_reg, src2_reg]) + } + RiscvInstruction::ArithmeticRI { + op, + rd, + rs1, + imm, + size, + } => { + let dst_reg = register_to_string(rd); + let src_reg = register_to_string(rs1); + let imm_str = format!("{}bv64", *imm as u64); + + let op_name = match op { + ArithmeticOp::Add => "addi", + ArithmeticOp::Sub => "subi", + ArithmeticOp::And => "andi", + ArithmeticOp::Or => "ori", + ArithmeticOp::Xor => "xori", + ArithmeticOp::Sll => "slli", + ArithmeticOp::Srl => "srli", + ArithmeticOp::Sra => "srai", + _ => unreachable!(), + }; + + let _op_suffix = if let Some(Size::Word) = size { "w" } else { "" }; + + BoogieInstruction::Instr(format!("{}", op_name), SideEffect::Local, dst_reg, vec![src_reg, imm_str]) + } + RiscvInstruction::Not { rd, rs } => { + let dst_reg = register_to_string(rd); + let src_reg = register_to_string(rs); + BoogieInstruction::Instr("not".to_string(), SideEffect::Local, dst_reg, vec![src_reg]) + } + RiscvInstruction::Neg { rd, rs, size } => { + let dst_reg = register_to_string(rd); + let src_reg = register_to_string(rs); + let op_name = if let Some(Size::Word) = size { + "negw" + } else { + "neg" + }; + BoogieInstruction::Instr(op_name.to_string(), SideEffect::Local, dst_reg, vec![src_reg]) + } + RiscvInstruction::SignExtendWord(dst, src) => { + let dst_reg = register_to_string(dst); + let src_reg = register_to_string(src); + BoogieInstruction::Instr("sext".to_string(), SideEffect::Local, dst_reg, vec![src_reg]) + } + RiscvInstruction::Jump { rd, label } => { + if let Some(label) = label { + BoogieInstruction::Branch(vec![label.clone()], "true".to_string()) + } else { + let reg = register_to_string(rd); + if reg.to_lowercase() == "ra" { + // @TODO: check that ra is maintained properly! perhaps just emit an assertion that ra==old(ra) + BoogieInstruction::Return + } else { + // TODO: support this in boogie! 
+                    BoogieInstruction::Instr("jr".to_string(), SideEffect::Local, reg, vec![])
+                }
+            }
+        }
+        RiscvInstruction::Return => BoogieInstruction::Return,
+        RiscvInstruction::Directive(d) => {
+            BoogieInstruction::Comment(format!("Directive: {:?}", d))
+        }
+        RiscvInstruction::Unhandled(instr) => {
+            log::warn!("Unhandled: {:?}", instr);
+            BoogieInstruction::Unhandled(format!("Unhandled RISC-V instruction {}", instr))
+        }
+    }
+}
+
+fn register_to_string(reg: &Register) -> String {
+    match &reg.reg_type {
+        RegisterType::A => format!("a{}", reg.number.unwrap()),
+        RegisterType::T => format!("t{}", reg.number.unwrap()),
+        RegisterType::S => format!("s{}", reg.number.unwrap()),
+        RegisterType::X => format!("x{}", reg.number.unwrap()),
+        RegisterType::Special(name) => name.to_string(),
+    }
+}
+
+fn operand_to_boogie(operand: &Operand) -> String {
+    match operand.clone() {
+        Operand::Register(reg) => register_to_string(&reg),
+        Operand::Immediate(val) => format!("{}bv64", val as u64),
+        Operand::Memory(op) => {
+            if op.offset == 0 {
+                register_to_string(&op.base)
+            } else {
+                format!("{}+{}", register_to_string(&op.base), op.offset)
+            }
+        }
+        _ => unimplemented!(),
+    }
+}
+
+fn fence_mode_to_boogie(mode: &FenceMode) -> (bool, bool) {
+    match mode {
+        FenceMode::Read => (true, false),
+        FenceMode::Write => (false, true),
+        FenceMode::ReadWrite => (true, true),
+        FenceMode::OutputWrite => (false, true), // TODO: fix this
+        FenceMode::InputOutputReadWrite => (true, true),
+    }
+}
diff --git a/verify/src/riscv/parser.rs b/verify/src/riscv/parser.rs
new file mode 100644
index 00000000..084e16d5
--- /dev/null
+++ b/verify/src/riscv/parser.rs
@@ -0,0 +1,1506 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ */
+use nom::{
+    IResult, Parser,
+    branch::alt,
+    bytes::complete::{tag, take_till, take_while1},
+    character::complete::{char, digit1, hex_digit1, multispace0, space0, space1},
+    combinator::{map, map_res, opt, recognize, value},
+    multi::{many0, separated_list0},
+    sequence::{delimited, pair, preceded, terminated},
+};
+
+use super::*;
+
+fn parse_register(input: &str) -> IResult<&str, Register> {
+    let (input, name) = take_while1(|c: char| c.is_alphanumeric())(input)?;
+    let (reg_type, number) = match name {
+        "zero" => (RegisterType::Special("zero".to_string()), None),
+        "ra" => (RegisterType::Special("ra".to_string()), None),
+        "sp" => (RegisterType::Special("sp".to_string()), None),
+        _ if name.starts_with('a') && name.len() == 2 && name[1..].parse::<u32>().is_ok() => {
+            let num = name[1..].parse::<u32>().unwrap();
+            if num <= 7 {
+                (RegisterType::A, Some(num))
+            } else {
+                (RegisterType::X, Some(num))
+            }
+        }
+        _ if name.starts_with('t') && name.len() == 2 && name[1..].parse::<u32>().is_ok() => {
+            let num = name[1..].parse::<u32>().unwrap();
+            if num <= 6 {
+                (RegisterType::T, Some(num))
+            } else {
+                (RegisterType::X, Some(num))
+            }
+        }
+        _ if name.starts_with('s') && name.len() == 2 && name[1..].parse::<u32>().is_ok() => {
+            let num = name[1..].parse::<u32>().unwrap();
+            if num <= 11 {
+                (RegisterType::S, Some(num))
+            } else {
+                (RegisterType::X, Some(num))
+            }
+        }
+        _ if name.starts_with('x') && name.len() >= 2 && name[1..].parse::<u32>().is_ok() => {
+            let num = name[1..].parse::<u32>().unwrap();
+            (RegisterType::X, Some(num))
+        }
+        _ => {
+            return Err(nom::Err::Error(nom::error::Error::new(
+                input,
+                nom::error::ErrorKind::Tag,
+            )));
+        }
+    };
+    Ok((input, Register { reg_type, number }))
+}
+
+fn parse_memory_operand(input: &str) -> IResult<&str, MemoryOperand> {
+    map(
+        (
+            parse_immediate,
+            delimited(char('('), parse_register, char(')')),
+        ),
+        |(offset, base)| MemoryOperand { offset, base },
+    )
+    .parse(input)
+}
+
+fn parse_immediate(input: &str) -> IResult<&str, i64> {
+    let (input, _) = alt((tag("#"), tag(""))).parse(input)?;
+    let (input, signed) = opt(char('-')).parse(input)?;
+
+    let (input, value_str) = alt((
+        recognize(pair(alt((tag("0x"), tag("0X"))), hex_digit1)),
+        recognize(digit1),
+    ))
+    .parse(input)?;
+
+    let base = if value_str.starts_with("0x") || value_str.starts_with("0X") {
+        16
+    } else {
+        10
+    };
+
+    if base == 10 && (input.contains('b') || input.contains('f')) {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            input,
+            nom::error::ErrorKind::Tag,
+        )));
+    }
+
+    let value = i64::from_str_radix(
+        if base == 16 {
+            &value_str[2..]
+        } else {
+            value_str
+        },
+        base,
+    )
+    .unwrap();
+
+    Ok((input, if signed.is_some() { -value } else { value }))
+}
+
+fn parse_label(input: &str) -> IResult<&str, String> {
+    map(
+        take_while1(|c: char| c.is_alphanumeric() || c == '_' || c == '.' || c == '$'),
+        |s: &str| s.to_string(),
+    )
+    .parse(input)
+}
+
+fn parse_operand_label(input: &str) -> IResult<&str, String> {
+    let (input, label) = take_while1(|c: char| {
+        c.is_alphanumeric() || c == '_' || c == '.' || c == '$' || c == 'f' || c == 'b' || c == '@'
+    })(input)?;
+
+    if label.starts_with("0x") || label.starts_with("0X") {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            input,
+            nom::error::ErrorKind::Tag,
+        )));
+    }
+
+    let is_numeric_prefix = label.chars().next().unwrap_or(' ').is_numeric();
+    if is_numeric_prefix && !label.ends_with('f') && !label.ends_with('b') {
+        return Err(nom::Err::Error(nom::error::Error::new(
+            input,
+            nom::error::ErrorKind::Tag,
+        )));
+    }
+
+    Ok((input, label.to_string()))
+}
+
+fn parse_operand(input: &str) -> IResult<&str, Operand> {
+    alt((
+        map(parse_memory_operand, Operand::Memory),
+        map(parse_register, Operand::Register),
+        map(parse_fence_mode_operand, Operand::FenceMode),
+        map(parse_immediate, Operand::Immediate),
+        map(parse_operand_label, Operand::Label),
+    ))
+    .parse(input)
+}
+
+fn parse_fence_mode_operand(input: &str) -> IResult<&str, FenceMode> {
+    alt((
+        value(FenceMode::InputOutputReadWrite, tag("iorw")),
+        value(FenceMode::OutputWrite, tag("ow")),
+        value(FenceMode::ReadWrite, tag("rw")),
+        value(FenceMode::Read, tag("r")),
+        value(FenceMode::Write, tag("w")),
+    ))
+    .parse(input)
+}
+
+fn parse_operands(input: &str) -> IResult<&str, Vec<Operand>> {
+    separated_list0((space0, char(','), space0), preceded(space0, parse_operand)).parse(input)
+}
+
+fn parse_directive(input: &str) -> IResult<&str, Directive> {
+    preceded(
+        char('.'),
+        alt((
+            value(Directive::CFIStartProc, tag("cfi_startproc")),
+            value(Directive::CFIEndProc, tag("cfi_endproc")),
+            map(
+                (
+                    tag("size"),
+                    space1,
+                    parse_label,
+                    char(','),
+                    space0,
+                    recognize(take_till(|c| c == '\n' || c == '\r')),
+                ),
+                |(_, _, name, _, _, value)| Directive::Size(name, value.to_string()),
+            ),
+            map(
+                (
+                    tag("align"),
+                    space1,
+                    map_res(digit1, |s: &str| s.parse::<u32>()),
+                ),
+                |(_, _, align)| Directive::Align(align),
+            ),
+            map(
+                preceded(
+                    tag("p2align"),
+                    preceded(
+                        space1,
+                        (
+                            map_res(digit1, |s: &str| s.parse::<u32>()),
+                            opt(preceded(
+                                (char(','), space0),
+                                opt(map_res(digit1, |s: &str| s.parse::<u32>())),
+                            )),
+                            opt(preceded(
+                                (char(','), space0),
+                                opt(map_res(digit1, |s: &str| s.parse::<u32>())),
+                            )),
+                        ),
+                    ),
+                ),
+                |(p1, opt_p2, opt_p3)| {
+                    let p2 = opt_p2.flatten().unwrap_or(0);
+                    let p3 = opt_p3.flatten().unwrap_or(0);
+                    Directive::P2Align(p1, p2, p3)
+                },
+            ),
+            map(
+                (
+                    tag("type"),
+                    space1,
+                    parse_label,
+                    char(','),
+                    space0,
+                    recognize(take_till(|c| c == '\n' || c == '\r')),
+                ),
+                |(_, _, name, _, _, value)| Directive::Type(name, value.to_string()),
+            ),
+        )),
+    )
+    .parse(input)
+}
+
+fn parse_label_def(input: &str) -> IResult<&str, RiscvInstruction> {
+    map(terminated(parse_label, char(':')), |label| {
+        RiscvInstruction::Label(label)
+    })
+    .parse(input)
+}
+
+fn parse_instruction_name(input: &str) -> IResult<&str, String> {
+    map(
+        take_while1(|c: char| c.is_alphanumeric() || c == '.'),
+        |s: &str| s.to_string(),
+    )
+    .parse(input)
+}
+
+fn is_load_instruction(name: &str) -> Option<Size> {
+    match name.to_lowercase().as_str() {
+        "lb" => Some(Size::Byte),
+        "lh" => Some(Size::Half),
+        "lw" => Some(Size::Word),
+        "ld" => Some(Size::Double),
+        _ => None,
+    }
+}
+
+fn is_store_instruction(name: &str) -> Option<Size> {
+    match name.to_lowercase().as_str() {
+        "sb" => Some(Size::Byte),
+        "sh" => Some(Size::Half),
+        "sw" => Some(Size::Word),
+        "sd" => Some(Size::Double),
+        _ => None,
+    }
+}
+
+fn is_unsigned_load_instruction(name: &str) -> Option<Size> {
+    match name.to_lowercase().as_str() {
+        "lbu" => Some(Size::Byte),
+        "lhu" => Some(Size::Half),
+        "lwu" => Some(Size::Word),
+        "ldu" => Some(Size::Double),
+        _ => None,
+    }
+}
+
+fn map_instruction(name: &str, operands: Vec<Operand>) -> RiscvInstruction {
+    let name_lower = name.to_lowercase();
+
+    match name_lower.as_str() {
+        "beq" | "bne" | "blt" | "bge" | "bgtu" | "bltu" | "bgeu" | "bleu" => {
+            let op = match name_lower.as_str() {
+                "beq" => ComparisonOp::Eq,
+                "bne" => ComparisonOp::Ne,
+                "blt" => ComparisonOp::Lt,
+                "bge" => ComparisonOp::Ge,
+                "bltu" => ComparisonOp::Ltu,
+                "bgeu" => ComparisonOp::Geu,
+                "bgtu" => ComparisonOp::Gtu,
+                "bleu" => ComparisonOp::Leu,
+                _ => unreachable!(),
+            };
+            if operands.len() == 3 {
+                if let (Operand::Register(rs1), Operand::Register(rs2), Operand::Label(label)) = (
+                    operands[0].clone(),
+                    operands[1].clone(),
+                    operands[2].clone(),
+                ) {
+                    RiscvInstruction::Branch {
+                        op,
+                        rs1,
+                        rs2,
+                        label,
+                    }
+                } else {
+                    RiscvInstruction::Unhandled(format!(
+                        "Invalid operands for {}: {:?}",
+                        name, operands
+                    ))
+                }
+            } else {
+                RiscvInstruction::Unhandled(format!(
+                    "{} requires three operands, got {}",
+                    name,
+                    operands.len()
+                ))
+            }
+        }
+        "not" => {
+            if operands.len() == 2 {
+                if let (Operand::Register(rd), Operand::Register(rs)) =
+                    (operands[0].clone(), operands[1].clone())
+                {
+                    return RiscvInstruction::Not { rd, rs };
+                }
+            }
+            RiscvInstruction::Unhandled(format!("Invalid operands for not: {:?}", operands))
+        }
+        "neg" => {
+            if operands.len() == 2 {
+                if let (Operand::Register(rd), Operand::Register(rs)) =
+                    (operands[0].clone(), operands[1].clone())
+                {
+                    return RiscvInstruction::Neg { rd, rs, size: None };
+                }
+            }
+            RiscvInstruction::Unhandled(format!("Invalid operands for neg: {:?}", operands))
+        }
+        "negw" => {
+            if operands.len() == 2 {
+                if let (Operand::Register(rd), Operand::Register(rs)) =
+                    (operands[0].clone(), operands[1].clone())
+                {
+                    return RiscvInstruction::Neg {
+                        rd,
+                        rs,
+                        size: Some(Size::Word),
+                    };
+                }
+            }
+            RiscvInstruction::Unhandled(format!("Invalid operands for negw: {:?}", operands))
+        }
+        "li" => {
+            if operands.len() == 2 {
+                if let (Operand::Register(rd), Operand::Immediate(value)) =
+                    (operands[0].clone(), operands[1].clone())
+                {
+                    RiscvInstruction::LoadImmidate {
+                        register: rd,
+                        value,
+                    }
+                } else {
+                    RiscvInstruction::Unhandled(format!("Invalid operands 
for li: {:?}", operands)) + } + } else { + RiscvInstruction::Unhandled(format!( + "li requires two operands, got {}", + operands.len() + )) + } + } + "bnez" => { + if operands.len() == 2 { + if let (Operand::Register(rs), Operand::Label(label)) = + (operands[0].clone(), operands[1].clone()) + { + RiscvInstruction::Branch { + op: ComparisonOp::Nez, + rs1: rs, + rs2: Register { + reg_type: RegisterType::Special("zero".to_string()), + number: None, + }, + label, + } + } else { + RiscvInstruction::Unhandled(format!( + "Invalid operands for bnez: {:?}", + operands + )) + } + } else { + RiscvInstruction::Unhandled(format!( + "bnez requires two operands, got {}", + operands.len() + )) + } + } + "beqz" => { + if operands.len() == 2 { + if let (Operand::Register(rs), Operand::Label(label)) = + (operands[0].clone(), operands[1].clone()) + { + RiscvInstruction::Branch { + op: ComparisonOp::Eq, + rs1: rs, + rs2: Register { + reg_type: RegisterType::Special("zero".to_string()), + number: None, + }, + label, + } + } else { + RiscvInstruction::Unhandled(format!( + "Invalid operands for beqz: {:?}", + operands + )) + } + } else { + RiscvInstruction::Unhandled(format!( + "beqz requires two operands, got {}", + operands.len() + )) + } + } + "jal" => { + if operands.len() == 2 { + if let (Operand::Register(rd), Operand::Label(label)) = + (operands[0].clone(), operands[1].clone()) + { + RiscvInstruction::Jump { + rd, + label: Some(label), + } + } else { + RiscvInstruction::Unhandled(format!("Invalid operands for jal: {:?}", operands)) + } + } else { + RiscvInstruction::Unhandled(format!( + "jal requires two operands, got {}", + operands.len() + )) + } + } + "j" => { + if operands.len() == 1 { + if let Operand::Label(label) = operands[0].clone() { + RiscvInstruction::Jump { + rd: Register { + reg_type: RegisterType::Special("zero".to_string()), + number: None, + }, + label: Some(label), + } + } else { + RiscvInstruction::Unhandled(format!("Invalid operand for j: {:?}", operands[0])) + } + } else { + RiscvInstruction::Unhandled(format!( + "j requires one operand, got {}", + operands.len() + )) + } + } + "jr" => { + if operands.len() == 1 { + if let Operand::Register(Register { reg_type, .. 
}) = operands[0].clone() { + RiscvInstruction::Jump { + rd: Register { + reg_type, + number: None, + }, + label: None, + } + } else { + RiscvInstruction::Unhandled(format!( + "Invalid operand for jr: {:?}", + operands[0] + )) + } + } else { + RiscvInstruction::Unhandled(format!( + "jr requires one operand, got {}", + operands.len() + )) + } + } + "call" => { + if operands.len() == 1 { + if let Operand::Label(name) = operands[0].clone() { + RiscvInstruction::Call { label: name } + } else { + RiscvInstruction::Unhandled(format!( + "Invalid operand for call: {:?}", + operands[0] + )) + } + } else { + RiscvInstruction::Unhandled(format!( + "call requires one operand/label, got {}", + operands.len() + )) + } + } + "la" => { + if operands.len() == 2 { + if let (Operand::Register(register), Operand::Label(label)) = + (operands[0].clone(), operands[1].clone()) + { + RiscvInstruction::LoadAddress { register, label } + } else { + RiscvInstruction::Unhandled(format!("Invalid la operands: {:?}", operands)) + } + } else { + RiscvInstruction::Unhandled(format!( + "la requires two operands, got {}", + operands.len() + )) + } + } + "fence" => { + if operands.len() == 2 { + if let (Operand::FenceMode(pred), Operand::FenceMode(succ)) = + (operands[0].clone(), operands[1].clone()) + { + RiscvInstruction::Fence { pred, succ } + } else { + RiscvInstruction::Unhandled(format!("Invalid fence operands: {:?}", operands)) + } + } else { + RiscvInstruction::Unhandled(format!( + "Fence requires two operands, got {}", + operands.len() + )) + } + } + "ret" if operands.is_empty() => RiscvInstruction::Return, + "mv" => { + if operands.len() == 2 { + if let (Operand::Register(dst), Operand::Register(src)) = + (operands[0].clone(), operands[1].clone()) + { + RiscvInstruction::Move(dst, src) + } else { + RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)) + } + } else { + RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)) + } + } + "sext.w" => { + if operands.len() == 2 { + if let (Operand::Register(dst), Operand::Register(src)) = + (operands[0].clone(), operands[1].clone()) + { + RiscvInstruction::SignExtendWord(dst, src) + } else { + RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)) + } + } else { + RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)) + } + } + _ => { + if let Some(size) = is_load_instruction(&name_lower) { + if operands.len() == 2 { + if let (Operand::Register(dst), Operand::Memory(src)) = + (operands[0].clone(), operands[1].clone()) + { + return RiscvInstruction::Load { size, dst, src }; + } else { + return RiscvInstruction::Unhandled(format!( + "Invalid operands for load: {:?}", + operands + )); + } + } else { + return RiscvInstruction::Unhandled(format!( + "Invalid operands for load: {:?}", + operands + )); + } + } + if let Some(size) = is_unsigned_load_instruction(&name_lower) { + if operands.len() == 2 { + if let (Operand::Register(dst), Operand::Memory(src)) = + (operands[0].clone(), operands[1].clone()) + { + return RiscvInstruction::UnsignedLoad { size, dst, src }; + } else { + return RiscvInstruction::Unhandled(format!( + "Invalid operands for load: {:?}", + operands + )); + } + } else { + return RiscvInstruction::Unhandled(format!( + "Invalid operands for load: {:?}", + operands + )); + } + } else if let Some(size) = is_store_instruction(&name_lower) { + if operands.len() == 2 { + if let (Operand::Register(src), Operand::Memory(dst)) = + (operands[0].clone(), operands[1].clone()) + { + return RiscvInstruction::Store { size, src, dst }; + } else 
{ + return RiscvInstruction::Unhandled(format!( + "Invalid operands for store:{:?}", + operands + )); + } + } else { + return RiscvInstruction::Unhandled(format!( + "Invalid operands for store: {:?}", + operands + )); + } + } else if name_lower.starts_with("lr") + || name_lower.starts_with("sc") + || name_lower.starts_with("amo") + { + let parts: Vec<&str> = name_lower.split('.').collect(); + if parts.len() >= 2 { + let base_op = parts[0]; + let size_str = parts[1]; + let semantics_str = if parts.len() > 2 { parts[2] } else { "" }; + + let size = match size_str { + "b" => Size::Byte, + "h" => Size::Half, + "w" => Size::Word, + "d" => Size::Double, + _ => { + return RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)); + } + }; + + let semantics = match semantics_str { + "aq" => AtomicSemantics::Acquire, + "rl" => AtomicSemantics::Release, + "aqrl" => AtomicSemantics::AcquireRelease, + "" => AtomicSemantics::None, + _ => { + return RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)); + } + }; + + if base_op == "lr" { + if operands.len() == 2 { + if let (Operand::Register(rd), Operand::Memory(addr)) = + (operands[0].clone(), operands[1].clone()) + { + return RiscvInstruction::LoadReserved { + size, + semantics, + rd, + addr, + }; + } + } + } else if base_op == "sc" { + if operands.len() == 3 { + if let ( + Operand::Register(rd), + Operand::Register(rs2), + Operand::Memory(addr), + ) = ( + operands[0].clone(), + operands[1].clone(), + operands[2].clone(), + ) { + return RiscvInstruction::StoreConditional { + size, + semantics, + rd, + rs2, + addr, + }; + } + } + } else if base_op.starts_with("amo") { + let amo_op_str = &base_op[3..]; + let op = match amo_op_str { + "swap" => AtomicOp::Swap, + "add" => AtomicOp::Add, + "and" => AtomicOp::And, + "or" => AtomicOp::Or, + "xor" => AtomicOp::Xor, + "max" => AtomicOp::Max, + "min" => AtomicOp::Min, + _ => { + return RiscvInstruction::Unhandled(format!( + "{} {:?}", + name, operands + )); + } + }; + if operands.len() == 3 { + if let ( + Operand::Register(rd), + Operand::Register(rs2), + Operand::Memory(addr), + ) = ( + operands[0].clone(), + operands[1].clone(), + operands[2].clone(), + ) { + return RiscvInstruction::Atomic { + op, + size, + semantics, + rd, + rs2, + addr, + }; + } + } + } + return RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)); + } else { + return RiscvInstruction::Unhandled(format!( + "Invalid operands for load: {:?}", + operands + )); + } + } else { + if name_lower.ends_with('w') { + let base_name = name_lower.trim_end_matches('w'); + + match base_name { + "add" | "sub" | "mul" | "and" | "or" | "xor" | "sll" | "srl" | "sra" => { + if operands.len() == 3 { + if let ( + Operand::Register(rd), + Operand::Register(rs1), + Operand::Register(rs2), + ) = ( + operands[0].clone(), + operands[1].clone(), + operands[2].clone(), + ) { + let op = match base_name { + "add" => ArithmeticOp::Add, + "sub" => ArithmeticOp::Sub, + "mul" => ArithmeticOp::Mul, + "and" => ArithmeticOp::And, + "or" => ArithmeticOp::Or, + "xor" => ArithmeticOp::Xor, + "sll" => ArithmeticOp::Sll, + "srl" => ArithmeticOp::Srl, + "sra" => ArithmeticOp::Sra, + _ => unreachable!(), + }; + return RiscvInstruction::ArithmeticRR { + op, + rd, + rs1, + rs2, + size: Some(Size::Word), + }; + } + } + return RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)); + } + _ => {} + } + } + + if name_lower.ends_with("iw") { + let base_name = name_lower.trim_end_matches("iw"); + + match base_name { + "add" | "and" | "or" | "xor" | "sll" | 
"srl" | "sra" => { + if operands.len() == 3 { + if let ( + Operand::Register(rd), + Operand::Register(rs1), + Operand::Immediate(imm), + ) = ( + operands[0].clone(), + operands[1].clone(), + operands[2].clone(), + ) { + let op = match base_name { + "add" => ArithmeticOp::Add, + "and" => ArithmeticOp::And, + "or" => ArithmeticOp::Or, + "xor" => ArithmeticOp::Xor, + "sll" => ArithmeticOp::Sll, + "srl" => ArithmeticOp::Srl, + "sra" => ArithmeticOp::Sra, + _ => unreachable!(), + }; + return RiscvInstruction::ArithmeticRI { + op, + rd, + rs1, + imm, + size: Some(Size::Word), + }; + } + } + return RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)); + } + _ => {} + } + } + match name_lower.as_str() { + "add" | "sub" | "mul" | "and" | "or" | "xor" | "sll" | "srl" | "sra" => { + if operands.len() == 3 { + if let ( + Operand::Register(rd), + Operand::Register(rs1), + Operand::Register(rs2), + ) = ( + operands[0].clone(), + operands[1].clone(), + operands[2].clone(), + ) { + let op = match name_lower.as_str() { + "add" => ArithmeticOp::Add, + "sub" => ArithmeticOp::Sub, + "mul" => ArithmeticOp::Mul, + "and" => ArithmeticOp::And, + "or" => ArithmeticOp::Or, + "xor" => ArithmeticOp::Xor, + "sll" => ArithmeticOp::Sll, + "srl" => ArithmeticOp::Srl, + "sra" => ArithmeticOp::Sra, + _ => unreachable!(), + }; + return RiscvInstruction::ArithmeticRR { + op, + rd, + rs1, + rs2, + size: None, + }; + } + } + RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)) + } + "addi" | "andi" | "ori" | "xori" | "slli" | "srli" | "srai" => { + if operands.len() == 3 { + if let ( + Operand::Register(rd), + Operand::Register(rs1), + Operand::Immediate(imm), + ) = ( + operands[0].clone(), + operands[1].clone(), + operands[2].clone(), + ) { + let op = match name_lower.as_str() { + "addi" => ArithmeticOp::Add, + "andi" => ArithmeticOp::And, + "ori" => ArithmeticOp::Or, + "xori" => ArithmeticOp::Xor, + "slli" => ArithmeticOp::Sll, + "srli" => ArithmeticOp::Srl, + "srai" => ArithmeticOp::Sra, + _ => unreachable!(), + }; + return RiscvInstruction::ArithmeticRI { + op, + rd, + rs1, + imm, + size: None, + }; + } + } + RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)) + } + _ => RiscvInstruction::Unhandled(format!("{} {:?}", name, operands)), + } + } + } + } +} + +fn parse_instruction(input: &str) -> IResult<&str, RiscvInstruction> { + let (input, name) = parse_instruction_name(input)?; + let (input, operands) = parse_operands(input)?; + let instr = map_instruction(&name, operands); + Ok((input, instr)) +} + +fn parse_line(input: &str) -> IResult<&str, Option> { + if input.is_empty() { + return Err(nom::Err::Error(nom::error::Error::new( + input, + nom::error::ErrorKind::Eof, + ))); + } + + let (input, line) = take_till(|c| c == '\n')(input)?; + let (input, _) = opt(char('\n')).parse(input)?; + let line_without_comment = line.split('#').next().unwrap().trim(); + + if line_without_comment.is_empty() { + Ok((input, None)) + } else { + let result = alt(( + parse_label_def, + map(parse_directive, |dir| RiscvInstruction::Directive(dir)), + parse_instruction, + )) + .parse(line_without_comment); + + match result { + Ok((remaining, instr)) if remaining.is_empty() => Ok((input, Some(instr))), + _ => Ok((input, Some(RiscvInstruction::Unhandled(line.to_string())))), + } + } +} + +pub fn parse_riscv_assembly(input: &str) -> IResult<&str, Vec> { + let (input, _) = multispace0(input)?; + let (input, options) = many0(parse_line).parse(input)?; + let instructions: Vec = options.into_iter().flatten().collect(); + 
Ok((input, instructions)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_register() { + let (_, reg) = parse_register("a0").unwrap(); + assert_eq!( + reg, + Register { + reg_type: RegisterType::A, + number: Some(0) + } + ); + } + + #[test] + fn test_parse_memory_operand() { + let (_, mem) = parse_memory_operand("0(a0)").unwrap(); + assert_eq!( + mem, + MemoryOperand { + offset: 0, + base: Register { + reg_type: RegisterType::A, + number: Some(0) + } + } + ); + } + + #[test] + fn test_parse_fence() { + let (_, instr) = parse_instruction("fence rw,w").unwrap(); + assert_eq!( + instr, + RiscvInstruction::Fence { + pred: FenceMode::ReadWrite, + succ: FenceMode::Write, + } + ); + } + + #[test] + fn test_parse_store() { + let (_, instr) = parse_instruction("sd a1,0(a0)").unwrap(); + assert_eq!( + instr, + RiscvInstruction::Store { + size: Size::Double, + src: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + dst: MemoryOperand { + offset: 0, + base: Register { + reg_type: RegisterType::A, + number: Some(0) + } + }, + } + ); + } + + #[test] + fn test_parse_atomic() { + let (_, instr) = parse_instruction("amoswap.d.aqrl a1,a1,0(a0)").unwrap(); + assert_eq!( + instr, + RiscvInstruction::Atomic { + op: AtomicOp::Swap, + size: Size::Double, + semantics: AtomicSemantics::AcquireRelease, + rd: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + rs2: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + addr: MemoryOperand { + offset: 0, + base: Register { + reg_type: RegisterType::A, + number: Some(0) + } + }, + } + ); + } + + #[test] + fn test_parse_label() { + let input = "label:"; + let (_, instruction) = parse_riscv_assembly(input).unwrap(); + assert_eq!(instruction[0], RiscvInstruction::Label("label".to_string())); + + let input = ".label:"; + let (_, instruction) = parse_riscv_assembly(input).unwrap(); + assert_eq!( + instruction[0], + RiscvInstruction::Label(".label".to_string()) + ); + } + + #[test] + fn test_parse_labels() { + let input = r#" +my_label: +.local_label: +func.123: +_start: +.label$with$dollars: + "#; + let (_, instructions) = parse_riscv_assembly(input).unwrap(); + assert_eq!(instructions.len(), 5); + assert_eq!( + instructions[0], + RiscvInstruction::Label("my_label".to_string()) + ); + assert_eq!( + instructions[1], + RiscvInstruction::Label(".local_label".to_string()) + ); + assert_eq!( + instructions[2], + RiscvInstruction::Label("func.123".to_string()) + ); + assert_eq!( + instructions[3], + RiscvInstruction::Label("_start".to_string()) + ); + assert_eq!( + instructions[4], + RiscvInstruction::Label(".label$with$dollars".to_string()) + ); + } + + #[test] + fn test_parse_full_assembly() { + let input = r#" +vatomic64_write: + fence rw,w + sd a1,0(a0) + .inner_label: + fence rw,rw + ret + "#; + let (_, instructions) = parse_riscv_assembly(input).unwrap(); + assert_eq!(instructions.len(), 6); + assert_eq!( + instructions[0], + RiscvInstruction::Label("vatomic64_write".to_string()) + ); + assert_eq!( + instructions[1], + RiscvInstruction::Fence { + pred: FenceMode::ReadWrite, + succ: FenceMode::Write, + } + ); + } + + #[test] + fn test_parse_load_reserved() { + let (_, instr) = parse_instruction("lr.w.aq a0,0(a1)").unwrap(); + assert_eq!( + instr, + RiscvInstruction::LoadReserved { + size: Size::Word, + semantics: AtomicSemantics::Acquire, + rd: Register { + reg_type: RegisterType::A, + number: Some(0) + }, + addr: MemoryOperand { + offset: 0, + base: Register { + reg_type: RegisterType::A, + number: Some(1) + 
} + }, + } + ); + } + + #[test] + fn test_parse_store_conditional() { + let (_, instr) = parse_instruction("sc.d.rl a0,a2,0(a1)").unwrap(); + assert_eq!( + instr, + RiscvInstruction::StoreConditional { + size: Size::Double, + semantics: AtomicSemantics::Release, + rd: Register { + reg_type: RegisterType::A, + number: Some(0) + }, + rs2: Register { + reg_type: RegisterType::A, + number: Some(2) + }, + addr: MemoryOperand { + offset: 0, + base: Register { + reg_type: RegisterType::A, + number: Some(1) + } + }, + } + ); + } + + #[test] + fn test_parse_atomic_swap() { + let (_, instr) = parse_instruction("amoswap.d.aqrl a1,a2,0(a0)").unwrap(); + assert_eq!( + instr, + RiscvInstruction::Atomic { + op: AtomicOp::Swap, + size: Size::Double, + semantics: AtomicSemantics::AcquireRelease, + rd: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + rs2: Register { + reg_type: RegisterType::A, + number: Some(2) + }, + addr: MemoryOperand { + offset: 0, + base: Register { + reg_type: RegisterType::A, + number: Some(0) + } + }, + } + ); + } + + #[test] + fn test_parse_arithmetic_rr() { + let (_, instr) = parse_instruction("add a0,a1,a2").unwrap(); + assert_eq!( + instr, + RiscvInstruction::ArithmeticRR { + op: ArithmeticOp::Add, + rd: Register { + reg_type: RegisterType::A, + number: Some(0) + }, + rs1: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + rs2: Register { + reg_type: RegisterType::A, + number: Some(2) + }, + size: None, + } + ); + } + + #[test] + fn test_parse_arithmetic_ri() { + let (_, instr) = parse_instruction("addi a0,a1,42").unwrap(); + assert_eq!( + instr, + RiscvInstruction::ArithmeticRI { + op: ArithmeticOp::Add, + rd: Register { + reg_type: RegisterType::A, + number: Some(0) + }, + rs1: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + imm: 42, + size: None, + } + ); + } + + #[test] + fn test_parse_branch() { + let (_, instr) = parse_instruction("beq a0, a1, my_label").unwrap(); + assert_eq!( + instr, + RiscvInstruction::Branch { + op: ComparisonOp::Eq, + rs1: Register { + reg_type: RegisterType::A, + number: Some(0) + }, + rs2: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + label: "my_label".to_string(), + } + ); + } + + #[test] + fn test_parse_branch_pseudo() { + let (_, instr) = parse_instruction("bnez a0, my_label").unwrap(); + assert_eq!( + instr, + RiscvInstruction::Branch { + op: ComparisonOp::Nez, + rs1: Register { + reg_type: RegisterType::A, + number: Some(0) + }, + rs2: Register { + reg_type: RegisterType::Special("zero".to_string()), + number: None, + }, + label: "my_label".to_string(), + } + ); + } + + #[test] + fn test_parse_jump() { + let (_, instr) = parse_instruction("jal ra, my_function").unwrap(); + assert_eq!( + instr, + RiscvInstruction::Jump { + rd: Register { + reg_type: RegisterType::Special("ra".to_string()), + number: None, + }, + label: Some("my_function".to_string()), + } + ); + } + + #[test] + fn test_parse_pseudo_jump() { + let (_, instr) = parse_instruction("j my_label").unwrap(); + assert_eq!( + instr, + RiscvInstruction::Jump { + rd: Register { + reg_type: RegisterType::Special("zero".to_string()), + number: None, + }, + label: Some("my_label".to_string()), + } + ); + } + + #[test] + fn test_parse_instruction_with_comment() { + let input = "beq a0, a1, my_label # branch if equal\n"; + let (_, instructions) = parse_riscv_assembly(input).unwrap(); + assert_eq!(instructions.len(), 1); + assert_eq!( + instructions[0], + RiscvInstruction::Branch { + op: ComparisonOp::Eq, + rs1: Register { + 
reg_type: RegisterType::A, + number: Some(0) + }, + rs2: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + label: "my_label".to_string(), + } + ); + } + + #[test] + fn test_parse_amoswap() { + let (_, instr) = parse_instruction("amoswap.w.rl a1,a1,0(a0)").unwrap(); + assert_eq!( + instr, + RiscvInstruction::Atomic { + op: AtomicOp::Swap, + size: Size::Word, + semantics: AtomicSemantics::Release, + rd: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + rs2: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + addr: MemoryOperand { + offset: 0, + base: Register { + reg_type: RegisterType::A, + number: Some(0) + } + }, + } + ); + } + + #[test] + fn test_parse_amoadd() { + let (_, instr) = parse_instruction("amoadd.w.aqrl a5,a1,0(a0)").unwrap(); + assert_eq!( + instr, + RiscvInstruction::Atomic { + op: AtomicOp::Add, + size: Size::Word, + semantics: AtomicSemantics::AcquireRelease, + rd: Register { + reg_type: RegisterType::A, + number: Some(5) + }, + rs2: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + addr: MemoryOperand { + offset: 0, + base: Register { + reg_type: RegisterType::A, + number: Some(0) + } + }, + } + ); + } + + #[test] + fn test_parse_hex_immediate() { + let (_, instr) = parse_instruction("andi a0, a5, 0xff").unwrap(); + assert_eq!( + instr, + RiscvInstruction::ArithmeticRI { + op: ArithmeticOp::And, + rd: Register { + reg_type: RegisterType::A, + number: Some(0) + }, + rs1: Register { + reg_type: RegisterType::A, + number: Some(5) + }, + imm: 0xff, + size: None, + } + ); + + let (_, instr) = parse_instruction("addi a0, a1, 0xFF").unwrap(); + assert_eq!( + instr, + RiscvInstruction::ArithmeticRI { + op: ArithmeticOp::Add, + rd: Register { + reg_type: RegisterType::A, + number: Some(0) + }, + rs1: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + imm: 0xFF, + size: None, + } + ); + + let (_, instr) = parse_instruction("addi a0, a1, 0x10").unwrap(); + assert_eq!( + instr, + RiscvInstruction::ArithmeticRI { + op: ArithmeticOp::Add, + rd: Register { + reg_type: RegisterType::A, + number: Some(0) + }, + rs1: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + imm: 16, + size: None, + } + ); + } + + #[test] + fn test_parse_numeric_local_labels() { + let (_, instructions) = parse_riscv_assembly("1f:\n beq a0, a1, 1f").unwrap(); + assert_eq!(instructions.len(), 2); + assert_eq!(instructions[0], RiscvInstruction::Label("1f".to_string())); + assert_eq!( + instructions[1], + RiscvInstruction::Branch { + op: ComparisonOp::Eq, + rs1: Register { + reg_type: RegisterType::A, + number: Some(0) + }, + rs2: Register { + reg_type: RegisterType::A, + number: Some(1) + }, + label: "1f".to_string(), + } + ); + } + + #[test] + fn test_parse_multiline_assembly() { + let input = r#" +main: + addi sp, sp, -16 + sd ra, 8(sp) + li a0, 10 + li a1, 20 + add a2, a0, a1 +loop_start: + addi a0, a0, -1 + bnez a0, loop_start + + mv a0, a2 + ld ra, 8(sp) + addi sp, sp, 16 + ret +"#; + + let (remaining, instructions) = parse_riscv_assembly(input).unwrap(); + + assert!(remaining.is_empty(), "Parser did not consume all input"); + + assert_eq!( + instructions.len(), + 13, + "Incorrect number of instructions parsed" + ); + + assert!( + instructions + .iter() + .any(|instr| matches!(instr, RiscvInstruction::Label(label) if label == "main")), + "Main label not found" + ); + + let arithmetic_count = instructions + .iter() + .filter(|instr| { + matches!( + instr, + RiscvInstruction::ArithmeticRI { .. 
} | RiscvInstruction::ArithmeticRR { .. }
+                )
+            })
+            .count();
+        assert_eq!(
+            arithmetic_count, 4,
+            "Incorrect number of arithmetic instructions"
+        );
+
+        let has_branch = instructions.iter().any(|instr| {
+            matches!(
+                instr,
+                RiscvInstruction::Branch {
+                    op: ComparisonOp::Nez,
+                    ..
+                }
+            )
+        });
+        assert!(has_branch, "Branch instruction not found");
+
+        let has_return = instructions
+            .iter()
+            .any(|instr| matches!(instr, RiscvInstruction::Return));
+        assert!(has_return, "Return instruction not found");
+    }
+}
diff --git a/verify/src/riscv/transform.rs b/verify/src/riscv/transform.rs
new file mode 100644
index 00000000..b823f0f4
--- /dev/null
+++ b/verify/src/riscv/transform.rs
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ */
+use super::*;
+
+use head_tail_iter::HeadTailIterator;
+use std::collections::HashMap;
+
+pub fn extract_riscv_functions(
+    parsed: Vec<RiscvInstruction>,
+    names: Option<&[String]>,
+    valid_prefix: &[&str],
+) -> Vec<RiscvFunction> {
+    let mut functions = Vec::new();
+    let mut current_function: Option<(String, Vec<RiscvInstruction>)> = None;
+
+    for instr in parsed {
+        match &instr {
+            RiscvInstruction::Label(name)
+                if name
+                    .chars()
+                    .next()
+                    .map_or(false, |c| c.is_alphabetic() || c == '_') =>
+            {
+                if let Some((prev_name, prev_instrs)) = current_function {
+                    functions.push(RiscvFunction {
+                        name: prev_name,
+                        instructions: prev_instrs,
+                    });
+                }
+                current_function = Some((name.clone(), vec![instr.clone()]));
+            }
+            _ => {
+                if let Some((_, ref mut instrs)) = current_function {
+                    instrs.push(instr.clone());
+                }
+            }
+        }
+    }
+
+    if let Some((name, instrs)) = current_function {
+        functions.push(RiscvFunction {
+            name,
+            instructions: instrs,
+        });
+    }
+
+    match names {
+        Some(names) => {
+            let valid_ns = valid_prefix;
+            functions
+                .into_iter()
+                .filter(|f| {
+                    if f.name.starts_with("vatomic") {
+                        let rest = &f.name[7..]; // Remove "vatomic"
+                        if let Some((n, suffix)) = rest.split_once('_') {
+                            valid_ns.contains(&n) && names.contains(&suffix.to_string())
+                        } else {
+                            false
+                        }
+                    } else {
+                        false
+                    }
+                })
+                .collect()
+        }
+        None => functions,
+    }
+}
+
+pub fn remove_directives(function: &RiscvFunction) -> RiscvFunction {
+    let new_instructions = function
+        .instructions
+        .iter()
+        .filter(|instruction| !matches!(instruction, RiscvInstruction::Directive(_)))
+        .cloned()
+        .collect();
+
+    RiscvFunction {
+        name: function.name.clone(),
+        instructions: new_instructions,
+    }
+}
+
+macro_rules! 
to_label_name { + ($i:expr) => { + format!("L{}", $i) + }; +} + +fn get_label_index( + fwd_map: &HashMap<&str, usize>, + back_map: &HashMap<&str, usize>, + label: &String, +) -> String { + let fwd = label.ends_with('f'); + let bwd = label.ends_with('b'); + let new_label_idx = if fwd || bwd { + let short_label = &label[0..label.len() - 1]; + (if fwd { &fwd_map } else { &back_map })[short_label] + } else { + back_map + .get(label.as_str()) + .copied() + .unwrap_or_else(|| fwd_map[label.as_str()]) + }; + to_label_name!(new_label_idx) +} + +pub fn transform_labels(function: &RiscvFunction) -> RiscvFunction { + let mut back_map = HashMap::new(); + let new_instructions = function + .instructions + .head_tail_pairs() + .enumerate() + .map(|(i, (instr, remaining))| { + let mut fwd_map: HashMap<&str, usize> = HashMap::new(); + for (idx, instr) in remaining.iter().enumerate() { + if let RiscvInstruction::Label(label) = instr { + fwd_map.entry(label.as_str()).or_insert(idx + i + 1); + } + } + + match instr { + RiscvInstruction::Label(name) => { + back_map.insert(name.as_str(), i); + RiscvInstruction::Label(to_label_name!(i)) + } + RiscvInstruction::Jump { rd, label: Some(label) } => RiscvInstruction::Jump { + rd: rd.clone(), + label: Some(get_label_index(&fwd_map, &back_map, label)), + }, + RiscvInstruction::Branch { + op, + rs1, + rs2, + label, + } => RiscvInstruction::Branch { + op: op.clone(), + rs1: rs1.clone(), + rs2: rs2.clone(), + label: get_label_index(&fwd_map, &back_map, label), + }, + _ => instr.clone(), + } + }) + .collect(); + + RiscvFunction { + name: function.name.clone(), + instructions: new_instructions, + } +} diff --git a/verify/verify.sh b/verify/verify.sh new file mode 100755 index 00000000..0f7babac --- /dev/null +++ b/verify/verify.sh @@ -0,0 +1,39 @@ +#!/bin/sh +set -eu + +if [ $# -ne 2 ]; then + echo "Usage: $0 FUNCTION_NAME ARCH" + exit 1 +fi + +FUNC="$1" +ARCH="$2" + +OUT="out/$ARCH" +OUT_RETRY="out_retry/$ARCH" + +verify() { + phase="$1" + outdir="$2" + echo "Verifying $FUNC on $ARCH (phase $phase, outdir=$outdir)" + boogie /proverOpt:SOLVER=z3 \ + "$outdir/$FUNC"/*.bpl \ + ./boogie/auxiliary.bpl \ + "./$ARCH/library.bpl" +} + +# phase 1 +if verify 1 "$OUT" | tee /dev/stderr | grep -q "0 errors"; then + echo "$FUNC on $ARCH passed phase 1" + exit 0 +fi + +# phase 2 +echo "Phase 1 failed, retrying with heavy verification..." +if verify 2 "$OUT_RETRY" | tee /dev/stderr | grep -q "0 errors"; then + echo "$FUNC on $ARCH passed phase 2" + exit 0 +else + echo "$FUNC on $ARCH failed verification" + exit 1 +fi diff --git a/verify/write.bpl b/verify/write.bpl deleted file mode 100644 index a1ffe08c..00000000 --- a/verify/write.bpl +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
- * SPDX-License-Identifier: MIT - */ -procedure write(store_order: OrderRelation, addr, src: Register); - modifies step, states, effects, ordering, global_monitor_exclusive, event_register; - ensures {:msg "store happens within function bounds"} ( - var i := states[step]->last_store; - old(step) <= i && i < step - ); - ensures {:msg "order"} - store_order[states[step]->last_store, old(step), step, ordering]; - ensures {:msg "produces write effect"} ( - var gpr := old(states[step]->gpr); - effects[states[step]->last_store][write(gpr[addr], gpr[src])] - ); - ensures {:msg "no other writes"} ( - forall i : StateIndex, e : Effect :: - old(step) <= i && i < step && i != states[step]->last_store ==> - !(effects[i][e] && (e is write)) - ); - diff --git a/verify/xchg.bpl b/verify/xchg.bpl deleted file mode 100644 index 06edc838..00000000 --- a/verify/xchg.bpl +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (C) Huawei Technologies Co., Ltd. 2025. All rights reserved. - * SPDX-License-Identifier: MIT - */ -procedure xchg(load_order, store_order: OrderRelation, addr, input: Register) returns (output: Register); - modifies step, states, effects, ordering, global_monitor_exclusive, event_register; - ensures {:msg "load happens within function bounds"} ( - var i := states[step]->last_load; - old(step) <= i && i < step - ); - ensures {:msg "store happens within function bounds"} ( - var i := states[step]->last_store; - old(step) <= i && i < step - ); - ensures {:msg "output register contains correct value"} - states[step]->gpr[output] == memory[states[step]->last_load, old(states[step]->gpr[addr])]; - ensures {:msg "memory is preserved between load and store"} ( - var load, store, addr := states[step]->last_load, states[step]->last_store, old(states[step]->gpr[addr]); - memory[load, addr] == memory[store, addr] - ); - ensures {:msg "load produces read to correct address"} - effects[states[step]->last_load][read(old(states[step]->gpr[addr]))]; - ensures {:msg "store produces write to correct address with correct value"} - effects[states[step]->last_store][write( - old(states[step]->gpr[addr]), - old(states[step]->gpr[input]) // will be replaced depending on function - )]; - ensures {:msg "no other write effects"} - (forall i: StateIndex, e: Effect :: - old(step) <= i && i < step && i != states[step]->last_store ==> - !(effects[i][e] && e is write) - ); - /* Above is equivalent to - no_writes(old(step), states[step]->last_store, effects) && - no_writes(states[step]->last_store + 1, step, effects); - */ - ensures {:msg "load ordering"} - load_order[states[step]->last_load, old(step), step, ordering]; - ensures {:msg "store ordering"} - store_order[states[step]->last_store, old(step), step, ordering]; - -procedure cmpxchg(load_order, store_order: OrderRelation, addr, exp, input: Register) returns (output: Register); - modifies step, states, effects, ordering, global_monitor_exclusive, event_register; - - ensures {:msg "load happens within function bounds"} ( - var load := states[step]->last_load; - old(step) <= load && load < step - ); - ensures {:msg "load ordering"} - load_order[states[step]->last_load, old(step), step, ordering]; - ensures {:msg "output register contains loaded value"} - states[step]->gpr[output] == memory[states[step]->last_load, old(states[step]->gpr[addr])]; - ensures {:msg "if no write happened, the value from memory is already the result of operation"} - no_writes(old(step), step, effects) ==> ( - var load, gpr := states[step]->last_load, old(states[step]->gpr); ( - var load_val := 
memory[load, gpr[addr]]; ( - load_val != states[step]->gpr[exp]))); - ensures {:msg "store happens within function bounds"} - !no_writes(old(step), step, effects) ==> ( - var store := states[step]->last_store; - old(step) <= store && store < step - ); - ensures {:msg "memory is preserved between load and store"} - !no_writes(old(step), step, effects) ==> ( - var load, store, addr := states[step]->last_load, states[step]->last_store, old(states[step]->gpr[addr]); ( - memory[load, addr] == memory[store, addr] - )); - ensures {:msg "store produces write to correct address with correct value"} - !no_writes(old(step), step, effects) ==> ( - var load, store, gpr := states[step]->last_load, states[step]->last_store, old(states[step]->gpr); ( - var load_val := memory[load, gpr[addr]]; ( - var store_val := gpr[input]; ( - effects[store][write(gpr[addr], store_val)] && load_val == states[step]->gpr[exp]) - ))); - ensures {:msg "no other write effects"} - !no_writes(old(step), step, effects) ==> ( - var store := states[step]->last_store; ( - forall i: StateIndex, e: Effect :: - old(step) <= i && i < step && i != store ==> - !(effects[i][e] && e is write) - )); - ensures {:msg "store ordering"} - !no_writes(old(step), step, effects) ==> ( - var store := states[step]->last_store; - store_order[store, old(step), step, ordering] - ); - - diff --git a/vmm.cat b/vmm.cat index 69b9e7c3..6b2c646d 100644 --- a/vmm.cat +++ b/vmm.cat @@ -5,53 +5,71 @@ * Version 0.9.4 */ -(* Notation - We will use a, b, c, ... for non-atomic but shared locations, r, r1, r2, .. for non-shared locations (each thread has its own set of these), and x, y, z for shared locations. - We use x_BAR for reading x with barrier BAR, and x =_BAR ... for storing to x with barrier BAR. - T1 || T2 means running the codes T1 and T2 in parallel in different threads. - C1 ~~> C2 means C1 "can be transformed to" C2 by the compiler. - - Cheat sheet - Operators: - r&s = both r and s - r|s = either r or s - r;s = first r then s - ~r = not r - r* = zero or more r - r+ = one or more r - r? = one or zero r - [t] = relates all events with tag t to themselves - - domain(r) = all events with outgoing r edges - range(r) = all events with incoming r edges - - Tags: - IW = initial write - M = any memory access - R = read access - W = write access - F = fences - RLX, REL, ACQ, SC = barrier modes - - Relations: - ext = external, different threads - int = internal, same threads - - rf = reads from, W -> R - fr = from-reads, R -> W - co = coherence, W -> W - - rmw = read-modify-write, relates R and W events of a single atomic operation like atomic increment - po = program order, relates events of the same thread according to order in program text. NB: Not always a total relation even in one thread because of C's sequencing rules. - loc = relate events to the same location - po-loc = po & loc - data, addr, ctrl = data (x =_rlx y_rlx), address (*(y_rlx) =_rlx ...), control (if (y_rlx) x =_rlx ...) dependencies. - The real model only considers "real" dependencies that the compiler doesn't know how to bypass. - Example: +(******************************************************************************* + == Notation == + + We use a, b, c, ... for non-atomic but shared locations, r, r1, r2, ... + for non-shared locations (each thread has its own set of these), and x, y, z + for shared locations. + + We use x_BAR for reading x with barrier BAR, and x =_BAR ... for storing + to x with barrier BAR. 
+ + T1 || T2 means running the codes T1 and T2 in parallel in different threads. + C1 ~~> C2 means C1 "can be transformed to" C2 by the compiler. + + == Cheat sheet == + + Operators: + r&s = both r and s + r|s = either r or s + r;s = first r then s + ~r = not r + r* = zero or more r + r+ = one or more r + r? = one or zero r + [t] = relates all events with tag t to themselves + + domain(r) = all events with outgoing r edges + range(r) = all events with incoming r edges + + Tags: + IW = initial write + M = any memory access + R = read access + W = write access + F = fences + RLX, REL, ACQ, SC = barrier modes + + Relations: + ext = external, different threads + int = internal, same threads + + rf = reads from, W -> R + fr = from-reads, R -> W + co = coherence, W -> W + + rmw = read-modify-write, relates R and W events of a single atomic operation + like atomic increment + po = program order, relates events of the same thread according to order + in program text. NB: Not always a total relation even in one thread + because of C's sequencing rules. + loc = relate events to the same location + po-loc = po & loc + + data = data (x =_rlx y_rlx) dependencies. + addr = address (*(y_rlx) =_rlx ...) dependencies. + ctrl = control (if (y_rlx) x =_rlx ...) dependencies. + + The real model only considers "real" dependencies that the compiler doesn't + know how to bypass. Example: + if (x_rlx==1) {} y_rlx=1; - has no real ctrl edge. However, tools may not implement this rigorously and can have some false positive dependencies. -*) + has no real ctrl edge. However, tools may not implement this rigorously + and can have some false positive dependencies. + +*******************************************************************************) let ext = ext & ((~IW) * M) let int = int | (IW * M) @@ -75,45 +93,66 @@ let Rel = (REL | SC) & (W | F) (** Ordering **) (* In our model, dependencies only order stores: - r1 = x_rlx; - r2 = *r1; - if (r2 == 1) { - y =_rlx 1; // always after x_rlx - r3 = z_rlx; // may be before x_rlx - } - - In this example, the load *r1 carries the dependency from r1 = x_rlx to y =_rlx 1. - To make such examples work in VMM, we define a carry-dep relation, and extend all dependencies to not only include the immediate dependency, but also any dependencies carried over from earlier loads. + * r1 = x_rlx; + * r2 = *r1; + * if (r2 == 1) { + * y =_rlx 1; // always after x_rlx + * r3 = z_rlx; // may be before x_rlx + * } + * + * In this example, the load *r1 carries the dependency from r1 = x_rlx to + * y =_rlx 1. + * To make such examples work in VMM, we define a carry-dep relation, and + * extend all dependencies to not only include the immediate dependency, + * but also any dependencies carried over from earlier loads. *) + let carry-dep = data;rfi | addr | ctrl let ctrl = carry-dep* ; ctrl let addr = carry-dep* ; addr let data = carry-dep* ; data let dep = ctrl | addr | data -(* Note: Plain writes can be elided and therefore are generally not ordered by things that order writes *) +(* Note: Plain writes can be elided and therefore are generally not ordered + * by things that order writes + *) + (* Barrier Ordered-Before: barrier ordering rules *) let bob = [Acq];po | po;[Rel] | [SC];po;[SC] | po;[SC & F];po | [R];po;[Acq & F];po | po;[Rel & F];po;[W & Marked] -(* Preserved Program-Order: these are never visibly reordered by compiler and hardware. 
- Includes both barrier ordering, and dependency ordering + same-address ordering *)
+
+(* Preserved Program-Order: these are never visibly reordered by compiler
+ * and hardware. Includes both barrier ordering, and dependency ordering +
+ * same-address ordering.
+ *)
+
 let ppo = bob | [Marked];(dep | coi | fri);[W & Marked]
 
 (* Important: these relations satisfy
- ppo ; [~Marked] ; po <= ppo
- po ; [~R] & [~Marked] ; ppo <= ppo
- I.e., the only way to order an unmarked operation on the right (unmarked operation other than reads on the left), is through a barrier that will also order everything that comes after (before) the unmarked orperation.
+ * ppo ; [~Marked] ; po <= ppo
+ * po ; [~R] & [~Marked] ; ppo <= ppo
+ *
+ * I.e., the only way to order an unmarked operation on the right (unmarked
+ * operation other than reads on the left), is through a barrier that will
+ * also order everything that comes after (before) the unmarked operation.
 *)
 
-(*
- If there is no w-race (defined below), plain writes are slightly better-behaved: if they are read-from, then either
- 1) they exist and provide ordering, or
- 2) an older store with the same value exists and that store is also a candidate for the read, in which case the ordering provided in this graph is ignored in the graph in which the older store is observed
+(* If there is no w-race (defined below), plain writes are slightly
+ * better-behaved: if they are read-from, then either
+ * 1) they exist and provide ordering, or
+ * 2) an older store with the same value exists and that store is also
+ *    a candidate for the read, in which case the ordering provided in this
+ *    graph is ignored in the graph in which the older store is observed
+ *
+ * However, the plain writes may not exist in the form supposed by
+ * dependencies etc. This is especially true for data dependencies, which
+ * may be speculatively elided:
+ *    a = y ~~> r = y; a = 42; if (r != 42) a = r;
+ *
+ * Only ctrl and addr dependencies are not elided, because the compiler
+ * is not allowed to speculatively modify memory regions (which might be
+ * protected by a lock owned by another thread).
+ *)
 
- However, the plain writes may not exist in the form supposed by dependencies etc.
- This is especially true for data dependencies, which may be speculatively elided:
- a = y ~~> r = y; a = 42; if (r != 42) a = r;
- Only ctrl and addr dependencies are not elided, because the compiler is not allowed to speculatively modify memory regions (which might be protected by a lock owned by another thread).
-*)
 let WRF-ppo = po;[Rel & F];po;[W & Plain] | [Marked];(ctrl | addr);[W & Plain]
 
 let hb = ppo | WRF-ppo | rfe | fre | coe
@@ -127,24 +166,36 @@
 let w-race = coe \ w-race-fix
 let w-racy = [domain(w-race)] | [range(w-race)]
 flag ~empty w-racy as w-data-race
 
-(*
- If there is no w-race, then WRF-ppo also provides ordering.
- Besides the definition in hb above, this is relevant for fre & r-race below,
- where the store cannot occur before the read (i.e. they are actually ordered)
- as long as the read is ordered and the store is ordered by WRF-ppo. Example:
-
- r = a; x =_rel 1; || if (x_rlx == 1) a = 1;
- Here the second thread is only ordered by WRF-ppo, but r can not be 1 and it is not a data race.
+(* If there is no w-race, then WRF-ppo also provides ordering.
+ * Besides the definition in hb above, this is relevant for fre & r-race
+ * below, where the store cannot occur before the read (i.e. 
they are + * actually ordered) as long as the read is ordered and the store is ordered + * by WRF-ppo. Example: + * r = a; x =_rel 1; || if (x_rlx == 1) a = 1; + * + * Here the second thread is only ordered by WRF-ppo, but r can not be 1 + * and it is not a data race. + * + * It is not relevant for rfe & r-race, since there the plain store would be + * in the range (but WRF-ppo only orders stores in the domain); and reading + * from it without proper synchronization is a data race too anyways. + *) - It is not relevant for rfe & r-race, since there the plain store would be in the range (but WRF-ppo only orders stores in the domain); and reading from it without proper synchronization is a data race too anyways. -*) let r-race-fix = ([Marked] | ppo);hb+; WRF-ppo let r-race = (fre | rfe) \ (w-race-fix | r-race-fix) let r-racy = [domain(r-race)] | [range(r-race)] -(* a read is observed through a dependency if the value is eventually used to determine a control or address dependency, or the value is stored (through data dependencies) in a location that is read by another thread. - In those cases, if the read was data racy and returned a trashy value (e.g. due to torn read/write or other compiler optimizations like store value speculation), this trashy value will actually affect the execution, which is UB. - As long as the value is not observed in this manner, r-race is well-defined behavior. + +(* a read is observed through a dependency if the value is eventually used + * to determine a control or address dependency, or the value is stored (through + * data dependencies) in a location that is read by another thread. + * In those cases, if the read was data racy and returned a trashy value + * (e.g. due to torn read/write or other compiler optimizations like store + * value speculation), this trashy value will actually affect the execution, + * which is UB. + * As long as the value is not observed in this manner, r-race is well-defined + * behavior. *) + let obs-dep = ctrl | addr | data;rfe flag ~empty [domain(obs-dep)] & r-racy as r-data-race
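--
For reviewers: a minimal end-to-end sketch of how the new RISC-V pieces
compose. This assumes the crate is named `verify` (after its directory), that
`ToBoogie` is re-exported at the crate root (matching the
`use crate::{.., ToBoogie, ..}` in riscv/mod.rs), and that "64" is one of the
size prefixes accepted via `valid_prefix`; none of these names are fixed by
the patch itself.

    use verify::riscv::{
        extract_riscv_functions, parse_riscv_assembly, remove_directives, transform_labels,
    };
    use verify::ToBoogie;

    fn main() {
        let asm = "vatomic64_write:\n    fence rw,w\n    sd a1,0(a0)\n    ret\n";

        // Parse the whole listing into RiscvInstruction values.
        let (_, parsed) = parse_riscv_assembly(asm).expect("assembly should parse");

        // Keep only vatomic functions with an accepted size prefix ("64")
        // and a wanted operation name ("write").
        let names = vec!["write".to_string()];
        let functions = extract_riscv_functions(parsed, Some(&names), &["64"]);

        for f in &functions {
            // Drop assembler directives, then rewrite labels to L0, L1, ...
            let cleaned = transform_labels(&remove_directives(f));
            let boogie = cleaned.to_boogie();
            println!("{}: {} instructions", boogie.name, boogie.instructions.len());
        }
    }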