diff --git a/.clang-format b/.clang-format index 0c6bed10..c243fccb 100644 --- a/.clang-format +++ b/.clang-format @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/.github/workflows/build-and-test-jumpstart.yaml b/.github/workflows/build-and-test-jumpstart.yaml index 27b4eb99..6a33c685 100644 --- a/.github/workflows/build-and-test-jumpstart.yaml +++ b/.github/workflows/build-and-test-jumpstart.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 @@ -6,102 +6,101 @@ name: Build and Test Jumpstart on: push: - branches: [ "main" ] + branches: ["main"] pull_request: - branches: [ "main" ] + branches: ["main"] workflow_dispatch: env: SPIKE_REPO: https://github.com/riscv-software-src/riscv-isa-sim.git SPIKE_REV: master - TOOLCHAIN_URL: https://github.com/riscv-collab/riscv-gnu-toolchain/releases/download/2023.09.27/riscv64-elf-ubuntu-22.04-gcc-nightly-2023.09.27-nightly.tar.gz + TOOLCHAIN_URL: https://github.com/riscv-collab/riscv-gnu-toolchain/releases/download/2025.09.28/riscv64-elf-ubuntu-22.04-gcc-nightly-2025.09.28-nightly.tar.xz jobs: build-and-test-jumpstart: - runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - fetch-depth: 0 - - - name: Install Packages - run: | - sudo apt-get update - sudo apt-get install -y device-tree-compiler build-essential - - - name: Get revisions of dependencies - run: | - SPIKE_COMMIT=$( git ls-remote "$SPIKE_REPO" $SPIKE_REV | awk '{ print $1; }' ) - echo "Revison of Spike: $SPIKE_COMMIT" - # Save for later use - echo "SPIKE_COMMIT=$SPIKE_COMMIT" >> $GITHUB_ENV - - - name: Get the toolchain from cache (if available) - id: cache-restore-toolchain - uses: actions/cache/restore@v3 - with: - path: /opt/riscv/toolchain - key: "toolchain-${{env.TOOLCHAIN_URL}}" - - - if: ${{ 
steps.cache-restore-toolchain.outputs.cache-hit != 'true' }} - name: Download Toolchain (if not cached) - run: | - mkdir -p /opt/riscv/toolchain - wget --progress=dot:giga $TOOLCHAIN_URL -O /tmp/toolchain.tar.gz - - - if: ${{ steps.cache-restore-toolchain.outputs.cache-hit != 'true' }} - name: Install Toolchain (if not cached) - run: tar zxf /tmp/toolchain.tar.gz --strip-components=1 -C /opt/riscv/toolchain - - - name: Save the toolchain to the cache (if necessary) - id: cache-save-toolchain - uses: actions/cache/save@v3 - with: - path: /opt/riscv/toolchain - key: "toolchain-${{env.TOOLCHAIN_URL}}" - - - name: Add the toolchain to the path - run: echo "/opt/riscv/toolchain/bin" >> $GITHUB_PATH - - - name: Get spike from cache (if available) - id: cache-restore-spike - uses: actions/cache/restore@v3 - with: - path: /opt/riscv/spike - key: "spike-${{env.SPIKE_COMMIT}}" - - - if: ${{ steps.cache-restore-spike.outputs.cache-hit != 'true' }} - name: Download Spike source (if not cached) - run: | - git clone "$SPIKE_REPO" - cd riscv-isa-sim - git checkout "$SPIKE_COMMIT" - git submodule update --init --recursive - - - if: ${{ steps.cache-restore-spike.outputs.cache-hit != 'true' }} - name: Build Spike (if not cached) - run: | - cd riscv-isa-sim - mkdir build && cd build - ../configure --prefix=/opt/riscv/spike - make -j"$(nproc 2> /dev/null || sysctl -n hw.ncpu)" - make install - - - if: ${{ steps.cache-restore-spike.outputs.cache-hit != 'true' }} - name: Save spike to cache (if built) - id: cache-save-spike - uses: actions/cache/save@v3 - with: - path: /opt/riscv/spike - key: "spike-${{env.SPIKE_COMMIT}}" - - - uses: BSFishy/meson-build@v1.0.3 - with: - action: test - directory: build - setup-options: --cross-file cross_compile/public/gcc_options.txt --cross-file cross_compile/gcc.txt --buildtype release -Dspike_binary=/opt/riscv/spike/bin/spike - options: --verbose - meson-version: 1.2.0 + - uses: actions/checkout@v3 + with: + submodules: recursive + fetch-depth: 0 + + 
- name: Install Packages + run: | + sudo apt-get update + sudo apt-get install -y device-tree-compiler build-essential + + - name: Get revisions of dependencies + run: | + SPIKE_COMMIT=$( git ls-remote "$SPIKE_REPO" $SPIKE_REV | awk '{ print $1; }' ) + echo "Revison of Spike: $SPIKE_COMMIT" + # Save for later use + echo "SPIKE_COMMIT=$SPIKE_COMMIT" >> $GITHUB_ENV + + - name: Get the toolchain from cache (if available) + id: cache-restore-toolchain + uses: actions/cache/restore@v3 + with: + path: /opt/riscv/toolchain + key: "toolchain-${{env.TOOLCHAIN_URL}}" + + - if: ${{ steps.cache-restore-toolchain.outputs.cache-hit != 'true' }} + name: Download Toolchain (if not cached) + run: | + mkdir -p /opt/riscv/toolchain + wget --progress=dot:giga $TOOLCHAIN_URL -O /tmp/toolchain.tar.xz + + - if: ${{ steps.cache-restore-toolchain.outputs.cache-hit != 'true' }} + name: Install Toolchain (if not cached) + run: tar xJf /tmp/toolchain.tar.xz --strip-components=1 -C /opt/riscv/toolchain + + - name: Save the toolchain to the cache (if necessary) + id: cache-save-toolchain + uses: actions/cache/save@v3 + with: + path: /opt/riscv/toolchain + key: "toolchain-${{env.TOOLCHAIN_URL}}" + + - name: Add the toolchain to the path + run: echo "/opt/riscv/toolchain/bin" >> $GITHUB_PATH + + - name: Get spike from cache (if available) + id: cache-restore-spike + uses: actions/cache/restore@v3 + with: + path: /opt/riscv/spike + key: "spike-${{env.SPIKE_COMMIT}}" + + - if: ${{ steps.cache-restore-spike.outputs.cache-hit != 'true' }} + name: Download Spike source (if not cached) + run: | + git clone "$SPIKE_REPO" + cd riscv-isa-sim + git checkout "$SPIKE_COMMIT" + git submodule update --init --recursive + + - if: ${{ steps.cache-restore-spike.outputs.cache-hit != 'true' }} + name: Build Spike (if not cached) + run: | + cd riscv-isa-sim + mkdir build && cd build + ../configure --prefix=/opt/riscv/spike + make -j"$(nproc 2> /dev/null || sysctl -n hw.ncpu)" + make install + + - if: ${{ 
steps.cache-restore-spike.outputs.cache-hit != 'true' }} + name: Save spike to cache (if built) + id: cache-save-spike + uses: actions/cache/save@v3 + with: + path: /opt/riscv/spike + key: "spike-${{env.SPIKE_COMMIT}}" + + - uses: BSFishy/meson-build@v1.0.3 + with: + action: test + directory: build + setup-options: --cross-file cross_compile/public/gcc_options.txt --cross-file cross_compile/gcc.txt --buildtype release -Dspike_binary=/opt/riscv/spike/bin/spike + options: --verbose + meson-version: 1.3.0 diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 32bfb9d6..360f094e 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 @@ -13,6 +13,8 @@ jobs: pre-commit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v3 - - uses: pre-commit/action@v3.0.0 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 + with: + python-version: "3.12" + - uses: pre-commit/action@v3.0.0 diff --git a/.gitignore b/.gitignore index bc43c842..35a35c99 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 23587eaf..3841cbd8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 repos: - repo: https://github.com/pre-commit/pre-commit-hooks.git - rev: v4.6.0 + rev: v5.0.0 hooks: - id: check-added-large-files - id: check-ast @@ -24,43 +24,43 @@ repos: args: [--markdown-linebreak-ext=md] - repo: https://github.com/PyCQA/isort - rev: 5.13.2 + rev: 6.0.1 hooks: - id: isort - repo: https://github.com/psf/black-pre-commit-mirror - rev: 24.4.2 + rev: 25.1.0 hooks: - id: black - repo: https://github.com/ikamensh/flynt/ - rev: 1.0.1 + rev: 1.0.2 hooks: - id: flynt - repo: https://github.com/asottile/pyupgrade - rev: v3.16.0 + rev: v3.20.0 hooks: - id: pyupgrade - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v16.0.6 + rev: v20.1.8 hooks: - id: clang-format # pull mirror of https://github.com/fsfe/reuse-tool - repo: https://github.com/rivosinc/reuse-tool - rev: '092e17c7287dad33a3da7fde22185dea29698810' + rev: 'da430ed605e06460b020a75410d62ddb7fc9a616' hooks: - id: reuse-annotate args: - - -c=Rivos Inc. - - -l=Apache-2.0 + - -c Rivos Inc. + - -l Apache-2.0 - --merge-copyrights - --skip-unrecognised - id: reuse - repo: https://github.com/pycqa/flake8 - rev: 6.1.0 + rev: 7.1.0 hooks: - id: flake8 diff --git a/README.md b/README.md index 6c5d1b2d..9c0ed3a9 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,11 @@ # JumpStart -[![REUSE status](https://api.reuse.software/badge/github.com/rivosinc/JumpStart)](https://api.reuse.software/info/github.com/rivosinc/JumpStart) - Bare-metal kernel, APIs and build infrastructure for writing directed diags for RISC-V CPU/SoC validation. 
## Setup the Environment @@ -16,44 +14,85 @@ JumpStart requires the following tools to be available in your path: * [meson](https://mesonbuild.com) * [riscv-gnu-toolchain](https://github.com/riscv-collab/riscv-gnu-toolchain) * [Spike](https://github.com/riscv-software-src/riscv-isa-sim) +* [just](https://github.com/casey/just) (command runner) + +### Ubuntu + +Install required packages: +```shell +# gcc toolchain +# Install riscv-gnu-toolchain from source or use a prebuilt version + +# just tool +curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to /usr/local/bin + +# meson +sudo apt install meson + +# Build Spike from source +# See: https://github.com/riscv-software-src/riscv-isa-sim +``` + +### macOS -JumpStart has been tested on Ubuntu 22.04 and macOS. +``` +brew tap riscv-software-src/riscv +brew install riscv-tools riscv-isa-sim riscv-gnu-toolchain +brew install just meson +``` ## Test the Environment This will build JumpStart and run the unit tests. ```shell -meson setup builddir --cross-file cross_compile/public/gcc_options.txt --cross-file cross_compile/gcc.txt --buildtype release -meson compile -C builddir -meson test -C builddir +just test gcc release spike +``` + +To see all the possible test targets, run: + +```shell +just --list ``` ## Building and Running Diags -The [`scripts/build_diag.py`](scripts/build_diag.py) script provides an easy way to build and run diags on different targets. +The [`scripts/build_diag.py`](scripts/build_diag.py) script provides an easy way to build and run diags on different environments. 
-This will build the diag in the [`tests/common/test000`](tests/common/test000) using the `gcc` toolchain and run it on the `spike` target: +This will build the diag in the [`tests/common/test000`](tests/common/test000) using the `gcc` toolchain and run it on the `spike` environment: ```shell -❯ scripts/build_diag.py --diag_src_dir tests/common/test000/ --diag_build_dir /tmp/diag -INFO: [MainThread]: Diag built: - Name: test000 - Directory: /tmp/diag - Assets: {'disasm': '/tmp/diag/test000.elf.dis', 'binary': '/tmp/diag/test000.elf', 'spike_trace': '/tmp/diag/test000.itrace'} - BuildType: release, - Target: spike - RNG Seed: 8410517908284574883 - Source Info: - Diag: test000, Source Path: /Users/joy/workspace/jumpstart/tests/common/test000 - Sources: ['/Users/joy/workspace/jumpstart/tests/common/test000/test000.c'] - Attributes: /Users/joy/workspace/jumpstart/tests/common/test000/test000.diag_attributes.yaml - Meson options overrides file: None +❯ scripts/build_diag.py --diag_src_dir tests/common/test000/ --diag_build_dir /tmp/diag --environment spike +INFO: [ThreadPoolExecutor-0_0]: Compiling 'tests/common/test000/' +INFO: [ThreadPoolExecutor-1_0]: Running diag 'tests/common/test000/' +INFO: [MainThread]: +Summary +Build root: /tmp/diag +Build Repro Manifest: /tmp/diag/build_manifest.repro.yaml +┏━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ Diag ┃ Build ┃ Run [spike] ┃ Result ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ tests/common/test000/ │ PASS (2.20s) │ PASS (0.20s) │ /tmp/diag/test000/test000.elf │ +└───────────────────────┴──────────────┴──────────────┴───────────────────────────────┘ + +Diagnostics built: 1 +Diagnostics run: 1 + +Run Manifest: +/tmp/diag/run_manifest.yaml + +STATUS: PASSED ``` +For more details, check the Reference Manual section on [Building and Running Diags](docs/reference_manual.md#building-and-running-diags). 
+ ## Documentation * [Quick Start: Anatomy of a Diag](docs/quick_start_anatomy_of_a_diag.md) * [Reference Manual](docs/reference_manual.md) * [FAQs](docs/faqs.md) * [JumpStart Internals](docs/jumpstart_internals.md) + +## Support + +For help, please send a message on the Slack channel #jumpstart-directed-diags-framework. diff --git a/cross_compile/gcc.txt b/cross_compile/gcc.txt index c34bb020..d0c65ab4 100644 --- a/cross_compile/gcc.txt +++ b/cross_compile/gcc.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/cross_compile/llvm.txt b/cross_compile/llvm.txt deleted file mode 100644 index 9dc1ec2c..00000000 --- a/cross_compile/llvm.txt +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -# -# SPDX-License-Identifier: Apache-2.0 - -[binaries] -c = 'clang-18' -strip = 'llvm-strip' -objdump = 'riscv64-unknown-elf-objdump' -# Use the gcc linker. -c_ld = 'bfd' - -[built-in options] -c_args = target_args + ['-no-integrated-as', - '-fno-pic', - ] -c_link_args = target_args + ['-nostdlib', '-static'] diff --git a/cross_compile/public/gcc_options.txt b/cross_compile/public/gcc_options.txt index 31fff85c..de2c3b39 100644 --- a/cross_compile/public/gcc_options.txt +++ b/cross_compile/public/gcc_options.txt @@ -3,4 +3,4 @@ # SPDX-License-Identifier: Apache-2.0 [constants] -target_args = ['-march=rv64ghcv_zbb_zbs'] +target_args = ['-march=rv64gcvh_zba_zbb_zbs_zihintpause'] diff --git a/docs/faqs.md b/docs/faqs.md index f311eaf1..f2e4f780 100644 --- a/docs/faqs.md +++ b/docs/faqs.md @@ -1,5 +1,5 @@ @@ -8,7 +8,7 @@ SPDX-License-Identifier: Apache-2.0 ## Are there restrictions on what GPRs I can use in my diags? -**Yes.** The Thread Pointer (x4) and Global Pointer (x3) registers are reserved for JumpStart purposes and should not be used in diags. 
TP is used to point to a per hart attributes structure and GP is used as a temporary in JumpStart routines. +**Yes.** The Thread Pointer (x4) and Global Pointer (x3) registers are reserved for JumpStart purposes and should not be used in diags. TP is used to point to a per cpu attributes structure and GP is used as a temporary in JumpStart routines. **Diags are expected to follow the [RISC-V ABI Calling Convention](https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc).** @@ -17,3 +17,11 @@ SPDX-License-Identifier: Apache-2.0 Running spike through `meson test` breaks spike's command line debugging facility (`-d`) for interactive debugging. You will need to run spike manually with `-d` for interactive debugging. + +## What's the best way to debug a diag that is behaving incorrectly? + +* If your diag can run on Spike, generate the spike trace and see where things go off the rails. + * Look for `trap` to find unexpected exceptions. + * Look for the point where your code returns to the JumpStart code. + * Run spike with the `-d` flag to step through your diag and inspect registers and memory. +* Build with the `--buildtype debug` to turn off optimizations and generate debug information. The disassembly generated will have your code interleaved with the assembly, making it easier to correlate the two. diff --git a/docs/jumpstart_internals.md b/docs/jumpstart_internals.md index 9763e541..4205737b 100644 --- a/docs/jumpstart_internals.md +++ b/docs/jumpstart_internals.md @@ -1,5 +1,5 @@ diff --git a/docs/quick_start_anatomy_of_a_diag.md b/docs/quick_start_anatomy_of_a_diag.md index d8ed16d9..c46e4a2a 100644 --- a/docs/quick_start_anatomy_of_a_diag.md +++ b/docs/quick_start_anatomy_of_a_diag.md @@ -1,5 +1,5 @@ @@ -9,20 +9,20 @@ SPDX-License-Identifier: Apache-2.0 `test021` is a 2P diag that has `CPU0` update the page table mapping of a page in memory by changing the valid bit from `0` to `1`. 
`CPU1` reads from the page before and after the valid bit is set to `1`. The test verifies that the read from `CPU1` fails when the valid bit is `0` and eventually succeeds after the valid bit is set to `1`. The diag comprises of 2 source files: -* [`test021.c`](../tests/common/test021.c) -* [`test021.S`](../tests/common/test021.S) +* [`test021.c`](../tests/common/test021/test021.c) +* [`test021.S`](../tests/common/test021/test021.S) and a diag attributes file: -* [`test021.diag_attributes.yaml`](../tests/common/test021.diag_attributes.yaml) +* [`test021.diag_attributes.yaml`](../tests/common/test021/test021.diag_attributes.yaml) ## Diag Attributes YAML file -[`test021.diag_attributes.yaml`](../tests/common/test021.diag_attributes.yaml) contains attributes that describe the diag. JumpStart uses these attributes to generate diag specific code, data structures and files. +[`test021.diag_attributes.yaml`](../tests/common/test021/test021.diag_attributes.yaml) contains attributes that describe the diag. JumpStart uses these attributes to generate diag specific code, data structures and files. ```yaml -active_hart_mask: "0b11" +active_cpu_mask: "0b11" ``` -This is a 2P diag with CPUs 0 and 1 active. JumpStart will allocate enough space in data structures for 2 CPUs. Any CPUs not specified in the active_hart_mask will be considered inactive and sent to wfi if encountered. +This is a 2P diag with CPUs 0 and 1 active. JumpStart will allocate enough space in data structures for 2 CPUs. Any CPUs not specified in the active_cpu_mask will be considered inactive and sent to wfi if encountered. ```yaml satp_mode: "sv39" @@ -82,11 +82,11 @@ The diag additionally defines a `.data.diag` section at `0x80006000`. The `valid By default, the JumptStart boot code will start in machine mode, initialize the system (MMU, interrupts, exception handling etc) and then jump to the diag's `main` function in Supervisor mode. 
-[`test021.c`](../tests/common/test021.c) contains `main()` that the JumpStart boot code will jump to after initializing the system. +[`test021.c`](../tests/common/test021/test021.c) contains `main()` that the JumpStart boot code will jump to after initializing the system. ```c - uint8_t hart_id = get_thread_attributes_hart_id_from_smode(); - if (hart_id > 1) { + uint8_t cpu_id = get_thread_attributes_cpu_id_from_smode(); + if (cpu_id > 1) { return DIAG_FAILED; } ``` @@ -112,7 +112,7 @@ struct translation_info { }; ``` -The `data_area` variable is a global variable defined in the `.data.diag` section by [`test021.S`](../tests/common/test021.S): +The `data_area` variable is a global variable defined in the `.data.diag` section by [`test021.S`](../tests/common/test021/test021.S): ```asm .section .data.diag, "wa", @progbits @@ -131,14 +131,14 @@ data_area: The diag sanity checks that the valid bit is not set for the leaf page table entry for this translation. `walk_successful` will be `0` as the translation encountered the invalid leaf page table entry but `levels_traversed` will be `3` as it would have traversed 3 levels to get to the leaf page table entry. ```c - if (hart_id == 1) { + if (cpu_id == 1) { register_smode_trap_handler_override( - SCAUSE_EC_LOAD_PAGE_FAULT, (uint64_t)(&hart1_load_page_fault_handler)); + SCAUSE_EC_LOAD_PAGE_FAULT, (uint64_t)(&cpu1_load_page_fault_handler)); .. .. ``` -CPU1 registers a supervisor mode trap handler override (`hart1_load_page_fault_handler()`) for the load page fault exception using the `register_smode_trap_handler_override()` API provided by JumpStart. +CPU1 registers a supervisor mode trap handler override (`cpu1_load_page_fault_handler()`) for the load page fault exception using the `register_smode_trap_handler_override()` API provided by JumpStart. 
```c if (is_load_allowed_to_data_area() == 1) { @@ -148,7 +148,7 @@ CPU1 registers a supervisor mode trap handler override (`hart1_load_page_fault_h `CPU1` calls `is_load_allowed_to_data_area()` to check that the reads to the data area are not allowed. -`is_load_allowed_to_data_area()` is defined in [`test021.S`](../tests/common/test021.S): +`is_load_allowed_to_data_area()` is defined in [`test021.S`](../tests/common/test021/test021.S): ```asm .section .text, "ax", @progbits @@ -168,10 +168,10 @@ is_load_allowed_to_data_area: .. .. ``` -`is_load_allowed_to_data_area()` issues a load to the `data_area` variable and returns `1` if the load succeeds. If the load faults, the load page fault exception handler `hart1_load_page_fault_handler()` simply skips over the faulting instruction: +`is_load_allowed_to_data_area()` issues a load to the `data_area` variable and returns `1` if the load succeeds. If the load faults, the load page fault exception handler `cpu1_load_page_fault_handler()` simply skips over the faulting instruction: ```c -void hart1_load_page_fault_handler(void) { +void cpu1_load_page_fault_handler(void) { .. .. // skip over the faulting load @@ -181,13 +181,13 @@ void hart1_load_page_fault_handler(void) { ``` ```c - sync_all_harts_from_smode(); + sync_all_cpus_from_smode(); ``` The diag syncs up the cores so that they both complete all the above steps before `CPU0` modifies the page table entry to mark it as valid. ```c - if (hart_id == 0) { + if (cpu_id == 0) { *((uint64_t *)xlate_info.pte_address[2]) = xlate_info.pte_value[2] | PTE_V; asm volatile("sfence.vma"); diff --git a/docs/reference_manual.md b/docs/reference_manual.md index 93ab128e..f40ecde2 100644 --- a/docs/reference_manual.md +++ b/docs/reference_manual.md @@ -1,5 +1,5 @@ @@ -10,11 +10,17 @@ JumpStart provides a bare-metal kernel, APIs and build infrastructure for writin A Diag is expected to provide sources (C and assembly files) and it's attributes in a YAML file. 
-The JumpStart [`Unit Tests`](../tests) are a good reference on writing diags: -* [Common tests](../tests/common/meson.build) - **For a Quick Start Guide, see [Anatomy of a Diag](quick_start_anatomy_of_a_diag.md)** which provides a detailed explanation of `test021` which is a 2-core diag that modifies a shared page table in memory and checks that the change is visible to both cores. +## Table of Contents + +* [Diag Sources](#diag-sources) +* [Diag Attributes](#diag-attributes) +* [JumpStart APIs](#jumpstart-apis) +* [Building and Running Diags](#building-and-running-diags) +* [Running Unit Tests](#running-unit-tests) +* [Debugging with GDB](#debugging-diags-with-gdb) + ## Diag Sources Diags are written in C and/or Assembly. @@ -27,23 +33,26 @@ JumpStart provides a set of basic API functions that the diag can use. Details [ The diag exits by returning from `main()` with a `DIAG_PASSED` or `DIAG_FAILED` return code. Alternatively, the diag can call `jumpstart_mmode_fail()` or `jumpstart_smode_fail()` functions if a clean return from `main()` is not possible. On return from the diag, JumpStart will exit the simulation with the appropriate exit code and exit sequence for the simulation environment. +The JumpStart [`Unit Tests`](../tests) are a good reference on writing diags: +* [Common tests](../tests/common/meson.build) + **Diags are expected to follow the [RISC-V ABI Calling Convention](https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc).** -**The Thread Pointer (x4) and Global Pointer (x3) registers are reserved for JumpStart purposes and should not be used in diags.** TP is used to point to a per hart attributes structure and GP is used as a temporary in JumpStart routines. +**The Thread Pointer (x4) and Global Pointer (x3) registers are reserved for JumpStart purposes and should not be used in diags.** TP is used to point to a per cpu attributes structure and GP is used as a temporary in JumpStart routines. 
## Diag Attributes -The Diag Attributes file specifies the memory layout and various attributes of the diag such as the MMU mode, number of active harts, etc. +The Diag Attributes file specifies the memory layout and various attributes of the diag such as the MMU mode, number of active cpus, etc. The default diag attribute values are defined in the [Source Attributes YAML file](../src/public/jumpstart_public_source_attributes.yaml). -### `active_hart_mask` +### `active_cpu_mask` -Binary bitmask controlling how many active harts are in the diag. Any hart that is not part of the bitmask will be sent to `wfi`. +Binary bitmask controlling how many active cpus are in the diag. Any cpu that is not part of the bitmask will be sent to `wfi`. -Default: `0b1` or 1 hart active. +Default: `0b1` or 1 cpu active. -Specifies the active harts in the diag. The default is `0b1` or 1 hart active. +Specifies the active cpus in the diag. The default is `0b1` or 1 cpu active. ### `enable_virtualization` @@ -53,7 +62,9 @@ Default: `False`. ### `satp_mode`, `vstap_mode`, `hgatp_mode` -The MMU mode (SV39, SV48, etc.) that will be programmed into the corresponding *ATP register. +The MMU mode that will be programmed into the corresponding *ATP register. + +Valid values: `bare`, `sv39`, `sv48`, `sv39x4`, `sv48x4`. ### `start_test_in_mmode` @@ -63,8 +74,6 @@ NOTE: Diags that run in `sbi_firmware_boot` mode (where JumpStart starts in S-mo Default: `False`. The diag's `main()` will be called in S-mode. -Example: [test009](../tests/common/test009.diag_attributes.yaml). - ### `mmode_start_address`, `smode_start_address` and `umode_start_address` The address at which the start of the Machine, Supervisor and User mode sections will be placed by the linker. @@ -73,7 +82,7 @@ The address at which the start of the Machine, Supervisor and User mode sections The maximum number of 4K pages that can be used to allocate Page Tables for each translation stage. 
-### `num_pages_for_jumpstart_smode_bss` and `num_pages_for_jumpstart_smode_rodata` +### `num_pages_for_jumpstart_smode_bss` and `num_pages_for_jumpstart_mmode_rodata` The number of 4K pages allowed for the `.bss` and `.rodata` sections respectively. @@ -93,6 +102,14 @@ Controls the memory layout and attributes of all the sections of the diag. Controls the virtual, guest physical, physical and supervisor physical addresses of the mapping. +#### `target_mmu` + +Specifies the list of MMUs that this mapping will be set up for. + +MMUs currently supported: `cpu`, `iommu`. + +Default: ["cpu"] + #### `stage` Controls the translation stage (S, VS, G) that this mapping will be used in. The S stage is the single stage translation and the VS and G stages are the two stage translation. @@ -113,6 +130,14 @@ The page size has to conform to the sizes supported by the SATP mode. Controls the number of `page_size` pages allocated for the section. +#### `num_pages_per_cpu` + +Controls the number of `page_size` pages allocated per CPU for the section. The total number of pages allocated will be `num_pages_per_cpu` multiplied by `max_num_cpus_supported`. + +This attribute is mutually exclusive with `num_pages` - only one of them can be specified for a mapping. When `num_pages_per_cpu` is used, the memory allocation scales automatically with the number of CPUs supported by the system. + +Example: If `num_pages_per_cpu: 2` and `max_num_cpus_supported: 4`, then 8 total pages will be allocated for the section. + #### `alias` Indicates whether this is a VA alias. It's PA should be contained in the PA range of another mapping. @@ -124,6 +149,10 @@ If not explicitly specified, this will be inferred based on the translation stag Default: `None`. +#### `pma_memory_type` (Rivos Internal) + +The memory type of the section. This is used to set the memory type for the PMARR region that holds this section. 
+ #### `linker_script_section` The name of the linker script section that this section will be placed in. @@ -146,48 +175,142 @@ The sections `.text` and `.text.end` will be placed together in the `.text` link } ``` -## Building Diags +## Building and Running Diags with `build_diag.py` -`meson` is used to build the diags. The diags are built in 2 stages - `meson setup` and `meson compile`. +[`scripts/build_diag.py`](../scripts/build_diag.py) is the preferred way to build diags and optionally run them on spike. -### `meson setup` +It will place the build and run artifacts into `--diag_build_dir`. It produces the ELFs, run traces (for spike), `build_manifest.repro.yaml` file to reproduce the build, etc. -Takes the diag's sources and attributes and generates a meson build directory. +It will produce a summary indicating status for each diag. -Pass the sources and the attribute file to `meson setup` with the `diag_attributes_yaml`, `diag_name` and `diag_sources` setup options: +``` +Summary +Build root: /tmp/diag +Build Repro Manifest: /tmp/diag/build_manifest.repro.yaml +┏━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ Diag ┃ Build ┃ Run [spike] ┃ Result ┃ +┡━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ test000 │ PASS (4.45s) │ PASS (0.38s) │ /tmp/diag/test000/test000.elf │ +├─────────┼──────────────┼──────────────┼───────────────────────────────┤ +│ test010 │ PASS (4.52s) │ PASS (0.38s) │ /tmp/diag/test010/test010.elf │ +├─────────┼──────────────┼──────────────┼───────────────────────────────┤ +│ test002 │ PASS (4.49s) │ PASS (0.39s) │ /tmp/diag/test002/test002.elf │ +└─────────┴──────────────┴──────────────┴───────────────────────────────┘ + +Run Manifest: +/tmp/diag/run_manifest.yaml + +STATUS: PASSED -```shell -meson setup builddir --cross-file cross_compile/rivos_internal/gcc_options.txt --cross-file cross_compile/gcc.txt --buildtype release -Ddiag_attributes_yaml=tests/common/test000.diag_attributes.yaml 
-Ddiag_sources=tests/common/test000.c -Ddiag_name=my_jumpstart_diag ``` -All `meson setup` options are listed in the [meson_options.txt](../meson.options) file. +### Environment Configuration -#### `diag_attribute_overrides` +The script uses an environment-based configuration system that determines the run_target, boot configuration, and other build settings. Environments are defined in [`scripts/build_tools/environments.yaml`](../scripts/build_tools/environments.yaml) and can inherit from other environments. -Diag attributes specified in the diag's attribute file can be overriden at `meson setup` with the `diag_attribute_overrides` option. `diag_attribute_overrides` takes a list of attributes that can be overriden. +Available environments include: +- `spike`: Run on Spike simulator with fw-none boot configuration -For example, to override the `active_hart_mask`: +Each environment can specify: +- `run_target`: The run_target to run the diag on (spike, etc.) +- `override_meson_options`: Meson options to override for this environment +- `override_diag_attributes`: Diag attributes to override for this environment +- `extends`: Parent environment to inherit from -```shell -meson setup builddir -Ddiag_attribute_overrides=active_hart_mask=0b11 ... -``` +### Flags + +The preferred way to build and run using JumpStart is to use the [`scripts/build_diag.py`](../scripts/build_diag.py) script. -### `meson compile` +#### `--diag_src_dir` -Compiles the diag for which the meson build directory has been generated by `meson setup`. +A list of diag source directories containing the diag's sources and attributes file. +#### `--build_manifest` + +A manifest file containing a list of multiple diags to be built. The manifest file can also contain global overrides for `override_meson_options`, `override_diag_attributes` and `diag_custom_defines` that are applied to all diags in a manifest. See `diags/sival/ddr.diag_manifest.yaml` in the `ctest` repo for an example. 
+ +#### `--environment` + +**Required.** The environment to build and run for. + +Available environments can be listed by running: +```shell +jumpstart/scripts/build_diag.py --help +``` + +Environment related extra arguments can be listed by running: ```shell -meson compile -C builddir +jumpstart/scripts/build_diag.py -e --help ``` -This will build `builddir/my_jumpstart_diag` +The environment determines: +- The run_target (spike, etc.) +- Boot configuration (fw-none) +- Default meson options and diag attributes + +#### `--override_meson_options` + +Used to override the meson options specified in [meson.options](../meson.options) or those set by the environment. + +#### `--override_diag_attributes` + +Used to override the diag attributes specified in the [attributes file](../src/public/jumpstart_public_source_attributes.yaml) or those set by the environment. This will override the attributes specified in the diag's attributes file. + +#### `--diag_custom_defines` + +Override per diag custom defines. + +#### `--include_diags` / `--exclude_diags` + +Filter diagnostics when using a manifest. Only valid with `--build_manifest` and incompatible with `--diag_src_dir`. +- `--include_diags name1 name2`: Build only the listed diagnostics from the manifest; errors if a name is not present. +- `--exclude_diags name1 name2`: Build all diagnostics except the listed ones; errors if a name is not present. + +#### `--buildtype` + +Meson build type to use. Choices: `release`, `minsize`, `debug`, `debugoptimized`. Defaults to `release` if not specified. + +#### `--toolchain` + +Compiler toolchain. Choices: `gcc`. Default: `gcc`. + +#### `--disable_diag_run` + +Builds the diag but does not run it on the run_target (skips trace generation/run phase). + +#### `--diag_build_dir` (`--diag_build`) + +Required. Output directory for built artifacts. A subdirectory is used for Meson build artifacts. 
+ +#### `--keep_meson_builddir` + +Keep the temporary Meson build directory (useful for inspecting logs/artifacts on failures). Default: `false`. -### `meson test` +#### `--rng_seed` -Runs the generated diag in Spike. +Seed for randomized build/run behavior. Accepts Python int literals (e.g., `1234`, `0xdeadbeef`, `0b1010`). If not provided, uses `rng_seed` from the manifest or auto-generates a random seed. + +#### `-v`, `--verbose` + +Enable verbose logging. + +#### `-j`, `--jobs` + +Number of parallel compile jobs. + +See `--help` for all options. + +## Running Unit Tests + +Use the `justfile` to build and run unit tests during development. + +Run `just --list` to see all the available commands. + +Examples: ```shell -meson test -C builddir +# Build all unit tests with GCC targeting release build and run on Spike. +just test gcc release spike ``` ## JumpStart APIs @@ -196,84 +319,110 @@ These are listed in the header files in the [include](../include) directory. Functions with names that end in `_from_smode()` or `_from_mmode()` can only be called from the respective modes. -### `get_thread_attributes_hart_id_from_smode()` +### Memory Management APIs -Returns the hart id of the hart calling the function. Can only be called from S-mode. +JumpStart provides a heap-based memory management system that supports allocations from DDR memory with different memory attributes (WB, WC, UC). -### `read_csr()`, `write_csr()`, `read_write_csr()`, `set_csr()`, `clear_csr()`, `read_set_csr()` and `read_clear_csr()` +If the diag attribute `enable_heap` is set to `True` a DDR WB heap will be initialized for use. -Operates on the specified CSR. The CSR names are passed to the RISC-V `csrr` and `csrw` instructions so the names should match what GCC expects. +Custom heaps (of any memory type and size) must be explicitly set up to point to memory regions in the memory map of the diag. 
+Note that multiple heaps can be active at a time but only one heap of a particular type (memory backing and memory attribute) can be set up at a time.
 
-### `run_function_in_smode()`, `run_function_in_umode()` and `run_function_in_vsmode()`
+#### Basic Memory Functions
+- `malloc()`, `free()`, `calloc()`, `memalign()`: Default memory allocation functions that use DDR WB memory.
 
-Diags can use these functions to run functions in the corresponding modes. Each function can be passed up to 6 arguments.
+#### Memory Type Specific Functions
+- `malloc_from_memory()`, `free_from_memory()`, `calloc_from_memory()`, `memalign_from_memory()`: Memory allocation functions that allow specifying the backing memory and memory type.
 
-The different modes cannot share the same pages so the functions belonging to each mode should be tagged with the corresponding linker script section name to place them in different sections.
+#### Heap Management
+- `setup_heap()`: Initialize a new heap with specified backing memory and memory type.
+- `deregister_heap()`: Clean up and remove a previously initialized heap. All allocations from this heap have to be freed before deregistering the heap.
+- `get_heap_size()`: Get the total size of a specific heap.
 
-Refer to Unit Tests `test002`, `test011`, `test018`, `test045` for examples of how these functions can be called and how the memory map can be set up.
+The following constants are defined for use with these functions:
 
-### `disable_mmu_from_smode()`
+**Backing Memory Types:**
+- `BACKING_MEMORY_DDR`: Standard DDR memory
 
-Disables the MMU. The page tables are set up and the MMU is enabled by default when the diag starts.
+**Memory Types:** +- `MEMORY_TYPE_WB`: Write-Back cached memory +- `MEMORY_TYPE_WC`: Write-Combining memory +- `MEMORY_TYPE_UC`: Uncached memory -### `sync_all_harts_from_smode()` +Example usage: +```c +// Set up a 4MB uncached DDR heap +setup_heap(0xA0200000, 0xA0200000 + 4 * 1024 * 1024, + BACKING_MEMORY_DDR, MEMORY_TYPE_UC); -Synchronization point for all active harts in the diag. +// Allocate from the uncached heap +void* buf = malloc_from_memory(size, BACKING_MEMORY_DDR, MEMORY_TYPE_UC); -### `register_mmode_trap_handler_override()` and `get_mmode_trap_handler_override()` +// Clean up when done +free_from_memory(buf, BACKING_MEMORY_DDR, MEMORY_TYPE_UC); +deregister_heap(BACKING_MEMORY_DDR, MEMORY_TYPE_UC); +``` -Allows the diag to register a trap handler override function for M-mode traps. The registered function will be called when the trap occurs in M-mode. +### `get_thread_attributes_cpu_id_from_smode()` -### `register_smode_trap_handler_override()` and `get_smode_trap_handler_override()` +Returns the cpu id of the cpu calling the function. Can only be called from S-mode. -Allows the diag to register a trap handler override function for S-mode traps. The registered function will be called when the trap occurs in S-mode. +### `read_csr()`, `write_csr()`, `read_write_csr()`, `set_csr()`, `clear_csr()`, `read_set_csr()` and `read_clear_csr()` -### `register_vsmode_trap_handler_override()` and `get_vsmode_trap_handler_override()` +Operates on the specified CSR. The CSR names are passed to the RISC-V `csrr` and `csrw` instructions so the names should match what GCC expects. -Allows the diag to register a trap handler override function for VS-mode traps. The registered function will be called when the trap occurs in VS-mode. 
+### `run_function_in_smode()`, `run_function_in_umode()`, `run_function_in_vsmode()` and `run_function_in_vumode()` -### `get_*epc_for_current_exception()` and `set_*epc_for_current_exception()` +Diags can use these functions to run functions in the corresponding modes. Each function can be passed up to 6 arguments. -These functions can be used to get and set the MEPC/SEPC during an exception. Allows modification of the EPC before returning from the exception. +`run_function_in_smode()` can only be called from M-mode. + +`run_function_in_umode()` and `run_function_in_vsmode()` can only be called from S-mode. -## Running Diags +`run_function_in_vumode()` can only be called from VS-mode. -JumpStart diags can be run on Spike and QEMU targets. +The different modes cannot share the same pages so the functions belonging to each mode should be tagged with the corresponding linker script section name to place them in different sections. -The target can be specified by passing the `-Dtarget` option to `meson setup`. The target can be `spike` or `qemu`. +*IMPORTANT*: The return values of these functions should be checked. The only way to tell if the function ran successfully is to check the return value. -`meson test` will attempt to run the diag on the target. To see the options being passed to the target, run `meson test` with the `-v` option. +Refer to Unit Tests `test002`, `test011`, `test018`, `test045`, `test048` for examples of how these functions can be called and how the memory map can be set up. -```shell -meson test -C builddir -v -``` +### `disable_mmu_from_smode()` -To generate the execution trace, pass the `generate_trace=true` option to `meson setup`. +Disables the MMU. The page tables are set up and the MMU is enabled by default when the diag starts. -```shell -meson setup -C builddir -Dgenerate_trace=true ... 
-``` +### `sync_all_cpus_from_smode()` -If the diag requires additional arguments be passed to the target, specify them with the `spike_additional_arguments`/`qemu_additional_arguments` options to `meson setup`. -These take a list of arguments. +Synchronization point for all active cpus in the diag. -```shell -meson setup -C builddir -Dspike_additional_arguments=-p2 ... -``` -## Boot Configs +### `sync_cpus_in_mask_from_smode()` -The boot path can be selected at build time with the `boot_config` meson option. +Synchronization point for a specific subset of CPUs specified by a CPU mask. This function provides more flexible synchronization than `sync_all_cpus_from_smode()` by allowing diags to synchronize only specific CPUs. -### `fw-none` (default) +**Parameters:** +- `cpu_mask`: A bitmask specifying which CPUs should participate in the synchronization. Each bit represents a CPU ID (bit 0 = CPU 0, bit 1 = CPU 1, etc.) +- `sync_point_address`: The address of a 4-byte aligned memory location to use as the synchronization point. Each CPU combination should use its own unique sync point to avoid conflicts. - JumpStart starts running from hardware reset. No system firmware is expected to be present. +**Important Notes:** +- Each CPU combination must use its own dedicated sync point to prevent synchronization conflicts +- The sync point must be 4-byte aligned and placed in a memory section accessible to all participating CPUs +- Only CPUs specified in the mask will participate in the synchronization +- The primary CPU (lowest CPU ID in the mask) coordinates the synchronization process -### `fw-m` +See [test019](../tests/common/test019/) for examples of how the sync functions can be used. -JumpStart starts in M-mode at the `mmode_start_address` after running system firmware for initialization. The system firmware that runs prior to JumpStart can be overwritten by JumpStart. 
+### `register_mmode_trap_handler_override()` and `get_mmode_trap_handler_override()` + +Allows the diag to register a trap handler override function for M-mode traps. The registered function will be called when the trap occurs in M-mode. + +### `register_smode_trap_handler_override()` and `get_smode_trap_handler_override()` + +Allows the diag to register a trap handler override function for S-mode traps. The registered function will be called when the trap occurs in S-mode. + +### `register_vsmode_trap_handler_override()` and `get_vsmode_trap_handler_override()` -### `fw-sbi` +Allows the diag to register a trap handler override function for VS-mode traps. The registered function will be called when the trap occurs in VS-mode. -JumpStart starts in S-mode at the `sbi_firmware_trampoline` address after running system firmware for initialization. The system firmware is expected to be resident and will not be overwritten by JumpStart. JumpStart will interact with the system firmware using the SBI HSM extension - for example, to boot non-booting harts. +### `get_*epc_for_current_exception()` and `set_*epc_for_current_exception()` -Only S-mode based diags can be run in this mode as JumpStart cannot enter M-mode. +These functions can be used to get and set the MEPC/SEPC during an exception. Allows modification of the EPC before returning from the exception. diff --git a/include/common/cpu_bits.h b/include/common/cpu_bits.h index 461537f2..50413370 100644 --- a/include/common/cpu_bits.h +++ b/include/common/cpu_bits.h @@ -1,10 +1,12 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ /* RISC-V ISA constants */ -/* This file is based on qemu/target/riscv/cpu_bits.h. Sync if needed. */ +/* This file is based on RISC-V CPU bits definitions. Sync if needed. 
*/ #ifndef TARGET_RISCV_CPU_BITS_H #define TARGET_RISCV_CPU_BITS_H @@ -21,6 +23,7 @@ #define BIT_MASK(start, end) ((~0ULL >> (64 - ((end) - (start) + 1))) << (start)) #define BIT(nr) (1UL << (nr)) +#define BIT_ULL(nr) (1ULL << (nr)) #define ALIGN_UP_SIZE(base, size) (((base) + (size) - 1) & ~((uint64_t)(size)-1)) /* Extension context status mask */ @@ -56,6 +59,9 @@ /* Control and Status Registers */ +/* zicfiss user ssp csr */ +#define CSR_SSP 0x011 + /* User Trap Setup */ #define CSR_USTATUS 0x000 #define CSR_UIE 0x004 @@ -153,7 +159,6 @@ #define CSR_HPMCOUNTER29H 0xc9d #define CSR_HPMCOUNTER30H 0xc9e #define CSR_HPMCOUNTER31H 0xc9f -#define CSR_SCOUNTINHIBIT 0x120 /* Machine Timers and Counters */ #define CSR_MCYCLE 0xb00 @@ -179,6 +184,8 @@ /* 32-bit only */ #define CSR_MSTATUSH 0x310 +#define CSR_MEDELEGH 0x312 +#define CSR_HEDELEGH 0x612 /* Machine Trap Handling */ #define CSR_MSCRATCH 0x340 @@ -193,6 +200,8 @@ /* Machine-Level Window to Indirectly Accessed Registers (AIA) */ #define CSR_MISELECT 0x350 #define CSR_MIREG 0x351 + +/* Machine Indirect Register Alias */ #define CSR_MIREG2 0x352 #define CSR_MIREG3 0x353 #define CSR_MIREG4 0x355 @@ -229,6 +238,11 @@ #define CSR_SSTATEEN2 0x10E #define CSR_SSTATEEN3 0x10F +#define CSR_SRMCFG 0x181 + +/* Supervisor Counter Delegation */ +#define CSR_SCOUNTINHIBIT 0x120 + /* Supervisor Trap Handling */ #define CSR_SSCRATCH 0x140 #define CSR_SEPC 0x141 @@ -250,6 +264,8 @@ /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */ #define CSR_SISELECT 0x150 #define CSR_SIREG 0x151 + +/* Supervisor Indirect Register Alias */ #define CSR_SIREG2 0x152 #define CSR_SIREG3 0x153 #define CSR_SIREG4 0x155 @@ -322,6 +338,13 @@ #define CSR_VSISELECT 0x250 #define CSR_VSIREG 0x251 +/* Virtual Supervisor Indirect Alias */ +#define CSR_VSIREG2 0x252 +#define CSR_VSIREG3 0x253 +#define CSR_VSIREG4 0x255 +#define CSR_VSIREG5 0x256 +#define CSR_VSIREG6 0x257 + /* VS-Level Interrupts (H-extension with AIA) */ #define 
CSR_VSTOPEI 0x25c #define CSR_VSTOPI 0xeb0 @@ -354,6 +377,8 @@ #define SMSTATEEN0_CS (1ULL << 0) #define SMSTATEEN0_FCSR (1ULL << 1) #define SMSTATEEN0_JVT (1ULL << 2) +#define SMSTATEEN0_CTR (1ULL << 54) +#define SMSTATEEN0_P1P13 (1ULL << 56) #define SMSTATEEN0_HSCONTXT (1ULL << 57) #define SMSTATEEN0_IMSIC (1ULL << 58) #define SMSTATEEN0_AIA (1ULL << 59) @@ -390,17 +415,19 @@ #define CSR_PMPADDR14 0x3be #define CSR_PMPADDR15 0x3bf -/* Debug/Trace Registers (shared with Debug Mode) */ +/* Trace Registers (shared with Debug Mode) */ #define CSR_TSELECT 0x7a0 #define CSR_TDATA1 0x7a1 #define CSR_TDATA2 0x7a2 #define CSR_TDATA3 0x7a3 #define CSR_TINFO 0x7a4 +#define CSR_MCONTEXT 0x7a8 /* Debug Mode Registers */ #define CSR_DCSR 0x7b0 #define CSR_DPC 0x7b1 -#define CSR_DSCRATCH 0x7b2 +#define CSR_DSCRATCH0 0x7b2 +#define CSR_DSCRATCH1 0x7b3 /* Performance Counters */ #define CSR_MHPMCOUNTER3 0xb03 @@ -436,6 +463,9 @@ /* Machine counter-inhibit register */ #define CSR_MCOUNTINHIBIT 0x320 +/* Machine counter configuration registers */ +#define CSR_MCYCLECFG 0x321 +#define CSR_MINSTRETCFG 0x322 #define CSR_MHPMEVENT3 0x323 #define CSR_MHPMEVENT4 0x324 #define CSR_MHPMEVENT5 0x325 @@ -466,6 +496,9 @@ #define CSR_MHPMEVENT30 0x33e #define CSR_MHPMEVENT31 0x33f +#define CSR_MCYCLECFGH 0x721 +#define CSR_MINSTRETCFGH 0x722 + #define CSR_MHPMEVENT3H 0x723 #define CSR_MHPMEVENT4H 0x724 #define CSR_MHPMEVENT5H 0x725 @@ -526,13 +559,6 @@ #define CSR_MHPMCOUNTER30H 0xb9e #define CSR_MHPMCOUNTER31H 0xb9f -#define HPMEVENT_VUINH 0x400000000000000ULL -#define HPMEVENT_VSINH 0x800000000000000ULL -#define HPMEVENT_UINH 0x1000000000000000ULL -#define HPMEVENT_SINH 0x2000000000000000ULL -#define HPMEVENT_MINH 0x4000000000000000ULL -#define HPMEVENT_OVF 0x8000000000000000ULL - /* * User PointerMasking registers * NB: actual CSR numbers might be changed in future @@ -591,10 +617,17 @@ #define MSTATUS_TVM 0x00100000 /* since: priv-1.10 */ #define MSTATUS_TW 0x00200000 /* since: priv-1.10 
*/ #define MSTATUS_TSR 0x00400000 /* since: priv-1.10 */ +#define MSTATUS_SPELP 0x00800000 /* zicfilp */ +#define MSTATUS_SDT 0x01000000 +#define MSTATUS_UXL 0x300000000ULL +#define MSTATUS_SXL 0xC00000000ULL #define MSTATUS_GVA 0x4000000000ULL #define MSTATUS_MPV 0x8000000000ULL +#define MSTATUS_MPELP 0x20000000000ULL /* zicfilp */ +#define MSTATUS_MDT 0x40000000000ULL /* Smdbltrp extension */ #define MSTATUS_MPP_SHIFT 11 #define MSTATUS_MPP_MSB 12 +#define MSTATUS_MPV_SHIFT 39 #define MSTATUS64_UXL 0x0000000300000000ULL #define MSTATUS64_SXL 0x0000000C00000000ULL @@ -603,21 +636,65 @@ #define MSTATUS64_SD 0x8000000000000000ULL #define MSTATUSH128_SD 0x8000000000000000ULL +/* mvien CSR bits */ +#define MVIEN_LCOFIEN 0x2000 +#define MVIEN_LPRIEN 0x800000000 +#define MVIEN_HPRIEN 0x80000000000 +#define MVIEN_PTIEN 0x200000000000 + +/* mvip CSR bits */ +#define MVIP_LCOFIP 0x2000 +#define MVIP_LPRIP 0x800000000 +#define MVIP_HPRIP 0x80000000000 +#define MVIP_PTIP 0x200000000000 + +/* hvien CSR bits */ +#define HVIEN_LCOFIEN 0x2000 +#define HVIEN_LPRIEN 0x800000000 +#define HVIEN_HPRIEN 0x80000000000 +#define HVIEN_PTIEN 0x200000000000 + +/* hvip CSR bits */ +#define HVIP_VSTIP 0x40 +#define HVIP_VSEIP 0x400 +#define HVIP_LCOFIP 0x2000 +#define HVIP_LPRIP 0x800000000 +#define HVIP_HPRIP 0x80000000000 +#define HVIP_PTIP 0x200000000000 + #define MISA32_MXL 0xC0000000 #define MISA64_MXL 0xC000000000000000ULL /* sstatus CSR bits */ -#define SSTATUS_UIE 0x00000001 -#define SSTATUS_SIE 0x00000002 -#define SSTATUS_UPIE 0x00000010 -#define SSTATUS_SPIE 0x00000020 -#define SSTATUS_SPP 0x00000100 -#define SSTATUS_VS 0x00000600 -#define SSTATUS_FS 0x00006000 -#define SSTATUS_XS 0x00018000 -#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */ -#define SSTATUS_MXR 0x00080000 -#define SSTATUS_SPP_SHIFT 8 +/* Bit positions */ +#define SSTATUS_UIE_POS 0 +#define SSTATUS_SIE_POS 1 +#define SSTATUS_UPIE_POS 4 +#define SSTATUS_SPIE_POS 5 +#define SSTATUS_UBE_POS 6 +#define 
SSTATUS_SBE_POS 7 +#define SSTATUS_SPP_POS 8 +#define SSTATUS_VS_POS 9 +#define SSTATUS_FS_POS 13 +#define SSTATUS_XS_POS 15 +#define SSTATUS_SUM_POS 18 +#define SSTATUS_MXR_POS 19 + +/* Masks derived from bit positions */ +#define SSTATUS_UIE (1 << SSTATUS_UIE_POS) +#define SSTATUS_SIE (1 << SSTATUS_SIE_POS) +#define SSTATUS_UPIE (1 << SSTATUS_UPIE_POS) +#define SSTATUS_SPIE (1 << SSTATUS_SPIE_POS) +#define SSTATUS_UBE (1 << SSTATUS_UBE_POS) +#define SSTATUS_SBE (1 << SSTATUS_SBE_POS) +#define SSTATUS_SPP (1 << SSTATUS_SPP_POS) +#define SSTATUS_VS 0x00000600 /* Multi-bit field, keep explicit value */ +#define SSTATUS_FS 0x00006000 /* Multi-bit field, keep explicit value */ +#define SSTATUS_XS 0x00018000 /* Multi-bit field, keep explicit value */ +#define SSTATUS_SUM (1 << SSTATUS_SUM_POS) +#define SSTATUS_MXR (1 << SSTATUS_MXR_POS) +#define SSTATUS_SPELP MSTATUS_SPELP /* zicfilp */ +#define SSTATUS_SPP_SHIFT SSTATUS_SPP_POS #define SSTATUS64_UXL 0x0000000300000000ULL @@ -628,24 +705,69 @@ #define HSTATUS_VSBE 0x00000020 #define HSTATUS_GVA 0x00000040 #define HSTATUS_SPV 0x00000080 +#define HSTATUS_SPV_SHIFT 7 #define HSTATUS_SPVP 0x00000100 #define HSTATUS_HU 0x00000200 #define HSTATUS_VGEIN 0x0003F000 #define HSTATUS_VTVM 0x00100000 #define HSTATUS_VTW 0x00200000 #define HSTATUS_VTSR 0x00400000 -#define HSTATUS_VSXL 0x300000000 +#define HSTATUS_VSXL 0x300000000ULL +#define HSTATUS_VGEIN_SHIFT 12 #define HSTATUS32_WPRI 0xFF8FF87E #define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL +/* hie CSR bits */ +#define HIE_VSTIE 0x40 +#define HIE_VSEIE 0x400 +#define HIE_SGEIE 0x1000 + #define COUNTEREN_CY (1 << 0) #define COUNTEREN_TM (1 << 1) #define COUNTEREN_IR (1 << 2) #define COUNTEREN_HPM3 (1 << 3) +#define COUNTEREN_HPM4 (1 << 4) +#define COUNTEREN_HPM5 (1 << 5) +#define COUNTEREN_HPM6 (1 << 6) +#define COUNTEREN_HPM7 (1 << 7) +#define COUNTEREN_HPM8 (1UL << 8) +#define COUNTEREN_HPM9 (1UL << 9) +#define COUNTEREN_HPM10 (1UL << 10) +#define COUNTEREN_HPM11 (1UL << 11) 
+#define COUNTEREN_HPM12 (1UL << 12) +#define COUNTEREN_HPM13 (1UL << 13) +#define COUNTEREN_HPM14 (1UL << 14) +#define COUNTEREN_HPM15 (1UL << 15) +#define COUNTEREN_HPM16 (1UL << 16) +#define COUNTEREN_HPM17 (1UL << 17) +#define COUNTEREN_HPM18 (1UL << 18) +#define COUNTEREN_HPM19 (1UL << 19) +#define COUNTEREN_HPM20 (1UL << 20) +#define COUNTEREN_HPM21 (1UL << 21) +#define COUNTEREN_HPM22 (1UL << 22) +#define COUNTEREN_HPM23 (1UL << 23) +#define COUNTEREN_HPM24 (1UL << 24) +#define COUNTEREN_HPM25 (1UL << 25) +#define COUNTEREN_HPM26 (1UL << 26) +#define COUNTEREN_HPM27 (1UL << 27) +#define COUNTEREN_HPM28 (1UL << 28) +#define COUNTEREN_HPM29 (1UL << 29) +#define COUNTEREN_HPM30 (1UL << 30) +#define COUNTEREN_HPM31 (1UL << 31) /* vsstatus CSR bits */ -#define VSSTATUS64_UXL 0x0000000300000000ULL +#define VSSTATUS_SIE 0x2 +#define VSSTATUS_SPIE 0x20 +#define VSSTATUS_UBE 0x40 +#define VSSTATUS_SPP 0x100 +#define VSSTATUS_VS 0x600 +#define VSSTATUS_FS 0x6000 +#define VSSTATUS_SUM 0x40000 +#define VSSTATUS_MXR 0x80000 +#define VSSTATUS_XS 0x18000 +#define VSSTATUS_UXL 0x300000000ULL +#define VSSTATUS_SD 0x8000000000000000ULL /* Privilege modes */ #define PRV_U 0 @@ -682,6 +804,11 @@ #define VM_1_10_SV57 10 #define VM_1_10_SV64 11 +/* VM modes (hgsatp.mode) */ +#define VM_1_10_SV39x4 8 +#define VM_1_10_SV48x4 9 +#define VM_1_10_SV57x4 10 + /* Page table entry (PTE) fields */ #define PTE_V 0x001 /* Valid */ #define PTE_R 0x002 /* Read */ @@ -697,6 +824,10 @@ #define PTE_RESERVED 0x1FC0000000000000ULL /* Reserved bits */ #define PTE_ATTR (PTE_N | PTE_PBMT) /* All attributes bits */ +#define PTE_PBMT_PMA 0x0000000000000000ULL +#define PTE_PBMT_NC 0x0000000000000001ULL +#define PTE_PBMT_IO 0x0000000000000002ULL + /* Page table PPN shift amount */ #define PTE_PPN_SHIFT 10 @@ -726,12 +857,18 @@ #define RISCV_EXCP_INST_PAGE_FAULT 0xc /* since: priv-1.10.0 */ #define RISCV_EXCP_LOAD_PAGE_FAULT 0xd /* since: priv-1.10.0 */ #define RISCV_EXCP_STORE_PAGE_FAULT 0xf /* since: 
priv-1.10.0 */ -#define RISCV_EXCP_SEMIHOST 0x10 -#define RISCV_EXCP_DATA_CORRUPTION_EXCEPTION 0x13 /* Srastraps */ +#define RISCV_EXCP_SW_CHECK 0x12 /* since: priv-1.13.0 */ +#define RISCV_EXCP_HW_ERR 0x13 /* since: priv-1.13.0 */ #define RISCV_EXCP_INST_GUEST_PAGE_FAULT 0x14 #define RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT 0x15 #define RISCV_EXCP_VIRT_INSTRUCTION_FAULT 0x16 #define RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT 0x17 +#define RISCV_EXCP_SEMIHOST 0x3f + +/* zicfilp defines lp violation results in sw check with tval = 2*/ +#define RISCV_EXCP_SW_CHECK_FCFI_TVAL 2 +/* zicfiss defines ss violation results in sw check with tval = 3*/ +#define RISCV_EXCP_SW_CHECK_BCFI_TVAL 3 #define RISCV_EXCP_INT_FLAG 0x80000000 #define RISCV_EXCP_INT_MASK 0x7fffffff @@ -751,24 +888,27 @@ #define IRQ_M_EXT 11 #define IRQ_S_GEXT 12 #define IRQ_PMU_OVF 13 -#define IRQ_LOCAL_MAX 16 +#define IRQ_PWR 45 +#define IRQ_LOCAL_MAX 64 +/* -1 is due to bit zero of hgeip and hgeie being ROZ. */ #define IRQ_LOCAL_GUEST_MAX (TARGET_LONG_BITS - 1) /* mip masks */ -#define MIP_USIP (1 << IRQ_U_SOFT) -#define MIP_SSIP (1 << IRQ_S_SOFT) -#define MIP_VSSIP (1 << IRQ_VS_SOFT) -#define MIP_MSIP (1 << IRQ_M_SOFT) -#define MIP_UTIP (1 << IRQ_U_TIMER) -#define MIP_STIP (1 << IRQ_S_TIMER) -#define MIP_VSTIP (1 << IRQ_VS_TIMER) -#define MIP_MTIP (1 << IRQ_M_TIMER) -#define MIP_UEIP (1 << IRQ_U_EXT) -#define MIP_SEIP (1 << IRQ_S_EXT) -#define MIP_VSEIP (1 << IRQ_VS_EXT) -#define MIP_MEIP (1 << IRQ_M_EXT) -#define MIP_SGEIP (1 << IRQ_S_GEXT) -#define MIP_LCOFIP (1 << IRQ_PMU_OVF) +#define MIP_USIP (1ULL << IRQ_U_SOFT) +#define MIP_SSIP (1ULL << IRQ_S_SOFT) +#define MIP_VSSIP (1ULL << IRQ_VS_SOFT) +#define MIP_MSIP (1ULL << IRQ_M_SOFT) +#define MIP_UTIP (1ULL << IRQ_U_TIMER) +#define MIP_STIP (1ULL << IRQ_S_TIMER) +#define MIP_VSTIP (1ULL << IRQ_VS_TIMER) +#define MIP_MTIP (1ULL << IRQ_M_TIMER) +#define MIP_UEIP (1ULL << IRQ_U_EXT) +#define MIP_SEIP (1ULL << IRQ_S_EXT) +#define MIP_VSEIP (1ULL << IRQ_VS_EXT) 
+#define MIP_MEIP (1ULL << IRQ_M_EXT) +#define MIP_SGEIP (1ULL << IRQ_S_GEXT) +#define MIP_LCOFIP (1ULL << IRQ_PMU_OVF) +#define MIP_PTIP (1ULL << IRQ_PWR) /* sip masks */ #define SIP_SSIP MIP_SSIP @@ -778,13 +918,16 @@ #define SIP_LCOFIP MIP_LCOFIP /* MIE masks */ -#define MIE_SEIE (1 << IRQ_S_EXT) -#define MIE_UEIE (1 << IRQ_U_EXT) -#define MIE_MTIE (1 << IRQ_M_TIMER) -#define MIE_STIE (1 << IRQ_S_TIMER) -#define MIE_UTIE (1 << IRQ_U_TIMER) -#define MIE_SSIE (1 << IRQ_S_SOFT) -#define MIE_USIE (1 << IRQ_U_SOFT) +#define MIE_SEIE (1ULL << IRQ_S_EXT) +#define MIE_MEIE (1ULL << IRQ_M_EXT) +#define MIE_UEIE (1ULL << IRQ_U_EXT) +#define MIE_MTIE (1ULL << IRQ_M_TIMER) +#define MIE_STIE (1ULL << IRQ_S_TIMER) +#define MIE_UTIE (1ULL << IRQ_U_TIMER) +#define MIE_SSIE (1ULL << IRQ_S_SOFT) +#define MIE_USIE (1ULL << IRQ_U_SOFT) +#define MIE_LCOFIE (1ULL << IRQ_PMU_OVF) +#define MIE_PTIE (1ULL << IRQ_PWR) /* General PointerMasking CSR bits */ #define PM_ENABLE 0x00000001ULL @@ -793,6 +936,8 @@ /* Execution environment configuration bits */ #define MENVCFG_FIOM BIT(0) +#define MENVCFG_LPE BIT(2) /* zicfilp */ +#define MENVCFG_SSE BIT(3) /* zicfiss */ #define MENVCFG_CBIE (3UL << 4) #define MENVCFG_CBCFE BIT(6) #define MENVCFG_CBZE BIT(7) @@ -808,11 +953,15 @@ #define MENVCFGH_STCE BIT(31) #define SENVCFG_FIOM MENVCFG_FIOM +#define SENVCFG_LPE MENVCFG_LPE +#define SENVCFG_SSE MENVCFG_SSE #define SENVCFG_CBIE MENVCFG_CBIE #define SENVCFG_CBCFE MENVCFG_CBCFE #define SENVCFG_CBZE MENVCFG_CBZE #define HENVCFG_FIOM MENVCFG_FIOM +#define HENVCFG_LPE MENVCFG_LPE +#define HENVCFG_SSE MENVCFG_SSE #define HENVCFG_CBIE MENVCFG_CBIE #define HENVCFG_CBCFE MENVCFG_CBCFE #define HENVCFG_CBZE MENVCFG_CBZE @@ -887,10 +1036,15 @@ #define ISELECT_IMSIC_EIE63 0xff #define ISELECT_IMSIC_FIRST ISELECT_IMSIC_EIDELIVERY #define ISELECT_IMSIC_LAST ISELECT_IMSIC_EIE63 -#define ISELECT_MASK 0x1ff +#define ISELECT_MASK_AIA 0x1ff + +/* [M|S|VS]SELCT value for Indirect CSR Access Extension */ +#define 
ISELECT_CD_FIRST 0x40 +#define ISELECT_CD_LAST 0x5f +#define ISELECT_MASK_SXCSRIND 0xfff /* Dummy [M|S|VS]ISELECT value for emulating [M|S|VS]TOPEI CSRs */ -#define ISELECT_IMSIC_TOPEI (ISELECT_MASK + 1) +#define ISELECT_IMSIC_TOPEI (ISELECT_MASK_AIA + 1) /* IMSIC bits (AIA) */ #define IMSIC_TOPEI_IID_SHIFT 16 @@ -936,8 +1090,27 @@ #define SEED_OPST_DEAD 0b11U #define SEED_ENTROPY_MASK 0xFFFFU -/* PMU related bits */ -#define MIE_LCOFIE (1 << IRQ_PMU_OVF) +#define MCYCLECFG_BIT_MINH BIT_ULL(62) +#define MCYCLECFGH_BIT_MINH BIT(30) +#define MCYCLECFG_BIT_SINH BIT_ULL(61) +#define MCYCLECFGH_BIT_SINH BIT(29) +#define MCYCLECFG_BIT_UINH BIT_ULL(60) +#define MCYCLECFGH_BIT_UINH BIT(28) +#define MCYCLECFG_BIT_VSINH BIT_ULL(59) +#define MCYCLECFGH_BIT_VSINH BIT(27) +#define MCYCLECFG_BIT_VUINH BIT_ULL(58) +#define MCYCLECFGH_BIT_VUINH BIT(26) + +#define MINSTRETCFG_BIT_MINH BIT_ULL(62) +#define MINSTRETCFGH_BIT_MINH BIT(30) +#define MINSTRETCFG_BIT_SINH BIT_ULL(61) +#define MINSTRETCFGH_BIT_SINH BIT(29) +#define MINSTRETCFG_BIT_UINH BIT_ULL(60) +#define MINSTRETCFGH_BIT_UINH BIT(28) +#define MINSTRETCFG_BIT_VSINH BIT_ULL(59) +#define MINSTRETCFGH_BIT_VSINH BIT(27) +#define MINSTRETCFG_BIT_VUINH BIT_ULL(58) +#define MINSTRETCFGH_BIT_VUINH BIT(26) #define MHPMEVENT_BIT_OF BIT_ULL(63) #define MHPMEVENTH_BIT_OF BIT(31) @@ -952,8 +1125,20 @@ #define MHPMEVENT_BIT_VUINH BIT_ULL(58) #define MHPMEVENTH_BIT_VUINH BIT(26) -#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000) -#define MHPMEVENT_IDX_MASK 0xFFFFF +#define MHPMEVENT_FILTER_MASK (MHPMEVENT_BIT_MINH | \ + MHPMEVENT_BIT_SINH | \ + MHPMEVENT_BIT_UINH | \ + MHPMEVENT_BIT_VSINH | \ + MHPMEVENT_BIT_VUINH) + +#define MHPMEVENTH_FILTER_MASK (MHPMEVENTH_BIT_MINH | \ + MHPMEVENTH_BIT_SINH | \ + MHPMEVENTH_BIT_UINH | \ + MHPMEVENTH_BIT_VSINH | \ + MHPMEVENTH_BIT_VUINH) + +#define MHPMEVENT_SSCOF_MASK 0xFF00000000000000ULL +#define MHPMEVENT_IDX_MASK (~MHPMEVENT_SSCOF_MASK) #define MHPMEVENT_SSCOF_RESVD 16 /* JVT CSR bits */ diff 
--git a/include/common/delay.h b/include/common/delay.h new file mode 100644 index 00000000..8d5268c0 --- /dev/null +++ b/include/common/delay.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include + +/** + * @brief Macro for delay_us implementation that works in both mmode and smode + * + * This macro provides the core delay_us functionality that can be used by + * both mmode and smode implementations. It takes a parameter for the delay + * in microseconds and implements the delay using cycle counting and pause + * instructions. + * + * @param __delay_in_useconds Number of microseconds to delay execution + */ +#define _delay_us(__delay_in_useconds) \ + ({ \ + register volatile uint64_t __start_time, __end_time; \ + const uint32_t __iter_count = 10; \ + __start_time = read_csr(CSR_TIME); \ + for (uint32_t __i = 0; __i < __iter_count; __i++) { \ + asm volatile("pause"); \ + } \ + __end_time = read_csr(CSR_TIME); \ + uint64_t __avg_lat = (__end_time - __start_time) / __iter_count; \ + /* Check if delay has already completed within iter_count */ \ + if ((__delay_in_useconds / __avg_lat) <= __iter_count) { \ + /* Delay already completed, no additional iterations needed */ \ + } else { \ + uint32_t __latency_iter_count = \ + (__delay_in_useconds / __avg_lat) - __iter_count; \ + for (uint32_t __i = 0; __i < __latency_iter_count; __i++) { \ + asm volatile("pause"); \ + } \ + } \ + }) diff --git a/include/common/heap.smode.h b/include/common/heap.smode.h index 7fec4bb0..1e931d29 100644 --- a/include/common/heap.smode.h +++ b/include/common/heap.smode.h @@ -1,30 +1,69 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +// SPDX-FileCopyrightText: 2016 by Lukasz Janyst #pragma once +#include #include +#include //------------------------------------------------------------------------------ -//! Allocate memory on the heap +// Malloc helper structs //------------------------------------------------------------------------------ -void *malloc(size_t size); +struct memchunk { + struct memchunk *next; + uint64_t size; +}; + +typedef struct memchunk memchunk; //------------------------------------------------------------------------------ -//! Free the memory +// Heap Constants //------------------------------------------------------------------------------ -void free(void *ptr); - -void *calloc(size_t nmemb, size_t size); +// Allocating anything less than 8 bytes is kind of pointless, the +// book-keeping overhead is too big. +//------------------------------------------------------------------------------ +#define MIN_HEAP_ALLOCATION_SIZE 8 +#define PER_HEAP_ALLOCATION_METADATA_SIZE \ + sizeof(struct memchunk) // Per allocation metadata size void *memalign(size_t alignment, size_t size); -void *memset(void *s, int c, size_t n); - -void *memcpy(void *dest, const void *src, size_t n); - //------------------------------------------------------------------------------ //! 
Debug Features //------------------------------------------------------------------------------ void print_heap(void); + +//------------------------------------------------------------------------------ +// Memory type and backing memory specific versions +//------------------------------------------------------------------------------ +void *malloc_from_memory(size_t size, uint8_t backing_memory, + uint8_t memory_type); + +void free_from_memory(void *ptr, uint8_t backing_memory, uint8_t memory_type); + +void *calloc_from_memory(size_t nmemb, size_t size, uint8_t backing_memory, + uint8_t memory_type); + +void *memalign_from_memory(size_t alignment, size_t size, + uint8_t backing_memory, uint8_t memory_type); + +void setup_heap(uint64_t heap_start, uint64_t heap_end, uint8_t backing_memory, + uint8_t memory_type); + +void deregister_heap(uint8_t backing_memory, uint8_t memory_type); + +size_t get_heap_size(uint8_t backing_memory, uint8_t memory_type); + +//------------------------------------------------------------------------------ +// Helper functions to convert numeric values to readable strings +//------------------------------------------------------------------------------ +const char *backing_memory_to_string(uint8_t backing_memory); +const char *memory_type_to_string(uint8_t memory_type); + +bool is_valid_heap(uint8_t backing_memory, uint8_t memory_type); diff --git a/include/common/jumpstart.h b/include/common/jumpstart.h index 81200db2..972f7dda 100644 --- a/include/common/jumpstart.h +++ b/include/common/jumpstart.h @@ -1,15 +1,14 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #pragma once #include #include -#include "jumpstart_data_structures.h" -#include "jumpstart_defines.h" - #define __ASM_STR(x) #x #define ARRAY_SIZE(a) (sizeof(a) / sizeof(*a)) @@ -59,6 +58,20 @@ __v; \ }) +#define load_reserved_64(addr) \ + ({ \ + unsigned long __tmp; \ + asm volatile("lr.d %0, (%1)" : "=r"(__tmp) : "r"(addr)); \ + __tmp; \ + }) + +#define store_conditional_64(addr, val) \ + ({ \ + unsigned long ret = 0; \ + asm volatile("sc.d %0, %1, (%2)" : "=r"(ret) : "r"(val), "r"(addr)); \ + ret; \ + }) + #define STRINGIFY(x) #x #define ADD_QUOTES(x) STRINGIFY(x) // Disables instruction by instruction checking when running on the simulator, @@ -73,7 +86,9 @@ int run_function_in_umode(uint64_t function_address, ...); int run_function_in_smode(uint64_t function_address, ...); int run_function_in_vsmode(uint64_t function_address, ...); +int run_function_in_vumode(uint64_t function_address, ...); +void setup_mmu_from_smode(void); void disable_mmu_from_smode(void); uint64_t get_mmode_trap_handler_override(uint64_t mcause); @@ -95,30 +110,51 @@ uint64_t get_thread_attributes_bookend_magic_number_from_smode(void); uint64_t get_thread_attributes_trap_override_struct_address_from_smode(void); uint8_t get_thread_attributes_current_mode_from_smode(void); uint8_t get_thread_attributes_current_v_bit_from_smode(void); -uint8_t get_thread_attributes_hart_id_from_smode(void); +uint8_t get_thread_attributes_cpu_id_from_smode(void); +uint8_t get_thread_attributes_physical_cpu_id_from_smode(void); +uint64_t get_thread_attributes_marchid_from_smode(void); +uint64_t get_thread_attributes_mimpid_from_smode(void); uint8_t get_thread_attributes_vsmode_setup_done_from_smode(void); uint8_t get_thread_attributes_num_context_saves_remaining_in_smode_from_smode(void); uint8_t get_thread_attributes_num_context_saves_remaining_in_smode_from_mmode(void); +struct thread_attributes * +get_thread_attributes_for_cpu_id_from_smode(uint8_t cpu_id); 
+ +uint8_t get_physical_cpu_id_for_cpu_id_from_smode(uint8_t cpu_id); + +struct thread_attributes * +get_thread_attributes_for_cpu_id_from_mmode(uint8_t cpu_id); + +uint8_t get_physical_cpu_id_for_cpu_id_from_mmode(uint8_t cpu_id); + uint64_t get_thread_attributes_bookend_magic_number_from_mmode(void); uint64_t get_thread_attributes_trap_override_struct_address_from_mmode(void); uint8_t get_thread_attributes_current_mode_from_mmode(void); uint8_t get_thread_attributes_current_v_bit_from_mmode(void); -uint8_t get_thread_attributes_hart_id_from_mmode(void); +uint8_t get_thread_attributes_cpu_id_from_mmode(void); +uint8_t get_thread_attributes_physical_cpu_id_from_mmode(void); +uint64_t get_thread_attributes_marchid_from_mmode(void); +uint64_t get_thread_attributes_mimpid_from_mmode(void); uint8_t get_thread_attributes_smode_setup_done_from_mmode(void); uint8_t get_thread_attributes_num_context_saves_remaining_in_mmode_from_mmode(void); uint8_t get_thread_attributes_num_context_saves_remaining_in_mmode_from_smode(void); -void sync_all_harts_from_smode(void); -void sync_all_harts_from_mmode(void); +void sync_all_cpus_from_smode(void); +void sync_all_cpus_from_mmode(void); +void sync_cpus_in_mask_from_smode(uint64_t cpu_mask, + uint64_t sync_point_address); +void sync_cpus_in_mask_from_mmode(uint64_t cpu_mask, + uint64_t sync_point_address); void jumpstart_umode_fail(void) __attribute__((noreturn)); void jumpstart_smode_fail(void) __attribute__((noreturn)); void jumpstart_vsmode_fail(void) __attribute__((noreturn)); +void jumpstart_vumode_fail(void) __attribute__((noreturn)); void jumpstart_mmode_fail(void) __attribute__((noreturn)); uint64_t get_mepc_for_current_exception(void); @@ -129,5 +165,40 @@ void set_sepc_for_current_exception(uint64_t new_sepc); void exit_from_smode(uint64_t return_code) __attribute__((noreturn)); -#define __attr_stext __attribute__((section(".jumpstart.text.smode"))) -#define __attr_mtext __attribute__((section(".jumpstart.text.mmode"))) 
+#define __attr_stext __attribute__((section(".jumpstart.cpu.text.smode"))) +#define __attr_privdata \ + __attribute__((section(".jumpstart.cpu.data.privileged"))) + +// Only functions that need to be placed in the 4K mmode init section +// should be marked with __attr_mtext_init. +#define __attr_mtext_init \ + __attribute__((section(".jumpstart.cpu.text.mmode.init"))) +#define __attr_mtext __attribute__((section(".jumpstart.cpu.text.mmode"))) + +// Attributes for diag custom rcode hook functions and data +#define __attr_diag_custom_rcode_hook_text \ + __attribute__((section(".diag_custom_rcode_hook.cpu.text.rcode"))) +#define __attr_diag_custom_rcode_hook_data \ + __attribute__((section(".diag_custom_rcode_hook.cpu.data.rcode"))) + +uint64_t read_time(void); + +/** + * @brief Delays execution by the specified number of microseconds (S-mode) + * + * The function delays the execution of the program by (twiddling thumbs for) + * the number of microseconds provided as a parameter. + * + * @param delay_in_useconds Number of microseconds to delay execution + */ +void delay_us_from_smode(uint32_t delay_in_useconds); + +/** + * @brief Delays execution by the specified number of microseconds (M-mode) + * + * The function delays the execution of the program by (twiddling thumbs for) + * the number of microseconds provided as a parameter. + * + * @param delay_in_useconds Number of microseconds to delay execution + */ +void delay_us_from_mmode(uint32_t delay_in_useconds); diff --git a/include/common/lock.h b/include/common/lock.h new file mode 100644 index 00000000..8dc55fce --- /dev/null +++ b/include/common/lock.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +typedef enum { + AMOSWAP_ACQUIRE, + AMOSWAP_RELEASE, +} amoswapKind_t; + +#define _swap_atomic(__val, __new_value, __kind) \ + ({ \ + uint64_t result; \ + switch (__kind) { \ + case AMOSWAP_RELEASE: \ + __asm__ __volatile__("amoswap.d.rl %0, %2, %1" \ + : "=r"(result), "+A"(*__val) \ + : "r"(__new_value) \ + : "memory"); \ + break; \ + case AMOSWAP_ACQUIRE: \ + __asm__ __volatile__("amoswap.d.aq %0, %2, %1" \ + : "=r"(result), "+A"(*__val) \ + : "r"(__new_value) \ + : "memory"); \ + break; \ + default: \ + goto fail; \ + } \ + result; \ + }) + +#define _acquire_lock(__lock, __swap_atomic) \ + ({ \ + disable_checktc(); \ + while (1) { \ + if (*(volatile uint64_t *)__lock) { \ + continue; \ + } \ + if (__swap_atomic(__lock, 1, AMOSWAP_ACQUIRE) == 0) { \ + break; \ + } \ + } \ + enable_checktc(); \ + }) + +#define _release_lock(__lock, __swap_atomic) \ + __swap_atomic(__lock, 0, AMOSWAP_RELEASE) diff --git a/include/common/lock.mmode.h b/include/common/lock.mmode.h new file mode 100644 index 00000000..94f88ba4 --- /dev/null +++ b/include/common/lock.mmode.h @@ -0,0 +1,13 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once +#include +typedef uint64_t spinlock_t; + +void m_acquire_lock(spinlock_t *lock); + +void m_release_lock(spinlock_t *lock); diff --git a/include/common/lock.smode.h b/include/common/lock.smode.h index ca0cdb25..dad4265b 100644 --- a/include/common/lock.smode.h +++ b/include/common/lock.smode.h @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #pragma once #include diff --git a/include/common/tablewalk.smode.h b/include/common/tablewalk.smode.h index 39cdbf5b..489c5efa 100644 --- a/include/common/tablewalk.smode.h +++ b/include/common/tablewalk.smode.h @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #pragma once @@ -8,14 +10,17 @@ #define MAX_NUM_PAGE_TABLE_LEVELS 4 -struct translation_info { - uint8_t satp_mode; - uint8_t levels_traversed; - uint8_t walk_successful; +struct __attribute__((packed)) translation_info { uint64_t va; uint64_t pa; uint64_t pte_address[MAX_NUM_PAGE_TABLE_LEVELS]; uint64_t pte_value[MAX_NUM_PAGE_TABLE_LEVELS]; + uint8_t xatp_mode; + uint8_t levels_traversed; + uint8_t walk_successful; + uint8_t pbmt_mode; }; +void translate_GVA(uint64_t gva, struct translation_info *xlate_info); +void translate_GPA(uint64_t gpa, struct translation_info *xlate_info); void translate_VA(uint64_t va, struct translation_info *xlate_info); diff --git a/include/common/time.mmode.h b/include/common/time.mmode.h new file mode 100644 index 00000000..6329bbd0 --- /dev/null +++ b/include/common/time.mmode.h @@ -0,0 +1,19 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include + +/** + * @brief Delays execution by the specified number of microseconds (M-mode) + * + * The function delays the execution of the program by (twiddling thumbs for) + * the number of microseconds provided as a parameter. 
+ * + * @param delay_in_useconds Number of microseconds to delay execution + */ +void delay_us_from_mmode(uint32_t delay_in_useconds); diff --git a/include/common/time.smode.h b/include/common/time.smode.h new file mode 100644 index 00000000..fe0c34f3 --- /dev/null +++ b/include/common/time.smode.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include +#include + +/** + * @brief Delays execution by the specified number of microseconds (S-mode) + * + * The function delays the execution of the program by (twiddling thumbs for) + * the number of microseconds provided as a parameter. + * + * @param delay_in_useconds Number of microseconds to delay execution + */ +void delay_us_from_smode(uint32_t delay_in_useconds); + +/** + * @brief Get current time in seconds since epoch (S-mode) + * + * @param tloc Pointer to store the time, or NULL to just return the time + * @return Current time in seconds since epoch, or (time_t)-1 on error + */ +time_t time(time_t *tloc); diff --git a/include/common/uart.h b/include/common/uart.h new file mode 100644 index 00000000..a1cb7152 --- /dev/null +++ b/include/common/uart.h @@ -0,0 +1,57 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#define _puts(__uart_initialized, __putch, __str) \ + ({ \ + if (__uart_initialized == 0) { \ + return 0; \ + } \ + \ + int __count = 0; \ + \ + while (*__str != '\0') { \ + __putch(*__str); \ + __count++; \ + __str++; \ + } \ + \ + __count; \ + }) + +#define VPRINTK_BUFFER_SIZE 1024 + +#define _vprintk(__puts, __fmt, __args) \ + ({ \ + static char __buf[VPRINTK_BUFFER_SIZE]; \ + int __rc, __ret; \ + __rc = vsnprintf(__buf, sizeof(__buf), __fmt, __args); \ + if (__rc > (int)sizeof(__buf)) { \ + __puts("vprintk() buffer overflow\n"); \ + __ret = -1; \ + } else { \ + __ret = __puts(__buf); \ + } \ + __ret; \ + }) + +#define _printk(__printk_lock, __acquire_lock, __release_lock, \ + __uart_initialized, _vprintk, __fmt) \ + ({ \ + if (__uart_initialized == 0) { \ + return 0; \ + } \ + va_list __args; \ + int __rc; \ + __acquire_lock(&__printk_lock); \ + va_start(__args, __fmt); \ + __rc = _vprintk(__fmt, __args); \ + va_end(__args); \ + __release_lock(&__printk_lock); \ + \ + __rc; \ + }) diff --git a/include/common/uart.mmode.h b/include/common/uart.mmode.h new file mode 100644 index 00000000..50615e8b --- /dev/null +++ b/include/common/uart.mmode.h @@ -0,0 +1,10 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +void m_putch(const char c); +int m_puts(const char *str); diff --git a/include/common/uart.smode.h b/include/common/uart.smode.h index da76cd8e..7f48d2cd 100644 --- a/include/common/uart.smode.h +++ b/include/common/uart.smode.h @@ -1,9 +1,12 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #pragma once +int putch(const char c); int puts(const char *str); int printk(const char *fmt, ...) 
__attribute__((format(printf, 1, 2))); diff --git a/include/common/utils.mmode.h b/include/common/utils.mmode.h index 141d1607..86cbdb15 100644 --- a/include/common/utils.mmode.h +++ b/include/common/utils.mmode.h @@ -1,6 +1,10 @@ -// SPDX-FileCopyrightText: 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once #include diff --git a/include/common/utils.smode.h b/include/common/utils.smode.h index 6f19aa80..7fa0c0f0 100644 --- a/include/common/utils.smode.h +++ b/include/common/utils.smode.h @@ -1,6 +1,10 @@ -// SPDX-FileCopyrightText: 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once #include diff --git a/include/meson.build b/include/meson.build index edb9412b..0df3fe75 100644 --- a/include/meson.build +++ b/include/meson.build @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/justfile b/justfile new file mode 100644 index 00000000..3aac1f4f --- /dev/null +++ b/justfile @@ -0,0 +1,83 @@ +# SPDX-FileCopyrightText: 2024 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +# Provides targets to build and run the jumpstart unit tests for development +# and CI purposes. 
+ +# To build and run the unit tests with all possible configurations: +# just test-all + +# To target a particular configuration: +# just --set num_test_processes {{num_test_processes}} test +# Examples: +# just --set num_test_processes {{num_test_processes}} test gcc release spike +# just --set num_test_processes {{num_test_processes}} test gcc debug spike + +# build and test targets can be run individually +# Examples: +# just build gcc release spike +# just test gcc release spike + +# To limit the number of parallel test jobs pass --set num_test_processes +# Example: +# just --set num_test_processes 10 test-all + +num_test_processes := "max" + +default: + @just test-all + +setup compiler buildtype target: + @# For fw-none boot_config, priv modes and diag attributes are empty (defaults) + meson setup {{compiler}}-{{buildtype}}-{{target}}-public-fw-none.builddir --cross-file cross_compile/public/{{compiler}}_options.txt --cross-file cross_compile/{{compiler}}.txt --buildtype {{buildtype}} -Drun_target={{target}} -Dboot_config=fw-none -Drivos_internal_build=false + +build compiler buildtype target: (setup compiler buildtype target) + meson compile -C {{compiler}}-{{buildtype}}-{{target}}-public-fw-none.builddir + +test compiler buildtype target: (build compiler buildtype target) + @case {{num_test_processes}} in \ + max) \ + num_processes_option=""; \ + ;; \ + *) \ + num_processes_option="-j "{{num_test_processes}}""; \ + ;; \ + esac; \ + meson test -C {{compiler}}-{{buildtype}}-{{target}}-public-fw-none.builddir $num_processes_option + +clean_internal compiler buildtype target: + rm -rf {{compiler}}-{{buildtype}}-{{target}}-public-fw-none.builddir + +build-all-spike-gcc: + @just build gcc debug spike + @just build gcc release spike + +build-all-spike: + @just build-all-spike-gcc + +build-all: + @just build-all-spike + +build-all-gcc: + @just build-all-spike-gcc + +test-all-spike-gcc: + @just --set num_test_processes {{num_test_processes}} test gcc debug spike + 
@just --set num_test_processes {{num_test_processes}} test gcc release spike + +test-all-spike: + @just test-all-spike-gcc + +test-all-public: + @just --set num_test_processes {{num_test_processes}} test gcc debug spike + @just --set num_test_processes {{num_test_processes}} test gcc release spike + +test-all: + @just test-all-spike + +test-all-gcc: + @just test-all-spike-gcc + +clean: + rm -rf *.builddir diff --git a/meson.build b/meson.build index 5019a60f..b07e128e 100644 --- a/meson.build +++ b/meson.build @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 @@ -8,9 +8,18 @@ project('JumpStart', 'c', 'werror=true', 'b_ndebug=if-release', ], - meson_version: '>=1.0.3' + meson_version: '>=1.3.0' ) +test_env = environment() + +# Check compiler support for mcmodel options +cc = meson.get_compiler('c') +mcmodel = get_option('mcmodel') +if not cc.has_argument('-mcmodel=' + mcmodel) + error('Selected mcmodel=' + mcmodel + ' but compiler does not support it. Please use a different mcmodel option.') +endif + add_project_arguments('-Wno-pedantic', # Require that all enums are covered by a switch statement. '-Wswitch-enum', @@ -20,78 +29,36 @@ add_project_arguments('-Wno-pedantic', # Let GCC know we are using our own malloc/calloc implementation. Otherwise # it makes assumptions about using it's own. 
'-fno-builtin', - '-mcmodel=medany', + '-mcmodel=' + mcmodel, + '-g', language: 'c') +diag_custom_defines = get_option('diag_custom_defines') +foreach diag_custom_define : diag_custom_defines + add_project_arguments('-D' + diag_custom_define, language : 'c') +endforeach +default_c_args = [] -jumpstart_source_attribute_overrides = get_option('jumpstart_source_attribute_overrides') diag_attribute_overrides = get_option('diag_attribute_overrides') -compatible_priv_modes = [] -if get_option('boot_config') == 'fw-none' - compatible_priv_modes = get_option('riscv_priv_modes_enabled') -elif get_option('boot_config') == 'fw-m' - compatible_priv_modes = ['mmode', 'smode', 'umode'] - jumpstart_source_attribute_overrides += ['diag_entry_label=_mmode_start'] - diag_attribute_overrides += ['mmode_start_address=' + get_option('mmode_start_address')] -elif get_option('boot_config') == 'fw-sbi' - compatible_priv_modes = ['smode', 'umode'] - jumpstart_source_attribute_overrides += ['diag_entry_label=sbi_firmware_trampoline'] - diag_attribute_overrides += ['smode_start_address=' + get_option('smode_start_address')] -else - error('Invalid boot_config value') +if get_option('boot_config') != 'fw-none' + error('Invalid boot_config value. 
Only fw-none is supported.') endif -riscv_priv_modes_enabled = [] -foreach mode: get_option('riscv_priv_modes_enabled') - if compatible_priv_modes.contains(mode) - riscv_priv_modes_enabled += [mode] - endif -endforeach - +riscv_priv_modes_enabled = get_option('riscv_priv_modes_enabled') subdir('src') subdir('include') prog_python = find_program('python3') -jumpstart_source_generator = files('scripts/generate_jumpstart_sources.py') - -jumpstart_source_generator_inputs = [jumpstart_source_generator, jumpstart_source_attributes_yaml] -jumpstart_source_generator_expected_outputs = ['jumpstart_defines.h', 'jumpstart_data_structures.h', 'jumpstart_data_structures.S'] -jumpstart_source_generator_command = [prog_python, - '@INPUT0@', - '--defines_file', '@OUTPUT0@', - '--data_structures_file', '@OUTPUT1@', - '--assembly_file', '@OUTPUT2@', - '--jumpstart_source_attributes_yaml', '@INPUT1@', - '--priv_modes_enabled', riscv_priv_modes_enabled - ] - -override_jumpstart_source_attributes_parameter = [] -if jumpstart_source_attribute_overrides.length() > 0 - override_jumpstart_source_attributes_parameter += ['--override_jumpstart_source_attributes'] - - foreach override : jumpstart_source_attribute_overrides - override_jumpstart_source_attributes_parameter += [override] - endforeach -endif -jumpstart_source_generator_command += override_jumpstart_source_attributes_parameter - -jumpstart_source_generator_outputs = custom_target( - 'Generate jumpstart sources for build', - input : jumpstart_source_generator_inputs, - output: jumpstart_source_generator_expected_outputs, - command: jumpstart_source_generator_command) - -jumpstart_sources += jumpstart_source_generator_outputs diag_source_generator = files('scripts/generate_diag_sources.py') diag_sources = get_option('diag_sources') diag_attributes_yaml = get_option('diag_attributes_yaml') -if get_option('diag_target') == 'spike' +if get_option('run_target') == 'spike' spike = find_program(get_option('spike_binary')) spike_isa_string 
= get_option('spike_isa_string') @@ -105,7 +72,7 @@ if get_option('diag_target') == 'spike' else if spike_isa_string == '' - spike_isa_string = 'rv64gcvh_zbb_zbs_zkr_svpbmt' + spike_isa_string = 'rv64gcvh_zba_zbb_zbs_zkr_svpbmt_smstateen_zicntr' endif default_spike_args += ['--misaligned'] @@ -123,19 +90,6 @@ if get_option('diag_target') == 'spike' if get_option('spike_additional_arguments').length() > 0 default_spike_args += get_option('spike_additional_arguments') endif - -elif get_option('diag_target') == 'qemu' - qemu_binary = rivos_qemu_binary - if get_option('qemu_binary') != '' - qemu_binary = get_option('qemu_binary') - endif - qemu = find_program(qemu_binary) - - default_qemu_args = rivos_qemu_args - - if get_option('qemu_additional_arguments').length() > 0 - default_qemu_args += get_option('qemu_additional_arguments') - endif endif objdump = find_program('objdump') @@ -148,15 +102,10 @@ diag_source_generator_command = [prog_python, '--output_assembly_file', '@OUTPUT0@', '--output_defines_file', '@OUTPUT2@', '--output_linker_script', '@OUTPUT1@', + '--output_data_structures_file', '@OUTPUT3@', '--priv_modes_enabled', riscv_priv_modes_enabled ] -if get_option('diag_target') == 'qemu' - diag_attribute_overrides += ['in_qemu_mode=True'] -endif - -diag_source_generator_command += override_jumpstart_source_attributes_parameter - if diag_attribute_overrides.length() > 0 diag_source_generator_command += ['--override_diag_attributes'] @@ -173,17 +122,20 @@ if diag_attributes_yaml != '' and diag_sources.length() > 0 input : diag_source_generator_common_inputs + [diag_attributes_yaml], output : [diag_name + '.generated.S', diag_name + '.linker_script.ld', - diag_name + '.defines.h'], + diag_name + '.defines.h', + diag_name + '.data_structures.h', + ], command : diag_source_generator_command) diag_sources += diag_source_generator_output[0] linker_script = diag_source_generator_output[1] diag_defines = diag_source_generator_output[2] + diag_data_structures = 
diag_source_generator_output[3] - diag_exe = executable(diag_name, + diag_exe = executable(diag_name + '.elf', sources: [jumpstart_sources, diag_sources], include_directories: jumpstart_includes, - c_args: ['-include', diag_defines.full_path()], + c_args: default_c_args + ['-include', diag_defines.full_path(), '-include', diag_data_structures.full_path()], link_args: ['-T' + linker_script.full_path()], link_depends: linker_script, dependencies: declare_dependency(sources: diag_defines) @@ -198,28 +150,27 @@ if diag_attributes_yaml != '' and diag_sources.length() > 0 depends : [diag_exe]) endif - if get_option('diag_target') == 'spike' - test('🧪 ' + diag_name, - spike, - args : [default_spike_args, diag_exe], - timeout: get_option('spike_timeout'), - is_parallel : false) # Parallel runs of the test turns off terminal - # feedback and requires `reset` to be run to - # restore the terminal. - elif get_option('diag_target') == 'qemu' - qemu_args = default_qemu_args + trace_file = diag_name + '.itrace' + + if get_option('run_target') == 'spike' + spike_args = default_spike_args if get_option('generate_trace') == true - qemu_args += [ - '--var', 'ap-logfile:' + diag_name + '.trace', - '--var', 'out:' + meson.current_build_dir() - ] + spike_args += ['--log=' + trace_file] endif + target = spike + args = [spike_args, diag_exe] + timeout = get_option('spike_timeout') + test('🧪 ' + diag_name, - qemu, - timeout: get_option('qemu_timeout'), - args : [qemu_args, '--var', 'ap-payload:' + diag_exe.full_path()]) + target, + args : args, + timeout: timeout, + depends: diag_exe, + should_fail: false, + env: test_env + ) endif else diff --git a/meson.options b/meson.options index df4a3442..a5853ac9 100644 --- a/meson.options +++ b/meson.options @@ -25,15 +25,15 @@ option('diag_generate_disassembly', value : false, description : 'Generate diag disassembly.') -option('diag_target', +option('run_target', type : 'combo', - choices: ['spike', 'qemu'], + choices: ['spike'], value : 
'spike', description : 'Target to build the diag for.') -option('jumpstart_source_attribute_overrides', +option('diag_custom_defines', type : 'array', - description : 'Overrides specified JumpStart source attributes.') + description : 'Custom diag specific defines.') option('riscv_priv_modes_enabled', type : 'array', @@ -42,23 +42,13 @@ option('riscv_priv_modes_enabled', option('boot_config', type : 'combo', - choices: ['fw-none', 'fw-m', 'fw-sbi'], + choices: ['fw-none'], value : 'fw-none', description : 'Select Fw to run before handover to jumpstart. \n' + '- fw-none : expects direct entry into jumpstart from hardware reset without fw. \n' + - '- fw-m : expects handover to jumpstart in mmode (Non-resident fw). \n' + - '- fw-sbi : expects handover to jumpstart in smode with sbi interface (Resident M-Mode fw). \n' + 'Note: highest privilege level in jumpstart binary will be the entry priv level' ) -option('smode_start_address', - type : 'string', - value : '0x90000000', - description : 'Address to place the smode code.') -option('mmode_start_address', - type : 'string', - value : '0x90000000', - description : 'Address to place the mmode code.') option('spike_binary', type : 'string', @@ -79,20 +69,6 @@ option('spike_timeout', value : 30, description : 'meson test timeout when running the tests on spike.') -option('qemu_binary', - type : 'string', - value : '', - description : 'QEMU binary to use') - -option('qemu_additional_arguments', - type : 'array', - description : 'Additional arguments to pass to qemu when running the diag.') - -option('qemu_timeout', - type : 'integer', - value : 300, - description : 'meson test timeout when running the tests on QEMU.') - option('generate_trace', type : 'boolean', value : false, @@ -102,3 +78,18 @@ option('rivos_internal_build', type : 'boolean', value : false, description : 'Build the Rivos internal version of JumpStart.') + +option('soc_rev', + type : 'combo', + choices: ['A0', 'B0'], + value : 'A0', + description : 'SOC 
Revision.') + +option('mcmodel', + type : 'combo', + choices: ['medlow', 'medany', 'large'], + value : 'medany', + description : 'RISC-V code model to use. \n' + + '- medlow : Code and data must be within 2GB of the program counter. \n' + + '- medany : Code and data must be within 2GB of the program counter or global pointer. \n' + + '- large : No restrictions on code and data placement.') diff --git a/pyproject.toml b/pyproject.toml index 211c1672..31e986c0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/scripts/build_diag.py b/scripts/build_diag.py index ba0c71e4..882866ec 100755 --- a/scripts/build_diag.py +++ b/scripts/build_diag.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 @@ -9,12 +9,40 @@ import argparse import logging as log import os +from typing import Dict -from build_tools import DiagBuildTarget, build_jumpstart_diag +import yaml +from build_tools import DiagFactory, Meson +from build_tools.environment import get_environment_manager def main(): - parser = argparse.ArgumentParser(description=__doc__) + env_parser = argparse.ArgumentParser(description=__doc__, add_help=False) + env_manager = get_environment_manager() + env_names = sorted(env_manager.list_visible_environments().keys()) + env_help = f"Environment to build for. Available environments: {', '.join(env_names)}" + + env_parser.add_argument( + "--environment", + "-e", + help=env_help, + required=False, + type=str, + default=None, + choices=env_names, + ) + env_parser.add_argument( + "--target", + "-t", + help="[DEPRECATED] Use --environment instead. 
Target to build for.", + required=False, + type=str, + default=None, + choices=env_names, + ) + env_args, _ = env_parser.parse_known_args() + + parser = argparse.ArgumentParser(description=__doc__, parents=[env_parser]) parser.add_argument( "--jumpstart_dir", help="Jumpstart directory", @@ -22,66 +50,85 @@ def main(): type=str, default=f"{os.path.dirname(os.path.realpath(__file__))}/..", ) - parser.add_argument( + # Allow either a list of source directories or a YAML manifest + input_group = parser.add_mutually_exclusive_group(required=False) + input_group.add_argument( "--diag_src_dir", "-d", - help="Directory containing jumpstart diag to build.", - required=True, + "--diag_src", + help="One or more directories containing jumpstart diags to build. If provided, a YAML plan will be generated automatically.", + nargs="+", + type=str, + ) + input_group.add_argument( + "--build_manifest", + help="Path to a YAML manifest with a top-level 'diagnostics' mapping for DiagFactory.", type=str, ) + parser.add_argument( + "--include_diags", + help=( + "Limit build to only the specified diagnostics present in the provided build manifest. " + "Only valid with --build_manifest and incompatible with --diag_src_dir." + ), + nargs="+", + type=str, + default=None, + ) + parser.add_argument( + "--exclude_diags", + help=( + "Exclude the specified diagnostics from the provided build manifest. " + "Only valid with --build_manifest and incompatible with --diag_src_dir." + ), + nargs="+", + type=str, + default=None, + ) parser.add_argument( "--buildtype", help="--buildtype to pass to meson setup.", type=str, - default="release", - choices=["release", "debug"], + default=None, + choices=["release", "minsize", "debug", "debugoptimized"], ) parser.add_argument( "--override_meson_options", - help="Meson options to override.", + "--override_meson", + help="Override the meson options from meson.options. 
Format: 'key=value' (e.g., 'generate_trace=true').", required=False, nargs="+", - default=None, + default=[], ) parser.add_argument( "--override_diag_attributes", - help="Diag attributes to override.", + help="Override the diag attributes specified in the diag's attributes file. Format: 'key=value' (e.g., 'active_cpu_mask=0b1').", required=False, nargs="+", - default=None, + default=[], ) parser.add_argument( - "--active_hart_mask_override", - "-c", - help="Override the default hart mask for the diag.", + "--diag_custom_defines", + help="Set diag specific defines. Format: 'NAME=VALUE' (e.g., 'USE_L2PMU=1').", required=False, - type=str, + nargs="+", default=None, ) parser.add_argument( - "--target", - "-t", - help="Target to build for.", + "--active_cpu_mask_override", + "-c", + help="Override the default CPU mask for the diag.", required=False, type=str, - default="spike", - choices=DiagBuildTarget.supported_targets, + default=None, ) parser.add_argument( "--toolchain", - help=f"Toolchain to build diag with. Options: {DiagBuildTarget.supported_toolchains}.", + help=f"Toolchain to build diag with. Options: {Meson.supported_toolchains}.", required=False, type=str, default="gcc", - choices=DiagBuildTarget.supported_toolchains, - ) - parser.add_argument( - "--boot_config", - help=f"Boot Config to build diag for. 
Options: {DiagBuildTarget.supported_boot_configs}.", - required=False, - type=str, - default="fw-none", - choices=DiagBuildTarget.supported_boot_configs, + choices=Meson.supported_toolchains, ) parser.add_argument( "--disable_diag_run", @@ -91,8 +138,9 @@ def main(): ) parser.add_argument( "--diag_build_dir", + "--diag_build", help="Directory to place built diag in.", - required=True, + required=False, type=str, ) parser.add_argument( @@ -105,40 +153,234 @@ def main(): "--rng_seed", help="RNG seed for the diag builder.", required=False, - type=int, + type=lambda x: int(x, 0), + default=None, + ) + parser.add_argument( + "--custom_rcode_bin", + help="Path to custom r-code binary to replace jumpstart r-code.", + required=False, + type=str, default=None, ) parser.add_argument( "-v", "--verbose", help="Verbose output.", action="store_true", default=False ) + parser.add_argument( + "-j", + "--jobs", + help="Number of parallel compile jobs.", + required=False, + type=int, + default=5, + ) + + final_target = env_args.environment if env_args.environment else env_args.target + if final_target and "oswis" in final_target: + # OSWIS-only arguments + oswis = parser.add_argument_group("OSWIS-only arguments") + oswis.add_argument( + "--oswis_additional_arguments", + help="Additional arguments to pass to OSWIS when running the diag.", + nargs="*", + default=[], + ) + oswis.add_argument( + "--oswis_emulation_model", + help="Emulation model to use when running the tests with OSWIS.", + type=str, + default="work_core", + ) + oswis.add_argument( + "--oswis_diag_timeout", + help="Meson test timeout when running the tests with OSWIS.", + type=int, + default=3000, + ) + oswis.add_argument( + "--oswis_timeout", + help="Emulator timeout when running the tests with OSWIS.", + type=int, + default=10000000000, + ) + oswis.add_argument( + "--oswis_firmware_tarball", + help="Path to a tarball containing the boot firmware for OSWIS SCS models.", + type=str, + default="", + ) + args = 
parser.parse_args() + # Handle backward compatibility for --target + if args.target is not None: + import warnings + + warnings.warn( + "--target is deprecated and will be removed in a future version. Use --environment instead.", + DeprecationWarning, + stacklevel=2, + ) + # If both --target and --environment are specified, error out + if args.environment is not None: + parser.error( + "Cannot specify both --target and --environment. Use --environment instead." + ) + # Use target value as environment if environment is not specified + args.environment = args.target + + # Validate required arguments for normal operation + if not args.diag_src_dir and not args.build_manifest: + parser.error("Either --diag_src_dir or --build_manifest is required") + + if not args.diag_build_dir: + parser.error("--diag_build_dir is required") + + if args.environment is None: + parser.error("--environment must be specified") + if args.verbose: log.basicConfig(format="%(levelname)s: [%(threadName)s]: %(message)s", level=log.DEBUG) else: log.basicConfig(format="%(levelname)s: [%(threadName)s]: %(message)s", level=log.INFO) - diag_build_target = DiagBuildTarget( - args.diag_src_dir, - args.diag_build_dir, - args.buildtype, - args.target, - args.toolchain, - args.boot_config, - args.rng_seed, - args.active_hart_mask_override, - args.override_meson_options, - args.override_diag_attributes, - ) + script_meson_option_overrides = {} + + if args.buildtype is not None: + args.override_meson_options.append(f"buildtype={args.buildtype}") + + if args.custom_rcode_bin is not None: + args.override_meson_options.append(f"custom_rcode_bin={args.custom_rcode_bin}") + + if args.active_cpu_mask_override is not None: + args.override_diag_attributes.append(f"active_cpu_mask={args.active_cpu_mask_override}") + + # Enforce argument compatibility for include/exclude options + if args.include_diags is not None and args.build_manifest is None: + raise SystemExit("--include_diags can only be used with 
--build_manifest.") + if args.exclude_diags is not None and args.build_manifest is None: + raise SystemExit("--exclude_diags can only be used with --build_manifest.") + + # Determine the build manifest YAML path: either provided manifest or a generated plan + build_manifest_yaml = None + if args.build_manifest is not None: + build_manifest_yaml_file = os.path.abspath(args.build_manifest) + build_manifest_yaml = yaml.safe_load(open(build_manifest_yaml_file)) + if args.include_diags is not None or args.exclude_diags is not None: + if ( + not isinstance(build_manifest_yaml, dict) + or "diagnostics" not in build_manifest_yaml + ): + raise SystemExit( + "Provided build manifest is missing the required top-level 'diagnostics' mapping" + ) + diagnostics_full = build_manifest_yaml.get("diagnostics", {}) + filtered_diags = diagnostics_full.copy() + # Apply include first (if provided) + if args.include_diags is not None: + filtered_diags = {} + for diag_name in args.include_diags: + if diag_name not in diagnostics_full: + raise SystemExit( + f"--include_diags specified '{diag_name}' which is not present in the provided build manifest" + ) + filtered_diags[diag_name] = diagnostics_full[diag_name] + # Then apply exclude (if provided) + if args.exclude_diags is not None: + for diag_name in args.exclude_diags: + if diag_name not in diagnostics_full: + raise SystemExit( + f"--exclude_diags specified '{diag_name}' which is not present in the provided build manifest" + ) + if diag_name in filtered_diags: + del filtered_diags[diag_name] + build_manifest_yaml["diagnostics"] = filtered_diags + else: + # Use the directory name as the diag name (no disambiguation) and error on duplicates + diag_name_to_dir: Dict[str, str] = {} + for path in args.diag_src_dir: + name = os.path.basename(os.path.normpath(path)) or "diag" + if name in diag_name_to_dir: + existing = diag_name_to_dir[name] + raise SystemExit( + f"Found multiple diags with the same name derived from directory basenames. 
Please ensure unique names. Conflict: {name}: [{existing}, {path}]" + ) + diag_name_to_dir[name] = path + + build_manifest_yaml = {"diagnostics": {}} + for diag_name, src_dir in diag_name_to_dir.items(): + build_manifest_yaml["diagnostics"][diag_name] = {"source_dir": src_dir} + + # Add the script default to the meson options in the build manifest. + for key, value in script_meson_option_overrides.items(): + if "global_overrides" not in build_manifest_yaml: + build_manifest_yaml["global_overrides"] = {} + if "override_meson_options" not in build_manifest_yaml["global_overrides"]: + build_manifest_yaml["global_overrides"]["override_meson_options"] = [] + build_manifest_yaml["global_overrides"]["override_meson_options"].insert( + 0, f"{key}={value}" + ) + + # Ensure OSWIS-specific arguments exist in args, even if not set by the parser + if not hasattr(args, "oswis_additional_arguments"): + args.oswis_additional_arguments = [] + if not hasattr(args, "oswis_emulation_model"): + args.oswis_emulation_model = "" + if not hasattr(args, "oswis_diag_timeout"): + args.oswis_diag_timeout = 0 + if not hasattr(args, "oswis_timeout"): + args.oswis_timeout = 0 + if not hasattr(args, "oswis_firmware_tarball"): + args.oswis_firmware_tarball = "" + + # Get the environment object + try: + environment = env_manager.get_environment(args.environment) + except Exception as e: + raise Exception(f"Failed to get environment object for {args.environment}: {e}") - generated_diag = build_jumpstart_diag( - args.jumpstart_dir, - diag_build_target, - args.disable_diag_run, - args.keep_meson_builddir, + if args.disable_diag_run is True: + environment.run_target = None + + factory = DiagFactory( + build_manifest_yaml=build_manifest_yaml, + root_build_dir=args.diag_build_dir, + environment=environment, + toolchain=args.toolchain, + rng_seed=args.rng_seed, + jumpstart_dir=args.jumpstart_dir, + keep_meson_builddir=args.keep_meson_builddir, + jobs=args.jobs, + 
cli_meson_option_overrides=args.override_meson_options, + cli_diag_attribute_overrides=args.override_diag_attributes, + cli_diag_custom_defines=args.diag_custom_defines, + oswis_additional_arguments=args.oswis_additional_arguments, + oswis_emulation_model=args.oswis_emulation_model, + oswis_diag_timeout=args.oswis_diag_timeout, + oswis_timeout=args.oswis_timeout, + oswis_firmware_tarball=args.oswis_firmware_tarball, ) - log.info(f"Diag built: {generated_diag}") + try: + factory.compile_all() + + if environment.run_target is None: + log.info( + f"Skipping diag run: environment '{environment.name}' has no run_target (build-only environment)" + ) + elif environment.run_target is not None: + factory.run_all() + + except Exception as exc: + # Ensure we always print a summary before exiting + try: + factory.summarize() + except Exception: + pass + log.error(str(exc)) + raise SystemExit(1) + + factory.summarize() if __name__ == "__main__": diff --git a/scripts/build_tools/__init__.py b/scripts/build_tools/__init__.py index cdb801e7..33040ed7 100644 --- a/scripts/build_tools/__init__.py +++ b/scripts/build_tools/__init__.py @@ -1,11 +1,12 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 # __init__.py -from .diag import DiagBuildTarget, DiagSource -from .meson import build_jumpstart_diag +from .diag import AssetAction, DiagBuildUnit, DiagSource +from .diag_factory import DiagFactory +from .meson import Meson # PEP8 guideline: # https://peps.python.org/pep-0008/#public-and-internal-interfaces @@ -13,7 +14,9 @@ # the names in their public API using the __all__ attribute. 
__all__ = [ + "AssetAction", "DiagSource", - "DiagBuildTarget", - "build_jumpstart_diag", + "DiagBuildUnit", + "Meson", + "DiagFactory", ] diff --git a/scripts/build_tools/diag.py b/scripts/build_tools/diag.py index a6618625..ad74f5ef 100644 --- a/scripts/build_tools/diag.py +++ b/scripts/build_tools/diag.py @@ -1,17 +1,30 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 +import enum import logging as log import os +import random import shutil -import sys +import time +from typing import Any, List, Optional import yaml - -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))) from system import functions as system_functions # noqa +from .environment import get_environment_manager # noqa +from .meson import Meson, MesonBuildError # noqa + + +def convert_cpu_mask_to_num_active_cpus(cpu_mask): + num_cpus = 0 + cpu_mask = int(cpu_mask, 2) + while cpu_mask != 0: + num_cpus += 1 + cpu_mask >>= 1 + return num_cpus + class DiagSource: source_file_extensions = [".c", ".S"] @@ -21,12 +34,13 @@ class DiagSource: ] meson_options_override_yaml_extensions = ["meson_option_overrides.yaml"] - def __init__(self, diag_src_dir) -> None: + def __init__(self, diag_src_dir: str) -> None: self.diag_src_dir = os.path.abspath(diag_src_dir) + self.original_path = diag_src_dir # Store the original path as provided if not os.path.exists(self.diag_src_dir): raise Exception(f"Diag source directory does not exist: {self.diag_src_dir}") - self.diag_sources = system_functions.find_files_with_extensions_in_dir( + self.diag_sources: List[str] = system_functions.find_files_with_extensions_in_dir( self.diag_src_dir, self.source_file_extensions ) if len(self.diag_sources) == 0: @@ -47,8 +61,10 @@ def __init__(self, diag_src_dir) -> None: ) self.diag_attributes_yaml = self.diag_attributes_yaml[0] - self.meson_options_override_yaml = 
system_functions.find_files_with_extensions_in_dir( - self.diag_src_dir, self.meson_options_override_yaml_extensions + self.meson_options_override_yaml: Optional[str] = ( + system_functions.find_files_with_extensions_in_dir( + self.diag_src_dir, self.meson_options_override_yaml_extensions + ) ) if len(self.meson_options_override_yaml) > 1: raise Exception( @@ -59,36 +75,25 @@ def __init__(self, diag_src_dir) -> None: else: self.meson_options_override_yaml = None - self.diag_name = os.path.basename(os.path.normpath(self.diag_src_dir)) - - self.active_hart_mask = None - with open(self.get_diag_attributes_yaml()) as f: - diag_attributes = yaml.safe_load(f) - if "active_hart_mask" in diag_attributes: - log.debug( - f"Found active_hart_mask specified by diag: {diag_attributes['active_hart_mask']}" - ) - self.active_hart_mask = diag_attributes["active_hart_mask"] - def __str__(self) -> str: - return f"\t\tDiag: {self.diag_name}, Source Path: {self.diag_src_dir}\n\t\tSources: {self.diag_sources}\n\t\tAttributes: {self.diag_attributes_yaml}\n\t\tMeson options overrides file: {self.meson_options_override_yaml}" + return f"\t\tDiag: Source Path: {self.diag_src_dir}\n\t\tSources: {self.diag_sources}\n\t\tAttributes: {self.diag_attributes_yaml}\n\t\tMeson options overrides file: {self.meson_options_override_yaml}" - def get_name(self): - return self.diag_name - - def get_diag_src_dir(self): + def get_diag_src_dir(self) -> str: return self.diag_src_dir - def get_sources(self): + def get_original_path(self) -> str: + return self.original_path + + def get_sources(self) -> List[str]: return self.diag_sources - def get_diag_attributes_yaml(self): + def get_diag_attributes_yaml(self) -> str: return self.diag_attributes_yaml - def get_meson_options_override_yaml(self): + def get_meson_options_override_yaml(self) -> Optional[str]: return self.meson_options_override_yaml - def is_valid_source_directory(diag_src_dir): + def is_valid_source_directory(diag_src_dir: str) -> bool: # if we 
can successfully make an object without taking an # exception then we have a valid diag source directory. try: @@ -98,56 +103,640 @@ def is_valid_source_directory(diag_src_dir): return True + def get_attribute_value(self, attribute_name: str) -> Optional[Any]: + with open(self.get_diag_attributes_yaml()) as f: + diag_attributes = yaml.safe_load(f) or {} + return diag_attributes.get(attribute_name) + -class DiagBuildTarget: - supported_targets = ["qemu", "spike"] - supported_toolchains = ["gcc", "llvm"] - supported_boot_configs = ["fw-none", "fw-m", "fw-sbi"] +class AssetAction(enum.IntEnum): + MOVE = 0 + COPY = 1 + NO_COPY = 2 + +class DiagBuildUnit: def __init__( self, - diag_src_dir, + yaml_config: dict, + meson_options_cmd_line_overrides, + diag_attributes_cmd_line_overrides, + diag_custom_defines_cmd_line_overrides, build_dir, - buildtype, - target, + environment, toolchain, - boot_config, - rng_seed, - active_hart_mask_override, + jumpstart_dir, + keep_meson_builddir, + ) -> None: + self._initialize_state() + + self._validate_and_parse_yaml_config(yaml_config) + + # Set up RNG generator. 
+ log.debug(f"DiagBuildUnit: {self.name} Seeding RNG with: {self.rng_seed}") + self.rng: random.Random = random.Random(self.rng_seed) + + self.environment = environment + + self._setup_build_dir(build_dir) + + self._create_meson_instance(toolchain, jumpstart_dir, keep_meson_builddir) + self._apply_meson_option_overrides( + yaml_config, + meson_options_cmd_line_overrides, + diag_attributes_cmd_line_overrides, + diag_custom_defines_cmd_line_overrides, + ) + + def _initialize_state(self) -> None: + """Initialize the build state and status tracking.""" + self.state = enum.Enum("BuildState", "INITIALIZED COMPILED RUN") + self.current_state = self.state.INITIALIZED + # Fine-grained status tracking + self.CompileState = enum.Enum("CompileState", "PENDING PASS FAILED") + self.RunState = enum.Enum("RunState", "PENDING PASS CONDITIONAL_PASS EXPECTED_FAIL FAILED") + self.compile_state = self.CompileState.PENDING + self.compile_error: Optional[str] = None + self.run_state = self.RunState.PENDING + self.run_error: Optional[str] = None + self.expected_fail: bool = False + self.compile_duration_s: Optional[float] = None + self.run_duration_s: Optional[float] = None + self.run_return_code: Optional[int] = None + self.build_assets = {} + + def _validate_and_parse_yaml_config(self, yaml_config: dict) -> None: + """Validate and parse the YAML configuration to extract diag information.""" + if yaml_config is None: + raise Exception("yaml_config is required for DiagBuildUnit") + + # yaml_config must be of the form { : {...}, global_overrides: {...}? } + diag_blocks = {k: v for k, v in yaml_config.items() if k != "global_overrides"} + if len(diag_blocks) != 1: + raise Exception("Expected exactly one per-diag block in yaml_config") + + # Extract the diag name and its config block + self.name, only_block = next(iter(diag_blocks.items())) + resolved_src_dir = only_block.get("source_dir") + if resolved_src_dir is None: + raise Exception( + "Diag source directory not provided. 
Expected 'source_dir' in per-diag YAML." + ) + + self.diag_source: DiagSource = DiagSource(resolved_src_dir) + self.expected_fail: bool = only_block.get("expected_fail", False) + + # Extract rng_seed from the diag config + self.rng_seed: int = only_block.get("rng_seed") + if self.rng_seed is None: + raise Exception("rng_seed is required in per-diag YAML configuration") + + def _setup_build_dir(self, build_dir: str) -> None: + """Set up the build directory and meson build directory.""" + self.build_dir: str = os.path.abspath(build_dir) + system_functions.create_empty_directory(self.build_dir) + + # Create a directory for Meson build directory inside the diag build directory + meson_builddir = os.path.join(self.build_dir, "meson_builddir") + system_functions.create_empty_directory(meson_builddir) + self.meson_builddir = meson_builddir + + def _create_meson_instance( + self, toolchain: str, jumpstart_dir: str, keep_meson_builddir: bool + ) -> None: + """Create the Meson instance for this build unit.""" + self.keep_meson_builddir = keep_meson_builddir + self.meson = Meson( + toolchain, + jumpstart_dir, + self.name, + self.diag_source.get_sources(), + self.diag_source.get_diag_attributes_yaml(), + self.meson_builddir, + ) + + def _apply_meson_option_overrides( + self, + yaml_config: dict, meson_options_cmd_line_overrides, diag_attributes_cmd_line_overrides, + diag_custom_defines_cmd_line_overrides, ) -> None: - self.build_dir = os.path.abspath(build_dir) - self.build_assets = {} - self.diag_source = DiagSource(diag_src_dir) + """Apply meson option overrides in the correct order.""" + # Apply default overrides first + self._apply_default_meson_overrides() + + # Apply environment overrides + self._apply_environment_overrides() - self.buildtype = buildtype - assert target in self.supported_targets - self.target = target - self.rng_seed = rng_seed + # Apply YAML file overrides from source directory + self._apply_source_yaml_overrides() + + # Apply overrides in order: global 
(YAML), diag-specific (YAML), command-line + self._apply_yaml_config_overrides(yaml_config) + + self._apply_command_line_overrides( + meson_options_cmd_line_overrides, + diag_attributes_cmd_line_overrides, + diag_custom_defines_cmd_line_overrides, + ) - assert toolchain in self.supported_toolchains - self.toolchain = toolchain + self._apply_run_target_specific_overrides() - assert boot_config in self.supported_boot_configs - self.boot_config = boot_config + # Deduplicate diag_custom_defines meson option. + # The compiler will error if there are duplicate defines. + self._deduplicate_diag_custom_defines() - if self.target == "spike" and self.boot_config != "fw-none": + def _apply_default_meson_overrides(self) -> None: + """Apply default meson option overrides for run targets.""" + self.meson.override_meson_options_from_dict({"run_target": self.environment.run_target}) + self.meson.override_meson_options_from_dict( + {"diag_attribute_overrides": [f"build_rng_seed={self.rng_seed}"]} + ) + + def _apply_environment_overrides(self) -> None: + """Apply environment-specific overrides based on the environment.""" + try: + # Apply meson option overrides from environment + if self.environment.override_meson_options: + self.meson.override_meson_options_from_dict(self.environment.override_meson_options) + + # Apply diag attribute overrides from environment + if self.environment.override_diag_attributes: + self.meson.override_meson_options_from_dict( + {"diag_attribute_overrides": self.environment.override_diag_attributes} + ) + + except Exception as e: + log.error( + f"Failed to apply environment overrides for environment '{self.environment.name}': {e}" + ) + raise + + def _apply_source_yaml_overrides(self) -> None: + """Apply meson option overrides from diag's YAML file in source directory.""" + meson_yaml_path = self.diag_source.get_meson_options_override_yaml() + if meson_yaml_path is not None: + with open(meson_yaml_path) as f: + overrides_from_yaml = yaml.safe_load(f) + 
self.meson.override_meson_options_from_dict(overrides_from_yaml) + + def _apply_yaml_config_overrides(self, yaml_config: dict) -> None: + """Apply overrides from the YAML configuration.""" + # 1) Global overrides from YAML (if provided as part of yaml_config) + self._apply_yaml_overrides(yaml_config.get("global_overrides")) + + # 2) Diag-specific overrides from YAML (full per-diag block) + self._apply_yaml_overrides(yaml_config.get(self.name)) + + def _apply_command_line_overrides( + self, + meson_options_cmd_line_overrides, + diag_attributes_cmd_line_overrides, + diag_custom_defines_cmd_line_overrides, + ) -> None: + """Apply command-line overrides (applied last).""" + # 3) Command-line overrides applied last + if meson_options_cmd_line_overrides is not None: + from data_structures import DictUtils # local import to avoid cycles + + cmd_overrides_dict = DictUtils.create_dict(meson_options_cmd_line_overrides) + self.meson.override_meson_options_from_dict(cmd_overrides_dict) + + if diag_attributes_cmd_line_overrides: + self.meson.override_meson_options_from_dict( + {"diag_attribute_overrides": diag_attributes_cmd_line_overrides} + ) + + if diag_custom_defines_cmd_line_overrides: + self.meson.override_meson_options_from_dict( + {"diag_custom_defines": list(diag_custom_defines_cmd_line_overrides)} + ) + + def _deduplicate_diag_custom_defines(self) -> None: + """Remove duplicate diag_custom_defines, keeping the last occurrence of each key.""" + existing_defines = self.meson.get_meson_options().get("diag_custom_defines", []) + if not existing_defines: + return + + # Use a dict to naturally handle precedence - last value wins + defines_dict = {} + for entry in existing_defines: + if "=" in entry: + key = entry.split("=", 1)[0] + defines_dict[key] = entry + else: + defines_dict[entry] = entry + + # Convert back to list + deduplicated_defines = list(defines_dict.values()) + + self.meson.meson_options["diag_custom_defines"] = deduplicated_defines + + def 
_apply_run_target_specific_overrides(self) -> None: + """Apply target-specific meson option overrides.""" + if self.environment.run_target == "spike": + self._apply_spike_overrides() + + def _apply_spike_overrides(self) -> None: + """Apply Spike-specific meson option overrides.""" + num_active_cpus = self._calculate_spike_active_cpus() + + spike_overrides = { + "spike_additional_arguments": [ + f"-p{num_active_cpus}", + ], + } + + # Add hartids based on soc_rev and num_active_cpus + soc_rev = self.meson.get_meson_options().get("soc_rev", "A0") + all_hartids = self.get_hart_ids_for_soc(soc_rev) + hartids = all_hartids[:num_active_cpus] + + spike_overrides["spike_additional_arguments"].append(f"--hartids={','.join(hartids)}") + + self.meson.override_meson_options_from_dict(spike_overrides) + + def get_active_cpu_mask(self) -> str: + """Get the final active_cpu_mask value from source attributes and meson overrides. + + Returns the active_cpu_mask as a string (e.g., "0b1", "0b1111"). + Meson overrides take precedence over source attributes. + """ + # Start with the value from source attributes + active_cpu_mask = self.diag_source.get_attribute_value("active_cpu_mask") + if active_cpu_mask is None: + active_cpu_mask = "0b1" # Default value + + # Check for overrides in meson diag_attribute_overrides + for diag_attribute in self.meson.get_meson_options().get("diag_attribute_overrides", []): + if diag_attribute.startswith("active_cpu_mask="): + active_cpu_mask = diag_attribute.split("=", 1)[1] + break + + return active_cpu_mask + + def get_primary_cpu_id(self) -> int: + """Get the primary CPU ID, which is the index of the lowest set bit in the active_cpu_mask. + + Returns the 0-based index of the first set bit in the active_cpu_mask. 
+ For example: + - active_cpu_mask="0b1" -> primary_cpu_id=0 + - active_cpu_mask="0b10" -> primary_cpu_id=1 + - active_cpu_mask="0b101" -> primary_cpu_id=0 + - active_cpu_mask="0b1100" -> primary_cpu_id=2 + """ + active_cpu_mask = self.get_active_cpu_mask() + + # Convert binary string to integer + if active_cpu_mask.startswith("0b"): + cpu_mask_int = int(active_cpu_mask, 2) + else: + cpu_mask_int = int(active_cpu_mask, 2) + + if cpu_mask_int == 0: + raise Exception("No active CPUs: active_cpu_mask is zero") + + # Find the index of the lowest set bit + primary_cpu_id = 0 + while cpu_mask_int & 1 == 0: + cpu_mask_int >>= 1 + primary_cpu_id += 1 + + return primary_cpu_id + + def get_primary_hart_id(self) -> int: + """Get the primary hart ID, which is the hart ID corresponding to the primary CPU ID. + + Returns the hart ID (as integer) for the primary CPU based on soc_rev. + For example: + - For soc_rev="A0" and primary_cpu_id=0 -> hart_id=0 + - For soc_rev="A0" and primary_cpu_id=1 -> hart_id=1 + - For soc_rev="B0" and primary_cpu_id=0 -> hart_id=0 + - For soc_rev="B0" and primary_cpu_id=1 -> hart_id=1 + """ + primary_cpu_id = self.get_primary_cpu_id() + soc_rev = self.meson.get_meson_options().get("soc_rev", "A0") + hart_ids = self.get_hart_ids_for_soc(soc_rev) + + # Ensure we don't go out of bounds + if primary_cpu_id >= len(hart_ids): raise Exception( - f"Invalid boot_config {self.boot_config} for spike. Only fw-none is supported for spike." + f"Primary CPU ID {primary_cpu_id} is out of bounds for soc_rev '{soc_rev}' " + f"which has {len(hart_ids)} hart IDs" ) - self.active_hart_mask_override = active_hart_mask_override + return int(hart_ids[primary_cpu_id]) + + def get_hart_ids_for_soc(self, soc_rev: str) -> List[str]: + """Get the list of hart IDs for a given soc_rev. 
+ + Args: + soc_rev: The SoC revision ("A0" or "B0") + + Returns: + List of hart ID strings for the given soc_rev + + Raises: + Exception: If soc_rev is not supported + """ + hart_ids_by_soc = { + "A0": ["0", "1", "2", "3", "32", "33", "34", "35"], + "B0": [ + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "32", + "33", + "34", + "35", + "36", + "37", + "38", + "39", + ], + } + + if soc_rev not in hart_ids_by_soc: + raise Exception( + f"Unsupported soc_rev '{soc_rev}' in get_hart_ids_for_soc. Please add support for this soc_rev." + ) - self.meson_options_cmd_line_overrides = meson_options_cmd_line_overrides + return hart_ids_by_soc[soc_rev] + + def _calculate_spike_active_cpus(self) -> int: + """Calculate the number of active CPUs for Spike target.""" + active_cpu_mask = self.get_active_cpu_mask() + return convert_cpu_mask_to_num_active_cpus(active_cpu_mask) + + def _normalize_meson_overrides(self, value) -> dict: + """Normalize meson overrides to a dictionary format.""" + if value is None: + return {} + # Accept dict, list of "k=v" strings, or list of dicts + if isinstance(value, dict): + return value + if isinstance(value, list): + # list of dicts + if all(isinstance(x, dict) for x in value): + merged: dict = {} + for item in value: + merged.update(item) + return merged + # list of strings + from data_structures import DictUtils # local import to avoid cycles + + str_items = [x for x in value if isinstance(x, str)] + return DictUtils.create_dict(str_items) + raise TypeError("Unsupported override_meson_options format in YAML overrides") + + def _apply_yaml_overrides(self, overrides: Optional[dict]) -> None: + """Apply overrides from a YAML configuration block.""" + if not overrides: + return + # meson options + meson_over = self._normalize_meson_overrides(overrides.get("override_meson_options")) + if meson_over: + self.meson.override_meson_options_from_dict(meson_over) + + # diag_custom_defines + diag_custom_defines = overrides.get("diag_custom_defines") 
+ if diag_custom_defines: + self.meson.override_meson_options_from_dict( + {"diag_custom_defines": list(diag_custom_defines)} + ) - self.diag_attributes_cmd_line_overrides = diag_attributes_cmd_line_overrides + # diag attribute overrides + diag_attr_overrides = overrides.get("override_diag_attributes") + if diag_attr_overrides: + self.meson.override_meson_options_from_dict( + {"diag_attribute_overrides": list(diag_attr_overrides)} + ) + + # --------------------------------------------------------------------- + # Status label helpers (moved/centralized color logic) + # --------------------------------------------------------------------- + def _fmt_duration(self, seconds: Optional[float]) -> str: + try: + return f" ({seconds:.2f}s)" if seconds is not None else "" + except Exception: + return "" + + def _colorize_status_prefix(self, label: str) -> str: + """Colorize a status label prefix, preserving any trailing text. + + Recognizes prefixes: PASS, CONDITIONAL_PASS, EXPECTED_FAIL, FAILED, PENDING. + """ + # Order matters: check longer prefixes first + mapping = { + "CONDITIONAL_PASS": ("\u001b[33m", len("CONDITIONAL_PASS")), # yellow + "EXPECTED_FAIL": ("\u001b[33m", len("EXPECTED_FAIL")), # yellow + "PASS": ("\u001b[32m", len("PASS")), # green + "FAILED": ("\u001b[31m", len("FAILED")), # red + "PENDING": ("\u001b[33m", len("PENDING")), # yellow + } + for prefix, (color, plen) in mapping.items(): + if label.startswith(prefix): + reset = "\u001b[0m" + return f"{color}{prefix}{reset}" + label[plen:] + return label + + def colorize_status_text(self, text: str) -> str: + """Public helper to colorize a status-bearing string by prefix only. + + Safe to pass padded strings; only the leading status token is colorized. 
+ """ + return self._colorize_status_prefix(text or "") + + def format_build_label(self, include_duration: bool = False, color: bool = False) -> str: + base = self.compile_state.name + if include_duration: + base += self._fmt_duration(self.compile_duration_s) + return self._colorize_status_prefix(base) if color else base + + def format_run_label(self, include_duration: bool = False, color: bool = False) -> str: + base = self.run_state.name + if include_duration: + base += self._fmt_duration(self.run_duration_s) + return self._colorize_status_prefix(base) if color else base + + def compile(self): + start_time = time.perf_counter() + if self.meson is None: + self.compile_error = f"Meson object does not exist for diag: {self.name}" + self.compile_duration_s = time.perf_counter() - start_time + self.compile_state = self.CompileState.FAILED + return + + try: + self.meson.setup() + + self.meson.introspect() + + compiled_assets = self.meson.compile() + for asset_type, asset_path in compiled_assets.items(): + self.add_build_asset(asset_type, asset_path) + self.compile_error = None + self.current_state = self.state.COMPILED + self.compile_state = self.CompileState.PASS + except Exception as exc: + self.compile_error = str(exc) + self.compile_state = self.CompileState.FAILED + finally: + self.compile_duration_s = time.perf_counter() - start_time + + def run(self): + start_time = time.perf_counter() + if self.meson is None: + self.run_error = f"Meson object does not exist for diag: {self.name}" + self.run_duration_s = time.perf_counter() - start_time + self.run_state = self.RunState.FAILED + return + if self.compile_state != self.CompileState.PASS: + # Do not run if compile failed + return + + # Check if environment has a run_target defined + if self.environment.run_target is None: + self.run_error = ( + f"Environment '{self.environment.name}' does not have a run_target defined" + ) + self.run_duration_s = time.perf_counter() - start_time + self.run_state = 
self.RunState.FAILED + return + + try: + run_assets = self.meson.test() + for asset_type, asset_path in run_assets.items(): + self.add_build_asset(asset_type, asset_path) + self.run_error = None + self.run_return_code = 0 + self.current_state = self.state.RUN + self.run_state = self.RunState.PASS + except Exception as exc: + # Capture return code for MesonBuildError to allow expected-fail handling + try: + if isinstance(exc, MesonBuildError): + self.run_return_code = exc.return_code + except Exception: + pass + self.run_error = str(exc) + finally: + self.run_duration_s = time.perf_counter() - start_time + # Normalize run_state based on expected_fail, return code, and error + try: + if self.expected_fail is True: + # Expected to fail: + if self.run_return_code is not None and self.run_return_code != 0: + # This is the expected behavior + self.run_state = self.RunState.EXPECTED_FAIL + self.run_error = None + elif self.run_return_code == 0: + # Unexpected pass + self.run_state = self.RunState.FAILED + self.run_error = "Diag run passed but was expected to fail." + else: + # No return code; treat as failure unless error text indicates otherwise + self.run_state = ( + self.RunState.EXPECTED_FAIL + if self.run_error is None + else self.RunState.FAILED + ) + else: + # Not expected to fail: + if self.run_error is None and ( + self.run_return_code is None or self.run_return_code == 0 + ): + self.run_state = self.RunState.PASS + else: + self.run_state = self.RunState.FAILED + except Exception: + # Conservative fallback + if self.run_error is not None: + self.run_state = self.RunState.FAILED + # else keep whatever was set earlier + + def apply_batch_outcome_from_junit_status(self, junit_status: Optional[str]) -> None: + """Apply batch-run outcome to this unit using a junit testcase status string. + + junit_status: one of "pass", "fail", "skipped". 
+ """ + # Default pessimistic state + self.run_state = self.RunState.FAILED + if junit_status == "fail": + # truf marks fail when rc==0 for expected_fail=True, or rc!=0 for expected_fail=False + if self.expected_fail: + self.run_return_code = 0 + self.run_error = "Diag run passed but was expected to fail." + self.run_state = self.RunState.FAILED + else: + self.run_return_code = 1 + self.run_error = "Batch run failure" + self.run_state = self.RunState.FAILED + elif junit_status == "pass" or junit_status == "conditional_pass": + # truf marks pass when rc!=0 for expected_fail=True, or rc==0 for expected_fail=False + if self.expected_fail: + self.run_return_code = 1 + self.run_error = None + self.run_state = self.RunState.EXPECTED_FAIL + else: + self.run_return_code = 0 + self.run_error = None + if junit_status == "conditional_pass": + self.run_state = self.RunState.CONDITIONAL_PASS + else: + self.run_state = self.RunState.PASS + else: + # If not in report or unknown status, assume failure conservatively + self.run_return_code = 1 + self.run_error = "No batch result" + self.run_state = self.RunState.FAILED + + def mark_no_junit_report(self) -> None: + self.run_error = "No JUnit report" + self.run_return_code = None + self.run_state = self.RunState.FAILED + + def mark_batch_exception(self, exc: Exception) -> None: + try: + self.run_error = f"{type(exc).__name__}: {exc}" + except Exception: + self.run_error = "Batch run failed with an exception" + self.run_return_code = None + self.run_state = self.RunState.FAILED def __str__(self) -> str: - print_string = f"\n\tName: {self.diag_source.diag_name}\n\tDirectory: {self.build_dir}\n\tAssets: {self.build_assets}\n\tBuildType: {self.buildtype},\n\tTarget: {self.target},\n\tBootConfig: {self.boot_config}," - if self.rng_seed is not None: - print_string += f"\n\tRNG Seed: {self.rng_seed}" + current_buildtype = self.meson.get_meson_options().get("buildtype", "release") + + compile_label = self.compile_state.name + if 
self.compile_error: + compile_label += f": {self.compile_error}" + + run_label = self.run_state.name + if self.run_error: + run_label += f": {self.run_error}" + + compile_colored = self.colorize_status_text(compile_label) + run_colored = self.colorize_status_text(run_label) + + print_string = ( + f"\n\tName: {self.name}" + f"\n\tDirectory: {self.build_dir}" + f"\n\tBuildType: {current_buildtype}," + f"\n\tEnvironment: {self.environment.name}," + f"\n\tRunTarget: {self.environment.run_target}," + f"\n\tCompile: {compile_colored}," + f"\n\tRun: {run_colored}" + ) + print_string += f"\n\tRNG Seed: {hex(self.rng_seed)}" print_string += f"\n\tSource Info:\n{self.diag_source}" + print_string += "\n\tMeson options:\n" + self.meson.get_meson_options_pretty(spacing="\t\t") + print_string += f"\n\tAssets: {self.build_assets}" return print_string @@ -156,8 +745,11 @@ def add_build_asset( build_asset_type, build_asset_src_file_path, build_asset_file_name=None, - no_copy=False, + asset_action=AssetAction.COPY, ): + if not isinstance(asset_action, AssetAction): + raise TypeError("asset_action must be an instance of AssetAction Enum") + if build_asset_type in self.build_assets: raise Exception(f"Asset already exists: {build_asset_type}") @@ -167,12 +759,18 @@ def add_build_asset( if not os.path.exists(build_asset_src_file_path): raise Exception(f"Asset does not exist: {build_asset_src_file_path}") - if no_copy is True: + if asset_action == AssetAction.NO_COPY: self.build_assets[build_asset_type] = build_asset_src_file_path - else: + elif asset_action == AssetAction.MOVE: + self.build_assets[build_asset_type] = shutil.move( + build_asset_src_file_path, f"{self.build_dir}/{build_asset_file_name}" + ) + elif asset_action == AssetAction.COPY: self.build_assets[build_asset_type] = shutil.copy( build_asset_src_file_path, f"{self.build_dir}/{build_asset_file_name}" ) + else: + raise Exception(f"Invalid Asset action type: {asset_action}") def get_build_asset(self, build_asset_type): if 
build_asset_type not in self.build_assets: @@ -184,4 +782,62 @@ def get_build_directory(self): return self.build_dir def get_name(self): - return self.diag_source.diag_name + return self.name + + def compile_passed(self) -> bool: + """Check if compilation passed successfully. + + Returns True if compile_state is PASS and compile_error is None. + Returns False otherwise. + """ + return ( + getattr(self, "compile_state", None) is not None + and getattr(self.compile_state, "name", "") == "PASS" + and self.compile_error is None + ) + + def run_passed(self) -> bool: + """Check if run passed successfully. + + Returns True if run_state is PASS and run_error is None. + Returns False otherwise. + """ + return ( + getattr(self, "run_state", None) is not None + and getattr(self.run_state, "name", "") == "PASS" + and self.run_error is None + ) + + def cleanup_meson_builddir(self) -> None: + if not hasattr(self, "keep_meson_builddir"): + return + + """Clean up the meson build directory if keep_meson_builddir is False and no failures occurred.""" + # Keep the build directory if explicitly requested or if there were failures + should_keep = ( + self.keep_meson_builddir + or self.compile_state == self.CompileState.FAILED + or self.run_state == self.RunState.FAILED + ) + + if hasattr(self, "meson_builddir") and self.meson_builddir and not should_keep: + try: + log.debug(f"Removing meson build directory: {self.meson_builddir}") + shutil.rmtree(self.meson_builddir) + except Exception as exc: + log.debug(f"Ignoring error during meson build directory cleanup: {exc}") + elif hasattr(self, "meson_builddir") and self.meson_builddir and should_keep: + if self.compile_state == self.CompileState.FAILED: + log.debug( + f"Keeping meson build directory due to compile failure: {self.meson_builddir}" + ) + elif self.run_state == self.RunState.FAILED: + log.debug( + f"Keeping meson build directory due to run failure: {self.meson_builddir}" + ) + elif self.keep_meson_builddir: + log.debug(f"Keeping 
meson build directory as requested: {self.meson_builddir}") + + def __del__(self): + """Cleanup when the object is destroyed.""" + self.cleanup_meson_builddir() diff --git a/scripts/build_tools/diag_factory.py b/scripts/build_tools/diag_factory.py new file mode 100644 index 00000000..273dae80 --- /dev/null +++ b/scripts/build_tools/diag_factory.py @@ -0,0 +1,914 @@ +# SPDX-FileCopyrightText: 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +import glob +import logging as log +import os +import random +import sys +from concurrent.futures import ThreadPoolExecutor, as_completed +from typing import Dict, List, Optional, Tuple + +import yaml +from system import functions as system_functions # noqa + +from .diag import DiagBuildUnit + + +class DiagFactoryError(Exception): + pass + + +class DiagFactory: + """Create and build multiple diagnostics from a YAML description. + + YAML format (expected_fail defaults to 0 if not specified): + + diagnostics: + : + source_dir: + override_meson_options: ["key=value", ...] + override_diag_attributes: ["attr=value", ...] + diag_custom_defines: ["NAME=VALUE", ...] 
+ expected_fail: + """ + + def __init__( + self, + build_manifest_yaml: dict, + root_build_dir: str, + environment, + toolchain: str, + rng_seed: Optional[int], + jumpstart_dir: str, + keep_meson_builddir: bool, + jobs: int = 1, + cli_meson_option_overrides: Optional[List[str]] = None, + cli_diag_attribute_overrides: Optional[List[str]] = None, + cli_diag_custom_defines: Optional[List[str]] = None, + skip_write_manifest: bool = False, + oswis_additional_arguments: List[str] = None, + oswis_emulation_model: str = None, + oswis_diag_timeout: int = None, + oswis_timeout: int = None, + oswis_firmware_tarball: str = None, + ) -> None: + self.build_manifest_yaml = build_manifest_yaml + self.root_build_dir = os.path.abspath(root_build_dir) + self.toolchain = toolchain + + # Store the environment object directly + self.environment = environment + + self.jumpstart_dir = jumpstart_dir + self.keep_meson_builddir = keep_meson_builddir + try: + self.jobs = max(1, int(jobs)) + except Exception: + self.jobs = 1 + self.global_overrides: Dict[str, any] = {} + self.cli_meson_option_overrides = cli_meson_option_overrides or [] + self.cli_diag_attribute_overrides = cli_diag_attribute_overrides or [] + self.cli_diag_custom_defines = cli_diag_custom_defines or [] + + # Determine batch_mode from environment configuration + self.batch_mode: bool = self.environment.override_meson_options.get("batch_mode", False) + + self.skip_write_manifest: bool = bool(skip_write_manifest) + + self.oswis_additional_arguments = oswis_additional_arguments + self.oswis_emulation_model = oswis_emulation_model + self.oswis_diag_timeout = oswis_diag_timeout + self.oswis_timeout = oswis_timeout + self.oswis_firmware_tarball = oswis_firmware_tarball + + loaded = self.build_manifest_yaml or {} + + # Validate the provided YAML manifest strictly before proceeding + self._validate_manifest(loaded) + + self.diagnostics: Dict[str, dict] = loaded["diagnostics"] or {} + + # Create a deterministic RNG for generating diag 
seeds + if rng_seed is None: + self.factory_rng = random.Random() + else: + self.factory_rng = random.Random(rng_seed) + + # Set rng_seed for each diagnostic if not already specified + for diag_name, diag_config in self.diagnostics.items(): + if "rng_seed" not in diag_config: + diag_config["rng_seed"] = self.factory_rng.randrange(sys.maxsize) + + # Optional global_overrides (already validated) + self.global_overrides = loaded.get("global_overrides") or {} + + system_functions.create_empty_directory(os.path.abspath(self.root_build_dir)) + + self._diag_units: Dict[str, DiagBuildUnit] = {} + # expected_fail now lives per DiagBuildUnit; no per-factory map + self._build_repo_manifest_path: Optional[str] = None + self._run_manifest_path: Optional[str] = None + # Batch-mode artifacts (set when batch_mode=True and generation succeeds) + self._batch_out_dir: Optional[str] = None + self._batch_manifest_path: Optional[str] = None + # Track batch runner failures + self._batch_runner_failed: bool = False + + if not self.skip_write_manifest: + self.write_build_repro_manifest() + + def _validate_manifest(self, manifest: dict) -> None: + """Validate the structure and types of a DiagFactory YAML manifest. + + Rules: + - Top-level: required key `diagnostics`, optional keys `global_overrides`, `rng_seed`. + No other top-level keys are allowed. + - `diagnostics`: mapping of diag_name -> per-diag mapping. + Each per-diag mapping must include `source_dir` (non-empty string). + Allowed optional keys per diag: `override_meson_options`, `override_diag_attributes`, + `diag_custom_defines`, `expected_fail`, `rng_seed`. + - `global_overrides` (optional): mapping; allowed keys are + `override_meson_options`, `override_diag_attributes`, `diag_custom_defines`. 
+ + - Types: + - override_meson_options: dict OR list (each item must be a dict or str) + - override_diag_attributes: list of str + - diag_custom_defines: list of str + - expected_fail: bool, int, or str + - rng_seed: int + + """ + if not isinstance(manifest, dict): + raise DiagFactoryError("Invalid diagnostics YAML. Expected a top-level mapping (dict).") + + top_allowed = {"diagnostics", "global_overrides"} + top_keys = set(manifest.keys()) + if "diagnostics" not in top_keys: + raise DiagFactoryError("Invalid diagnostics YAML. Missing required key 'diagnostics'.") + extra_top = top_keys - top_allowed + if extra_top: + raise DiagFactoryError( + "Invalid diagnostics YAML. Only 'diagnostics' and optional 'global_overrides' are allowed; found: " + + ", ".join(sorted(extra_top)) + ) + + diagnostics = manifest.get("diagnostics") + if not isinstance(diagnostics, dict) or len(diagnostics) == 0: + raise DiagFactoryError("'diagnostics' must be a non-empty mapping of names to configs.") + + per_diag_allowed = { + "source_dir", + "override_meson_options", + "override_diag_attributes", + "diag_custom_defines", + "expected_fail", + "rng_seed", + } + + def _validate_override_meson_options(value, context: str) -> None: + if isinstance(value, dict): + return + if isinstance(value, list): + for idx, item in enumerate(value): + if not isinstance(item, (str, dict)): + raise DiagFactoryError( + f"{context}.override_meson_options[{idx}] must be str or dict" + ) + return + raise DiagFactoryError(f"{context}.override_meson_options must be a dict or list") + + def _validate_str_list(value, context: str, field_name: str) -> None: + if not isinstance(value, list) or not all(isinstance(x, str) for x in value): + raise DiagFactoryError(f"{context}.{field_name} must be a list of strings") + + # Validate each diagnostic block + for diag_name, diag_cfg in diagnostics.items(): + if not isinstance(diag_name, str) or diag_name.strip() == "": + raise DiagFactoryError("Each diagnostic name must be a 
non-empty string") + if not isinstance(diag_cfg, dict): + raise DiagFactoryError( + f"diagnostics.{diag_name} must be a mapping of options, found {type(diag_cfg).__name__}" + ) + + # Unknown key check + unknown = set(diag_cfg.keys()) - per_diag_allowed + if unknown: + raise DiagFactoryError( + f"diagnostics.{diag_name} contains unknown key(s): " + + ", ".join(sorted(unknown)) + ) + + # Required source_dir + src = diag_cfg.get("source_dir") + if not isinstance(src, str) or src.strip() == "": + raise DiagFactoryError( + f"diagnostics.{diag_name}.source_dir is required and must be a non-empty string" + ) + + # Optional per-diag fields + if "override_meson_options" in diag_cfg: + _validate_override_meson_options( + diag_cfg["override_meson_options"], f"diagnostics.{diag_name}" + ) + if "override_diag_attributes" in diag_cfg: + _validate_str_list( + diag_cfg["override_diag_attributes"], + f"diagnostics.{diag_name}", + "override_diag_attributes", + ) + if "diag_custom_defines" in diag_cfg: + _validate_str_list( + diag_cfg["diag_custom_defines"], + f"diagnostics.{diag_name}", + "diag_custom_defines", + ) + if "expected_fail" in diag_cfg: + ef = diag_cfg["expected_fail"] + if not isinstance(ef, (bool, int, str)): + raise DiagFactoryError( + f"diagnostics.{diag_name}.expected_fail must be a bool, int, or str" + ) + if "rng_seed" in diag_cfg: + seed = diag_cfg["rng_seed"] + if not isinstance(seed, int): + raise DiagFactoryError( + f"diagnostics.{diag_name}.rng_seed must be an integer if provided" + ) + if seed < 0: + raise DiagFactoryError(f"diagnostics.{diag_name}.rng_seed must be non-negative") + + # Validate optional global_overrides + if "global_overrides" in manifest: + go = manifest["global_overrides"] + if not isinstance(go, dict): + raise DiagFactoryError("global_overrides must be a mapping (dict)") + go_allowed = { + "override_meson_options", + "override_diag_attributes", + "diag_custom_defines", + } + unknown = set(go.keys()) - go_allowed + if unknown: + raise 
DiagFactoryError( + "global_overrides contains unknown key(s): " + ", ".join(sorted(unknown)) + ) + if "override_meson_options" in go: + _validate_override_meson_options(go["override_meson_options"], "global_overrides") + if "override_diag_attributes" in go: + _validate_str_list( + go["override_diag_attributes"], + "global_overrides", + "override_diag_attributes", + ) + if "diag_custom_defines" in go: + _validate_str_list( + go["diag_custom_defines"], "global_overrides", "diag_custom_defines" + ) + + def _execute_parallel( + self, + max_workers: int, + tasks: Dict[str, Tuple], + runner_fn, + ) -> Dict[str, DiagBuildUnit]: + """Execute tasks concurrently and return a mapping of diag name to unit. + + - tasks: mapping of diag_name -> tuple where the first element is the DiagBuildUnit + followed by any extra args needed by runner_fn. + - runner_fn: callable invoked as runner_fn(name, *task_args) + """ + results: Dict[str, DiagBuildUnit] = {} + with ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_task = {} + for diag_name, args in tasks.items(): + unit = args[0] + fut = executor.submit(runner_fn, diag_name, *args) + future_to_task[fut] = (diag_name, unit) + + for fut in as_completed(list(future_to_task.keys())): + diag_name, unit = future_to_task[fut] + try: + fut.result() + except Exception: + # Any exception is already recorded (or will be) on the unit + pass + results[diag_name] = unit + return results + + def _normalize_to_kv_list(self, value) -> List[str]: + """Normalize override structures into a list of "k=v" strings. + + Accepts dict, list of dicts, list of strings, or None. 
+ """ + if not value: + return [] + if isinstance(value, dict): + return [f"{k}={v}" for k, v in value.items()] + if isinstance(value, list): + if all(isinstance(x, dict) for x in value): + merged: Dict[str, any] = {} + for item in value: + merged.update(item) + return [f"{k}={v}" for k, v in merged.items()] + return [str(x) for x in value if isinstance(x, str)] + raise TypeError("Unsupported override format; expected dict or list") + + def _dedupe_kv_list(self, items: List[str]) -> List[str]: + """Remove duplicate keys from a list of "k=v" strings keeping the last occurrence. + + Preserves the overall order of first appearances after de-duplication. + """ + seen = {} + order: List[str] = [] + # Walk from end so later entries win + for entry in reversed(items or []): + if "=" in entry: + key = entry.split("=", 1)[0] + else: + key = entry + if key not in seen: + seen[key] = entry + order.append(key) + # Reconstruct in forward order + order.reverse() + return [seen[k] for k in order] + + def build_repro_manifest_dict(self) -> dict: + """Create a reproducible build manifest combining diagnostics and global overrides. + + Command-line overrides are appended under 'global_overrides'. 
+ """ + # Start with diagnostics as loaded + manifest: Dict[str, any] = {"diagnostics": dict(self.diagnostics)} + + # Merge global overrides with CLI overrides + global_overrides: Dict[str, any] = dict(self.global_overrides or {}) + + combined_meson = self._normalize_to_kv_list(global_overrides.get("override_meson_options")) + combined_meson.extend(list(self.cli_meson_option_overrides or [])) + combined_meson = self._dedupe_kv_list(combined_meson) + if combined_meson: + global_overrides["override_meson_options"] = combined_meson + + combined_diag_attrs = self._normalize_to_kv_list( + global_overrides.get("override_diag_attributes") + ) + combined_diag_attrs.extend(list(self.cli_diag_attribute_overrides or [])) + combined_diag_attrs = self._dedupe_kv_list(combined_diag_attrs) + if combined_diag_attrs: + global_overrides["override_diag_attributes"] = combined_diag_attrs + + existing_defines = global_overrides.get("diag_custom_defines") or [] + if isinstance(existing_defines, dict): + existing_defines = [f"{k}={v}" for k, v in existing_defines.items()] + elif isinstance(existing_defines, list): + existing_defines = [str(x) for x in existing_defines] + else: + existing_defines = [] + combined_defines = list(existing_defines) + combined_defines.extend(list(self.cli_diag_custom_defines or [])) + combined_defines = self._dedupe_kv_list(combined_defines) + if combined_defines: + global_overrides["diag_custom_defines"] = combined_defines + + if global_overrides: + manifest["global_overrides"] = global_overrides + + return manifest + + def write_build_repro_manifest(self, output_path: Optional[str] = None) -> str: + """Write the build manifest YAML to disk and return its path.""" + if output_path is None: + output_path = os.path.join(self.root_build_dir, "build_manifest.repro.yaml") + manifest = self.build_repro_manifest_dict() + with open(output_path, "w") as f: + yaml.safe_dump(manifest, f, sort_keys=False) + self._build_repo_manifest_path = output_path + log.debug(f"Wrote 
build manifest: {output_path}") + return output_path + + def write_run_manifest(self, output_path: Optional[str] = None) -> str: + """Write the run manifest YAML to disk and return its path. + + Format: + diagnostics: + : + elf_path: + num_iterations: 1 + expected_fail: + """ + if output_path is None: + output_path = os.path.join(self.root_build_dir, "run_manifest.yaml") + + run_manifest = {"diagnostics": {}} + + # Include all successfully compiled diags + for diag_name, unit in self._diag_units.items(): + if unit.compile_passed(): + try: + elf_path = unit.get_build_asset("elf") + if os.path.exists(elf_path): + run_manifest["diagnostics"][diag_name] = { + "elf_path": os.path.abspath(elf_path), + "num_iterations": 1, + "expected_fail": getattr(unit, "expected_fail", False), + "primary_hart_id": unit.get_primary_hart_id(), + } + except Exception as exc: + log.warning(f"Failed to get ELF path for diag '{diag_name}': {exc}") + + with open(output_path, "w") as f: + yaml.safe_dump(run_manifest, f, sort_keys=False) + self._run_manifest_path = output_path + log.debug(f"Wrote run manifest: {output_path}") + return output_path + + def _prepare_unit(self, diag_name: str, config: dict) -> Tuple[str, DiagBuildUnit]: + # Do not validate here; DiagBuildUnit validates presence of 'source_dir' + # Pass through all per-diag config keys as-is + yaml_diag_config = dict(config) + + # Create per-diag build dir + diag_build_dir = os.path.join(self.root_build_dir, diag_name) + + # Build the single YAML config to pass through: { : {..}, global_overrides: {...} } + # Create deep copies to avoid modifying shared state + import copy + + merged_yaml_config = { + diag_name: copy.deepcopy({k: v for k, v in yaml_diag_config.items() if v is not None}), + "global_overrides": copy.deepcopy(self.global_overrides), + } + + unit = DiagBuildUnit( + yaml_config=merged_yaml_config, + meson_options_cmd_line_overrides=( + copy.deepcopy(self.cli_meson_option_overrides) + if self.cli_meson_option_overrides + 
else None + ), + diag_attributes_cmd_line_overrides=( + copy.deepcopy(self.cli_diag_attribute_overrides) + if self.cli_diag_attribute_overrides + else None + ), + diag_custom_defines_cmd_line_overrides=( + copy.deepcopy(self.cli_diag_custom_defines) + if self.cli_diag_custom_defines + else None + ), + build_dir=diag_build_dir, + environment=copy.deepcopy(self.environment), + toolchain=self.toolchain, + jumpstart_dir=self.jumpstart_dir, + keep_meson_builddir=self.keep_meson_builddir, + ) + + return diag_build_dir, unit + + def compile_all(self) -> Dict[str, DiagBuildUnit]: + def _do_compile(name: str, unit: DiagBuildUnit, build_dir: str) -> None: + log.info(f"Compiling '{unit.diag_source.get_original_path()}'") + log.debug(f"Build directory: {build_dir}") + try: + unit.compile() + except Exception as exc: + # Capture unexpected exceptions as compile_error + unit.compile_error = f"{type(exc).__name__}: {exc}" + unit.compile_state = unit.CompileState.FAILED + + # Build task map: name -> (unit, build_dir) + tasks: Dict[str, Tuple] = {} + for diag_name, config in self.diagnostics.items(): + diag_build_dir, unit = self._prepare_unit(diag_name, config) + self._diag_units[diag_name] = unit + tasks[diag_name] = (unit, diag_build_dir) + + self._execute_parallel(self.jobs, tasks, _do_compile) + + for name, unit in self._diag_units.items(): + log.debug(f"Diag built details: {unit}") + + # If batch mode is enabled, generate the batch manifest and payloads/ELFs here + if self.batch_mode: + self._generate_batch_artifacts() + + # Generate run manifest after all compilation is complete + if not self.skip_write_manifest: + self.write_run_manifest() + + # After building all units (and generating any artifacts), raise if any compile failed + compile_failures = [ + unit.diag_source.get_original_path() + for name, unit in self._diag_units.items() + if not unit.compile_passed() + ] + if compile_failures: + failure_list = "\n ".join(compile_failures) + raise DiagFactoryError(f"One or more 
diagnostics failed to compile:\n {failure_list}") + + def run_all(self) -> Dict[str, DiagBuildUnit]: + if not self._diag_units: + raise DiagFactoryError("run_all() called before compile_all().") + + # Check if environment has a run_target defined + if self.environment.run_target is None: + raise DiagFactoryError( + f"Environment '{self.environment.name}' does not have a run_target defined" + ) + + if self.batch_mode is True: + self._run_all_batch_mode() + elif self.environment.run_target == "oswis": + # Handles non-batch mode cases for oswis target. + self._run_all_oswis() + else: + # Non-batch mode: run per-diag via DiagBuildUnit.run() + effective_jobs = self.jobs if self.environment.run_target == "spike" else 1 + + def _do_run(name: str, unit: DiagBuildUnit) -> None: + log.info(f"Running diag '{unit.diag_source.get_original_path()}'") + try: + unit.run() + except Exception as exc: + unit.run_error = f"{type(exc).__name__}: {exc}" + unit.run_state = unit.RunState.FAILED + + run_tasks: Dict[str, Tuple] = {name: (unit,) for name, unit in self._diag_units.items()} + self._execute_parallel(effective_jobs, run_tasks, _do_run) + + # After running all units, raise if any run failed + run_failures = [ + unit.diag_source.get_original_path() + for name, unit in self._diag_units.items() + if unit.compile_passed() and not unit.run_passed() + ] + if run_failures: + failure_list = "\n ".join(run_failures) + raise DiagFactoryError(f"One or more diagnostics failed to run:\n {failure_list}") + + def summarize(self) -> str: + # Build pretty table; compute widths from plain text, add ANSI coloring for PASS/FAILED/EXPECTED_FAIL labels + + # Define color constants + bold = "\u001b[1m" + reset = "\u001b[0m" + green = "\u001b[32m" + red = "\u001b[31m" + + # Gather data per-diag for the Result column + gathered = [] + for diag_name, unit in self._diag_units.items(): + build_plain = unit.format_build_label(include_duration=True, color=False) + run_plain = 
unit.format_run_label(include_duration=True, color=False) + error_text = unit.compile_error or unit.run_error or "" + + try: + elf_path = unit.get_build_asset("elf") + except Exception: + elf_path = None + + # Determine what to show in the Result column + if error_text and error_text.strip(): + # If there's an error, show it (will be colored red later) + merged_content = error_text + elif elf_path and not self.batch_mode: + # If no error but ELF is available and not in batch mode, show the path + merged_content = elf_path + else: + # Fallback - don't show ELF paths in batch mode + merged_content = "N/A" + + gathered.append( + { + "name": unit.diag_source.get_original_path(), + "original_name": diag_name, + "build": build_plain, + "run": run_plain, + "result": merged_content, + "has_error": bool(error_text and error_text.strip()), + } + ) + + # Check if Result column would be empty (all "N/A") + include_result_col = any(item["result"] != "N/A" for item in gathered) + + # Build rows in two-row groups per diag + row_groups = [] + for item in gathered: + if include_result_col: + row_groups.append( + [ + ( + item["name"], + item["original_name"], + item["build"], + item["run"], + item["result"], + item["has_error"], + ), + ] + ) + else: + row_groups.append( + [ + ( + item["name"], + item["original_name"], + item["build"], + item["run"], + item["has_error"], + ), + ] + ) + + # Header varies depending on whether we include the Result column + if include_result_col: + header = ("Diag", "Build", f"Run [{self.environment.run_target}]", "Result") + else: + header = ("Diag", "Build", f"Run [{self.environment.run_target}]") + + # Compute column widths based on plain text + col_widths = [len(h) for h in header] + for group in row_groups: + for r in group: + # Consider the display elements (excluding original_name and has_error) + # When include_result_col is True: r has 6 elements: [diag_name, original_name, build, run, result, has_error] + # When include_result_col is False: r 
has 5 elements: [diag_name, original_name, build, run, has_error] + if include_result_col: + display_elements = [ + r[0], + r[2], + r[3], + r[4], + ] # diag_name, build, run, result + else: + display_elements = [r[0], r[2], r[3]] # diag_name, build, run + for i, cell in enumerate(display_elements): + if len(str(cell)) > col_widths[i]: + col_widths[i] = len(str(cell)) + + def pad(cell: str, width: int) -> str: + return cell.ljust(width) + + # Build table lines + top = "┏" + "┳".join("━" * (w + 2) for w in col_widths) + "┓" + hdr = "┃ " + " ┃ ".join(pad(h, w) for h, w in zip(header, col_widths)) + " ┃" + sep = "┡" + "╇".join("━" * (w + 2) for w in col_widths) + "┩" + inner = "├" + "┼".join("─" * (w + 2) for w in col_widths) + "┤" + + body = [] + for gi, group in enumerate(row_groups): + for ri, r in enumerate(group): + # Unpack the row data based on whether we have the result column + if include_result_col: + ( + diag_name, + original_name, + build_plain, + run_plain, + result, + has_error, + ) = r + else: + diag_name, original_name, build_plain, run_plain, has_error = r + + # pad using plain text + diag_pad = pad(str(diag_name), col_widths[0]) + build_pad = pad(build_plain, col_widths[1]) + run_pad = pad(run_plain, col_widths[2]) + + # colorize status prefixes on the first row of each group only + unit = self._diag_units.get(original_name) if ri == 0 else None + if unit is not None: + build_colored = unit.colorize_status_text(build_pad) + run_colored = unit.colorize_status_text(run_pad) + else: + build_colored = build_pad + run_colored = run_pad + + # Build the row content + if include_result_col: + result_pad = pad(str(result), col_widths[3]) + # Apply red coloring to errors in the result column + if has_error: + result_colored = f"{red}{result_pad}{reset}" + else: + result_colored = result_pad + row_content = [diag_pad, build_colored, run_colored, result_colored] + else: + row_content = [diag_pad, build_colored, run_colored] + + body.append("│ " + " │ 
".join(row_content) + " │") + # separator between diagnostics (groups), except after the last group + if gi != len(row_groups) - 1: + body.append(inner) + bot = "└" + "┴".join("─" * (w + 2) for w in col_widths) + "┘" + + # Compute overall result visibility line + try: + overall_pass = True + + # If no diagnostics were built at all, that's a failure + if not self._diag_units: + overall_pass = False + else: + for _name, _unit in self._diag_units.items(): + if not _unit.compile_passed(): + overall_pass = False + break + if self.environment.run_target is not None and not _unit.run_passed(): + overall_pass = False + break + + # Check batch runner status if in batch mode + if self.batch_mode: + # Check if batch runner failed + if self._batch_runner_failed: + overall_pass = False + # Check if batch runner exists and is in failed state + elif hasattr(self, "batch_runner") and self.batch_runner is not None: + if ( + hasattr(self.batch_runner, "state") + and self.batch_runner.state.name == "FAILED" + ): + overall_pass = False + except Exception: + overall_pass = False + + overall_line = ( + f"{bold}{green}STATUS: PASSED{reset}" + if overall_pass + else f"{bold}{red}STATUS: FAILED{reset}" + ) + + table_lines = [ + f"\n{bold}Summary{reset}", + f"Build root: {self.root_build_dir}", + f"Build Repro Manifest: {self._build_repo_manifest_path}", + top, + hdr, + sep, + *body, + bot, + ] + + # Count and print diagnostics that were built and run + built_count = 0 + run_count = 0 + + for name, unit in self._diag_units.items(): + # Count built diagnostics (those that compiled successfully) + if unit.compile_passed(): + built_count += 1 + + # Count run diagnostics (those that ran successfully) + if unit.run_passed(): + run_count += 1 + + # Add count information to table lines + table_lines.extend( + ["", f"Diagnostics built: {built_count}", f"Diagnostics run: {run_count}"] + ) + + # Note: Per-diag artifact section removed; artifacts are shown inline in the table + + # Append batch-mode 
details if applicable + if self.batch_mode: + payloads = list( + getattr(getattr(self, "batch_runner", None), "batch_payloads", []) or [] + ) + truf_elfs = list( + getattr(getattr(self, "batch_runner", None), "batch_truf_elfs", []) or [] + ) + # Pair each Truf ELF with its padded binary + truf_pairs = [] + try: + # Match the centralized naming in binary_utils: ..padded.bin + for elf in truf_elfs: + # Extract the base name for padded binary matching + basename = os.path.basename(elf) + # Remove .elf extension to get the base stem for padded binary matching + base_stem = basename.replace(".elf", "") + + dirn = os.path.dirname(elf) + # We cannot know entry here without re-reading; glob match fallbacks + pattern = os.path.join(dirn, base_stem + ".0x" + "*" + ".padded.bin") + matches = sorted(glob.glob(pattern)) + bin_path = matches[-1] if matches else None + truf_pairs.append((elf, bin_path)) + except Exception: + truf_pairs = [(elf, None) for elf in truf_elfs] + # Add batch runner status information + batch_status = "Unknown" + batch_error = None + if hasattr(self, "batch_runner") and self.batch_runner is not None: + if hasattr(self.batch_runner, "state"): + batch_status = self.batch_runner.state.name + if hasattr(self.batch_runner, "error_message") and self.batch_runner.error_message: + batch_error = self.batch_runner.error_message + + # Group ELFs by target type + target_elfs = {} + for elf_path, bin_path in truf_pairs: + basename = os.path.basename(elf_path) + if "." 
in basename: + parts = basename.split(".") + if len(parts) >= 2: + target = parts[-2] # Second to last part before .elf + if target not in target_elfs: + target_elfs[target] = [] + target_elfs[target].append(elf_path) + else: + # Fallback if filename doesn't match expected pattern + if "unknown" not in target_elfs: + target_elfs["unknown"] = [] + target_elfs["unknown"].append(elf_path) + else: + # Fallback if filename doesn't match expected pattern + if "unknown" not in target_elfs: + target_elfs["unknown"] = [] + target_elfs["unknown"].append(elf_path) + + # Build batch artifacts table using the same logic as diagnostics table + batch_rows = [] + + # Add status row + batch_rows.append(("Status", batch_status)) + + # Add error row if present + if batch_error: + batch_rows.append(("Error", batch_error)) + + # Add manifest row + batch_rows.append( + ( + "Truf Payload Manifest (consumed by truf-payload-generator)", + self._batch_manifest_path, + ) + ) + + # Add payloads rows + for payload in payloads: + batch_rows.append(("Truf Payloads (consumed by truf-runner)", payload)) + + # Add ELF rows grouped by target + for target, elf_paths in sorted(target_elfs.items()): + for i, elf_path in enumerate(elf_paths): + if i == 0: + batch_rows.append((f"Truf ELFs ({target})", elf_path)) + else: + batch_rows.append(("", elf_path)) + + # Build table using same logic as diagnostics + batch_header = ("Type", "Value") + batch_col_widths = [len(h) for h in batch_header] + + # Compute column widths + for row in batch_rows: + for i, cell in enumerate(row): + if len(str(cell)) > batch_col_widths[i]: + batch_col_widths[i] = len(str(cell)) + + # Build table lines + batch_top = "┏" + "┳".join("━" * (w + 2) for w in batch_col_widths) + "┓" + batch_hdr = ( + "┃ " + " ┃ ".join(pad(h, w) for h, w in zip(batch_header, batch_col_widths)) + " ┃" + ) + batch_sep = "┡" + "╇".join("━" * (w + 2) for w in batch_col_widths) + "┩" + batch_inner = "├" + "┼".join("─" * (w + 2) for w in batch_col_widths) + 
"┤" + + # Build body + batch_body = [] + for i, (type_name, value) in enumerate(batch_rows): + type_pad = pad(str(type_name), batch_col_widths[0]) + value_pad = pad(str(value), batch_col_widths[1]) + batch_body.append("│ " + " │ ".join([type_pad, value_pad]) + " │") + # Add separator between rows except after the last one + if i < len(batch_rows) - 1: + batch_body.append(batch_inner) + + batch_bot = "└" + "┴".join("─" * (w + 2) for w in batch_col_widths) + "┘" + + # Add the batch table to the main table lines + table_lines.extend( + [ + "", + f"{bold}Batch Mode Artifacts{reset}", + batch_top, + batch_hdr, + batch_sep, + *batch_body, + batch_bot, + ] + ) + + # Add Run Manifest before the final status + table_lines.append(f"\n{bold}Run Manifest{reset}:\n{self._run_manifest_path}") + + # Print overall result at the very end for visibility (after batch-mode details if present) + table_lines.append("") + table_lines.append(overall_line) + log.info("\n".join(table_lines)) diff --git a/scripts/build_tools/environment.py b/scripts/build_tools/environment.py new file mode 100644 index 00000000..b349c6e6 --- /dev/null +++ b/scripts/build_tools/environment.py @@ -0,0 +1,223 @@ +# SPDX-FileCopyrightText: 2025 Rivos Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 + +import os +from typing import Dict, List, Optional + +import yaml + + +class Environment: + """Represents a build environment with configuration attributes.""" + + def __init__(self, name: str, **kwargs): + self.name = name + self.run_target = kwargs.get("run_target") + self.override_meson_options = kwargs.get("override_meson_options", {}) + self.override_diag_attributes = kwargs.get("override_diag_attributes", []) + self.extends = kwargs.get("extends") # String or list of strings + self.hidden = kwargs.get( + "hidden", False + ) # Whether this environment should be hidden from lists + + def __str__(self) -> str: + return ( + f"Environment(name={self.name}, run_target={self.run_target}, extends={self.extends})" + ) + + def __repr__(self) -> str: + return self.__str__() + + +class EnvironmentManager: + """Manages environment configurations with inheritance support.""" + + def __init__(self): + self.environments: Dict[str, Environment] = {} + + def register_environment(self, env: Environment) -> None: + """Register an environment with the manager.""" + self.environments[env.name] = env + + def get_environment(self, name: str) -> Environment: + """Get a fully resolved environment with all inherited attributes merged.""" + return self._resolve_environment(name) + + def list_environments(self) -> Dict[str, Environment]: + """Get all registered environments (unresolved).""" + return self.environments.copy() + + def list_visible_environments(self) -> Dict[str, Environment]: + """Get all visible (non-hidden) registered environments (unresolved).""" + return {name: env for name, env in self.environments.items() if not env.hidden} + + def _resolve_environment(self, name: str, visited: Optional[set] = None) -> Environment: + """Recursively resolve inheritance chain and merge attributes.""" + if visited is None: + visited = set() + + if name in visited: + raise ValueError(f"Circular inheritance detected: {name}") + + if name not in 
self.environments: + raise ValueError(f"Environment '{name}' not found") + + env = self.environments[name] + visited.add(name) + + # If no inheritance, return as-is + if not env.extends: + return env + + # Handle single inheritance + if isinstance(env.extends, str): + parent = self._resolve_environment(env.extends, visited) + return self._merge_environments(parent, env) + + # # Handle multiple inheritance + # elif isinstance(env.extends, list): + # # Merge all parents first + # merged_parent = None + # for parent_name in env.extends: + # parent = self._resolve_environment(parent_name, visited) + # if merged_parent is None: + # merged_parent = parent + # else: + # merged_parent = self._merge_environments(merged_parent, parent) + + # # Then merge with current environment + # return self._merge_environments(merged_parent, env) + + else: + raise ValueError(f"Invalid extends value for environment '{name}': {env.extends}") + + def _merge_environments(self, parent: Environment, child: Environment) -> Environment: + """Merge parent and child environments, with child taking precedence.""" + merged = Environment(child.name) + + # Merge run_target (child overrides parent) + merged.run_target = child.run_target if child.run_target is not None else parent.run_target + + # Merge meson options (child overrides parent) + merged.override_meson_options = parent.override_meson_options.copy() + merged.override_meson_options.update(child.override_meson_options) + + # Merge diag attributes (child overrides parent, not append) + # This prevents duplication when the same attribute is defined in both parent and child + merged.override_diag_attributes = parent.override_diag_attributes.copy() + + # Add child attributes, but avoid duplicates + for attr in child.override_diag_attributes: + # Check if this attribute (key part) already exists + attr_key = attr.split("=")[0] if "=" in attr else attr + existing_keys = [ + a.split("=")[0] if "=" in a else a for a in merged.override_diag_attributes 
+ ] + + if attr_key in existing_keys: + # Replace the existing attribute + for i, existing_attr in enumerate(merged.override_diag_attributes): + existing_key = ( + existing_attr.split("=")[0] if "=" in existing_attr else existing_attr + ) + if existing_key == attr_key: + merged.override_diag_attributes[i] = attr + break + else: + # Add new attribute + merged.override_diag_attributes.append(attr) + + return merged + + def load_from_yaml(self, yaml_content: str) -> None: + """Load environments from YAML content.""" + data = yaml.safe_load(yaml_content) + if not data or "environments" not in data: + raise ValueError("YAML content must contain an 'environments' section") + + for env_name, env_config in data["environments"].items(): + if not isinstance(env_config, dict): + raise ValueError(f"Environment '{env_name}' configuration must be a dictionary") + + env = Environment(env_name, **env_config) + self.register_environment(env) + + def load_from_file(self, file_path: str) -> None: + """Load environments from a YAML file.""" + if not os.path.exists(file_path): + raise FileNotFoundError(f"Environment file not found: {file_path}") + + with open(file_path) as f: + yaml_content = f.read() + + self.load_from_yaml(yaml_content) + + def get_inheritance_chain(self, name: str) -> List[str]: + """Get the inheritance chain for an environment (for debugging/display).""" + chain = [] + visited = set() + + def _build_chain(env_name: str): + if env_name in visited: + return + visited.add(env_name) + + if env_name not in self.environments: + return + + env = self.environments[env_name] + if env.extends: + if isinstance(env.extends, str): + _build_chain(env.extends) + elif isinstance(env.extends, list): + for parent in env.extends: + _build_chain(parent) + + chain.append(env_name) + + _build_chain(name) + return chain + + +def get_environment_manager() -> EnvironmentManager: + """Create the default environment manager by loading from environments.yaml.""" + manager = 
EnvironmentManager() + + # Load from the environments.yaml file in the same directory as this script + env_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "environments.yaml") + + manager.load_from_file(env_file_path) + return manager + + +def format_environment_list(manager: EnvironmentManager) -> str: + """Format a list of all visible environments for display.""" + output = ["Available environments:", "=" * 50] + + for env_name in sorted(manager.list_visible_environments().keys()): + try: + resolved_env = manager.get_environment(env_name) + inheritance_chain = manager.get_inheritance_chain(env_name) + + output.append(f"\n{env_name}:") + output.append(f" Run Target: {resolved_env.run_target}") + + if len(inheritance_chain) > 1: + chain_str = " -> ".join(inheritance_chain[:-1]) # Exclude self + output.append(f" Inheritance: {chain_str}") + + if resolved_env.override_meson_options: + output.append(" Meson Options:") + for key, value in resolved_env.override_meson_options.items(): + output.append(f" {key}: {value}") + + if resolved_env.override_diag_attributes: + output.append(" Diag Attributes:") + for attr in resolved_env.override_diag_attributes: + output.append(f" {attr}") + + except Exception as e: + output.append(f"\n{env_name}: ERROR - {e}") + + return "\n".join(output) diff --git a/scripts/build_tools/environments.yaml b/scripts/build_tools/environments.yaml new file mode 100644 index 00000000..1cf8252a --- /dev/null +++ b/scripts/build_tools/environments.yaml @@ -0,0 +1,26 @@ +# SPDX-FileCopyrightText: 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +# Environment configurations for build_diag.py +# Each environment can extend other environments to inherit their configurations +# Child environments override parent configurations + +environments: + # Build targets + # These are not currently used directly so they are hidden. + # The run targets extend these. 
+ + fw-none: + hidden: true + override_meson_options: + riscv_priv_modes_enabled: [mmode, smode, umode] + boot_config: fw-none + + # Run targets + spike: + extends: fw-none + run_target: spike + override_meson_options: + diag_generate_disassembly: true + generate_trace: true diff --git a/scripts/build_tools/meson.py b/scripts/build_tools/meson.py index f2b6016b..da0ee9e5 100644 --- a/scripts/build_tools/meson.py +++ b/scripts/build_tools/meson.py @@ -1,180 +1,128 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 +import json import logging as log import os -import random -import shutil +import pprint +import subprocess import sys -import tempfile - -import yaml +from typing import Any, Dict, List sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))) from data_structures import DictUtils # noqa from system import functions as system_functions # noqa -def convert_hart_mask_to_num_active_harts(hart_mask): - num_harts = 0 - hart_mask = int(hart_mask, 2) - while hart_mask != 0: - # We don't expect gaps in the hart mask at this point. 
- assert hart_mask & 1 - num_harts += 1 - hart_mask >>= 1 - return num_harts +class MesonBuildError(Exception): + """Custom exception for Meson build failures.""" + + def __init__(self, message, return_code=1): + self.message = message + self.return_code = return_code + super().__init__(self.message) + + +def quote_if_needed(x): + x_str = str(x) + if (x_str.startswith("'") and x_str.endswith("'")) or ( + x_str.startswith('"') and x_str.endswith('"') + ): + return x_str + return f"'{x_str}'" class Meson: + supported_toolchains: List[str] = ["gcc"] + def __init__( self, - jumpstart_dir, - diag_build_target, - keep_meson_builddir, + toolchain: str, + jumpstart_dir: str, + diag_name: str, + diag_sources: List[str], + diag_attributes_yaml: str, + builddir: str, ) -> None: self.meson_builddir = None - self.keep_meson_builddir = None + + assert toolchain in self.supported_toolchains + self.toolchain = toolchain if not os.path.exists(jumpstart_dir): raise Exception(f"Jumpstart directory does not exist: {jumpstart_dir}") - self.jumpstart_dir = os.path.abspath(jumpstart_dir) - self.diag_build_target = diag_build_target + self.diag_name = diag_name - self.diag_binary_name = self.diag_build_target.diag_source.diag_name + ".elf" + self.meson_options: Dict[str, Any] = {} - self.meson_options = {} + # Ensure build directory exists and is absolute + if not os.path.isabs(builddir): + builddir = os.path.abspath(builddir) + if not os.path.exists(builddir): + raise Exception(f"Meson build directory does not exist: {builddir}") + self.meson_builddir = builddir - self.meson_builddir = tempfile.mkdtemp( - prefix=f"{self.diag_build_target.diag_source.diag_name}_meson_builddir_" + self.setup_default_meson_options( + diag_sources, + diag_attributes_yaml, ) - self.keep_meson_builddir = keep_meson_builddir - - system_functions.create_empty_directory(self.diag_build_target.build_dir) - - if self.diag_build_target.rng_seed is None: - self.diag_build_target.rng_seed = 
random.randrange(sys.maxsize) - log.debug( - f"Diag: {self.diag_build_target.diag_source.diag_name} Seeding builder RNG with: {self.diag_build_target.rng_seed}" - ) - self.rng = random.Random(self.diag_build_target.rng_seed) - - def __del__(self): - if self.meson_builddir is not None and self.keep_meson_builddir is False: - log.debug(f"Removing meson build directory: {self.meson_builddir}") - shutil.rmtree(self.meson_builddir) - - def get_active_hart_mask(self): - active_hart_mask = None - - # 1. If the diag has an active_hart_mask defined, set active_hart_mask to that. - active_hart_mask = self.diag_build_target.diag_source.active_hart_mask - - # NOTE: The active_hart_mask can only be overriden if allow_active_hart_mask_override is set to True in the diag. - # 2. If the --active_hart_mask_override is specified on the command line, set active_hart_mask to active_hart_mask_override. - if self.diag_build_target.active_hart_mask_override is not None: - if active_hart_mask is not None: - log.warning( - f"Overriding active_hart_mask {active_hart_mask} with: {self.diag_build_target.active_hart_mask_override}" - ) - active_hart_mask = self.diag_build_target.active_hart_mask_override - - return active_hart_mask - - def setup_default_meson_options(self): - self.meson_options["diag_name"] = self.diag_binary_name - self.meson_options["diag_sources"] = self.diag_build_target.diag_source.get_sources() - self.meson_options["diag_attributes_yaml"] = ( - self.diag_build_target.diag_source.get_diag_attributes_yaml() - ) - self.meson_options["boot_config"] = self.diag_build_target.boot_config + def setup_default_meson_options( + self, + diag_sources: List[str], + diag_attributes_yaml: str, + ) -> None: + self.meson_options["diag_name"] = self.diag_name + self.meson_options["diag_sources"] = diag_sources + self.meson_options["diag_attributes_yaml"] = diag_attributes_yaml self.meson_options["diag_attribute_overrides"] = [] + # Default buildtype. 
Can be overridden by YAML or CLI meson option overrides. + self.meson_options["buildtype"] = "release" + self.meson_options["spike_additional_arguments"] = [] + self.meson_options["qemu_additional_arguments"] = [] - self.meson_options["diag_target"] = self.diag_build_target.target - if self.diag_build_target.target == "spike": - self.meson_options["spike_binary"] = "spike" - self.meson_options["generate_trace"] = "true" + self.trace_file = f"{self.meson_builddir}/{self.diag_name}.itrace" - self.spike_trace_file = ( - f"{self.meson_builddir}/{self.diag_build_target.diag_source.diag_name}.itrace" - ) - self.meson_options["spike_additional_arguments"].append( - f"--log={self.spike_trace_file}" - ) + # Override rig_path option if the RIG_ROOT env variable is set from loading the + # rivos-sdk/rig module our sourcing rig_env.sh. + if os.getenv("RIG_ROOT") is not None: + self.meson_options["rig_path"] = os.getenv("RIG_ROOT") - elif self.diag_build_target.target == "qemu": - self.meson_options["qemu_additional_arguments"] = [] + def override_meson_options_from_dict(self, overrides_dict: Dict[str, Any]) -> None: + if overrides_dict is None: + return + DictUtils.override_dict(self.meson_options, overrides_dict, False, True) - trace_file_name = f"{self.diag_build_target.diag_source.diag_name}.qemu.trace" - self.qemu_trace_file = f"{self.meson_builddir}/{trace_file_name}" + def get_meson_options(self) -> Dict[str, Any]: + """Return the current Meson options as a dict.""" + return self.meson_options - self.meson_options["qemu_additional_arguments"].extend( - [ - "--var", - f"out:{self.meson_builddir}", - "--var", - f"ap-logfile:{trace_file_name}", - ] - ) - else: - raise Exception(f"Unknown target: {self.diag_build_target.target}") + def get_meson_options_pretty(self, width: int = 120, spacing: str = "") -> str: + """Return a pretty-printed string of the Meson options. 
- active_hart_mask = self.get_active_hart_mask() - if active_hart_mask is not None: - self.meson_options["diag_attribute_overrides"].append( - f"active_hart_mask={active_hart_mask}" - ) - if self.diag_build_target.target == "spike": - self.meson_options["spike_additional_arguments"].append( - f"-p{convert_hart_mask_to_num_active_harts(active_hart_mask)}" - ) - - if self.diag_build_target.diag_attributes_cmd_line_overrides is not None: - self.meson_options["diag_attribute_overrides"].extend( - self.diag_build_target.diag_attributes_cmd_line_overrides - ) - - def apply_meson_option_overrides_from_diag(self): - if self.diag_build_target.diag_source.get_meson_options_override_yaml() is not None: - with open(self.diag_build_target.diag_source.get_meson_options_override_yaml()) as f: - meson_option_overrides = yaml.safe_load(f) - DictUtils.override_dict(self.meson_options, meson_option_overrides, False, True) - - def apply_meson_option_overrides_from_cmd_line(self): - if self.diag_build_target.meson_options_cmd_line_overrides is not None: - DictUtils.override_dict( - self.meson_options, - DictUtils.create_dict(self.diag_build_target.meson_options_cmd_line_overrides), - False, - True, - ) + spacing: A prefix added to each line to control left padding in callers. 
+ """ + formatted = pprint.pformat(self.meson_options, width=width) + if spacing: + return "\n".join(f"{spacing}{line}" for line in formatted.splitlines()) + return formatted def setup(self): - log.debug( - f"Running meson setup for diag: {self.diag_build_target.diag_source.get_diag_src_dir()}" - ) - self.meson_setup_flags = {} - self.meson_setup_flags["--buildtype"] = self.diag_build_target.buildtype - self.meson_setup_flags["-Ddiag_generate_disassembly"] = "true" - - self.setup_default_meson_options() - self.apply_meson_option_overrides_from_diag() - self.apply_meson_option_overrides_from_cmd_line() - for option in self.meson_options: if isinstance(self.meson_options[option], list): if len(self.meson_options[option]) == 0: continue self.meson_setup_flags[f"-D{option}"] = ( - "[" + ",".join(f"'{x}'" for x in self.meson_options[option]) + "]" + "[" + ",".join(quote_if_needed(x) for x in self.meson_options[option]) + "]" ) + elif isinstance(self.meson_options[option], bool): + self.meson_setup_flags[f"-D{option}"] = str(self.meson_options[option]).lower() else: self.meson_setup_flags[f"-D{option}"] = self.meson_options[option] @@ -185,84 +133,120 @@ def setup(self): meson_setup_command.extend( [ "--cross-file", - f"cross_compile/public/{self.diag_build_target.toolchain}_options.txt", + os.path.join( + self.jumpstart_dir, f"cross_compile/public/{self.toolchain}_options.txt" + ), "--cross-file", - f"cross_compile/{self.diag_build_target.toolchain}.txt", + os.path.join(self.jumpstart_dir, f"cross_compile/{self.toolchain}.txt"), ] ) - log.debug(f"Running meson setup command: {meson_setup_command}") - system_functions.run_command(meson_setup_command, self.jumpstart_dir) + log.debug("Meson options:\n%s", self.get_meson_options_pretty(spacing="\t")) - if self.keep_meson_builddir is True: - self.diag_build_target.add_build_asset( - "meson_builddir", self.meson_builddir, None, True - ) + # Print the meson setup command in a format that can be copy-pasted to + # reproduce the 
build. + printable_meson_setup_command = " ".join(meson_setup_command) + printable_meson_setup_command = printable_meson_setup_command.replace("'", "\\'") + log.debug(f"meson setup: {self.diag_name}") + log.debug(printable_meson_setup_command) + return_code = system_functions.run_command(meson_setup_command, self.jumpstart_dir) + if return_code != 0: + error_msg = f"meson setup failed. Check: {self.meson_builddir}" + log.error(error_msg) + raise MesonBuildError(error_msg, return_code) def compile(self): - log.debug( - f"Running meson compile for diag: {self.diag_build_target.diag_source.get_diag_src_dir()}" - ) - - meson_compile_command = ["meson", "compile", "-C", self.meson_builddir] - system_functions.run_command(meson_compile_command, self.jumpstart_dir) - - diag_binary = os.path.join(self.meson_builddir, self.diag_binary_name) - if not os.path.exists(diag_binary): - raise Exception("diag binary not created by meson compile") - - diag_disasm = os.path.join(self.meson_builddir, self.diag_binary_name + ".dis") - if not os.path.exists(diag_disasm): - raise Exception("diag disasm not created by meson compile") - - self.diag_build_target.add_build_asset("disasm", diag_disasm) - self.diag_build_target.add_build_asset("binary", diag_binary) - log.debug(f"Diag compiled: {self.diag_build_target.get_build_asset('binary')}") - log.debug(f"Diag disassembly: {self.diag_build_target.get_build_asset('disasm')}") + meson_compile_command = ["meson", "compile", "-v", "-C", self.meson_builddir] + log.debug(f"meson compile: {self.diag_name}") + log.debug(" ".join(meson_compile_command)) + return_code = system_functions.run_command(meson_compile_command, self.jumpstart_dir) + + diag_elf = os.path.join(self.meson_builddir, self.diag_name + ".elf") + diag_disasm = os.path.join(self.meson_builddir, self.diag_name + ".dis") + + if return_code == 0: + if not os.path.exists(diag_elf): + error_msg = f"diag elf not created by meson compile. 
Check: {self.meson_builddir}" + raise MesonBuildError(error_msg) + + if return_code != 0: + error_msg = f"Compile failed. Check: {self.meson_builddir}" + log.error(error_msg) + raise MesonBuildError(error_msg, return_code) + + compiled_assets = {} + if os.path.exists(diag_disasm): + compiled_assets["disasm"] = diag_disasm + if os.path.exists(diag_elf): + compiled_assets["elf"] = diag_elf + return compiled_assets def test(self): - log.debug( - f"Running meson test for diag: {self.diag_build_target.diag_source.get_diag_src_dir()}" - ) - - meson_test_command = ["meson", "test", "-C", self.meson_builddir] - system_functions.run_command(meson_test_command, self.jumpstart_dir) - - if self.diag_build_target.target == "spike": - if not os.path.exists(self.spike_trace_file): - raise Exception( - f"Spike trace file not created by meson test: {self.spike_trace_file}" - ) - self.diag_build_target.add_build_asset("spike_trace", self.spike_trace_file) - log.debug(f"Diag trace file: {self.diag_build_target.get_build_asset('spike_trace')}") - elif self.diag_build_target.target == "qemu": - if not os.path.exists(self.qemu_trace_file): - raise Exception( - f"Qemu trace file not created by meson test: {self.qemu_trace_file}" - ) - self.diag_build_target.add_build_asset("qemu_trace", self.qemu_trace_file) - log.debug(f"Diag trace file: {self.diag_build_target.get_build_asset('qemu_trace')}") - - def get_generated_diag(self): - return self.diag_build_target - - -def build_jumpstart_diag( - jumpstart_dir, - diag_build_target, - disable_diag_run=False, - keep_meson_builddir=False, -): - meson = Meson(jumpstart_dir, diag_build_target, keep_meson_builddir) - - meson.setup() - meson.compile() - - if disable_diag_run is True: - log.warning( - f"Skipping running diag {diag_build_target.diag_source.diag_name} on target {diag_build_target.target} as diag run is disabled." 
- ) - else: - meson.test() - - return meson.get_generated_diag() + meson_test_command = ["meson", "test", "-v", "-C", self.meson_builddir] + log.debug(f"meson test: {self.diag_name}") + log.debug(" ".join(meson_test_command)) + return_code = system_functions.run_command(meson_test_command, self.jumpstart_dir) + + run_assets = {} + + generate_trace = bool(self.meson_options.get("generate_trace", False)) + if generate_trace: + if return_code == 0 and not os.path.exists(self.trace_file): + error_msg = f"Run passed but trace file not created. Check: {self.meson_builddir}" + raise MesonBuildError(error_msg) + + run_assets["trace"] = self.trace_file + elif self.trace_file and os.path.exists(self.trace_file): + error_msg = f"Trace generation was disabled but trace file {self.trace_file} created. Check: {self.meson_builddir}" + raise MesonBuildError(error_msg) + + if return_code != 0: + error_msg = f"Run failed. Check: {self.meson_builddir}" + log.error(error_msg) + raise MesonBuildError(error_msg, return_code) + + return run_assets + + def introspect(self): + """Run meson introspect and store the build options.""" + # --- Run meson introspect and store build options --- + + # Use subprocess.run to run the introspect command and capture output + introspect_cmd = ["meson", "introspect", self.meson_builddir, "--buildoptions"] + log.debug(f"Running meson introspect: {' '.join(introspect_cmd)}") + try: + result = subprocess.run( + introspect_cmd, + cwd=self.jumpstart_dir, + capture_output=True, + text=True, + check=False, + ) + result_code = result.returncode + result_out = result.stdout + except Exception as e: + log.error(f"Failed to run meson introspect command: {e}") + result_code = 1 + result_out = "" + + if result_code != 0: + error_msg = f"meson introspect failed. 
Check: {self.meson_builddir}" + log.error(error_msg) + raise MesonBuildError(error_msg, result_code) + + try: + options = json.loads(result_out) + meson_options = {} + for opt in options: + # Only store user options (not built-in) + if opt.get("section") == "user": + meson_options[opt["name"]] = opt["value"] + + # Replace the current meson options with the introspect options + self.meson_options = meson_options + + log.debug(f"Meson introspect options: {self.meson_options}") + except Exception as e: + error_msg = f"Failed to parse meson introspect output: {e}" + log.error(error_msg) + raise MesonBuildError(error_msg) diff --git a/scripts/data_structures/__init__.py b/scripts/data_structures/__init__.py index 7eb814b9..68adbc1b 100644 --- a/scripts/data_structures/__init__.py +++ b/scripts/data_structures/__init__.py @@ -1,10 +1,11 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 # __init__.py from .bitfield_utils import BitField +from .cstruct import CStruct, CStructField from .dict_utils import DictUtils from .list_utils import ListUtils @@ -13,4 +14,4 @@ # To better support introspection, modules should explicitly declare # the names in their public API using the __all__ attribute. -__all__ = ["BitField", "DictUtils", "ListUtils"] +__all__ = ["BitField", "CStruct", "CStructField", "DictUtils", "ListUtils"] diff --git a/scripts/data_structures/bitfield_utils.py b/scripts/data_structures/bitfield_utils.py index 6b794d7e..82e1875d 100644 --- a/scripts/data_structures/bitfield_utils.py +++ b/scripts/data_structures/bitfield_utils.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 @@ -16,3 +16,44 @@ def place_bits(value, bits, bit_range): msb = bit_range[0] lsb = bit_range[1] return (value & ~(((1 << (msb - lsb + 1)) - 1) << lsb)) | (bits << lsb) + + @staticmethod + def find_lowest_set_bit(value): + """ + Find the position of the lowest set bit (0-indexed). + + Args: + value (int): The integer value to search + + Returns: + int: The position of the lowest set bit (0-indexed), or -1 if no bits are set + + Examples: + find_lowest_set_bit(0b1010) -> 1 # bit 1 is the lowest set bit + find_lowest_set_bit(0b1000) -> 3 # bit 3 is the lowest set bit + find_lowest_set_bit(0b0000) -> -1 # no bits are set + """ + if value == 0: + return -1 + return (value & -value).bit_length() - 1 + + @staticmethod + def find_highest_set_bit(value): + """ + Find the position of the highest set bit (0-indexed). + + Args: + value (int): The integer value to search + + Returns: + int: The position of the highest set bit (0-indexed), or -1 if no bits are set + + Examples: + find_highest_set_bit(0b1010) -> 3 # bit 3 is the highest set bit + find_highest_set_bit(0b1000) -> 3 # bit 3 is the highest set bit + find_highest_set_bit(0b0001) -> 0 # bit 0 is the highest set bit + find_highest_set_bit(0b0000) -> -1 # no bits are set + """ + if value == 0: + return -1 + return value.bit_length() - 1 diff --git a/scripts/data_structures/cstruct.py b/scripts/data_structures/cstruct.py new file mode 100644 index 00000000..43985d94 --- /dev/null +++ b/scripts/data_structures/cstruct.py @@ -0,0 +1,65 @@ +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 + +"""C struct representation and manipulation utilities.""" + +field_type_to_size_in_bytes = { + "uint8_t": 1, + "uint16_t": 2, + "uint32_t": 4, + "uint64_t": 8, +} + + +class CStructField: + """Represents a single field in a C struct.""" + + def __init__(self, name, field_type, num_elements=1): + self.name = name + self.field_type = field_type + self.num_elements = num_elements + self.size_in_bytes = field_type_to_size_in_bytes[field_type] + + +class CStruct: + """Represents a C struct with its fields and metadata.""" + + def __init__(self, name, fields_data): + self.name = name + self.fields = [] + self.size_in_bytes = 0 + self.alignment = 8 # Hardcoded to 8-byte alignment + self._parse_fields(fields_data) + self._calculate_offsets_and_size() + + def _parse_fields(self, fields_data): + """Parse field data from YAML into CStructField objects.""" + for field_name, field_spec in fields_data.items(): + if "," in field_spec: + field_type, num_elements = field_spec.split(",") + num_elements = int(num_elements.strip()) + else: + field_type = field_spec + num_elements = 1 + + field = CStructField(field_name, field_type.strip(), num_elements) + self.fields.append(field) + + def _calculate_offsets_and_size(self): + """Calculate field offsets and total struct size.""" + current_offset = 0 + + for field in self.fields: + # Align field to its natural boundary + while (current_offset % field.size_in_bytes) != 0: + current_offset += 1 + + field.offset = current_offset + current_offset += field.size_in_bytes * field.num_elements + + # Align struct to specified boundary + while (current_offset % self.alignment) != 0: + current_offset += 1 + + self.size_in_bytes = current_offset diff --git a/scripts/data_structures/dict_utils.py b/scripts/data_structures/dict_utils.py index c5e63da2..c0bf2ebe 100644 --- a/scripts/data_structures/dict_utils.py +++ b/scripts/data_structures/dict_utils.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2024 Rivos 
Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 @@ -10,9 +10,10 @@ def override_dict( original_dict, overrides_dict, original_is_superset=True, append_to_lists=False ): if original_is_superset is True: - assert set(original_dict.keys()).issuperset( - set(overrides_dict.keys()) - ), "Overrides contain keys not present in the original dictionary" + extra_keys = set(overrides_dict.keys()) - set(original_dict.keys()) + assert ( + not extra_keys + ), f"Overrides contain keys not present in the original dictionary: {extra_keys}" if append_to_lists is False: original_dict.update(overrides_dict) @@ -32,6 +33,14 @@ def create_dict(overrides_list): # Split at the first '=' name_value_pair = override.split("=", 1) + # Check if the split resulted in exactly 2 parts (key and value) + if len(name_value_pair) != 2: + raise ValueError( + f"Invalid override format: '{override}'. " + f"Expected format is 'key=value', but no '=' found. " + f"Example: 'generate_trace=true'" + ) + attribute_name = name_value_pair[0] attribute_value = name_value_pair[1] diff --git a/scripts/data_structures/list_utils.py b/scripts/data_structures/list_utils.py index a7dc6788..de5b6ec5 100644 --- a/scripts/data_structures/list_utils.py +++ b/scripts/data_structures/list_utils.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/scripts/generate_diag_sources.py b/scripts/generate_diag_sources.py index 44496aba..a5709721 100755 --- a/scripts/generate_diag_sources.py +++ b/scripts/generate_diag_sources.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 @@ -11,11 +11,13 @@ import math import os import sys +from enum import Enum import public.functions as public_functions import yaml -from data_structures import BitField, DictUtils, ListUtils +from data_structures import BitField, CStruct, DictUtils, ListUtils from memory_management import ( + AddressType, LinkerScript, MemoryMapping, PageSize, @@ -24,6 +26,7 @@ TranslationMode, TranslationStage, ) +from utils.napot_utils import align_to_napot_size, get_next_napot_size try: import rivos_internal.functions as rivos_internal_functions @@ -31,30 +34,55 @@ log.debug("rivos_internal Python module not present.") +class MemoryOp(Enum): + LOAD = 1 + STORE = 2 + + +def get_memop_of_size(memory_op_type, size_in_bytes): + if memory_op_type == MemoryOp.LOAD: + op = "l" + elif memory_op_type == MemoryOp.STORE: + op = "s" + else: + raise Exception(f"Invalid memory op type: {memory_op_type}") + + if size_in_bytes == 1: + return op + "b" + elif size_in_bytes == 2: + return op + "h" + elif size_in_bytes == 4: + return op + "w" + elif size_in_bytes == 8: + return op + "d" + else: + raise Exception(f"Invalid size: {size_in_bytes} bytes") + + class SourceGenerator: def __init__( self, jumpstart_source_attributes_yaml, - override_jumpstart_source_attributes, diag_attributes_yaml, override_diag_attributes, priv_modes_enabled, ): - self.priv_modes_enabled = priv_modes_enabled + self.linker_script = None + + self.priv_modes_enabled = None - self.process_source_attributes( - jumpstart_source_attributes_yaml, override_jumpstart_source_attributes + self.process_source_attributes(jumpstart_source_attributes_yaml) + + self.priv_modes_enabled = ListUtils.intersection( + self.jumpstart_source_attributes["priv_modes_supported"], + priv_modes_enabled, ) self.process_diag_attributes(diag_attributes_yaml, override_diag_attributes) self.process_memory_map() - self.create_page_tables_data() - - def process_source_attributes( - self, 
jumpstart_source_attributes_yaml, override_jumpstart_source_attributes - ): + def process_source_attributes(self, jumpstart_source_attributes_yaml): with open(jumpstart_source_attributes_yaml) as f: self.jumpstart_source_attributes = yaml.safe_load(f) @@ -76,13 +104,8 @@ def process_source_attributes( f"rivos_internal/ exists but rivos_internal_build is set to False in {jumpstart_source_attributes_yaml}" ) - if override_jumpstart_source_attributes: - # Override the default jumpstart source attribute values with the values - # specified on the command line. - DictUtils.override_dict( - self.jumpstart_source_attributes, - DictUtils.create_dict(override_jumpstart_source_attributes), - ) + # Parse C structs once and store them for later use + self.c_structs = self._parse_c_structs() def process_diag_attributes(self, diag_attributes_yaml, override_diag_attributes): self.diag_attributes_yaml = diag_attributes_yaml @@ -95,62 +118,221 @@ def process_diag_attributes(self, diag_attributes_yaml, override_diag_attributes self.jumpstart_source_attributes["diag_attributes"], diag_attributes ) + # Set the default diag entry label to start label of the highest privilege mode. + if self.jumpstart_source_attributes["diag_attributes"]["diag_entry_label"] is None: + self.jumpstart_source_attributes["diag_attributes"][ + "diag_entry_label" + ] = f"_{self.priv_modes_enabled[0]}_start" + if override_diag_attributes is not None: # Override the diag attributes with the values specified on the # command line. 
+ cmd_line_diag_attribute_override_dict = DictUtils.create_dict(override_diag_attributes) DictUtils.override_dict( self.jumpstart_source_attributes["diag_attributes"], - DictUtils.create_dict(override_diag_attributes), + cmd_line_diag_attribute_override_dict, ) TranslationStage.set_virtualization_enabled( self.jumpstart_source_attributes["diag_attributes"]["enable_virtualization"] ) + self.jumpstart_source_attributes["diag_attributes"]["active_cpu_mask"] = int( + self.jumpstart_source_attributes["diag_attributes"]["active_cpu_mask"], 2 + ) + + active_cpu_mask = self.jumpstart_source_attributes["diag_attributes"]["active_cpu_mask"] + if self.jumpstart_source_attributes["diag_attributes"]["primary_cpu_id"] is None: + # Set the CPU with the lowest CPU ID as the primary CPU. + self.jumpstart_source_attributes["diag_attributes"]["primary_cpu_id"] = ( + BitField.find_lowest_set_bit(active_cpu_mask) + ) + + self.max_num_cpus_supported = BitField.find_highest_set_bit(active_cpu_mask) + 1 + self.sanity_check_diag_attributes() + for stage in TranslationStage.get_enabled_stages(): + TranslationStage.set_selected_mode_for_stage( + stage, + self.jumpstart_source_attributes["diag_attributes"][ + f"{TranslationStage.get_atp_register(stage)}_mode" + ], + ) + + def assign_addresses_to_mapping_for_stage(self, mapping_dict, stage): + if "page_size" not in mapping_dict: + raise Exception(f"page_size is not specified for mapping: {mapping_dict}") + if "pma_memory_type" not in mapping_dict: + raise Exception(f"pma_memory_type is not specified for mapping: {mapping_dict}") + + # We want to find the next available physical address for the mapping. + # All the MMUs share the same physical address space so we need to find + # the next available address that is not already used by another mapping. 
+ next_available_address = 0 + for target_mmu in MemoryMapping.get_supported_targets(): + if len(self.memory_map[target_mmu][stage]) == 0: + continue + temp_address = self.get_next_available_dest_addr_after_last_mapping( + target_mmu, stage, mapping_dict["page_size"], mapping_dict["pma_memory_type"] + ) + if temp_address > next_available_address: + next_available_address = temp_address + + if ( + self.jumpstart_source_attributes["diag_attributes"]["start_test_in_mmode"] is True + and mapping_dict.get("linker_script_section") is not None + and ".text" in mapping_dict["linker_script_section"].split(",") + ): + # Calculate the total size of the region + region_size = mapping_dict["page_size"] * mapping_dict["num_pages"] + + # Calculate the NAPOT size that will cover this region + napot_size = get_next_napot_size(region_size) + + # Align the address to the NAPOT size + next_available_address = align_to_napot_size(next_available_address, napot_size) + + if self.jumpstart_source_attributes["diag_attributes"]["satp_mode"] != "bare": + mapping_dict[TranslationStage.get_translates_from(stage)] = next_available_address + mapping_dict[TranslationStage.get_translates_to(stage)] = next_available_address + + return mapping_dict + + def has_no_addresses(self, mapping_dict): + """Check if a mapping has no address types set.""" + return not any( + address_type in mapping_dict and mapping_dict[address_type] is not None + for address_type in AddressType.get_all_address_types() + ) + + def get_sort_key_for_mapping(self, mapping_dict): + """Get a sort key for a mapping that sorts by page_size first, then by mappings that don't have addresses.""" + # Get page_size as the first sort criterion + page_size = mapping_dict.get("page_size", float("inf")) + + if self.has_no_addresses(mapping_dict): + # Mappings with no addresses come after page_size sorting + return ( + page_size, + 0, + ) + + # For mappings with addresses, sort by all address types in order + address_types = AddressType.types 
+ sort_values = [] + for address_type in address_types: + value = mapping_dict.get(address_type) + if value is not None: + sort_values.append(value) + else: + # Use a large number for None values to ensure they sort after valid values + sort_values.append(float("inf")) + + # Mappings with addresses come after those without (1), then sort by address values + return ( + page_size, + 1, + ) + tuple(sort_values) + + def sort_diag_mappings(self): + return sorted( + self.jumpstart_source_attributes["diag_attributes"]["mappings"], + key=self.get_sort_key_for_mapping, + ) + + def add_diag_sections_to_mappings(self): + for mapping_dict in self.sort_diag_mappings(): + if self.has_no_addresses(mapping_dict): + if ( + self.jumpstart_source_attributes["diag_attributes"]["enable_virtualization"] + is True + ): + raise ValueError( + f"The logic to assign addresses to mappings with no addresses specified in diags that enable virtualization is not implemented yet. Failed on mapping: {mapping_dict}" + ) + mapping_dict = self.assign_addresses_to_mapping_for_stage( + mapping_dict, TranslationStage.get_enabled_stages()[0] + ) + + for target_mmu in MemoryMapping(mapping_dict, self.max_num_cpus_supported).get_field( + "target_mmu" + ): + # We need a per stage memory mapping object. 
+ mapping = MemoryMapping(mapping_dict, self.max_num_cpus_supported) + + stage = mapping.get_field("translation_stage") + mapping.set_field("target_mmu", [target_mmu]) + + self.memory_map[target_mmu][stage].append(mapping) + def process_memory_map(self): - self.memory_map = {stage: [] for stage in TranslationStage.get_enabled_stages()} + self.memory_map = {} - for mapping in self.jumpstart_source_attributes["diag_attributes"]["mappings"]: - mapping = MemoryMapping(mapping) - self.memory_map[mapping.get_field("translation_stage")].append(mapping) + for supported_mmu in MemoryMapping.get_supported_targets(): + self.memory_map[supported_mmu] = {} + for stage in TranslationStage.get_enabled_stages(): + self.memory_map[supported_mmu][stage] = [] self.add_jumpstart_sections_to_mappings() - for stage in self.memory_map.keys(): - # Sort all the mappings by the destination address. - self.memory_map[stage] = sorted( - self.memory_map[stage], - key=lambda x: x.get_field(TranslationStage.get_translates_to(stage)), - reverse=False, - ) + self.add_diag_sections_to_mappings() + + for target_mmu in self.memory_map.keys(): + for stage in self.memory_map[target_mmu].keys(): + # Sort all the mappings by the destination address. 
+ self.memory_map[target_mmu][stage] = sorted( + self.memory_map[target_mmu][stage], + key=lambda x: x.get_field(TranslationStage.get_translates_to(stage)), + reverse=False, + ) if self.jumpstart_source_attributes["rivos_internal_build"] is True: - rivos_internal_functions.process_memory_map(self.memory_map) + rivos_internal_functions.process_cpu_memory_map( + self.memory_map["cpu"], self.jumpstart_source_attributes + ) self.sanity_check_memory_map() + self.create_page_tables_data() + def create_page_tables_data(self): self.page_tables = {} - for stage in TranslationStage.get_enabled_stages(): - self.page_tables[stage] = PageTables( - self.jumpstart_source_attributes["diag_attributes"][ - f"{TranslationStage.get_atp_register(stage)}_mode" - ], - self.jumpstart_source_attributes["diag_attributes"][ - "max_num_pagetable_pages_per_stage" - ], - self.memory_map[stage], - ) + for target_mmu in MemoryMapping.get_supported_targets(): + if target_mmu not in self.memory_map: + # Don't create page tables for MMUs that don't have any + # mappings. + continue + + self.page_tables[target_mmu] = {} + + for stage in TranslationStage.get_enabled_stages(): + translation_mode = TranslationStage.get_selected_mode_for_stage(stage) + if translation_mode == "bare": + # No pagetable mappings for the bare mode. 
+ continue + + self.page_tables[target_mmu][stage] = PageTables( + translation_mode, + self.jumpstart_source_attributes["diag_attributes"][ + "max_num_pagetable_pages_per_stage" + ], + self.memory_map[target_mmu][stage], + ) def sanity_check_memory_map(self): - public_functions.sanity_check_memory_map(self.memory_map) + public_functions.sanity_check_memory_map(self.memory_map["cpu"]) if self.jumpstart_source_attributes["rivos_internal_build"] is True: - rivos_internal_functions.sanity_check_memory_map(self.memory_map) + rivos_internal_functions.sanity_check_memory_map( + self.jumpstart_source_attributes["diag_attributes"], self.memory_map + ) def add_pagetable_mappings(self, start_address): + assert ( + start_address is not None and start_address >= 0 + ), f"Invalid start address for pagetables: {start_address}" + common_attributes = { "page_size": PageSize.SIZE_4K, "num_pages": self.jumpstart_source_attributes["diag_attributes"][ @@ -167,89 +349,125 @@ def add_pagetable_mappings(self, start_address): else: common_attributes["xwr"] = "0b001" - per_stage_pagetable_mappings = {} + for target_mmu in MemoryMapping.get_supported_targets(): + if target_mmu not in self.memory_map: + # Don't add pagetable mappings for MMUs that + # don't have any mappings. + continue - for stage in TranslationStage.get_enabled_stages(): - section_mapping = common_attributes.copy() - source_address_type = TranslationStage.get_translates_from(stage) - dest_address_type = TranslationStage.get_translates_to(stage) + per_stage_pagetable_mappings = {} - # The start of the pagetables have to be aligned to the size of the - # root (first level) page table. 
- translation_mode = self.jumpstart_source_attributes["diag_attributes"][ - f"{TranslationStage.get_atp_register(stage)}_mode" - ] - root_page_table_size = PageTableAttributes.mode_attributes[translation_mode][ - "pagetable_sizes" - ][0] - if (start_address % root_page_table_size) != 0: - start_address = ( - math.floor(start_address / root_page_table_size) + 1 - ) * root_page_table_size + for stage in TranslationStage.get_enabled_stages(): + translation_mode = TranslationStage.get_selected_mode_for_stage(stage) + if translation_mode == "bare": + # No pagetable mappings for the bare mode. + continue + + section_mapping = common_attributes.copy() + source_address_type = TranslationStage.get_translates_from(stage) + dest_address_type = TranslationStage.get_translates_to(stage) + + # The start of the pagetables have to be aligned to the size of the + # root (first level) page table. + root_page_table_size = PageTableAttributes.mode_attributes[translation_mode][ + "pagetable_sizes" + ][0] + if (start_address % root_page_table_size) != 0: + start_address = ( + math.floor(start_address / root_page_table_size) + 1 + ) * root_page_table_size + + section_mapping[source_address_type] = section_mapping[dest_address_type] = ( + start_address + ) - section_mapping[source_address_type] = section_mapping[dest_address_type] = ( - start_address - ) + section_mapping["translation_stage"] = stage + section_mapping["linker_script_section"] = ( + f".jumpstart.{target_mmu}.rodata.{stage}_stage.pagetables" + ) + section_mapping["target_mmu"] = [target_mmu] - section_mapping["translation_stage"] = stage - section_mapping["linker_script_section"] = f".jumpstart.rodata.{stage}_stage.pagetables" + per_stage_pagetable_mappings[stage] = MemoryMapping( + section_mapping, self.max_num_cpus_supported + ) - per_stage_pagetable_mappings[stage] = MemoryMapping(section_mapping) + self.memory_map[target_mmu][stage].insert( + len(self.memory_map[target_mmu][stage]), per_stage_pagetable_mappings[stage] + 
) - self.memory_map[stage].insert( - len(self.memory_map[stage]), per_stage_pagetable_mappings[stage] - ) + start_address += common_attributes["num_pages"] * common_attributes["page_size"] - start_address += common_attributes["num_pages"] * common_attributes["page_size"] + if "g" in TranslationStage.get_enabled_stages(): + vs_stage_memory_mapping = per_stage_pagetable_mappings["vs"].copy() - if "g" in TranslationStage.get_enabled_stages(): - vs_stage_memory_mapping = per_stage_pagetable_mappings["vs"].copy() + vs_stage_memory_mapping.set_field("translation_stage", "g") - vs_stage_memory_mapping.set_field("translation_stage", "g") + mapping_address = vs_stage_memory_mapping.get_field( + TranslationStage.get_translates_to("vs") + ) + vs_stage_memory_mapping.set_field(TranslationStage.get_translates_from("vs"), None) + vs_stage_memory_mapping.set_field(TranslationStage.get_translates_to("vs"), None) + vs_stage_memory_mapping.set_field( + TranslationStage.get_translates_from("g"), mapping_address + ) + vs_stage_memory_mapping.set_field( + TranslationStage.get_translates_to("g"), mapping_address + ) - start_address = vs_stage_memory_mapping.get_field( - TranslationStage.get_translates_to("vs") - ) - vs_stage_memory_mapping.set_field(TranslationStage.get_translates_from("vs"), None) - vs_stage_memory_mapping.set_field(TranslationStage.get_translates_to("vs"), None) - vs_stage_memory_mapping.set_field( - TranslationStage.get_translates_from("g"), start_address - ) - vs_stage_memory_mapping.set_field( - TranslationStage.get_translates_to("g"), start_address - ) + vs_stage_memory_mapping.set_field("umode", 1) - vs_stage_memory_mapping.set_field("umode", 1) + self.memory_map[target_mmu]["g"].insert( + len(self.memory_map[target_mmu]["g"]), vs_stage_memory_mapping + ) - self.memory_map["g"].insert(len(self.memory_map["g"]), vs_stage_memory_mapping) + # Adds G-stage pagetable memory region into hs stage memory map to + # allow HS-mode to access G-stage pagetables. 
+ if target_mmu == "cpu" and "g" in TranslationStage.get_enabled_stages(): + mapping = per_stage_pagetable_mappings["g"].copy() + mapping.set_field("translation_stage", "hs") + mapping.set_field("va", mapping.get_field("gpa")) + mapping.set_field("pa", mapping.get_field("spa")) + mapping.set_field("gpa", None) + mapping.set_field("spa", None) + self.memory_map[target_mmu]["hs"].insert( + len(self.memory_map[target_mmu]["hs"]), mapping + ) - for stage in TranslationStage.get_enabled_stages(): - self.add_pa_guard_page_after_last_mapping(stage) + # Adds VS-stage pagetable memory region into hs stage memory map to + # allow HS-mode to access VS-stage pagetables. + if target_mmu == "cpu" and "vs" in TranslationStage.get_enabled_stages(): + mapping = per_stage_pagetable_mappings["vs"].copy() + mapping.set_field("translation_stage", "hs") + mapping.set_field("pa", mapping.get_field("gpa")) + mapping.set_field("gpa", None) + self.memory_map[target_mmu]["hs"].insert( + len(self.memory_map[target_mmu]["hs"]), mapping + ) def add_jumpstart_sections_to_mappings(self): + target_mmu = "cpu" pagetables_start_address = 0 + for stage in TranslationStage.get_enabled_stages(): if self.jumpstart_source_attributes["rivos_internal_build"] is True: - self.memory_map[stage].extend( + self.memory_map[target_mmu][stage].extend( rivos_internal_functions.get_additional_mappings( + target_mmu, stage, self.jumpstart_source_attributes, ) ) - for mode in ListUtils.intersection( - self.jumpstart_source_attributes["priv_modes_supported"], self.priv_modes_enabled - ): - self.add_jumpstart_mode_mappings_for_stage(stage, mode) + for mode in self.priv_modes_enabled: + self.add_jumpstart_cpu_mode_mappings(target_mmu, stage, mode) - # Pagetables for each stage are placed consecutively in the physical address - # space. We will place the pagetables after the last physical address - # used by the jumpstart mappings in any stage. 
- next_available_dest_address = self.get_next_available_dest_addr_after_last_mapping( - stage, PageSize.SIZE_4K, "wb" - ) - if next_available_dest_address > pagetables_start_address: - pagetables_start_address = next_available_dest_address + # We will place the pagetables for all MMUs after the last + # physical address used by the CPU jumpstart mappings. + next_available_dest_address = self.get_next_available_dest_addr_after_last_mapping( + target_mmu, stage, PageSize.SIZE_4K, "wb" + ) + if next_available_dest_address > pagetables_start_address: + pagetables_start_address = next_available_dest_address self.add_pagetable_mappings(pagetables_start_address) @@ -266,9 +484,23 @@ def sanity_check_diag_attributes(self): self.jumpstart_source_attributes["diag_attributes"] ) - def get_next_available_dest_addr_after_last_mapping(self, stage, page_size, pma_memory_type): - previous_mapping_id = len(self.memory_map[stage]) - 1 - previous_mapping = self.memory_map[stage][previous_mapping_id] + assert ( + self.jumpstart_source_attributes["diag_attributes"]["active_cpu_mask"].bit_count() + <= self.max_num_cpus_supported + ) + primary_cpu_id = int(self.jumpstart_source_attributes["diag_attributes"]["primary_cpu_id"]) + assert ( + self.jumpstart_source_attributes["diag_attributes"]["active_cpu_mask"] + & (1 << primary_cpu_id) + ) != 0 + + def get_next_available_dest_addr_after_last_mapping( + self, target_mmu, stage, page_size, pma_memory_type + ): + assert len(self.memory_map[target_mmu][stage]) > 0, "No previous mappings found." 
+ + previous_mapping_id = len(self.memory_map[target_mmu][stage]) - 1 + previous_mapping = self.memory_map[target_mmu][stage][previous_mapping_id] previous_mapping_size = previous_mapping.get_field( "page_size" @@ -287,7 +519,7 @@ def get_next_available_dest_addr_after_last_mapping(self, stage, page_size, pma_ return next_available_pa - def add_jumpstart_mode_mappings_for_stage(self, stage, mode): + def add_jumpstart_cpu_mode_mappings(self, cpu_mmu, stage, mode): area_name = f"jumpstart_{mode}" area_start_address_attribute_name = f"{mode}_start_address" @@ -311,24 +543,32 @@ def add_jumpstart_mode_mappings_for_stage(self, stage, mode): for section_name in self.jumpstart_source_attributes[area_name]: section_mapping = self.jumpstart_source_attributes[area_name][section_name].copy() + section_mapping["target_mmu"] = [cpu_mmu] section_mapping["translation_stage"] = stage - # This is where we pick up num_pages_for_jumpstart_*mode_* attributes from the diag_attributes - # Example: num_pages_for_jumpstart_smode_bss, num_pages_for_jumpstart_smode_rodata, etc. - num_pages_diag_attribute_name = f"num_pages_for_{area_name}_{section_name}" - if ( - "num_pages" in section_mapping - and num_pages_diag_attribute_name - in self.jumpstart_source_attributes["diag_attributes"] - ): - raise Exception( - f"num_pages specified for {section_name} in {area_name} and {num_pages_diag_attribute_name} specified in diag_attributes." - ) + if TranslationStage.get_selected_mode_for_stage(stage) == "bare": + section_mapping["no_pte_allocation"] = True + section_mapping.pop("xwr", None) + section_mapping.pop("umode", None) + + for attribute in ["num_pages", "page_size", "num_pages_per_cpu"]: + # This is where we allow the diag to override the attributes of jumpstart sections. + # We can change the page size and num_pages of the section. + # Example: num_pages_for_jumpstart_smode_bss, num_pages_for_jumpstart_mmode_rodata, + # num_pages_per_cpu_for_jumpstart_smode_bss, etc. 
+ attribute_name = f"{attribute}_for_{area_name}_{section_name}" + if ( + attribute in section_mapping + and attribute_name in self.jumpstart_source_attributes["diag_attributes"] + ): + raise Exception( + f"{attribute} specified for {section_name} in {area_name} and {attribute_name} specified in diag_attributes." + ) - if num_pages_diag_attribute_name in self.jumpstart_source_attributes["diag_attributes"]: - section_mapping["num_pages"] = self.jumpstart_source_attributes["diag_attributes"][ - num_pages_diag_attribute_name - ] + if attribute_name in self.jumpstart_source_attributes["diag_attributes"]: + section_mapping[attribute] = self.jumpstart_source_attributes[ + "diag_attributes" + ][attribute_name] dest_address_type = TranslationStage.get_translates_to(stage) assert dest_address_type not in section_mapping @@ -339,17 +579,21 @@ def add_jumpstart_mode_mappings_for_stage(self, stage, mode): area_start_pa = None else: # We're going to start the PA of the new mapping after the PA range - # # of the last mapping. + # of the last mapping. section_mapping[dest_address_type] = ( self.get_next_available_dest_addr_after_last_mapping( + cpu_mmu, stage, - self.jumpstart_source_attributes[area_name][section_name]["page_size"], - self.jumpstart_source_attributes[area_name][section_name][ - "pma_memory_type" - ], + section_mapping["page_size"], + section_mapping.get("pma_memory_type", None), ) ) + if section_mapping.get("alignment", None) is not None: + section_mapping[dest_address_type] = ( + section_mapping[dest_address_type] + section_mapping["alignment"] - 1 + ) & ~(section_mapping["alignment"] - 1) + if ( "no_pte_allocation" not in section_mapping or section_mapping["no_pte_allocation"] is False @@ -368,39 +612,25 @@ def add_jumpstart_mode_mappings_for_stage(self, stage, mode): # #user-level accesses, as though executed in U-mode. 
section_mapping["umode"] = "0b1" - self.memory_map[stage].insert( - len(self.memory_map[stage]), MemoryMapping(section_mapping) - ) + if section_mapping.get("num_pages") == 0: + continue - def add_pa_guard_page_after_last_mapping(self, stage): - guard_page_mapping = {} - guard_page_mapping["page_size"] = PageSize.SIZE_4K - guard_page_mapping["pma_memory_type"] = "wb" - guard_page_mapping["translation_stage"] = stage - - # Guard pages have no allocations in the page tables - # but occupy space in the memory map. - # They also don't occupy space in the ELFs. - guard_page_mapping["no_pte_allocation"] = True - guard_page_mapping["valid"] = "0b0" - dest_address_type = TranslationStage.get_translates_to(stage) - guard_page_mapping[dest_address_type] = ( - self.get_next_available_dest_addr_after_last_mapping( - stage, guard_page_mapping["page_size"], guard_page_mapping["pma_memory_type"] + self.memory_map[cpu_mmu][stage].insert( + len(self.memory_map[cpu_mmu][stage]), + MemoryMapping(section_mapping, self.max_num_cpus_supported), ) - ) - guard_page_mapping["num_pages"] = 1 - - self.memory_map[stage].insert( - len(self.memory_map[stage]), MemoryMapping(guard_page_mapping) - ) def generate_linker_script(self, output_linker_script): - LinkerScript( - self.jumpstart_source_attributes["diag_entry_label"], - self.memory_map, - self.diag_attributes_yaml, - ).generate(output_linker_script) + self.linker_script = LinkerScript( + entry_label=self.jumpstart_source_attributes["diag_attributes"]["diag_entry_label"], + elf_address_range=( + self.jumpstart_source_attributes["diag_attributes"]["elf_start_address"], + self.jumpstart_source_attributes["diag_attributes"]["elf_end_address"], + ), + mappings=self.memory_map["cpu"], + attributes_file=self.diag_attributes_yaml, + ) + self.linker_script.generate(output_linker_script) def generate_defines_file(self, output_defines_file): with open(output_defines_file, "w") as file_descriptor: @@ -408,54 +638,202 @@ def generate_defines_file(self, 
output_defines_file): f"// This file is auto-generated by {sys.argv[0]} from {self.diag_attributes_yaml}\n" ) - file_descriptor.write("\n// Diag Attributes defines\n\n") + file_descriptor.write("\n// Jumpstart Attributes defines\n\n") + for define_name in self.jumpstart_source_attributes["defines"]: + file_descriptor.write(f"#ifndef {define_name}\n") + define_value = self.jumpstart_source_attributes["defines"][define_name] + # Write all integers as hexadecimal for consistency and C/Assembly compatibility + if isinstance(define_value, int): + file_descriptor.write(f"#define {define_name} 0x{define_value:x}\n") + else: + file_descriptor.write(f"#define {define_name} {define_value}\n") + file_descriptor.write("#endif\n") + file_descriptor.write("\n") + file_descriptor.write( + f"#define MAX_NUM_CPUS_SUPPORTED {self.max_num_cpus_supported}\n\n" + ) + + for mod in self.priv_modes_enabled: + file_descriptor.write(f"#define {mod.upper()}_MODE_ENABLED 1\n") + + file_descriptor.write("\n// Jumpstart Syscall Numbers defines\n\n") + current_syscall_number = 0 + for syscall_name in self.jumpstart_source_attributes["syscall_numbers"]: + file_descriptor.write(f"#define {syscall_name} {current_syscall_number}\n") + current_syscall_number += 1 + + file_descriptor.write("\n// Diag Attributes defines\n\n") # Perform some transformations so that we can print them as defines. 
diag_attributes = self.jumpstart_source_attributes["diag_attributes"].copy() - assert "active_hart_mask" in diag_attributes - active_hart_mask = int(diag_attributes["active_hart_mask"], 2) - assert ( - active_hart_mask.bit_count() - <= self.jumpstart_source_attributes["max_num_harts_supported"] - ) - diag_attributes["active_hart_mask"] = int(active_hart_mask) for stage in TranslationStage.get_enabled_stages(): atp_register = TranslationStage.get_atp_register(stage) - assert f"{atp_register}_mode" in diag_attributes diag_attributes[f"{atp_register}_mode"] = TranslationMode.get_encoding( - diag_attributes[f"{atp_register}_mode"] + TranslationStage.get_selected_mode_for_stage(stage) ) for attribute in diag_attributes: if isinstance(diag_attributes[attribute], bool): + file_descriptor.write(f"#ifndef {attribute.upper()}\n") file_descriptor.write( f"#define {attribute.upper()} {int(diag_attributes[attribute])}\n" ) + file_descriptor.write("#endif\n") elif isinstance(diag_attributes[attribute], int): + file_descriptor.write(f"#ifndef {attribute.upper()}\n") file_descriptor.write( f"#define {attribute.upper()} {hex(diag_attributes[attribute])}\n" ) + file_descriptor.write("#endif\n") + + # Generate stack-related defines + self.generate_stack_defines(file_descriptor) + + # Generate register context save/restore defines + self.generate_reg_context_save_restore_defines(file_descriptor) + + # Generate C structs defines + self.generate_cstructs_defines(file_descriptor) + + # Generate rivos internal defines if this is a rivos internal build + if self.jumpstart_source_attributes["rivos_internal_build"] is True: + rivos_internal_functions.add_rivos_internal_defines( + file_descriptor, self.jumpstart_source_attributes + ) file_descriptor.close() - def generate_hart_sync_functions(self, file_descriptor): - active_hart_mask = int( - self.jumpstart_source_attributes["diag_attributes"]["active_hart_mask"], 2 - ) + def generate_data_structures_file(self, output_data_structures_file): + 
with open(output_data_structures_file, "w") as file_descriptor: + file_descriptor.write( + f"// This file is auto-generated by {sys.argv[0]} from {self.diag_attributes_yaml}\n" + ) + file_descriptor.write("#pragma once\n\n") + + # Only include these headers in C code. + file_descriptor.write("#if !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__)\n\n") + + file_descriptor.write("\n\n") + file_descriptor.write("#include \n") + file_descriptor.write("#include \n\n") + + # Generate C struct definitions + self.generate_cstructs_data_structures(file_descriptor) + + file_descriptor.write( + "#endif /* !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__) */\n\n" + ) + + file_descriptor.close() + + def find_memory_mapping_by_linker_section(self, linker_script_section, target_mmu=None): + """Find a MemoryMapping object by its linker_script_section name. + + Args: + linker_script_section (str): The linker script section name to search for + target_mmu (str, optional): The target MMU to search in. If None, searches all target MMUs. + + Returns: + MemoryMapping or None: The found MemoryMapping object, or None if not found + """ + target_mmus_to_search = [target_mmu] if target_mmu is not None else self.memory_map.keys() + + for mmu in target_mmus_to_search: + if mmu not in self.memory_map: + continue + for stage in self.memory_map[mmu].keys(): + for mapping in self.memory_map[mmu][stage]: + if mapping.get_field("linker_script_section") == linker_script_section: + return mapping + return None + + def generate_stack_defines(self, file_descriptor): + # This is a bit of a mess. Both mmode and smode share the same stack. + # We've named this stack "privileged" so we need to map the stack + # name to the mode. + stack_types = ListUtils.intersection(["umode"], self.priv_modes_enabled) + stack_types.append("privileged") + + for stack_type in stack_types: + # Make sure we can equally distribute the number of total stack pages + # among the cpus. 
+ + # Find the MemoryMapping object for this stack type + linker_section = f".jumpstart.cpu.stack.{stack_type}" + stack_mapping = self.find_memory_mapping_by_linker_section(linker_section, "cpu") + if stack_mapping is None: + raise Exception( + f"MemoryMapping with linker_script_section '{linker_section}' not found in memory_map" + ) + + # Get the num_pages from the MemoryMapping object + num_pages_for_stack = stack_mapping.get_field("num_pages") + stack_page_size = stack_mapping.get_field("page_size") + + assert num_pages_for_stack % self.max_num_cpus_supported == 0 + num_pages_per_cpu_for_stack = int(num_pages_for_stack / self.max_num_cpus_supported) + + file_descriptor.write( + f"#define NUM_PAGES_PER_CPU_FOR_{stack_type.upper()}_STACK {num_pages_per_cpu_for_stack}\n\n" + ) + + file_descriptor.write( + f"#define {stack_type.upper()}_STACK_PAGE_SIZE {stack_page_size}\n\n" + ) + + def generate_stack(self, file_descriptor): + # This is a bit of a mess. Both mmode and smode share the same stack. + # We've named this stack "privileged" so we need to map the stack + # name to the mode. + stack_types = ListUtils.intersection(["umode"], self.priv_modes_enabled) + stack_types.append("privileged") + + for stack_type in stack_types: + # Make sure we can equally distribute the number of total stack pages + # among the cpus. 
+ + # Find the MemoryMapping object for this stack type + linker_section = f".jumpstart.cpu.stack.{stack_type}" + stack_mapping = self.find_memory_mapping_by_linker_section(linker_section, "cpu") + if stack_mapping is None: + raise Exception( + f"MemoryMapping with linker_script_section '{linker_section}' not found in memory_map" + ) + + # Get the num_pages from the MemoryMapping object + num_pages_for_stack = stack_mapping.get_field("num_pages") + stack_page_size = stack_mapping.get_field("page_size") + + assert num_pages_for_stack % self.max_num_cpus_supported == 0 + num_pages_per_cpu_for_stack = int(num_pages_for_stack / self.max_num_cpus_supported) + + file_descriptor.write(f'.section .jumpstart.cpu.stack.{stack_type}, "aw"\n') + # Calculate alignment based on page size (log2 of page size) + alignment = stack_page_size.bit_length() - 1 + file_descriptor.write(f".align {alignment}\n") + file_descriptor.write(f".global {stack_type}_stack_top\n") + file_descriptor.write(f"{stack_type}_stack_top:\n") + for i in range(self.max_num_cpus_supported): + file_descriptor.write(f".global {stack_type}_stack_top_cpu_{i}\n") + file_descriptor.write(f"{stack_type}_stack_top_cpu_{i}:\n") + file_descriptor.write(f" .zero {num_pages_per_cpu_for_stack * stack_page_size}\n") + file_descriptor.write(f".global {stack_type}_stack_bottom\n") + file_descriptor.write(f"{stack_type}_stack_bottom:\n\n") + + def generate_cpu_sync_functions(self, file_descriptor): + active_cpu_mask = self.jumpstart_source_attributes["diag_attributes"]["active_cpu_mask"] modes = ListUtils.intersection(["mmode", "smode"], self.priv_modes_enabled) for mode in modes: file_descriptor.write( f""" -.section .jumpstart.text.{mode}, "ax" +.section .jumpstart.cpu.text.{mode}, "ax" # Inputs: -# a0: hart id of current hart -# a1: hart mask of harts to sync. 
-# a2: hart id of primary hart for sync -# a3: sync point address (4 byte aligned) -.global sync_harts_in_mask_from_{mode} -sync_harts_in_mask_from_{mode}: +# a0: cpu mask of cpus to sync. +# a1: sync point address (4 byte aligned) +.global sync_cpus_in_mask_from_{mode} +sync_cpus_in_mask_from_{mode}: addi sp, sp, -16 sd ra, 8(sp) sd fp, 0(sp) @@ -463,44 +841,53 @@ def generate_hart_sync_functions(self, file_descriptor): CHECKTC_DISABLE - li t0, 1 - sll t2, t0, a0 - sll t0, t0, a2 + GET_THREAD_ATTRIBUTES_CPU_ID(t0) + # Get the lowest numbered cpu id in the mask to use as the primary cpu + # to drive the sync. + ctz t1, a0 - # Both this hart id and the primary hart id should be part of - # the mask of harts to sync - and t3, t2, a1 + li t4, 1 + sll t5, t4, t0 + sll t4, t4, t1 + + # Both this cpu id and the primary cpu id should be part of + # the mask of cpus to sync + and t3, t5, a0 beqz t3, jumpstart_{mode}_fail - and t3, t0, a1 + and t3, t4, a0 beqz t3, jumpstart_{mode}_fail - amoor.w.aqrl t3, t2, (a3) + amoor.w.aqrl t3, t5, (a1) # This bit should not be already set. - and t3, t3, t2 + and t3, t3, t5 bnez t3, jumpstart_{mode}_fail - bne t0, t2, wait_for_primary_hart_to_clear_sync_point_bits_{mode} + bne t4, t5, wait_for_primary_cpu_to_clear_sync_point_bits_{mode} -wait_for_all_harts_to_set_sync_point_bits_{mode}: - # Primary hart waits till all the harts have set their bits in the sync point. - lw t0, (a3) - bne t0, a1, wait_for_all_harts_to_set_sync_point_bits_{mode} +wait_for_all_cpus_to_set_sync_point_bits_{mode}: + # Primary cpu waits till all the cpus have set their bits in the sync point. 
+ # twiddle thumbs to avoid excessive spinning + pause + lw t4, (a1) + bne t4, a0, wait_for_all_cpus_to_set_sync_point_bits_{mode} - amoswap.w t0, zero, (a3) + amoswap.w t4, zero, (a1) - bne t0, a1, jumpstart_{mode}_fail + bne t4, a0, jumpstart_{mode}_fail - j return_from_sync_harts_in_mask_from_{mode} + j return_from_sync_cpus_in_mask_from_{mode} -wait_for_primary_hart_to_clear_sync_point_bits_{mode}: - # non-primary harts wait for the primary hart to clear the sync point bits. - lw t0, (a3) - srl t0, t0, a0 - andi t0, t0, 1 - bnez t0, wait_for_primary_hart_to_clear_sync_point_bits_{mode} +wait_for_primary_cpu_to_clear_sync_point_bits_{mode}: + # non-primary cpus wait for the primary cpu to clear the sync point bits. + # twiddle thumbs to avoid excessive spinning + pause + lw t4, (a1) + srl t4, t4, t0 + andi t4, t4, 1 + bnez t4, wait_for_primary_cpu_to_clear_sync_point_bits_{mode} -return_from_sync_harts_in_mask_from_{mode}: +return_from_sync_cpus_in_mask_from_{mode}: CHECKTC_ENABLE ld ra, 8(sp) @@ -508,19 +895,17 @@ def generate_hart_sync_functions(self, file_descriptor): addi sp, sp, 16 ret -.global sync_all_harts_from_{mode} -sync_all_harts_from_{mode}: +.global sync_all_cpus_from_{mode} +sync_all_cpus_from_{mode}: addi sp, sp, -16 sd ra, 8(sp) sd fp, 0(sp) addi fp, sp, 16 - jal get_thread_attributes_hart_id_from_{mode} - li a1, {active_hart_mask} - li a2, PRIMARY_HART_ID - la a3, hart_sync_point + li a0, {active_cpu_mask} + la a1, cpu_sync_point - jal sync_harts_in_mask_from_{mode} + jal sync_cpus_in_mask_from_{mode} ld ra, 8(sp) ld fp, 0(sp) @@ -531,7 +916,7 @@ def generate_hart_sync_functions(self, file_descriptor): def generate_smode_fail_functions(self, file_descriptor): if "smode" in self.priv_modes_enabled: - file_descriptor.write('.section .jumpstart.text.smode, "ax"\n\n') + file_descriptor.write('.section .jumpstart.cpu.text.smode, "ax"\n\n') file_descriptor.write(".global jumpstart_smode_fail\n") file_descriptor.write("jumpstart_smode_fail:\n") @@ 
-546,25 +931,25 @@ def generate_smode_fail_functions(self, file_descriptor): file_descriptor.write(" li a1, DIAG_FAILED\n") file_descriptor.write(" jal sbi_system_reset\n") - file_descriptor.write(".global jumpstart_vsmode_fail\n") - file_descriptor.write("jumpstart_vsmode_fail:\n") - file_descriptor.write(" li a0, DIAG_FAILED\n") - file_descriptor.write(" j exit_from_vsmode\n") - def generate_mmu_functions(self, file_descriptor): modes = ListUtils.intersection(["mmode", "smode"], self.priv_modes_enabled) for mode in modes: - file_descriptor.write(f'.section .jumpstart.text.{mode}, "ax"\n\n') + file_descriptor.write(f'.section .jumpstart.cpu.text.{mode}, "ax"\n\n') file_descriptor.write(f".global setup_mmu_from_{mode}\n") file_descriptor.write(f"setup_mmu_from_{mode}:\n\n") for stage in TranslationStage.get_enabled_stages(): atp_register = TranslationStage.get_atp_register(stage) file_descriptor.write(f" li t0, {atp_register.upper()}_MODE\n") file_descriptor.write(f" slli t0, t0, {atp_register.upper()}64_MODE_SHIFT\n") - file_descriptor.write(f" la t1, {self.page_tables[stage].get_asm_label()}\n") - file_descriptor.write(" srai t1, t1, PAGE_OFFSET\n") - file_descriptor.write(" add t1, t1, t0\n") - file_descriptor.write(f" csrw {atp_register}, t1\n") + if stage in self.page_tables["cpu"]: + file_descriptor.write( + f" la t1, {self.page_tables['cpu'][stage].get_asm_label()}\n" + ) + file_descriptor.write(" srai t1, t1, PAGE_OFFSET\n") + file_descriptor.write(" add t0, t1, t0\n") + else: + assert TranslationStage.get_selected_mode_for_stage(stage) == "bare" + file_descriptor.write(f" csrw {atp_register}, t0\n") file_descriptor.write(" sfence.vma\n") if self.jumpstart_source_attributes["diag_attributes"]["enable_virtualization"] is True: @@ -573,31 +958,51 @@ def generate_mmu_functions(self, file_descriptor): file_descriptor.write(" ret\n") def generate_page_tables(self, file_descriptor): - for stage in TranslationStage.get_enabled_stages(): + for target_mmu in 
MemoryMapping.get_supported_targets(): + if target_mmu not in self.page_tables: + continue + + for stage in TranslationStage.get_enabled_stages(): + if stage not in self.page_tables[target_mmu]: + continue + + file_descriptor.write( + f'.section .jumpstart.{target_mmu}.rodata.{stage}_stage.pagetables, "a"\n\n' + ) - file_descriptor.write(f'.section .jumpstart.rodata.{stage}_stage.pagetables, "a"\n\n') + file_descriptor.write( + f".global {self.page_tables[target_mmu][stage].get_asm_label()}\n" + ) + file_descriptor.write(f"{self.page_tables[target_mmu][stage].get_asm_label()}:\n\n") - file_descriptor.write(f".global {self.page_tables[stage].get_asm_label()}\n") - file_descriptor.write(f"{self.page_tables[stage].get_asm_label()}:\n\n") + file_descriptor.write("/* Memory mappings in this page table:\n") + for mapping in self.page_tables[target_mmu][stage].get_mappings(): + if not mapping.is_bare_mapping(): + file_descriptor.write(f"{mapping}\n") + file_descriptor.write("*/\n") - pte_size_in_bytes = self.page_tables[stage].get_attribute("pte_size_in_bytes") - last_filled_address = None - for address in list(sorted(self.page_tables[stage].get_pte_addresses())): - if last_filled_address is not None and address != ( - last_filled_address + pte_size_in_bytes + pte_size_in_bytes = self.page_tables[target_mmu][stage].get_attribute( + "pte_size_in_bytes" + ) + last_filled_address = None + for address in list( + sorted(self.page_tables[target_mmu][stage].get_pte_addresses()) ): + if last_filled_address is not None and address != ( + last_filled_address + pte_size_in_bytes + ): + file_descriptor.write( + f".skip {hex(address - (last_filled_address + pte_size_in_bytes))}\n" + ) + log.debug( + f"Writing [{hex(address)}] = {hex(self.page_tables[target_mmu][stage].get_pte(address))}" + ) + file_descriptor.write(f"\n# [{hex(address)}]\n") file_descriptor.write( - f".skip {hex(address - (last_filled_address + pte_size_in_bytes))}\n" + f".{pte_size_in_bytes}byte 
{hex(self.page_tables[target_mmu][stage].get_pte(address))}\n" ) - log.debug( - f"Writing [{hex(address)}] = {hex(self.page_tables[stage].get_pte(address))}" - ) - file_descriptor.write(f"\n# [{hex(address)}]\n") - file_descriptor.write( - f".{pte_size_in_bytes}byte {hex(self.page_tables[stage].get_pte(address))}\n" - ) - last_filled_address = address + last_filled_address = address def generate_assembly_file(self, output_assembly_file): with open(output_assembly_file, "w") as file: @@ -605,14 +1010,22 @@ def generate_assembly_file(self, output_assembly_file): f"# This file is auto-generated by {sys.argv[0]} from {self.diag_attributes_yaml}\n" ) - file.write('#include "jumpstart_defines.h"\n\n') + file.write("\n\n") file.write('#include "cpu_bits.h"\n\n') self.generate_mmu_functions(file) self.generate_smode_fail_functions(file) - self.generate_hart_sync_functions(file) + self.generate_cpu_sync_functions(file) + + self.generate_stack(file) + + self.generate_thread_attributes_code(file) + + self.generate_reg_context_save_restore_assembly(file) + + self.generate_cstructs_assembly(file) if self.jumpstart_source_attributes["rivos_internal_build"] is True: rivos_internal_functions.generate_rivos_internal_mmu_functions( @@ -623,33 +1036,361 @@ def generate_assembly_file(self, output_assembly_file): file.close() - def translate(self, source_address): - for stage in TranslationStage.get_enabled_stages(): - try: - self.translate_stage(stage, source_address) - log.info(f"{stage} Stage: Translation SUCCESS\n\n") - except Exception as e: - log.warning(f"{stage} Stage: Translation FAILED: {e}\n\n") - - def translate_stage(self, stage, source_address): - translation_mode = self.jumpstart_source_attributes["diag_attributes"][ - f"{TranslationStage.get_atp_register(stage)}_mode" + def generate_thread_attributes_code(self, file_descriptor): + self.generate_thread_attributes_getter_functions(file_descriptor) + + modes = ListUtils.intersection(["smode", "mmode"], 
self.priv_modes_enabled) + mode_encodings = {"smode": "PRV_S", "mmode": "PRV_M"} + for mode in modes: + file_descriptor.write(f'.section .jumpstart.cpu.text.{mode}.init, "ax"\n') + file_descriptor.write("# Inputs:\n") + file_descriptor.write("# a0: cpu id\n") + file_descriptor.write("# a1: physical cpu id\n") + file_descriptor.write(f".global setup_thread_attributes_from_{mode}\n") + file_descriptor.write(f"setup_thread_attributes_from_{mode}:\n") + file_descriptor.write(" li t1, MAX_NUM_CPUS_SUPPORTED\n") + file_descriptor.write(f" bgeu a0, t1, jumpstart_{mode}_fail\n") + file_descriptor.write("\n") + # Save input parameters and return address to stack + file_descriptor.write(" addi sp, sp, -24\n") + file_descriptor.write(" sd a0, 0(sp) # Save cpu_id\n") + file_descriptor.write(" sd a1, 8(sp) # Save physical_cpu_id\n") + file_descriptor.write(" sd ra, 16(sp) # Save return address\n") + file_descriptor.write("\n") + # Call getter function to get thread attributes address for this cpu id + file_descriptor.write(f" jal get_thread_attributes_for_cpu_id_from_{mode}\n") + file_descriptor.write(" mv tp, a0 # Move returned address to tp\n") + file_descriptor.write("\n") + # Restore parameters from stack + file_descriptor.write(" ld ra, 16(sp) # Restore return address\n") + file_descriptor.write(" ld a1, 8(sp) # Restore physical_cpu_id\n") + file_descriptor.write(" ld a0, 0(sp) # Restore cpu_id\n") + file_descriptor.write(" addi sp, sp, 24\n") + file_descriptor.write("\n") + file_descriptor.write(" SET_THREAD_ATTRIBUTES_CPU_ID(a0)\n") + file_descriptor.write(" SET_THREAD_ATTRIBUTES_PHYSICAL_CPU_ID(a1)\n") + file_descriptor.write("\n") + file_descriptor.write(" li t0, TRAP_OVERRIDE_ATTRIBUTES_STRUCT_SIZE_IN_BYTES\n") + file_descriptor.write(" mul t0, a0, t0\n") + file_descriptor.write(" la t1, trap_override_attributes_region\n") + file_descriptor.write(" add t0, t1, t0\n") + file_descriptor.write(" SET_THREAD_ATTRIBUTES_TRAP_OVERRIDE_STRUCT_ADDRESS(t0)\n") + 
file_descriptor.write("\n") + file_descriptor.write( + " li t0, REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES * MAX_NUM_CONTEXT_SAVES\n" + ) + file_descriptor.write(" mul t0, a0, t0\n") + file_descriptor.write("\n") + if "mmode" in modes: + file_descriptor.write(" la t1, mmode_reg_context_save_region\n") + file_descriptor.write(" add t1, t1, t0\n") + file_descriptor.write(" la t2, mmode_reg_context_save_region_end\n") + file_descriptor.write(f" bgeu t1, t2, jumpstart_{mode}_fail\n") + file_descriptor.write( + " SET_THREAD_ATTRIBUTES_MMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(t1)\n" + ) + file_descriptor.write(" li t1, MAX_NUM_CONTEXT_SAVES\n") + file_descriptor.write( + " SET_THREAD_ATTRIBUTES_NUM_CONTEXT_SAVES_REMAINING_IN_MMODE(t1)\n" + ) + file_descriptor.write("\n") + + file_descriptor.write(" csrr t1, marchid\n") + file_descriptor.write(" SET_THREAD_ATTRIBUTES_MARCHID(t1)\n") + file_descriptor.write(" csrr t1, mimpid\n") + file_descriptor.write(" SET_THREAD_ATTRIBUTES_MIMPID(t1)\n") + file_descriptor.write("\n") + + if "smode" in modes: + file_descriptor.write(" la t1, smode_reg_context_save_region\n") + file_descriptor.write(" add t1, t1, t0\n") + file_descriptor.write(" la t2, smode_reg_context_save_region_end\n") + file_descriptor.write(f" bgeu t1, t2, jumpstart_{mode}_fail\n") + file_descriptor.write( + " SET_THREAD_ATTRIBUTES_SMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(t1)\n" + ) + + file_descriptor.write(" li t1, MAX_NUM_CONTEXT_SAVES\n") + file_descriptor.write( + " SET_THREAD_ATTRIBUTES_NUM_CONTEXT_SAVES_REMAINING_IN_SMODE(t1)\n" + ) + file_descriptor.write("\n") + file_descriptor.write(" li t0, 0\n") + file_descriptor.write(" SET_THREAD_ATTRIBUTES_SMODE_SETUP_DONE(t0)\n") + file_descriptor.write(" SET_THREAD_ATTRIBUTES_VSMODE_SETUP_DONE(t0)\n") + file_descriptor.write("\n") + file_descriptor.write(" SET_THREAD_ATTRIBUTES_CURRENT_V_BIT(t0)\n") + file_descriptor.write("\n") + file_descriptor.write(f" li t0, {mode_encodings[mode]}\n") + file_descriptor.write(" 
SET_THREAD_ATTRIBUTES_CURRENT_MODE(t0)\n") + file_descriptor.write("\n") + file_descriptor.write(" li t0, THREAD_ATTRIBUTES_BOOKEND_MAGIC_NUMBER_VALUE\n") + file_descriptor.write(" SET_THREAD_ATTRIBUTES_BOOKEND_MAGIC_NUMBER(t0)\n") + file_descriptor.write("\n") + file_descriptor.write(" ret\n") + + def generate_thread_attributes_getter_functions(self, file_descriptor): + """Generate functions to get thread attributes struct address for a given CPU ID.""" + modes = ListUtils.intersection(["smode", "mmode"], self.priv_modes_enabled) + for mode in modes: + file_descriptor.write(f'.section .jumpstart.cpu.text.{mode}.init, "ax"\n') + file_descriptor.write("# Inputs:\n") + file_descriptor.write("# a0: cpu id\n") + file_descriptor.write("# Outputs:\n") + file_descriptor.write( + "# a0: address of thread attributes struct for the given cpu id\n" + ) + file_descriptor.write(f".global get_thread_attributes_for_cpu_id_from_{mode}\n") + file_descriptor.write(f"get_thread_attributes_for_cpu_id_from_{mode}:\n") + file_descriptor.write(" li t1, MAX_NUM_CPUS_SUPPORTED\n") + file_descriptor.write(f" bgeu a0, t1, jumpstart_{mode}_fail\n") + file_descriptor.write("\n") + file_descriptor.write(" li t2, THREAD_ATTRIBUTES_STRUCT_SIZE_IN_BYTES\n") + file_descriptor.write(" mul t2, a0, t2\n") + file_descriptor.write(" la t1, thread_attributes_region\n") + file_descriptor.write(" add a0, t1, t2\n") + file_descriptor.write(" ret\n\n") + + def generate_reg_context_save_restore_defines(self, file_descriptor): + """Generate defines for register context save/restore functionality.""" + assert ( + self.jumpstart_source_attributes["reg_context_to_save_across_exceptions"][ + "temp_register" + ] + not in self.jumpstart_source_attributes["reg_context_to_save_across_exceptions"][ + "registers" + ]["gprs"] + ) + + num_registers = 0 + for reg_type in self.jumpstart_source_attributes["reg_context_to_save_across_exceptions"][ + "registers" + ]: + reg_names = 
self.jumpstart_source_attributes["reg_context_to_save_across_exceptions"][ + "registers" + ][reg_type] + for reg_name in reg_names: + file_descriptor.write( + f"#define {reg_name.upper()}_OFFSET_IN_SAVE_REGION ({num_registers} * 8)\n" + ) + num_registers += 1 + + temp_reg_name = self.jumpstart_source_attributes["reg_context_to_save_across_exceptions"][ + "temp_register" ] + + file_descriptor.write( + f"\n#define REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES ({num_registers} * 8)\n" + ) + file_descriptor.write( + f"\n#define MAX_NUM_CONTEXT_SAVES {self.jumpstart_source_attributes['reg_context_to_save_across_exceptions']['max_num_context_saves']}\n" + ) + + file_descriptor.write("\n#define SAVE_ALL_GPRS ;") + for gpr_name in self.jumpstart_source_attributes["reg_context_to_save_across_exceptions"][ + "registers" + ]["gprs"]: + file_descriptor.write( + f"\\\n sd {gpr_name}, {gpr_name.upper()}_OFFSET_IN_SAVE_REGION({temp_reg_name}) ;" + ) + file_descriptor.write("\n\n") + + file_descriptor.write("\n#define RESTORE_ALL_GPRS ;") + for gpr_name in self.jumpstart_source_attributes["reg_context_to_save_across_exceptions"][ + "registers" + ]["gprs"]: + file_descriptor.write( + f"\\\n ld {gpr_name}, {gpr_name.upper()}_OFFSET_IN_SAVE_REGION({temp_reg_name}) ;" + ) + file_descriptor.write("\n\n") + + def generate_reg_context_save_restore_assembly(self, file_descriptor): + """Generate assembly code for register context save/restore regions.""" + num_registers = 0 + for reg_type in self.jumpstart_source_attributes["reg_context_to_save_across_exceptions"][ + "registers" + ]: + reg_names = self.jumpstart_source_attributes["reg_context_to_save_across_exceptions"][ + "registers" + ][reg_type] + for reg_name in reg_names: + num_registers += 1 + + file_descriptor.write('\n\n.section .jumpstart.cpu.data.privileged, "a"\n') + modes = ListUtils.intersection(["mmode", "smode"], self.priv_modes_enabled) + file_descriptor.write( + f"\n# {modes} context saved registers:\n# 
{self.jumpstart_source_attributes['reg_context_to_save_across_exceptions']['registers']}\n" + ) + for mode in modes: + file_descriptor.write(f".global {mode}_reg_context_save_region\n") + file_descriptor.write(f"{mode}_reg_context_save_region:\n") + for i in range(self.max_num_cpus_supported): + file_descriptor.write( + f" # {mode} context save area for cpu {i}'s {num_registers} registers. {self.jumpstart_source_attributes['reg_context_to_save_across_exceptions']['max_num_context_saves']} nested contexts supported.\n" + ) + for i in range( + self.jumpstart_source_attributes["reg_context_to_save_across_exceptions"][ + "max_num_context_saves" + ] + ): + f" # Context {i}\n" + file_descriptor.write(f" .zero {num_registers * 8}\n\n") + file_descriptor.write(f".global {mode}_reg_context_save_region_end\n") + file_descriptor.write(f"{mode}_reg_context_save_region_end:\n\n") + + def generate_cstructs_defines(self, file_descriptor): + """Generate #define statements for struct sizes and field counts.""" + for c_struct in self.c_structs: + # Generate defines for array field counts + for field in c_struct.fields: + if field.num_elements > 1: + file_descriptor.write( + f"#define NUM_{field.name.upper()} {field.num_elements}\n" + ) + + # Generate struct size define + file_descriptor.write( + f"#define {c_struct.name.upper()}_STRUCT_SIZE_IN_BYTES {c_struct.size_in_bytes}\n\n" + ) + + # Generate field offset defines and getter/setter macros for thread_attributes + if c_struct.name == "thread_attributes": + for field in c_struct.fields: + file_descriptor.write( + f"#define {c_struct.name.upper()}_{field.name.upper()}_OFFSET {field.offset}\n" + ) + file_descriptor.write( + f"#define GET_{c_struct.name.upper()}_{field.name.upper()}(dest_reg) {get_memop_of_size(MemoryOp.LOAD, field.size_in_bytes)} dest_reg, {c_struct.name.upper()}_{field.name.upper()}_OFFSET(tp);\n" + ) + file_descriptor.write( + f"#define SET_{c_struct.name.upper()}_{field.name.upper()}(dest_reg) 
{get_memop_of_size(MemoryOp.STORE, field.size_in_bytes)} dest_reg, {c_struct.name.upper()}_{field.name.upper()}_OFFSET(tp);\n\n" + ) + + def generate_cstructs_data_structures(self, file_descriptor): + """Generate C struct definitions.""" + for c_struct in self.c_structs: + file_descriptor.write(f"struct {c_struct.name} {{\n") + for field in c_struct.fields: + if field.num_elements > 1: + file_descriptor.write( + f" {field.field_type} {field.name}[NUM_{field.name.upper()}];\n" + ) + else: + file_descriptor.write(f" {field.field_type} {field.name};\n") + file_descriptor.write(f"}} __attribute__((aligned({c_struct.alignment})));\n\n") + + # Generate offsetof assertions for compile-time verification + self._generate_offsetof_assertions(c_struct, file_descriptor) + + def _generate_offsetof_assertions(self, c_struct, file_descriptor): + """Generate _Static_assert statements using offsetof() for compile-time verification.""" + for field in c_struct.fields: + file_descriptor.write( + f"_Static_assert(offsetof(struct {c_struct.name}, {field.name}) == {field.offset}, " + f'"{c_struct.name}.{field.name} offset mismatch");\n' + ) + + # Generate size assertion + file_descriptor.write( + f"_Static_assert(sizeof(struct {c_struct.name}) == {c_struct.name.upper()}_STRUCT_SIZE_IN_BYTES, " + f'"{c_struct.name} size mismatch");\n\n' + ) + + def generate_cstructs_assembly(self, file_descriptor): + """Generate assembly code for struct regions and getter/setter functions.""" + for c_struct in self.c_structs: + # Generate assembly regions + file_descriptor.write('.section .jumpstart.cpu.c_structs.mmode, "aw"\n\n') + file_descriptor.write(f".global {c_struct.name}_region\n") + file_descriptor.write(f"{c_struct.name}_region:\n") + for i in range(self.max_num_cpus_supported): + file_descriptor.write(f".global {c_struct.name}_region_cpu_{i}\n") + file_descriptor.write(f"{c_struct.name}_region_cpu_{i}:\n") + file_descriptor.write(f" .zero {c_struct.size_in_bytes}\n") + 
file_descriptor.write(f".global {c_struct.name}_region_end\n") + file_descriptor.write(f"{c_struct.name}_region_end:\n\n") + + # Generate getter/setter functions for thread_attributes + if c_struct.name == "thread_attributes": + modes = ListUtils.intersection(["smode", "mmode"], self.priv_modes_enabled) + for field in c_struct.fields: + for mode in modes: + file_descriptor.write(f'.section .jumpstart.cpu.text.{mode}, "ax"\n') + getter_method = f"get_{c_struct.name}_{field.name}_from_{mode}" + file_descriptor.write(f".global {getter_method}\n") + file_descriptor.write(f"{getter_method}:\n") + file_descriptor.write( + f" GET_{c_struct.name.upper()}_{field.name.upper()}(a0)\n" + ) + file_descriptor.write(" ret\n\n") + + file_descriptor.write( + f".global set_{c_struct.name}_{field.name}_from_{mode}\n" + ) + file_descriptor.write(f"set_{c_struct.name}_{field.name}_from_{mode}:\n") + file_descriptor.write( + f" SET_{c_struct.name.upper()}_{field.name.upper()}(a0)\n" + ) + file_descriptor.write(" ret\n\n") + + # Validate total size + total_size_of_c_structs = sum(c_struct.size_in_bytes for c_struct in self.c_structs) + + # Find the MemoryMapping object for c_structs + linker_section = ".jumpstart.cpu.c_structs.mmode" + c_structs_mapping = self.find_memory_mapping_by_linker_section(linker_section, "cpu") + if c_structs_mapping is None: + raise Exception( + f"MemoryMapping with linker_script_section '{linker_section}' not found in memory_map" + ) + + # Get the num_pages and page_size from the MemoryMapping object + num_pages_for_c_structs = c_structs_mapping.get_field("num_pages") + c_structs_page_size = c_structs_mapping.get_field("page_size") + + max_allowed_size_of_c_structs = num_pages_for_c_structs * c_structs_page_size + + if total_size_of_c_structs * self.max_num_cpus_supported > max_allowed_size_of_c_structs: + raise Exception( + f"Total size of C structs ({total_size_of_c_structs}) exceeds maximum size allocated for C structs {max_allowed_size_of_c_structs}" + ) + 
+ def _parse_c_structs(self): + """Parse C structs from YAML data into CStruct objects.""" + c_structs = [] + for struct_name, struct_data in self.jumpstart_source_attributes["c_structs"].items(): + c_struct = CStruct(struct_name, struct_data["fields"]) + c_structs.append(c_struct) + return c_structs + + def translate(self, source_address): + for target_mmu in MemoryMapping.get_supported_targets(): + for stage in TranslationStage.get_enabled_stages(): + try: + self.translate_stage(target_mmu, stage, source_address) + log.info(f"{target_mmu} MMU: {stage} Stage: Translation SUCCESS\n\n") + except Exception as e: + log.warning(f"{target_mmu} MMU: {stage} Stage: Translation FAILED: {e}\n\n") + + def translate_stage(self, target_mmu, stage, source_address): + translation_mode = TranslationStage.get_selected_mode_for_stage(stage) log.info( - f"{stage} Stage: Translating Address {hex(source_address)}. Translation.translation_mode = {translation_mode}." + f"{target_mmu} MMU: {stage} Stage: Translating Address {hex(source_address)}. Translation.translation_mode = {translation_mode}." ) attributes = PageTableAttributes(translation_mode) # Step 1 - a = self.page_tables[stage].get_start_address() + a = self.page_tables[target_mmu][stage].get_start_address() current_level = 0 pte_value = 0 # Step 2 while True: - log.info(f" {stage} Stage: a = {hex(a)}; current_level = {current_level}") + log.info( + f" {target_mmu} MMU: {stage} Stage: a = {hex(a)}; current_level = {current_level}" + ) pte_address = a + BitField.extract_bits( source_address, attributes.get_attribute("va_vpn_bits")[current_level] @@ -657,17 +1398,19 @@ def translate_stage(self, stage, source_address): if TranslationStage.get_next_stage(stage) is not None: log.info( - f" {stage} Stage: PTE Address {hex(pte_address)} needs next stage translation." + f" {target_mmu} MMU: {stage} Stage: PTE Address {hex(pte_address)} needs next stage translation." 
+ ) + self.translate_stage( + target_mmu, TranslationStage.get_next_stage(stage), pte_address ) - self.translate_stage(TranslationStage.get_next_stage(stage), pte_address) - pte_value = self.page_tables[stage].read_sparse_memory(pte_address) + pte_value = self.page_tables[target_mmu][stage].read_sparse_memory(pte_address) if pte_value is None: raise ValueError(f"Level {current_level} PTE at {hex(pte_address)} is not valid.") log.info( - f" {stage} Stage: level{current_level} PTE: [{hex(pte_address)}] = {hex(pte_value)}" + f" {target_mmu} MMU: {stage} Stage: level{current_level} PTE: [{hex(pte_address)}] = {hex(pte_value)}" ) if BitField.extract_bits(pte_value, attributes.common_attributes["valid_bit"]) == 0: @@ -687,7 +1430,7 @@ def translate_stage(self, stage, source_address): ) if (xwr & 0x6) or (xwr & 0x1): - log.info(f" {stage} Stage: This is a Leaf PTE") + log.info(f" {target_mmu} MMU: {stage} Stage: This is a Leaf PTE") break else: if BitField.extract_bits(pte_value, attributes.common_attributes["a_bit"]) != 0: @@ -705,8 +1448,10 @@ def translate_stage(self, stage, source_address): source_address, (attributes.get_attribute("va_vpn_bits")[current_level][1] - 1, 0) ) - log.info(f" {stage} Stage: PTE value = {hex(pte_value)}") - log.info(f"{stage} Stage: Translated {hex(source_address)} --> {hex(dest_address)}") + log.info(f" {target_mmu} MMU: {stage} Stage: PTE value = {hex(pte_value)}") + log.info( + f"{target_mmu} MMU: {stage} Stage: Translated {hex(source_address)} --> {hex(dest_address)}" + ) return dest_address @@ -729,13 +1474,6 @@ def main(): required=True, type=str, ) - parser.add_argument( - "--override_jumpstart_source_attributes", - help="Overrides the JumpStart source attributes.", - required=False, - nargs="+", - default=None, - ) parser.add_argument( "--priv_modes_enabled", help=".", @@ -758,6 +1496,12 @@ def main(): parser.add_argument( "--output_linker_script", help="Linker script to generate", required=False, type=str ) + parser.add_argument( + 
"--output_data_structures_file", + help="Data structures file to generate with C struct definitions", + required=False, + type=str, + ) parser.add_argument( "--translate", help="Translate the address.", @@ -784,18 +1528,19 @@ def main(): source_generator = SourceGenerator( args.jumpstart_source_attributes_yaml, - args.override_jumpstart_source_attributes, args.diag_attributes_yaml, args.override_diag_attributes, args.priv_modes_enabled, ) - if args.output_assembly_file is not None: - source_generator.generate_assembly_file(args.output_assembly_file) if args.output_linker_script is not None: source_generator.generate_linker_script(args.output_linker_script) + if args.output_assembly_file is not None: + source_generator.generate_assembly_file(args.output_assembly_file) if args.output_defines_file is not None: source_generator.generate_defines_file(args.output_defines_file) + if args.output_data_structures_file is not None: + source_generator.generate_data_structures_file(args.output_data_structures_file) if args.translate is not None: source_generator.translate(args.translate) diff --git a/scripts/generate_jumpstart_sources.py b/scripts/generate_jumpstart_sources.py deleted file mode 100755 index d5097e3a..00000000 --- a/scripts/generate_jumpstart_sources.py +++ /dev/null @@ -1,480 +0,0 @@ -#!/usr/bin/env python3 - -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -# -# SPDX-License-Identifier: Apache-2.0 - -# Generates the jumpstart source files from the jumpstart attributes YAML file. 
- -import argparse -import logging as log -import os -import sys -from enum import Enum - -import yaml -from data_structures import DictUtils, ListUtils - - -class MemoryOp(Enum): - LOAD = (1,) - STORE = 2 - - -def get_memop_of_size(memory_op_type, size_in_bytes): - if memory_op_type == MemoryOp.LOAD: - op = "l" - elif memory_op_type == MemoryOp.STORE: - op = "s" - else: - raise Exception(f"Invalid memory op type: {memory_op_type}") - - if size_in_bytes == 1: - return op + "b" - elif size_in_bytes == 2: - return op + "h" - elif size_in_bytes == 4: - return op + "w" - elif size_in_bytes == 8: - return op + "d" - else: - raise Exception(f"Invalid size: {size_in_bytes} bytes") - - -field_type_to_size_in_bytes = { - "uint8_t": 1, - "uint16_t": 2, - "uint32_t": 4, - "uint64_t": 8, -} - - -class JumpStartGeneratedSource: - def __init__( - self, - jumpstart_source_attributes_yaml, - override_jumpstart_source_attributes, - defines_file, - data_structures_file, - assembly_file, - priv_modes_enabled, - ) -> None: - log.debug(f"Generating jumpstart source files from {jumpstart_source_attributes_yaml}") - - self.priv_modes_enabled = priv_modes_enabled - - self.attributes_data = None - with open(jumpstart_source_attributes_yaml) as f: - self.attributes_data = yaml.safe_load(f) - f.close() - - if override_jumpstart_source_attributes: - # Override the default jumpstart source attribute values with the values - # specified on the command line. 
- DictUtils.override_dict( - self.attributes_data, - DictUtils.create_dict(override_jumpstart_source_attributes), - ) - - self.defines_file_fd = open(defines_file, "w") - self.data_structures_file_fd = open(data_structures_file, "w") - self.assembly_file_fd = open(assembly_file, "w") - - def __del__(self): - self.defines_file_fd.close() - self.data_structures_file_fd.close() - self.assembly_file_fd.close() - - def generate(self): - self.generate_headers() - - self.generate_c_structs() - - self.generate_stack() - - self.generate_defines() - - self.generate_reg_context_save_restore_code() - - self.generate_thread_attributes_setup_code() - - def generate_headers(self): - self.defines_file_fd.write( - f"// This file is generated by {os.path.basename(__file__)}. Do not edit.\n\n" - ) - self.defines_file_fd.write("#pragma once\n\n") - self.data_structures_file_fd.write( - f"// This file is generated by {os.path.basename(__file__)}. Do not edit.\n\n" - ) - self.data_structures_file_fd.write("#pragma once\n\n") - - self.assembly_file_fd.write( - f"// This file is generated by {os.path.basename(__file__)}. 
Do not edit.\n\n" - ) - self.assembly_file_fd.write('#include "jumpstart_defines.h"\n\n') - self.assembly_file_fd.write('#include "cpu_bits.h"\n\n') - - self.defines_file_fd.write( - f"#define MAX_NUM_HARTS_SUPPORTED {self.attributes_data['max_num_harts_supported']}\n\n" - ) - - self.data_structures_file_fd.write('#include "jumpstart_defines.h"\n\n') - self.data_structures_file_fd.write("#include \n\n") - - def generate_c_structs(self): - total_size_of_c_structs = 0 - - for c_struct in self.attributes_data["c_structs"]: - c_struct_fields = self.attributes_data["c_structs"][c_struct]["fields"] - current_offset = 0 - - self.data_structures_file_fd.write(f"struct {c_struct} {{\n") - for field_name in c_struct_fields: - num_field_elements = 1 - if len(c_struct_fields[field_name].split(",")) > 1: - field_type = c_struct_fields[field_name].split(",")[0] - num_field_elements = int(c_struct_fields[field_name].split(",")[1]) - self.defines_file_fd.write( - f"#define NUM_{field_name.upper()} {num_field_elements}\n" - ) - else: - field_type = c_struct_fields[field_name] - - field_size_in_bytes = field_type_to_size_in_bytes[field_type] - if num_field_elements > 1: - self.data_structures_file_fd.write( - f" {field_type} {field_name}[NUM_{field_name.upper()}];\n" - ) - else: - self.data_structures_file_fd.write(f" {field_type} {field_name};\n") - - # Take care of the padding that the compiler will add. - while (current_offset % field_size_in_bytes) != 0: - current_offset += 1 - - if c_struct == "thread_attributes": - self.generate_getter_and_setter_methods_for_field( - c_struct, - field_name, - field_size_in_bytes, - current_offset, - ) - - current_offset += field_size_in_bytes * num_field_elements - - self.data_structures_file_fd.write("};\n\n") - - # Align the end of the struct to 8 bytes. 
- while (current_offset % 8) != 0: - current_offset += 1 - self.defines_file_fd.write( - f"#define {c_struct.upper()}_STRUCT_SIZE_IN_BYTES {current_offset}\n\n" - ) - - self.assembly_file_fd.write('.section .jumpstart.c_structs.smode, "aw"\n\n') - self.assembly_file_fd.write(f".global {c_struct}_region\n") - self.assembly_file_fd.write(f"{c_struct}_region:\n") - for i in range(self.attributes_data["max_num_harts_supported"]): - self.assembly_file_fd.write(f".global {c_struct}_region_hart_{i}\n") - self.assembly_file_fd.write(f"{c_struct}_region_hart_{i}:\n") - self.assembly_file_fd.write(f" .zero {current_offset}\n") - self.assembly_file_fd.write(f".global {c_struct}_region_end\n") - self.assembly_file_fd.write(f"{c_struct}_region_end:\n\n") - - total_size_of_c_structs += current_offset - - max_allowed_size_of_c_structs = ( - self.attributes_data["jumpstart_smode"]["c_structs"]["num_pages"] - * self.attributes_data["jumpstart_smode"]["c_structs"]["page_size"] - ) - - if ( - total_size_of_c_structs * self.attributes_data["max_num_harts_supported"] - > max_allowed_size_of_c_structs - ): - log.error( - f"Total size of C structs ({total_size_of_c_structs}) exceeds maximum size allocated for C structs {max_allowed_size_of_c_structs}" - ) - sys.exit(1) - - def generate_stack(self): - stack_types = ["smode", "umode"] - for stack_type in stack_types: - # Make sure we can equally distribute the number of total stack pages - # among the harts. 
- assert ( - self.attributes_data[f"jumpstart_{stack_type}"]["stack"]["num_pages"] - % self.attributes_data["max_num_harts_supported"] - == 0 - ) - num_pages_per_hart_for_stack = int( - self.attributes_data[f"jumpstart_{stack_type}"]["stack"]["num_pages"] - / self.attributes_data["max_num_harts_supported"] - ) - stack_page_size = self.attributes_data[f"jumpstart_{stack_type}"]["stack"]["page_size"] - - self.defines_file_fd.write( - f"#define NUM_PAGES_PER_HART_FOR_{stack_type.upper()}_STACK {num_pages_per_hart_for_stack}\n\n" - ) - - self.defines_file_fd.write( - f"#define {stack_type.upper()}_STACK_PAGE_SIZE {stack_page_size}\n\n" - ) - - self.assembly_file_fd.write(f'.section .jumpstart.stack.{stack_type}, "aw"\n') - self.assembly_file_fd.write(".align 12\n") - self.assembly_file_fd.write(f".global {stack_type}_stack_top\n") - self.assembly_file_fd.write(f"{stack_type}_stack_top:\n") - for i in range(self.attributes_data["max_num_harts_supported"]): - self.assembly_file_fd.write(f".global {stack_type}_stack_top_hart_{i}\n") - self.assembly_file_fd.write(f"{stack_type}_stack_top_hart_{i}:\n") - self.assembly_file_fd.write( - f" .zero {num_pages_per_hart_for_stack * stack_page_size}\n" - ) - self.assembly_file_fd.write(f".global {stack_type}_stack_bottom\n") - self.assembly_file_fd.write(f"{stack_type}_stack_bottom:\n\n") - - def generate_defines(self): - for define_name in self.attributes_data["defines"]: - self.defines_file_fd.write( - f"#define {define_name} {self.attributes_data['defines'][define_name]}\n" - ) - - self.defines_file_fd.write("\n") - current_syscall_number = 0 - for syscall_name in self.attributes_data["syscall_numbers"]: - self.defines_file_fd.write(f"#define {syscall_name} {current_syscall_number}\n") - current_syscall_number += 1 - - def generate_getter_and_setter_methods_for_field( - self, - c_struct, - field_name, - field_size_in_bytes, - field_offset_in_struct, - ): - self.defines_file_fd.write( - f"#define 
{c_struct.upper()}_{field_name.upper()}_OFFSET {field_offset_in_struct}\n" - ) - - self.defines_file_fd.write( - f"#define GET_{c_struct.upper()}_{field_name.upper()}(dest_reg) {get_memop_of_size(MemoryOp.LOAD, field_size_in_bytes)} dest_reg, {c_struct.upper()}_{field_name.upper()}_OFFSET(tp);\n" - ) - self.defines_file_fd.write( - f"#define SET_{c_struct.upper()}_{field_name.upper()}(dest_reg) {get_memop_of_size(MemoryOp.STORE, field_size_in_bytes)} dest_reg, {c_struct.upper()}_{field_name.upper()}_OFFSET(tp);\n\n" - ) - - modes = ListUtils.intersection(["smode", "mmode"], self.priv_modes_enabled) - for mode in modes: - self.assembly_file_fd.write(f'.section .jumpstart.text.{mode}, "ax"\n') - getter_method = f"get_{c_struct}_{field_name}_from_{mode}" - self.assembly_file_fd.write(f".global {getter_method}\n") - self.assembly_file_fd.write(f"{getter_method}:\n") - self.assembly_file_fd.write(f" GET_{c_struct.upper()}_{field_name.upper()}(a0)\n") - self.assembly_file_fd.write(" ret\n\n") - - self.assembly_file_fd.write(f".global set_{c_struct}_{field_name}_from_{mode}\n") - self.assembly_file_fd.write(f"set_{c_struct}_{field_name}_from_{mode}:\n") - self.assembly_file_fd.write(f" SET_{c_struct.upper()}_{field_name.upper()}(a0)\n") - self.assembly_file_fd.write(" ret\n\n") - - def generate_thread_attributes_setup_code(self): - modes = ListUtils.intersection(["smode", "mmode"], self.priv_modes_enabled) - mode_encodings = {"smode": "PRV_S", "mmode": "PRV_M"} - for mode in modes: - self.assembly_file_fd.write(f'.section .jumpstart.text.{mode}.init, "ax"\n') - self.assembly_file_fd.write("# Inputs:\n") - self.assembly_file_fd.write("# a0: hart id\n") - self.assembly_file_fd.write(f".global setup_thread_attributes_from_{mode}\n") - self.assembly_file_fd.write(f"setup_thread_attributes_from_{mode}:\n") - self.assembly_file_fd.write(" li t1, MAX_NUM_HARTS_SUPPORTED\n") - self.assembly_file_fd.write(f" bgeu a0, t1, jumpstart_{mode}_fail\n") - 
self.assembly_file_fd.write("\n") - self.assembly_file_fd.write(" li t2, THREAD_ATTRIBUTES_STRUCT_SIZE_IN_BYTES\n") - self.assembly_file_fd.write(" mul t2, a0, t2\n") - self.assembly_file_fd.write(" la t1, thread_attributes_region\n") - self.assembly_file_fd.write(" add tp, t1, t2\n") - self.assembly_file_fd.write("\n") - self.assembly_file_fd.write(" SET_THREAD_ATTRIBUTES_HART_ID(a0)\n") - self.assembly_file_fd.write("\n") - self.assembly_file_fd.write(" li t0, TRAP_OVERRIDE_ATTRIBUTES_STRUCT_SIZE_IN_BYTES\n") - self.assembly_file_fd.write(" mul t0, a0, t0\n") - self.assembly_file_fd.write(" la t1, trap_override_attributes_region\n") - self.assembly_file_fd.write(" add t0, t1, t0\n") - self.assembly_file_fd.write( - " SET_THREAD_ATTRIBUTES_TRAP_OVERRIDE_STRUCT_ADDRESS(t0)\n" - ) - self.assembly_file_fd.write("\n") - self.assembly_file_fd.write( - " li t0, REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES * MAX_NUM_CONTEXT_SAVES\n" - ) - self.assembly_file_fd.write(" mul t0, a0, t0\n") - self.assembly_file_fd.write("\n") - if "mmode" in modes: - self.assembly_file_fd.write(" la t1, mmode_reg_context_save_region\n") - self.assembly_file_fd.write(" add t1, t1, t0\n") - self.assembly_file_fd.write(" la t2, mmode_reg_context_save_region_end\n") - self.assembly_file_fd.write(f" bgeu t1, t2, jumpstart_{mode}_fail\n") - self.assembly_file_fd.write( - " SET_THREAD_ATTRIBUTES_MMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(t1)\n" - ) - self.assembly_file_fd.write(" li t1, MAX_NUM_CONTEXT_SAVES\n") - self.assembly_file_fd.write( - " SET_THREAD_ATTRIBUTES_NUM_CONTEXT_SAVES_REMAINING_IN_MMODE(t1)\n" - ) - self.assembly_file_fd.write("\n") - self.assembly_file_fd.write(" la t1, smode_reg_context_save_region\n") - self.assembly_file_fd.write(" add t1, t1, t0\n") - self.assembly_file_fd.write(" la t2, smode_reg_context_save_region_end\n") - self.assembly_file_fd.write(f" bgeu t1, t2, jumpstart_{mode}_fail\n") - self.assembly_file_fd.write( - " 
SET_THREAD_ATTRIBUTES_SMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(t1)\n" - ) - self.assembly_file_fd.write(" li t1, MAX_NUM_CONTEXT_SAVES\n") - self.assembly_file_fd.write( - " SET_THREAD_ATTRIBUTES_NUM_CONTEXT_SAVES_REMAINING_IN_SMODE(t1)\n" - ) - self.assembly_file_fd.write("\n") - self.assembly_file_fd.write(" li t0, 0\n") - self.assembly_file_fd.write(" SET_THREAD_ATTRIBUTES_SMODE_SETUP_DONE(t0)\n") - self.assembly_file_fd.write(" SET_THREAD_ATTRIBUTES_VSMODE_SETUP_DONE(t0)\n") - self.assembly_file_fd.write("\n") - self.assembly_file_fd.write(" SET_THREAD_ATTRIBUTES_CURRENT_V_BIT(t0)\n") - self.assembly_file_fd.write("\n") - self.assembly_file_fd.write(f" li t0, {mode_encodings[mode]}\n") - self.assembly_file_fd.write(" SET_THREAD_ATTRIBUTES_CURRENT_MODE(t0)\n") - self.assembly_file_fd.write("\n") - self.assembly_file_fd.write(" li t0, THREAD_ATTRIBUTES_BOOKEND_MAGIC_NUMBER_VALUE\n") - self.assembly_file_fd.write(" SET_THREAD_ATTRIBUTES_BOOKEND_MAGIC_NUMBER(t0)\n") - self.assembly_file_fd.write("\n") - self.assembly_file_fd.write(" ret\n") - - def generate_reg_context_save_restore_code(self): - assert ( - self.attributes_data["reg_context_to_save_across_exceptions"]["temp_register"] - not in self.attributes_data["reg_context_to_save_across_exceptions"]["registers"][ - "gprs" - ] - ) - - num_registers = 0 - for reg_type in self.attributes_data["reg_context_to_save_across_exceptions"]["registers"]: - reg_names = self.attributes_data["reg_context_to_save_across_exceptions"]["registers"][ - reg_type - ] - for reg_name in reg_names: - self.defines_file_fd.write( - f"#define {reg_name.upper()}_OFFSET_IN_SAVE_REGION ({num_registers} * 8)\n" - ) - num_registers += 1 - - temp_reg_name = self.attributes_data["reg_context_to_save_across_exceptions"][ - "temp_register" - ] - - self.defines_file_fd.write( - f"\n#define REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES ({num_registers} * 8)\n" - ) - self.defines_file_fd.write( - f"\n#define MAX_NUM_CONTEXT_SAVES 
{self.attributes_data['reg_context_to_save_across_exceptions']['max_num_context_saves']}\n" - ) - - self.defines_file_fd.write("\n#define SAVE_ALL_GPRS ;") - for gpr_name in self.attributes_data["reg_context_to_save_across_exceptions"]["registers"][ - "gprs" - ]: - self.defines_file_fd.write( - f"\\\n sd {gpr_name}, {gpr_name.upper()}_OFFSET_IN_SAVE_REGION({temp_reg_name}) ;" - ) - self.defines_file_fd.write("\n\n") - - self.defines_file_fd.write("\n#define RESTORE_ALL_GPRS ;") - for gpr_name in self.attributes_data["reg_context_to_save_across_exceptions"]["registers"][ - "gprs" - ]: - self.defines_file_fd.write( - f"\\\n ld {gpr_name}, {gpr_name.upper()}_OFFSET_IN_SAVE_REGION({temp_reg_name}) ;" - ) - self.defines_file_fd.write("\n\n") - - self.assembly_file_fd.write('\n\n.section .jumpstart.data.smode, "aw"\n') - modes = ListUtils.intersection(["mmode", "smode"], self.priv_modes_enabled) - self.assembly_file_fd.write( - f"\n# {modes} context saved registers:\n# {self.attributes_data['reg_context_to_save_across_exceptions']['registers']}\n" - ) - for mode in modes: - self.assembly_file_fd.write(f".global {mode}_reg_context_save_region\n") - self.assembly_file_fd.write(f"{mode}_reg_context_save_region:\n") - for i in range(self.attributes_data["max_num_harts_supported"]): - self.assembly_file_fd.write( - f" # {mode} context save area for hart {i}'s {num_registers} registers. 
{self.attributes_data['reg_context_to_save_across_exceptions']['max_num_context_saves']} nested contexts supported.\n" - ) - for i in range( - self.attributes_data["reg_context_to_save_across_exceptions"][ - "max_num_context_saves" - ] - ): - f" # Context {i}\n" - self.assembly_file_fd.write(f" .zero {num_registers * 8}\n\n") - self.assembly_file_fd.write(f".global {mode}_reg_context_save_region_end\n") - self.assembly_file_fd.write(f"{mode}_reg_context_save_region_end:\n\n") - - -def main(): - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - "--jumpstart_source_attributes_yaml", - help="YAML containing the jumpstart attributes.", - required=True, - type=str, - ) - parser.add_argument( - "--override_jumpstart_source_attributes", - help="Overrides the JumpStart source attributes.", - required=False, - nargs="+", - default=None, - ) - parser.add_argument( - "--priv_modes_enabled", - help=".", - required=True, - nargs="+", - default=None, - ) - parser.add_argument( - "--defines_file", help="Header file containing the defines.", required=True, type=str - ) - parser.add_argument( - "--data_structures_file", - help="Header file containing the c structures.", - required=True, - type=str, - ) - parser.add_argument( - "--assembly_file", help="Assembly file containing functions.", required=True, type=str - ) - parser.add_argument( - "-v", "--verbose", help="Verbose output.", action="store_true", default=False - ) - args = parser.parse_args() - - if args.verbose: - log.basicConfig(format="%(levelname)s: [%(threadName)s]: %(message)s", level=log.DEBUG) - else: - log.basicConfig(format="%(levelname)s: [%(threadName)s]: %(message)s", level=log.INFO) - - source_generator = JumpStartGeneratedSource( - args.jumpstart_source_attributes_yaml, - args.override_jumpstart_source_attributes, - args.defines_file, - args.data_structures_file, - args.assembly_file, - args.priv_modes_enabled, - ) - - source_generator.generate() - - -if __name__ == "__main__": - 
main() diff --git a/scripts/memory_management/__init__.py b/scripts/memory_management/__init__.py index f041cfae..c2cb6986 100644 --- a/scripts/memory_management/__init__.py +++ b/scripts/memory_management/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/scripts/memory_management/linker_script.py b/scripts/memory_management/linker_script.py index 598b2524..94791c05 100644 --- a/scripts/memory_management/linker_script.py +++ b/scripts/memory_management/linker_script.py @@ -1,10 +1,12 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 import logging as log import sys +from .memory_mapping import MemoryMapping +from .page_size import PageSize from .page_tables import TranslationStage @@ -18,7 +20,13 @@ def __init__(self, entry): raise ValueError( f"Entry does not have a valid destination address for the {stage} stage: {entry}" ) - self.start_address = entry.get_field(TranslationStage.get_translates_to(stage)) + + # Get VA as linker script is supposed to use Virtual address. For M-mode and R-code mappings + # fallback to PA as these don't have a virtual address. 
+ self.virt_start_address = entry.get_field(TranslationStage.get_translates_from(stage)) + self.phys_start_address = entry.get_field(TranslationStage.get_translates_to(stage)) + if self.virt_start_address is None: + self.virt_start_address = self.phys_start_address if entry.get_field("num_pages") is None: raise ValueError(f"Entry does not have a number of pages: {entry}") @@ -65,11 +73,17 @@ def __init__(self, entry): def get_top_level_name(self): return self.top_level_name - def get_start_address(self): - return self.start_address + def get_virt_start_address(self): + return self.virt_start_address + + def get_virt_end_address(self): + return self.virt_start_address + self.size - def get_end_address(self): - return self.start_address + self.size + def get_phys_start_address(self): + return self.phys_start_address + + def get_phys_end_address(self): + return self.phys_start_address + self.size def get_size(self): return self.size @@ -89,11 +103,11 @@ def merge(self, other_section): if subsection not in self.subsections: self.subsections.append(subsection) - if self.get_start_address() > other_section.get_start_address(): - self.start_address = other_section.get_start_address() + if self.get_phys_start_address() > other_section.get_phys_start_address(): + self.phys_start_address = other_section.get_phys_start_address() - if self.get_end_address() < other_section.get_end_address(): - self.size = other_section.get_end_address() - self.get_start_address() + if self.get_phys_end_address() < other_section.get_phys_end_address(): + self.size = other_section.get_phys_end_address() - self.get_phys_start_address() if other_section.is_padded(): self.padded = True @@ -102,13 +116,16 @@ def merge(self, other_section): self.type = other_section.get_type() def __str__(self): - return f"Section: {self.get_top_level_name()}; Start Address: {hex(self.get_start_address())}; Size: {self.get_size()}; Subsections: {self.get_subsections()}; Type: {self.get_type()}; Padded: 
{self.is_padded()}" + return f"Section: {self.get_top_level_name()}; Start Address: {hex(self.get_phys_start_address())}; Size: {self.get_size()}; Subsections: {self.get_subsections()}; Type: {self.get_type()}; Padded: {self.is_padded()}" class LinkerScript: - def __init__(self, entry_label, mappings, attributes_file): + def __init__(self, entry_label, elf_address_range, mappings, attributes_file): self.entry_label = entry_label self.attributes_file = attributes_file + self.elf_start_address, self.elf_end_address = elf_address_range + + self.guard_sections = None mappings_with_linker_sections = [] for stage in TranslationStage.get_enabled_stages(): @@ -140,18 +157,62 @@ def __init__(self, entry_label, mappings, attributes_file): f"Section names in {new_section} are used in {len(existing_sections_with_matching_subsections)} other sections." ) - # sort the self.sections by start address - self.sections.sort(key=lambda x: x.get_start_address()) + self.sections.sort(key=lambda x: x.get_phys_start_address()) - # check for overlaps in the sections + # Add guard sections after each section that isn't immediately followed + # by another section. + # The linker can detect overruns of a section if there is a section + # immediately following it in the memory layout. + # We will also need to generate the corresponding assembly code + # for each guard section. Otherwise the linker will ignore the guard section. + self.guard_sections = [] for i in range(len(self.sections) - 1): if ( - self.sections[i].get_start_address() + self.sections[i].get_size() - > self.sections[i + 1].get_start_address() + self.sections[i].get_phys_end_address() + < self.sections[i + 1].get_phys_start_address() ): - raise ValueError( - f"Linker sections overlap:\n\t{self.sections[i]}\n\t{self.sections[i + 1]}" + self.guard_sections.append( + LinkerScriptSection( + MemoryMapping( + { + "translation_stage": TranslationStage.get_enabled_stages()[ + 0 + ], # any stage works. We just need a valid one. 
+ TranslationStage.get_translates_to( + TranslationStage.get_enabled_stages()[0] + ): self.sections[i].get_phys_end_address(), + "num_pages": 1, + "page_size": PageSize.SIZE_4K, + "linker_script_section": f".linker_guard_section_{len(self.guard_sections)}", + } + ) + ) ) + self.sections.extend(self.guard_sections) + self.sections.sort(key=lambda x: x.get_phys_start_address()) + + # check for overlaps in the sections and that sections are within ELF address range + for i in range(len(self.sections)): + section_start = self.sections[i].get_phys_start_address() + section_end = section_start + self.sections[i].get_size() + + # Check section is within allowed ELF address range if specified + if self.elf_start_address is not None or self.elf_end_address is not None: + if self.elf_start_address is not None and section_start < self.elf_start_address: + raise ValueError( + f"{self.sections[i]} is outside allowed ELF address range - start address {hex(section_start)} is less than elf_start_address {hex(self.elf_start_address)}" + ) + if self.elf_end_address is not None and section_end > self.elf_end_address: + raise ValueError( + f"{self.sections[i]} is outside allowed ELF address range - end address {hex(section_end)} is greater than elf_end_address {hex(self.elf_end_address)}" + ) + + # Check for overlap with next section + if i < len(self.sections) - 1: + if section_end > self.sections[i + 1].get_phys_start_address(): + raise ValueError( + f"Linker sections overlap:\n\t{self.sections[i]}\n\t{self.sections[i + 1]}" + ) self.program_headers = [] for section in self.sections: @@ -181,6 +242,9 @@ def get_entry_label(self): def get_attributes_file(self): return self.attributes_file + def get_guard_sections(self): + return self.guard_sections + def generate(self, output_linker_script): file = open(output_linker_script, "w") if file is None: @@ -192,34 +256,51 @@ def generate(self, output_linker_script): file.write('OUTPUT_ARCH( "riscv" )\n') 
file.write(f"ENTRY({self.get_entry_label()})\n\n") + # Add MEMORY region definitions + file.write("MEMORY\n{\n") + for section in self.get_sections(): + memory_name = section.get_top_level_name().replace(".", "_").upper() + start_addr = hex(section.get_virt_start_address()) + size = hex(section.get_size()) + file.write(f" {memory_name} (rwx) : ORIGIN = {start_addr}, LENGTH = {size}\n") + file.write("}\n\n") + file.write("SECTIONS\n{\n") defined_sections = [] # The linker script lays out the diag in physical memory. The # mappings are already sorted by PA. for section in self.get_sections(): - file.write(f" /* {','.join(section.get_subsections())}:\n") + file.write(f"\n\n /* {','.join(section.get_subsections())}:\n") file.write( - f" PA Range: {hex(section.get_start_address())} - {hex(section.get_start_address() + section.get_size())}\n" + f" PA Range: {hex(section.get_phys_start_address())} - {hex(section.get_phys_end_address())}\n" + f" VA Range: {hex(section.get_virt_start_address())} - {hex(section.get_virt_end_address())}\n" ) file.write(" */\n") - file.write(f" . = {hex(section.get_start_address())};\n") + file.write(f" . = {hex(section.get_virt_start_address())};\n") - file.write(f" {section.get_top_level_name()} {section.get_type()} : {{\n") top_level_section_variable_name_prefix = ( section.get_top_level_name().replace(".", "_").upper() ) file.write(f" {top_level_section_variable_name_prefix}_START = .;\n") + file.write( + f" {section.get_top_level_name()} {section.get_type()} : AT({hex(section.get_phys_start_address())}) {{\n" + ) for section_name in section.get_subsections(): assert section_name not in defined_sections file.write(f" *({section_name})\n") defined_sections.append(section_name) if section.is_padded(): file.write(" BYTE(0)\n") - file.write(f" }} : {section.get_top_level_name()}\n\n") - file.write(f" . 
= {hex(section.get_start_address() + section.get_size() - 1)};\n") + file.write( + f" }} > {top_level_section_variable_name_prefix} : {section.get_top_level_name()}\n" + ) + file.write( + f" . = {hex(section.get_virt_start_address() + section.get_size() - 1)};\n" + ) file.write(f" {top_level_section_variable_name_prefix}_END = .;\n") - file.write("/DISCARD/ : { *(" + " ".join(self.get_discard_sections()) + ") }\n") + + file.write("\n\n/DISCARD/ : { *(" + " ".join(self.get_discard_sections()) + ") }\n") file.write("\n}\n") # Specify separate load segments in the program headers for the diff --git a/scripts/memory_management/memory_mapping.py b/scripts/memory_management/memory_mapping.py index 7f69c86e..62467687 100644 --- a/scripts/memory_management/memory_mapping.py +++ b/scripts/memory_management/memory_mapping.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 @@ -8,6 +8,12 @@ from .page_tables import AddressType, TranslationStage +class TranslationStageNoAddressTypesError(ValueError): + """Raised when a translation stage cannot be assigned to a memory mapping.""" + + pass + + class MappingField: def __init__( self, name, field_type, input_yaml_type, allowed_values, default_value, required @@ -28,9 +34,14 @@ def get_value(self): def check_value(self, value): if self.allowed_values is not None: - assert ( - value in self.allowed_values - ), f"Invalid value for field {self.name}: {value}. Allowed values are: {self.allowed_values}" + if isinstance(value, list): + assert all( + [v in self.allowed_values for v in value] + ), f"Invalid value for field {self.name}: {value}. Allowed values are: {self.allowed_values}" + else: + assert ( + value in self.allowed_values + ), f"Invalid value for field {self.name}: {value}. 
Allowed values are: {self.allowed_values}" def set_value_from_yaml(self, yaml_value): assert isinstance(yaml_value, self.input_yaml_type) @@ -57,7 +68,9 @@ def set_value(self, value): class MemoryMapping: - def __init__(self, mapping_dict) -> None: + supported_target_mmus = ["cpu"] + + def __init__(self, mapping_dict, max_num_cpus_supported=None) -> None: self.fields = { "va": MappingField("va", int, int, None, None, False), "gpa": MappingField("gpa", int, int, None, None, False), @@ -73,12 +86,13 @@ def __init__(self, mapping_dict) -> None: None, True, ), - "num_pages": MappingField("num_pages", int, int, None, None, True), + "num_pages": MappingField("num_pages", int, int, None, None, False), + "num_pages_per_cpu": MappingField("num_pages_per_cpu", int, int, None, None, False), "alias": MappingField("alias", bool, bool, None, False, False), "pma_memory_type": MappingField( - "pma_memory_type", str, str, ["uc", "wc", "wb"], None, False + "pma_memory_type", str, str, ["uc", "wc", "wb", None], "uc", False ), - "pbmt_mode": MappingField("pbmt_mode", str, str, ["io", "nc"], None, False), + "pbmt_mode": MappingField("pbmt_mode", str, str, ["pma", "io", "nc"], "pma", False), "linker_script_section": MappingField( "linker_script_section", str, str, None, None, False ), @@ -87,6 +101,10 @@ def __init__(self, mapping_dict) -> None: "translation_stage": MappingField( "translation_stage", str, str, list(TranslationStage.stages.keys()), None, False ), + "target_mmu": MappingField( + "target_mmu", list, list, self.supported_target_mmus, ["cpu"], False + ), + "alignment": MappingField("alignment", int, int, None, None, False), } assert set(self.fields.keys()).issuperset( @@ -102,10 +120,43 @@ def __init__(self, mapping_dict) -> None: else: self.fields[field_name].set_value_from_yaml(mapping_dict[field_name]) + if ( + mapping_dict.get("num_pages", None) is None + and mapping_dict.get("num_pages_per_cpu", None) is None + ): + raise ValueError( + f"num_pages or num_pages_per_cpu 
must be specified for the mapping: {mapping_dict}" + ) + elif ( + mapping_dict.get("num_pages", None) is not None + and mapping_dict.get("num_pages_per_cpu", None) is not None + ): + raise ValueError( + f"num_pages and num_pages_per_cpu cannot both be specified for the mapping: {mapping_dict}" + ) + + # Convert num_pages_per_cpu to num_pages. We only need num_pages going forward. + if mapping_dict.get("num_pages_per_cpu", None) is not None: + if max_num_cpus_supported is None: + raise ValueError( + "max_num_cpus_supported cannot be None when num_pages_per_cpu is not None" + ) + self.fields["num_pages"].set_value( + int(mapping_dict["num_pages_per_cpu"]) * max_num_cpus_supported + ) + + # Alias mappings should have no pma_memory_type. + if self.get_field("alias") is True and mapping_dict.get("pma_memory_type") is None: + self.set_field("pma_memory_type", None) + self.set_translation_stage() self.sanity_check_field_values() + @classmethod + def get_supported_targets(self): + return self.supported_target_mmus + def set_translation_stage(self): if self.get_field("translation_stage") is not None: return @@ -116,9 +167,11 @@ def set_translation_stage(self): if self.get_field(address_type) is not None ] - assert ( - len(address_types) <= 2 - ), f"Mapping has more than 2 address types set: {address_types}" + if len(address_types) == 0: + raise TranslationStageNoAddressTypesError(f"No address types set for mapping: {self}") + + if len(address_types) > 2: + raise ValueError(f"Mapping has more than 2 address types set: {address_types}") for stage in TranslationStage.get_enabled_stages(): if ( @@ -184,17 +237,23 @@ def sanity_check_field_values(self): f"{destination_address_type.upper()} value {self.get_field(destination_address_type)} is not aligned with page_size {self.get_field('page_size')}" ) - # Remove the source and destination addresses from the list of address types. + # Check that we only have the allowed set of address types set for this + # mapping. 
disallowed_address_types = AddressType.get_all_address_types() - disallowed_address_types.remove(source_address_type) disallowed_address_types.remove(destination_address_type) + if ( + TranslationStage.get_selected_mode_for_stage(self.get_field("translation_stage")) + != "bare" + ): + # Only non-bare mappings can have source address type set. + disallowed_address_types.remove(source_address_type) - assert all( - [ - address_type in self.fields.keys() and self.get_field(address_type) is None - for address_type in disallowed_address_types - ] - ), f"Disallowed address type in: {disallowed_address_types} when translation_stage is set to {self.get_field('translation_stage')}" + for address_type in disallowed_address_types: + assert address_type in self.fields.keys() + if self.get_field(address_type) is not None: + raise ValueError( + f"Address type '{address_type}' invalid for translation stage '{self.get_field('translation_stage')}' with translation mode '{TranslationStage.get_selected_mode_for_stage(self.get_field('translation_stage'))}' in mapping:\n{self}\n\n" + ) # Make sure that there are only 2 address types set for this mapping. address_types = [ @@ -237,6 +296,9 @@ def sanity_check_field_values(self): ): raise ValueError(f"umode not set to 1 for g stage mapping: {self}") + # Validate canonical addresses for virtual addresses + self._validate_canonical_addresses() + def get_field(self, field_name): assert field_name in self.fields.keys() return self.fields[field_name].get_value() @@ -258,3 +320,62 @@ def __str__(self) -> str: def copy(self): return copy.deepcopy(self) + + def _validate_canonical_addresses(self): + """ + Validate that virtual addresses are canonical for the given translation mode. 
+ """ + # Get the translation stage and mode + translation_stage = self.get_field("translation_stage") + if translation_stage is None: + return + + translation_mode = TranslationStage.get_selected_mode_for_stage(translation_stage) + if translation_mode == "bare": + return + + # Get the source address type for this stage + source_address_type = TranslationStage.get_translates_from(translation_stage) + va = self.get_field(source_address_type) + + if va is None: + return + + # Validate the canonical address + self._validate_canonical_address(va, translation_mode) + + def _validate_canonical_address(self, va, translation_mode): + """ + Validate that a 64-bit virtual address is canonical for the given translation mode. + + Args: + va: 64-bit virtual address to validate + translation_mode: The translation mode (sv39, sv48, sv57, etc.) + + Raises: + ValueError: If the address is non-canonical for the given mode + """ + # Get the attributes for this translation mode + from .page_tables import PageTableAttributes + + va_mask = PageTableAttributes(translation_mode).get_attribute("va_mask") + va_bits = va_mask.bit_length() # Number of valid VA bits + + # Extract the sign bit (most significant valid bit) + sign_bit = (va >> (va_bits - 1)) & 1 + + # Extract the upper bits that should be sign-extended + actual_upper = va >> va_bits + + # Calculate what the upper bits should be (all 0s or all 1s) + if sign_bit: + expected_upper = (1 << (64 - va_bits)) - 1 # All 1s + else: + expected_upper = 0 # All 0s + + # Check if the upper bits are properly sign-extended + if actual_upper != expected_upper: + raise ValueError( + f"Non-canonical address 0x{va:016x} for {translation_mode}: " + f"bits 63:{va_bits} (0x{actual_upper:016x}) must all equal bit {va_bits-1} ({sign_bit})" + ) diff --git a/scripts/memory_management/page_size.py b/scripts/memory_management/page_size.py index 35477fd6..5528ec22 100644 --- a/scripts/memory_management/page_size.py +++ b/scripts/memory_management/page_size.py 
@@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/scripts/memory_management/page_tables.py b/scripts/memory_management/page_tables.py index 9c9fa97e..9403e469 100644 --- a/scripts/memory_management/page_tables.py +++ b/scripts/memory_management/page_tables.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 @@ -6,7 +6,6 @@ import enum import logging as log import math -import sys import typing from data_structures import BitField @@ -77,28 +76,32 @@ class TranslationStage: stages = { "s": { - "modes": ["bare", "sv39", "sv48"], + "valid_modes": ["bare", "sv39", "sv48"], + "selected_mode": None, "translates": ["va", "pa"], "virtualization_enabled": False, "next_stage": None, "atp_register": "satp", }, "hs": { - "modes": ["bare", "sv39", "sv48"], + "valid_modes": ["bare", "sv39", "sv48"], + "selected_mode": None, "translates": ["va", "pa"], "virtualization_enabled": True, "next_stage": None, "atp_register": "satp", }, "vs": { - "modes": ["bare", "sv39", "sv48"], + "valid_modes": ["bare", "sv39", "sv48"], + "selected_mode": None, "translates": ["va", "gpa"], "virtualization_enabled": True, "next_stage": "g", "atp_register": "vsatp", }, "g": { - "modes": ["bare", "sv39x4", "sv48x4"], + "valid_modes": ["bare", "sv39x4", "sv48x4"], + "selected_mode": None, "translates": ["gpa", "spa"], "virtualization_enabled": True, "next_stage": None, @@ -143,7 +146,34 @@ def is_valid_mode_for_stage(cls, stage: str, mode: str) -> bool: if TranslationMode.is_valid_mode(mode) is False: raise ValueError(f"Invalid TranslationMode: {mode}") - return TranslationMode.get_encoding(mode) in cls.stages[stage]["modes"] + return mode in cls.stages[stage]["valid_modes"] + + @classmethod + def set_selected_mode_for_stage(cls, stage: str, mode: str): + if not cls.is_valid_stage(stage): 
+ raise ValueError( + f"Invalid TranslationStage: {stage} with virtualization enabled: {cls.virtualization_enabled}" + ) + + if TranslationMode.is_valid_mode(mode) is False: + raise ValueError(f"Invalid TranslationMode: {mode}") + + if not TranslationStage.is_valid_mode_for_stage(stage, mode): + raise ValueError(f"Invalid TranslationMode: {mode} for TranslationStage: {stage}") + + cls.stages[stage]["selected_mode"] = mode + + @classmethod + def get_selected_mode_for_stage(cls, stage: str): + if not cls.is_valid_stage(stage): + raise ValueError( + f"Invalid TranslationStage: {stage} with virtualization enabled: {cls.virtualization_enabled}" + ) + + if cls.stages[stage]["selected_mode"] is None: + raise ValueError(f"TranslationMode not set for TranslationStage: {stage}") + + return cls.stages[stage]["selected_mode"] @classmethod def get_address_types(cls, stage: str): @@ -212,6 +242,7 @@ class PageTableAttributes: "pte_ppn_bits": [(53, 28), (27, 19), (18, 10)], "page_sizes": [PageSize.SIZE_1G, PageSize.SIZE_2M, PageSize.SIZE_4K], "pagetable_sizes": [PageSize.SIZE_4K, PageSize.SIZE_4K, PageSize.SIZE_4K], + "va_mask": (1 << 39) - 1, }, "sv48": { "pte_size_in_bytes": 8, @@ -219,6 +250,7 @@ class PageTableAttributes: "va_vpn_bits": [(47, 39), (38, 30), (29, 21), (20, 12)], "pa_ppn_bits": [(55, 39), (38, 30), (29, 21), (20, 12)], "pte_ppn_bits": [(53, 37), (36, 28), (27, 19), (18, 10)], + "va_mask": (1 << 48) - 1, "page_sizes": [ PageSize.SIZE_512G, PageSize.SIZE_1G, @@ -241,10 +273,12 @@ class PageTableAttributes: # sv39x4 is identical to an Sv39 virtual address, except with # 2 more bits at the high end in VPN[2] mode_attributes["sv39x4"]["va_vpn_bits"][0] = (40, 30) + mode_attributes["sv39x4"]["va_mask"] = (1 << 40) - 1 # sv48x4 is identical to an Sv48 virtual address, except with # 2 more bits at the high end in VPN[3] mode_attributes["sv48x4"]["va_vpn_bits"][0] = (49, 39) + mode_attributes["sv48x4"]["va_mask"] = (1 << 49) - 1 # For Sv32x4, Sv39x4, Sv48x4, and Sv57x4, 
the root page table is 16 # KiB and must be aligned to a 16-KiB boundary. @@ -309,7 +343,8 @@ def __init__(self, translation_mode, max_num_4K_pages, memory_mappings): # List of PageTablePage objects self.pages = [] self.translation_mode = translation_mode - self.translation_stage = memory_mappings[0].get_field("translation_stage") + self.mappings = memory_mappings + self.translation_stage = self.mappings[0].get_field("translation_stage") self.max_num_4K_pages = max_num_4K_pages self.asm_label = f"{self.translation_stage}_stage_pagetables_start" @@ -318,7 +353,7 @@ def __init__(self, translation_mode, max_num_4K_pages, memory_mappings): self.pte_memory = {} self.start_address = None - for mapping in memory_mappings: + for mapping in self.mappings: if mapping.get_field( "linker_script_section" ) is not None and f"{self.translation_stage}_stage.pagetables" in mapping.get_field( @@ -330,12 +365,9 @@ def __init__(self, translation_mode, max_num_4K_pages, memory_mappings): break if self.start_address is None: - log.error("No pagetables section found in memory mappings") - sys.exit(1) + raise Exception("No pagetables section found in memory mappings") - self.create_from_mappings( - mapping for mapping in memory_mappings if mapping.is_bare_mapping() is False - ) + self.create_from_mappings() def get_asm_label(self): return self.asm_label @@ -352,6 +384,10 @@ def get_pte(self, address): def get_new_page(self, va, level): log.debug(f"get_page_table_page({hex(va)}, {level})") assert self.start_address is not None + + # When creating pagetable entries, we need to ignore the upper VA bits + va_mask = self.attributes.get_attribute("va_mask") + va = va & va_mask # look for an existing pagetable page that contains the given VA for page in self.pages: if page.contains(va, level): @@ -417,10 +453,9 @@ def write_sparse_memory(self, address, value): if address in self.pte_memory: if self.pte_memory[address] != value: - log.error( + raise Exception( f"[{hex(address)}] already contains 
a different value {hex(self.pte_memory[address])}. Cannot update to {hex(value)}" ) - sys.exit(1) log.debug(f"[{hex(address)}] already contains {hex(value)}. No update needed.") else: self.pte_memory[address] = value @@ -434,11 +469,14 @@ def read_sparse_memory(self, address): return None # Populates the sparse memory with the pagetable entries - def create_from_mappings(self, memory_mappings): + def create_from_mappings(self): source_address_type = TranslationStage.get_translates_from(self.translation_stage) dest_address_type = TranslationStage.get_translates_to(self.translation_stage) - for entry in self.split_mappings_at_page_granularity(memory_mappings): + # No page tables for the bare mappings. + mappings = [mapping for mapping in self.mappings if mapping.is_bare_mapping() is False] + + for entry in self.split_mappings_at_page_granularity(mappings): assert self.translation_stage == entry.get_field("translation_stage") assert entry.get_field("page_size") in self.get_attribute("page_sizes") leaf_level = self.get_attribute("page_sizes").index(entry.get_field("page_size")) @@ -497,11 +535,10 @@ def create_from_mappings(self, memory_mappings): pte_value, 1, self.attributes.common_attributes["d_bit"] ) - if entry.get_field("pbmt_mode") is not None: - pbmt_mode = PbmtMode.get_encoding(entry.get_field("pbmt_mode").lower()) - pte_value = BitField.place_bits( - pte_value, pbmt_mode, self.attributes.common_attributes["pbmt_bits"] - ) + pbmt_mode = PbmtMode.get_encoding(entry.get_field("pbmt_mode").lower()) + pte_value = BitField.place_bits( + pte_value, pbmt_mode, self.attributes.common_attributes["pbmt_bits"] + ) pte_value = BitField.place_bits( pte_value, @@ -547,3 +584,6 @@ def create_from_mappings(self, memory_mappings): self.pte_memory[pte_region_sparse_memory_start] = 0 if pte_region_sparse_memory_end not in self.pte_memory: self.pte_memory[pte_region_sparse_memory_end] = 0 + + def get_mappings(self): + return self.mappings diff --git a/scripts/public/functions.py 
b/scripts/public/functions.py index 3db30ddf..0243018f 100644 --- a/scripts/public/functions.py +++ b/scripts/public/functions.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/scripts/system/functions.py b/scripts/system/functions.py index e8713cdb..a32ae75e 100644 --- a/scripts/system/functions.py +++ b/scripts/system/functions.py @@ -1,12 +1,126 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 +""" +System utility functions for process management and file operations. + +This module includes an automatic process cleanup mechanism that ensures spawned +subprocesses (like Spike) are killed when the script is interrupted (Ctrl+C) or exits. + +Process Cleanup Mechanism: +-------------------------- +1. All processes spawned via run_command() are tracked in a global registry +2. A SIGINT (Ctrl+C) handler is installed at module import time +3. When Ctrl+C is pressed: + - The signal handler immediately kills all registered process groups + - The original Python signal handler is called to raise KeyboardInterrupt + - This ensures single Ctrl+C kills all Spike processes across all threads +4. 
An atexit handler provides backup cleanup on normal script exit +""" + +import atexit import logging as log import os import shutil import signal import subprocess +import threading +import time + +# Global registry to track active process groups so they can be cleaned up on interrupt +_active_process_groups = set() +_process_groups_lock = threading.Lock() +_original_sigint_handler = signal.getsignal(signal.SIGINT) +_cleanup_in_progress = False + + +def register_process_group(pgid): + """Register a process group ID for cleanup on interrupt.""" + with _process_groups_lock: + _active_process_groups.add(pgid) + log.debug(f"Registered process group: {pgid}") + + +def unregister_process_group(pgid): + """Unregister a process group ID.""" + with _process_groups_lock: + _active_process_groups.discard(pgid) + log.debug(f"Unregistered process group: {pgid}") + + +def cleanup_all_process_groups(show_message=True): + """Kill all registered process groups. Called on script interruption or exit. + + This function is idempotent and safe to call multiple times. 
+ """ + global _cleanup_in_progress + + with _process_groups_lock: + # Prevent concurrent cleanup attempts + if _cleanup_in_progress or not _active_process_groups: + return + + _cleanup_in_progress = True + process_groups = list(_active_process_groups) + + # Only print if we have processes to clean up and message is requested + if show_message: + try: + log.info("Cleaning up spawned processes...") + except Exception: + # Logging might not be available during shutdown + try: + print("\nCleaning up spawned processes...", flush=True) + except Exception: + pass + + # First pass: send SIGTERM to all process groups + for pgid in process_groups: + try: + os.killpg(pgid, signal.SIGTERM) + try: + log.debug(f"Sent SIGTERM to process group: {pgid}") + except Exception: + pass + except ProcessLookupError: + # Process already terminated + pass + except Exception as e: + try: + log.warning(f"Failed to kill process group {pgid}: {e}") + except Exception: + pass + + # Give processes a brief moment to terminate gracefully + if process_groups: + time.sleep(0.05) + + # Clear the registry + with _process_groups_lock: + _active_process_groups.clear() + _cleanup_in_progress = False + + +def _sigint_handler(signum, frame): + """Handle SIGINT (Ctrl+C) by immediately killing all spawned processes.""" + # First, kill all spawned processes immediately + cleanup_all_process_groups(show_message=True) + + # Then restore and call the original handler to raise KeyboardInterrupt + signal.signal(signal.SIGINT, _original_sigint_handler) + if callable(_original_sigint_handler): + _original_sigint_handler(signum, frame) + else: + # If no handler or default, raise KeyboardInterrupt + raise KeyboardInterrupt() + + +# Install our signal handler at import time. 
+signal.signal(signal.SIGINT, _sigint_handler) + +# Register cleanup function to run at exit (backup) +atexit.register(lambda: cleanup_all_process_groups(show_message=False)) def create_empty_directory(directory): @@ -34,9 +148,21 @@ def find_files_with_extensions_in_dir(root, extensions): return sources -def run_command(command, run_directory): +def read_io_stream(stream, callback): + for line in iter(stream.readline, b""): + callback(line) + + +def run_command(command, run_directory, timeout=None, extra_env=None): log.debug(f"Running command: {' '.join(command)}") group_pid = None + returncode = None + stdout_output = [] + stderr_output = [] + # Prepare environment + env = os.environ.copy() + if extra_env is not None: + env.update(extra_env) try: p = subprocess.Popen( command, @@ -44,24 +170,61 @@ def run_command(command, run_directory): stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid, # Assign the child and all its subprocesses to a new process group. + env=env, ) group_pid = os.getpgid(p.pid) - stdout, stderr = p.communicate() - returncode = p.wait() + register_process_group(group_pid) + + # Function to capture output + def capture_output(stream, log_func, output_list): + for line in iter(stream.readline, b""): + decoded_line = line.decode().strip() + log_func(decoded_line) + output_list.append(decoded_line) + + # Print stdout and stderr in real-time as they are produced + stdout_thread = threading.Thread( + target=capture_output, args=(p.stdout, lambda x: log.debug(x), stdout_output) + ) + stderr_thread = threading.Thread( + target=capture_output, args=(p.stderr, lambda x: log.debug(x), stderr_output) + ) + stdout_thread.start() + stderr_thread.start() + + try: + returncode = p.wait(timeout=timeout) + except subprocess.TimeoutExpired: + log.warning(f"Command timed out after {timeout}s, killing process group {group_pid}") + try: + os.killpg(group_pid, signal.SIGTERM) + except ProcessLookupError: + pass # Process already terminated + 
returncode = -1 + if returncode != 0: - log.error(f"Command: {' '.join(command)} failed.") - log.error(stdout.decode()) - log.error(stderr.decode()) - raise Exception(f"Command: {' '.join(command)} failed.") + log.error(f"COMMAND FAILED: {' '.join(command)}") + full_output = f"STDOUT:\n{'-' * 40}\n" + full_output += "\n".join(stdout_output) + full_output += f"\n\nSTDERR:\n{'-' * 40}\n" + full_output += "\n".join(stderr_output) + log.error(full_output) + else: + log.debug("Command executed successfully.") + + stdout_thread.join() + stderr_thread.join() + except KeyboardInterrupt: log.error(f"Command: {' '.join(command)} interrupted.") - if group_pid is not None: - # p.kill() seems to only kill the child process and not the - # subprocesses of the child. This leaves the subprocesses of the - # child orphaned. - # For example, "meson test" spawns spike which doesn't get killed - # when p.kill() is called on "meson test". - # Instead, kill the whole process group containing the child process - # and it's subprocesses. - os.killpg(group_pid, signal.SIGTERM) + # Note: cleanup_all_process_groups() is already called by the signal handler, + # but we call it here as a safety net in case the signal handler didn't run. + # The function is idempotent, so calling it multiple times is safe. + cleanup_all_process_groups(show_message=False) raise Exception(f"Command: {' '.join(command)} interrupted.") + finally: + # Always unregister the process group when done + if group_pid is not None: + unregister_process_group(group_pid) + + return returncode diff --git a/scripts/utils/__init__.py b/scripts/utils/__init__.py new file mode 100644 index 00000000..f382217e --- /dev/null +++ b/scripts/utils/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2025 Rivos Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 diff --git a/scripts/utils/binary_utils.py b/scripts/utils/binary_utils.py new file mode 100644 index 00000000..7d03a3d8 --- /dev/null +++ b/scripts/utils/binary_utils.py @@ -0,0 +1,41 @@ +# SPDX-FileCopyrightText: 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import subprocess +from typing import Optional + + +def get_elf_entry_point(elf_path: str) -> Optional[str]: + """ + Return the ELF entry point address as a hex string prefixed with 0x (e.g. "0x90000000"). + Uses riscv64-unknown-elf-readelf to extract the value. + """ + try: + result = subprocess.run( + ["riscv64-unknown-elf-readelf", "-h", elf_path], capture_output=True, text=True + ) + if result.returncode != 0: + log.error(f"readelf failed for {elf_path}: {result.stderr}") + return None + for line in (result.stdout or "").splitlines(): + line = line.strip() + if line.lower().startswith("entry point address:"): + # Expected formats: + # Entry point address: 0x90000000 + # Entry point address: 0x0000000090000000 + try: + value = line.split(":", 1)[1].strip() + except Exception: + value = "" + if not value: + return None + value = value.lower() + if value.startswith("0x"): + return value + # Fallback if readelf ever returns a plain number + return f"0x{value}" + except Exception as exc: + log.error(f"Failed to read ELF entry point from {elf_path}: {exc}") + return None diff --git a/scripts/utils/generate_batch_test_manifest.py b/scripts/utils/generate_batch_test_manifest.py new file mode 100755 index 00000000..a97a447a --- /dev/null +++ b/scripts/utils/generate_batch_test_manifest.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 + +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 + +import json +import sys + +import yaml + + +def load_manifest_json(file_path): + with open(file_path) as file: + data = json.load(file) + return data + + +manifest = {"payload": []} + +for test_manifest_file in sys.argv[1:]: + truf_test_manifest = load_manifest_json(test_manifest_file) + if truf_test_manifest: + manifest["payload"].append(truf_test_manifest) + +yaml_str = yaml.dump(manifest, default_flow_style=False) +print(yaml_str) diff --git a/scripts/utils/napot_utils.py b/scripts/utils/napot_utils.py new file mode 100644 index 00000000..34ec8c25 --- /dev/null +++ b/scripts/utils/napot_utils.py @@ -0,0 +1,109 @@ +# SPDX-FileCopyrightText: 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +from typing import Tuple + + +def is_napot_size(size: int) -> bool: + """ + Check if a size is a NAPOT (Naturally Aligned Power Of Two) value. + + Args: + size: The size to check + + Returns: + True if the size is a NAPOT value, False otherwise + """ + return size > 0 and (size & (size - 1)) == 0 + + +def get_next_napot_size(size: int) -> int: + """ + Get the next larger NAPOT size that can cover the given size. + + Args: + size: The minimum size needed + + Returns: + The next larger NAPOT size that can cover the given size + """ + if size <= 0: + return 1 + + if is_napot_size(size): + return size + + # Find the next larger NAPOT value + napot_size = 1 + while napot_size < size: + napot_size <<= 1 + + return napot_size + + +def get_previous_napot_size(size: int) -> int: + """ + Get the previous smaller NAPOT size. 
+ + Args: + size: The size to find the previous NAPOT for + + Returns: + The previous smaller NAPOT size + """ + if size <= 1: + return 1 + + # Find the next larger NAPOT value first + next_napot = get_next_napot_size(size) + + # If the input size is already NAPOT, return it + if next_napot == size: + return size + + # Otherwise, return the previous NAPOT + return next_napot >> 1 + + +def get_napot_sizes_for_range(size: int) -> Tuple[int, int]: + """ + Get both the previous and next NAPOT sizes for a given size. + + Args: + size: The size to find NAPOT sizes for + + Returns: + A tuple of (previous_napot_size, next_napot_size) + """ + next_napot = get_next_napot_size(size) + prev_napot = get_previous_napot_size(size) + + return (prev_napot, next_napot) + + +def align_to_napot_size(address: int, napot_size: int) -> int: + """ + Align an address to a NAPOT size boundary. + + Args: + address: The address to align + napot_size: The NAPOT size to align to + + Returns: + The aligned address + + Raises: + ValueError: If napot_size is not a valid NAPOT value + """ + + # Validate that napot_size is actually a NAPOT value + if not is_napot_size(napot_size): + raise ValueError(f"napot_size {napot_size} is not a valid NAPOT value") + + # If already aligned, return as-is + if address & (napot_size - 1) == 0: + return address + + # Find the next aligned address + return (address + napot_size - 1) & ~(napot_size - 1) diff --git a/src/common/data.privileged.S b/src/common/data.privileged.S new file mode 100644 index 00000000..31a1c321 --- /dev/null +++ b/src/common/data.privileged.S @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + + +# The supervisor data section is can be accessed from both +# machine and supervisor mode. 
+.section .jumpstart.cpu.data.privileged, "aw" + +.global cpu_status_tracker +cpu_status_tracker: + .rept MAX_NUM_CPUS_SUPPORTED + .byte CPU_INACTIVE + .endr + + +.align 2 +.global cpu_sync_point +cpu_sync_point: + # We're going to use the amoor.w instruction to update the bits + # so allocate 4 bytes. + .4byte 0x0 diff --git a/src/common/data.smode.S b/src/common/data.smode.S deleted file mode 100644 index 030c1f7e..00000000 --- a/src/common/data.smode.S +++ /dev/null @@ -1,23 +0,0 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -# -# SPDX-License-Identifier: Apache-2.0 - -#include "jumpstart_defines.h" - -# The supervisor data section is can be accessed from both -# machine and supervisor mode. -.section .jumpstart.data.smode, "aw" - -.global hart_status_tracker -hart_status_tracker: - .rept MAX_NUM_HARTS_SUPPORTED - .byte HART_INACTIVE - .endr - - -.align 2 -.global hart_sync_point -hart_sync_point: - # We're going to use the amoor.w instruction to update the bits - # so allocate 4 bytes. - .4byte 0x0 diff --git a/src/common/heap.smode.S b/src/common/heap.smode.S new file mode 100644 index 00000000..d2910a6d --- /dev/null +++ b/src/common/heap.smode.S @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + + +.section .jumpstart.cpu.text.smode, "ax" + +.global setup_default_heap +setup_default_heap: +#if ENABLE_HEAP == 1 + addi sp, sp, -16 + sd ra, 8(sp) + sd fp, 0(sp) + addi fp, sp, 16 + + la a0, _JUMPSTART_CPU_SMODE_HEAP_START + # The heap end is set to the last byte of the heap. + # Add 1 to the heap end to include the last byte. 
+ la a1, _JUMPSTART_CPU_SMODE_HEAP_END + addi a1, a1, 1 + li a2, BACKING_MEMORY_DDR + li a3, MEMORY_TYPE_WB + jal setup_heap + + ld ra, 8(sp) + ld fp, 0(sp) + addi sp, sp, 16 +#endif // ENABLE_HEAP == 1 + + ret diff --git a/src/common/heap.smode.c b/src/common/heap.smode.c index 6acc1f07..8444f2ad 100644 --- a/src/common/heap.smode.c +++ b/src/common/heap.smode.c @@ -1,65 +1,180 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +// SPDX-FileCopyrightText: 2016 by Lukasz Janyst #include "heap.smode.h" + +#include +#include + +#include "cpu_bits.h" #include "jumpstart.h" -#include "jumpstart_defines.h" #include "lock.smode.h" +#include "tablewalk.smode.h" #include "uart.smode.h" -#include +#define MEMCHUNK_USED 0x8000000000000000ULL +#define MEMCHUNK_MAX_SIZE (MEMCHUNK_USED - 1) + +#define NUM_HEAPS_SUPPORTED 3 + +#define MEMCHUNK_USED 0x8000000000000000ULL +#define MEMCHUNK_MAX_SIZE (MEMCHUNK_USED - 1) +#define MIN_HEAP_SEGMENT_BYTES \ + (PER_HEAP_ALLOCATION_METADATA_SIZE + MIN_HEAP_ALLOCATION_SIZE) + +// Helper macro to align size to minimum allocation size +#define ALIGN_TO_MIN_ALLOC(size) \ + ((((size - 1) >> __builtin_ctzll(MIN_HEAP_ALLOCATION_SIZE)) \ + << __builtin_ctzll(MIN_HEAP_ALLOCATION_SIZE)) + \ + MIN_HEAP_ALLOCATION_SIZE) -extern uint64_t _JUMPSTART_SMODE_HEAP_START[]; -extern uint64_t _JUMPSTART_SMODE_HEAP_END[]; +static_assert(sizeof(memchunk) == PER_HEAP_ALLOCATION_METADATA_SIZE, + "PER_HEAP_ALLOCATION_METADATA_SIZE mismatch"); -void setup_heap(void); -void print_heap(void); //------------------------------------------------------------------------------ -// Malloc helper structs +// Heap info struct //------------------------------------------------------------------------------ -struct memchunk { - struct memchunk *next; - uint64_t size; +struct heap_info { + uint8_t backing_memory; + uint8_t 
memory_type; + memchunk *head; + memchunk *last_allocated; // Track where we last allocated from + size_t size; + spinlock_t lock; + volatile uint8_t setup_done; }; -typedef struct memchunk memchunk; +__attr_privdata struct heap_info heaps[NUM_HEAPS_SUPPORTED] = { + {BACKING_MEMORY_DDR, MEMORY_TYPE_WB, NULL, NULL, 0, 0, 0}, + {BACKING_MEMORY_DDR, MEMORY_TYPE_WC, NULL, NULL, 0, 0, 0}, + {BACKING_MEMORY_DDR, MEMORY_TYPE_UC, NULL, NULL, 0, 0, 0}, +}; -#define MIN_HEAP_ALLOCATION_BYTES 8 -#define MIN_HEAP_SEGMENT_BYTES (sizeof(memchunk) + MIN_HEAP_ALLOCATION_BYTES) +__attr_stext static struct heap_info *find_matching_heap(uint8_t backing_memory, + uint8_t memory_type) { + for (int i = 0; i < NUM_HEAPS_SUPPORTED; i++) { + if (heaps[i].backing_memory == backing_memory && + heaps[i].memory_type == memory_type) { + return &heaps[i]; + } + } + return NULL; +} -__attribute__((section(".jumpstart.data.smode"))) static memchunk *head; -__attribute__(( - section(".jumpstart.data.smode"))) volatile uint8_t heap_setup_done = 0; +__attr_stext bool is_valid_heap(uint8_t backing_memory, uint8_t memory_type) { + struct heap_info *target_heap = + find_matching_heap(backing_memory, memory_type); + return (target_heap != NULL && target_heap->setup_done && + target_heap->head != 0); +} + +//------------------------------------------------------------------------------ +// Helper iterator for two-pass chunk search starting from last_allocated +// Returns the next chunk to check, or NULL when iteration is complete +//------------------------------------------------------------------------------ +typedef struct { + memchunk *current; + memchunk *start; + memchunk *head; + bool second_pass; +} chunk_iterator_t; + +__attr_stext void init_chunk_iterator(chunk_iterator_t *iter, + struct heap_info *heap) { + iter->head = heap->head; + iter->start = heap->last_allocated ? 
heap->last_allocated->next : heap->head; + if (!iter->start) + iter->start = heap->head; // Wrap around if at end + iter->current = iter->start; + iter->second_pass = false; +} + +__attr_stext memchunk *next_chunk(chunk_iterator_t *iter) { + if (!iter->current) { + // First pass exhausted, start second pass from head if needed + if (!iter->second_pass && iter->start != iter->head) { + iter->second_pass = true; + iter->current = iter->head; + } else { + return NULL; // Iteration complete + } + } + + // In second pass, stop when we reach the start point + if (iter->second_pass && iter->current == iter->start) { + return NULL; + } + + memchunk *result = iter->current; + iter->current = iter->current->next; + return result; +} + +//------------------------------------------------------------------------------ +// Helper functions to convert numeric values to readable strings +//------------------------------------------------------------------------------ +__attr_stext const char *backing_memory_to_string(uint8_t backing_memory) { + switch (backing_memory) { + case BACKING_MEMORY_DDR: + return "DDR"; + default: + return "UNKNOWN"; + } +} + +__attr_stext const char *memory_type_to_string(uint8_t memory_type) { + switch (memory_type) { + case MEMORY_TYPE_WB: + return "WB"; + case MEMORY_TYPE_WC: + return "WC"; + case MEMORY_TYPE_UC: + return "UC"; + default: + return "UNKNOWN"; + } +} -__attribute__((section(".jumpstart.data.smode"))) static spinlock_t heap_lock = - 0; -#define MEMCHUNK_USED 0x8000000000000000ULL -#define MEMCHUNK_MAX_SIZE (MEMCHUNK_USED - 1) //------------------------------------------------------------------------------ // Allocate memory on the heap //------------------------------------------------------------------------------ -__attribute__((section(".jumpstart.text.smode"))) void *malloc(size_t size) { - if (head == 0 || size > MEMCHUNK_MAX_SIZE) { +__attr_stext void *malloc_from_memory(size_t size, uint8_t backing_memory, + uint8_t memory_type) { 
+ if (!is_valid_heap(backing_memory, memory_type)) { + printk("Error: Heap not initialized for %s/%s.\n", + backing_memory_to_string(backing_memory), + memory_type_to_string(memory_type)); + jumpstart_smode_fail(); + return 0; + } + struct heap_info *target_heap = + find_matching_heap(backing_memory, memory_type); + if (size > MEMCHUNK_MAX_SIZE || size == 0) { + printk("Error: Invalid size for malloc request\n"); + jumpstart_smode_fail(); return 0; } void *result = 0; - acquire_lock(&heap_lock); - //---------------------------------------------------------------------------- - // Allocating anything less than 8 bytes is kind of pointless, the - // book-keeping overhead is too big. - //---------------------------------------------------------------------------- - uint64_t alloc_size = (((size - 1) >> 3) << 3) + 8; + acquire_lock(&target_heap->lock); + + uint64_t alloc_size = ALIGN_TO_MIN_ALLOC(size); //---------------------------------------------------------------------------- - // Try to find a suitable chunk that is unused + // Try to find a suitable chunk that is unused, starting from last allocation //---------------------------------------------------------------------------- - memchunk *chunk = head; - while (chunk) { + chunk_iterator_t iter; + init_chunk_iterator(&iter, target_heap); + + memchunk *chunk = NULL; + while ((chunk = next_chunk(&iter)) != NULL) { if (!(chunk->size & MEMCHUNK_USED) && chunk->size >= alloc_size) { break; } - chunk = chunk->next; } if (!chunk) { @@ -70,143 +185,398 @@ __attribute__((section(".jumpstart.text.smode"))) void *malloc(size_t size) { // Split the chunk if it's big enough to contain one more header and at // least 8 more bytes //---------------------------------------------------------------------------- - if (chunk->size > alloc_size + sizeof(memchunk) + 8) { + if (chunk->size >= alloc_size + MIN_HEAP_SEGMENT_BYTES) { memchunk *new_chunk = - (memchunk *)((void *)chunk + sizeof(memchunk) + alloc_size); - new_chunk->size = 
chunk->size - alloc_size - sizeof(memchunk); + (memchunk *)((void *)chunk + PER_HEAP_ALLOCATION_METADATA_SIZE + + alloc_size); + new_chunk->size = + chunk->size - alloc_size - PER_HEAP_ALLOCATION_METADATA_SIZE; new_chunk->next = chunk->next; chunk->next = new_chunk; chunk->size = alloc_size; } //---------------------------------------------------------------------------- - // Mark the chunk as used and return the memory + // Mark the chunk as used, update last_allocated, and return the memory //---------------------------------------------------------------------------- chunk->size |= MEMCHUNK_USED; - result = (void *)chunk + sizeof(memchunk); + target_heap->last_allocated = chunk; + result = (void *)chunk + PER_HEAP_ALLOCATION_METADATA_SIZE; exit_malloc: - release_lock(&heap_lock); + release_lock(&target_heap->lock); return result; } -//------------------------------------------------------------------------------ -// Free the memory -//------------------------------------------------------------------------------ -__attribute__((section(".jumpstart.text.smode"))) void free(void *ptr) { +__attr_stext void free_from_memory(void *ptr, uint8_t backing_memory, + uint8_t memory_type) { if (!ptr) { return; } - acquire_lock(&heap_lock); - memchunk *chunk = (memchunk *)((void *)ptr - sizeof(memchunk)); + if (!is_valid_heap(backing_memory, memory_type)) { + printk("Error: Heap not initialized for %s/%s.\n", + backing_memory_to_string(backing_memory), + memory_type_to_string(memory_type)); + jumpstart_smode_fail(); + } + + struct heap_info *target_heap = + find_matching_heap(backing_memory, memory_type); + + acquire_lock(&target_heap->lock); + + // Validate that ptr is within heap bounds + memchunk *chunk = + (memchunk *)((void *)ptr - PER_HEAP_ALLOCATION_METADATA_SIZE); + if (chunk < target_heap->head || !target_heap->head) { + printk("Error: Invalid free - address below heap start\n"); + goto exit_free; + } + + // Verify this is actually a used chunk + if (!(chunk->size & 
MEMCHUNK_USED)) { + printk("Error: Double free detected\n"); + jumpstart_smode_fail(); + } + + // Basic sanity check on chunk size + if ((chunk->size & MEMCHUNK_MAX_SIZE) > MEMCHUNK_MAX_SIZE) { + printk("Error: Invalid chunk size in free\n"); + jumpstart_smode_fail(); + } + + // Mark the chunk as free chunk->size &= ~MEMCHUNK_USED; - release_lock(&heap_lock); + + // Clear last_allocated if it points to the freed chunk + if (target_heap->last_allocated == chunk) { + target_heap->last_allocated = NULL; + } + + // Coalesce with next chunk if it exists and is free + if (chunk->next && !(chunk->next->size & MEMCHUNK_USED)) { + chunk->size += chunk->next->size + PER_HEAP_ALLOCATION_METADATA_SIZE; + chunk->next = chunk->next->next; + } + + // Coalesce with previous chunk if it exists and is free + memchunk *prev = target_heap->head; + while (prev && prev->next != chunk) { + prev = prev->next; + } + if (prev && !(prev->size & MEMCHUNK_USED)) { + prev->size += chunk->size + PER_HEAP_ALLOCATION_METADATA_SIZE; + prev->next = chunk->next; + + // We need chunk to set last_allocated if it's NULL. + chunk = prev; + } + + if (target_heap->last_allocated == NULL) { + // We've cleared last_allocated because it was set to the freed chunk. + // Look for the next allocated chunk after this one as replacement. + // We need to do this after any coalescing operations so that we're only + // assigning last_allocated to valid chunks. 
+ memchunk *next_allocated = chunk->next; + while (next_allocated && !(next_allocated->size & MEMCHUNK_USED)) { + next_allocated = next_allocated->next; + } + + if (next_allocated) { + // Found a next allocated chunk, use it + target_heap->last_allocated = next_allocated; + } else { + // No next allocated chunk found, look backwards + memchunk *prev = target_heap->head; + memchunk *prev_allocated = NULL; + while (prev && prev != chunk) { + if (prev->size & MEMCHUNK_USED) { + prev_allocated = prev; + } + prev = prev->next; + } + + if (prev_allocated) { + target_heap->last_allocated = prev_allocated; + } else { + // No allocated chunks found, set to NULL as fallback + // This will cause the next allocation to start from head + target_heap->last_allocated = NULL; + } + } + } + +exit_free: + release_lock(&target_heap->lock); } //------------------------------------------------------------------------------ // Set up the heap //------------------------------------------------------------------------------ -__attribute__((section(".jumpstart.text.smode"))) void setup_heap(void) { +__attr_stext void setup_heap(uint64_t heap_start, uint64_t heap_end, + uint8_t backing_memory, uint8_t memory_type) { disable_checktc(); - if (heap_setup_done) { + + struct heap_info *target_heap = + find_matching_heap(backing_memory, memory_type); + if (target_heap == NULL) { + printk( + "Error: No matching heap found for backing_memory=%d, memory_type=%d\n", + backing_memory, memory_type); + jumpstart_smode_fail(); + } + + if (target_heap->setup_done) { + // Verify the heap address matches what was previously set up + if (target_heap->head != (memchunk *)heap_start) { + printk("Error: Heap already initialized at different address. " + "Expected: 0x%lx, Got: 0x%lx\n", + (uint64_t)target_heap->head, heap_start); + jumpstart_smode_fail(); + } return; } - acquire_lock(&heap_lock); + acquire_lock(&target_heap->lock); - // Prevent double initialization. 
A hart might have been waiting for the lock - // while the heap was initialized by another hart. - if (heap_setup_done == 0) { - uint64_t *heap_start = (uint64_t *)&_JUMPSTART_SMODE_HEAP_START; - uint64_t *heap_end = (uint64_t *)&_JUMPSTART_SMODE_HEAP_END; + // Prevent double initialization. A cpu might have been waiting for the lock + // while the heap was initialized by another cpu. + if (target_heap->setup_done == 0) { - head = (memchunk *)heap_start; - head->next = NULL; - head->size = - (uint64_t)heap_end - (uint64_t)heap_start - (uint64_t)sizeof(memchunk); + // Translate the start and end of the heap sanity check it's memory type. + struct translation_info xlate_info; + translate_VA(heap_start, &xlate_info); + if (xlate_info.walk_successful == 0) { + printk("Error: Unable to translate heap start address.\n"); + jumpstart_smode_fail(); + } + + if (xlate_info.xatp_mode != VM_1_10_MBARE) { + // Only sanity check the memory type if the SATP mode is not Bare. + + // WB = PMA in PBMT + // UC = IO in PBMT + // WC = NC in PBMT + if ((memory_type == MEMORY_TYPE_WB && + xlate_info.pbmt_mode != PTE_PBMT_PMA) || + (memory_type == MEMORY_TYPE_UC && + xlate_info.pbmt_mode != PTE_PBMT_IO) || + (memory_type == MEMORY_TYPE_WC && + xlate_info.pbmt_mode != PTE_PBMT_NC)) { + printk("Error: Heap start address is not correct memory type."); + jumpstart_smode_fail(); + } + + translate_VA(heap_end - 1, &xlate_info); + if (xlate_info.walk_successful == 0) { + printk("Error: Unable to translate heap end address.\n"); + jumpstart_smode_fail(); + } + if ((memory_type == MEMORY_TYPE_WB && + xlate_info.pbmt_mode != PTE_PBMT_PMA) || + (memory_type == MEMORY_TYPE_UC && + xlate_info.pbmt_mode != PTE_PBMT_IO) || + (memory_type == MEMORY_TYPE_WC && + xlate_info.pbmt_mode != PTE_PBMT_NC)) { + printk("Error: Heap end address is not correct memory type."); + jumpstart_smode_fail(); + } + } - heap_setup_done = 1; + target_heap->head = (memchunk *)heap_start; + target_heap->last_allocated = NULL; 
// Initialize last_allocated to NULL + target_heap->head->next = NULL; + target_heap->head->size = + heap_end - heap_start - PER_HEAP_ALLOCATION_METADATA_SIZE; + target_heap->size = heap_end - heap_start; + + target_heap->setup_done = 1; + } else { + // Verify the heap address matches what was previously set up + if (target_heap->head != (memchunk *)heap_start) { + printk("Error: Heap already initialized at different address. " + "Expected: 0x%lx, Got: 0x%lx\n", + (uint64_t)target_heap->head, heap_start); + jumpstart_smode_fail(); + } + if (target_heap->size != heap_end - heap_start) { + printk("Error: Heap size mismatch. Expected: 0x%lx, Got: 0x%lx\n", + target_heap->size, heap_end - heap_start); + jumpstart_smode_fail(); + } } - release_lock(&heap_lock); + release_lock(&target_heap->lock); enable_checktc(); } -__attribute__((section(".jumpstart.text.smode"))) void *calloc(size_t nmemb, - size_t size) { - uint8_t *data = malloc(nmemb * size); - for (size_t i = 0; i < nmemb * size; ++i) { - data[i] = 0; +__attr_stext void deregister_heap(uint8_t backing_memory, uint8_t memory_type) { + struct heap_info *target_heap = + find_matching_heap(backing_memory, memory_type); + if (target_heap == NULL) { + printk( + "Error: No matching heap found for backing_memory=%d, memory_type=%d\n", + backing_memory, memory_type); + jumpstart_smode_fail(); + } + + if (target_heap->setup_done == 0) { + return; + } + + acquire_lock(&target_heap->lock); + + size_t size_of_all_chunks = 0; + + memchunk *chunk = target_heap->head; + while (chunk) { + if (chunk->size & MEMCHUNK_USED) { + printk("Error: Chunk still in use\n"); + jumpstart_smode_fail(); + } + size_of_all_chunks += chunk->size + PER_HEAP_ALLOCATION_METADATA_SIZE; + chunk = chunk->next; + } + + if (size_of_all_chunks != target_heap->size) { + printk("Error: Heap size mismatch. 
Expected: 0x%lx, Got: 0x%lx\n", + target_heap->size, size_of_all_chunks); + jumpstart_smode_fail(); + } + + target_heap->setup_done = 0; + target_heap->head = NULL; + target_heap->last_allocated = NULL; // Clear last_allocated pointer + target_heap->size = 0; + release_lock(&target_heap->lock); +} + +__attr_stext size_t get_heap_size(uint8_t backing_memory, uint8_t memory_type) { + if (!is_valid_heap(backing_memory, memory_type)) { + printk("Error: Heap not initialized for %s/%s.\n", + backing_memory_to_string(backing_memory), + memory_type_to_string(memory_type)); + jumpstart_smode_fail(); + return 0; + } + struct heap_info *target_heap = + find_matching_heap(backing_memory, memory_type); + if (target_heap == NULL) { + printk("Error: Heap not initialized for %s/%s.\n", + backing_memory_to_string(backing_memory), + memory_type_to_string(memory_type)); + jumpstart_smode_fail(); + return 0; + } + + return target_heap->size; +} + +__attr_stext void *calloc_from_memory(size_t nmemb, size_t size, + uint8_t backing_memory, + uint8_t memory_type) { + uint8_t *data = malloc_from_memory(nmemb * size, backing_memory, memory_type); + if (data) { + for (size_t i = 0; i < nmemb * size; ++i) { + *(data + i) = 0; + } } return data; } -__attribute__((section(".jumpstart.text.smode"))) void * -memalign(size_t alignment, size_t size) { - if (alignment & (alignment - 1)) { - // alignment is not a power of 2 +__attr_stext void *memalign_from_memory(size_t alignment, size_t size, + uint8_t backing_memory, + uint8_t memory_type) { + // Validate alignment is non-zero and a power of 2 + if (alignment == 0 || alignment & (alignment - 1)) { return 0; } - if (head == 0 || size > MEMCHUNK_MAX_SIZE) { + if (!is_valid_heap(backing_memory, memory_type)) { + printk("Error: Heap not initialized for %s/%s.\n", + backing_memory_to_string(backing_memory), + memory_type_to_string(memory_type)); + jumpstart_smode_fail(); return 0; } - if (alignment <= 8) { - return malloc(size); + struct heap_info 
*target_heap = + find_matching_heap(backing_memory, memory_type); + if (target_heap == NULL) { + printk("Error: Heap not initialized for %s/%s.\n", + backing_memory_to_string(backing_memory), + memory_type_to_string(memory_type)); + jumpstart_smode_fail(); + return 0; + } + + if (size > MEMCHUNK_MAX_SIZE) { + printk("Error: Invalid size for memalign request\n"); + jumpstart_smode_fail(); + return 0; + } + + // For small alignments, use regular malloc since heap ensures + // MIN_HEAP_ALLOCATION_SIZE alignment + if (alignment <= MIN_HEAP_ALLOCATION_SIZE) { + return malloc_from_memory(size, backing_memory, memory_type); } void *result = 0; - acquire_lock(&heap_lock); - //---------------------------------------------------------------------------- - // Allocating anything less than 8 bytes is kind of pointless, the - // book-keeping overhead is too big. - //---------------------------------------------------------------------------- - uint64_t alloc_size = (((size - 1) >> 3) << 3) + 8; + acquire_lock(&target_heap->lock); + + uint64_t alloc_size = ALIGN_TO_MIN_ALLOC(size); //---------------------------------------------------------------------------- - // Try to find a suitable chunk that is unused + // Try to find a suitable chunk that is unused, starting from last allocation //---------------------------------------------------------------------------- + chunk_iterator_t iter; + init_chunk_iterator(&iter, target_heap); + uint64_t pow2 = (uint64_t)__builtin_ctzll((uint64_t)alignment); uint8_t aligned = 0; uint64_t aligned_start = 0, start = 0, end = 0; - memchunk *chunk; - for (chunk = head; chunk; chunk = chunk->next) { - // Chunk used - if (chunk->size & MEMCHUNK_USED) { - continue; - } + memchunk *chunk = NULL; - // Chunk too small - if (chunk->size < alloc_size) { + memchunk *c; + while ((c = next_chunk(&iter)) != NULL) { + // Skip if chunk is used or too small + if (c->size & MEMCHUNK_USED || c->size < alloc_size) { continue; } - start = (uint64_t)((char *)chunk + 
sizeof(memchunk)); - end = (uint64_t)((char *)chunk + sizeof(memchunk) + chunk->size); - aligned_start = (((start - 1) >> pow2) << pow2) + alignment; + // Calculate chunk boundaries + start = (uint64_t)((char *)c + PER_HEAP_ALLOCATION_METADATA_SIZE); + end = (uint64_t)((char *)c + PER_HEAP_ALLOCATION_METADATA_SIZE + c->size); - // The current chunk is already aligned so just allocate it + // First try: Check if chunk's start address can be used directly after + // alignment + aligned_start = (((start - 1) >> pow2) << pow2) + alignment; if (start == aligned_start) { + // Current chunk is already properly aligned - use it as-is aligned = 1; + chunk = c; break; } - // The start of the allocated chunk must leave space for the 8 bytes of data - // payload and metadata of the new chunk + // Second try: Check if we can split this chunk to create an aligned + // allocation We need space for: metadata + minimum allocation before the + // aligned address aligned_start = ((((start + MIN_HEAP_SEGMENT_BYTES) - 1) >> pow2) << pow2) + alignment; - // Aligned start must be within the chunk + // Verify the aligned address fits within the chunk if (aligned_start >= end) { continue; } - // The current chunk is too small + // Verify there's enough space for the requested allocation if (aligned_start + alloc_size > end) { continue; } + // Found a suitable chunk we can split + chunk = c; break; } @@ -214,35 +584,102 @@ memalign(size_t alignment, size_t size) { goto exit_memalign; } - // If chunk is not aligned we need to allecate a new chunk just before it + //---------------------------------------------------------------------------- + // Handle chunk allocation based on alignment result + //---------------------------------------------------------------------------- + // If the chunk's start address is not naturally aligned, we need to split it: + // 1. The first chunk contains the unaligned portion before aligned_start + // 2. 
The second chunk starts at aligned_start and will be used for allocation + // This ensures the allocated memory satisfies the alignment requirement while + // preserving the rest of the chunk for future allocations if (!aligned) { memchunk *new_chunk = - (memchunk *)((void *)aligned_start - sizeof(memchunk)); + (memchunk *)((void *)aligned_start - PER_HEAP_ALLOCATION_METADATA_SIZE); new_chunk->size = end - aligned_start; new_chunk->next = chunk->next; - chunk->size -= (new_chunk->size + sizeof(memchunk)); + chunk->size -= (new_chunk->size + PER_HEAP_ALLOCATION_METADATA_SIZE); chunk->next = new_chunk; chunk = chunk->next; } - // If the chunk needs to be trimmed - if (chunk->size > alloc_size + sizeof(memchunk) + 8) { + // Trim excess space from the aligned chunk if possible + if (chunk->size >= alloc_size + MIN_HEAP_SEGMENT_BYTES) { memchunk *new_chunk = - (memchunk *)((void *)chunk + sizeof(memchunk) + alloc_size); - new_chunk->size = chunk->size - alloc_size - sizeof(memchunk); + (memchunk *)((void *)chunk + PER_HEAP_ALLOCATION_METADATA_SIZE + + alloc_size); + new_chunk->size = + chunk->size - alloc_size - PER_HEAP_ALLOCATION_METADATA_SIZE; new_chunk->next = chunk->next; chunk->next = new_chunk; chunk->size = alloc_size; } + + //---------------------------------------------------------------------------- + // Finalize allocation + //---------------------------------------------------------------------------- chunk->size |= MEMCHUNK_USED; - result = (void *)chunk + sizeof(memchunk); + target_heap->last_allocated = chunk; + result = (void *)chunk + PER_HEAP_ALLOCATION_METADATA_SIZE; + exit_memalign: - release_lock(&heap_lock); + release_lock(&target_heap->lock); return result; } -__attribute__((section(".jumpstart.text.smode"))) void *memset(void *s, int c, - size_t n) { +__attr_stext void print_heap(void) { + if (!is_valid_heap(BACKING_MEMORY_DDR, MEMORY_TYPE_WB)) { + printk("Error: Heap not initialized for %s/%s.\n", + backing_memory_to_string(BACKING_MEMORY_DDR), 
+ memory_type_to_string(MEMORY_TYPE_WB)); + jumpstart_smode_fail(); + } + + struct heap_info *target_heap = + find_matching_heap(BACKING_MEMORY_DDR, MEMORY_TYPE_WB); + if (target_heap == NULL) { + printk("Error: Heap not initialized for %s/%s.\n", + backing_memory_to_string(BACKING_MEMORY_DDR), + memory_type_to_string(MEMORY_TYPE_WB)); + jumpstart_smode_fail(); + return; + } + + acquire_lock(&target_heap->lock); + printk("===================\n"); + memchunk *chunk = target_heap->head; + while (chunk != 0) { + if (chunk->size & MEMCHUNK_USED) { + printk("[USED] Size:0x%llx\n", (chunk->size & MEMCHUNK_MAX_SIZE)); + } else { + printk("[FREE] Size:0x%lx Start:0x%lx\n", chunk->size, + (uint64_t)((void *)chunk + PER_HEAP_ALLOCATION_METADATA_SIZE)); + } + chunk = chunk->next; + } + + printk("===================\n"); + release_lock(&target_heap->lock); +} + +// The default versions of the functions use the DDR and WB memory type. +__attr_stext void *malloc(size_t size) { + return malloc_from_memory(size, BACKING_MEMORY_DDR, MEMORY_TYPE_WB); +} + +__attr_stext void free(void *ptr) { + free_from_memory(ptr, BACKING_MEMORY_DDR, MEMORY_TYPE_WB); +} + +__attr_stext void *calloc(size_t nmemb, size_t size) { + return calloc_from_memory(nmemb, size, BACKING_MEMORY_DDR, MEMORY_TYPE_WB); +} + +__attr_stext void *memalign(size_t alignment, size_t size) { + return memalign_from_memory(alignment, size, BACKING_MEMORY_DDR, + MEMORY_TYPE_WB); +} + +__attr_stext void *memset(void *s, int c, size_t n) { uint8_t *p = s; for (size_t i = 0; i < n; i++) { *(p++) = (uint8_t)c; @@ -250,8 +687,7 @@ __attribute__((section(".jumpstart.text.smode"))) void *memset(void *s, int c, return s; } -__attribute__((section(".jumpstart.text.smode"))) void * -memcpy(void *dest, const void *src, size_t n) { +__attr_stext void *memcpy(void *dest, const void *src, size_t n) { size_t numQwords = n / 8; size_t remindingBytes = n % 8; @@ -269,21 +705,3 @@ memcpy(void *dest, const void *src, size_t n) { return dest; 
} - -__attribute__((section(".jumpstart.text.smode"))) void print_heap(void) { - acquire_lock(&heap_lock); - printk("===================\n"); - memchunk *chunk = head; - while (chunk != 0) { - if (chunk->size & MEMCHUNK_USED) { - printk("[USED] Size:0x%llx\n", (chunk->size & MEMCHUNK_MAX_SIZE)); - } else { - printk("[FREE] Size:0x%lx Start:0x%lx\n", chunk->size, - (uint64_t)((void *)chunk + sizeof(memchunk))); - } - chunk = chunk->next; - } - - printk("===================\n"); - release_lock(&heap_lock); -} diff --git a/src/common/jumpstart.mmode.S b/src/common/jumpstart.mmode.S index 2e8bc8b9..4f47b16e 100644 --- a/src/common/jumpstart.mmode.S +++ b/src/common/jumpstart.mmode.S @@ -1,57 +1,46 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" #include "cpu_bits.h" # This section should fall into the initial 4K page set up. -.section .jumpstart.text.mmode.init.enter, "ax" +.section .jumpstart.cpu.text.mmode.init.enter, "ax" .global _mmode_start _mmode_start: la t0, mtvec_trap_handler csrw mtvec, t0 - csrr t0, mhartid + csrr a0, mhartid - li t1, MAX_NUM_HARTS_SUPPORTED - bge t0, t1, just_wfi_from_mmode + # Outputs: a0: cpu id + jal get_cpu_id - # Set up the stack. - # S-mode and M-mode share the same stack. - li t1, (NUM_PAGES_PER_HART_FOR_SMODE_STACK * SMODE_STACK_PAGE_SIZE) - mul t3, t0, t1 - la t2, smode_stack_top - add sp, t2, t3 - add sp, sp, t1 # We want the stack bottom. + # Checks if this cpu is in the active cpu mask and parks inactive cpus. + # Returns if the current CPU is in the active cpu mask. + # Inputs: a0: cpu id + jal handle_inactive_cpus - mv fp, sp + # Inputs: a0: cpu id + jal setup_stack - li t1, BATCH_MODE - beqz t1, 1f - - # When running in batch mode, have the primary hart save away it's - # return address. 
- # This return address is common to all the harts and allows all - # harts to exit back to where they came from. - csrr t0, mhartid - li t1, PRIMARY_HART_ID - bne t0, t1, 1f - la t1, batch_mode_exit_address - sd ra, (t1) - fence rw, rw - la t1, batch_mode_exit_lock - sd zero, (t1) + # Inputs: a0: cpu id + # a1: mhartid + csrr a1, mhartid + jal setup_thread_attributes_from_mmode -1: - # The mmode init code is expected to fit in a 4KB page for Rivos internal - # reasons. - la t0, _JUMPSTART_TEXT_MMODE_INIT_BOUNDARY - la t1, _JUMPSTART_TEXT_MMODE_INIT_ENTER_START - sub t2, t0, t1 - li t3, 0x1000 # 4KB - bgt t2, t3, jumpstart_mmode_fail + # Any C code we run can be compiled down to use floating point and + # vector instructions so we need to make sure that we have these enabled. + jal enable_mmode_float_and_vector_instructions + + MMODE_ROLE_ENABLE # Run the setup_mmode before running any more code. Only the first # 4K page of mmode code is set up to run right now. setup_mmode() @@ -60,45 +49,20 @@ _mmode_start: jal reset_csrs - csrr t0, mhartid - - # Check if this hart is in the active hart mask. - li a0, ACTIVE_HART_MASK - li t1, 1 - sll t1, t1, t0 - and a0, a0, t1 - bnez a0, 2f - - # Inactive hart. - - # If running in batch mode, return the inactive hart. - li t2, BATCH_MODE - bnez t2, batch_mode_return_unused_hart - - # Send the hart to WFI if not running in batch mode. - j just_wfi_from_mmode - -2: - # Have the hart mark itself as running. - la t1, hart_status_tracker + # Have the cpu mark itself as running. + GET_THREAD_ATTRIBUTES_CPU_ID(t0) + la t1, cpu_status_tracker add t1, t1, t0 - li t2, HART_RUNNING + li t2, CPU_RUNNING sb t2, 0(t1) - mv a0, t0 - jal setup_thread_attributes_from_mmode + jal enable_mmode_interrupts - # Enable interrupts in machine mode. 
- li t0, MSTATUS_MIE - csrs mstatus, t0 - li t0, MSTATUS_MPIE - csrc mstatus, t0 - li t0, MIP_MEIP - csrw mie, t0 + jal program_mstateen + jal program_hstateen jal program_menvcfg - - jal enable_mmode_float_and_vector_instructions + jal program_mseccfg jal setup_smode_trap_delegation @@ -112,7 +76,69 @@ _mmode_start: 1: j jump_to_main -.section .jumpstart.text.mmode, "ax" +# Inputs: +# a0: cpu id +.global setup_stack +setup_stack: + # Set up the stack. + # S-mode and M-mode share the same stack. + li t1, (NUM_PAGES_PER_CPU_FOR_PRIVILEGED_STACK * PRIVILEGED_STACK_PAGE_SIZE) + mul t3, a0, t1 + la t2, privileged_stack_top + add sp, t2, t3 + add sp, sp, t1 # We want the stack bottom. + + mv fp, sp + + ret + +.global enable_mmode_float_and_vector_instructions +enable_mmode_float_and_vector_instructions: + li t0, (MSTATUS_VS | MSTATUS_FS) + csrrs t1, mstatus, t0 + + # Set vtype.vill=0 by running a dummy vsetvl instruction. + # There are vector instructions (such as vmv1r.v) that + # can run without running a vsetvl instruction first so we + # need to make sure that the reset value of vill=1 has been cleared. + vsetivli zero, 8, e8, m1, ta, ma + + ret + +# Input: a0: logical cpu id +.global handle_inactive_cpus +handle_inactive_cpus: + # Check if this cpu is in the active cpu mask. + li t2, ACTIVE_CPU_MASK + li t1, 1 + sll t1, t1, a0 + and t2, t2, t1 + bnez t2, 1f + + # Inactive cpu. + # Send the cpu to WFI. + j just_wfi_from_mmode + +1: + ret + + +.section .jumpstart.cpu.text.mmode, "ax" + +.global enable_mmode_interrupts +enable_mmode_interrupts: + # Enable interrupts in machine mode. 
+ li t0, MSTATUS_MDT | MSTATUS_SDT + csrc mstatus, t0 + li t0, MSTATUS_MIE + csrs mstatus, t0 + li t0, MSTATUS_MPIE + csrc mstatus, t0 + li t0, MIP_MEIP + csrw mie, t0 + + ret + .global setup_smode_trap_delegation setup_smode_trap_delegation: @@ -130,7 +156,7 @@ setup_smode_trap_delegation: (1 << RISCV_EXCP_INST_PAGE_FAULT) | \ (1 << RISCV_EXCP_LOAD_PAGE_FAULT) | \ (1 << RISCV_EXCP_STORE_PAGE_FAULT) | \ - (1 << RISCV_EXCP_DATA_CORRUPTION_EXCEPTION) | \ + (1 << RISCV_EXCP_HW_ERR) | \ (1 << RISCV_EXCP_INST_GUEST_PAGE_FAULT) | \ (1 << RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT) | \ (1 << RISCV_EXCP_VIRT_INSTRUCTION_FAULT) | \ @@ -176,6 +202,18 @@ program_henvcfg: ret +.global program_mstateen +program_mstateen: + li t0, (SMSTATEEN0_HSCONTXT | SMSTATEEN0_IMSIC | SMSTATEEN0_AIA | SMSTATEEN0_SVSLCT | SMSTATEEN0_HSENVCFG | SMSTATEEN_STATEEN | SMSTATEEN0_CTR) + csrw mstateen0, t0 + ret + +.global program_hstateen +program_hstateen: + li t0, (SMSTATEEN0_IMSIC | SMSTATEEN0_AIA | SMSTATEEN0_SVSLCT) + csrw hstateen0, t0 + ret + .global program_menvcfg program_menvcfg: # CBIE: Cache Block Invalidate instruction Enable @@ -184,11 +222,17 @@ program_menvcfg: # PMBTE: Enables Svpbmt extension for S-mode and G-stage address translation. # i.e., for page tables pointed to by satp or hgatp. 
# CDE: Counter Delegation Enable - li t0, (MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE | MENVCFG_PBMTE | MENVCFG_CDE) + li t0, (MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE | MENVCFG_PBMTE | MENVCFG_CDE | MENVCFG_STCE) csrrs t1, menvcfg, t0 ret +.global program_mseccfg +program_mseccfg: + li t0, MSECCFG_SSEED | MSECCFG_USEED + csrs mseccfg, t0 + ret + .global reset_csrs reset_csrs: csrw mcause, zero @@ -207,13 +251,6 @@ reset_csrs: ret -.global enable_mmode_float_and_vector_instructions -enable_mmode_float_and_vector_instructions: - li t0, (MSTATUS_VS | MSTATUS_FS) - csrrs t1, mstatus, t0 - - ret - .global delegate_mmode_resources_to_smode delegate_mmode_resources_to_smode: # Delegate resources which are otherwise retained by M mode. @@ -234,10 +271,13 @@ jump_to_main: li t0, START_TEST_IN_MMODE bnez t0, jump_to_main_in_mmode +#ifdef SMODE_MODE_ENABLED jal delegate_mmode_resources_to_smode - la a0, main jal run_function_in_smode +#else + li a0, DIAG_FAILED +#endif j _mmode_end @@ -246,11 +286,14 @@ jump_to_main: handle_env_call_from_smode: # a7 will contain the syscall number +#ifdef SMODE_MODE_ENABLED li t0, SYSCALL_RUN_FUNC_IN_SMODE_COMPLETE beq a7, t0, handle_syscall_run_func_in_smode_complete +#endif j jumpstart_mmode_fail +#ifdef SMODE_MODE_ENABLED handle_syscall_run_func_in_smode_complete: # This is the return to machine path for run_function_in_smode(). @@ -348,6 +391,8 @@ run_function_in_smode_return_point: addi sp, sp, 16 ret +#endif + # The mtvec.base must always be 4 byte aligned. .align 2 .global mtvec_trap_handler @@ -359,11 +404,9 @@ mtvec_trap_handler: li gp, PRV_M SET_THREAD_ATTRIBUTES_CURRENT_MODE(gp) - # We don't currently expect mmode to handle a trap taken from VS mode. - # Once we do the following code will need to be updated to save and restore - # the V bit value across the trap handler. - GET_THREAD_ATTRIBUTES_CURRENT_V_BIT(gp) - bnez gp, jumpstart_mmode_fail + # We could be coming from VS or VU mode. Clear the V bit. 
+ li gp, 0 + SET_THREAD_ATTRIBUTES_CURRENT_V_BIT(gp) # We just need to check MSB of MPP field here to determine if we came from # M or S mode. U mode is also handled in S mode path. @@ -392,12 +435,27 @@ save_context: csrr t0, mepc sd t0, EPC_OFFSET_IN_SAVE_REGION(gp) + csrr t0, hstatus + sd t0, HSTATUS_OFFSET_IN_SAVE_REGION(gp) + csrr t0, mstatus sd t0, STATUS_OFFSET_IN_SAVE_REGION(gp) - csrr t0, hstatus - sd t0, HSTATUS_OFFSET_IN_SAVE_REGION(gp) + # We just need to check the SPP field here to determine if we came from + # S or U mode. + bexti t0, t0, MSTATUS_MPP_SHIFT + bnez t0, 1f + + # We're handling a trap from umode. + # Switch to the S-mode stack as we can't use the Umode stack. + # We get the smode stack from the smode context that was saved + # when we ran run_function_in_umode() - the context just prior to this. + GET_THREAD_ATTRIBUTES_SMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(gp) + addi t0, gp, -REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES + ld sp, SP_OFFSET_IN_SAVE_REGION(t0) + GET_THREAD_ATTRIBUTES_MMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(gp) +1: # Point to the address of the next context save region for the next # trap handler. addi gp, gp, REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES @@ -407,8 +465,11 @@ save_context: csrr a0, mcause call get_mmode_trap_handler_override - beqz a0, check_for_env_call_requests + bnez a0, run_registered_trap_handler + + j check_for_env_call_requests +run_registered_trap_handler: # Jump to the registered trap handler. # TODO: Do we need to pass any arguments to the trap handler? # If so, we need to restore them from the context save region. @@ -432,14 +493,18 @@ restore_context: ld t0, STATUS_OFFSET_IN_SAVE_REGION(gp) csrw mstatus, t0 - bexti t0, t0, MSTATUS_MPP_MSB - bnez t0, restore_all_gprs + # We could be returning back to VS or VU mode. Set the V bit. 
+ bexti t1, t0, MSTATUS_MPV_SHIFT + SET_THREAD_ATTRIBUTES_CURRENT_V_BIT(t1) -restore_smode_context: - MMODE_ROLE_DISABLE + srli t0, t0, MSTATUS_MPP_SHIFT + andi t0, t0, 3 + li t1, PRV_M + beq t0, t1, restore_all_gprs - li t0, PRV_S +restore_s_u_mode_context: SET_THREAD_ATTRIBUTES_CURRENT_MODE(t0) + MMODE_ROLE_DISABLE restore_all_gprs: RESTORE_ALL_GPRS @@ -497,3 +562,12 @@ set_mepc_for_current_exception: addi t0, t0, -REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES sd a0, EPC_OFFSET_IN_SAVE_REGION(t0) ret + +.section .jumpstart.cpu.text.mmode.init.end, "ax" + +// The address of this function will be used to find the end of the mmode init +// section. +.global mmode_init_4k_boundary +mmode_init_4k_boundary: + wfi + j mmode_init_4k_boundary diff --git a/src/common/jumpstart.smode.S b/src/common/jumpstart.smode.S index 19e22cec..a2fc61bc 100644 --- a/src/common/jumpstart.smode.S +++ b/src/common/jumpstart.smode.S @@ -1,11 +1,16 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" #include "cpu_bits.h" -.section .jumpstart.text.smode, "ax" +.section .jumpstart.cpu.text.smode, "ax" .global setup_smode setup_smode: @@ -22,10 +27,12 @@ setup_smode: jal setup_mmu_from_smode - jal setup_heap - jal setup_uart + jal setup_default_heap + + jal register_default_smode_exception_handlers + li t0, 1 SET_THREAD_ATTRIBUTES_SMODE_SETUP_DONE(t0) @@ -52,7 +59,7 @@ setup_smode_trap_vector: .global setup_smode_interrupt_enables setup_smode_interrupt_enables: # Enable interrupts. - li t0, SSTATUS_SIE | SSTATUS_SPP + li t0, SSTATUS_SIE | SSTATUS_SPIE csrs sstatus, t0 # Enable external interrupts. @@ -182,6 +189,9 @@ disable_mmu_from_smode: .align 2 .global stvec_trap_handler stvec_trap_handler: + # Save gp as we use it in this handler. 
+ csrw sscratch, gp + li gp, PRV_S SET_THREAD_ATTRIBUTES_CURRENT_MODE(gp) @@ -253,15 +263,19 @@ restore_context: ld t0, EPC_OFFSET_IN_SAVE_REGION(gp) csrw sepc, t0 + ld t0, HSTATUS_OFFSET_IN_SAVE_REGION(gp) + csrw hstatus, t0 + + # We could be returning back to VS or VU mode. Set the V bit. + bexti t0, t0, HSTATUS_SPV_SHIFT + SET_THREAD_ATTRIBUTES_CURRENT_V_BIT(t0) + ld t0, STATUS_OFFSET_IN_SAVE_REGION(gp) csrw sstatus, t0 bexti t0, t0, SSTATUS_SPP_SHIFT beqz t0, restore_umode_context - ld t0, HSTATUS_OFFSET_IN_SAVE_REGION(gp) - csrw hstatus, t0 - j restore_all_gprs restore_umode_context: @@ -278,6 +292,9 @@ restore_all_gprs: addi gp, gp, 1 SET_THREAD_ATTRIBUTES_NUM_CONTEXT_SAVES_REMAINING_IN_SMODE(gp) + # Restore gp as we had saved it at the start of this handler. + csrr gp, sscratch + # The return_from_stvec_trap_handler label is referenced in control transfer # records diag so mark it as global. .global return_from_stvec_trap_handler @@ -297,8 +314,10 @@ check_for_env_call_requests: handle_env_call_from_umode: # a7 will contain the syscall number +#ifdef UMODE_MODE_ENABLED li t0, SYSCALL_RUN_FUNC_IN_UMODE_COMPLETE beq a7, t0, handle_syscall_run_func_in_umode_complete +#endif j jumpstart_smode_fail @@ -310,33 +329,36 @@ handle_env_call_from_vsmode: j jumpstart_smode_fail -handle_syscall_run_func_in_umode_complete: - # This is the return to supervisor path for run_function_in_umode(). +handle_syscall_run_func_in_vsmode_complete: + # This is the return to supervisor path for run_function_in_vsmode(). - # Re-enable interrupts that were disabled in run_function_in_umode(). + # Re-enable interrupts that were disabled in run_function_in_vsmode(). # Set SPIE to 1, on sret this will set SIE to 1. 
li t0, (PRV_S << SSTATUS_SPP_SHIFT) | SSTATUS_SPIE csrs sstatus, t0 - la t0, run_function_in_umode_return_point + li t0, HSTATUS_SPV + csrc hstatus, t0 + + la t0, run_function_in_vsmode_return_point csrw sepc, t0 # Point to the address of the context save region we used when we - # took the RUN_FUNC_IN_UMODE_COMPLETE syscall. + # took the RUN_FUNC_IN_VSMODE_COMPLETE syscall. GET_THREAD_ATTRIBUTES_SMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(gp) addi gp, gp, -REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES - # The return value from the umode function is in the umode - # context saved for a0 when we took the ecall exception from umode to + # The return value from the vsmode function is in the vsmode + # context saved for a0 when we took the ecall exception from vsmode to # smode. ld t0, A0_OFFSET_IN_SAVE_REGION(gp) # Place it in the a0 location for the mmode context we saved before calling - # run_function_in_umode(). + # run_function_in_vsmode(). addi gp, gp, -REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES sd t0, A0_OFFSET_IN_SAVE_REGION(gp) - # Restore S mode context from before the run_function_in_umode() call. + # Restore S mode context from before the run_function_in_vsmode() call. RESTORE_ALL_GPRS # This location is now free to be used by the next trap handler entry. @@ -349,36 +371,34 @@ handle_syscall_run_func_in_umode_complete: sret -handle_syscall_run_func_in_vsmode_complete: - # This is the return to supervisor path for run_function_in_vsmode(). +#ifdef UMODE_MODE_ENABLED +handle_syscall_run_func_in_umode_complete: + # This is the return to supervisor path for run_function_in_umode(). - # Re-enable interrupts that were disabled in run_function_in_vsmode(). + # Re-enable interrupts that were disabled in run_function_in_umode(). # Set SPIE to 1, on sret this will set SIE to 1. 
li t0, (PRV_S << SSTATUS_SPP_SHIFT) | SSTATUS_SPIE csrs sstatus, t0 - li t0, HSTATUS_SPV - csrc hstatus, t0 - - la t0, run_function_in_vsmode_return_point + la t0, run_function_in_umode_return_point csrw sepc, t0 # Point to the address of the context save region we used when we - # took the RUN_FUNC_IN_VSMODE_COMPLETE syscall. + # took the RUN_FUNC_IN_UMODE_COMPLETE syscall. GET_THREAD_ATTRIBUTES_SMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(gp) addi gp, gp, -REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES - # The return value from the vsmode function is in the vsmode - # context saved for a0 when we took the ecall exception from vsmode to + # The return value from the umode function is in the umode + # context saved for a0 when we took the ecall exception from umode to # smode. ld t0, A0_OFFSET_IN_SAVE_REGION(gp) # Place it in the a0 location for the mmode context we saved before calling - # run_function_in_vsmode(). + # run_function_in_umode(). addi gp, gp, -REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES sd t0, A0_OFFSET_IN_SAVE_REGION(gp) - # Restore S mode context from before the run_function_in_vsmode() call. + # Restore S mode context from before the run_function_in_umode() call. RESTORE_ALL_GPRS # This location is now free to be used by the next trap handler entry. @@ -430,8 +450,8 @@ run_function_in_umode: csrc sstatus, t0 # Switch to the U-mode stack. 
- GET_THREAD_ATTRIBUTES_HART_ID(t0) - li t1, (NUM_PAGES_PER_HART_FOR_UMODE_STACK * UMODE_STACK_PAGE_SIZE) + GET_THREAD_ATTRIBUTES_CPU_ID(t0) + li t1, (NUM_PAGES_PER_CPU_FOR_UMODE_STACK * UMODE_STACK_PAGE_SIZE) mul t0, t0, t1 la t2, umode_stack_top add sp, t2, t0 @@ -453,6 +473,9 @@ run_function_in_umode_return_point: addi sp, sp, 16 ret +#endif + + .global just_wfi_from_smode just_wfi_from_smode: wfi diff --git a/src/common/jumpstart.umode.S b/src/common/jumpstart.umode.S index c8d69aff..464c2d5f 100644 --- a/src/common/jumpstart.umode.S +++ b/src/common/jumpstart.umode.S @@ -1,10 +1,15 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" -.section .jumpstart.text.umode, "ax" +.section .jumpstart.cpu.text.umode, "ax" # Inputs: # a0: address of the function to run. diff --git a/src/common/jumpstart.vsmode.S b/src/common/jumpstart.vsmode.S index e4673517..e36e5501 100644 --- a/src/common/jumpstart.vsmode.S +++ b/src/common/jumpstart.vsmode.S @@ -1,11 +1,16 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" #include "cpu_bits.h" -.section .jumpstart.text.smode, "ax" +.section .jumpstart.cpu.text.smode, "ax" .global setup_vsmode setup_vsmode: @@ -90,7 +95,6 @@ jump_to_function_in_vsmode: jalr ra, t0 -.global exit_from_vsmode exit_from_vsmode: # a0 contains the exit code. li a7, SYSCALL_RUN_FUNC_IN_VSMODE_COMPLETE @@ -99,10 +103,18 @@ exit_from_vsmode: # We shouldn't come back here. wfi +.global jumpstart_vsmode_fail +jumpstart_vsmode_fail: + li a0, DIAG_FAILED + j exit_from_vsmode + # The stvec.base must always be 4 byte aligned. 
.align 2 .global vstvec_trap_handler vstvec_trap_handler: + # Save gp as we use it in this handler. + csrw sscratch, gp + li gp, PRV_S SET_THREAD_ATTRIBUTES_CURRENT_MODE(gp) @@ -132,7 +144,7 @@ vstvec_trap_handler: # We're handling a trap from vumode. # Switch to the S-mode stack as we can't use the vumode stack. - # We get the smode stack from the smode context that was saved + # We get the smode stack from the vsmode context that was saved # when we ran run_function_in_vumode() - the context just prior to this. addi t0, gp, -REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES ld sp, SP_OFFSET_IN_SAVE_REGION(t0) @@ -189,6 +201,9 @@ restore_all_gprs: addi gp, gp, 1 SET_THREAD_ATTRIBUTES_NUM_CONTEXT_SAVES_REMAINING_IN_SMODE(gp) + # Restore gp as we had saved it at the start of this handler. + csrr gp, sscratch + # The return_from_stvec_trap_handler label is referenced in control transfer # records diag so mark it as global. .global return_from_vstvec_trap_handler @@ -205,9 +220,121 @@ check_for_env_call_requests: handle_env_call_from_vumode: # a7 will contain the syscall number +#ifdef UMODE_MODE_ENABLED + li t0, SYSCALL_RUN_FUNC_IN_VUMODE_COMPLETE + beq a7, t0, handle_syscall_run_func_in_vumode_complete +#endif + j jumpstart_vsmode_fail handle_env_call_from_vsmode: # a7 will contain the syscall number j jumpstart_vsmode_fail + +#ifdef UMODE_MODE_ENABLED +handle_syscall_run_func_in_vumode_complete: + # This is the return to supervisor path for run_function_in_vumode(). + + # Re-enable interrupts that were disabled in run_function_in_vumode(). + # Set SPIE to 1, on sret this will set SIE to 1. + li t0, (PRV_S << SSTATUS_SPP_SHIFT) | SSTATUS_SPIE + csrs sstatus, t0 + + la t0, run_function_in_vumode_return_point + csrw sepc, t0 + + # Point to the address of the context save region we used when we + # took the RUN_FUNC_IN_VUMODE_COMPLETE syscall. 
+ GET_THREAD_ATTRIBUTES_SMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(gp) + addi gp, gp, -REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES + + # The return value from the vumode function is in the vumode + # context saved for a0 when we took the ecall exception from umode to + # smode. + ld t0, A0_OFFSET_IN_SAVE_REGION(gp) + + # Place it in the a0 location for the mmode context we saved before calling + # run_function_in_vumode(). + addi gp, gp, -REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES + sd t0, A0_OFFSET_IN_SAVE_REGION(gp) + + # Restore VS mode context from before the run_function_in_vumode() call. + RESTORE_ALL_GPRS + + # This location is now free to be used by the next trap handler entry. + SET_THREAD_ATTRIBUTES_SMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(gp) + + # We've freed 2 context saves. + GET_THREAD_ATTRIBUTES_NUM_CONTEXT_SAVES_REMAINING_IN_SMODE(gp) + addi gp, gp, 2 + SET_THREAD_ATTRIBUTES_NUM_CONTEXT_SAVES_REMAINING_IN_SMODE(gp) + + sret + +# Inputs: +# a0: address of the function to run in usermode. +# a1-a7 contains the arguments to pass to the user function. +.global run_function_in_vumode +run_function_in_vumode: + addi sp, sp, -16 + sd ra, 8(sp) + sd fp, 0(sp) + addi fp, sp, 16 + + GET_THREAD_ATTRIBUTES_CURRENT_MODE(t0) + li t1, PRV_S + bne t0, t1, jumpstart_vsmode_fail + + # Make sure we only call this function from VS mode (for now). + GET_THREAD_ATTRIBUTES_CURRENT_V_BIT(t0) + beqz t0, jumpstart_vsmode_fail + + # Disable interrupts when switching modes to avoid clobbering any + # state we set up if we encounter an interrupt. + csrci sstatus, SSTATUS_SIE + + # Make sure we have enough context saves remaining in S mode. 
+ GET_THREAD_ATTRIBUTES_NUM_CONTEXT_SAVES_REMAINING_IN_SMODE(gp) + beqz gp, jumpstart_vsmode_fail + + addi gp, gp, -1 + SET_THREAD_ATTRIBUTES_NUM_CONTEXT_SAVES_REMAINING_IN_SMODE(gp) + + # Save VS-mode context + GET_THREAD_ATTRIBUTES_SMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(gp) + SAVE_ALL_GPRS + + # Point to the address of the next context save region for the next + # trap handler. + addi gp, gp, REG_CONTEXT_SAVE_REGION_SIZE_IN_BYTES + SET_THREAD_ATTRIBUTES_SMODE_REG_CONTEXT_SAVE_REGION_ADDRESS(gp) + + # Load VU-mode context. We just need to set sepc, sstatus and a0 register. + li t0, (PRV_S << SSTATUS_SPP_SHIFT) + csrc sstatus, t0 + + # Switch to the VU-mode stack + GET_THREAD_ATTRIBUTES_CPU_ID(t0) + li t1, (NUM_PAGES_PER_CPU_FOR_UMODE_STACK * UMODE_STACK_PAGE_SIZE) + mul t0, t0, t1 + la t2, umode_stack_top + add sp, t2, t0 + add sp, sp, t1 # We want the stack bottom. + + li t0, PRV_U + SET_THREAD_ATTRIBUTES_CURRENT_MODE(t0) + + la t0, jump_to_function_in_vumode + csrw sepc, t0 + + sret + +# Inputs: +# a0: return status from U-mode function. +run_function_in_vumode_return_point: + ld ra, 8(sp) + ld fp, 0(sp) + addi sp, sp, 16 + ret +#endif diff --git a/src/common/jumpstart.vumode.S b/src/common/jumpstart.vumode.S new file mode 100644 index 00000000..e450900a --- /dev/null +++ b/src/common/jumpstart.vumode.S @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + + +.section .jumpstart.cpu.text.umode, "ax" + +# Inputs: +# a0: address of the function to run. +# a1-a7 contains the arguments to pass to the umode function. +.global jump_to_function_in_vumode +jump_to_function_in_vumode: + mv t0, a0 + + # Function arguments have to be passed in a0-a6. + mv a0, a1 + mv a1, a2 + mv a2, a3 + mv a3, a4 + mv a4, a5 + mv a5, a6 + mv a6, a7 + + jalr ra, t0 + +exit_from_vumode: + # a0 contains the exit code. 
+ li a7, SYSCALL_RUN_FUNC_IN_VUMODE_COMPLETE + ecall + + # We shouldn't come back here. + wfi + +.global jumpstart_vumode_fail +jumpstart_vumode_fail: + li a0, DIAG_FAILED + j exit_from_vumode diff --git a/src/common/lock.mmode.c b/src/common/lock.mmode.c new file mode 100644 index 00000000..059607ae --- /dev/null +++ b/src/common/lock.mmode.c @@ -0,0 +1,25 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "lock.h" +#include "jumpstart.h" +#include "lock.mmode.h" + +__attr_mtext static uint64_t m_swap_atomic(uint64_t *val, uint64_t new_value, + amoswapKind_t kind) { + return _swap_atomic(val, new_value, kind); + +fail: + jumpstart_mmode_fail(); +} + +__attr_mtext void m_acquire_lock(spinlock_t *lock) { + _acquire_lock(lock, m_swap_atomic); +} + +__attr_mtext void m_release_lock(spinlock_t *lock) { + _release_lock(lock, m_swap_atomic); +} diff --git a/src/common/lock.smode.c b/src/common/lock.smode.c index 0056618d..e3ea061e 100644 --- a/src/common/lock.smode.c +++ b/src/common/lock.smode.c @@ -1,53 +1,25 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ -#include "lock.smode.h" +#include "lock.h" #include "jumpstart.h" +#include "lock.smode.h" -typedef enum { - AMOSWAP_ACQUIRE, - AMOSWAP_RELEASE, -} amoswapKind_t; - -__attribute__((section(".jumpstart.text.smode"))) static uint64_t -swap_atomic(uint64_t *val, uint64_t new_value, amoswapKind_t kind) { - uint64_t result; - switch (kind) { - case AMOSWAP_RELEASE: - __asm__ __volatile__("amoswap.d.rl %0, %2, %1" - : "=r"(result), "+A"(*val) - : "r"(new_value) - : "memory"); - break; - case AMOSWAP_ACQUIRE: - __asm__ __volatile__("amoswap.d.aq %0, %2, %1" - : "=r"(result), "+A"(*val) - : "r"(new_value) - : "memory"); - break; - default: - jumpstart_smode_fail(); - } +__attr_stext static uint64_t swap_atomic(uint64_t *val, uint64_t new_value, + amoswapKind_t kind) { + return _swap_atomic(val, new_value, kind); - return result; +fail: + jumpstart_smode_fail(); } -__attribute__((section(".jumpstart.text.smode"))) void -acquire_lock(spinlock_t *lock) { - disable_checktc(); - while (1) { - if (*(volatile uint64_t *)lock) { - continue; - } - if (swap_atomic(lock, 1, AMOSWAP_ACQUIRE) == 0) { - break; - } - } - enable_checktc(); +__attr_stext void acquire_lock(spinlock_t *lock) { + _acquire_lock(lock, swap_atomic); } -__attribute__((section(".jumpstart.text.smode"))) void -release_lock(spinlock_t *lock) { - swap_atomic(lock, 0, AMOSWAP_RELEASE); +__attr_stext void release_lock(spinlock_t *lock) { + _release_lock(lock, swap_atomic); } diff --git a/src/common/meson.build b/src/common/meson.build index ed745d88..1240d547 100644 --- a/src/common/meson.build +++ b/src/common/meson.build @@ -1,26 +1,29 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 mmode_sources += files('jumpstart.mmode.S', + 'data.privileged.S', + 'lock.mmode.c', + 'thread_attributes.mmode.c', + 'time.mmode.c', 'trap_handler.mmode.c', + 'uart.mmode.c', 'utils.mmode.c') -smode_sources += files('data.smode.S', - 'jumpstart.smode.S', + +smode_sources += files('jumpstart.smode.S', 'jumpstart.vsmode.S', 'tablewalk.smode.c', 'trap_handler.smode.c', 'string.smode.c', + 'time.smode.c', 'utils.smode.c', 'uart.smode.c', 'heap.smode.c', - 'lock.smode.c') - -if get_option('boot_config') == 'fw-sbi' - smode_sources += files( - 'sbi_firmware_boot.smode.S', - ) -endif + 'heap.smode.S', + 'lock.smode.c', + 'thread_attributes.smode.c') -umode_sources += files('jumpstart.umode.S') +umode_sources += files('jumpstart.umode.S', + 'jumpstart.vumode.S') diff --git a/src/common/sbi_firmware_boot.smode.S b/src/common/sbi_firmware_boot.smode.S index e0c5b2ed..2bcd98af 100644 --- a/src/common/sbi_firmware_boot.smode.S +++ b/src/common/sbi_firmware_boot.smode.S @@ -1,97 +1,102 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" #include "cpu_bits.h" -.section .jumpstart.text.smode.init.enter, "ax" +.section .jumpstart.cpu.text.smode.init.enter, "ax" # In sbi_firmware_boot mode, other firmwares run in M-mode and drop hand over control # to JumpStart in S-mode. This code is the entry point for such environments. -# We expect that only one hart is running at this point and all the other -# harts are in STOPPED state. The running hart will make SBI HSM calls to -# wake up the other harts and start them running in S-mode. +# We expect that only one cpu is running at this point and all the other +# cpus are in STOPPED state. 
The running cpu will make SBI HSM calls to +# wake up the other cpus and start them running in S-mode. # Inputs: -# a0: This hart's hartid. +# a0: This cpu's cpuid. .global sbi_firmware_trampoline sbi_firmware_trampoline: mv t0, a0 li t1, 0 # hid = 0 - li t2, ACTIVE_HART_MASK - mv t2, a0 # active_hart_mask + li t2, ACTIVE_CPU_MASK + mv t2, a0 # active_cpu_mask -start_active_harts_loop: - beq t1, t0, invoke_sbi_start_hart_done # Don't run sbi_hart_start on self. +start_active_cpus_loop: + beq t1, t0, invoke_sbi_start_cpu_done # Don't run sbi_cpu_start on self. - andi t3, t2, 1 # t3 = active_hart_mask & 1 - bnez t3, invoke_sbi_start_hart # Run sbi_hart_start on this active hart. + andi t3, t2, 1 # t3 = active_cpu_mask & 1 + bnez t3, invoke_sbi_start_cpu # Run sbi_cpu_start on this active cpu. - j invoke_sbi_start_hart_done + j invoke_sbi_start_cpu_done -invoke_sbi_start_hart: - mv a0, t1 # param1: hartid of hart to start. - la a1, _smode_start # param2: start_address at which to start the hart. +invoke_sbi_start_cpu: + mv a0, t1 # param1: cpuid of cpu to start. + la a1, _smode_start # param2: start_address at which to start the cpu. li a2, 0 # param3: opaque - jal sbi_hart_start - bnez a0, jumpstart_smode_fail # Fail if sbi_hart_start returns non-zero + jal sbi_cpu_start + bnez a0, jumpstart_smode_fail # Fail if sbi_cpu_start returns non-zero -invoke_sbi_hart_status: +invoke_sbi_cpu_status: mv a0, t1 - jal sbi_hart_get_status - bnez a0, jumpstart_smode_fail # Fail if sbi_hart_get_status returns non-zero + jal sbi_cpu_get_status + bnez a0, jumpstart_smode_fail # Fail if sbi_cpu_get_status returns non-zero - # the hart status is returned in a1. - # SBI HART status is 0 if the hart is running. Wait till the hart is running. - bnez a1, invoke_sbi_hart_status + # the cpu status is returned in a1. + # SBI CPU status is 0 if the cpu is running. Wait till the cpu is running. 
+ bnez a1, invoke_sbi_cpu_status -invoke_sbi_start_hart_done: - srli t2, t2, 1 # active_hart_mask >> 1 - beqz t2, start_active_harts_loop_end # if active_hart_mask == 0, done. +invoke_sbi_start_cpu_done: + srli t2, t2, 1 # active_cpu_mask >> 1 + beqz t2, start_active_cpus_loop_end # if active_cpu_mask == 0, done. addi t1, t1, 1 # hid++ - j start_active_harts_loop + j start_active_cpus_loop -start_active_harts_loop_end: - li t1, ACTIVE_HART_MASK - mv a0, t0 # $a0 = my_hart_id +start_active_cpus_loop_end: + li t1, ACTIVE_CPU_MASK + mv a0, t0 # $a0 = my_cpu_id srl t1, t1, a0 andi t1, t1, 1 bnez t1, _smode_start # go to _smode_start if active thread - # or else stop this hart and wfi - jal sbi_hart_stop + # or else stop this cpu and wfi + jal sbi_cpu_stop j just_wfi_from_smode # should never get here. -.section .jumpstart.text.smode, "ax" +.section .jumpstart.cpu.text.smode, "ax" # Inputs: -# a0: hart id. +# a0: cpu id. .global _smode_start _smode_start: # This code mirrors _mmode_start in start.mmode.S mv t0, a0 - li a0, ACTIVE_HART_MASK + li a0, ACTIVE_CPU_MASK li t1, 1 sll t1, t1, t0 and a0, a0, t1 - # Send all inactive harts to wfi. + # Send all inactive cpus to wfi. beqz a0, just_wfi_from_smode - # Have the hart mark itself as running. - la t1, hart_status_tracker + # Have the cpu mark itself as running. + la t1, cpu_status_tracker add t1, t1, t0 - li t2, HART_RUNNING + li t2, CPU_RUNNING sb t2, 0(t1) mv a0, t0 jal setup_thread_attributes_from_smode # S-mode and M-mode share the same stack. - GET_THREAD_ATTRIBUTES_HART_ID(t0) - li t1, (NUM_PAGES_PER_HART_FOR_SMODE_STACK * SMODE_STACK_PAGE_SIZE) + GET_THREAD_ATTRIBUTES_CPU_ID(t0) + li t1, (NUM_PAGES_PER_CPU_FOR_PRIVILEGED_STACK * PRIVILEGED_STACK_PAGE_SIZE) mul t0, t0, t1 - la t2, smode_stack_top + la t2, privileged_stack_top add sp, t2, t0 add sp, sp, t1 # We want the stack bottom. @@ -105,55 +110,55 @@ _smode_start: _smode_end: # a0 will contain diag pass/fail status. 
- # Store pass/fail status into the hart status tracker. - GET_THREAD_ATTRIBUTES_HART_ID(t0) - la t1, hart_status_tracker + # Store pass/fail status into the cpu status tracker. + GET_THREAD_ATTRIBUTES_CPU_ID(t0) + la t1, cpu_status_tracker add t1, t1, t0 sb a0, 0(t1) - # Have all the secondary harts wait on the wfi. - # the primary hart will go through the exit routine. - li t1, PRIMARY_HART_ID + # Have all the secondary cpus wait on the wfi. + # the primary cpu will go through the exit routine. + li t1, PRIMARY_CPU_ID bne t0, t1, just_wfi_from_smode CHECKTC_DISABLE - # Check the status of all the active harts. - # a0: Active hart mask. Gets shifted right as we check each hart. - # t0: hart_status_tracker address - # t1: Hart id of the current hart we're checking status of. - li a0, ACTIVE_HART_MASK - la t0, hart_status_tracker + # Check the status of all the active cpus. + # a0: Active cpu mask. Gets shifted right as we check each cpu. + # t0: cpu_status_tracker address + # t1: CPU id of the current cpu we're checking status of. + li a0, ACTIVE_CPU_MASK + la t0, cpu_status_tracker li t1, 0x0 -check_hart_status_loop: +check_cpu_status_loop: andi t6, a0, 0x1 - beqz t6, done_with_current_hart + beqz t6, done_with_current_cpu # Active core, check it's pass/fail status. 
- add t5, t0, t1 # pointer to the hart's status + add t5, t0, t1 # pointer to the cpu's status - li t6, HART_INACTIVE -wait_for_inactive_hart_loop: + li t6, CPU_INACTIVE +wait_for_inactive_cpu_loop: lb t4, 0(t5) - beq t4, t6, wait_for_inactive_hart_loop + beq t4, t6, wait_for_inactive_cpu_loop - li t6, HART_RUNNING -wait_for_running_hart_loop: + li t6, CPU_RUNNING +wait_for_running_cpu_loop: lb t4, 0(t5) - beq t4, t6, wait_for_running_hart_loop + beq t4, t6, wait_for_running_cpu_loop li t6, DIAG_PASSED bne t4, t6, jumpstart_sbi_firmware_boot_fail -done_with_current_hart: +done_with_current_cpu: srli a0, a0, 1 addi t1, t1, 1 - bnez a0, check_hart_status_loop + bnez a0, check_cpu_status_loop CHECKTC_ENABLE - # All harts have passed, we're done. + # All cpus have passed, we're done. li t1, DIAG_PASSED bne a0, t1, jumpstart_sbi_firmware_boot_fail @@ -167,8 +172,6 @@ jumpstart_sbi_firmware_boot_fail: run_end_of_sim_sequence: # NOTE: this will not work on RTL simulation. - li t1, IN_QEMU_MODE - bnez t1, invoke_sbi_reset slli t1, a0, 1 ori t1, t1, 1 @@ -177,50 +180,43 @@ run_end_of_sim_sequence: 1: j 1b # wait for termination -invoke_sbi_reset: - mv a1, a0 - li a0, 0 # sbi_system_reset: param1(a0): SHUTDOWN - # sbi_system_reset: param2(a1): DIAG_PASS(0)/DIAG_FAIL(1) - jal sbi_system_reset - j just_wfi_from_smode - #define SBI_HSM_EID 0x48534D -#define SBI_HSM_HART_START_FID 0 -#define SBI_HSM_HART_STOP_FID 1 -#define SBI_HSM_HART_STATUS_FID 2 +#define SBI_HSM_CPU_START_FID 0 +#define SBI_HSM_CPU_STOP_FID 1 +#define SBI_HSM_CPU_STATUS_FID 2 #define SBI_SRST_EID 0x53525354 #define SBI_SRST_SYSTEM_RESET_FID 0 -.section .jumpstart.text.smode, "ax" +.section .jumpstart.cpu.text.smode, "ax" # Reference: # https://github.com/riscv-non-isa/riscv-sbi-doc/blob/master/src/ext-hsm.adoc # Prototype: -# struct sbiret sbi_hart_start(unsigned long hartid, +# struct sbiret sbi_cpu_start(unsigned long cpuid, # unsigned long start_addr, # unsigned long opaque) -.global sbi_hart_start 
-sbi_hart_start: - li a6, SBI_HSM_HART_START_FID +.global sbi_cpu_start +sbi_cpu_start: + li a6, SBI_HSM_CPU_START_FID li a7, SBI_HSM_EID ecall ret # Prototype: -# struct sbiret sbi_hart_stop(void) -.global sbi_hart_stop -sbi_hart_stop: - li a6, SBI_HSM_HART_STOP_FID +# struct sbiret sbi_cpu_stop(void) +.global sbi_cpu_stop +sbi_cpu_stop: + li a6, SBI_HSM_CPU_STOP_FID li a7, SBI_HSM_EID ecall ret # Prototype: -# struct sbiret sbi_hart_get_status(unsigned long hartid) -.global sbi_hart_get_status -sbi_hart_get_status: - li a6, SBI_HSM_HART_STATUS_FID +# struct sbiret sbi_cpu_get_status(unsigned long cpuid) +.global sbi_cpu_get_status +sbi_cpu_get_status: + li a6, SBI_HSM_CPU_STATUS_FID li a7, SBI_HSM_EID ecall ret @@ -235,7 +231,7 @@ sbi_system_reset: ret -.section .jumpstart.data.smode, "aw", @progbits +.section .jumpstart.cpu.data.privileged, "aw", @progbits .align 6 .globl tohost diff --git a/src/common/string.smode.c b/src/common/string.smode.c index 2eed6be3..a26fd86f 100644 --- a/src/common/string.smode.c +++ b/src/common/string.smode.c @@ -1,7 +1,10 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + // SPDX-FileCopyrightText: 1990 - 2011 The FreeBSD Foundation -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. 
-// -// SPDX-License-Identifier: Apache-2.0 #include #include @@ -9,14 +12,12 @@ #include #include -int toupper(int c); - -static char *ksprintn(char *nbuf, uintmax_t num, int base, int *lenp, int upper) - __attribute__((section(".jumpstart.text.smode"))); +#include "jumpstart.h" -int islower(int c) __attribute__((section(".jumpstart.text.smode"))); -int isupper(int c) __attribute__((section(".jumpstart.text.smode"))); -int tolower(int c) __attribute__((section(".jumpstart.text.smode"))); +int toupper(int c); +int islower(int c) __attr_stext; +int isupper(int c) __attr_stext; +int tolower(int c) __attr_stext; inline int islower(int c) { return c >= 'a' && c <= 'z'; @@ -30,13 +31,47 @@ inline int tolower(int c) { return isupper(c) ? c - ('A' - 'a') : c; } -__attribute__((section(".jumpstart.text.smode"))) __attribute__((const)) int -toupper(int c) { +__attr_stext __attribute__((const)) int toupper(int c) { return islower(c) ? c + ('A' - 'a') : c; } -__attribute__((section(".jumpstart.text.smode"))) size_t -strlen(const char *str) { +#pragma GCC diagnostic push +#if defined(__clang__) +#pragma GCC diagnostic ignored "-Wtautological-pointer-compare" +#elif defined(__GNUC__) +#pragma GCC diagnostic ignored "-Wnonnull-compare" +#endif + +/* Disable nonnull warning for these functions since we want to keep NULL checks + * for bare-metal safety, even though the functions are marked as nonnull */ +__attr_stext char *strcpy(char *dest, const char *src) { + if (dest == NULL || src == NULL) { + return NULL; + } + + char *original_dest = dest; + while (*src != '\0') { + *dest = *src; + dest++; + src++; + } + *dest = '\0'; + return original_dest; +} + +__attr_stext int strcmp(const char *s1, const char *s2) { + if (s1 == NULL || s2 == NULL) { + return -1; + } + + while (*s1 && (*s1 == *s2)) { + s1++; + s2++; + } + return *(const unsigned char *)s1 - *(const unsigned char *)s2; +} + +__attr_stext size_t strlen(const char *str) { size_t len = 0; while (str[len]) @@ -58,8 +93,8 @@ 
static char const hex2ascii_data[] = "0123456789abcdefghijklmnopqrstuvwxyz"; * written in the buffer (i.e., the first character of the string). * The buffer pointed to by `nbuf' must have length >= MAXNBUF. */ -static char *ksprintn(char *nbuf, uintmax_t num, int base, int *lenp, - int upper) { +__attr_stext static char *ksprintn(char *nbuf, uintmax_t num, int base, + int *lenp, int upper) { char *p, c; p = nbuf; @@ -76,8 +111,8 @@ static char *ksprintn(char *nbuf, uintmax_t num, int base, int *lenp, /* * Scaled down version of printf(3). */ -__attribute__((section(".jumpstart.text.smode"))) int -vsnprintf(char *str, size_t size, char const *fmt, va_list ap) { +__attr_stext int vsnprintf(char *str, size_t size, char const *fmt, + va_list ap) { #define PCHAR(c) \ do { \ if (size >= 2) { \ @@ -384,8 +419,7 @@ vsnprintf(char *str, size_t size, char const *fmt, va_list ap) { #pragma GCC diagnostic pop -__attribute__((section(".jumpstart.text.smode"))) int -snprintf(char *buf, size_t size, const char *fmt, ...) { +__attr_stext int snprintf(char *buf, size_t size, const char *fmt, ...) { va_list args; int retval = 0; diff --git a/src/common/tablewalk.smode.c b/src/common/tablewalk.smode.c index 79c8874e..f704e367 100644 --- a/src/common/tablewalk.smode.c +++ b/src/common/tablewalk.smode.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "tablewalk.smode.h" #include "cpu_bits.h" @@ -8,40 +10,61 @@ #include "utils.smode.h" struct mmu_mode_attribute { - uint8_t satp_mode; + uint8_t xatp_mode; uint8_t pte_size_in_bytes; uint8_t num_levels; struct bit_range va_vpn_bits[MAX_NUM_PAGE_TABLE_LEVELS]; struct bit_range pa_ppn_bits[MAX_NUM_PAGE_TABLE_LEVELS]; struct bit_range pte_ppn_bits[MAX_NUM_PAGE_TABLE_LEVELS]; + struct bit_range pbmt_mode_bits; }; // TODO: generate this from the Python. 
-const struct mmu_mode_attribute mmu_mode_attributes[] = { - {.satp_mode = VM_1_10_SV39, + +const struct mmu_mode_attribute mmu_hsmode_attributes[] = { + {.xatp_mode = VM_1_10_SV39x4, + .pte_size_in_bytes = 8, + .num_levels = 3, + .va_vpn_bits = {{40, 30}, {29, 21}, {20, 12}}, + .pa_ppn_bits = {{55, 30}, {29, 21}, {20, 12}}, + .pte_ppn_bits = {{53, 28}, {27, 19}, {18, 10}}, + .pbmt_mode_bits = {62, 61}}, + + {.xatp_mode = VM_1_10_SV48x4, + .pte_size_in_bytes = 8, + .num_levels = 4, + .va_vpn_bits = {{49, 39}, {38, 30}, {29, 21}, {20, 12}}, + .pa_ppn_bits = {{55, 39}, {38, 30}, {29, 21}, {20, 12}}, + .pte_ppn_bits = {{53, 37}, {36, 28}, {27, 19}, {18, 10}}, + .pbmt_mode_bits = {62, 61}}, +}; + +const struct mmu_mode_attribute mmu_smode_attributes[] = { + {.xatp_mode = VM_1_10_SV39, .pte_size_in_bytes = 8, .num_levels = 3, .va_vpn_bits = {{38, 30}, {29, 21}, {20, 12}}, .pa_ppn_bits = {{55, 30}, {29, 21}, {20, 12}}, - .pte_ppn_bits = {{53, 28}, {27, 19}, {18, 10}}}, + .pte_ppn_bits = {{53, 28}, {27, 19}, {18, 10}}, + .pbmt_mode_bits = {62, 61}}, - {.satp_mode = VM_1_10_SV48, + {.xatp_mode = VM_1_10_SV48, .pte_size_in_bytes = 8, .num_levels = 4, .va_vpn_bits = {{47, 39}, {38, 30}, {29, 21}, {20, 12}}, .pa_ppn_bits = {{55, 39}, {38, 30}, {29, 21}, {20, 12}}, - .pte_ppn_bits = {{53, 37}, {36, 28}, {27, 19}, {18, 10}}}, + .pte_ppn_bits = {{53, 37}, {36, 28}, {27, 19}, {18, 10}}, + .pbmt_mode_bits = {62, 61}}, }; -__attribute__((section(".jumpstart.text.smode"))) void -translate_VA(uint64_t va, struct translation_info *xlate_info) { +__attr_stext static void +translate(uint64_t xatp, const struct mmu_mode_attribute *mmu_mode_attribute, + uint64_t va, struct translation_info *xlate_info) { // C reimplementation of the DiagSource.translate_VA() from // generate_diag_sources.py. 
- uint64_t satp_value = read_csr(satp); - xlate_info->satp_mode = (uint8_t)get_field(satp_value, SATP64_MODE); + xlate_info->xatp_mode = (uint8_t)get_field(xatp, SATP64_MODE); xlate_info->va = va; - xlate_info->pa = 0; xlate_info->levels_traversed = 0; xlate_info->walk_successful = 0; @@ -50,28 +73,14 @@ translate_VA(uint64_t va, struct translation_info *xlate_info) { xlate_info->pte_value[i] = 0; } - if (xlate_info->satp_mode == VM_1_10_MBARE) { + if (xlate_info->xatp_mode == VM_1_10_MBARE) { xlate_info->pa = va; xlate_info->walk_successful = 1; return; } - const struct mmu_mode_attribute *mmu_mode_attribute = 0; - for (uint8_t i = 0; - i < sizeof(mmu_mode_attributes) / sizeof(struct mmu_mode_attribute); - ++i) { - if (mmu_mode_attributes[i].satp_mode == xlate_info->satp_mode) { - mmu_mode_attribute = &mmu_mode_attributes[i]; - break; - } - } - - if (mmu_mode_attribute == 0) { - jumpstart_smode_fail(); - } - // Step 1 - uint64_t a = (satp_value & SATP64_PPN) << PAGE_OFFSET; + uint64_t a = (xatp & SATP64_PPN) << PAGE_OFFSET; uint8_t current_level = 0; @@ -125,6 +134,75 @@ translate_VA(uint64_t va, struct translation_info *xlate_info) { } } + xlate_info->pbmt_mode = + extract_bits(xlate_info->pte_value[xlate_info->levels_traversed - 1], + mmu_mode_attribute->pbmt_mode_bits); xlate_info->pa = a + extract_bits(va, (struct bit_range){PAGE_OFFSET - 1, 0}); xlate_info->walk_successful = 1; } + +__attr_stext void translate_GVA(uint64_t gva, + struct translation_info *xlate_info) { + uint64_t vsatp_value = read_csr(vsatp); + uint8_t mode = (uint8_t)get_field(vsatp_value, VSATP64_MODE); + + const struct mmu_mode_attribute *attribute = 0; + for (uint8_t i = 0; + i < sizeof(mmu_smode_attributes) / sizeof(mmu_smode_attributes[0]); + ++i) { + if (mmu_smode_attributes[i].xatp_mode == mode) { + attribute = &mmu_smode_attributes[i]; + break; + } + } + + if (!attribute) { + jumpstart_smode_fail(); + } + + translate(vsatp_value, attribute, gva, xlate_info); +} + +__attr_stext void 
translate_GPA(uint64_t gpa, + struct translation_info *xlate_info) { + uint64_t hgatp_value = read_csr(hgatp); + uint8_t mode = (uint8_t)get_field(hgatp_value, HGATP64_MODE); + + const struct mmu_mode_attribute *attribute = 0; + for (uint8_t i = 0; + i < sizeof(mmu_hsmode_attributes) / sizeof(mmu_hsmode_attributes[0]); + ++i) { + if (mmu_hsmode_attributes[i].xatp_mode == mode) { + attribute = &mmu_hsmode_attributes[i]; + break; + } + } + + if (!attribute) { + jumpstart_smode_fail(); + } + + translate(hgatp_value, attribute, gpa, xlate_info); +} + +__attr_stext void translate_VA(uint64_t va, + struct translation_info *xlate_info) { + uint64_t satp_value = read_csr(satp); + uint8_t mode = (uint8_t)get_field(satp_value, SATP64_MODE); + + const struct mmu_mode_attribute *attribute = 0; + for (uint8_t i = 0; + i < sizeof(mmu_smode_attributes) / sizeof(mmu_smode_attributes[0]); + ++i) { + if (mmu_smode_attributes[i].xatp_mode == mode) { + attribute = &mmu_smode_attributes[i]; + break; + } + } + + if (!attribute) { + jumpstart_smode_fail(); + } + + translate(satp_value, attribute, va, xlate_info); +} diff --git a/src/common/thread_attributes.mmode.c b/src/common/thread_attributes.mmode.c new file mode 100644 index 00000000..f95fa40a --- /dev/null +++ b/src/common/thread_attributes.mmode.c @@ -0,0 +1,17 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include "jumpstart.h" + +__attr_mtext uint8_t get_physical_cpu_id_for_cpu_id_from_mmode(uint8_t cpu_id) { + // Get the thread attributes struct address for the given cpu_id + struct thread_attributes *thread_attributes_ptr = + get_thread_attributes_for_cpu_id_from_mmode(cpu_id); + + return thread_attributes_ptr->physical_cpu_id; +} diff --git a/src/common/thread_attributes.smode.c b/src/common/thread_attributes.smode.c new file mode 100644 index 00000000..28861c4e --- /dev/null +++ b/src/common/thread_attributes.smode.c @@ -0,0 +1,17 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include "jumpstart.h" + +__attr_stext uint8_t get_physical_cpu_id_for_cpu_id_from_smode(uint8_t cpu_id) { + // Get the thread attributes struct address for the given cpu_id + struct thread_attributes *thread_attributes_ptr = + get_thread_attributes_for_cpu_id_from_smode(cpu_id); + + return thread_attributes_ptr->physical_cpu_id; +} diff --git a/src/common/time.mmode.c b/src/common/time.mmode.c new file mode 100644 index 00000000..8708ffb8 --- /dev/null +++ b/src/common/time.mmode.c @@ -0,0 +1,16 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include "cpu_bits.h" +#include "delay.h" +#include "jumpstart.h" +#include "time.mmode.h" + +__attr_mtext void delay_us_from_mmode(uint32_t delay_in_useconds) { + _delay_us(delay_in_useconds); +} diff --git a/src/common/time.smode.c b/src/common/time.smode.c new file mode 100644 index 00000000..a50ba34d --- /dev/null +++ b/src/common/time.smode.c @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include + +#include "cpu_bits.h" +#include "delay.h" +#include "jumpstart.h" +#include "time.smode.h" + +__attr_stext uint64_t read_time(void) { + uint64_t time_val; + asm volatile("rdtime %0" : "=r"(time_val)); + return time_val; +} + +__attr_stext void delay_us_from_smode(uint32_t delay_in_useconds) { + _delay_us(delay_in_useconds); +} + +__attr_stext int gettimeofday(struct timeval *tv, + void *tz __attribute__((unused))) { + uint64_t timer_ticks = read_time(); + + // Convert timer ticks to seconds and microseconds + uint64_t seconds = timer_ticks / (CPU_CLOCK_FREQUENCY_IN_MHZ * 1000000); + uint64_t microseconds = timer_ticks / (CPU_CLOCK_FREQUENCY_IN_MHZ); + + tv->tv_sec = seconds; + tv->tv_usec = microseconds; + + return 0; // Success +} + +__attr_stext time_t time(time_t *tloc) { + struct timeval tv; + + // Call gettimeofday() to get the current time + if (gettimeofday(&tv, NULL) != 0) { + return (time_t)-1; // Error case + } + + // Extract the seconds part + time_t current_time = (time_t)tv.tv_sec; + + // If tloc is not NULL, store the time in the location pointed to by tloc + if (tloc != NULL) { + *tloc = current_time; + } + + return current_time; // Return the current time in seconds +} diff --git a/src/common/trap_handler.mmode.c b/src/common/trap_handler.mmode.c index 71683deb..588b183b 100644 --- a/src/common/trap_handler.mmode.c +++ b/src/common/trap_handler.mmode.c @@ -1,11 +1,13 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" -__attribute__((section(".jumpstart.text.mmode"))) void +__attr_mtext void register_mmode_trap_handler_override(uint64_t mcause, uint64_t handler_address) { uint64_t trap_override_struct_address = @@ -34,8 +36,7 @@ register_mmode_trap_handler_override(uint64_t mcause, } } -__attribute__((section(".jumpstart.text.mmode"))) void -deregister_mmode_trap_handler_override(uint64_t mcause) { +__attr_mtext void deregister_mmode_trap_handler_override(uint64_t mcause) { uint64_t trap_override_struct_address = get_thread_attributes_trap_override_struct_address_from_mmode(); @@ -70,8 +71,7 @@ deregister_mmode_trap_handler_override(uint64_t mcause) { } } -__attribute__((section(".jumpstart.text.mmode"))) uint64_t -get_mmode_trap_handler_override(uint64_t mcause) { +__attr_mtext uint64_t get_mmode_trap_handler_override(uint64_t mcause) { uint64_t trap_override_struct_address = get_thread_attributes_trap_override_struct_address_from_mmode(); diff --git a/src/common/trap_handler.smode.c b/src/common/trap_handler.smode.c index 7a169d46..05b85c0a 100644 --- a/src/common/trap_handler.smode.c +++ b/src/common/trap_handler.smode.c @@ -1,11 +1,14 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" +#include "uart.smode.h" -__attribute__((section(".jumpstart.text.smode"))) void +__attr_stext void register_smode_trap_handler_override(uint64_t mcause, uint64_t handler_address) { uint64_t trap_override_struct_address = @@ -34,8 +37,7 @@ register_smode_trap_handler_override(uint64_t mcause, } } -__attribute__((section(".jumpstart.text.smode"))) void -deregister_smode_trap_handler_override(uint64_t mcause) { +__attr_stext void deregister_smode_trap_handler_override(uint64_t mcause) { uint64_t trap_override_struct_address = get_thread_attributes_trap_override_struct_address_from_smode(); @@ -70,8 +72,7 @@ deregister_smode_trap_handler_override(uint64_t mcause) { } } -__attribute__((section(".jumpstart.text.smode"))) uint64_t -get_smode_trap_handler_override(uint64_t mcause) { +__attr_stext uint64_t get_smode_trap_handler_override(uint64_t mcause) { uint64_t trap_override_struct_address = get_thread_attributes_trap_override_struct_address_from_smode(); @@ -96,7 +97,7 @@ get_smode_trap_handler_override(uint64_t mcause) { return trap_overrides->smode_exception_handler_overrides[exception_code]; } -__attribute__((section(".jumpstart.text.smode"))) void +__attr_stext void register_vsmode_trap_handler_override(uint64_t mcause, uint64_t handler_address) { if (get_thread_attributes_current_v_bit_from_smode() != 1) { @@ -129,8 +130,7 @@ register_vsmode_trap_handler_override(uint64_t mcause, } } -__attribute__((section(".jumpstart.text.smode"))) void -deregister_vsmode_trap_handler_override(uint64_t mcause) { +__attr_stext void deregister_vsmode_trap_handler_override(uint64_t mcause) { if (get_thread_attributes_current_v_bit_from_smode() != 1) { jumpstart_vsmode_fail(); } @@ -169,8 +169,7 @@ deregister_vsmode_trap_handler_override(uint64_t mcause) { } } -__attribute__((section(".jumpstart.text.smode"))) uint64_t -get_vsmode_trap_handler_override(uint64_t mcause) { +__attr_stext 
uint64_t get_vsmode_trap_handler_override(uint64_t mcause) { if (get_thread_attributes_current_v_bit_from_smode() != 1) { jumpstart_vsmode_fail(); } @@ -198,3 +197,111 @@ get_vsmode_trap_handler_override(uint64_t mcause) { return trap_overrides->vsmode_exception_handler_overrides[exception_code]; } + +// Helper function to get exception name +__attr_stext static const char *get_exception_name(uint64_t exception_id) { + switch (exception_id) { + case RISCV_EXCP_INST_ADDR_MIS: + return "Instruction Address Misaligned"; + case RISCV_EXCP_INST_ACCESS_FAULT: + return "Instruction Access Fault"; + case RISCV_EXCP_ILLEGAL_INST: + return "Illegal Instruction"; + case RISCV_EXCP_BREAKPOINT: + return "Breakpoint"; + case RISCV_EXCP_LOAD_ADDR_MIS: + return "Load Address Misaligned"; + case RISCV_EXCP_LOAD_ACCESS_FAULT: + return "Load Access Fault"; + case RISCV_EXCP_STORE_AMO_ADDR_MIS: + return "Store/AMO Address Misaligned"; + case RISCV_EXCP_STORE_AMO_ACCESS_FAULT: + return "Store/AMO Access Fault"; + case RISCV_EXCP_U_ECALL: + return "User ECALL"; + case RISCV_EXCP_S_ECALL: + return "Supervisor ECALL"; + case RISCV_EXCP_VS_ECALL: + return "Virtual Supervisor ECALL"; + case RISCV_EXCP_M_ECALL: + return "Machine ECALL"; + case RISCV_EXCP_INST_PAGE_FAULT: + return "Instruction Page Fault"; + case RISCV_EXCP_LOAD_PAGE_FAULT: + return "Load Page Fault"; + case RISCV_EXCP_STORE_PAGE_FAULT: + return "Store Page Fault"; + case RISCV_EXCP_SW_CHECK: + return "SW check"; + case RISCV_EXCP_HW_ERR: + return "HW Error"; + default: + return "Unknown Exception"; + } +} + +// Default exception handler for unexpected exceptions +__attr_stext void default_smode_exception_handler(void) { + uint8_t cpu_id = get_thread_attributes_cpu_id_from_smode(); + uint64_t exception_id = read_csr(scause) & SCAUSE_EC_MASK; + uint64_t sepc = read_csr(sepc); + uint64_t stval = read_csr(stval); + uint64_t sstatus = read_csr(sstatus); + + printk("CPU_%d_LOG: ERROR: Unexpected exception occurred!\n", cpu_id); + 
printk("CPU_%d_LOG: Exception details:\n", cpu_id); + printk("CPU_%d_LOG: Exception ID: 0x%lx (%s)\n", cpu_id, exception_id, + get_exception_name(exception_id)); + printk("CPU_%d_LOG: Program Counter (sepc): 0x%lx\n", cpu_id, sepc); + printk("CPU_%d_LOG: Trap Value (stval): 0x%lx\n", cpu_id, stval); + printk("CPU_%d_LOG: Status Register (sstatus): 0x%lx\n", cpu_id, sstatus); + printk( + "CPU_%d_LOG: Status bits: SPP=%d | SIE=%d | SPIE=%d | UBE=%d | SBE=%d\n", + cpu_id, + (int)((sstatus >> SSTATUS_SPP_POS) & 1), // SPP - Previous privilege level + (int)((sstatus >> SSTATUS_SIE_POS) & + 1), // SIE - Supervisor Interrupt Enable + (int)((sstatus >> SSTATUS_SPIE_POS) & + 1), // SPIE - Previous Interrupt Enable + (int)((sstatus >> SSTATUS_UBE_POS) & 1), // UBE - User mode endianness + (int)((sstatus >> SSTATUS_SBE_POS) & + 1)); // SBE - Supervisor mode endianness + + jumpstart_smode_fail(); +} + +// Register handlers for all exceptions except the ecalls. +// The ecalls are expected as we use them to move between modes. 
+__attr_stext void register_default_smode_exception_handlers(void) { + register_smode_trap_handler_override( + RISCV_EXCP_INST_ADDR_MIS, (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_INST_ACCESS_FAULT, + (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_ILLEGAL_INST, (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_BREAKPOINT, (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_LOAD_ADDR_MIS, (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_LOAD_ACCESS_FAULT, + (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_STORE_AMO_ADDR_MIS, + (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_STORE_AMO_ACCESS_FAULT, + (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_INST_PAGE_FAULT, (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_LOAD_PAGE_FAULT, (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_STORE_PAGE_FAULT, + (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_SW_CHECK, (uint64_t)(&default_smode_exception_handler)); + register_smode_trap_handler_override( + RISCV_EXCP_HW_ERR, (uint64_t)(&default_smode_exception_handler)); +} diff --git a/src/common/uart.mmode.c b/src/common/uart.mmode.c new file mode 100644 index 00000000..f8b64b9d --- /dev/null +++ b/src/common/uart.mmode.c @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "uart.mmode.h" +#include "jumpstart.h" +#include "lock.mmode.h" +#include "uart.h" + +#include +#include +#include + +extern void m_putch(char c); + +void m_mark_uart_as_enabled(void); + +__attribute__(( + section(".jumpstart.cpu.data.privileged"))) static volatile uint8_t + uart_initialized = 0; + +__attr_mtext void m_mark_uart_as_enabled(void) { + uart_initialized = 1; +} + +__attr_mtext int m_is_uart_enabled(void) { + return uart_initialized == 1; +} + +__attr_mtext int m_puts(const char *str) { + return _puts(uart_initialized, m_putch, str); +} diff --git a/src/common/uart.smode.c b/src/common/uart.smode.c index 2606f838..bfa28ff4 100644 --- a/src/common/uart.smode.c +++ b/src/common/uart.smode.c @@ -1,87 +1,48 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "uart.smode.h" #include "jumpstart.h" -#include "jumpstart_defines.h" #include "lock.smode.h" +#include "uart.h" #include #include #include -extern void putch(char c); - int toupper(int c); static int vprintk(const char *fmt, va_list args) - __attribute__((format(printf, 1, 0))) - __attribute__((section(".jumpstart.text.smode"))); + __attribute__((format(printf, 1, 0))) __attr_stext; void mark_uart_as_enabled(void); -__attribute__((section( - ".jumpstart.data.smode"))) static volatile uint8_t uart_initialized = 0; - __attribute__(( - section(".jumpstart.data.smode"))) static spinlock_t printk_lock = 0; + section(".jumpstart.cpu.data.privileged"))) static volatile uint8_t + uart_initialized = 0; -__attribute__((section(".jumpstart.text.smode"))) void -mark_uart_as_enabled(void) { +__attr_privdata static spinlock_t printk_lock = 0; + +__attr_stext void mark_uart_as_enabled(void) { uart_initialized = 1; } -__attribute__((section(".jumpstart.text.smode"))) int is_uart_enabled(void) { +__attr_stext 
int is_uart_enabled(void) { return uart_initialized == 1; } -__attribute__((section(".jumpstart.text.smode"))) int puts(const char *str) { - if (uart_initialized == 0) { - jumpstart_smode_fail(); - } - - int count = 0; - - while (*str != '\0') { - putch(*str); - count++; - str++; - } - - return count; +__attr_stext int puts(const char *str) { + return _puts(uart_initialized, putch, str); } #define VPRINTK_BUFFER_SIZE 1024 static int vprintk(const char *fmt, va_list args) { - static char buf[VPRINTK_BUFFER_SIZE]; - int rc; - - rc = vsnprintf(buf, sizeof(buf), fmt, args); - - if (rc > (int)sizeof(buf)) { - puts("vprintk() buffer overflow\n"); - return -1; - } - - return puts(buf); + return _vprintk(puts, fmt, args); } -__attribute__((section(".jumpstart.text.smode"))) int printk(const char *fmt, - ...) { - if (uart_initialized == 0) { - return 0; - } - - va_list args; - int rc; - - acquire_lock(&printk_lock); - - va_start(args, fmt); - rc = vprintk(fmt, args); - va_end(args); - - release_lock(&printk_lock); - - return rc; +__attr_stext int printk(const char *fmt, ...) { + return _printk(printk_lock, acquire_lock, release_lock, uart_initialized, + vprintk, fmt); } diff --git a/src/common/utils.mmode.c b/src/common/utils.mmode.c index 3a1e9a66..cc42cf79 100644 --- a/src/common/utils.mmode.c +++ b/src/common/utils.mmode.c @@ -1,13 +1,14 @@ -// SPDX-FileCopyrightText: 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "utils.mmode.h" #include "cpu_bits.h" #include "jumpstart.h" -__attribute__((section(".jumpstart.text.mmode"))) int32_t -mmode_try_get_seed(void) { +__attr_mtext int32_t mmode_try_get_seed(void) { uint32_t seed; uint32_t i = 100; @@ -27,23 +28,29 @@ mmode_try_get_seed(void) { } #define RAND_MAX 0x7fffffff -__attribute__((section(".jumpstart.data.smode"))) uint64_t next = 1; -__attribute__((section(".jumpstart.text.mmode"))) uint64_t -__mmode_random(void) { +__attr_privdata uint64_t next = 1; + +__attr_mtext uint64_t __mmode_random(void) { + uint64_t val; + int64_t ret; /* Based on rand in diags/perf/membw/libc_replacement.h */ /* This multiplier was obtained from Knuth, D.E., "The Art of Computer Programming," Vol 2, Seminumerical Algorithms, Third Edition, Addison-Wesley, 1998, p. 106 (line 26) & p. 108 */ - next = next * __extension__ 6364136223846793005LL + 1; - return (int64_t)((next >> 32) & RAND_MAX); + + do { + val = load_reserved_64(&next); + val = val * __extension__ 6364136223846793005LL + 1; + ret = (int64_t)((val >> 32) & RAND_MAX); + } while (store_conditional_64(&next, val) != 0); + + return ret; } -__attribute__((section(".jumpstart.text.mmode"))) int32_t -get_random_number_from_mmode(void) { +__attr_mtext int32_t get_random_number_from_mmode(void) { return (int32_t)__mmode_random(); } -__attribute__((section(".jumpstart.text.mmode"))) void -set_random_seed_from_mmode(int32_t seed) { +__attr_mtext void set_random_seed_from_mmode(int32_t seed) { next = (uint64_t)seed; } diff --git a/src/common/utils.smode.c b/src/common/utils.smode.c index b6a3b810..443bf98f 100644 --- a/src/common/utils.smode.c +++ b/src/common/utils.smode.c @@ -1,29 +1,28 @@ -// SPDX-FileCopyrightText: 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "utils.smode.h" #include "cpu_bits.h" #include "jumpstart.h" -__attribute__((section(".jumpstart.text.smode"))) __attribute__((const)) -uint64_t +__attr_stext __attribute__((const)) uint64_t extract_bits(uint64_t value, struct bit_range range) { uint8_t msb = range.msb; uint8_t lsb = range.lsb; return ((value >> lsb) & ((1ULL << (msb - lsb + 1)) - 1)); } -__attribute__((section(".jumpstart.text.smode"))) __attribute__((const)) -uint64_t +__attr_stext __attribute__((const)) uint64_t place_bits(uint64_t value, uint64_t bits, struct bit_range range) { uint8_t msb = range.msb; uint8_t lsb = range.lsb; return (value & ~(((1ULL << (msb - lsb + 1)) - 1) << lsb)) | (bits << lsb); } -__attribute__((section(".jumpstart.text.smode"))) int32_t -smode_try_get_seed(void) { +__attr_stext int32_t smode_try_get_seed(void) { uint32_t seed; uint32_t i = 100; @@ -43,24 +42,29 @@ smode_try_get_seed(void) { } #define RAND_MAX 0x7fffffff -__attribute__((section(".jumpstart.data.smode"))) uint64_t snext = 1; +__attr_privdata uint64_t snext = 1; -__attribute__((section(".jumpstart.text.smode"))) uint64_t -__smode_random(void) { +__attr_stext uint64_t __smode_random(void) { + uint64_t val; + int64_t ret; /* Based on rand in diags/perf/membw/libc_replacement.h */ /* This multiplier was obtained from Knuth, D.E., "The Art of Computer Programming," Vol 2, Seminumerical Algorithms, Third Edition, Addison-Wesley, 1998, p. 106 (line 26) & p. 
108 */ - snext = snext * __extension__ 6364136223846793005LL + 1; - return (int64_t)((snext >> 32) & RAND_MAX); + + do { + val = load_reserved_64(&snext); + val = val * __extension__ 6364136223846793005LL + 1; + ret = (int64_t)((val >> 32) & RAND_MAX); + } while (store_conditional_64(&snext, val) != 0); + + return ret; } -__attribute__((section(".jumpstart.text.smode"))) int32_t -get_random_number_from_smode(void) { +__attr_stext int32_t get_random_number_from_smode(void) { return (int32_t)__smode_random(); } -__attribute__((section(".jumpstart.text.smode"))) void -set_random_seed_from_smode(int32_t seed) { +__attr_stext void set_random_seed_from_smode(int32_t seed) { snext = (uint64_t)seed; } diff --git a/src/meson.build b/src/meson.build index 9ca7c775..b2a3c0c7 100644 --- a/src/meson.build +++ b/src/meson.build @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/src/public/exit.mmode.S b/src/public/exit.mmode.S index 26f8ead4..f0ba0e4a 100644 --- a/src/public/exit.mmode.S +++ b/src/public/exit.mmode.S @@ -1,69 +1,70 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" #include "cpu_bits.h" -.section .jumpstart.text.mmode.init.exit, "ax" +.section .jumpstart.cpu.text.mmode.init.exit, "ax" .global _mmode_end _mmode_end: # a0 will contain diag pass/fail status. - # Store pass/fail status into the hart status tracker. - csrr t0, mhartid - la t1, hart_status_tracker + # Store pass/fail status into the cpu status tracker. + GET_THREAD_ATTRIBUTES_CPU_ID(t0) + la t1, cpu_status_tracker add t1, t1, t0 sb a0, 0(t1) - # The primary hart will go through the exit routine. 
- li t1, PRIMARY_HART_ID + # The primary cpu will go through the exit routine. + li t1, PRIMARY_CPU_ID beq t0, t1, 1f - # Secondary hart. - # If we're running in batch mode, return the hart. - li t0, BATCH_MODE - bnez t0, batch_mode_return_hart - - # otherwise have all the secondary harts wait on the wfi. + # Secondary cpu. + # Have all the secondary cpus wait on the wfi. j just_wfi_from_mmode 1: - # Check the status of all the active harts. - # a0: Active hart mask. Gets shifted right as we check each hart. - # t0: hart_status_tracker address - # t1: Hart id of the current hart we're checking status of. - li a0, ACTIVE_HART_MASK - la t0, hart_status_tracker + # Check the status of all the active cpus. + # a0: Active cpu mask. Gets shifted right as we check each cpu. + # t0: cpu_status_tracker address + # t1: CPU id of the current cpu we're checking status of. + li a0, ACTIVE_CPU_MASK + la t0, cpu_status_tracker li t1, 0x0 -check_hart_status_loop: +check_cpu_status_loop: andi t6, a0, 0x1 - beqz t6, done_with_current_hart + beqz t6, done_with_current_cpu # Active core, check it's pass/fail status. - add t5, t0, t1 # pointer to the hart's status + add t5, t0, t1 # pointer to the cpu's status - li t6, HART_INACTIVE -wait_for_inactive_hart_loop: + li t6, CPU_INACTIVE +wait_for_inactive_cpu_loop: lb t4, 0(t5) - beq t4, t6, wait_for_inactive_hart_loop + beq t4, t6, wait_for_inactive_cpu_loop - li t6, HART_RUNNING -wait_for_running_hart_loop: + li t6, CPU_RUNNING +wait_for_running_cpu_loop: lb t4, 0(t5) - beq t4, t6, wait_for_running_hart_loop + beq t4, t6, wait_for_running_cpu_loop li t6, DIAG_PASSED bne t4, t6, jumpstart_mmode_fail -done_with_current_hart: +done_with_current_cpu: srli a0, a0, 1 addi t1, t1, 1 - bnez a0, check_hart_status_loop + bnez a0, check_cpu_status_loop - # All harts have passed, we're done. + # All cpus have passed, we're done. 
li t1, DIAG_PASSED bne a0, t1, jumpstart_mmode_fail @@ -75,9 +76,6 @@ done_with_current_hart: jumpstart_mmode_fail: li a0, DIAG_FAILED - li t0, BATCH_MODE - bnez t0, batch_mode_return_hart - run_end_of_sim_sequence: slli a0, a0, 1 ori a0, a0, 1 @@ -92,40 +90,7 @@ just_wfi_from_mmode: wfi j just_wfi_from_mmode -.global batch_mode_return_unused_hart -batch_mode_return_unused_hart: - li a0, DIAG_PASSED -# a0: return value -.global batch_mode_return_hart -batch_mode_return_hart: - la t0, batch_mode_exit_lock -acquire_exit_lock: - ld t1, (t0) - bnez t1, acquire_exit_lock - li t2, 1 - amoswap.d.aq t2, t2, (t0) - bnez t2, acquire_exit_lock - - la t1, batch_mode_exit_address - ld ra, (t1) - -release_exit_lock: - sd zero, (t0) - csrw mepc, ra - li t0, MSTATUS_MPP - csrw mstatus, t0 - mret - -.section .jumpstart.data.smode, "aw", @progbits - -.align 8 -.global batch_mode_exit_address -batch_mode_exit_address: - .8byte 0 -.global batch_mode_exit_lock -batch_mode_exit_lock: - # initial state is locked. This is set to zero by primary hart after saving return address - .8byte 1 +.section .jumpstart.cpu.data.privileged, "aw", @progbits .align 6 .globl tohost diff --git a/src/public/init.mmode.S b/src/public/init.mmode.S index 465ceea6..bae47492 100644 --- a/src/public/init.mmode.S +++ b/src/public/init.mmode.S @@ -1,19 +1,24 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" #include "cpu_bits.h" -.section .jumpstart.text.mmode.init, "ax" +.section .jumpstart.cpu.text.mmode.init, "ax" .global setup_mmode setup_mmode: ret -.section .jumpstart.text.mmode.init.end, "ax" - -.global _JUMPSTART_TEXT_MMODE_INIT_BOUNDARY -_JUMPSTART_TEXT_MMODE_INIT_BOUNDARY: - j jumpstart_mmode_fail - ret +# Output: +# a0: cpuid +.global get_cpu_id +get_cpu_id: + csrr a0, mhartid + ret diff --git a/src/public/jump_to_main.mmode.S b/src/public/jump_to_main.mmode.S index 5f88bbc9..e0a993d7 100644 --- a/src/public/jump_to_main.mmode.S +++ b/src/public/jump_to_main.mmode.S @@ -1,10 +1,15 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" -.section .jumpstart.text.mmode, "ax" +.section .jumpstart.cpu.text.mmode, "ax" .global jump_to_main_in_mmode jump_to_main_in_mmode: diff --git a/src/public/jumpstart_public_source_attributes.yaml b/src/public/jumpstart_public_source_attributes.yaml index e7e3d735..4122c2aa 100644 --- a/src/public/jumpstart_public_source_attributes.yaml +++ b/src/public/jumpstart_public_source_attributes.yaml @@ -1,13 +1,9 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -diag_entry_label: _mmode_start - rivos_internal_build: false -max_num_harts_supported: 4 - priv_modes_supported: [mmode, smode, umode] # Hard limits on how many pages the jumsptart infrastructure itself will occupy. 
@@ -17,107 +13,118 @@ priv_modes_supported: [mmode, smode, umode] jumpstart_mmode: text: page_size: 0x1000 - num_pages: 4 - linker_script_section: ".jumpstart.text.mmode.init.enter,.jumpstart.text.mmode.init.exit,.jumpstart.text.mmode.init,.jumpstart.text.mmode.init.end,.jumpstart.text.mmode" + linker_script_section: ".jumpstart.cpu.text.mmode.init.enter,.jumpstart.cpu.text.mmode.init.exit,.jumpstart.cpu.text.mmode.init,.jumpstart.cpu.text.mmode.init.end,.jumpstart.cpu.text.mmode" pma_memory_type: "wb" no_pte_allocation: True -jumpstart_smode: - text: - page_size: 0x1000 - num_pages: 3 - xwr: "0b101" - umode: "0b0" - pma_memory_type: "wb" - linker_script_section: ".jumpstart.text.smode.init.enter,.jumpstart.text.smode.init,.jumpstart.text.smode" stack: page_size: 0x1000 - num_pages: 4 xwr: "0b011" umode: "0b0" pma_memory_type: "wb" - linker_script_section: ".jumpstart.stack.smode" + linker_script_section: ".jumpstart.cpu.stack.privileged" c_structs: page_size: 0x1000 - num_pages: 2 + num_pages_per_cpu: 1 xwr: "0b011" umode: "0b0" pma_memory_type: "wb" - linker_script_section: ".jumpstart.c_structs.smode" + linker_script_section: ".jumpstart.cpu.c_structs.mmode" data: page_size: 0x1000 - num_pages: 3 xwr: "0b011" umode: "0b0" pma_memory_type: "wb" - linker_script_section: ".jumpstart.data.smode" + linker_script_section: ".jumpstart.cpu.data.privileged" + rodata: + page_size: 0x1000 + xwr: "0b001" + umode: "0b0" + pma_memory_type: "wb" + linker_script_section: ".rodata" sdata: page_size: 0x1000 - num_pages: 1 xwr: "0b011" umode: "0b0" pma_memory_type: "wb" linker_script_section: ".sdata" - bss: +jumpstart_smode: + text: page_size: 0x1000 - xwr: "0b011" + xwr: "0b101" umode: "0b0" pma_memory_type: "wb" - linker_script_section: ".bss,.sbss,.sbss.*" - rodata: + linker_script_section: ".jumpstart.cpu.text.smode.init.enter,.jumpstart.cpu.text.smode.init,.jumpstart.cpu.text.smode" + bss: page_size: 0x1000 - xwr: "0b001" + xwr: "0b011" umode: "0b0" pma_memory_type: "wb" - 
linker_script_section: ".rodata" + linker_script_section: ".bss,.sbss,.sbss.*" heap: - page_size: 0x200000 - num_pages: 2 xwr: "0b011" umode: "0b0" pma_memory_type: "wb" - linker_script_section: ".jumpstart.smode.heap" + linker_script_section: ".jumpstart.cpu.smode.heap" jumpstart_umode: text: page_size: 0x1000 - num_pages: 1 xwr: "0b101" umode: "0b1" pma_memory_type: "wb" - linker_script_section: ".jumpstart.text.umode" + linker_script_section: ".jumpstart.cpu.text.umode" stack: page_size: 0x1000 - num_pages: 4 xwr: "0b011" umode: "0b1" pma_memory_type: "wb" - linker_script_section: ".jumpstart.stack.umode" + linker_script_section: ".jumpstart.cpu.stack.umode" # These attributes can be overriden by the test attributes file or # at build time. diag_attributes: + diag_entry_label: null start_test_in_mmode: false enable_virtualization: false - max_num_pagetable_pages_per_stage: 30 - num_pages_for_jumpstart_smode_bss: 7 - num_pages_for_jumpstart_smode_rodata: 1 - allow_page_table_modifications: false mmode_start_address: 0x80000000 # By default smode and umode areas will be placed after the mmode area # unless given values by a diag. smode_start_address: null umode_start_address: null - active_hart_mask: '0b1' + num_pages_for_jumpstart_mmode_text: 3 + num_pages_per_cpu_for_jumpstart_mmode_data: 2 + num_pages_per_cpu_for_jumpstart_mmode_stack: 1 + num_pages_for_jumpstart_smode_text: 5 + num_pages_for_jumpstart_mmode_sdata: 1 + num_pages_for_jumpstart_smode_bss: 7 + page_size_for_jumpstart_smode_heap: 0x200000 + num_pages_for_jumpstart_smode_heap: 2 + num_pages_for_jumpstart_mmode_rodata: 2 + num_pages_for_jumpstart_umode_text: 1 + num_pages_per_cpu_for_jumpstart_umode_stack: 1 + max_num_pagetable_pages_per_stage: 30 + allow_page_table_modifications: false + active_cpu_mask: '0b1' + # We'll pick the lowest cpu id as the primary cpu id if the diag + # doesn't explicitly specify it or it's not overriden on the command line. 
+ primary_cpu_id: null satp_mode: 'sv39' vsatp_mode: 'sv39' hgatp_mode: 'sv39x4' mappings: null - batch_mode: false + enable_uart: true + enable_heap: false + build_rng_seed: 0xdeadbeef + # Limit the range of the ELF load sections. If not set then + # no limit is applied. + elf_start_address: null + elf_end_address: null c_structs: thread_attributes: fields: - hart_id: uint8_t + cpu_id: uint8_t + physical_cpu_id: uint8_t current_mode: uint8_t current_v_bit: uint8_t smode_setup_done: uint8_t @@ -127,31 +134,37 @@ c_structs: num_context_saves_remaining_in_mmode: uint8_t smode_reg_context_save_region_address: uint64_t num_context_saves_remaining_in_smode: uint8_t + marchid: uint64_t + mimpid: uint64_t bookend_magic_number: uint64_t trap_override_attributes: fields: vsmode_interrupt_handler_overrides: uint64_t,46 vsmode_exception_handler_overrides: uint64_t,20 smode_interrupt_handler_overrides: uint64_t,46 - smode_exception_handler_overrides: uint64_t,20 + smode_exception_handler_overrides: uint64_t,24 mmode_interrupt_handler_overrides: uint64_t,46 - mmode_exception_handler_overrides: uint64_t,20 + mmode_exception_handler_overrides: uint64_t,24 defines: THREAD_ATTRIBUTES_BOOKEND_MAGIC_NUMBER_VALUE: 0x3317150533171505 PAGE_OFFSET: 12 DIAG_PASSED: 0 DIAG_FAILED: 1 - # These are the various states that a hart can be in. - HART_RUNNING: 2 - HART_INACTIVE: 3 - PRIMARY_HART_ID: 0 + # These are the various states that a cpu can be in. + CPU_RUNNING: 2 + CPU_INACTIVE: 3 CHECKTC_DISABLE: nop CHECKTC_ENABLE: nop MMODE_ROLE_DISABLE: nop MMODE_ROLE_ENABLE: nop STIMER_RESET: nop MTIMER_RESET: nop + CPU_CLOCK_FREQUENCY_IN_MHZ: 1 + BACKING_MEMORY_DDR: 1 + MEMORY_TYPE_WB: 3 + MEMORY_TYPE_WC: 1 + MEMORY_TYPE_UC: 0 syscall_numbers: - SYSCALL_RUN_FUNC_IN_UMODE_COMPLETE diff --git a/src/public/meson.build b/src/public/meson.build index e9e97b1d..ffce98b5 100644 --- a/src/public/meson.build +++ b/src/public/meson.build @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. 
+# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/src/public/uart/meson.build b/src/public/uart/meson.build index f54d0917..a7c4fe26 100644 --- a/src/public/uart/meson.build +++ b/src/public/uart/meson.build @@ -1,7 +1,11 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 +mmode_sources += files( + 'uart.mmode.c', + ) + smode_sources += files( 'uart.smode.c', ) diff --git a/src/public/uart/uart.mmode.c b/src/public/uart/uart.mmode.c new file mode 100644 index 00000000..6bfe701e --- /dev/null +++ b/src/public/uart/uart.mmode.c @@ -0,0 +1,20 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "jumpstart.h" +#include + +void setup_uart(void); + +__attr_mtext __attribute__((noreturn)) void m_putch(char c) { + // Implement putch code here + (void)c; + jumpstart_mmode_fail(); +} + +__attr_mtext void m_setup_uart(void) { + // Implement Uart Setup code here +} diff --git a/src/public/uart/uart.smode.c b/src/public/uart/uart.smode.c index 1f76eefa..141a02ec 100644 --- a/src/public/uart/uart.smode.c +++ b/src/public/uart/uart.smode.c @@ -1,21 +1,20 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "jumpstart.h" -#include "jumpstart_defines.h" #include -void putch(char c); void setup_uart(void); -__attribute__((section(".jumpstart.text.smode"))) __attribute__((noreturn)) void -putch(char c) { +__attr_stext __attribute__((noreturn)) void putch(char c) { // Implement putch code here (void)c; jumpstart_smode_fail(); } -__attribute__((section(".jumpstart.text.smode"))) void setup_uart(void) { +__attr_stext void setup_uart(void) { // Implement Uart Setup code here } diff --git a/tests/common/meson.build b/tests/common/meson.build index 7a60685e..bbe879e9 100644 --- a/tests/common/meson.build +++ b/tests/common/meson.build @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 @@ -19,53 +19,44 @@ start_in_smode_tests += [ ['test010', 'ELF checks.'], ['test011', 'Handle user mode exceptions in supervisor mode.'], ['test012', 'Exit with DIAG_FAILED to test fail path', '', true], - ['test013', 'test000 with 4 harts.', '-p4'], - ['test014', 'Hart 2 exits with DIAG_FAILED to test MP fail path.', '-p4', true], + ['test013', 'test000 with 4 cpus.', '-p4'], + ['test014', 'Cpu 2 exits with DIAG_FAILED to test MP fail path.', '-p4', true], ['test019', 'Sync 4P CPUs.', '-p4'], ['test020', 'translate_VA() and page table modification test.'], ['test021', '2P translate_VA() and page table modification test.', '-p2', false], ['test022', 'Exit with jumpstart_smode_fail() to test fail path.', '', true], ['test026', 'VA != PA mapping.'], ['test027', 'sv39 VA aliasing test.'], - ['test028', 'Super Pages (SATP.mode = sv39) VA translation test.', '-m0x80000000:0x40000000,0xC0000000:0x1000,0xC0020000:0x2000,0xC0022000:0x1000,0xD0000000:0x400000,0xE0000000:0x400000,0xD0400000:0x200000,0x100000000:0x4000000,0x140000000:0x40000000'], - ['test029', 'Super Pages (SATP.mode = sv48) VA translation test.', 
'-m0x80000000:0x40000000,0xC0000000:0x1000,0xC0020000:0x2000,0xC0022000:0x1000,0xD0000000:0x400000,0xE0000000:0x400000,0xD0400000:0x200000,0x100000000:0x4000000,0x140000000:0x40000000'], + ['test028', 'Super Pages (SATP.mode = sv39) VA translation test.', '-m0x80000000:0x40000000,0xC0000000:0x1000,0xC0020000:0x4000,0xD0000000:0x400000,0xE0000000:0x401000,0xD0400000:0x200000,0x100000000:0x4000000,0x140000000:0x40000000'], + ['test029', 'Super Pages (SATP.mode = sv48) VA translation test.', '-m0x80000000:0x40000000,0xC0000000:0x1000,0xC0020000:0x4000,0xD0000000:0x400000,0xE0000000:0x401000,0xD0400000:0x200000,0x100000000:0x4000000,0x140000000:0x40000000'], ['test030', 'Heap malloc test.'], - ['test031', 'Simple spinlock test with 4 harts', '-p4'], + ['test031', 'Simple spinlock test with 4 cpus', '-p4'], ['test033', 'Exit with jumpstart_umode_fail() to test umode fail path.', '', true], - ['test034', 'Simple spinlock test with 4 active harts and 4 inactive ones.', '-p8'], ['test036', 'sv48 VA aliasing test.'], ['test037', 'FP/Vector test.'], - ['test039', 'MP heap malloc test.', '-p4'], ['test045', 'Run C/Assembly functions with run_function_in_vsmode() from supervisor mode.'], - ['test046', 'Register and run vsmode illegal instruction exception handler.'], + ['test046', 'Register and run vsmode illegal instruction exception handler.', '-p4'], ['test047', 'Hypervisor load/store.'], + ['test048', 'Run C/Assembly functions with run_function_in_vumode() from VS mode.'], + ['test049', 'Exit with jumpstart_vumode_fail() to test umode fail path.', '', true], + ['test050', 'Exit with jumpstart_vsmode_fail() to test fail path.', '', true], + ['test051', 'MMU with SATP.mode = bare.'], + ['test052', 'Test string.h functions.'], + ['test053', 'Test time() and gettimeofday().'], + ['test058', 'Run cores 1 and 3 with cores 0 and 2 marked as inactive.', '-p4'], + ['test067', 'Test address assignment for mappings with no addresses specified.'], + ['test070', 'Test expandable 
mappings.', '-p4'], ] start_in_mmode_tests += [ - ['test009', 'Jump to main() in machine mode and exit.'], ['test017', 'Register and run Machine mode illegal instruction exception handler.'], ['test018', 'Run C/Assembly functions with run_function_in_smode() from machine mode.'], ['test023', 'Handle S mode exceptions in M mode handlers.'], - ['test038', '2P where only non-primary hart runs functions with run_functions_in_smode().', '-p2'], + ['test038', '2P where only non-primary cpu runs functions with run_functions_in_smode().', '-p2'], ['test040', 'Run smode function during mmode exception handler.'], ['test041', 'Fail gracefully on hitting too many nested exceptions in smode.', '', true], ['test042', 'Run Supervisor mode illegal instruction exception handler on 4 cores.', '-p4'], ['test044', 'Tests random number generation and seed csr from both M and S modes.', '-p4'], ] -firmware_boot_tests += [] - -tests_disabled_on_qemu += [] tests_disabled_on_spike += [] -tests_disabled_for_sbi_firmware_boot += [ - 'test010', - 'test043', - ] - -# FW doesn't appear to enable the trap delegation of VS excall in smode. -# Disabling virtualization tests till this is resolved. -# https://rivosinc.atlassian.net/browse/SW-7451 -tests_disabled_for_sbi_firmware_boot += [ - 'test045', - 'test046', - ] diff --git a/tests/common/test000/test000.c b/tests/common/test000/test000.c index 68e6e242..dceb9d5a 100644 --- a/tests/common/test000/test000.c +++ b/tests/common/test000/test000.c @@ -1,17 +1,24 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" +extern uint64_t s_stage_pagetables_start; + +extern uint64_t _TEXT_START; + int main(void) { uint64_t main_function_address = (uint64_t)&main; - if (main_function_address != 0xD0020000) { + volatile uint64_t text_section_start = (uint64_t)(&_TEXT_START); + if (main_function_address != text_section_start) { return DIAG_FAILED; } - if (get_thread_attributes_hart_id_from_smode() != 0) { + if (get_thread_attributes_cpu_id_from_smode() != 0) { return DIAG_FAILED; } @@ -28,7 +35,15 @@ int main(void) { return DIAG_FAILED; } - if (get_field(read_csr(satp), SATP64_MODE) != VM_1_10_SV39) { + uint64_t satp_value = read_csr(satp); + + if (get_field(satp_value, SATP64_MODE) != VM_1_10_SV39) { + return DIAG_FAILED; + } + + uint64_t expected_satp_ppn = + ((uint64_t)&s_stage_pagetables_start) >> PAGE_OFFSET; + if (get_field(satp_value, SATP64_PPN) != expected_satp_ppn) { return DIAG_FAILED; } diff --git a/tests/common/test000/test000.diag_attributes.yaml b/tests/common/test000/test000.diag_attributes.yaml index e864c79b..df04c935 100644 --- a/tests/common/test000/test000.diag_attributes.yaml +++ b/tests/common/test000/test000.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: - diff --git a/tests/common/test001/test001.c b/tests/common/test001/test001.c index b0db6def..e4bcde8c 100644 --- a/tests/common/test001/test001.c +++ b/tests/common/test001/test001.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" @@ -11,7 +13,7 @@ int main(void) { return DIAG_FAILED; } - if (get_thread_attributes_hart_id_from_smode() != 0) { + if (get_thread_attributes_cpu_id_from_smode() != 0) { return DIAG_FAILED; } diff --git a/tests/common/test001/test001.diag_attributes.yaml b/tests/common/test001/test001.diag_attributes.yaml index 984bc290..b88d5947 100644 --- a/tests/common/test001/test001.diag_attributes.yaml +++ b/tests/common/test001/test001.diag_attributes.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/common/test002/test002.S b/tests/common/test002/test002.S index 132a75cb..853bed67 100644 --- a/tests/common/test002/test002.S +++ b/tests/common/test002/test002.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" #define BYTES_TO_COPY (64 * 8) diff --git a/tests/common/test002/test002.c b/tests/common/test002/test002.c index e7cfc8bb..a230ac09 100644 --- a/tests/common/test002/test002.c +++ b/tests/common/test002/test002.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" @@ -51,7 +53,7 @@ uint8_t c_check_passed_in_arguments(uint8_t a0, uint8_t a1, uint8_t a2, } int main(void) { - if (get_thread_attributes_hart_id_from_smode() != 0) { + if (get_thread_attributes_cpu_id_from_smode() != 0) { return DIAG_FAILED; } @@ -64,6 +66,10 @@ int main(void) { return DIAG_FAILED; } + if (NUM_PAGES_PER_CPU_FOR_JUMPSTART_UMODE_STACK != 2) { + return DIAG_FAILED; + } + if (run_function_in_umode((uint64_t)asm_check_passed_in_arguments, 1, 2, 3, 4, 5, 6, 7) != DIAG_PASSED) { return DIAG_FAILED; diff --git a/tests/common/test002/test002.diag_attributes.yaml b/tests/common/test002/test002.diag_attributes.yaml index 2facf39b..040a61a9 100644 --- a/tests/common/test002/test002.diag_attributes.yaml +++ b/tests/common/test002/test002.diag_attributes.yaml @@ -1,9 +1,11 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" +num_pages_per_cpu_for_jumpstart_umode_stack: 2 + mappings: - va: 0xd0020000 diff --git a/tests/common/test003/test003.S b/tests/common/test003/test003.S index 1fbbb7e6..5d334826 100644 --- a/tests/common/test003/test003.S +++ b/tests/common/test003/test003.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .global test003_illegal_instruction_function test003_illegal_instruction_function: diff --git a/tests/common/test003/test003.c b/tests/common/test003/test003.c index 55fb3348..639bf493 100644 --- a/tests/common/test003/test003.c +++ b/tests/common/test003/test003.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. 
-// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" diff --git a/tests/common/test003/test003.diag_attributes.yaml b/tests/common/test003/test003.diag_attributes.yaml index e864c79b..ca5cdc9f 100644 --- a/tests/common/test003/test003.diag_attributes.yaml +++ b/tests/common/test003/test003.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: - @@ -19,7 +19,6 @@ mappings: va: 0xD0022000 pa: 0xD0022000 xwr: "0b011" - valid: "0b0" page_size: 0x1000 num_pages: 1 pma_memory_type: "wb" diff --git a/tests/common/test006/test006.c b/tests/common/test006/test006.c index 25aaa397..5ca15970 100644 --- a/tests/common/test006/test006.c +++ b/tests/common/test006/test006.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" diff --git a/tests/common/test006/test006.diag_attributes.yaml b/tests/common/test006/test006.diag_attributes.yaml index e864c79b..df04c935 100644 --- a/tests/common/test006/test006.diag_attributes.yaml +++ b/tests/common/test006/test006.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: - diff --git a/tests/common/test009/test009.S b/tests/common/test009/test009.S deleted file mode 100644 index 13a2205e..00000000 --- a/tests/common/test009/test009.S +++ /dev/null @@ -1,15 +0,0 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 - -#include "jumpstart_defines.h" - -.section .text, "ax" - -.global just_nops -just_nops: - .rept (((1 << PAGE_OFFSET) * 2) / 4) - nop - .endr - - ret diff --git a/tests/common/test009/test009.c b/tests/common/test009/test009.c deleted file mode 100644 index 6c4b29a7..00000000 --- a/tests/common/test009/test009.c +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "cpu_bits.h" -#include "jumpstart.h" - -void just_nops(void); - -int main(void) { - if (get_thread_attributes_hart_id_from_mmode() != 0) { - return DIAG_FAILED; - } - - if (get_thread_attributes_bookend_magic_number_from_mmode() != - THREAD_ATTRIBUTES_BOOKEND_MAGIC_NUMBER_VALUE) { - return DIAG_FAILED; - } - - if (get_thread_attributes_current_mode_from_mmode() != PRV_M) { - return DIAG_FAILED; - } - - just_nops(); - - return DIAG_PASSED; -} diff --git a/tests/common/test010/test010.c b/tests/common/test010/test010.c index 5b2cbe0a..f461eca3 100644 --- a/tests/common/test010/test010.c +++ b/tests/common/test010/test010.c @@ -1,31 +1,43 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" -extern uint64_t _JUMPSTART_TEXT_MMODE_INIT_ENTER_START; -extern uint64_t _JUMPSTART_TEXT_SMODE_INIT_ENTER_START; -extern uint64_t _JUMPSTART_TEXT_UMODE_START; +extern uint64_t _JUMPSTART_CPU_TEXT_MMODE_INIT_ENTER_START; +extern uint64_t _JUMPSTART_CPU_TEXT_SMODE_INIT_ENTER_START; +extern uint64_t _JUMPSTART_CPU_TEXT_UMODE_START; extern uint64_t _BSS_START; extern uint64_t _BSS_END; -#define ADDR(var) ((uint64_t) & (var)) +extern uint64_t _TEXT_START; +extern uint64_t _TEXT_END; + +extern uint64_t _DATA_START; +extern uint64_t _DATA_END; + +#define ADDR(var) ((uint64_t)&(var)) #define VAR_WITHIN_REGION(var, start, end) \ (((ADDR(var) >= (start)) && (ADDR(var) + (sizeof(var)) < (end))) ? 1 : 0) uint64_t uninitialized_var; uint64_t zero_initialized_var = 0; -uint8_t uninitialized_arr[128]; -uint8_t zero_initialized_arr[128] = {0}; +#define NUM_ARRAY_ELEMENTS 128 +uint8_t uninitialized_arr[NUM_ARRAY_ELEMENTS]; +uint8_t zero_initialized_arr[NUM_ARRAY_ELEMENTS] = {0}; -uint8_t store_faulted = 0; +__attribute__((section(".data"))) uint8_t store_faulted = 0; static void skip_faulting_store_instruction(void) { + volatile uint64_t data_start_address = ADDR(_DATA_START); + volatile uint64_t expected_fault_address = data_start_address + 0x1000; + uint64_t stval_value = read_csr(stval); - if (stval_value != 0xC0023000) { + if (stval_value != expected_fault_address) { jumpstart_smode_fail(); } @@ -46,47 +58,59 @@ static void skip_faulting_store_instruction(void) { __attribute__((section(".text.startup"))) __attribute__((pure)) int main(void) { // Check that the M-mode, S-mode, U-mode start address overrides worked. 
uint64_t mmode_start_address = - (uint64_t)&_JUMPSTART_TEXT_MMODE_INIT_ENTER_START; + (uint64_t)&_JUMPSTART_CPU_TEXT_MMODE_INIT_ENTER_START; if (mmode_start_address != MMODE_START_ADDRESS) { return DIAG_FAILED; } uint64_t smode_start_address = - (uint64_t)&_JUMPSTART_TEXT_SMODE_INIT_ENTER_START; + (uint64_t)&_JUMPSTART_CPU_TEXT_SMODE_INIT_ENTER_START; if (smode_start_address != SMODE_START_ADDRESS) { return DIAG_FAILED; } - uint64_t umode_start_address = (uint64_t)&_JUMPSTART_TEXT_UMODE_START; + uint64_t umode_start_address = (uint64_t)&_JUMPSTART_CPU_TEXT_UMODE_START; if (umode_start_address != UMODE_START_ADDRESS) { return DIAG_FAILED; } + // The compiler seems to optimize out the variables without volatile. + volatile uint64_t text_start_address = ADDR(_TEXT_START); + volatile uint64_t text_end_address = ADDR(_TEXT_END); + // Check that these functions are in the right place. uint64_t main_function_address = (uint64_t)&main; - if (main_function_address != 0xC0020000) { + if (main_function_address != text_start_address) { + return DIAG_FAILED; + } + + // Check that the skip_faulting_store_instruction() is in the .text section. + if (VAR_WITHIN_REGION(skip_faulting_store_instruction, text_start_address, + text_end_address) == 0) { return DIAG_FAILED; } // Check BSS. + volatile uint64_t bss_start_address = ADDR(_BSS_START); + volatile uint64_t bss_end_address = ADDR(_BSS_END); // These variables should be located within the BSS section. 
- if (VAR_WITHIN_REGION(uninitialized_var, ADDR(_BSS_START), ADDR(_BSS_END)) == - 0) { + if (VAR_WITHIN_REGION(uninitialized_var, bss_start_address, + bss_end_address) == 0) { return DIAG_FAILED; } - if (VAR_WITHIN_REGION(zero_initialized_var, ADDR(_BSS_START), - ADDR(_BSS_END)) == 0) { + if (VAR_WITHIN_REGION(zero_initialized_var, bss_start_address, + bss_end_address) == 0) { return DIAG_FAILED; } - if (VAR_WITHIN_REGION(uninitialized_arr, ADDR(_BSS_START), ADDR(_BSS_END)) == - 0) { + if (VAR_WITHIN_REGION(uninitialized_arr, bss_start_address, + bss_end_address) == 0) { return DIAG_FAILED; } - if (VAR_WITHIN_REGION(zero_initialized_arr, ADDR(_BSS_START), - ADDR(_BSS_END)) == 0) { + if (VAR_WITHIN_REGION(zero_initialized_arr, bss_start_address, + bss_end_address) == 0) { return DIAG_FAILED; } @@ -95,23 +119,33 @@ __attribute__((section(".text.startup"))) __attribute__((pure)) int main(void) { return DIAG_FAILED; } - for (uint8_t i = 0; i < 128; i++) { + for (uint8_t i = 0; i < NUM_ARRAY_ELEMENTS; i++) { if (uninitialized_arr[i] || zero_initialized_arr[i]) { return DIAG_FAILED; } } - // Read and write to the page at 0xC0022000 - uint64_t *ptr = (uint64_t *)0xC0022000; - *ptr = UINT64_C(0x1234567890ABCDEF); - if (*ptr != UINT64_C(0x1234567890ABCDEF)) { + volatile uint64_t data_start_address = ADDR(_DATA_START); + volatile uint64_t data_end_address = ADDR(_DATA_END); + // We have 2 pages in the .data section. There is an unmapped page in between + // the 2 pages so there are 3 pages between _DATA_START and _DATA_END. + // Check that there are 3 4K pages between _DATA_START and _DATA_END. + if ((data_end_address - data_start_address + 1) != (3 * 0x1000)) { + return DIAG_FAILED; + } + + // RW to the first page. 
+ volatile uint64_t first_page_address = data_start_address; + volatile uint64_t second_page_address = data_start_address + 0x1000; + volatile uint64_t third_page_address = data_start_address + 0x2000; + *((uint64_t *)first_page_address) = UINT64_C(0x1234567890ABCDEF); + if (*((uint64_t *)first_page_address) != UINT64_C(0x1234567890ABCDEF)) { return DIAG_FAILED; } - // Read and write to the page at 0xC0024000 - ptr = (uint64_t *)0xC0024000; - *ptr = UINT64_C(0x1234567890ABCDEF); - if (*ptr != UINT64_C(0x1234567890ABCDEF)) { + // RW to the third page. + *((uint64_t *)third_page_address) = UINT64_C(0x1234567890ABCDEF); + if (*((uint64_t *)third_page_address) != UINT64_C(0x1234567890ABCDEF)) { return DIAG_FAILED; } @@ -119,10 +153,8 @@ __attribute__((section(".text.startup"))) __attribute__((pure)) int main(void) { RISCV_EXCP_STORE_PAGE_FAULT, (uint64_t)(&skip_faulting_store_instruction)); - // This page is also part of the .data linker script section but it does - // not have a page mapping so it will fault. - ptr = (uint64_t *)0xC0023000; - *ptr = UINT64_C(0x1234567890ABCDEF); + // The second page doesn't have a mapping set up so it should fault. + *((uint64_t *)second_page_address) = UINT64_C(0x1234567890ABCDEF); if (store_faulted == 0) { return DIAG_FAILED; diff --git a/tests/common/test010/test010.diag_attributes.yaml b/tests/common/test010/test010.diag_attributes.yaml index 4b784935..b86c52ab 100644 --- a/tests/common/test010/test010.diag_attributes.yaml +++ b/tests/common/test010/test010.diag_attributes.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 @@ -11,16 +11,16 @@ umode_start_address: 0x83000000 mappings: - - va: 0xC0020000 - pa: 0xC0020000 + va: 0xc0020000 + pa: 0xc0020000 xwr: "0b101" page_size: 0x1000 - num_pages: 2 + num_pages: 1 pma_memory_type: "wb" linker_script_section: ".text" - - va: 0xC0022000 - pa: 0xC0022000 + va: 0xc0021000 + pa: 0xc0021000 xwr: "0b011" page_size: 0x1000 num_pages: 1 @@ -31,8 +31,8 @@ mappings: # linker_script_section # The linker will generate a single section for these two mappings. # The missing page starting at 0xC0023000 will not have a page mapping. - va: 0xC0024000 - pa: 0xC0024000 + va: 0xc0023000 + pa: 0xc0023000 xwr: "0b011" page_size: 0x1000 num_pages: 1 diff --git a/tests/common/test011/test011.S b/tests/common/test011/test011.S index 4566014a..70979caa 100644 --- a/tests/common/test011/test011.S +++ b/tests/common/test011/test011.S @@ -1,4 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/common/test011/test011.c b/tests/common/test011/test011.c index c66592fa..4e790a56 100644 --- a/tests/common/test011/test011.c +++ b/tests/common/test011/test011.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" @@ -20,7 +22,7 @@ static void test011_exception_handler(void) { } int main(void) { - if (get_thread_attributes_hart_id_from_smode() != 0) { + if (get_thread_attributes_cpu_id_from_smode() != 0) { return DIAG_FAILED; } diff --git a/tests/common/test011/test011.diag_attributes.yaml b/tests/common/test011/test011.diag_attributes.yaml index 7e4eb90e..47622fbf 100644 --- a/tests/common/test011/test011.diag_attributes.yaml +++ b/tests/common/test011/test011.diag_attributes.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/common/test012/test012.c b/tests/common/test012/test012.c index ec6c2e23..e98d8952 100644 --- a/tests/common/test012/test012.c +++ b/tests/common/test012/test012.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "jumpstart.h" diff --git a/tests/common/test012/test012.diag_attributes.yaml b/tests/common/test012/test012.diag_attributes.yaml index e864c79b..8da84121 100644 --- a/tests/common/test012/test012.diag_attributes.yaml +++ b/tests/common/test012/test012.diag_attributes.yaml @@ -1,23 +1,19 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: - - va: 0xD0020000 - pa: 0xD0020000 xwr: "0b101" page_size: 0x1000 num_pages: 2 pma_memory_type: "wb" linker_script_section: ".text" - - va: 0xD0022000 - pa: 0xD0022000 xwr: "0b011" valid: "0b0" page_size: 0x1000 diff --git a/tests/common/test013/test013.c b/tests/common/test013/test013.c index bec8ccc0..66ec2d3e 100644 --- a/tests/common/test013/test013.c +++ b/tests/common/test013/test013.c @@ -1,13 +1,15 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" int main(void) { - uint8_t hart_id = get_thread_attributes_hart_id_from_smode(); - if (hart_id > 3) { + uint8_t cpu_id = get_thread_attributes_cpu_id_from_smode(); + if (cpu_id > 3) { return DIAG_FAILED; } diff --git a/tests/common/test013/test013.diag_attributes.yaml b/tests/common/test013/test013.diag_attributes.yaml index 5623569e..7faffe18 100644 --- a/tests/common/test013/test013.diag_attributes.yaml +++ b/tests/common/test013/test013.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1111" +active_cpu_mask: "0b1111" mappings: - diff --git a/tests/common/test014/test014.c b/tests/common/test014/test014.c index 9a40baf6..68e83d98 100644 --- a/tests/common/test014/test014.c +++ b/tests/common/test014/test014.c @@ -1,12 +1,14 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "jumpstart.h" int main(void) { - uint8_t hart_id = get_thread_attributes_hart_id_from_smode(); - if (hart_id == 2) { + uint8_t cpu_id = get_thread_attributes_cpu_id_from_smode(); + if (cpu_id == 2) { return DIAG_FAILED; } diff --git a/tests/common/test014/test014.diag_attributes.yaml b/tests/common/test014/test014.diag_attributes.yaml index 5623569e..b0ac2bd1 100644 --- a/tests/common/test014/test014.diag_attributes.yaml +++ b/tests/common/test014/test014.diag_attributes.yaml @@ -1,15 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1111" +active_cpu_mask: "0b1111" mappings: - - va: 0xC0020000 - pa: 0xC0020000 xwr: "0b101" page_size: 0x1000 num_pages: 1 diff --git a/tests/common/test017/test017.S b/tests/common/test017/test017.S index 7beb1909..c7874ca0 100644 --- a/tests/common/test017/test017.S +++ b/tests/common/test017/test017.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .global main main: diff --git a/tests/common/test017/test017.c b/tests/common/test017/test017.c index f28ba21f..0c87f63a 100644 --- a/tests/common/test017/test017.c +++ b/tests/common/test017/test017.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" @@ -51,11 +53,14 @@ void test017_illegal_instruction_handler(void) { int test017_main(void) { uint64_t main_function_address = (uint64_t)&main; + if (main_function_address != 0xC0020000) { + // If this check is broken then it's likely that some jumpstart runtime + // function hasn't been correctly tagged with __attr_mtext. return DIAG_FAILED; } - if (get_thread_attributes_hart_id_from_mmode() != 0) { + if (get_thread_attributes_cpu_id_from_mmode() != 0) { return DIAG_FAILED; } diff --git a/tests/common/test017/test017.diag_attributes.yaml b/tests/common/test017/test017.diag_attributes.yaml index 12fb1ceb..a03fc2cf 100644 --- a/tests/common/test017/test017.diag_attributes.yaml +++ b/tests/common/test017/test017.diag_attributes.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/common/test018/test018.S b/tests/common/test018/test018.S index ff1a8634..79ab86f1 100644 --- a/tests/common/test018/test018.S +++ b/tests/common/test018/test018.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" #define BYTES_TO_COPY (64 * 8) diff --git a/tests/common/test018/test018.c b/tests/common/test018/test018.c index cac1efef..01a981ff 100644 --- a/tests/common/test018/test018.c +++ b/tests/common/test018/test018.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" @@ -63,13 +65,21 @@ uint8_t c_check_passed_in_arguments(uint8_t a0, uint8_t a1, uint8_t a2, } int main(void) { + uint64_t main_function_address = (uint64_t)&main; + + if (main_function_address != 0xC0020000) { + // If this check is broken then it's likely that some jumpstart runtime + // function hasn't been correctly tagged with __attr_mtext. + return DIAG_FAILED; + } + if (MAX_NUM_CONTEXT_SAVES < 2) { // We need at least 2 smode context saves to run // run_function_in_smode(). return DIAG_FAILED; } - if (get_thread_attributes_hart_id_from_mmode() != 0) { + if (get_thread_attributes_cpu_id_from_mmode() != 0) { return DIAG_FAILED; } diff --git a/tests/common/test018/test018.diag_attributes.yaml b/tests/common/test018/test018.diag_attributes.yaml index 0724fae1..d54ca94b 100644 --- a/tests/common/test018/test018.diag_attributes.yaml +++ b/tests/common/test018/test018.diag_attributes.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/common/test019/test019.c b/tests/common/test019/test019.c index 1d172299..6867d9d5 100644 --- a/tests/common/test019/test019.c +++ b/tests/common/test019/test019.c @@ -1,12 +1,75 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "jumpstart.h" +// Separate sync points for each CPU combination +static uint32_t all_cpus_sync_point __attribute__((section(".data"))) = 0; +static uint32_t pair_01_sync_point __attribute__((section(".data"))) = 0; +static uint32_t pair_13_sync_point __attribute__((section(".data"))) = 0; +static uint32_t subset_012_sync_point __attribute__((section(".data"))) = 0; + int main(void) { - for (int i = 0; i < 10; ++i) { - sync_all_harts_from_smode(); + // Get current CPU ID + uint8_t cpu_id = get_thread_attributes_cpu_id_from_smode(); + + // Test 1: Original sync_all_cpus_from_smode() test + for (int i = 0; i < 5; ++i) { + sync_all_cpus_from_smode(); + } + + if (ACTIVE_CPU_MASK != 0xf) { + // We expect that all 4 cpus are active. + return DIAG_FAILED; + } + + // Test 2: sync_cpus_in_mask_from_smode() with all CPUs (should be equivalent + // to sync_all_cpus_from_smode) + for (int i = 0; i < 3; ++i) { + sync_cpus_in_mask_from_smode(ACTIVE_CPU_MASK, + (uint64_t)&all_cpus_sync_point); + } + + // Test 3: sync_cpus_in_mask_from_smode() with individual CPUs + // Each CPU syncs with itself only + uint64_t single_cpu_mask = 1UL << cpu_id; // Only this CPU + uint32_t single_cpu_sync_point = 0; + + for (int i = 0; i < 2; ++i) { + sync_cpus_in_mask_from_smode(single_cpu_mask, + (uint64_t)&single_cpu_sync_point); + } + + // Test 4: sync_cpus_in_mask_from_smode() with pairs of CPUs + // CPU 0 and 1 sync together + if (cpu_id == 0 || cpu_id == 1) { + uint64_t pair_mask = 0x3; // 0b0011 - CPUs 0 and 1 + + for (int i = 0; i < 2; ++i) { + sync_cpus_in_mask_from_smode(pair_mask, (uint64_t)&pair_01_sync_point); + } + } + + // Test 5: sync_cpus_in_mask_from_smode() with CPUs 1 and 3 + if (cpu_id == 1 || cpu_id == 3) { + uint64_t pair_mask = 0xA; // 0b1010 - CPUs 1 and 3 + + for (int i = 0; i < 2; ++i) { + sync_cpus_in_mask_from_smode(pair_mask, (uint64_t)&pair_13_sync_point); + } + } + + // Test 6: sync_cpus_in_mask_from_smode() 
with subset (CPUs 0, 1, 2) + if (cpu_id <= 2) { + uint64_t subset_mask = 0x7; // 0b0111 - CPUs 0, 1, 2 + + for (int i = 0; i < 2; ++i) { + sync_cpus_in_mask_from_smode(subset_mask, + (uint64_t)&subset_012_sync_point); + } } return DIAG_PASSED; diff --git a/tests/common/test019/test019.diag_attributes.yaml b/tests/common/test019/test019.diag_attributes.yaml index fc5ba17c..9fd370de 100644 --- a/tests/common/test019/test019.diag_attributes.yaml +++ b/tests/common/test019/test019.diag_attributes.yaml @@ -1,23 +1,19 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1111" +active_cpu_mask: "0b1111" mappings: - - va: 0xC0020000 - pa: 0xC0020000 xwr: "0b101" page_size: 0x1000 num_pages: 1 pma_memory_type: "wb" linker_script_section: ".text" - - va: 0xC0022000 - pa: 0xC0022000 xwr: "0b011" page_size: 0x1000 num_pages: 1 diff --git a/tests/common/test020/test020.c b/tests/common/test020/test020.c index 165a2ebb..5312f03f 100644 --- a/tests/common/test020/test020.c +++ b/tests/common/test020/test020.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" @@ -21,7 +23,7 @@ int main(void) { return DIAG_FAILED; } - if (xlate_info.satp_mode != VM_1_10_SV39) { + if (xlate_info.xatp_mode != VM_1_10_SV39) { return DIAG_FAILED; } @@ -52,5 +54,30 @@ int main(void) { return DIAG_FAILED; } + translate_VA(0xC0022000, &xlate_info); + if (xlate_info.walk_successful != 1) { + return DIAG_FAILED; + } + if (xlate_info.pbmt_mode != PTE_PBMT_IO) { + return DIAG_FAILED; + } + + translate_VA(0xC0023000, &xlate_info); + if (xlate_info.walk_successful != 1) { + return DIAG_FAILED; + } + if (xlate_info.pbmt_mode != PTE_PBMT_NC) { + return DIAG_FAILED; + } + + // The default PBMT mode is PMA if not specified. + translate_VA(0xC0024000, &xlate_info); + if (xlate_info.walk_successful != 1) { + return DIAG_FAILED; + } + if (xlate_info.pbmt_mode != PTE_PBMT_PMA) { + return DIAG_FAILED; + } + return DIAG_PASSED; } diff --git a/tests/common/test020/test020.diag_attributes.yaml b/tests/common/test020/test020.diag_attributes.yaml index b414e7ad..eaf975fa 100644 --- a/tests/common/test020/test020.diag_attributes.yaml +++ b/tests/common/test020/test020.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" allow_page_table_modifications: true @@ -16,6 +16,7 @@ mappings: page_size: 0x1000 num_pages: 1 pma_memory_type: "wb" + pbmt_mode: "pma" linker_script_section: ".text" - va: 0xC0021000 @@ -25,4 +26,31 @@ mappings: page_size: 0x1000 num_pages: 1 pma_memory_type: "wb" + pbmt_mode: "pma" linker_script_section: ".data" + - + va: 0xC0022000 + pa: 0xC0022000 + xwr: "0b011" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + pbmt_mode: "io" + + - + va: 0xC0023000 + pa: 0xC0023000 + xwr: "0b011" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + pbmt_mode: "nc" + + # The default PBMT mode is PMA if not specified. + - + va: 0xC0024000 + pa: 0xC0024000 + xwr: "0b011" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" diff --git a/tests/common/test021/test021.S b/tests/common/test021/test021.S index 231031e3..15f4169c 100644 --- a/tests/common/test021/test021.S +++ b/tests/common/test021/test021.S @@ -1,4 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/common/test021/test021.c b/tests/common/test021/test021.c index cc754173..1fc57028 100644 --- a/tests/common/test021/test021.c +++ b/tests/common/test021/test021.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ /* Restoring translation-data coherence: @@ -9,10 +11,10 @@ Restoring translation-data coherence: Initial condition: PTE(X) = (OA=PA_X, V=0) -Hart0’s instructions: +CPU0’s instructions: (H0.0) Store (OA=PA_X) to PTE(X) -Hart1’s instructions: +CPU1’s instructions: (H1.0) Load from PTE(X) (H1.1) Execute an SFENCE.VMA (H1.2) Load from X @@ -31,10 +33,10 @@ uint8_t is_load_allowed_to_data_area(void); extern uint64_t data_area; uint64_t data_area_address = (uint64_t)&data_area; -void hart1_load_page_fault_handler(void); -void hart1_load_page_fault_handler(void) { - uint8_t hart_id = get_thread_attributes_hart_id_from_smode(); - if (hart_id != 1) { +void cpu1_load_page_fault_handler(void); +void cpu1_load_page_fault_handler(void) { + uint8_t cpu_id = get_thread_attributes_cpu_id_from_smode(); + if (cpu_id != 1) { jumpstart_smode_fail(); } @@ -49,8 +51,8 @@ void hart1_load_page_fault_handler(void) { } int main(void) { - uint8_t hart_id = get_thread_attributes_hart_id_from_smode(); - if (hart_id > 1) { + uint8_t cpu_id = get_thread_attributes_cpu_id_from_smode(); + if (cpu_id > 1) { return DIAG_FAILED; } @@ -64,18 +66,18 @@ int main(void) { return DIAG_FAILED; } - if (hart_id == 1) { + if (cpu_id == 1) { register_smode_trap_handler_override( - RISCV_EXCP_LOAD_PAGE_FAULT, (uint64_t)(&hart1_load_page_fault_handler)); + RISCV_EXCP_LOAD_PAGE_FAULT, (uint64_t)(&cpu1_load_page_fault_handler)); if (is_load_allowed_to_data_area() == 1) { return DIAG_FAILED; } } - sync_all_harts_from_smode(); + sync_all_cpus_from_smode(); - if (hart_id == 0) { + if (cpu_id == 0) { *((uint64_t *)xlate_info.pte_address[2]) = xlate_info.pte_value[2] | PTE_V; asm volatile("sfence.vma"); } else { diff --git a/tests/common/test021/test021.diag_attributes.yaml b/tests/common/test021/test021.diag_attributes.yaml index 34e80bf5..87726b4e 100644 --- a/tests/common/test021/test021.diag_attributes.yaml +++ b/tests/common/test021/test021.diag_attributes.yaml @@ 
-1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b11" +active_cpu_mask: "0b11" allow_page_table_modifications: true diff --git a/tests/common/test022/test022.c b/tests/common/test022/test022.c index d8f3ae07..caf64afe 100644 --- a/tests/common/test022/test022.c +++ b/tests/common/test022/test022.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "jumpstart.h" diff --git a/tests/common/test022/test022.diag_attributes.yaml b/tests/common/test022/test022.diag_attributes.yaml index db6e9747..941163b9 100644 --- a/tests/common/test022/test022.diag_attributes.yaml +++ b/tests/common/test022/test022.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: - diff --git a/tests/common/test023/test023.S b/tests/common/test023/test023.S index 32ba4953..8d3354f1 100644 --- a/tests/common/test023/test023.S +++ b/tests/common/test023/test023.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .section .text.smode, "ax" diff --git a/tests/common/test023/test023.c b/tests/common/test023/test023.c index 11559c80..9741b329 100644 --- a/tests/common/test023/test023.c +++ b/tests/common/test023/test023.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. 
-// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" diff --git a/tests/common/test023/test023.diag_attributes.yaml b/tests/common/test023/test023.diag_attributes.yaml index 0724fae1..d54ca94b 100644 --- a/tests/common/test023/test023.diag_attributes.yaml +++ b/tests/common/test023/test023.diag_attributes.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/common/test026/test026.S b/tests/common/test026/test026.S index d40d6768..ae4ec211 100644 --- a/tests/common/test026/test026.S +++ b/tests/common/test026/test026.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" #define MAGIC_VALUE 0xcafecafecafecafe diff --git a/tests/common/test026/test026.c b/tests/common/test026/test026.c index 792bdebf..12b772a8 100644 --- a/tests/common/test026/test026.c +++ b/tests/common/test026/test026.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" @@ -11,6 +13,14 @@ extern uint64_t load_from_address(uint64_t address); uint8_t PA_access_faulted = 0; +__attribute__((section(".text_safe"))) __attribute__((noinline)) static uint64_t +load_with_disabled_mmu(uint64_t addr) { + disable_mmu_from_smode(); + uint64_t val = *(uint64_t *)addr; + setup_mmu_from_smode(); + return val; +} + static void skip_instruction(void) { uint64_t reg = get_sepc_for_current_exception(); @@ -31,7 +41,7 @@ int main(void) { const uint64_t VA = UINT64_C(0xC0033000); const uint64_t PA = UINT64_C(0xC0043000); uint64_t data_area_address = (uint64_t)&data_area; - if (data_area_address != PA) { + if (data_area_address != VA) { return DIAG_FAILED; } @@ -88,10 +98,8 @@ int main(void) { return DIAG_FAILED; } - disable_mmu_from_smode(); - // PA access should now succeed with the MMU off. - uint64_t value_at_PA = load_from_address(PA); + uint64_t value_at_PA = load_with_disabled_mmu(PA); if (value_at_PA != new_magic_value) { return DIAG_FAILED; } diff --git a/tests/common/test026/test026.diag_attributes.yaml b/tests/common/test026/test026.diag_attributes.yaml index 0a81dd78..5251065c 100644 --- a/tests/common/test026/test026.diag_attributes.yaml +++ b/tests/common/test026/test026.diag_attributes.yaml @@ -1,14 +1,14 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: - - va: 0xc0020000 + va: 0xd0000000 pa: 0xc0020000 xwr: "0b101" page_size: 0x1000 @@ -16,14 +16,21 @@ mappings: pma_memory_type: "wb" linker_script_section: ".text" - - va: 0xc0023000 + va: 0xd0002000 pa: 0xc0023000 xwr: "0b011" page_size: 0x1000 num_pages: 1 pma_memory_type: "wb" linker_script_section: ".data" - + - + va: 0xc0024000 + pa: 0xc0024000 + xwr: "0b101" + page_size: 0x1000 + num_pages: 2 + pma_memory_type: "wb" + linker_script_section: ".text_safe" - va: 0xc0033000 pa: 0xc0043000 diff --git a/tests/common/test027/test027.S b/tests/common/test027/test027.S index 27c76e19..2904793e 100644 --- a/tests/common/test027/test027.S +++ b/tests/common/test027/test027.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .section .text, "ax" diff --git a/tests/common/test027/test027.c b/tests/common/test027/test027.c index c3d96332..87b44672 100644 --- a/tests/common/test027/test027.c +++ b/tests/common/test027/test027.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" @@ -15,8 +17,9 @@ int main(void) { const uint64_t rw_VA_alias = UINT64_C(0xC0033000); const uint64_t ro_VA_alias = UINT64_C(0xC0053000); const uint64_t PA = UINT64_C(0xC0043000); + const uint64_t VA = UINT64_C(0xC0033000); uint64_t data_area_address = (uint64_t)&data_area; - if (data_area_address != PA) { + if (data_area_address != VA) { return DIAG_FAILED; } diff --git a/tests/common/test027/test027.diag_attributes.yaml b/tests/common/test027/test027.diag_attributes.yaml index a42b35df..33881e49 100644 --- a/tests/common/test027/test027.diag_attributes.yaml +++ b/tests/common/test027/test027.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: - diff --git a/tests/common/test028/meson_option_overrides.yaml b/tests/common/test028/meson_option_overrides.yaml new file mode 100644 index 00000000..67ef6fc8 --- /dev/null +++ b/tests/common/test028/meson_option_overrides.yaml @@ -0,0 +1,5 @@ +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +spike_additional_arguments: ["-m0x80000000:0x40000000,0xC0000000:0x1000,0xC0020000:0x4000,0xD0000000:0x400000,0xE0000000:0x401000,0xD0400000:0x200000,0x100000000:0x4000000,0x140000000:0x40000000"] diff --git a/tests/common/test028/test028.S b/tests/common/test028/test028.S index 50b3ada1..0ea0b347 100644 --- a/tests/common/test028/test028.S +++ b/tests/common/test028/test028.S @@ -1,4 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/common/test028/test028.c b/tests/common/test028/test028.c index dfdab4c8..583ce670 100644 --- a/tests/common/test028/test028.c +++ b/tests/common/test028/test028.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" diff --git a/tests/common/test028/test028.diag_attributes.yaml b/tests/common/test028/test028.diag_attributes.yaml index 77f0beac..ec552800 100644 --- a/tests/common/test028/test028.diag_attributes.yaml +++ b/tests/common/test028/test028.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: - diff --git a/tests/common/test029/meson_option_overrides.yaml b/tests/common/test029/meson_option_overrides.yaml new file mode 100644 index 00000000..67ef6fc8 --- /dev/null +++ b/tests/common/test029/meson_option_overrides.yaml @@ -0,0 +1,5 @@ +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +spike_additional_arguments: ["-m0x80000000:0x40000000,0xC0000000:0x1000,0xC0020000:0x4000,0xD0000000:0x400000,0xE0000000:0x401000,0xD0400000:0x200000,0x100000000:0x4000000,0x140000000:0x40000000"] diff --git a/tests/common/test029/test029.S b/tests/common/test029/test029.S index 50b3ada1..0ea0b347 100644 --- a/tests/common/test029/test029.S +++ b/tests/common/test029/test029.S @@ -1,4 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/common/test029/test029.c b/tests/common/test029/test029.c index dfdab4c8..583ce670 100644 --- a/tests/common/test029/test029.c +++ b/tests/common/test029/test029.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" diff --git a/tests/common/test029/test029.diag_attributes.yaml b/tests/common/test029/test029.diag_attributes.yaml index fe28c560..fa8e86d8 100644 --- a/tests/common/test029/test029.diag_attributes.yaml +++ b/tests/common/test029/test029.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv48" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: diff --git a/tests/common/test030/test030.c b/tests/common/test030/test030.c index c4076637..7bc0bf98 100644 --- a/tests/common/test030/test030.c +++ b/tests/common/test030/test030.c @@ -1,14 +1,22 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "heap.smode.h" #include "jumpstart.h" #include "tablewalk.smode.h" -extern uint64_t _JUMPSTART_SMODE_HEAP_START; -extern uint64_t _JUMPSTART_SMODE_HEAP_END; +#include +#include + +// memalign is not in standard C, declare it here +void *memalign(size_t alignment, size_t size); + +extern uint64_t _JUMPSTART_CPU_SMODE_HEAP_START; +extern uint64_t _JUMPSTART_CPU_SMODE_HEAP_END; int test_malloc(void); int test_calloc(void); int test_memalign(void); @@ -22,8 +30,8 @@ int test_memset(void); #define ARRAY_LEN 10 int test_malloc(void) { - const uint64_t max_heap_size = (uint64_t)&_JUMPSTART_SMODE_HEAP_END - - (uint64_t)&_JUMPSTART_SMODE_HEAP_START; + const uint64_t max_heap_size = (uint64_t)&_JUMPSTART_CPU_SMODE_HEAP_END - + (uint64_t)&_JUMPSTART_CPU_SMODE_HEAP_START; uint8_t *x8 = malloc(sizeof(uint8_t)); if (x8 == 0) { diff --git a/tests/common/test030/test030.diag_attributes.yaml b/tests/common/test030/test030.diag_attributes.yaml index 801f88f4..fecd50ad 100644 --- a/tests/common/test030/test030.diag_attributes.yaml +++ b/tests/common/test030/test030.diag_attributes.yaml @@ -1,10 +1,12 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" + +enable_heap: true mappings: - diff --git a/tests/common/test031/test031.c b/tests/common/test031/test031.c index 3e3485bd..e93b5245 100644 --- a/tests/common/test031/test031.c +++ b/tests/common/test031/test031.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" @@ -30,7 +32,7 @@ static void update_variables(uint8_t tid) { } int main(void) { - uint8_t tid = get_thread_attributes_hart_id_from_smode(); + uint8_t tid = get_thread_attributes_cpu_id_from_smode(); if (tid > 3) { return DIAG_FAILED; } @@ -46,7 +48,7 @@ int main(void) { release_lock(&lock); } - sync_all_harts_from_smode(); + sync_all_cpus_from_smode(); // Check final value if (new != NUM_ITER * (0 + 1 + 2 + 3)) { diff --git a/tests/common/test031/test031.diag_attributes.yaml b/tests/common/test031/test031.diag_attributes.yaml index 517032db..8bab8202 100644 --- a/tests/common/test031/test031.diag_attributes.yaml +++ b/tests/common/test031/test031.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1111" +active_cpu_mask: "0b1111" mappings: - diff --git a/tests/common/test033/test033.c b/tests/common/test033/test033.c index 7df1ed34..f33642f1 100644 --- a/tests/common/test033/test033.c +++ b/tests/common/test033/test033.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "jumpstart.h" diff --git a/tests/common/test033/test033.diag_attributes.yaml b/tests/common/test033/test033.diag_attributes.yaml index 526906c9..e7065fe5 100644 --- a/tests/common/test033/test033.diag_attributes.yaml +++ b/tests/common/test033/test033.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: - diff --git a/tests/common/test034/test034.c b/tests/common/test034/test034.c deleted file mode 100644 index 3e3485bd..00000000 --- a/tests/common/test034/test034.c +++ /dev/null @@ -1,57 +0,0 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "cpu_bits.h" -#include "jumpstart.h" -#include "lock.smode.h" - -#define NUM_ITER 100 - -spinlock_t lock = 0; - -uint8_t last_visitor = 0xFF; -uint64_t old = 0; -uint64_t new = 0; - -static uint8_t check_variables(void); -static void update_variables(uint8_t tid); - -static uint8_t check_variables(void) { - // If only one visitor enters the critical section at any given time this - // invariant will evaluate to true - return new == (old + last_visitor); -} - -static void update_variables(uint8_t tid) { - old = new; - new = old + tid; - last_visitor = tid; -} - -int main(void) { - uint8_t tid = get_thread_attributes_hart_id_from_smode(); - if (tid > 3) { - return DIAG_FAILED; - } - - for (uint8_t i = 0; i < NUM_ITER; i++) { - acquire_lock(&lock); - - if (last_visitor != 0xFF && !check_variables()) { - return DIAG_FAILED; - } - - update_variables(tid); - release_lock(&lock); - } - - sync_all_harts_from_smode(); - - // Check final value - if (new != NUM_ITER * (0 + 1 + 2 + 3)) { - return DIAG_FAILED; - } - - return DIAG_PASSED; -} diff --git a/tests/common/test036/test036.S b/tests/common/test036/test036.S index 1902c6df..b1d5499d 100644 --- a/tests/common/test036/test036.S +++ b/tests/common/test036/test036.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .section .text, "ax" diff --git a/tests/common/test036/test036.c b/tests/common/test036/test036.c index c37e0de4..d6dbb639 100644 --- a/tests/common/test036/test036.c +++ b/tests/common/test036/test036.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" diff --git a/tests/common/test036/test036.diag_attributes.yaml b/tests/common/test036/test036.diag_attributes.yaml index 36bf9ca0..9ded2e84 100644 --- a/tests/common/test036/test036.diag_attributes.yaml +++ b/tests/common/test036/test036.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv48" -active_hart_mask: "0b0001" +active_cpu_mask: "0b0001" mappings: - diff --git a/tests/common/test037/test037.S b/tests/common/test037/test037.S index ac906d07..4e10fffb 100644 --- a/tests/common/test037/test037.S +++ b/tests/common/test037/test037.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .section .text, "ax" diff --git a/tests/common/test037/test037.c b/tests/common/test037/test037.c index 6ecf4ddd..2d7500df 100644 --- a/tests/common/test037/test037.c +++ b/tests/common/test037/test037.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" diff --git a/tests/common/test037/test037.diag_attributes.yaml b/tests/common/test037/test037.diag_attributes.yaml index c4af18be..ca5cdc9f 100644 --- a/tests/common/test037/test037.diag_attributes.yaml +++ b/tests/common/test037/test037.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: - diff --git a/tests/common/test038/test038.S b/tests/common/test038/test038.S index f57ec206..7adbd4f6 100644 --- a/tests/common/test038/test038.S +++ b/tests/common/test038/test038.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .section .text.smode, "ax" @@ -42,6 +47,6 @@ asm_check_passed_in_arguments_return: .section .data.smode, "aw" -.global non_primary_hart_done -non_primary_hart_done: +.global non_primary_cpu_done +non_primary_cpu_done: .byte 0x0 diff --git a/tests/common/test038/test038.c b/tests/common/test038/test038.c index 1faeb83f..e54368c5 100644 --- a/tests/common/test038/test038.c +++ b/tests/common/test038/test038.c @@ -1,17 +1,21 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include #include "cpu_bits.h" #include "heap.smode.h" #include "jumpstart.h" -// We have smode init code that has to be run by one of the harts. -// This test has the non-primary hart run smode code after starting in mmode +// We have smode init code that has to be run by one of the cpus. 
+// This test has the non-primary cpu run smode code after starting in mmode // to make sure that the initialization is done irrespective of which core // runs the smode code. -extern volatile uint8_t non_primary_hart_done; +extern volatile uint8_t non_primary_cpu_done; uint8_t asm_check_passed_in_arguments(uint8_t a0, uint8_t a1, uint8_t a2, uint8_t a3, uint8_t a4, uint8_t a5, @@ -90,8 +94,8 @@ static int test_run_function_in_smode(void) { } int main(void) { - uint8_t hart_id = get_thread_attributes_hart_id_from_mmode(); - if (hart_id > 1) { + uint8_t cpu_id = get_thread_attributes_cpu_id_from_mmode(); + if (cpu_id > 1) { return DIAG_FAILED; } @@ -104,7 +108,7 @@ int main(void) { return DIAG_FAILED; } - if (hart_id != PRIMARY_HART_ID) { + if (cpu_id != PRIMARY_CPU_ID) { // We haven't run any smode code so the smode setup should not be done. if (get_thread_attributes_smode_setup_done_from_mmode() != 0) { return DIAG_FAILED; @@ -114,10 +118,10 @@ int main(void) { return DIAG_FAILED; } - non_primary_hart_done = 1; + non_primary_cpu_done = 1; } else { - while (non_primary_hart_done == 0) { - // Wait for the non-primary hart to finish. + while (non_primary_cpu_done == 0) { + // Wait for the non-primary cpu to finish. } // We haven't run any smode code so the smode setup should not be done. diff --git a/tests/common/test038/test038.diag_attributes.yaml b/tests/common/test038/test038.diag_attributes.yaml index 185d213c..a02ab42b 100644 --- a/tests/common/test038/test038.diag_attributes.yaml +++ b/tests/common/test038/test038.diag_attributes.yaml @@ -1,12 +1,11 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 -active_hart_mask: "0b11" - +active_cpu_mask: "0b11" satp_mode: "sv39" - start_test_in_mmode: True +enable_heap: True mappings: - diff --git a/tests/common/test039/test039.c b/tests/common/test039/test039.c deleted file mode 100644 index 9ae7faef..00000000 --- a/tests/common/test039/test039.c +++ /dev/null @@ -1,209 +0,0 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "cpu_bits.h" -#include "heap.smode.h" -#include "jumpstart.h" -/* -Multithreaded Malloc Test: - -In this test, we perform "ALLOCS_PER_HART" memory allocations for -"NUM_ITERATION" iterations. we store the pointer of all memory allocation for -every hart/iteration in a allocation table. -We expect all the pointers across harts for a given iteration to be unique. -*/ - -#define NUM_INTERATIONS 8 -#define ALLOCS_PER_HART 12 -#define HEAP_STRUCT_PADDING 16 -extern uint64_t _JUMPSTART_SMODE_HEAP_START; -extern uint64_t _JUMPSTART_SMODE_HEAP_END; -// Sorted in ascending order -const uint64_t alloc_sizes[] = {8, 16, 32, 48, 64}; -const uint64_t aligns[] = {0x8, 0x10, 0x80}; -#define ARRAY_LEN(arr, type) (sizeof(arr) / sizeof(type)) - -void *allocated[MAX_NUM_HARTS_SUPPORTED][NUM_INTERATIONS][ALLOCS_PER_HART] = { - 0}; - -static uint64_t allocation_entropy(uint64_t seed_hash, uint64_t hart_id, - uint64_t iter, uint64_t alloc_index) { - uint64_t hash = seed_hash; - const uint64_t magic = 0x9e3779b9; - hash ^= hart_id + magic + (hash << 6) + (hash >> 2); - hash ^= iter + magic + (hash << 6) + (hash >> 2); - hash ^= alloc_index + magic + (hash << 6) + (hash >> 2); - return hash; -} - -static uint64_t get_allocation_size(uint64_t hart_id, uint64_t iter, - uint64_t alloc_index) { - uint64_t hash = allocation_entropy(0, hart_id, iter, alloc_index); - return alloc_sizes[hash % ARRAY_LEN(alloc_sizes, uint64_t)]; -} - -static uint64_t get_allocation_align(uint64_t hart_id, uint64_t iter, - uint64_t alloc_index) { - 
uint64_t hash = allocation_entropy(0, hart_id, iter, alloc_index); - hash = allocation_entropy(hash, hart_id, iter, alloc_index); - return aligns[hash % ARRAY_LEN(aligns, uint64_t)]; -} - -static int make_allocations(uint64_t hart_id, int iter) { - for (int j = 0; j < ALLOCS_PER_HART; j++) { - uint64_t size = get_allocation_size(hart_id, (uint64_t)iter, (uint64_t)j); - void *ptr = malloc(size); - if (ptr == 0) { - return DIAG_FAILED; - } - memset(ptr, (int)hart_id, size); - allocated[hart_id][iter][j] = ptr; - } - return DIAG_PASSED; -} - -static int make_callocations(uint64_t hart_id, int iter) { - for (int j = 0; j < ALLOCS_PER_HART; j++) { - uint64_t size = get_allocation_size(hart_id, (uint64_t)iter, (uint64_t)j); - void *ptr = calloc(1, size); - if (ptr == 0) { - return DIAG_FAILED; - } - memset(ptr, (int)hart_id, size); - allocated[hart_id][iter][j] = ptr; - } - return DIAG_PASSED; -} - -static int make_aligned_allocations(uint64_t hart_id, int iter) { - for (int j = 0; j < ALLOCS_PER_HART; j++) { - uint64_t size = get_allocation_size(hart_id, (uint64_t)iter, (uint64_t)j); - uint64_t align = get_allocation_align(hart_id, (uint64_t)iter, (uint64_t)j); - void *ptr = memalign(align, size); - if (ptr == 0) { - return DIAG_FAILED; - } - memset(ptr, (int)hart_id, size); - allocated[hart_id][iter][j] = ptr; - } - return DIAG_PASSED; -} - -static void cleanup_test(uint64_t hart_id) { - for (int iter = 0; iter < NUM_INTERATIONS; iter++) { - for (int j = 0; j < ALLOCS_PER_HART; j++) { - free(allocated[hart_id][iter][j]); - } - } - return; -} -// Free only some of the allocations to force uneven work across harts. 
-static void free_some_allocations(uint64_t hart_id, int iter) { - for (int j = 0; j < ALLOCS_PER_HART; j++) { - uint64_t hash = allocation_entropy(0, hart_id, (uint64_t)iter, (uint64_t)j); - if (hash % 3 > 0) { - free(allocated[hart_id][iter][j]); - } - } - return; -} - -static int test_allocations(uint64_t hart_id, int iter) { - for (int j = 0; j < ALLOCS_PER_HART; j++) { - uint8_t *ptr = (uint8_t *)allocated[hart_id][iter][j]; - uint64_t size = get_allocation_size(hart_id, (uint64_t)iter, (uint64_t)j); - for (uint64_t x = 0; x < size; x++) { - if (ptr[x] != hart_id) { - return DIAG_FAILED; - } - } - } - return DIAG_PASSED; -} - -int test_malloc(uint64_t hart_id) { - // Make sure all hart start at the same time - sync_all_harts_from_smode(); - for (int i = 0; i < NUM_INTERATIONS; i++) { - if (make_allocations(hart_id, i) != DIAG_PASSED) { - return DIAG_FAILED; - } - if (test_allocations(hart_id, i) != DIAG_PASSED) { - return DIAG_FAILED; - } - free_some_allocations(hart_id, i); - } - sync_all_harts_from_smode(); - cleanup_test(hart_id); - return DIAG_PASSED; -} - -int test_calloc(uint64_t hart_id) { - // Make sure all hart start at the same time - sync_all_harts_from_smode(); - for (int i = 0; i < NUM_INTERATIONS; i++) { - if (make_callocations(hart_id, i) != DIAG_PASSED) { - return DIAG_FAILED; - } - if (test_allocations(hart_id, i) != DIAG_PASSED) { - return DIAG_FAILED; - } - free_some_allocations(hart_id, i); - } - sync_all_harts_from_smode(); - cleanup_test(hart_id); - return DIAG_PASSED; -} - -int test_memalign(uint64_t hart_id) { - // Make sure all hart start at the same time - sync_all_harts_from_smode(); - for (int i = 0; i < NUM_INTERATIONS; i++) { - if (make_aligned_allocations(hart_id, i) != DIAG_PASSED) { - return DIAG_FAILED; - } - if (test_allocations(hart_id, i) != DIAG_PASSED) { - return DIAG_FAILED; - } - free_some_allocations(hart_id, i); - } - sync_all_harts_from_smode(); - cleanup_test(hart_id); - return DIAG_PASSED; -} - -static int 
check_heap_size(void) { - // This check ensures that all planned allocation for the worst case will fit - // in available heap size. - const uint64_t max_heap_size = (uint64_t)&_JUMPSTART_SMODE_HEAP_END - - (uint64_t)&_JUMPSTART_SMODE_HEAP_START; - const uint64_t max_align = aligns[ARRAY_LEN(aligns, uint64_t) - 1]; - const uint64_t max_alloc = alloc_sizes[ARRAY_LEN(alloc_sizes, uint64_t) - 1]; - if (max_heap_size / max_align / ALLOCS_PER_HART / NUM_INTERATIONS / - MAX_NUM_HARTS_SUPPORTED < - (max_alloc + HEAP_STRUCT_PADDING)) { - return DIAG_FAILED; - } - return DIAG_PASSED; -} - -int main(void) { - uint64_t hart_id = get_thread_attributes_hart_id_from_smode(); - if (hart_id > MAX_NUM_HARTS_SUPPORTED) { - return DIAG_FAILED; - } - if (check_heap_size() != DIAG_PASSED) { - return DIAG_FAILED; - } - if (test_malloc(hart_id) != DIAG_PASSED) { - return DIAG_FAILED; - } - if (test_calloc(hart_id) != DIAG_PASSED) { - return DIAG_FAILED; - } - if (test_memalign(hart_id) != DIAG_PASSED) { - return DIAG_FAILED; - } - return DIAG_PASSED; -} diff --git a/tests/common/test040/test040.S b/tests/common/test040/test040.S index 7d7bf247..034bb05b 100644 --- a/tests/common/test040/test040.S +++ b/tests/common/test040/test040.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .global main main: diff --git a/tests/common/test040/test040.c b/tests/common/test040/test040.c index 2548de78..4dc8bdd8 100644 --- a/tests/common/test040/test040.c +++ b/tests/common/test040/test040.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" diff --git a/tests/common/test040/test040.diag_attributes.yaml b/tests/common/test040/test040.diag_attributes.yaml index cd17942d..6e3147c0 100644 --- a/tests/common/test040/test040.diag_attributes.yaml +++ b/tests/common/test040/test040.diag_attributes.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/common/test041/test041.S b/tests/common/test041/test041.S index 89a84437..ae92df45 100644 --- a/tests/common/test041/test041.S +++ b/tests/common/test041/test041.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .global illegal_instruction_function illegal_instruction_function: diff --git a/tests/common/test041/test041.c b/tests/common/test041/test041.c index 13badb7c..268ec7e9 100644 --- a/tests/common/test041/test041.c +++ b/tests/common/test041/test041.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ // This is a copy of test003 with one extra nested exception that should // cause this to fail. diff --git a/tests/common/test041/test041.diag_attributes.yaml b/tests/common/test041/test041.diag_attributes.yaml index e864c79b..df04c935 100644 --- a/tests/common/test041/test041.diag_attributes.yaml +++ b/tests/common/test041/test041.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" mappings: - diff --git a/tests/common/test042/test042.S b/tests/common/test042/test042.S index 1fbbb7e6..5d334826 100644 --- a/tests/common/test042/test042.S +++ b/tests/common/test042/test042.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .global test003_illegal_instruction_function test003_illegal_instruction_function: diff --git a/tests/common/test042/test042.c b/tests/common/test042/test042.c index 93440d68..e188c698 100644 --- a/tests/common/test042/test042.c +++ b/tests/common/test042/test042.c @@ -1,12 +1,14 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" // 4P version of test003 which nests as many exceptions as allowed in smode. -// The harts sync up after they've each reached the max number of nested +// The cpus sync up after they've each reached the max number of nested // exceptions. 
void test003_illegal_instruction_handler(void); @@ -23,17 +25,17 @@ void test003_illegal_instruction_handler(void) { jumpstart_smode_fail(); } - uint8_t hart_id = get_thread_attributes_hart_id_from_smode(); + uint8_t cpu_id = get_thread_attributes_cpu_id_from_smode(); - --num_context_saves_to_take[hart_id]; + --num_context_saves_to_take[cpu_id]; - if (num_context_saves_to_take[hart_id] != + if (num_context_saves_to_take[cpu_id] != get_thread_attributes_num_context_saves_remaining_in_smode_from_smode()) { jumpstart_smode_fail(); } - if (num_context_saves_to_take[hart_id] > 0) { - if (num_context_saves_to_take[hart_id] % 2) { + if (num_context_saves_to_take[cpu_id] > 0) { + if (num_context_saves_to_take[cpu_id] % 2) { if (alt_test003_illegal_instruction_function() != DIAG_PASSED) { jumpstart_smode_fail(); } @@ -43,10 +45,10 @@ void test003_illegal_instruction_handler(void) { } } } else { - // the hart has used up all the context saves. Sync up all the harts + // the cpu has used up all the context saves. Sync up all the cpus // so any issue with the save/restore of the context is caught on the // unwind. - sync_all_harts_from_smode(); + sync_all_cpus_from_smode(); } if (get_thread_attributes_current_mode_from_smode() != PRV_S) { @@ -57,7 +59,7 @@ void test003_illegal_instruction_handler(void) { } int main(void) { - if (get_thread_attributes_hart_id_from_smode() > 3) { + if (get_thread_attributes_cpu_id_from_smode() > 3) { return DIAG_FAILED; } diff --git a/tests/common/test042/test042.diag_attributes.yaml b/tests/common/test042/test042.diag_attributes.yaml index 320fe94a..f43f2263 100644 --- a/tests/common/test042/test042.diag_attributes.yaml +++ b/tests/common/test042/test042.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1111" +active_cpu_mask: "0b1111" mappings: - @@ -19,7 +19,6 @@ mappings: va: 0xD0022000 pa: 0xD0022000 xwr: "0b011" - valid: "0b0" page_size: 0x1000 num_pages: 1 pma_memory_type: "wb" diff --git a/tests/common/test043/test043.S b/tests/common/test043/test043.S deleted file mode 100644 index 560cdfbb..00000000 --- a/tests/common/test043/test043.S +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. -# -# SPDX-License-Identifier: Apache-2.0 - -.section .jumpstart.text.mmode.init, "ax" - -#padding init area with a whole 4K page to test failure -.global aaa__dummy_array -aaa__dummy_array: - .space 4096 diff --git a/tests/common/test043/test043.c b/tests/common/test043/test043.c deleted file mode 100644 index 54c6f458..00000000 --- a/tests/common/test043/test043.c +++ /dev/null @@ -1,9 +0,0 @@ -// SPDX-FileCopyrightText: 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "jumpstart.h" - -int main() { - return DIAG_PASSED; -} diff --git a/tests/common/test043/test043.diag_attributes.yaml b/tests/common/test043/test043.diag_attributes.yaml deleted file mode 100644 index 6704940c..00000000 --- a/tests/common/test043/test043.diag_attributes.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 - -satp_mode: "sv39" - -# Override default M-mode, S-mode, U-mode start address -mmode_start_address: 0x81000000 -smode_start_address: 0x82000000 -umode_start_address: 0x83000000 - -mappings: - - - va: 0xC0020000 - pa: 0xC0020000 - xwr: "0b101" - page_size: 0x1000 - num_pages: 2 - pma_memory_type: "wb" - linker_script_section: ".text" - - - va: 0xC0022000 - pa: 0xC0022000 - xwr: "0b011" - page_size: 0x1000 - num_pages: 1 - pma_memory_type: "wb" - linker_script_section: ".data" diff --git a/tests/common/test044/test044.c b/tests/common/test044/test044.c index fdd0203c..d85cc83a 100644 --- a/tests/common/test044/test044.c +++ b/tests/common/test044/test044.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" @@ -13,7 +15,7 @@ #define MISS_LIMIT 5 #define CHECK_SEED(flt_cnt, local_cnt, curr_seed, last_seed, misses) \ - if (flt_cnt[hart_id] != local_cnt) \ + if (flt_cnt[cpu_id] != local_cnt) \ jumpstart_mmode_fail(); \ if (curr_seed == last_seed) { \ misses++; \ @@ -22,7 +24,7 @@ } #define SCHECK_SEED(flt_cnt, local_cnt, curr_seed, last_seed, misses) \ - if (flt_cnt[hart_id] != local_cnt) \ + if (flt_cnt[cpu_id] != local_cnt) \ jumpstart_smode_fail(); \ if (curr_seed == last_seed) { \ misses++; \ @@ -30,16 +32,17 @@ jumpstart_smode_fail(); \ } -__attribute__((section(".data.smode"))) volatile uint64_t - fault_count_s[MAX_NUM_HARTS_SUPPORTED] = {0}; +__attribute__((section( + ".data.smode"))) volatile uint64_t fault_count_s[MAX_NUM_CPUS_SUPPORTED] = { + 0}; __attribute__((section(".text.smode"))) static void smode_exception_handler(void) { - uint64_t hart_id = get_thread_attributes_hart_id_from_smode(); + uint64_t cpu_id = get_thread_attributes_cpu_id_from_smode(); unsigned long epc = get_sepc_for_current_exception(); uint64_t 
tval = read_csr(stval); - fault_count_s[hart_id]++; + fault_count_s[cpu_id]++; // skip over the faulting load if ((tval & 0x3) == 0x3) @@ -51,7 +54,7 @@ smode_exception_handler(void) { } __attribute__((section(".text.smode"))) int smode_main(void) { - uint64_t hart_id = get_thread_attributes_hart_id_from_smode(); + uint64_t cpu_id = get_thread_attributes_cpu_id_from_smode(); uint32_t seed = 0, last_seed = 0; int rand = 0, last_rand = 0; uint64_t temp = 65321512512; @@ -63,11 +66,14 @@ __attribute__((section(".text.smode"))) int smode_main(void) { /* Test M-mode access. */ int random = smode_try_get_seed(); - if (random < 0 || fault_count_s[hart_id] != 0) + if (random < 0 || fault_count_s[cpu_id] != 0) jumpstart_smode_fail(); - set_random_seed_from_smode((int)random); - for (int i = 0; i < 1024; i++) { + if (cpu_id == 0) + set_random_seed_from_smode((int)random * BUILD_RNG_SEED); + + sync_all_cpus_from_smode(); + for (int i = 0; i < 10; i++) { rand = get_random_number_from_smode(); if (rand == last_rand) return DIAG_FAILED; @@ -75,7 +81,7 @@ __attribute__((section(".text.smode"))) int smode_main(void) { last_rand = rand; } - for (unsigned i = 0; i < 1024; i++) { + for (unsigned i = 0; i < 10; i++) { /* Try csrrwi, it shouldn't fault. 
*/ last_seed = seed; __asm__ __volatile__("csrrwi %0, seed, 5" : "=r"(seed)::"memory"); @@ -164,14 +170,14 @@ __attribute__((section(".text.smode"))) int smode_main(void) { return DIAG_PASSED; } -volatile uint64_t fault_count[MAX_NUM_HARTS_SUPPORTED] = {0}; +volatile uint64_t fault_count[MAX_NUM_CPUS_SUPPORTED] = {0}; static void mmode_exception_handler(void) { - uint64_t hart_id = get_thread_attributes_hart_id_from_mmode(); + uint64_t cpu_id = get_thread_attributes_cpu_id_from_mmode(); unsigned long epc = get_mepc_for_current_exception(); uint64_t mtval = read_csr(mtval); - fault_count[hart_id]++; + fault_count[cpu_id]++; // skip over the faulting load if ((mtval & 0x3) == 0x3) @@ -183,7 +189,7 @@ static void mmode_exception_handler(void) { } int main(void) { - uint64_t hart_id = get_thread_attributes_hart_id_from_mmode(); + uint64_t cpu_id = get_thread_attributes_cpu_id_from_mmode(); uint32_t seed = 0, last_seed = 0; int rand = 0, last_rand = 0; uint64_t temp = 65321512512; @@ -194,11 +200,14 @@ int main(void) { (uint64_t)(mmode_exception_handler)); /* Test M-mode access. */ int random = mmode_try_get_seed(); - if (random < 0 || fault_count[hart_id] != 0) + if (random < 0 || fault_count[cpu_id] != 0) jumpstart_mmode_fail(); - set_random_seed_from_mmode((int)random); - for (int i = 0; i < 1024; i++) { + if (cpu_id == 0) + set_random_seed_from_mmode((int)random * BUILD_RNG_SEED); + + sync_all_cpus_from_mmode(); + for (int i = 0; i < 10; i++) { rand = get_random_number_from_mmode(); if (rand == last_rand) return DIAG_FAILED; @@ -206,7 +215,7 @@ int main(void) { last_rand = rand; } - for (unsigned i = 0; i < 1024; i++) { + for (unsigned i = 0; i < 10; i++) { /* Try csrrwi, it shouldn't fault. 
*/ last_seed = seed; __asm__ __volatile__("csrrwi %0, seed, 5" : "=r"(seed)::"memory"); diff --git a/tests/common/test044/test044.diag_attributes.yaml b/tests/common/test044/test044.diag_attributes.yaml index b60392e7..fdf73be8 100644 --- a/tests/common/test044/test044.diag_attributes.yaml +++ b/tests/common/test044/test044.diag_attributes.yaml @@ -1,10 +1,12 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" start_test_in_mmode: true +active_cpu_mask: "0b1111" + mappings: - pa: 0xC0020000 diff --git a/tests/common/test045/test045.S b/tests/common/test045/test045.S index 1d7d9f63..d2a1a5ea 100644 --- a/tests/common/test045/test045.S +++ b/tests/common/test045/test045.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .section .text.vsmode, "ax" diff --git a/tests/common/test045/test045.c b/tests/common/test045/test045.c index 78694e1f..fe90dc95 100644 --- a/tests/common/test045/test045.c +++ b/tests/common/test045/test045.c @@ -1,10 +1,15 @@ -// SPDX-FileCopyrightText: 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" +extern uint64_t vs_stage_pagetables_start; +extern uint64_t g_stage_pagetables_start; + // vsmode mode functions // The assembly functions are already tagged with the .text.vsmode section // attribute. 
@@ -52,7 +57,7 @@ uint8_t c_check_passed_in_arguments(uint8_t a0, uint8_t a1, uint8_t a2, } int main(void) { - if (get_thread_attributes_hart_id_from_smode() != 0) { + if (get_thread_attributes_cpu_id_from_smode() != 0) { return DIAG_FAILED; } @@ -69,6 +74,28 @@ int main(void) { return DIAG_FAILED; } + uint64_t vsatp_value = read_csr(vsatp); + if (get_field(vsatp_value, VSATP64_MODE) != VM_1_10_SV39) { + return DIAG_FAILED; + } + + uint64_t expected_vsatp_ppn = + ((uint64_t)&vs_stage_pagetables_start) >> PAGE_OFFSET; + if (get_field(vsatp_value, VSATP64_PPN) != expected_vsatp_ppn) { + return DIAG_FAILED; + } + + uint64_t hgatp_value = read_csr(hgatp); + if (get_field(hgatp_value, HGATP64_MODE) != VM_1_10_SV39) { + return DIAG_FAILED; + } + + uint64_t expected_hgatp_ppn = + ((uint64_t)&g_stage_pagetables_start) >> PAGE_OFFSET; + if (get_field(hgatp_value, HGATP64_PPN) != expected_hgatp_ppn) { + return DIAG_FAILED; + } + if (run_function_in_vsmode((uint64_t)asm_check_passed_in_arguments, 1, 2, 3, 4, 5, 6, 7) != DIAG_PASSED) { return DIAG_FAILED; diff --git a/tests/common/test045/test045.diag_attributes.yaml b/tests/common/test045/test045.diag_attributes.yaml index 238c696e..075b951d 100644 --- a/tests/common/test045/test045.diag_attributes.yaml +++ b/tests/common/test045/test045.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" enable_virtualization: True diff --git a/tests/common/test046/test046.S b/tests/common/test046/test046.S index 3eff246e..c781a901 100644 --- a/tests/common/test046/test046.S +++ b/tests/common/test046/test046.S @@ -1,8 +1,13 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 -#include "jumpstart_defines.h" .section .text.vsmode, "ax", @progbits diff --git a/tests/common/test046/test046.c b/tests/common/test046/test046.c index ad5566a0..a7b059cf 100644 --- a/tests/common/test046/test046.c +++ b/tests/common/test046/test046.c @@ -1,24 +1,29 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ #include "cpu_bits.h" #include "jumpstart.h" -void test046_illegal_instruction_handler(void) - __attribute__((section(".text.vsmode"))); -int test046_illegal_instruction_function(void) - __attribute__((section(".text.vsmode"))); -int alt_test046_illegal_instruction_function(void) - __attribute__((section(".text.vsmode"))); -int vsmode_main(void) __attribute__((section(".text.vsmode"))); +#define __vs_text __attribute__((section(".text.vsmode"))) +#define __vs_data __attribute__((section(".data.vsmode"))) + +void test046_illegal_instruction_handler(void) __vs_text; +int test046_illegal_instruction_function(void) __vs_text; +int alt_test046_illegal_instruction_function(void) __vs_text; +int vsmode_main(void) __vs_text; // Nest as many exceptions as are allowed. // We have saved the smode context to jump into vsmode so we have // 1 less context save to take. -uint8_t num_context_saves_to_take = MAX_NUM_CONTEXT_SAVES - 1; +uint8_t __vs_data num_context_saves_to_take[MAX_NUM_CPUS_SUPPORTED] = { + [0 ... 
MAX_NUM_CPUS_SUPPORTED - 1] = MAX_NUM_CONTEXT_SAVES - 1}; void test046_illegal_instruction_handler(void) { + uint64_t cpu_id = get_thread_attributes_cpu_id_from_smode(); + if (get_thread_attributes_current_mode_from_smode() != PRV_S) { jumpstart_vsmode_fail(); } @@ -26,15 +31,15 @@ void test046_illegal_instruction_handler(void) { jumpstart_vsmode_fail(); } - --num_context_saves_to_take; + --num_context_saves_to_take[cpu_id]; - if (num_context_saves_to_take != + if (num_context_saves_to_take[cpu_id] != get_thread_attributes_num_context_saves_remaining_in_smode_from_smode()) { jumpstart_vsmode_fail(); } - if (num_context_saves_to_take > 0) { - if (num_context_saves_to_take % 2) { + if (num_context_saves_to_take[cpu_id] > 0) { + if (num_context_saves_to_take[cpu_id] % 2) { if (alt_test046_illegal_instruction_function() != DIAG_PASSED) { jumpstart_vsmode_fail(); } @@ -56,6 +61,8 @@ void test046_illegal_instruction_handler(void) { } int vsmode_main() { + uint64_t cpu_id = get_thread_attributes_cpu_id_from_smode(); + if (get_thread_attributes_current_v_bit_from_smode() != 1) { return DIAG_FAILED; } @@ -64,6 +71,12 @@ int vsmode_main() { RISCV_EXCP_ILLEGAL_INST, (uint64_t)(&test046_illegal_instruction_handler)); + if (num_context_saves_to_take[cpu_id] < 2) { + // We test 2 different types of illegal instruction functions + // and require at least 2 levels of nesting to test both. + return DIAG_FAILED; + } + if (test046_illegal_instruction_function() != DIAG_PASSED) { return DIAG_FAILED; } @@ -89,12 +102,6 @@ int main(void) { return DIAG_FAILED; } - if (num_context_saves_to_take < 2) { - // We test 2 different types of illegal instruction functions - // and require at least 2 levels of nesting to test both. 
- return DIAG_FAILED; - } - if (run_function_in_vsmode((uint64_t)vsmode_main) != DIAG_PASSED) { return DIAG_FAILED; } diff --git a/tests/common/test046/test046.diag_attributes.yaml b/tests/common/test046/test046.diag_attributes.yaml index 7984de9a..076df3fa 100644 --- a/tests/common/test046/test046.diag_attributes.yaml +++ b/tests/common/test046/test046.diag_attributes.yaml @@ -1,11 +1,9 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" - -active_hart_mask: "0b1" - +active_cpu_mask: "0b1111" enable_virtualization: True mappings: @@ -21,7 +19,6 @@ mappings: va: 0xD0022000 pa: 0xD0022000 xwr: "0b011" - valid: "0b0" page_size: 0x1000 num_pages: 1 pma_memory_type: "wb" @@ -45,3 +42,21 @@ mappings: num_pages: 2 pma_memory_type: "wb" linker_script_section: ".text.vsmode" + + - + va: 0xD0026000 + gpa: 0xD0026000 + xwr: "0b011" + page_size: 0x1000 + num_pages: 2 + pma_memory_type: "wb" + + - + gpa: 0xD0026000 + spa: 0xD0026000 + xwr: "0b011" + umode: "0b1" + page_size: 0x1000 + num_pages: 2 + pma_memory_type: "wb" + linker_script_section: ".data.vsmode" diff --git a/tests/common/test047/test047.S b/tests/common/test047/test047.S index 88f99c2d..e4ab8811 100644 --- a/tests/common/test047/test047.S +++ b/tests/common/test047/test047.S @@ -1,4 +1,10 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/common/test047/test047.c b/tests/common/test047/test047.c index ef9d0335..c959cb41 100644 --- a/tests/common/test047/test047.c +++ b/tests/common/test047/test047.c @@ -1,6 +1,8 @@ -// SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. -// -// SPDX-License-Identifier: Apache-2.0 +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ #include "jumpstart.h" diff --git a/tests/common/test047/test047.diag_attributes.yaml b/tests/common/test047/test047.diag_attributes.yaml index 1162ea13..fd2a3f68 100644 --- a/tests/common/test047/test047.diag_attributes.yaml +++ b/tests/common/test047/test047.diag_attributes.yaml @@ -1,10 +1,10 @@ -# SPDX-FileCopyrightText: 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1" +active_cpu_mask: "0b1" enable_virtualization: True diff --git a/tests/common/test048/test048.S b/tests/common/test048/test048.S new file mode 100644 index 00000000..ad20731f --- /dev/null +++ b/tests/common/test048/test048.S @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + + +#define BYTES_TO_COPY (64 * 8) + +.section .text.vumode, "ax" + +# Inputs: +# a0-a6 +.global asm_check_passed_in_arguments +asm_check_passed_in_arguments: + li t0, 1 + bne a0, t0, asm_check_passed_in_arguments_failed + + li t0, 2 + bne a1, t0, asm_check_passed_in_arguments_failed + + li t0, 3 + bne a2, t0, asm_check_passed_in_arguments_failed + + li t0, 4 + bne a3, t0, asm_check_passed_in_arguments_failed + + li t0, 5 + bne a4, t0, asm_check_passed_in_arguments_failed + + li t0, 6 + bne a5, t0, asm_check_passed_in_arguments_failed + + li t0, 7 + bne a6, t0, asm_check_passed_in_arguments_failed + + li a0, DIAG_PASSED + j asm_check_passed_in_arguments_return + +asm_check_passed_in_arguments_failed: + li a0, DIAG_FAILED + +asm_check_passed_in_arguments_return: + ret + +.global copy_bytes +copy_bytes: + la t0, source_location + la t1, destination_location + addi t2, t0, BYTES_TO_COPY + +1: + ld t3, 0(t0) + sd t3, 0(t1) + addi t0, t0, 8 + addi t1, t1, 8 + bne t0, t2, 1b + + li a0, DIAG_PASSED + + ret + +.global 
get_bytes_to_copy +get_bytes_to_copy: + li a0, BYTES_TO_COPY + ret + +.section .data.vumode, "aw" + +.global source_location +source_location: + .rept BYTES_TO_COPY + .byte 0xab + .endr + +.global destination_location +destination_location: + .rept BYTES_TO_COPY + .byte 0xcd + .endr diff --git a/tests/common/test048/test048.c b/tests/common/test048/test048.c new file mode 100644 index 00000000..bc81f497 --- /dev/null +++ b/tests/common/test048/test048.c @@ -0,0 +1,170 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "cpu_bits.h" +#include "jumpstart.h" + +// user mode functions +// The assembly functions are already tagged with the .text.vumode section +// attribute. +uint8_t asm_check_passed_in_arguments(uint8_t a0, uint8_t a1, uint8_t a2, + uint8_t a3, uint8_t a4, uint8_t a5, + uint8_t a6); +uint8_t c_check_passed_in_arguments(uint8_t a0, uint8_t a1, uint8_t a2, + uint8_t a3, uint8_t a4, uint8_t a5, + uint8_t a6) + __attribute__((section(".text.vumode"))) __attribute__((const)); + +uint8_t vsmode_function(void) __attribute__((section(".text.vsmode"))) +__attribute__((const)); + +uint8_t get_bytes_to_copy(void); +int copy_bytes(void); +int compare_copied_bytes(void) __attribute__((section(".text.vumode"))) +__attribute__((pure)); + +extern uint64_t source_location; +extern uint64_t destination_location; + +uint8_t c_check_passed_in_arguments(uint8_t a0, uint8_t a1, uint8_t a2, + uint8_t a3, uint8_t a4, uint8_t a5, + uint8_t a6) { + if (a0 != 1) { + return DIAG_FAILED; + } + if (a1 != 2) { + return DIAG_FAILED; + } + if (a2 != 3) { + return DIAG_FAILED; + } + if (a3 != 4) { + return DIAG_FAILED; + } + if (a4 != 5) { + return DIAG_FAILED; + } + if (a5 != 6) { + return DIAG_FAILED; + } + if (a6 != 7) { + return DIAG_FAILED; + } + return DIAG_PASSED; +} + +uint8_t vsmode_function(void) { + if (get_thread_attributes_current_mode_from_smode() != PRV_S) { + return DIAG_FAILED; + } + + if 
(get_thread_attributes_current_v_bit_from_smode() != 1) { + return DIAG_FAILED; + } + + if (run_function_in_vumode((uint64_t)asm_check_passed_in_arguments, 1, 2, 3, + 4, 5, 6, 7) != DIAG_PASSED) { + return DIAG_FAILED; + } + + if (run_function_in_vumode((uint64_t)c_check_passed_in_arguments, 1, 2, 3, 4, + 5, 6, 7) != DIAG_PASSED) { + return DIAG_FAILED; + } + + int bytes_to_copy = run_function_in_vumode((uint64_t)get_bytes_to_copy); + if (bytes_to_copy != 512) { + return DIAG_FAILED; + } + + // We want supervisor mode to be able to write to the user mode data area + // so set SSTATUS.SUM to 1. + uint64_t sstatus_value = read_csr(sstatus); + sstatus_value |= MSTATUS_SUM; + write_csr(sstatus, sstatus_value); + + uint64_t fill_value = 0x123456789abcdef0; + + for (uint8_t i = 0; i < 5; ++i) { + // Read a Supervisor mode register to really make sure we're in + // supervisor mode. + fill_value += read_csr(sscratch); + + uint64_t *src = (uint64_t *)&source_location; + for (int j = 0; j < (bytes_to_copy / 8); ++j) { + src[j] = fill_value; + ++fill_value; + } + + if (run_function_in_vumode((uint64_t)copy_bytes) != 0) { + return DIAG_FAILED; + } + + if (get_thread_attributes_current_mode_from_smode() != PRV_S) { + return DIAG_FAILED; + } + + if (run_function_in_vumode((uint64_t)compare_copied_bytes) != 0) { + return DIAG_FAILED; + } + + if (get_thread_attributes_current_mode_from_smode() != PRV_S) { + return DIAG_FAILED; + } + } + + if (get_thread_attributes_current_mode_from_smode() != PRV_S) { + return DIAG_FAILED; + } + + if (get_thread_attributes_current_v_bit_from_smode() != 1) { + return DIAG_FAILED; + } + + return DIAG_PASSED; +} + +int main(void) { + if (get_thread_attributes_cpu_id_from_smode() != 0) { + return DIAG_FAILED; + } + + if (get_thread_attributes_bookend_magic_number_from_smode() != + THREAD_ATTRIBUTES_BOOKEND_MAGIC_NUMBER_VALUE) { + return DIAG_FAILED; + } + + if (get_thread_attributes_current_mode_from_smode() != PRV_S) { + return DIAG_FAILED; + } + + 
uint8_t ret = run_function_in_vsmode((uint64_t)vsmode_function); + + if (get_thread_attributes_current_mode_from_smode() != PRV_S) { + return DIAG_FAILED; + } + + if (get_thread_attributes_current_v_bit_from_smode() != 0) { + return DIAG_FAILED; + } + + return ret; +} + +int compare_copied_bytes(void) { + uint8_t bytes_to_copy = get_bytes_to_copy(); + + uint64_t *src = (uint64_t *)&source_location; + uint64_t *dst = (uint64_t *)&destination_location; + + for (int i = 0; i < (bytes_to_copy / 8); i++) { + if (src[i] != dst[i]) { + return DIAG_FAILED; + } + } + + return DIAG_PASSED; +} diff --git a/tests/common/test048/test048.diag_attributes.yaml b/tests/common/test048/test048.diag_attributes.yaml new file mode 100644 index 00000000..8a24992c --- /dev/null +++ b/tests/common/test048/test048.diag_attributes.yaml @@ -0,0 +1,82 @@ +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +satp_mode: "sv39" + +enable_virtualization: true + +mappings: + - + va: 0xd0020000 + pa: 0xd0020000 + xwr: "0b101" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".text" + - + va: 0xd0022000 + pa: 0xd0022000 + xwr: "0b011" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data" + + - + va: 0xd0023000 + gpa: 0xd0023000 + xwr: "0b101" + valid: "0b1" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + - + gpa: 0xd0023000 + spa: 0xd0023000 + xwr: "0b101" + umode: "0b1" + valid: "0b1" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".text.vsmode" + + - + va: 0xd0024000 + gpa: 0xd0024000 + xwr: "0b101" + umode: "0b1" + valid: "0b1" + page_size: 0x1000 + num_pages: 2 + pma_memory_type: "wb" + - + gpa: 0xd0024000 + spa: 0xd0024000 + xwr: "0b101" + umode: "0b1" + valid: "0b1" + page_size: 0x1000 + num_pages: 2 + pma_memory_type: "wb" + linker_script_section: ".text.vumode" + - + va: 0xd0026000 + gpa: 0xd0026000 + xwr: "0b011" + umode: "0b1" + 
page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + + - + gpa: 0xd0026000 + spa: 0xd0026000 + xwr: "0b011" + umode: "0b1" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data.vumode" diff --git a/tests/common/test049/test049.c b/tests/common/test049/test049.c new file mode 100644 index 00000000..ceedbf5f --- /dev/null +++ b/tests/common/test049/test049.c @@ -0,0 +1,22 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "jumpstart.h" + +void vumode_main(void) __attribute__((section(".text.vumode"))); +int vsmode_main(void) __attribute__((section(".text.vsmode"))); + +int main(void) { + return run_function_in_vsmode((uint64_t)vsmode_main); +} + +int vsmode_main(void) { + return run_function_in_vumode((uint64_t)vumode_main); +} + +void vumode_main(void) { + jumpstart_vumode_fail(); +} diff --git a/tests/common/test049/test049.diag_attributes.yaml b/tests/common/test049/test049.diag_attributes.yaml new file mode 100644 index 00000000..07a763bd --- /dev/null +++ b/tests/common/test049/test049.diag_attributes.yaml @@ -0,0 +1,66 @@ +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 + +satp_mode: "sv39" + +enable_virtualization: true + +mappings: + - + va: 0xC0020000 + pa: 0xC0020000 + xwr: "0b101" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".text" + - + va: 0xC0021000 + pa: 0xC0021000 + xwr: "0b011" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data" + + - + va: 0xC0022000 + gpa: 0xC0022000 + xwr: "0b101" + valid: "0b1" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + + - + gpa: 0xC0022000 + spa: 0xC0022000 + xwr: "0b101" + valid: "0b1" + umode: "0b1" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".text.vsmode" + + - + va: 0xC0023000 + gpa: 0xC0023000 + xwr: "0b101" + umode: "0b1" + valid: "0b1" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + + - + gpa: 0xC0023000 + spa: 0xC0023000 + xwr: "0b101" + valid: "0b1" + umode: "0b1" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".text.vumode" diff --git a/tests/common/test050/test050.c b/tests/common/test050/test050.c new file mode 100644 index 00000000..5659cc6a --- /dev/null +++ b/tests/common/test050/test050.c @@ -0,0 +1,19 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "jumpstart.h" + +__attribute__((const)) int main(void); + +void vsmode_main(void) __attribute__((section(".text.vsmode"))); + +void vsmode_main(void) { + jumpstart_vsmode_fail(); +} + +int main(void) { + return run_function_in_vsmode((uint64_t)vsmode_main); +} diff --git a/tests/common/test050/test050.diag_attributes.yaml b/tests/common/test050/test050.diag_attributes.yaml new file mode 100644 index 00000000..a952eaa9 --- /dev/null +++ b/tests/common/test050/test050.diag_attributes.yaml @@ -0,0 +1,45 @@ +# SPDX-FileCopyrightText: 2024 - 2025 Rivos Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 + +satp_mode: "sv39" + +enable_virtualization: true + +mappings: + - + va: 0xC0020000 + pa: 0xC0020000 + xwr: "0b101" + page_size: 0x1000 + num_pages: 2 + pma_memory_type: "wb" + linker_script_section: ".text" + - + va: 0xC0022000 + pa: 0xC0022000 + xwr: "0b011" + valid: "0b0" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data" + - + va: 0xC0023000 + gpa: 0xC0023000 + xwr: "0b101" + valid: "0b1" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + + - + gpa: 0xC0023000 + spa: 0xC0023000 + xwr: "0b101" + valid: "0b1" + umode: "0b1" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".text.vsmode" diff --git a/tests/common/test051/test051.c b/tests/common/test051/test051.c new file mode 100644 index 00000000..106c00dc --- /dev/null +++ b/tests/common/test051/test051.c @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "cpu_bits.h" +#include "jumpstart.h" + +__attribute__((section(".data_no_address"))) uint64_t data_no_address_var = + 0x12345678; + +int main(void) { + if (get_thread_attributes_cpu_id_from_smode() != 0) { + return DIAG_FAILED; + } + + if (get_thread_attributes_bookend_magic_number_from_smode() != + THREAD_ATTRIBUTES_BOOKEND_MAGIC_NUMBER_VALUE) { + return DIAG_FAILED; + } + + if (SATP_MODE != VM_1_10_MBARE) { + return DIAG_FAILED; + } + + if (get_thread_attributes_current_mode_from_smode() != PRV_S) { + return DIAG_FAILED; + } + + if (get_field(read_csr(satp), SATP64_MODE) != VM_1_10_MBARE) { + return DIAG_FAILED; + } + + if (get_field(read_csr(satp), SATP64_PPN) != 0) { + return DIAG_FAILED; + } + + return DIAG_PASSED; +} diff --git a/tests/common/test051/test051.diag_attributes.yaml b/tests/common/test051/test051.diag_attributes.yaml new file mode 100644 index 00000000..55381cb9 --- /dev/null +++ b/tests/common/test051/test051.diag_attributes.yaml @@ -0,0 
+1,26 @@ +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +satp_mode: "bare" + +active_cpu_mask: "0b1" + +mappings: + - + pa: 0xD0020000 + page_size: 0x1000 + num_pages: 2 + pma_memory_type: "wb" + linker_script_section: ".text" + - + pa: 0xD0022000 + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data" + - + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data_no_address" diff --git a/tests/common/test052/test052.c b/tests/common/test052/test052.c new file mode 100644 index 00000000..ddcca032 --- /dev/null +++ b/tests/common/test052/test052.c @@ -0,0 +1,81 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "cpu_bits.h" +#include "jumpstart.h" + +#include +#include +#include + +int assert(int condition) { + return condition ? DIAG_PASSED : DIAG_FAILED; +} + +// Unit tests for strlen +int test_strlen() { + static const char str1[] = "hello"; + static const char str2[] = ""; + static const char str3[] = "baremetal"; + static const char str4[] = "hello SeNtiNel"; + + if (assert(strlen(str1) == sizeof(str1) - 1) != DIAG_PASSED) + return DIAG_FAILED; + if (assert(strlen(str2) == sizeof(str2) - 1) != DIAG_PASSED) + return DIAG_FAILED; + if (assert(strlen(str3) == sizeof(str3) - 1) != DIAG_PASSED) + return DIAG_FAILED; + if (assert(strlen(str4) == sizeof(str4) - 1) != DIAG_PASSED) + return DIAG_FAILED; + + return DIAG_PASSED; +} + +// Unit tests for strcpy +int test_strcpy() { + char dest[20]; + + strcpy(dest, "hello"); + if (assert(strcmp(dest, "hello") == 0) != DIAG_PASSED) + return DIAG_FAILED; + + strcpy(dest, "baremetal"); + if (assert(strcmp(dest, "baremetal") == 0) != DIAG_PASSED) + return DIAG_FAILED; + + strcpy(dest, ""); + if (assert(strcmp(dest, "") == 0) != DIAG_PASSED) + return DIAG_FAILED; + + return DIAG_PASSED; +} + +// Unit tests for strcmp +int test_strcmp() { + if 
(assert(strcmp("hello", "hello") == 0) != DIAG_PASSED) + return DIAG_FAILED; + if (assert(strcmp("hello", "world") != 0) != DIAG_PASSED) + return DIAG_FAILED; + if (assert(strcmp("abc", "abcd") < 0) != DIAG_PASSED) + return DIAG_FAILED; + if (assert(strcmp("abcd", "abc") > 0) != DIAG_PASSED) + return DIAG_FAILED; + + return DIAG_PASSED; // Success +} + +int main() { + // Run tests and check for DIAG_FAILED + if (test_strlen() != DIAG_PASSED) + return DIAG_FAILED; + if (test_strcpy() != DIAG_PASSED) + return DIAG_FAILED; + if (test_strcmp() != DIAG_PASSED) + return DIAG_FAILED; + + // If no failures, return DIAG_PASSED + return DIAG_PASSED; +} diff --git a/tests/common/test039/test039.diag_attributes.yaml b/tests/common/test052/test052.diag_attributes.yaml similarity index 61% rename from tests/common/test039/test039.diag_attributes.yaml rename to tests/common/test052/test052.diag_attributes.yaml index b2063d80..3d35f240 100644 --- a/tests/common/test039/test039.diag_attributes.yaml +++ b/tests/common/test052/test052.diag_attributes.yaml @@ -1,26 +1,25 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" - -active_hart_mask: "0b1111" +active_cpu_mask: "0b1" mappings: - - va: 0xc0020000 - pa: 0xc0020000 + va: 0xD0020000 + pa: 0xD0020000 xwr: "0b101" page_size: 0x1000 num_pages: 2 pma_memory_type: "wb" linker_script_section: ".text" - - va: 0xc0022000 - pa: 0xc0022000 + va: 0xD0022000 + pa: 0xD0022000 xwr: "0b011" - valid: "0b1" + valid: "0b0" page_size: 0x1000 - num_pages: 3 + num_pages: 1 pma_memory_type: "wb" linker_script_section: ".data" diff --git a/tests/common/test053/test053.c b/tests/common/test053/test053.c new file mode 100644 index 00000000..a2a0b56b --- /dev/null +++ b/tests/common/test053/test053.c @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "cpu_bits.h" +#include "jumpstart.h" +#include "uart.smode.h" + +#include +#include + +// Function to check if time() is working correctly +int test_time() { + time_t current_time = time(NULL); + if (current_time == (time_t)-1) { + printk("test_time: FAILED - time() returned -1\n"); + return DIAG_FAILED; + } else { + printk("test_time: PASSED - current time: %ld\n", current_time); + return DIAG_PASSED; + } +} + +// Function to check if gettimeofday() is working correctly +int test_gettimeofday() { + struct timeval tv; + int result = gettimeofday(&tv, NULL); + + printk("test_gettimeofday: define CPU_CLOCK_FREQUENCY_IN_MHZ %d\n", + CPU_CLOCK_FREQUENCY_IN_MHZ); + + if (result != 0) { + printk("test_gettimeofday: FAILED - gettimeofday() returned %d\n", result); + return DIAG_FAILED; + } else if ((tv.tv_sec < 0) || (tv.tv_usec < 0)) { + printk("test_gettimeofday: FAILED - invalid time values: %ld seconds, %ld " + "microseconds\n", + tv.tv_sec, tv.tv_usec); + return DIAG_FAILED; + } else { + printk("test_gettimeofday: PASSED - time: %ld seconds, %ld microseconds\n", + tv.tv_sec, tv.tv_usec); + return DIAG_PASSED; + } +} + +// Main function to run the tests +int main() { + if (test_time() != DIAG_PASSED) { + return DIAG_FAILED; + } + if (test_gettimeofday() != DIAG_PASSED) { + return DIAG_FAILED; + } + + return DIAG_PASSED; +} diff --git a/tests/common/test034/test034.diag_attributes.yaml b/tests/common/test053/test053.diag_attributes.yaml similarity index 64% rename from tests/common/test034/test034.diag_attributes.yaml rename to tests/common/test053/test053.diag_attributes.yaml index 517032db..df04c935 100644 --- a/tests/common/test034/test034.diag_attributes.yaml +++ b/tests/common/test053/test053.diag_attributes.yaml @@ -1,25 +1,25 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 satp_mode: "sv39" -active_hart_mask: "0b1111" +active_cpu_mask: "0b1" mappings: - - va: 0xc0020000 - pa: 0xc0020000 + va: 0xD0020000 + pa: 0xD0020000 xwr: "0b101" page_size: 0x1000 num_pages: 2 pma_memory_type: "wb" linker_script_section: ".text" - - va: 0xc0022000 - pa: 0xc0022000 + va: 0xD0022000 + pa: 0xD0022000 xwr: "0b011" - valid: "0b1" + valid: "0b0" page_size: 0x1000 num_pages: 1 pma_memory_type: "wb" diff --git a/tests/common/test058/test058.c b/tests/common/test058/test058.c new file mode 100644 index 00000000..228caaab --- /dev/null +++ b/tests/common/test058/test058.c @@ -0,0 +1,25 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "cpu_bits.h" +#include "heap.smode.h" +#include "jumpstart.h" + +int main(void) { + uint8_t cpu_id = get_thread_attributes_cpu_id_from_smode(); + + if (cpu_id != 1 && cpu_id != 3) { + return DIAG_FAILED; + } + + if (PRIMARY_CPU_ID != 1) { + // The cpu with the lowest cpu_id in the active cpu mask is the primary + // cpu. + return DIAG_FAILED; + } + + return DIAG_PASSED; +} diff --git a/tests/common/test009/test009.diag_attributes.yaml b/tests/common/test058/test058.diag_attributes.yaml similarity index 55% rename from tests/common/test009/test009.diag_attributes.yaml rename to tests/common/test058/test058.diag_attributes.yaml index 822c379c..bb19404c 100644 --- a/tests/common/test009/test009.diag_attributes.yaml +++ b/tests/common/test058/test058.diag_attributes.yaml @@ -1,20 +1,26 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
# # SPDX-License-Identifier: Apache-2.0 -start_test_in_mmode: true +# Enable cpus 1 and 3 +active_cpu_mask: "0b1010" satp_mode: "sv39" mappings: - - pa: 0xc0020000 + va: 0xD0020000 + pa: 0xD0020000 + xwr: "0b101" page_size: 0x1000 num_pages: 2 pma_memory_type: "wb" linker_script_section: ".text" - - pa: 0xc0022000 + va: 0xD0022000 + pa: 0xD0022000 + xwr: "0b011" + valid: "0b0" page_size: 0x1000 num_pages: 1 pma_memory_type: "wb" diff --git a/tests/common/test061/test061.diag_attributes.yaml b/tests/common/test061/test061.diag_attributes.yaml new file mode 100644 index 00000000..61d0757f --- /dev/null +++ b/tests/common/test061/test061.diag_attributes.yaml @@ -0,0 +1,82 @@ +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +satp_mode: "sv39" + +active_cpu_mask: "0b11" +allow_page_table_modifications: true +enable_virtualization: True + +mappings: + - + va: 0xC0020000 + pa: 0xC0020000 + xwr: "0b101" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".text" + + - + va: 0xC0021000 + pa: 0xC0021000 + xwr: "0b101" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data" + +# two stage mappings for the .two_stage section + - + va: 0xC0022000 + gpa: 0xC0022000 + xwr: "0b101" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + - + gpa: 0xC0022000 + spa: 0xC0022000 + xwr: "0b101" + umode: "0b1" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".text.vsmode" + + - + va: 0xC0023000 + gpa: 0xC0023000 + xwr: "0b011" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + - + gpa: 0xC0023000 + spa: 0xC0023000 + xwr: "0b011" + umode: "0b1" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data.vsmode" + + - + va: 0xC0024000 + gpa: 0xC0024000 + xwr: "0b001" + valid: "0b0" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + - + gpa: 0xC0024000 + spa: 0xC0024000 + xwr: "0b001" + 
umode: "0b1" + valid: "0b0" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data.diag.vsmode" diff --git a/tests/common/test067/test067.c b/tests/common/test067/test067.c new file mode 100644 index 00000000..18912392 --- /dev/null +++ b/tests/common/test067/test067.c @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "cpu_bits.h" +#include "jumpstart.h" + +#include "uart.smode.h" + +extern uint64_t _TEXT_START; +extern uint64_t _DATA_4K_START; +extern uint64_t _DATA_4K_2_START; +extern uint64_t _DATA_2MB_START; +extern uint64_t _DATA_2MB_WITH_EXPLICIT_ADDRESS_START; + +__attribute__((section(".data_4K"))) uint64_t data_var = 0x12345678; +__attribute__((section(".data_4K_2"))) uint64_t data_var_2 = 0x12345678; + +__attribute__((section(".data_2MB"))) uint64_t data_2mb_var = 0x12345678; +__attribute__((section(".data_2MB_with_explicit_address"))) uint64_t + data_2mb_with_explicit_address_var = 0x12345678; + +int main(void) { + uint64_t main_function_address = (uint64_t)&main; + volatile uint64_t text_section_start = (uint64_t)(&_TEXT_START); + if (main_function_address != text_section_start) { + return DIAG_FAILED; + } + + // Check that the data_var is in the data section. 
+ volatile uint64_t data_section_start = (uint64_t)(&_DATA_4K_START); + if ((uint64_t)&data_var != data_section_start) { + return DIAG_FAILED; + } + + volatile uint64_t data_4k_2_section_start = (uint64_t)(&_DATA_4K_2_START); + if ((uint64_t)&data_var_2 != data_4k_2_section_start) { + return DIAG_FAILED; + } + + volatile uint64_t data_2mb_section_start = (uint64_t)(&_DATA_2MB_START); + if ((uint64_t)&data_2mb_var != data_2mb_section_start) { + return DIAG_FAILED; + } + + volatile uint64_t data_2mb_with_explicit_address_section_start = + (uint64_t)(&_DATA_2MB_WITH_EXPLICIT_ADDRESS_START); + if ((uint64_t)&data_2mb_with_explicit_address_var != + data_2mb_with_explicit_address_section_start) { + return DIAG_FAILED; + } + + // We expect jumpstart to sort the mappings by page_size first, then by + // mappings that don't have addresses. + if (data_4k_2_section_start >= data_2mb_section_start) { + return DIAG_FAILED; + } + if (data_2mb_section_start >= data_2mb_with_explicit_address_section_start) { + return DIAG_FAILED; + } + + return DIAG_PASSED; +} diff --git a/tests/common/test067/test067.diag_attributes.yaml b/tests/common/test067/test067.diag_attributes.yaml new file mode 100644 index 00000000..03226018 --- /dev/null +++ b/tests/common/test067/test067.diag_attributes.yaml @@ -0,0 +1,50 @@ +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +satp_mode: "sv39" + +active_cpu_mask: "0b1" + +# We expect jumpstart to sort the mappings by page_size first, then by mappings that don't have addresses. 
+ +mappings: + - + xwr: "0b101" + page_size: 0x1000 + num_pages: 2 + pma_memory_type: "wb" + linker_script_section: ".text" + - + xwr: "0b011" + valid: "0b0" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data_4K" + + - + va: 0xd0000000 + pa: 0xd0000000 + xwr: "0b011" + valid: "0b0" + page_size: 0x200000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data_2MB_with_explicit_address" + + - + xwr: "0b011" + valid: "0b0" + page_size: 0x200000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data_2MB" + + - + xwr: "0b011" + valid: "0b0" + page_size: 0x1000 + num_pages: 2 + pma_memory_type: "wb" + linker_script_section: ".data_4K_2" diff --git a/tests/common/test070/test070.c b/tests/common/test070/test070.c new file mode 100644 index 00000000..9fd60827 --- /dev/null +++ b/tests/common/test070/test070.c @@ -0,0 +1,69 @@ +/* + * SPDX-FileCopyrightText: 2025 Rivos Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include "jumpstart.h" +#include "uart.smode.h" + +extern uint64_t _EXPANDABLE_SC1_START; +extern uint64_t _EXPANDABLE_SC2_START; +extern uint64_t _FIXED_SC1_START; +extern uint64_t _EXPANDABLE_SC1_END; +extern uint64_t _EXPANDABLE_SC2_END; +extern uint64_t _FIXED_SC1_END; + +#define EXPANDABLE_SC1_PAGE_SIZE 0x1000UL +#define EXPANDABLE_SC2_PAGE_SIZE 0x200000UL +#define FIXED_SC1_PAGE_SIZE 0x1000UL +#define EXPANDABLE_SC1_NUM_PAGES 1 +#define EXPANDABLE_SC2_NUM_PAGES 2 +#define FIXED_SC1_NUM_PAGES 1 + +#ifdef __clang__ +__attribute__((optnone)) +#else +__attribute__((optimize("O0"))) +#endif +int main(void) { + uint8_t cpuid = get_thread_attributes_cpu_id_from_smode(); + + if (cpuid == PRIMARY_CPU_ID) { + uint8_t num_cpus = MAX_NUM_CPUS_SUPPORTED; + + // Calculate sizes using linker variables + uint64_t expandable_sc1_size = + ((uint64_t)&_EXPANDABLE_SC1_END - (uint64_t)&_EXPANDABLE_SC1_START + 1); + uint64_t expandable_sc2_size = + 
((uint64_t)&_EXPANDABLE_SC2_END - (uint64_t)&_EXPANDABLE_SC2_START + 1); + uint64_t fixed_sc1_size = + ((uint64_t)&_FIXED_SC1_END - (uint64_t)&_FIXED_SC1_START + 1); + uint64_t expected_sc1_size = + (EXPANDABLE_SC1_PAGE_SIZE * EXPANDABLE_SC1_NUM_PAGES * num_cpus); + uint64_t expected_sc2_size = + (EXPANDABLE_SC2_PAGE_SIZE * EXPANDABLE_SC2_NUM_PAGES * num_cpus); + uint64_t expected_fixed_size = (FIXED_SC1_PAGE_SIZE * FIXED_SC1_NUM_PAGES); + + // Compare against expected sizes + if (expandable_sc1_size != expected_sc1_size) { + printk("Expandable SC1 size mismatch, Expected: %lu, Actual: %lu\n", + expected_sc1_size, expandable_sc1_size); + return DIAG_FAILED; + } + if (expandable_sc2_size != expected_sc2_size) { + printk("Expandable SC2 size mismatch, Expected: %lu, Actual: %lu\n", + expected_sc2_size, expandable_sc2_size); + return DIAG_FAILED; + } + if (fixed_sc1_size != expected_fixed_size) { + printk("Fixed SC1 size mismatch, Expected: %lu, Actual: %lu\n", + expected_fixed_size, fixed_sc1_size); + return DIAG_FAILED; + } + } + return DIAG_PASSED; +} diff --git a/tests/common/test070/test070.diag_attributes.yaml b/tests/common/test070/test070.diag_attributes.yaml new file mode 100644 index 00000000..790fbd54 --- /dev/null +++ b/tests/common/test070/test070.diag_attributes.yaml @@ -0,0 +1,41 @@ +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 + +active_cpu_mask: "0b1111" +satp_mode: "sv39" + +mappings: + - + xwr: "0b101" + page_size: 0x1000 + num_pages: 2 + pma_memory_type: "wb" + linker_script_section: ".text" + - + xwr: "0b011" + page_size: 0x1000 + num_pages: 1 + pma_memory_type: "wb" + linker_script_section: ".data" + - + xwr: "0b011" + page_size: 0x1000 + num_pages_per_cpu: 1 + pma_memory_type: "wb" + linker_script_section: ".expandable_sc1" + + - + xwr: "0b011" + page_size: 0x200000 + num_pages_per_cpu: 2 + pma_memory_type: "wb" + linker_script_section: ".expandable_sc2" + + - + xwr: "0b011" + page_size: 0x1000 + num_pages: 1 + pbmt_mode: "pma" + pma_memory_type: "wb" + linker_script_section: ".fixed_sc1" diff --git a/tests/meson.build b/tests/meson.build index 1455a1cc..1aeacdae 100644 --- a/tests/meson.build +++ b/tests/meson.build @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2023 - 2024 Rivos Inc. +# SPDX-FileCopyrightText: 2023 - 2025 Rivos Inc. # # SPDX-License-Identifier: Apache-2.0 @@ -8,11 +8,8 @@ source_suffixes = ['.S', '.c'] start_in_mmode_tests = [] # diag main() is in mmode start_in_smode_tests = [] # diag main() is in smode -firmware_boot_tests = [] # diag needs boot_config!=fw-none -tests_disabled_on_qemu = [] tests_disabled_on_spike = [] -tests_disabled_for_sbi_firmware_boot = [] subdir('common') test_root_directories = [meson.current_source_dir() + '/' + 'common'] @@ -32,9 +29,6 @@ if 'smode' in riscv_priv_modes_enabled unit_tests += start_in_smode_tests endif -if get_option('boot_config') != 'fw-none' - unit_tests += firmware_boot_tests -endif foreach unit_test : unit_tests test_name = unit_test.get(0) @@ -44,15 +38,9 @@ foreach unit_test : unit_tests test_expected_to_fail = unit_test.get(3, false) - test_disabled_on_qemu = test_name in tests_disabled_on_qemu test_disabled_on_spike = test_name in tests_disabled_on_spike - test_disabled_for_sbi_firmware_boot = test_name in tests_disabled_for_sbi_firmware_boot - if get_option('diag_target') 
== 'spike' and test_disabled_on_spike == true - continue - elif get_option('diag_target') == 'qemu' and test_disabled_on_qemu == true - continue - elif get_option('boot_config') == 'fw-sbi' and test_disabled_for_sbi_firmware_boot == true + if get_option('run_target') == 'spike' and test_disabled_on_spike == true continue endif @@ -76,17 +64,20 @@ foreach unit_test : unit_tests input : diag_source_generator_common_inputs + [diag_attributes_yaml], output : [test_name + '.generated.S', test_name + '.linker_script.ld', - test_name + '.defines.h'], + test_name + '.defines.h', + test_name + '.data_structures.h', + ], command : diag_source_generator_command) test_sources += diag_source_generator_output[0] linker_script = diag_source_generator_output[1] test_defines = diag_source_generator_output[2] + test_data_structures = diag_source_generator_output[3] test_exe = executable(test_name, sources: [jumpstart_sources, test_sources], include_directories: jumpstart_includes, - c_args: ['-include', test_defines.full_path()], + c_args: default_c_args + ['-include', test_defines.full_path(), '-include', test_data_structures.full_path()], link_args: ['-T' + linker_script.full_path()], link_depends: linker_script, dependencies: declare_dependency(sources: test_defines) @@ -101,35 +92,25 @@ foreach unit_test : unit_tests depends : [test_exe]) endif - if get_option('diag_target') == 'spike' + if get_option('run_target') == 'spike' spike_args = default_spike_args if spike_additional_arguments != '' spike_args += spike_additional_arguments.split() endif - test(test_name + ' 🧪 ' + test_description, - spike, - args : [spike_args, test_exe], - suite:'basic', - timeout: get_option('spike_timeout'), - should_fail: test_expected_to_fail) - elif get_option('diag_target') == 'qemu' - qemu_args = default_qemu_args - - if get_option('generate_trace') == true - qemu_args += [ - '--var', 'ap-logfile:' + test_name + '.trace', - '--var', 'out:' + meson.current_build_dir() - ] - endif + target = 
spike + args = [spike_args, test_exe] + timeout = get_option('spike_timeout') + should_fail = test_expected_to_fail test(test_name + ' 🧪 ' + test_description, - qemu, - args : [qemu_args, '--var', 'ap-payload:' + test_exe.full_path()], + target, + args : args, + timeout: timeout, suite:'basic', - timeout: get_option('qemu_timeout'), - should_fail: test_expected_to_fail) + depends: test_exe, + should_fail: should_fail) endif endforeach