From 70e84303470f9f0708bf46b7e778df7af989d84f Mon Sep 17 00:00:00 2001 From: Atul Khare Date: Fri, 7 Oct 2022 14:02:53 -0700 Subject: [PATCH 1/4] Add salustsm feature flag This adds a feature flag that can be conditionally used to compile code specific to Salus-TSM (or alternatively exclude code that's not needed for the functionality). The Salus-TSM binary will be loaded early by the platform-FW, and is intended for use with a HS-mode VMM. --- Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 95f7c936..411f9e1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,9 @@ strip = "debuginfo" codegen-units = 1 panic = "abort" +[features] +salustsm = [] + [dependencies] arrayvec = { version = "0.7.2", default-features = false } assertions = { path = "./assertions" } From e8d061abf78d6809b3b04e74f607b8d9f8e33690 Mon Sep 17 00:00:00 2001 From: Atul Khare Date: Fri, 7 Oct 2022 19:50:12 -0700 Subject: [PATCH 2/4] Refactor host_vm functionality This refactors the host_vm functionality and moves most of it into a seperate module. It also introduces a tsm_core module to contain functionality that will be shared by the Salus version that supports VS-mode guests, and the Salus-TSM version that will support HS-mode VMMs. --- src/host_vm_core.rs | 526 ++++++++++++++++++++++++++++++++++++++++ src/main.rs | 568 +------------------------------------------- src/tsm_core.rs | 26 ++ 3 files changed, 563 insertions(+), 557 deletions(-) create mode 100644 src/host_vm_core.rs create mode 100644 src/tsm_core.rs diff --git a/src/host_vm_core.rs b/src/host_vm_core.rs new file mode 100644 index 00000000..3f10b8b1 --- /dev/null +++ b/src/host_vm_core.rs @@ -0,0 +1,526 @@ +// Copyright (c) 2021 by Rivos Inc. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +//! A small Risc-V hypervisor to enable trusted execution environments. 
+ +use core::alloc::{Allocator, GlobalAlloc, Layout}; +use core::ptr::NonNull; + +extern crate alloc; + +use super::host_vm_loader::HostVmLoader; +use super::smp::PerCpu; +use super::vm::HostVm; +use device_tree::{DeviceTree, Fdt}; +use drivers::{imsic::Imsic, iommu::Iommu, pci::PcieRoot, pmu::PmuInfo, uart::UartDriver, CpuInfo}; +use hyp_alloc::HypAlloc; +use page_tracking::*; +use riscv_page_tables::*; +use riscv_pages::*; +use riscv_regs::{hedeleg, henvcfg, hideleg, hie, satp, scounteren}; +use riscv_regs::{ + Exception, Interrupt, LocalRegisterCopy, ReadWriteable, SatpHelpers, Writeable, CSR, CSR_CYCLE, + CSR_TIME, +}; +use s_mode_utils::print::*; +use spin::Once; + +extern "C" { + static _start: u8; + static _stack_end: u8; +} + +/// The allocator used for boot-time dynamic memory allocations. +static HYPERVISOR_ALLOCATOR: Once = Once::new(); + +/// The hypervisor page table root address and mode to load in satp on secondary CPUs +static SATP_VAL: Once = Once::new(); + +// Implementation of GlobalAlloc that forwards allocations to the boot-time allocator. +struct GeneralGlobalAlloc; + +unsafe impl GlobalAlloc for GeneralGlobalAlloc { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + HYPERVISOR_ALLOCATOR + .get() + .and_then(|a| a.allocate(layout).ok()) + .map(|p| p.as_mut_ptr()) + .unwrap_or(core::ptr::null_mut()) + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + // Unwrap ok, there must've been an allocator to allocate the pointer in the first place. + HYPERVISOR_ALLOCATOR + .get() + .unwrap() + .deallocate(NonNull::new(ptr).unwrap(), layout); + } +} + +#[global_allocator] +static GENERAL_ALLOCATOR: GeneralGlobalAlloc = GeneralGlobalAlloc; + +/// The host VM that all CPUs enter at boot. +#[cfg(not(feature = "salustsm"))] +static HOST_VM: Once> = Once::new(); + +/// Builds the hardware memory map from the device-tree. 
The kernel & initramfs image regions are +/// aligned to `T::TOP_LEVEL_ALIGN` so that they can be mapped directly into the host VM's guest +/// physical address space. +fn build_memory_map(fdt: &Fdt) -> MemMapResult { + let mut builder = HwMemMapBuilder::new(T::TOP_LEVEL_ALIGN); + + // First add the memory regions. + for r in fdt.memory_regions() { + // Safety: We own all of memory at this point and we trust the FDT is well-formed. + unsafe { + builder = builder.add_memory_region(RawAddr::supervisor(r.base()), r.size())?; + } + } + + // Reserve the region used by the hypervisor image itself, including the stack and FDT + // passed in by firmware. + + // Safe because we trust the linker placed these symbols correctly. + let start = unsafe { core::ptr::addr_of!(_start) as u64 }; + let stack_end = unsafe { core::ptr::addr_of!(_stack_end) as u64 }; + + // Find the region of DRAM that the hypervisor is in. + let resv_base = fdt + .memory_regions() + .find(|r| start >= r.base() && stack_end <= r.base().checked_add(r.size()).unwrap()) + .map(|r| RawAddr::supervisor(r.base())) + .expect("Hypervisor image does not reside in a contiguous range of DRAM"); + + // Reserve everything from the start of the region the hypervisor is in up until the top of + // the hypervisor stack. + builder = builder.reserve_region( + HwReservedMemType::HypervisorImage, + resv_base, + stack_end - resv_base.bits(), + )?; + + // FDT must be after the hypervisor image. + let fdt_start = fdt.base_addr() as u64; + assert!(stack_end <= fdt_start); + builder = builder.reserve_region( + HwReservedMemType::HypervisorImage, + RawAddr::supervisor(fdt_start), + fdt.size() as u64, + )?; + + // Reserve the regions marked reserved by firmware. + for r in fdt.reserved_memory_regions() { + builder = builder.reserve_region( + HwReservedMemType::FirmwareReserved, + RawAddr::supervisor(r.base()), + r.size(), + )?; + } + + // Reserve the host VM images loaded by firmware. 
We assume the start of these images are + // aligned to make mapping them in easier. + if let Some(r) = fdt.host_kernel_region() { + assert_eq!(r.base() & (T::TOP_LEVEL_ALIGN - 1), 0); + builder = builder.reserve_region( + HwReservedMemType::HostKernelImage, + RawAddr::supervisor(r.base()), + r.size(), + )?; + } + if let Some(r) = fdt.host_initramfs_region() { + assert_eq!(r.base() & (T::TOP_LEVEL_ALIGN - 1), 0); + builder = builder.reserve_region( + HwReservedMemType::HostInitramfsImage, + RawAddr::supervisor(r.base()), + r.size(), + )?; + } + let mem_map = builder.build(); + + println!("HW memory map:"); + for (i, r) in mem_map.regions().enumerate() { + println!( + "[{}] region: 0x{:x} -> 0x{:x}, {}", + i, + r.base().bits(), + r.end().bits() - 1, + r.region_type() + ); + } + + Ok(mem_map) +} + +// Returns the number of PTE pages needed to map all regions in the given memory map. +// Slightly overestimates of number of pages needed as some regions will share PTE pages in reality. +fn pte_page_count(mem_map: &HwMemMap) -> u64 { + mem_map.regions().fold(0, |acc, r| { + acc + Sv48::max_pte_pages(r.size() / PageSize::Size4k as u64) + }) +} + +// Returns the base address of the first available region in the memory map that is at least `size` +// bytes long. Returns None if no region is big enough. +fn find_available_region(mem_map: &HwMemMap, size: u64) -> Option { + mem_map + .regions() + .find(|r| r.region_type() == HwMemRegionType::Available && r.size() >= size) + .map(|r| r.base()) +} + +// Returns the base, size, and permission pair for the given region if that region type should be +// mapped in the hypervisor's virtual address space. +fn hyp_map_params(r: &HwMemRegion) -> Option<(PageAddr, u64, PteLeafPerms)> { + match r.region_type() { + HwMemRegionType::Available => { + // map available memory as rwx - unser what it'll be used for. 
+ Some((r.base(), r.size(), PteLeafPerms::RWX)) + } + HwMemRegionType::Reserved(HwReservedMemType::FirmwareReserved) => { + // No need to map regions reserved for firmware use + None + } + HwMemRegionType::Reserved(HwReservedMemType::HypervisorImage) + | HwMemRegionType::Reserved(HwReservedMemType::HostKernelImage) + | HwMemRegionType::Reserved(HwReservedMemType::HostInitramfsImage) => { + Some((r.base(), r.size(), PteLeafPerms::RWX)) + } + HwMemRegionType::Reserved(HwReservedMemType::HypervisorHeap) + | HwMemRegionType::Reserved(HwReservedMemType::HypervisorPerCpu) + | HwMemRegionType::Reserved(HwReservedMemType::HypervisorPtes) + | HwMemRegionType::Reserved(HwReservedMemType::PageMap) => { + Some((r.base(), r.size(), PteLeafPerms::RW)) + } + HwMemRegionType::Mmio(_) => Some((r.base(), r.size(), PteLeafPerms::RW)), + } +} + +// Adds an identity mapping to the given Sv48 table for the specified address range. +fn hyp_map_region( + sv48: &FirstStagePageTable, + base: PageAddr, + size: u64, + perms: PteLeafPerms, + get_pte_page: &mut dyn FnMut() -> Option>, +) { + let region_page_count = PageSize::Size4k.round_up(size) / PageSize::Size4k as u64; + // Pass through mappings, vaddr=paddr. + let vaddr = PageAddr::new(RawAddr::supervisor_virt(base.bits())).unwrap(); + // Add mapping for this region to the page table + let mapper = sv48 + .map_range(vaddr, PageSize::Size4k, region_page_count, get_pte_page) + .unwrap(); + let pte_fields = PteFieldBits::leaf_with_perms(perms); + for (virt, phys) in vaddr + .iter_from() + .zip(base.iter_from()) + .take(region_page_count as usize) + { + // Safe as we will create exactly one mapping to each page and will switch to + // using that mapping exclusively. + unsafe { + mapper.map_4k_addr(virt, phys, pte_fields).unwrap(); + } + } +} + +// Creates the Sv48 page table based on the accessible regions of memory in the provided memory +// map. 
+fn setup_hyp_paging(mem_map: &mut HwMemMap) { + let num_pte_pages = pte_page_count(mem_map); + let pte_base = find_available_region(mem_map, num_pte_pages * PageSize::Size4k as u64) + .expect("Not enough free memory for hypervisor Sv48 page table"); + let mut pte_pages = mem_map + .reserve_and_take_pages( + HwReservedMemType::HypervisorPtes, + SupervisorPageAddr::new(RawAddr::from(pte_base)).unwrap(), + PageSize::Size4k, + num_pte_pages, + ) + .unwrap() + .clean() + .into_iter(); + // Create empty sv48 page table + let root_page = pte_pages.next().unwrap(); + let sv48: FirstStagePageTable = + FirstStagePageTable::new(root_page).expect("creating sv48"); + + // Map all the regions in the memory map that the hypervisor could need. + for (base, size, perms) in mem_map.regions().filter_map(hyp_map_params) { + hyp_map_region(&sv48, base, size, perms, &mut || pte_pages.next()); + } + + // TODO - reset device is hard coded in vm.rs + map_fixed_device(0x10_0000, &sv48, &mut || pte_pages.next()); + + // Install the page table in satp + let mut satp = LocalRegisterCopy::::new(0); + satp.set_from(&sv48, 0); + // Store the SATP value for other CPUs. They load from the global in start_secondary. + SATP_VAL.call_once(|| satp.get()); + CSR.satp.set(satp.get()); + tlb::sfence_vma(None, None); +} + +// Adds some hard-coded device location to the given sv48 page table so that the devices can be +// accessed by the hypervisor. Identity maps a single page at base to base. +fn map_fixed_device( + base: u64, + sv48: &FirstStagePageTable, + get_pte_page: &mut dyn FnMut() -> Option>, +) { + let virt_base = PageAddr::new(RawAddr::supervisor_virt(base)).unwrap(); + let phys_base = PageAddr::new(RawAddr::supervisor(base)).unwrap(); + let pte_fields = PteFieldBits::leaf_with_perms(PteLeafPerms::RW); + let mapper = sv48 + .map_range(virt_base, PageSize::Size4k, 1, get_pte_page) + .unwrap(); + // Safe to map access to the device because this will be the only mapping it is used through. 
+ unsafe { + mapper + .map_4k_addr(virt_base, phys_base, pte_fields) + .unwrap(); + } +} + +/// Creates a heap from the given `mem_map`, marking the region occupied by the heap as reserved. +fn create_heap(mem_map: &mut HwMemMap) { + const HEAP_SIZE: u64 = 16 * 1024 * 1024; + + let heap_base = find_available_region(mem_map, HEAP_SIZE) + .expect("Not enough free memory for hypervisor heap"); + mem_map + .reserve_region( + HwReservedMemType::HypervisorHeap, + RawAddr::from(heap_base), + HEAP_SIZE, + ) + .unwrap(); + let pages: SequentialPages = unsafe { + // Safe since this region of memory was free in the memory map. + SequentialPages::from_mem_range( + heap_base, + PageSize::Size4k, + HEAP_SIZE / PageSize::Size4k as u64, + ) + .unwrap() + }; + HYPERVISOR_ALLOCATOR.call_once(|| HypAlloc::from_pages(pages.clean())); +} + +/// Initialize (H)S-level CSRs to a reasonable state. +pub fn host_vm_setup_csrs() { + // Clear and disable any interupts. + CSR.sie.set(0); + CSR.sip.set(0); + // Turn FP and vector units off. + CSR.sstatus.set(0); + + // Delegate traps to VS. 
+ let mut hedeleg = LocalRegisterCopy::::new(0); + hedeleg.modify(Exception::InstructionMisaligned.to_hedeleg_field().unwrap()); + hedeleg.modify(Exception::IllegalInstruction.to_hedeleg_field().unwrap()); + hedeleg.modify(Exception::Breakpoint.to_hedeleg_field().unwrap()); + hedeleg.modify(Exception::LoadMisaligned.to_hedeleg_field().unwrap()); + hedeleg.modify(Exception::StoreMisaligned.to_hedeleg_field().unwrap()); + hedeleg.modify(Exception::UserEnvCall.to_hedeleg_field().unwrap()); + hedeleg.modify(Exception::InstructionPageFault.to_hedeleg_field().unwrap()); + hedeleg.modify(Exception::LoadPageFault.to_hedeleg_field().unwrap()); + hedeleg.modify(Exception::StorePageFault.to_hedeleg_field().unwrap()); + CSR.hedeleg.set(hedeleg.get()); + + let mut hideleg = LocalRegisterCopy::::new(0); + hideleg.modify(Interrupt::VirtualSupervisorSoft.to_hideleg_field().unwrap()); + hideleg.modify( + Interrupt::VirtualSupervisorTimer + .to_hideleg_field() + .unwrap(), + ); + hideleg.modify( + Interrupt::VirtualSupervisorExternal + .to_hideleg_field() + .unwrap(), + ); + CSR.hideleg.set(hideleg.get()); + + let mut hie = LocalRegisterCopy::::new(0); + hie.modify(Interrupt::VirtualSupervisorSoft.to_hie_field().unwrap()); + hie.modify(Interrupt::VirtualSupervisorTimer.to_hie_field().unwrap()); + hie.modify(Interrupt::VirtualSupervisorExternal.to_hie_field().unwrap()); + CSR.hie.set(hie.get()); + + // TODO: Handle virtualization of timer/htimedelta (see issue #46) + // Enable access to timer for now. + CSR.hcounteren.set(1 << (CSR_TIME - CSR_CYCLE)); + + // Make the basic counters available to any of our U-mode tasks. + let mut scounteren = LocalRegisterCopy::::new(0); + scounteren.modify(scounteren::cycle.val(1)); + scounteren.modify(scounteren::time.val(1)); + scounteren.modify(scounteren::instret.val(1)); + CSR.scounteren.set(scounteren.get()); + + super::trap::install_trap_handler(); +} + +/// Boot CPU kernel init for the host_vm. 
+pub fn host_vm_kernel_init(hart_id: u64, fdt_addr: u64) { + // Safe because we trust that the firmware passed a valid FDT. + let hyp_fdt = + unsafe { Fdt::new_from_raw_pointer(fdt_addr as *const u8) }.expect("Failed to read FDT"); + + let mut mem_map = build_memory_map::(&hyp_fdt).expect("Failed to build memory map"); + + // Find where QEMU loaded the host kernel image. + let host_kernel = *mem_map + .regions() + .find(|r| r.region_type() == HwMemRegionType::Reserved(HwReservedMemType::HostKernelImage)) + .expect("No host kernel image"); + let host_initramfs = mem_map + .regions() + .find(|r| { + r.region_type() == HwMemRegionType::Reserved(HwReservedMemType::HostInitramfsImage) + }) + .cloned(); + + // Create a heap for boot-time memory allocations. + create_heap(&mut mem_map); + + let hyp_dt = DeviceTree::from(&hyp_fdt).expect("Failed to construct device-tree"); + + // Find the UART and switch to it as the system console. + UartDriver::probe_from(&hyp_dt, &mut mem_map).expect("Failed to probe UART"); + + // Discover the CPU topology. + CpuInfo::parse_from(&hyp_dt); + let cpu_info = CpuInfo::get(); + if cpu_info.has_sstc() { + println!("Sstc support present"); + // Only write henvcfg when Sstc is present to avoid blowing up on versions of QEMU which + // don't support the *envcfg registers. + CSR.henvcfg.modify(henvcfg::stce.val(1)); + } + if cpu_info.has_sscofpmf() { + // Only probe for PMU counters if we have Sscofpmf; we can't expose counters to guests + // unless we have support for per-mode filtering. + println!("Sscofpmf support present"); + if let Err(e) = PmuInfo::init() { + println!("PmuInfo::init() failed with {:?}", e); + } + } + println!( + "{} CPU(s) present. Booting on CPU{} (hart {})", + cpu_info.num_cpus(), + cpu_info + .hart_id_to_cpu(hart_id.try_into().unwrap()) + .unwrap() + .raw(), + hart_id + ); + + // Probe for the IMSIC. 
+ Imsic::probe_from(&hyp_dt, &mut mem_map).expect("Failed to probe IMSIC"); + let imsic_geometry = Imsic::get().phys_geometry(); + println!( + "IMSIC at 0x{:08x}; {} guest interrupt files supported", + imsic_geometry.base_addr().bits(), + imsic_geometry.guests_per_hart() + ); + Imsic::setup_this_cpu(); + + // Probe for a PCI bus. + PcieRoot::probe_from(&hyp_dt, &mut mem_map).expect("Failed to set up PCIe"); + let pci = PcieRoot::get(); + for dev in pci.devices() { + let dev = dev.lock(); + println!( + "Found func {}; type: {}, MSI: {}, MSI-X: {}, PCIe: {}", + dev.info(), + dev.info().header_type(), + dev.has_msi(), + dev.has_msix(), + dev.is_pcie(), + ); + for bar in dev.bar_info().bars() { + println!( + "BAR{:}: type {:?}, size 0x{:x}", + bar.index(), + bar.bar_type(), + bar.size() + ); + } + } + + setup_hyp_paging(&mut mem_map); + + // Set up per-CPU memory and boot the secondary CPUs. + PerCpu::init(hart_id, &mut mem_map); + + // We start RAM in the host address space at the same location as it is in the supervisor + // address space. + let guest_ram_base = mem_map + .regions() + .find(|r| !matches!(r.region_type(), HwMemRegionType::Mmio(_))) + .map(|r| RawAddr::guest(r.base().bits(), PageOwnerId::host())) + .unwrap(); + let guest_phys_size = mem_map.regions().last().unwrap().end().bits() - guest_ram_base.bits(); + + // Create an allocator for the remaining pages. Anything that's left over will be mapped + // into the host VM. + let mut hyp_mem = HypPageAlloc::new(mem_map); + + // Find and initialize the IOMMU. + match Iommu::probe_from(PcieRoot::get(), &mut || { + hyp_mem.take_pages_for_host_state(1).into_iter().next() + }) { + Ok(_) => { + println!( + "Found RISC-V IOMMU version 0x{:x}", + Iommu::get().unwrap().version() + ); + } + Err(e) => { + println!("Failed to probe IOMMU: {:?}", e); + } + }; + + // Now load the host VM. 
+ let host = HostVmLoader::new( + hyp_dt, + host_kernel, + host_initramfs, + guest_ram_base, + guest_phys_size, + hyp_mem, + ) + .build_device_tree() + .build_address_space(); + + // Lock down the boot time allocator before allowing the host VM to be entered. + HYPERVISOR_ALLOCATOR.get().unwrap().seal(); + + super::smp::start_secondary_cpus(); + + HOST_VM.call_once(|| host); + let cpu_id = PerCpu::this_cpu().cpu_id(); + HOST_VM.get().unwrap().run(cpu_id.raw() as u64); +} + +/// Secondary CPU init function for the host_vm. +pub fn host_vm_secondary_init(_hart_id: u64) { + CSR.satp.set(*SATP_VAL.get().unwrap()); + tlb::sfence_vma(None, None); + + let cpu_info = CpuInfo::get(); + if cpu_info.has_sstc() { + CSR.henvcfg.modify(henvcfg::stce.val(1)); + } + Imsic::setup_this_cpu(); + + let me = PerCpu::this_cpu(); + me.set_online(); + + HOST_VM.wait().run(me.cpu_id().raw() as u64); +} diff --git a/src/main.rs b/src/main.rs index c08fedd1..71896326 100644 --- a/src/main.rs +++ b/src/main.rs @@ -19,430 +19,33 @@ is_some_and )] -use core::alloc::{Allocator, GlobalAlloc, Layout}; -use core::ptr::NonNull; - extern crate alloc; mod asm; mod guest_tracking; +mod host_vm_core; mod host_vm_loader; mod smp; mod trap; +mod tsm_core; mod vm; mod vm_cpu; mod vm_id; mod vm_pages; mod vm_pmu; -use device_tree::{DeviceTree, Fdt}; -use drivers::{imsic::Imsic, iommu::Iommu, pci::PcieRoot, pmu::PmuInfo, uart::UartDriver, CpuInfo}; -use host_vm_loader::HostVmLoader; -use hyp_alloc::HypAlloc; -use page_tracking::*; -use riscv_page_tables::*; -use riscv_pages::*; -use riscv_regs::{hedeleg, henvcfg, hideleg, hie, satp, scounteren}; #[cfg(target_feature = "v")] use riscv_regs::{sstatus, vlenb, Readable, RiscvCsrInterface, MAX_VECTOR_REGISTER_LEN}; -use riscv_regs::{ - Exception, Interrupt, LocalRegisterCopy, ReadWriteable, SatpHelpers, Writeable, CSR, CSR_CYCLE, - CSR_TIME, -}; -use s_mode_utils::abort::abort; use s_mode_utils::print::*; use s_mode_utils::sbi_console::SbiConsole; -use 
smp::PerCpu; -use spin::Once; -use vm::HostVm; - -#[panic_handler] -fn panic(info: &core::panic::PanicInfo) -> ! { - println!("panic : {:?}", info); - abort(); -} - -extern "C" { - static _start: u8; - static _stack_end: u8; -} - -/// The allocator used for boot-time dynamic memory allocations. -static HYPERVISOR_ALLOCATOR: Once = Once::new(); - -/// The hypervisor page table root address and mode to load in satp on secondary CPUs -static SATP_VAL: Once = Once::new(); - -// Implementation of GlobalAlloc that forwards allocations to the boot-time allocator. -struct GeneralGlobalAlloc; - -unsafe impl GlobalAlloc for GeneralGlobalAlloc { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - HYPERVISOR_ALLOCATOR - .get() - .and_then(|a| a.allocate(layout).ok()) - .map(|p| p.as_mut_ptr()) - .unwrap_or(core::ptr::null_mut()) - } - - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - // Unwrap ok, there must've been an allocator to allocate the pointer in the first place. - HYPERVISOR_ALLOCATOR - .get() - .unwrap() - .deallocate(NonNull::new(ptr).unwrap(), layout); - } -} - -#[global_allocator] -static GENERAL_ALLOCATOR: GeneralGlobalAlloc = GeneralGlobalAlloc; - -/// Aborts if the system hits an allocation error. -#[alloc_error_handler] -pub fn alloc_error(_layout: Layout) -> ! { - abort() -} - -// Powers off this machine. -fn poweroff() -> ! { - println!("Shutting down"); - // Safety: on this platform, a write of 0x5555 to 0x100000 will trigger the platform to - // poweroff, which is defined behavior. - unsafe { - core::ptr::write_volatile(0x10_0000 as *mut u32, 0x5555); - } - abort() -} - -/// The host VM that all CPUs enter at boot. -static HOST_VM: Once> = Once::new(); - -/// Builds the hardware memory map from the device-tree. The kernel & initramfs image regions are -/// aligned to `T::TOP_LEVEL_ALIGN` so that they can be mapped directly into the host VM's guest -/// physical address space. 
-fn build_memory_map(fdt: &Fdt) -> MemMapResult { - let mut builder = HwMemMapBuilder::new(T::TOP_LEVEL_ALIGN); - - // First add the memory regions. - for r in fdt.memory_regions() { - // Safety: We own all of memory at this point and we trust the FDT is well-formed. - unsafe { - builder = builder.add_memory_region(RawAddr::supervisor(r.base()), r.size())?; - } - } - - // Reserve the region used by the hypervisor image itself, including the stack and FDT - // passed in by firmware. - - // Safe because we trust the linker placed these symbols correctly. - let start = unsafe { core::ptr::addr_of!(_start) as u64 }; - let stack_end = unsafe { core::ptr::addr_of!(_stack_end) as u64 }; - - // Find the region of DRAM that the hypervisor is in. - let resv_base = fdt - .memory_regions() - .find(|r| start >= r.base() && stack_end <= r.base().checked_add(r.size()).unwrap()) - .map(|r| RawAddr::supervisor(r.base())) - .expect("Hypervisor image does not reside in a contiguous range of DRAM"); - - // Reserve everything from the start of the region the hypervisor is in up until the top of - // the hypervisor stack. - builder = builder.reserve_region( - HwReservedMemType::HypervisorImage, - resv_base, - stack_end - resv_base.bits(), - )?; - - // FDT must be after the hypervisor image. - let fdt_start = fdt.base_addr() as u64; - assert!(stack_end <= fdt_start); - builder = builder.reserve_region( - HwReservedMemType::HypervisorImage, - RawAddr::supervisor(fdt_start), - fdt.size() as u64, - )?; - - // Reserve the regions marked reserved by firmware. - for r in fdt.reserved_memory_regions() { - builder = builder.reserve_region( - HwReservedMemType::FirmwareReserved, - RawAddr::supervisor(r.base()), - r.size(), - )?; - } - - // Reserve the host VM images loaded by firmware. We assume the start of these images are - // aligned to make mapping them in easier. 
- if let Some(r) = fdt.host_kernel_region() { - assert_eq!(r.base() & (T::TOP_LEVEL_ALIGN - 1), 0); - builder = builder.reserve_region( - HwReservedMemType::HostKernelImage, - RawAddr::supervisor(r.base()), - r.size(), - )?; - } - if let Some(r) = fdt.host_initramfs_region() { - assert_eq!(r.base() & (T::TOP_LEVEL_ALIGN - 1), 0); - builder = builder.reserve_region( - HwReservedMemType::HostInitramfsImage, - RawAddr::supervisor(r.base()), - r.size(), - )?; - } - let mem_map = builder.build(); - - println!("HW memory map:"); - for (i, r) in mem_map.regions().enumerate() { - println!( - "[{}] region: 0x{:x} -> 0x{:x}, {}", - i, - r.base().bits(), - r.end().bits() - 1, - r.region_type() - ); - } - - Ok(mem_map) -} - -// Returns the number of PTE pages needed to map all regions in the given memory map. -// Slightly overestimates of number of pages needed as some regions will share PTE pages in reality. -fn pte_page_count(mem_map: &HwMemMap) -> u64 { - mem_map.regions().fold(0, |acc, r| { - acc + Sv48::max_pte_pages(r.size() / PageSize::Size4k as u64) - }) -} - -// Returns the base address of the first available region in the memory map that is at least `size` -// bytes long. Returns None if no region is big enough. -fn find_available_region(mem_map: &HwMemMap, size: u64) -> Option { - mem_map - .regions() - .find(|r| r.region_type() == HwMemRegionType::Available && r.size() >= size) - .map(|r| r.base()) -} - -// Returns the base, size, and permission pair for the given region if that region type should be -// mapped in the hypervisor's virtual address space. -fn hyp_map_params(r: &HwMemRegion) -> Option<(PageAddr, u64, PteLeafPerms)> { - match r.region_type() { - HwMemRegionType::Available => { - // map available memory as rwx - unser what it'll be used for. 
- Some((r.base(), r.size(), PteLeafPerms::RWX)) - } - HwMemRegionType::Reserved(HwReservedMemType::FirmwareReserved) => { - // No need to map regions reserved for firmware use - None - } - HwMemRegionType::Reserved(HwReservedMemType::HypervisorImage) - | HwMemRegionType::Reserved(HwReservedMemType::HostKernelImage) - | HwMemRegionType::Reserved(HwReservedMemType::HostInitramfsImage) => { - Some((r.base(), r.size(), PteLeafPerms::RWX)) - } - HwMemRegionType::Reserved(HwReservedMemType::HypervisorHeap) - | HwMemRegionType::Reserved(HwReservedMemType::HypervisorPerCpu) - | HwMemRegionType::Reserved(HwReservedMemType::HypervisorPtes) - | HwMemRegionType::Reserved(HwReservedMemType::PageMap) => { - Some((r.base(), r.size(), PteLeafPerms::RW)) - } - HwMemRegionType::Mmio(_) => Some((r.base(), r.size(), PteLeafPerms::RW)), - } -} - -// Adds an identity mapping to the given Sv48 table for the specified address range. -fn hyp_map_region( - sv48: &FirstStagePageTable, - base: PageAddr, - size: u64, - perms: PteLeafPerms, - get_pte_page: &mut dyn FnMut() -> Option>, -) { - let region_page_count = PageSize::Size4k.round_up(size) / PageSize::Size4k as u64; - // Pass through mappings, vaddr=paddr. - let vaddr = PageAddr::new(RawAddr::supervisor_virt(base.bits())).unwrap(); - // Add mapping for this region to the page table - let mapper = sv48 - .map_range(vaddr, PageSize::Size4k, region_page_count, get_pte_page) - .unwrap(); - let pte_fields = PteFieldBits::leaf_with_perms(perms); - for (virt, phys) in vaddr - .iter_from() - .zip(base.iter_from()) - .take(region_page_count as usize) - { - // Safe as we will create exactly one mapping to each page and will switch to - // using that mapping exclusively. - unsafe { - mapper.map_4k_addr(virt, phys, pte_fields).unwrap(); - } - } -} - -// Creates the Sv48 page table based on the accessible regions of memory in the provided memory -// map. 
-fn setup_hyp_paging(mem_map: &mut HwMemMap) { - let num_pte_pages = pte_page_count(mem_map); - let pte_base = find_available_region(mem_map, num_pte_pages * PageSize::Size4k as u64) - .expect("Not enough free memory for hypervisor Sv48 page table"); - let mut pte_pages = mem_map - .reserve_and_take_pages( - HwReservedMemType::HypervisorPtes, - SupervisorPageAddr::new(RawAddr::from(pte_base)).unwrap(), - PageSize::Size4k, - num_pte_pages, - ) - .unwrap() - .clean() - .into_iter(); - // Create empty sv48 page table - let root_page = pte_pages.next().unwrap(); - let sv48: FirstStagePageTable = - FirstStagePageTable::new(root_page).expect("creating sv48"); - - // Map all the regions in the memory map that the hypervisor could need. - for (base, size, perms) in mem_map.regions().filter_map(hyp_map_params) { - hyp_map_region(&sv48, base, size, perms, &mut || pte_pages.next()); - } - - // TODO - reset device is hard coded in vm.rs - map_fixed_device(0x10_0000, &sv48, &mut || pte_pages.next()); - - // Install the page table in satp - let mut satp = LocalRegisterCopy::::new(0); - satp.set_from(&sv48, 0); - // Store the SATP value for other CPUs. They load from the global in start_secondary. - SATP_VAL.call_once(|| satp.get()); - CSR.satp.set(satp.get()); - tlb::sfence_vma(None, None); -} - -// Adds some hard-coded device location to the given sv48 page table so that the devices can be -// accessed by the hypervisor. Identity maps a single page at base to base. -fn map_fixed_device( - base: u64, - sv48: &FirstStagePageTable, - get_pte_page: &mut dyn FnMut() -> Option>, -) { - let virt_base = PageAddr::new(RawAddr::supervisor_virt(base)).unwrap(); - let phys_base = PageAddr::new(RawAddr::supervisor(base)).unwrap(); - let pte_fields = PteFieldBits::leaf_with_perms(PteLeafPerms::RW); - let mapper = sv48 - .map_range(virt_base, PageSize::Size4k, 1, get_pte_page) - .unwrap(); - // Safe to map access to the device because this will be the only mapping it is used through. 
- unsafe { - mapper - .map_4k_addr(virt_base, phys_base, pte_fields) - .unwrap(); - } -} - -/// Creates a heap from the given `mem_map`, marking the region occupied by the heap as reserved. -fn create_heap(mem_map: &mut HwMemMap) { - const HEAP_SIZE: u64 = 16 * 1024 * 1024; - - let heap_base = find_available_region(mem_map, HEAP_SIZE) - .expect("Not enough free memory for hypervisor heap"); - mem_map - .reserve_region( - HwReservedMemType::HypervisorHeap, - RawAddr::from(heap_base), - HEAP_SIZE, - ) - .unwrap(); - let pages: SequentialPages = unsafe { - // Safe since this region of memory was free in the memory map. - SequentialPages::from_mem_range( - heap_base, - PageSize::Size4k, - HEAP_SIZE / PageSize::Size4k as u64, - ) - .unwrap() - }; - HYPERVISOR_ALLOCATOR.call_once(|| HypAlloc::from_pages(pages.clean())); -} - -/// Initialize (H)S-level CSRs to a reasonable state. -pub fn setup_csrs() { - // Clear and disable any interupts. - CSR.sie.set(0); - CSR.sip.set(0); - // Turn FP and vector units off. - CSR.sstatus.set(0); - - // Delegate traps to VS. 
- let mut hedeleg = LocalRegisterCopy::::new(0); - hedeleg.modify(Exception::InstructionMisaligned.to_hedeleg_field().unwrap()); - hedeleg.modify(Exception::IllegalInstruction.to_hedeleg_field().unwrap()); - hedeleg.modify(Exception::Breakpoint.to_hedeleg_field().unwrap()); - hedeleg.modify(Exception::LoadMisaligned.to_hedeleg_field().unwrap()); - hedeleg.modify(Exception::StoreMisaligned.to_hedeleg_field().unwrap()); - hedeleg.modify(Exception::UserEnvCall.to_hedeleg_field().unwrap()); - hedeleg.modify(Exception::InstructionPageFault.to_hedeleg_field().unwrap()); - hedeleg.modify(Exception::LoadPageFault.to_hedeleg_field().unwrap()); - hedeleg.modify(Exception::StorePageFault.to_hedeleg_field().unwrap()); - CSR.hedeleg.set(hedeleg.get()); - - let mut hideleg = LocalRegisterCopy::::new(0); - hideleg.modify(Interrupt::VirtualSupervisorSoft.to_hideleg_field().unwrap()); - hideleg.modify( - Interrupt::VirtualSupervisorTimer - .to_hideleg_field() - .unwrap(), - ); - hideleg.modify( - Interrupt::VirtualSupervisorExternal - .to_hideleg_field() - .unwrap(), - ); - CSR.hideleg.set(hideleg.get()); - - let mut hie = LocalRegisterCopy::::new(0); - hie.modify(Interrupt::VirtualSupervisorSoft.to_hie_field().unwrap()); - hie.modify(Interrupt::VirtualSupervisorTimer.to_hie_field().unwrap()); - hie.modify(Interrupt::VirtualSupervisorExternal.to_hie_field().unwrap()); - CSR.hie.set(hie.get()); - - // TODO: Handle virtualization of timer/htimedelta (see issue #46) - // Enable access to timer for now. - CSR.hcounteren.set(1 << (CSR_TIME - CSR_CYCLE)); - - // Make the basic counters available to any of our U-mode tasks. 
- let mut scounteren = LocalRegisterCopy::::new(0); - scounteren.modify(scounteren::cycle.val(1)); - scounteren.modify(scounteren::time.val(1)); - scounteren.modify(scounteren::instret.val(1)); - CSR.scounteren.set(scounteren.get()); - - trap::install_trap_handler(); -} - -#[cfg(target_feature = "v")] -fn check_vector_width() { - // Because we just ran setup_csrs(), we know vectors are off - // Turn vectors on - CSR.sstatus.read_and_set_bits(sstatus::vs::Initial.value); - - // vlenb converted from bytes to bits - let rwidth = CSR.vlenb.read(vlenb::value); - println!("vector register width: {} bits", rwidth * 8); - if rwidth > MAX_VECTOR_REGISTER_LEN as u64 { - println!( - "Vector registers too wide: {} bits, maximum is {} bits", - rwidth * 8, - MAX_VECTOR_REGISTER_LEN * 8 - ); - panic!("Aborting boot."); - } - // Turn vectors off - CSR.sstatus.read_and_clear_bits(sstatus::vs::Dirty.value); -} +use tsm_core::*; /// The entry point of the Rust part of the kernel. #[no_mangle] extern "C" fn kernel_init(hart_id: u64, fdt_addr: u64) { + use host_vm_core::*; // Reset CSRs to a sane state. - setup_csrs(); + host_vm_setup_csrs(); SbiConsole::set_as_console(); println!("Salus: Boot test VM"); @@ -451,164 +54,15 @@ extern "C" fn kernel_init(hart_id: u64, fdt_addr: u64) { #[cfg(target_feature = "v")] check_vector_width(); - // Safe because we trust that the firmware passed a valid FDT. - let hyp_fdt = - unsafe { Fdt::new_from_raw_pointer(fdt_addr as *const u8) }.expect("Failed to read FDT"); - - let mut mem_map = build_memory_map::(&hyp_fdt).expect("Failed to build memory map"); - - // Find where QEMU loaded the host kernel image. 
- let host_kernel = *mem_map - .regions() - .find(|r| r.region_type() == HwMemRegionType::Reserved(HwReservedMemType::HostKernelImage)) - .expect("No host kernel image"); - let host_initramfs = mem_map - .regions() - .find(|r| { - r.region_type() == HwMemRegionType::Reserved(HwReservedMemType::HostInitramfsImage) - }) - .cloned(); - - // Create a heap for boot-time memory allocations. - create_heap(&mut mem_map); - - let hyp_dt = DeviceTree::from(&hyp_fdt).expect("Failed to construct device-tree"); - - // Find the UART and switch to it as the system console. - UartDriver::probe_from(&hyp_dt, &mut mem_map).expect("Failed to probe UART"); - - // Discover the CPU topology. - CpuInfo::parse_from(&hyp_dt); - let cpu_info = CpuInfo::get(); - if cpu_info.has_sstc() { - println!("Sstc support present"); - // Only write henvcfg when Sstc is present to avoid blowing up on versions of QEMU which - // don't support the *envcfg registers. - CSR.henvcfg.modify(henvcfg::stce.val(1)); - } - if cpu_info.has_sscofpmf() { - // Only probe for PMU counters if we have Sscofpmf; we can't expose counters to guests - // unless we have support for per-mode filtering. - println!("Sscofpmf support present"); - if let Err(e) = PmuInfo::init() { - println!("PmuInfo::init() failed with {:?}", e); - } - } - println!( - "{} CPU(s) present. Booting on CPU{} (hart {})", - cpu_info.num_cpus(), - cpu_info - .hart_id_to_cpu(hart_id.try_into().unwrap()) - .unwrap() - .raw(), - hart_id - ); - - // Probe for the IMSIC. - Imsic::probe_from(&hyp_dt, &mut mem_map).expect("Failed to probe IMSIC"); - let imsic_geometry = Imsic::get().phys_geometry(); - println!( - "IMSIC at 0x{:08x}; {} guest interrupt files supported", - imsic_geometry.base_addr().bits(), - imsic_geometry.guests_per_hart() - ); - Imsic::setup_this_cpu(); - - // Probe for a PCI bus. 
- PcieRoot::probe_from(&hyp_dt, &mut mem_map).expect("Failed to set up PCIe"); - let pci = PcieRoot::get(); - for dev in pci.devices() { - let dev = dev.lock(); - println!( - "Found func {}; type: {}, MSI: {}, MSI-X: {}, PCIe: {}", - dev.info(), - dev.info().header_type(), - dev.has_msi(), - dev.has_msix(), - dev.is_pcie(), - ); - for bar in dev.bar_info().bars() { - println!( - "BAR{:}: type {:?}, size 0x{:x}", - bar.index(), - bar.bar_type(), - bar.size() - ); - } - } - - setup_hyp_paging(&mut mem_map); - - // Set up per-CPU memory and boot the secondary CPUs. - PerCpu::init(hart_id, &mut mem_map); - - // We start RAM in the host address space at the same location as it is in the supervisor - // address space. - let guest_ram_base = mem_map - .regions() - .find(|r| !matches!(r.region_type(), HwMemRegionType::Mmio(_))) - .map(|r| RawAddr::guest(r.base().bits(), PageOwnerId::host())) - .unwrap(); - let guest_phys_size = mem_map.regions().last().unwrap().end().bits() - guest_ram_base.bits(); - - // Create an allocator for the remaining pages. Anything that's left over will be mapped - // into the host VM. - let mut hyp_mem = HypPageAlloc::new(mem_map); - - // Find and initialize the IOMMU. - match Iommu::probe_from(PcieRoot::get(), &mut || { - hyp_mem.take_pages_for_host_state(1).into_iter().next() - }) { - Ok(_) => { - println!( - "Found RISC-V IOMMU version 0x{:x}", - Iommu::get().unwrap().version() - ); - } - Err(e) => { - println!("Failed to probe IOMMU: {:?}", e); - } - }; - - // Now load the host VM. - let host = HostVmLoader::new( - hyp_dt, - host_kernel, - host_initramfs, - guest_ram_base, - guest_phys_size, - hyp_mem, - ) - .build_device_tree() - .build_address_space(); - - // Lock down the boot time allocator before allowing the host VM to be entered. 
- HYPERVISOR_ALLOCATOR.get().unwrap().seal(); - - smp::start_secondary_cpus(); - - HOST_VM.call_once(|| host); - let cpu_id = PerCpu::this_cpu().cpu_id(); - HOST_VM.get().unwrap().run(cpu_id.raw() as u64); + host_vm_kernel_init(hart_id, fdt_addr); poweroff(); } #[no_mangle] -extern "C" fn secondary_init(_hart_id: u64) { - setup_csrs(); - - CSR.satp.set(*SATP_VAL.get().unwrap()); - tlb::sfence_vma(None, None); - - let cpu_info = CpuInfo::get(); - if cpu_info.has_sstc() { - CSR.henvcfg.modify(henvcfg::stce.val(1)); - } - Imsic::setup_this_cpu(); - - let me = PerCpu::this_cpu(); - me.set_online(); - - HOST_VM.wait().run(me.cpu_id().raw() as u64); +#[cfg(not(feature = "salustsm"))] +extern "C" fn secondary_init(hart_id: u64) { + use host_vm_core::*; + host_vm_setup_csrs(); + host_vm_secondary_init(hart_id); poweroff(); } diff --git a/src/tsm_core.rs b/src/tsm_core.rs new file mode 100644 index 00000000..aff30e08 --- /dev/null +++ b/src/tsm_core.rs @@ -0,0 +1,26 @@ +use core::alloc::Layout; +use s_mode_utils::abort::abort; +use s_mode_utils::print::*; + +/// Aborts if the system hits an allocation error. +#[alloc_error_handler] +pub fn alloc_error(_layout: Layout) -> ! { + abort() +} + +/// Powers off this machine. +pub fn poweroff() -> ! { + println!("Shutting down"); + // Safety: on this platform, a write of 0x5555 to 0x100000 will trigger the platform to + // poweroff, which is defined behavior. + unsafe { + core::ptr::write_volatile(0x10_0000 as *mut u32, 0x5555); + } + abort() +} + +#[panic_handler] +fn panic(info: &core::panic::PanicInfo) -> ! { + println!("panic : {:?}", info); + abort(); +} From 9fe3d8f337e469e51fdf2c97ad63542ababb7795 Mon Sep 17 00:00:00 2001 From: Atul Khare Date: Fri, 7 Oct 2022 21:21:57 -0700 Subject: [PATCH 3/4] Add Salus TSM module This adds a module for the Salus TSM binary. At present, it's a stub intended for prototyping purposes, and will evolve to provide the complete range of functionality. 
--- src/salus_tsm.rs | 60 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 src/salus_tsm.rs diff --git a/src/salus_tsm.rs b/src/salus_tsm.rs new file mode 100644 index 00000000..f94ad127 --- /dev/null +++ b/src/salus_tsm.rs @@ -0,0 +1,60 @@ +// Copyright (c) 2021 by Rivos Inc. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +//! A small Risc-V hypervisor to enable trusted execution environments. + +#![no_main] +#![no_std] +#![feature( + panic_info_message, + allocator_api, + alloc_error_handler, + lang_items, + if_let_guard, + asm_const, + ptr_sub_ptr, + slice_ptr_get, + let_chains, + is_some_and +)] + +extern crate alloc; + +mod asm; +mod tsm_core; + +use core::alloc::{GlobalAlloc, Layout}; +use s_mode_utils::abort::abort; +use s_mode_utils::print::*; +use s_mode_utils::sbi_console::SbiConsole; +use tsm_core::*; + +// Implementation of GlobalAlloc that forwards allocations to the boot-time allocator. +struct GeneralGlobalAlloc; + +unsafe impl GlobalAlloc for GeneralGlobalAlloc { + unsafe fn alloc(&self, _layout: Layout) -> *mut u8 { + abort(); + } + + unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) { + abort(); + } +} + +#[global_allocator] +static GENERAL_ALLOCATOR: GeneralGlobalAlloc = GeneralGlobalAlloc; + +/// The entry point of the Rust part of the kernel. +#[no_mangle] +extern "C" fn kernel_init(_hart_id: u64, _fdt_addr: u64) { + SbiConsole::set_as_console(); + println!("Salus-TSM: Booting"); + poweroff(); +} + +#[no_mangle] +extern "C" fn secondary_init(_hart_id: u64) { + poweroff(); +} From feaed3dd4cb60dc0d2efa714e278ca9b3b9db7d3 Mon Sep 17 00:00:00 2001 From: Atul Khare Date: Fri, 7 Oct 2022 15:16:34 -0700 Subject: [PATCH 4/4] Add Salus TSM build target This makes the changes to Cargo.toml and the Makefile to build binaries for the original Salus, and the new Salus-TSM. 
--- Cargo.toml | 9 +++++++++ Makefile | 11 +++++++++++ 2 files changed, 20 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 411f9e1e..92d03bde 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,6 +13,15 @@ panic = "abort" [features] salustsm = [] +[[bin]] +name = "salus-tsm" +path = "src/salus_tsm.rs" +required-features = ["salustsm"] + +[[bin]] +name = "salus" +path = "src/main.rs" + [dependencies] arrayvec = { version = "0.7.2", default-features = false } assertions = { path = "./assertions" } diff --git a/Makefile b/Makefile index aaae0e14..97021a79 100644 --- a/Makefile +++ b/Makefile @@ -98,12 +98,17 @@ guestvm: tellus: guestvm cargo build $(CARGO_FLAGS) --package test_workloads --bin tellus --release +.PHONY: salus_tsm +salus_tsm: + cargo build $(CARGO_FLAGS) --release --bin salus-tsm --features="salustsm" + # Runnable targets: # # run_tellus_gdb: Run Tellus as the host VM with GDB debugging enabled. # run_tellus: Run Tellus as the host VM. # run_linux: Run a bare Linux kernel as the host VM. # run_debian: Run a Linux kernel as the host VM with a Debian rootfs. +# run_salus_tsm: Runs the Salus-TSM binary (currently a stub). run_tellus_gdb: tellus_bin salus_debug $(QEMU_BIN) \ @@ -141,6 +146,12 @@ run_debian: salus -device e1000e,netdev=usernet \ $(EXTRA_QEMU_ARGS) +run_salus_tsm: salus_tsm + $(QEMU_BIN) \ + $(MACH_ARGS) \ + -kernel $(RELEASE_BINS)salus-tsm \ + $(EXTRA_QEMU_ARGS) + .PHONY: lint lint: cargo clippy -- -D warnings -Wmissing-docs