From 618ad4e3b5fcfc740fe1fd352e17d458a521bf42 Mon Sep 17 00:00:00 2001 From: Jakub Klimek Date: Tue, 2 Dec 2025 14:50:35 +0100 Subject: [PATCH 1/8] hal/arm: pmap_switch full MPU reconfiguration Introduce full MPU regions reconfiguration on context switch, allowing for more flexible configuration of memory maps on MPU targets. Performed tests show no memory coherence problems and minor improvements in pmap_switch performance. According to ARM documentation, cache maintenance is not required, as long as memory maps are not overlapping, and that assumption is already present in Phoenix-RTOS. Changes include * additional hal_syspage_prog_t structure, initialized in loader, containing program configuration of MPU regions in form of ready-to-copy register values * pmap_t structure contains a pointer to the above structure instead of regions bitmask * pmap_switch disables MPU and performs full reconfiguration, optimized with LDMIA/STMIA assembly operations * handling of process's kernel-code access is moved to loader JIRA: RTOS-1149 --- hal/aarch64/pmap.c | 2 +- hal/armv7a/pmap.c | 2 +- hal/armv7m/arch/pmap.h | 3 +- hal/armv7m/pmap.c | 155 ++++++++++------------- hal/armv7r/arch/pmap.h | 3 +- hal/armv7r/pmap.c | 152 +++++++++------------- hal/armv8m/arch/pmap.h | 3 +- hal/armv8m/mcx/n94x/config.h | 2 + hal/armv8m/pmap.c | 161 +++++++++--------------- hal/armv8r/pmap.c | 2 +- hal/ia32/pmap.c | 2 +- hal/pmap.h | 5 +- hal/riscv64/pmap.c | 2 +- hal/sparcv8leon/pmap-nommu.c | 8 +- hal/sparcv8leon/pmap.c | 2 +- include/arch/aarch64/zynqmp/syspage.h | 3 + include/arch/armv7a/imx6ull/syspage.h | 4 + include/arch/armv7a/zynq7000/syspage.h | 3 + include/arch/armv7m/imxrt/syspage.h | 19 ++- include/arch/armv7m/stm32/syspage.h | 19 ++- include/arch/armv7r/tda4vm/syspage.h | 19 ++- include/arch/armv7r/zynqmp/syspage.h | 19 ++- include/arch/armv8m/mcx/syspage.h | 19 ++- include/arch/armv8m/nrf/syspage.h | 19 ++- include/arch/armv8m/stm32/syspage.h | 20 ++- 
include/arch/armv8r/mps3an536/syspage.h | 3 + include/arch/ia32/syspage.h | 2 + include/arch/riscv64/syspage.h | 4 + include/arch/sparcv8leon/syspage.h | 4 + include/syspage.h | 4 +- proc/process.c | 22 +--- vm/map.c | 4 +- 32 files changed, 325 insertions(+), 366 deletions(-) diff --git a/hal/aarch64/pmap.c b/hal/aarch64/pmap.c index 4f091d81c..6c5d327eb 100644 --- a/hal/aarch64/pmap.c +++ b/hal/aarch64/pmap.c @@ -273,7 +273,7 @@ static void _pmap_cacheOpAfterChange(descr_t newEntry, ptr_t vaddr, unsigned int /* Function creates empty page table */ -int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr) +int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { pmap->ttl1 = vaddr; pmap->addr = p->addr; diff --git a/hal/armv7a/pmap.c b/hal/armv7a/pmap.c index b6c9f2f52..8c00823d4 100644 --- a/hal/armv7a/pmap.c +++ b/hal/armv7a/pmap.c @@ -187,7 +187,7 @@ static void _pmap_asidDealloc(pmap_t *pmap) /* Function creates empty page table */ -int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr) +int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { pmap->pdir = vaddr; pmap->addr = p->addr; diff --git a/hal/armv7m/arch/pmap.h b/hal/armv7m/arch/pmap.h index 570a5a9f2..da4229c56 100644 --- a/hal/armv7m/arch/pmap.h +++ b/hal/armv7m/arch/pmap.h @@ -17,6 +17,7 @@ #define _PH_HAL_PMAP_ARMV7M_H_ #include "hal/types.h" +#include "syspage.h" /* Architecture dependent page attributes - used for mapping */ #define PGHD_PRESENT 0x01U @@ -55,7 +56,7 @@ typedef struct _page_t { typedef struct _pmap_t { void *start; void *end; - u32 regions; + const hal_syspage_prog_t *hal; } pmap_t; #endif diff --git a/hal/armv7m/pmap.c b/hal/armv7m/pmap.c index 61bfb24a4..1718fd685 100644 --- a/hal/armv7m/pmap.c +++ b/hal/armv7m/pmap.c @@ -17,9 +17,14 @@ #include "config.h" #include "syspage.h" #include "halsyspage.h" +#include "lib/lib.h" #include #include + +#define MPU_BASE ((volatile 
u32 *)0xe000ed90U) + + /* clang-format off */ enum { mpu_type, mpu_ctrl, mpu_rnr, mpu_rbar, mpu_rasr, mpu_rbar_a1, mpu_rasr_a1, mpu_rbar_a2, mpu_rasr_a2, mpu_rbar_a3, mpu_rasr_a3 }; @@ -36,15 +41,20 @@ extern void *_init_vectors; static struct { volatile u32 *mpu; - unsigned int kernelCodeRegion; spinlock_t lock; + unsigned int lastMPUCount; } pmap_common; /* Function creates empty page table */ -int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr) +int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { - pmap->regions = pmap_common.kernelCodeRegion; + if (prog != NULL) { + pmap->hal = &prog->hal; + } + else { + pmap->hal = NULL; + } return 0; } @@ -55,55 +65,42 @@ addr_t pmap_destroy(pmap_t *pmap, unsigned int *i) } -static unsigned int pmap_map2region(unsigned int map) +/* parasoft-suppress-next-line MISRAC2012-DIR_4_3-a "Optimized context switching code" */ +void pmap_switch(pmap_t *pmap) { + const volatile u32 *RBAR_ADDR = MPU_BASE + mpu_rbar; + unsigned int allocCnt; + spinlock_ctx_t sc; unsigned int i; - unsigned int mask = 0; - - for (i = 0; i < sizeof(syspage->hs.mpu.map) / sizeof(*syspage->hs.mpu.map); ++i) { - if (map == syspage->hs.mpu.map[i]) { - mask |= (1UL << i); - } - } - - return mask; -} - + const u32 *tableCurrent; -int pmap_addMap(pmap_t *pmap, unsigned int map) -{ - unsigned int rmask = pmap_map2region(map); - if (rmask == 0U) { - return -1; - } + if ((pmap != NULL) && (pmap->hal != NULL)) { + hal_spinlockSet(&pmap_common.lock, &sc); - pmap->regions |= rmask; + allocCnt = pmap->hal->mpu.allocCnt; + tableCurrent = &pmap->hal->mpu.table[0].rbar; - return 0; -} + /* Disable MPU */ + hal_cpuDataMemoryBarrier(); + *(pmap_common.mpu + mpu_ctrl) &= ~1U; + + for (i = 0; i < max(allocCnt, pmap_common.lastMPUCount); i += 4U) { + /* region number update is done by writes to RBAR */ + __asm__ volatile( + "ldmia %[tableCurrent]!, {r3-r8, r10, r11} \n\t" /* Load 4 regions (rbar/rasr pairs) from 
table, update table pointer */ + "stmia %[mpu_rbar], {r3-r8, r10, r11} \n\t" /* Write 4 regions via RBAR/RASR and aliases */ + : [tableCurrent] "+&r"(tableCurrent) + : [mpu_rbar] "r"(RBAR_ADDR) + : "r3", "r4", "r5", "r6", "r7", "r8", "r10", "r11"); + } + /* Enable MPU */ + *(pmap_common.mpu + mpu_ctrl) |= 1U; + hal_cpuDataSyncBarrier(); + hal_cpuInstrBarrier(); -void pmap_switch(pmap_t *pmap) -{ - unsigned int i, cnt = syspage->hs.mpu.allocCnt; - spinlock_ctx_t sc; + pmap_common.lastMPUCount = allocCnt; - if (pmap != NULL) { - hal_spinlockSet(&pmap_common.lock, &sc); - for (i = 0; i < cnt; ++i) { - /* Select region */ - *(pmap_common.mpu + mpu_rnr) = i; - hal_cpuDataMemoryBarrier(); - - /* Enable/disable region according to the mask */ - if ((pmap->regions & (1UL << i)) != 0U) { - *(pmap_common.mpu + mpu_rasr) |= 1U; - } - else { - *(pmap_common.mpu + mpu_rasr) &= ~1U; - } - hal_cpuDataMemoryBarrier(); - } hal_spinlockClear(&pmap_common.lock, &sc); } } @@ -129,14 +126,23 @@ addr_t pmap_resolve(pmap_t *pmap, void *vaddr) int pmap_isAllowed(pmap_t *pmap, const void *vaddr, size_t size) { + unsigned int i; const syspage_map_t *map = syspage_mapAddrResolve((addr_t)vaddr); - unsigned int rmask; if (map == NULL) { return 0; } - rmask = pmap_map2region(map->id); - return ((pmap->regions & rmask) == 0U) ? 
0 : 1; + if (pmap->hal == NULL) { + /* Kernel pmap has access to everything */ + return 1; + } + + for (i = 0; i < pmap->hal->mpu.allocCnt; ++i) { + if (pmap->hal->mpu.map[i] == map->id) { + return 1; + } + } + return 0; } @@ -175,11 +181,8 @@ int pmap_segment(unsigned int i, void **vaddr, size_t *size, vm_prot_t *prot, vo /* parasoft-suppress-next-line MISRAC2012-DIR_4_3 "Assembly is required for low-level operations" */ void _pmap_init(pmap_t *pmap, void **vstart, void **vend) { - const syspage_map_t *ikmap; - unsigned int ikregion; - u32 t; - addr_t pc; - unsigned int i, cnt = syspage->hs.mpu.allocCnt; + unsigned int cnt = (syspage->hs.mpuType >> 8U) & 0xffU; + unsigned int i; (*vstart) = (void *)(((ptr_t)_init_vectors + 7U) & ~7U); (*vend) = (*((char **)vstart)) + SIZE_PAGE; @@ -189,8 +192,11 @@ void _pmap_init(pmap_t *pmap, void **vstart, void **vend) /* Initial size of kernel map */ pmap->end = (void *)((addr_t)&__bss_start + 32U * 1024U); + pmap->hal = NULL; + pmap_common.lastMPUCount = cnt; + /* Configure MPU */ - pmap_common.mpu = (void *)0xe000ed90U; + pmap_common.mpu = MPU_BASE; /* Disable MPU just in case */ *(pmap_common.mpu + mpu_ctrl) &= ~1U; @@ -201,17 +207,11 @@ void _pmap_init(pmap_t *pmap, void **vstart, void **vend) hal_cpuDataMemoryBarrier(); for (i = 0; i < cnt; ++i) { - t = syspage->hs.mpu.table[i].rbar; - if ((t & (1UL << 4)) == 0U) { - continue; - } - - *(pmap_common.mpu + mpu_rbar) = t; - hal_cpuDataMemoryBarrier(); + /* Select region */ + *(pmap_common.mpu + mpu_rnr) = i; - /* Disable regions for now */ - t = syspage->hs.mpu.table[i].rasr & ~1U; - *(pmap_common.mpu + mpu_rasr) = t; + /* Disable all regions for now */ + *(pmap_common.mpu + mpu_rasr) = 0U; hal_cpuDataMemoryBarrier(); } @@ -219,34 +219,5 @@ void _pmap_init(pmap_t *pmap, void **vstart, void **vend) *(pmap_common.mpu + mpu_ctrl) |= 1U; hal_cpuDataMemoryBarrier(); - /* FIXME HACK - * allow all programs to execute (and read) kernel code map. 
- * Needed because of hal_jmp, syscalls handler and signals handler. - * In these functions we need to switch to the user mode when still - * executing kernel code. This will cause memory management fault - * if the application does not have access to the kernel instruction - * map. Possible fix - place return to the user code in the separate - * region and allow this region instead. */ - - /* Find kernel code region */ - __asm__ volatile("\tmov %0, pc;" : "=r"(pc)); - ikmap = syspage_mapAddrResolve(pc); - if (ikmap == NULL) { - hal_consolePrint(ATTR_BOLD, "pmap: Kernel code map not found. Bad system config\n"); - for (;;) { - hal_cpuHalt(); - } - } - - ikregion = pmap_map2region(ikmap->id); - if (ikregion == 0U) { - hal_consolePrint(ATTR_BOLD, "pmap: Kernel code map has no assigned region. Bad system config\n"); - for (;;) { - hal_cpuHalt(); - } - } - - pmap_common.kernelCodeRegion = ikregion; - hal_spinlockCreate(&pmap_common.lock, "pmap"); } diff --git a/hal/armv7r/arch/pmap.h b/hal/armv7r/arch/pmap.h index 7704082cd..a35c1c056 100644 --- a/hal/armv7r/arch/pmap.h +++ b/hal/armv7r/arch/pmap.h @@ -17,6 +17,7 @@ #define _PH_HAL_PMAP_ARMV7R_H_ #include "hal/types.h" +#include "syspage.h" #define PGHD_PRESENT 0x01U #define PGHD_USER 0x04U @@ -54,7 +55,7 @@ typedef struct _page_t { typedef struct _pmap_t { void *start; void *end; - u32 regions; + const hal_syspage_prog_t *hal; } pmap_t; #endif diff --git a/hal/armv7r/pmap.c b/hal/armv7r/pmap.c index 1e29c7f42..9ad18bfbb 100644 --- a/hal/armv7r/pmap.c +++ b/hal/armv7r/pmap.c @@ -33,9 +33,9 @@ u8 _init_stack[NUM_CPUS][SIZE_INITIAL_KSTACK] __attribute__((aligned(8))); static struct { - unsigned int kernelCodeRegion; spinlock_t lock; - int mpu_enabled; + int mpuEnabled; + unsigned int lastMPUCount[NUM_CPUS]; } pmap_common; @@ -104,9 +104,14 @@ static void pmap_mpu_disable(void) /* Function creates empty page table */ -int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr) +int pmap_create(pmap_t *pmap, 
pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { - pmap->regions = pmap_common.kernelCodeRegion; + if (prog != NULL) { + pmap->hal = &prog->hal; + } + else { + pmap->hal = NULL; + } return 0; } @@ -117,60 +122,43 @@ addr_t pmap_destroy(pmap_t *pmap, unsigned int *i) } -static unsigned int pmap_map2region(unsigned int map) +void pmap_switch(pmap_t *pmap) { - if (pmap_common.mpu_enabled == 0) { - return 1; - } - + const hal_syspage_prog_t *hal; + unsigned int allocCnt; + spinlock_ctx_t sc; unsigned int i; - unsigned int mask = 0U; - - for (i = 0U; i < sizeof(syspage->hs.mpu.map) / sizeof(*syspage->hs.mpu.map); ++i) { - if (map == syspage->hs.mpu.map[i]) { - mask |= (1UL << i); - } - } - - return mask; -} - -int pmap_addMap(pmap_t *pmap, unsigned int map) -{ - if (pmap_common.mpu_enabled == 0) { - return 0; - } - - unsigned int rmask = pmap_map2region(map); - if (rmask == 0U) { - return -1; + if (pmap_common.mpuEnabled == 0) { + return; } - pmap->regions |= rmask; - - return 0; -} + if (pmap != NULL && pmap->hal != NULL) { + hal_spinlockSet(&pmap_common.lock, &sc); + hal = pmap->hal; + allocCnt = hal->mpu.allocCnt; -void pmap_switch(pmap_t *pmap) -{ - unsigned int i, cnt = syspage->hs.mpu.allocCnt; - spinlock_ctx_t sc; - if (pmap_common.mpu_enabled == 0) { - return; - } + /* Disable MPU */ + pmap_mpu_disable(); - if (pmap != NULL) { - hal_spinlockSet(&pmap_common.lock, &sc); - for (i = 0; i < cnt; ++i) { - /* Select region */ + for (i = 0; i < allocCnt; ++i) { pmap_mpu_setMemRegionNumber(i); + pmap_mpu_setMemRegionRbar(hal->mpu.table[i].rbar); + pmap_mpu_setMemRegionRasr(hal->mpu.table[i].rasr); + } - /* Enable/disable region according to the mask */ - pmap_mpu_setMemRegionStatus(((pmap->regions & (1UL << i)) != 0U) ? 
1 : 0); + /* Disable all remaining regions */ + for (; i < pmap_common.lastMPUCount[hal_cpuGetID()]; i++) { + pmap_mpu_setMemRegionNumber(i); + pmap_mpu_setMemRegionStatus(0); } + /* Enable MPU */ + pmap_mpu_enable(); + + pmap_common.lastMPUCount[hal_cpuGetID()] = allocCnt; + hal_spinlockClear(&pmap_common.lock, &sc); } } @@ -196,9 +184,10 @@ addr_t pmap_resolve(pmap_t *pmap, void *vaddr) int pmap_isAllowed(pmap_t *pmap, const void *vaddr, size_t size) { + unsigned int i; const syspage_map_t *map; - unsigned int rmask; - if (pmap_common.mpu_enabled == 0) { + + if (pmap_common.mpuEnabled == 0) { return 1; } @@ -206,9 +195,18 @@ int pmap_isAllowed(pmap_t *pmap, const void *vaddr, size_t size) if (map == NULL) { return 0; } - rmask = pmap_map2region(map->id); - return ((pmap->regions & rmask) == 0U) ? 0 : 1; + if (pmap->hal == NULL) { + /* Kernel pmap has access to everything */ + return 1; + } + + for (i = 0; i < pmap->hal->mpu.allocCnt; ++i) { + if (pmap->hal->mpu.map[i] == map->id) { + return 1; + } + } + return 0; } @@ -246,12 +244,8 @@ int pmap_segment(unsigned int i, void **vaddr, size_t *size, vm_prot_t *prot, vo void _pmap_init(pmap_t *pmap, void **vstart, void **vend) { - const syspage_map_t *ikmap; - unsigned int ikregion; - u32 t; + unsigned int cnt = (syspage->hs.mpuType >> 8U) & 0xffU; unsigned int i; - unsigned int cnt = syspage->hs.mpu.allocCnt; - *vstart = (void *)(((ptr_t)&_end + 7U) & ~7U); *vend = (*((char **)vstart)) + SIZE_PAGE; @@ -261,62 +255,28 @@ void _pmap_init(pmap_t *pmap, void **vstart, void **vend) pmap->end = (void *)((addr_t)&__bss_start + 32U * 1024U); - pmap->regions = (1UL << cnt) - 1U; + pmap->hal = NULL; + for (i = 0; i < (unsigned int)NUM_CPUS; i++) { + pmap_common.lastMPUCount[i] = cnt; + } if (cnt == 0U) { hal_spinlockCreate(&pmap_common.lock, "pmap"); - pmap_common.mpu_enabled = 0; - pmap_common.kernelCodeRegion = 0; + pmap_common.mpuEnabled = 0; return; } - pmap_common.mpu_enabled = 1; + pmap_common.mpuEnabled = 1; /* Disable 
MPU that may have been enabled before */ pmap_mpu_disable(); for (i = 0; i < cnt; ++i) { pmap_mpu_setMemRegionNumber(i); - t = syspage->hs.mpu.table[i].rbar; - if ((t & (0x1U << 4)) == 0U) { - continue; - } - - pmap_mpu_setMemRegionRbar(t); - pmap_mpu_setMemRegionRasr(syspage->hs.mpu.table[i].rasr); /* Enable all regions */ + pmap_mpu_setMemRegionStatus(0); } /* Enable MPU */ pmap_mpu_enable(); - - /* FIXME HACK - * allow all programs to execute (and read) kernel code map. - * Needed because of hal_jmp, syscalls handler and signals handler. - * In these functions we need to switch to the user mode when still - * executing kernel code. This will cause memory management fault - * if the application does not have access to the kernel instruction - * map. Possible fix - place return to the user code in the separate - * region and allow this region instead. */ - - /* Find kernel code region */ - /* parasoft-suppress-next-line MISRAC2012-RULE_11_1 "We need address of this function in numeric type" */ - ikmap = syspage_mapAddrResolve((addr_t)_pmap_init); - if (ikmap == NULL) { - hal_consolePrint(ATTR_BOLD, "pmap: Kernel code map not found. Bad system config\n"); - for (;;) { - hal_cpuHalt(); - } - } - - ikregion = pmap_map2region(ikmap->id); - if (ikregion == 0U) { - hal_consolePrint(ATTR_BOLD, "pmap: Kernel code map has no assigned region. 
Bad system config\n"); - for (;;) { - hal_cpuHalt(); - } - } - - pmap_common.kernelCodeRegion = ikregion; - hal_spinlockCreate(&pmap_common.lock, "pmap"); } diff --git a/hal/armv8m/arch/pmap.h b/hal/armv8m/arch/pmap.h index f68142162..5507118d3 100644 --- a/hal/armv8m/arch/pmap.h +++ b/hal/armv8m/arch/pmap.h @@ -17,6 +17,7 @@ #define _PH_HAL_PMAP_ARMV8M_H_ #include "hal/types.h" +#include "syspage.h" #define PGHD_PRESENT 0x01U #define PGHD_USER 0x04U @@ -54,7 +55,7 @@ typedef struct _page_t { typedef struct _pmap_t { void *start; void *end; - u32 regions; + const hal_syspage_prog_t *hal; } pmap_t; #endif diff --git a/hal/armv8m/mcx/n94x/config.h b/hal/armv8m/mcx/n94x/config.h index 4c45912ba..47a8c3cd6 100644 --- a/hal/armv8m/mcx/n94x/config.h +++ b/hal/armv8m/mcx/n94x/config.h @@ -20,7 +20,9 @@ #ifndef __ASSEMBLY__ +#include "hal/types.h" #include "include/arch/armv8m/mcx/syspage.h" +#include "include/syspage.h" #include "mcxn94x.h" #define HAL_NAME_PLATFORM "MCX N94x " diff --git a/hal/armv8m/pmap.c b/hal/armv8m/pmap.c index 4fbd8ef37..abccd45e8 100644 --- a/hal/armv8m/pmap.c +++ b/hal/armv8m/pmap.c @@ -17,6 +17,7 @@ #include "config.h" #include "syspage.h" #include "halsyspage.h" +#include "lib/lib.h" #include #include @@ -43,16 +44,21 @@ extern void *_init_vectors; static struct { volatile u32 *mpu; - unsigned int kernelCodeRegion; spinlock_t lock; - int mpu_enabled; + int mpuEnabled; + unsigned int lastMPUCount; } pmap_common; /* Function creates empty page table */ -int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr) +int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { - pmap->regions = pmap_common.kernelCodeRegion; + if (prog != NULL) { + pmap->hal = &prog->hal; + } + else { + pmap->hal = NULL; + } return 0; } @@ -63,67 +69,46 @@ addr_t pmap_destroy(pmap_t *pmap, unsigned int *i) } -static unsigned int pmap_map2region(unsigned int map) +/* parasoft-suppress-next-line MISRAC2012-DIR_4_3-a 
"Optimized context switching code" */ +void pmap_switch(pmap_t *pmap) { + const volatile u32 *RBAR_ADDR = MPU_BASE + mpu_rbar; + unsigned int allocCnt; + spinlock_ctx_t sc; unsigned int i; - unsigned int mask = 0; - - if (pmap_common.mpu_enabled == 0) { - return 1; - } + const u32 *tableCurrent; - for (i = 0; i < sizeof(syspage->hs.mpu.map) / sizeof(*syspage->hs.mpu.map); ++i) { - if (map == syspage->hs.mpu.map[i]) { - mask |= (1UL << i); - } + if (pmap_common.mpuEnabled == 0) { + return; } - return mask; -} + if (pmap != NULL && pmap->hal != NULL) { + hal_spinlockSet(&pmap_common.lock, &sc); + allocCnt = pmap->hal->mpu.allocCnt; + tableCurrent = &pmap->hal->mpu.table[0].rbar; -int pmap_addMap(pmap_t *pmap, unsigned int map) -{ - unsigned int rmask; - if (pmap_common.mpu_enabled == 0) { - return 0; - } - - rmask = pmap_map2region(map); - if (rmask == 0U) { - return -1; - } + /* Disable MPU */ + hal_cpuDataMemoryBarrier(); + *(pmap_common.mpu + mpu_ctrl) &= ~1U; - pmap->regions |= rmask; + for (i = 0; i < max(allocCnt, pmap_common.lastMPUCount); i += 4U) { + *(pmap_common.mpu + mpu_rnr) = i; + __asm__ volatile( + "ldmia %[tableCurrent]!, {r3-r8, r10, r11} \n\t" /* Load 4 regions (rbar/rlar pairs) from table, update table pointer */ + "stmia %[mpu_rbar], {r3-r8, r10, r11} \n\t" /* Write 4 regions via RBAR/RLAR and aliases */ + : [tableCurrent] "+r"(tableCurrent) + : [mpu_rbar] "r"(RBAR_ADDR) + : "r3", "r4", "r5", "r6", "r7", "r8", "r10", "r11"); + } - return 0; -} + /* Enable MPU */ + *(pmap_common.mpu + mpu_ctrl) |= 1U; + hal_cpuDataSyncBarrier(); + hal_cpuInstrBarrier(); + pmap_common.lastMPUCount = allocCnt; -void pmap_switch(pmap_t *pmap) -{ - unsigned int i, cnt = syspage->hs.mpu.allocCnt; - spinlock_ctx_t sc; - if (pmap_common.mpu_enabled == 0) { - return; - } - - if (pmap != NULL) { - hal_spinlockSet(&pmap_common.lock, &sc); - for (i = 0; i < cnt; ++i) { - /* Select region */ - *(pmap_common.mpu + mpu_rnr) = i; - hal_cpuDataMemoryBarrier(); - - /* 
Enable/disable region according to the mask */ - if ((pmap->regions & (1UL << i)) != 0UL) { - *(pmap_common.mpu + mpu_rlar) |= 1U; - } - else { - *(pmap_common.mpu + mpu_rlar) &= ~1U; - } - hal_cpuDataMemoryBarrier(); - } hal_spinlockClear(&pmap_common.lock, &sc); } } @@ -149,8 +134,8 @@ addr_t pmap_resolve(pmap_t *pmap, void *vaddr) int pmap_isAllowed(pmap_t *pmap, const void *vaddr, size_t size) { + unsigned int i; const syspage_map_t *map = syspage_mapAddrResolve((addr_t)vaddr); - unsigned int rmask; addr_t addr_end = (addr_t)vaddr + size; /* Check for potential arithmetic overflow. `addr_end` is allowed to be 0, * as it represents the top of memory. */ @@ -158,13 +143,21 @@ int pmap_isAllowed(pmap_t *pmap, const void *vaddr, size_t size) return 0; } - if (pmap_common.mpu_enabled == 0) { + if (pmap_common.mpuEnabled == 0) { return 1; } - rmask = pmap_map2region(map->id); + if (pmap->hal == NULL) { + /* Kernel pmap has access to everything */ + return 1; + } - return ((pmap->regions & rmask) != 0U) ? 
1 : 0; + for (i = 0; i < pmap->hal->mpu.allocCnt; ++i) { + if (pmap->hal->mpu.map[i] == map->id) { + return 1; + } + } + return 0; } @@ -202,9 +195,10 @@ int pmap_segment(unsigned int i, void **vaddr, size_t *size, vm_prot_t *prot, vo void _pmap_init(pmap_t *pmap, void **vstart, void **vend) { - const syspage_map_t *ikmap; - unsigned int ikregion; - unsigned int i, cnt = syspage->hs.mpu.allocCnt; + unsigned int cnt = min( + (syspage->hs.mpuType >> 8U) & 0xffU, + sizeof(pmap->hal->mpu.table) / sizeof(pmap->hal->mpu.table[0])); + unsigned int i; (*vstart) = (void *)(((ptr_t)_init_vectors + 7U) & ~7U); (*vend) = (*((char **)vstart)) + SIZE_PAGE; @@ -214,20 +208,19 @@ void _pmap_init(pmap_t *pmap, void **vstart, void **vend) /* Initial size of kernel map */ pmap->end = (void *)((addr_t)&__bss_start + 32U * 1024U); - /* Enable all regions for kernel */ - pmap->regions = (1UL << cnt) - 1UL; + pmap->hal = NULL; + pmap_common.lastMPUCount = cnt; /* Configure MPU */ pmap_common.mpu = MPU_BASE; hal_spinlockCreate(&pmap_common.lock, "pmap"); if (cnt == 0U) { - pmap_common.mpu_enabled = 0; - pmap_common.kernelCodeRegion = 0; + pmap_common.mpuEnabled = 0; return; } - pmap_common.mpu_enabled = 1; + pmap_common.mpuEnabled = 1; /* Disable MPU just in case */ *(pmap_common.mpu + mpu_ctrl) &= ~1U; @@ -241,42 +234,12 @@ void _pmap_init(pmap_t *pmap, void **vstart, void **vend) for (i = 0; i < cnt; ++i) { /* Select MPU region to configure */ *(pmap_common.mpu + mpu_rnr) = i; - hal_cpuDataMemoryBarrier(); - - *(pmap_common.mpu + mpu_rbar) = syspage->hs.mpu.table[i].rbar; - hal_cpuDataMemoryBarrier(); - /* Disable regions for now */ - *(pmap_common.mpu + mpu_rlar) = syspage->hs.mpu.table[i].rlar & ~1U; - hal_cpuDataMemoryBarrier(); + /* Disable all regions for now */ + *(pmap_common.mpu + mpu_rlar) = 0U; } /* Enable MPU */ *(pmap_common.mpu + mpu_ctrl) |= 1U; - hal_cpuDataMemoryBarrier(); - - /* FIXME HACK - * allow all programs to execute (and read) kernel code map. 
- * Needed because of hal_jmp, syscalls handler and signals handler. - * In these functions we need to switch to the user mode when still - * executing kernel code. This will cause memory management fault - * if the application does not have access to the kernel instruction - * map. Possible fix - place return to the user code in the separate - * region and allow this region instead. */ - - /* Find kernel code region */ - /* parasoft-suppress-next-line MISRAC2012-RULE_11_1 "We need address of this function in numeric type" */ - ikmap = syspage_mapAddrResolve((addr_t)_pmap_init); - if (ikmap != NULL) { - ikregion = pmap_map2region(ikmap->id); - } - - if ((ikmap == NULL) || (ikregion == 0U)) { - hal_consolePrint(ATTR_BOLD, "pmap: Kernel code map not found or has no regions. Bad system config\n"); - for (;;) { - hal_cpuHalt(); - } - } - - pmap_common.kernelCodeRegion = ikregion; + hal_cpuDataSyncBarrier(); } diff --git a/hal/armv8r/pmap.c b/hal/armv8r/pmap.c index 00677aad0..e5aaff90d 100644 --- a/hal/armv8r/pmap.c +++ b/hal/armv8r/pmap.c @@ -29,7 +29,7 @@ u8 _init_stack[NUM_CPUS][SIZE_INITIAL_KSTACK] __attribute__((aligned(8))); /* Function creates empty page table */ -int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr) +int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { return 0; } diff --git a/hal/ia32/pmap.c b/hal/ia32/pmap.c index e44342f46..6e4744b01 100644 --- a/hal/ia32/pmap.c +++ b/hal/ia32/pmap.c @@ -39,7 +39,7 @@ static struct { /* Function creates empty page table */ -int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr) +int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { u32 i, pages; pmap->pdir = vaddr; diff --git a/hal/pmap.h b/hal/pmap.h index e2968afb3..6885928d4 100644 --- a/hal/pmap.h +++ b/hal/pmap.h @@ -20,6 +20,7 @@ #include "lib/attrs.h" #include +#include "syspage.h" #ifndef NOMMU @@ -31,14 +32,12 @@ MAYBE_UNUSED 
static inline int pmap_belongs(pmap_t *pmap, void *addr) #else -int pmap_addMap(pmap_t *pmap, unsigned int map); - int pmap_isAllowed(pmap_t *pmap, const void *vaddr, size_t size); #endif -int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr); +int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr); addr_t pmap_destroy(pmap_t *pmap, unsigned int *i); diff --git a/hal/riscv64/pmap.c b/hal/riscv64/pmap.c index 305e5b241..1eda3e8a7 100644 --- a/hal/riscv64/pmap.c +++ b/hal/riscv64/pmap.c @@ -101,7 +101,7 @@ addr_t pmap_getKernelStart(void) /* Function creates empty page table */ -int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr) +int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { unsigned int i, pages; ptr_t va; diff --git a/hal/sparcv8leon/pmap-nommu.c b/hal/sparcv8leon/pmap-nommu.c index efbdce542..1014c00d8 100644 --- a/hal/sparcv8leon/pmap-nommu.c +++ b/hal/sparcv8leon/pmap-nommu.c @@ -28,7 +28,7 @@ extern void *_init_stack; /* Function creates empty page table */ -int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr) +int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { return 0; } @@ -40,12 +40,6 @@ addr_t pmap_destroy(pmap_t *pmap, unsigned int *i) } -int pmap_addMap(pmap_t *pmap, unsigned int map) -{ - return 0; -} - - void pmap_switch(pmap_t *pmap) { return; diff --git a/hal/sparcv8leon/pmap.c b/hal/sparcv8leon/pmap.c index 18fdf74b1..a2f1aab84 100644 --- a/hal/sparcv8leon/pmap.c +++ b/hal/sparcv8leon/pmap.c @@ -198,7 +198,7 @@ static void _pmap_contextDealloc(pmap_t *pmap) /* Function creates empty page table */ -int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr) +int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { pmap->pdir1 = vaddr; pmap->context = CONTEXT_INVALID; diff --git 
a/include/arch/aarch64/zynqmp/syspage.h b/include/arch/aarch64/zynqmp/syspage.h index 2abbc61be..11c3f04a3 100644 --- a/include/arch/aarch64/zynqmp/syspage.h +++ b/include/arch/aarch64/zynqmp/syspage.h @@ -21,4 +21,7 @@ typedef struct { } __attribute__((packed)) hal_syspage_t; +typedef struct { +} hal_syspage_prog_t; + #endif diff --git a/include/arch/armv7a/imx6ull/syspage.h b/include/arch/armv7a/imx6ull/syspage.h index 1543fc8bc..e86ca6da6 100644 --- a/include/arch/armv7a/imx6ull/syspage.h +++ b/include/arch/armv7a/imx6ull/syspage.h @@ -21,4 +21,8 @@ typedef struct { int dummy; } __attribute__((packed)) hal_syspage_t; + +typedef struct { +} hal_syspage_prog_t; + #endif diff --git a/include/arch/armv7a/zynq7000/syspage.h b/include/arch/armv7a/zynq7000/syspage.h index e312c7184..fba674a3e 100644 --- a/include/arch/armv7a/zynq7000/syspage.h +++ b/include/arch/armv7a/zynq7000/syspage.h @@ -21,4 +21,7 @@ typedef struct { } __attribute__((packed)) hal_syspage_t; +typedef struct { +} hal_syspage_prog_t; + #endif diff --git a/include/arch/armv7m/imxrt/syspage.h b/include/arch/armv7m/imxrt/syspage.h index ae1dcee8f..303f3c41f 100644 --- a/include/arch/armv7m/imxrt/syspage.h +++ b/include/arch/armv7m/imxrt/syspage.h @@ -18,16 +18,25 @@ #define _PH_SYSPAGE_IMXRT_H_ +#ifndef MPU_MAX_REGIONS +#define MPU_MAX_REGIONS 16 +#endif + + typedef struct { struct { - unsigned int type; - unsigned int allocCnt; struct { unsigned int rbar; unsigned int rasr; - } table[16] __attribute__((aligned(8))); - unsigned int map[16]; /* ((unsigned int)-1) = map is not assigned */ - } __attribute__((packed)) mpu; + } table[MPU_MAX_REGIONS] __attribute__((aligned(8))); + unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ + unsigned int allocCnt; + } mpu; +} hal_syspage_prog_t; + + +typedef struct { + unsigned int mpuType; unsigned int bootReason; } __attribute__((packed)) hal_syspage_t; diff --git a/include/arch/armv7m/stm32/syspage.h 
b/include/arch/armv7m/stm32/syspage.h index aaa8ef7cc..23fcba662 100644 --- a/include/arch/armv7m/stm32/syspage.h +++ b/include/arch/armv7m/stm32/syspage.h @@ -18,16 +18,25 @@ #define _PH_SYSPAGE_STM32_H_ +#ifndef MPU_MAX_REGIONS +#define MPU_MAX_REGIONS 16 +#endif + + typedef struct { struct { - unsigned int type; - unsigned int allocCnt; struct { unsigned int rbar; unsigned int rasr; - } table[16] __attribute__((aligned(8))); - unsigned int map[16]; /* ((unsigned int)-1) = map is not assigned */ - } __attribute__((packed)) mpu; + } table[MPU_MAX_REGIONS] __attribute__((aligned(8))); + unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ + unsigned int allocCnt; + } mpu; +} hal_syspage_prog_t; + + +typedef struct { + unsigned int mpuType; unsigned int bootReason; } __attribute__((packed)) hal_syspage_t; diff --git a/include/arch/armv7r/tda4vm/syspage.h b/include/arch/armv7r/tda4vm/syspage.h index eae556bb5..1b988a28c 100644 --- a/include/arch/armv7r/tda4vm/syspage.h +++ b/include/arch/armv7r/tda4vm/syspage.h @@ -17,17 +17,26 @@ #define _PH_SYSPAGE_ARMV7R_TDA4VM_H_ +#ifndef MPU_MAX_REGIONS +#define MPU_MAX_REGIONS 16 +#endif + + typedef struct { - int resetReason; struct { - unsigned int type; - unsigned int allocCnt; struct { unsigned int rbar; unsigned int rasr; - } table[16] __attribute__((aligned(8))); - unsigned int map[16]; /* ((unsigned int)-1) = map is not assigned */ + } table[MPU_MAX_REGIONS] __attribute__((aligned(8))); + unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ + unsigned int allocCnt; } __attribute__((packed)) mpu; +} __attribute__((packed)) hal_syspage_prog_t; + + +typedef struct { + int resetReason; + unsigned int mpuType; } __attribute__((packed)) hal_syspage_t; diff --git a/include/arch/armv7r/zynqmp/syspage.h b/include/arch/armv7r/zynqmp/syspage.h index 31c94eecc..14e2731de 100644 --- a/include/arch/armv7r/zynqmp/syspage.h +++ b/include/arch/armv7r/zynqmp/syspage.h @@ -17,17 
+17,26 @@ #define _PH_SYSPAGE_ARMV7R_ZYNQMP_H_ +#ifndef MPU_MAX_REGIONS +#define MPU_MAX_REGIONS 16 +#endif + + typedef struct { - int resetReason; struct { - unsigned int type; - unsigned int allocCnt; struct { unsigned int rbar; unsigned int rasr; - } table[16] __attribute__((aligned(8))); - unsigned int map[16]; /* ((unsigned int)-1) = map is not assigned */ + } table[MPU_MAX_REGIONS] __attribute__((aligned(8))); + unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ + unsigned int allocCnt; } __attribute__((packed)) mpu; +} __attribute__((packed)) hal_syspage_prog_t; + + +typedef struct { + int resetReason; + unsigned int mpuType; } __attribute__((packed)) hal_syspage_t; diff --git a/include/arch/armv8m/mcx/syspage.h b/include/arch/armv8m/mcx/syspage.h index 2f6721505..d35c5952a 100644 --- a/include/arch/armv8m/mcx/syspage.h +++ b/include/arch/armv8m/mcx/syspage.h @@ -18,16 +18,25 @@ #define _PH_SYSPAGE_MCXN94X_H_ +#ifndef MPU_MAX_REGIONS +#define MPU_MAX_REGIONS 16 +#endif + + typedef struct { struct { - unsigned int type; - unsigned int allocCnt; struct { unsigned int rbar; unsigned int rlar; - } table[16] __attribute__((aligned(8))); - unsigned int map[16]; /* ((unsigned int)-1) = map is not assigned */ - } __attribute__((packed)) mpu; + } table[MPU_MAX_REGIONS] __attribute__((aligned(8))); + unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ + unsigned int allocCnt; + } mpu; +} hal_syspage_prog_t; + + +typedef struct { + unsigned int mpuType; } __attribute__((packed)) hal_syspage_t; #endif diff --git a/include/arch/armv8m/nrf/syspage.h b/include/arch/armv8m/nrf/syspage.h index 6fc79e590..2d4dd4826 100644 --- a/include/arch/armv8m/nrf/syspage.h +++ b/include/arch/armv8m/nrf/syspage.h @@ -18,16 +18,25 @@ #define _PH_SYSPAGE_NRF91_H_ +#ifndef MPU_MAX_REGIONS +#define MPU_MAX_REGIONS 16 +#endif + + typedef struct { struct { - unsigned int type; - unsigned int allocCnt; struct { unsigned int rbar; 
unsigned int rlar; - } table[16] __attribute__((aligned(8))); - unsigned int map[16]; /* ((unsigned int)-1) = map is not assigned */ - } __attribute__((packed)) mpu; + } table[MPU_MAX_REGIONS] __attribute__((aligned(8))); + unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ + unsigned int allocCnt; + } mpu; +} hal_syspage_prog_t; + + +typedef struct { + unsigned int mpuType; } __attribute__((packed)) hal_syspage_t; #endif diff --git a/include/arch/armv8m/stm32/syspage.h b/include/arch/armv8m/stm32/syspage.h index 1f1e50c64..da4e97e99 100644 --- a/include/arch/armv8m/stm32/syspage.h +++ b/include/arch/armv8m/stm32/syspage.h @@ -18,17 +18,25 @@ #define _PH_SYSPAGE_STM32_H_ +#ifndef MPU_MAX_REGIONS +#define MPU_MAX_REGIONS 16 +#endif + + typedef struct { struct { - unsigned int type; - unsigned int allocCnt; - unsigned int mair[2]; struct { unsigned int rbar; unsigned int rlar; - } table[16] __attribute__((aligned(8))); - unsigned int map[16]; /* ((unsigned int)-1) = map is not assigned */ - } __attribute__((packed)) mpu; + } table[MPU_MAX_REGIONS] __attribute__((aligned(8))); + unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ + unsigned int allocCnt; + } mpu; +} hal_syspage_prog_t; + + +typedef struct { + unsigned int mpuType; unsigned int bootReason; } __attribute__((packed)) hal_syspage_t; diff --git a/include/arch/armv8r/mps3an536/syspage.h b/include/arch/armv8r/mps3an536/syspage.h index 1dce97769..430656cdd 100644 --- a/include/arch/armv8r/mps3an536/syspage.h +++ b/include/arch/armv8r/mps3an536/syspage.h @@ -22,4 +22,7 @@ typedef struct { } __attribute__((packed)) hal_syspage_t; +typedef struct { +} hal_syspage_prog_t; + #endif diff --git a/include/arch/ia32/syspage.h b/include/arch/ia32/syspage.h index e143489d9..1215f317d 100644 --- a/include/arch/ia32/syspage.h +++ b/include/arch/ia32/syspage.h @@ -57,5 +57,7 @@ typedef struct { } __attribute__((packed)) graphmode; /* Graphics mode info */ } 
__attribute__((packed)) hal_syspage_t; +typedef struct { +} hal_syspage_prog_t; #endif diff --git a/include/arch/riscv64/syspage.h b/include/arch/riscv64/syspage.h index 5044f1eef..08c83f2ce 100644 --- a/include/arch/riscv64/syspage.h +++ b/include/arch/riscv64/syspage.h @@ -21,4 +21,8 @@ typedef struct { unsigned int boothartId; } __attribute__((packed)) hal_syspage_t; + +typedef struct { +} hal_syspage_prog_t; + #endif diff --git a/include/arch/sparcv8leon/syspage.h b/include/arch/sparcv8leon/syspage.h index 020190ddb..4f0479c5d 100644 --- a/include/arch/sparcv8leon/syspage.h +++ b/include/arch/sparcv8leon/syspage.h @@ -21,4 +21,8 @@ typedef struct { int dummy; } __attribute__((packed)) hal_syspage_t; + +typedef struct { +} hal_syspage_prog_t; + #endif diff --git a/include/syspage.h b/include/syspage.h index d9ce36cd8..a43ac6ea9 100644 --- a/include/syspage.h +++ b/include/syspage.h @@ -48,7 +48,9 @@ typedef struct _syspage_prog_t { size_t dmapSz; unsigned char *dmaps; -} __attribute__((packed)) syspage_prog_t; + + hal_syspage_prog_t hal; +} syspage_prog_t; typedef struct _syspage_map_t { diff --git a/proc/process.c b/proc/process.c index 0c90f9f76..79114cccf 100644 --- a/proc/process.c +++ b/proc/process.c @@ -1085,7 +1085,6 @@ static void process_exec(thread_t *current, process_spawn_t *spawn) void *stack, *entry = NULL; int err = EOK, count; void *cleanupFn = NULL; - unsigned int i = 0; spinlock_ctx_t sc; const struct stackArg args[] = { { &spawn->envp, sizeof(spawn->envp) }, @@ -1102,29 +1101,10 @@ static void process_exec(thread_t *current, process_spawn_t *spawn) if (err == EOK) { proc_changeMap(current->process, ¤t->process->map, NULL, ¤t->process->map.pmap); } - (void)i; #else - (void)pmap_create(¤t->process->map.pmap, NULL, NULL, NULL); + (void)pmap_create(¤t->process->map.pmap, NULL, NULL, spawn->prog, NULL); proc_changeMap(current->process, (spawn->map != NULL) ? 
spawn->map : process_common.kmap, spawn->imap, ¤t->process->map.pmap); current->process->entries = NULL; - - if (spawn->prog != NULL) { - /* Add instruction maps */ - for (i = 0; i < spawn->prog->imapSz; ++i) { - if (err != EOK) { - break; - } - err = pmap_addMap(current->process->pmapp, spawn->prog->imaps[i]); - } - - /* Add data/io maps */ - for (i = 0; i < spawn->prog->dmapSz; ++i) { - if (err != EOK) { - break; - } - err = pmap_addMap(current->process->pmapp, spawn->prog->dmaps[i]); - } - } #endif if (err == EOK) { diff --git a/vm/map.c b/vm/map.c index 49e476e2b..fbde7a338 100644 --- a/vm/map.c +++ b/vm/map.c @@ -1009,9 +1009,9 @@ int vm_mapCreate(vm_map_t *map, void *start, void *stop) return -ENOMEM; } - (void)pmap_create(&map->pmap, &map_common.kmap->pmap, map->pmap.pmapp, map->pmap.pmapv); + (void)pmap_create(&map->pmap, &map_common.kmap->pmap, map->pmap.pmapp, NULL, map->pmap.pmapv); #else - (void)pmap_create(&map->pmap, &map_common.kmap->pmap, NULL, NULL); + (void)pmap_create(&map->pmap, &map_common.kmap->pmap, NULL, NULL, NULL); #endif (void)proc_lockInit(&map->lock, &proc_lockAttrDefault, "map.map"); From a5a1b73a2668845b4ee4d9593208ebc0ffd5a067 Mon Sep 17 00:00:00 2001 From: Jakub Klimek Date: Thu, 18 Dec 2025 11:49:49 +0100 Subject: [PATCH 2/8] syspage: add partition abstract Add syspage_part_t struct to keep partition configuration, starting with MPU registers and arrays of maps for allocation and access. 
JIRA: RTOS-1149 --- hal/armv7m/arch/pmap.h | 2 +- hal/armv7m/pmap.c | 2 +- hal/armv7r/arch/pmap.h | 2 +- hal/armv7r/pmap.c | 10 ++++++-- hal/armv8m/arch/pmap.h | 2 +- hal/armv8m/pmap.c | 2 +- include/arch/aarch64/zynqmp/syspage.h | 3 ++- include/arch/armv7a/imx6ull/syspage.h | 3 ++- include/arch/armv7a/zynq7000/syspage.h | 3 ++- include/arch/armv7m/imxrt/syspage.h | 2 +- include/arch/armv7m/stm32/syspage.h | 2 +- include/arch/armv7r/tda4vm/syspage.h | 2 +- include/arch/armv7r/zynqmp/syspage.h | 2 +- include/arch/armv8m/mcx/syspage.h | 2 +- include/arch/armv8m/nrf/syspage.h | 2 +- include/arch/armv8m/stm32/syspage.h | 2 +- include/arch/armv8r/mps3an536/syspage.h | 3 ++- include/arch/ia32/syspage.h | 3 ++- include/arch/riscv64/syspage.h | 3 ++- include/arch/sparcv8leon/syspage.h | 3 ++- include/syspage.h | 34 +++++++++++++++++++++---- proc/process.c | 25 +++++++++++++++--- proc/process.h | 3 ++- syspage.c | 18 +++++++++++++ 24 files changed, 105 insertions(+), 30 deletions(-) diff --git a/hal/armv7m/arch/pmap.h b/hal/armv7m/arch/pmap.h index da4229c56..8372fc894 100644 --- a/hal/armv7m/arch/pmap.h +++ b/hal/armv7m/arch/pmap.h @@ -56,7 +56,7 @@ typedef struct _page_t { typedef struct _pmap_t { void *start; void *end; - const hal_syspage_prog_t *hal; + const hal_syspage_part_t *hal; } pmap_t; #endif diff --git a/hal/armv7m/pmap.c b/hal/armv7m/pmap.c index 1718fd685..93cf0d02a 100644 --- a/hal/armv7m/pmap.c +++ b/hal/armv7m/pmap.c @@ -50,7 +50,7 @@ static struct { int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { if (prog != NULL) { - pmap->hal = &prog->hal; + pmap->hal = &prog->partition->hal; } else { pmap->hal = NULL; diff --git a/hal/armv7r/arch/pmap.h b/hal/armv7r/arch/pmap.h index a35c1c056..972aba55d 100644 --- a/hal/armv7r/arch/pmap.h +++ b/hal/armv7r/arch/pmap.h @@ -55,7 +55,7 @@ typedef struct _page_t { typedef struct _pmap_t { void *start; void *end; - const hal_syspage_prog_t *hal; + const hal_syspage_part_t 
*hal; } pmap_t; #endif diff --git a/hal/armv7r/pmap.c b/hal/armv7r/pmap.c index 9ad18bfbb..39afe7e43 100644 --- a/hal/armv7r/pmap.c +++ b/hal/armv7r/pmap.c @@ -36,6 +36,7 @@ static struct { spinlock_t lock; int mpuEnabled; unsigned int lastMPUCount[NUM_CPUS]; + const hal_syspage_part_t *lastMPUConf[NUM_CPUS]; } pmap_common; @@ -107,7 +108,7 @@ static void pmap_mpu_disable(void) int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { if (prog != NULL) { - pmap->hal = &prog->hal; + pmap->hal = &prog->partition->hal; } else { pmap->hal = NULL; @@ -124,7 +125,7 @@ addr_t pmap_destroy(pmap_t *pmap, unsigned int *i) void pmap_switch(pmap_t *pmap) { - const hal_syspage_prog_t *hal; + const hal_syspage_part_t *hal; unsigned int allocCnt; spinlock_ctx_t sc; unsigned int i; @@ -134,6 +135,9 @@ void pmap_switch(pmap_t *pmap) } if (pmap != NULL && pmap->hal != NULL) { + if (pmap->hal == pmap_common.lastMPUConf[hal_cpuGetID()]) { + return; + } hal_spinlockSet(&pmap_common.lock, &sc); hal = pmap->hal; @@ -158,6 +162,7 @@ void pmap_switch(pmap_t *pmap) pmap_mpu_enable(); pmap_common.lastMPUCount[hal_cpuGetID()] = allocCnt; + pmap_common.lastMPUConf[hal_cpuGetID()] = hal; hal_spinlockClear(&pmap_common.lock, &sc); } @@ -258,6 +263,7 @@ void _pmap_init(pmap_t *pmap, void **vstart, void **vend) pmap->hal = NULL; for (i = 0; i < (unsigned int)NUM_CPUS; i++) { pmap_common.lastMPUCount[i] = cnt; + pmap_common.lastMPUConf[i] = NULL; } if (cnt == 0U) { diff --git a/hal/armv8m/arch/pmap.h b/hal/armv8m/arch/pmap.h index 5507118d3..f4253a1f7 100644 --- a/hal/armv8m/arch/pmap.h +++ b/hal/armv8m/arch/pmap.h @@ -55,7 +55,7 @@ typedef struct _page_t { typedef struct _pmap_t { void *start; void *end; - const hal_syspage_prog_t *hal; + const hal_syspage_part_t *hal; } pmap_t; #endif diff --git a/hal/armv8m/pmap.c b/hal/armv8m/pmap.c index abccd45e8..e6a340187 100644 --- a/hal/armv8m/pmap.c +++ b/hal/armv8m/pmap.c @@ -54,7 +54,7 @@ static struct { int 
pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr) { if (prog != NULL) { - pmap->hal = &prog->hal; + pmap->hal = &prog->partition->hal; } else { pmap->hal = NULL; diff --git a/include/arch/aarch64/zynqmp/syspage.h b/include/arch/aarch64/zynqmp/syspage.h index 11c3f04a3..2c52e6070 100644 --- a/include/arch/aarch64/zynqmp/syspage.h +++ b/include/arch/aarch64/zynqmp/syspage.h @@ -22,6 +22,7 @@ typedef struct { typedef struct { -} hal_syspage_prog_t; + int dummy; +} hal_syspage_part_t; #endif diff --git a/include/arch/armv7a/imx6ull/syspage.h b/include/arch/armv7a/imx6ull/syspage.h index e86ca6da6..0f859d848 100644 --- a/include/arch/armv7a/imx6ull/syspage.h +++ b/include/arch/armv7a/imx6ull/syspage.h @@ -23,6 +23,7 @@ typedef struct { typedef struct { -} hal_syspage_prog_t; + int dummy; +} hal_syspage_part_t; #endif diff --git a/include/arch/armv7a/zynq7000/syspage.h b/include/arch/armv7a/zynq7000/syspage.h index fba674a3e..125e7f80e 100644 --- a/include/arch/armv7a/zynq7000/syspage.h +++ b/include/arch/armv7a/zynq7000/syspage.h @@ -22,6 +22,7 @@ typedef struct { typedef struct { -} hal_syspage_prog_t; + int dummy; +} hal_syspage_part_t; #endif diff --git a/include/arch/armv7m/imxrt/syspage.h b/include/arch/armv7m/imxrt/syspage.h index 303f3c41f..4fc8b81e3 100644 --- a/include/arch/armv7m/imxrt/syspage.h +++ b/include/arch/armv7m/imxrt/syspage.h @@ -32,7 +32,7 @@ typedef struct { unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ unsigned int allocCnt; } mpu; -} hal_syspage_prog_t; +} hal_syspage_part_t; typedef struct { diff --git a/include/arch/armv7m/stm32/syspage.h b/include/arch/armv7m/stm32/syspage.h index 23fcba662..31518b924 100644 --- a/include/arch/armv7m/stm32/syspage.h +++ b/include/arch/armv7m/stm32/syspage.h @@ -32,7 +32,7 @@ typedef struct { unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ unsigned int allocCnt; } mpu; -} hal_syspage_prog_t; +} 
hal_syspage_part_t; typedef struct { diff --git a/include/arch/armv7r/tda4vm/syspage.h b/include/arch/armv7r/tda4vm/syspage.h index 1b988a28c..a9d5cd00a 100644 --- a/include/arch/armv7r/tda4vm/syspage.h +++ b/include/arch/armv7r/tda4vm/syspage.h @@ -31,7 +31,7 @@ typedef struct { unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ unsigned int allocCnt; } __attribute__((packed)) mpu; -} __attribute__((packed)) hal_syspage_prog_t; +} __attribute__((packed)) hal_syspage_part_t; typedef struct { diff --git a/include/arch/armv7r/zynqmp/syspage.h b/include/arch/armv7r/zynqmp/syspage.h index 14e2731de..bd72db632 100644 --- a/include/arch/armv7r/zynqmp/syspage.h +++ b/include/arch/armv7r/zynqmp/syspage.h @@ -31,7 +31,7 @@ typedef struct { unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ unsigned int allocCnt; } __attribute__((packed)) mpu; -} __attribute__((packed)) hal_syspage_prog_t; +} __attribute__((packed)) hal_syspage_part_t; typedef struct { diff --git a/include/arch/armv8m/mcx/syspage.h b/include/arch/armv8m/mcx/syspage.h index d35c5952a..be85fbd20 100644 --- a/include/arch/armv8m/mcx/syspage.h +++ b/include/arch/armv8m/mcx/syspage.h @@ -32,7 +32,7 @@ typedef struct { unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ unsigned int allocCnt; } mpu; -} hal_syspage_prog_t; +} hal_syspage_part_t; typedef struct { diff --git a/include/arch/armv8m/nrf/syspage.h b/include/arch/armv8m/nrf/syspage.h index 2d4dd4826..095f5cc4f 100644 --- a/include/arch/armv8m/nrf/syspage.h +++ b/include/arch/armv8m/nrf/syspage.h @@ -32,7 +32,7 @@ typedef struct { unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ unsigned int allocCnt; } mpu; -} hal_syspage_prog_t; +} hal_syspage_part_t; typedef struct { diff --git a/include/arch/armv8m/stm32/syspage.h b/include/arch/armv8m/stm32/syspage.h index da4e97e99..2f0295c20 100644 --- a/include/arch/armv8m/stm32/syspage.h +++ 
b/include/arch/armv8m/stm32/syspage.h @@ -32,7 +32,7 @@ typedef struct { unsigned int map[MPU_MAX_REGIONS]; /* ((unsigned int)-1) = map is not assigned */ unsigned int allocCnt; } mpu; -} hal_syspage_prog_t; +} hal_syspage_part_t; typedef struct { diff --git a/include/arch/armv8r/mps3an536/syspage.h b/include/arch/armv8r/mps3an536/syspage.h index 430656cdd..52bfa3dde 100644 --- a/include/arch/armv8r/mps3an536/syspage.h +++ b/include/arch/armv8r/mps3an536/syspage.h @@ -23,6 +23,7 @@ typedef struct { typedef struct { -} hal_syspage_prog_t; + int dummy; +} hal_syspage_part_t; #endif diff --git a/include/arch/ia32/syspage.h b/include/arch/ia32/syspage.h index 1215f317d..5b4ece5ee 100644 --- a/include/arch/ia32/syspage.h +++ b/include/arch/ia32/syspage.h @@ -58,6 +58,7 @@ typedef struct { } __attribute__((packed)) hal_syspage_t; typedef struct { -} hal_syspage_prog_t; + int dummy; +} hal_syspage_part_t; #endif diff --git a/include/arch/riscv64/syspage.h b/include/arch/riscv64/syspage.h index 08c83f2ce..5c4507c1d 100644 --- a/include/arch/riscv64/syspage.h +++ b/include/arch/riscv64/syspage.h @@ -23,6 +23,7 @@ typedef struct { typedef struct { -} hal_syspage_prog_t; + int dummy; +} hal_syspage_part_t; #endif diff --git a/include/arch/sparcv8leon/syspage.h b/include/arch/sparcv8leon/syspage.h index 4f0479c5d..deb716cf1 100644 --- a/include/arch/sparcv8leon/syspage.h +++ b/include/arch/sparcv8leon/syspage.h @@ -23,6 +23,7 @@ typedef struct { typedef struct { -} hal_syspage_prog_t; + int dummy; +} hal_syspage_part_t; #endif diff --git a/include/syspage.h b/include/syspage.h index a43ac6ea9..e8860396a 100644 --- a/include/syspage.h +++ b/include/syspage.h @@ -17,6 +17,7 @@ #define _PH_SYSPAGE_H_ +/* clang-format off */ enum { mAttrRead = 0x01, mAttrWrite = 0x02, mAttrExec = 0x04, mAttrShareable = 0x08, mAttrCacheable = 0x10, mAttrBufferable = 0x20 }; @@ -26,21 +27,45 @@ enum { console_default = 0, console_com0, console_com1, console_com2, console_co console_com15, 
console_vga0 }; +enum { pFlagSpawnAll = 0x01 }; +/* clang-format on */ + + typedef struct _mapent_t { struct _mapent_t *next, *prev; + /* clang-format off */ enum { hal_entryReserved = 0, hal_entryTemp, hal_entryAllocated, hal_entryInvalid } type; + /* clang-format on */ addr_t start; addr_t end; } __attribute__((packed)) mapent_t; +typedef struct _syspage_part_t { + struct _syspage_part_t *next, *prev; + + char *name; + unsigned int flags; + + size_t allocMapSz; + unsigned char *allocMaps; + + size_t accessMapSz; + unsigned char *accessMaps; + + hal_syspage_part_t hal; +} syspage_part_t; + + typedef struct _syspage_prog_t { struct _syspage_prog_t *next, *prev; addr_t start; addr_t end; + syspage_part_t *partition; + char *argv; size_t imapSz; @@ -48,9 +73,7 @@ typedef struct _syspage_prog_t { size_t dmapSz; unsigned char *dmaps; - - hal_syspage_prog_t hal; -} syspage_prog_t; +} __attribute__((packed)) syspage_prog_t; typedef struct _syspage_map_t { @@ -74,8 +97,9 @@ typedef struct { addr_t pkernel; /* Physical address of kernel's beginning */ - syspage_map_t *maps; /* Maps list */ - syspage_prog_t *progs; /* Programs list*/ + syspage_map_t *maps; /* Maps list */ + syspage_part_t *partitions; /* Partitions list */ + syspage_prog_t *progs; /* Programs list */ unsigned int console; /* Console ID defines in hal */ } __attribute__((packed)) syspage_t; diff --git a/proc/process.c b/proc/process.c index 79114cccf..e7536f32b 100644 --- a/proc/process.c +++ b/proc/process.c @@ -174,7 +174,7 @@ static int process_alloc(process_t *process) } -int proc_start(startFn_t start, void *arg, const char *path) +int proc_start(startFn_t start, void *arg, const char *path, syspage_part_t *partition) { int err = EOK; process_t *process; @@ -203,6 +203,7 @@ int proc_start(startFn_t start, void *arg, const char *path) process->ghosts = NULL; process->reaper = NULL; process->refs = 1; + process->partition = partition; (void)proc_lockInit(&process->lock, &proc_lockAttrDefault, "process"); 
@@ -1173,6 +1174,24 @@ static int proc_spawn(vm_object_t *object, const syspage_prog_t *prog, vm_map_t int pid; process_spawn_t spawn; spinlock_ctx_t sc; + syspage_part_t *part; + process_t *proc = proc_current()->process; + + if (prog != NULL) { + part = prog->partition; + } + else if ((proc != NULL) && (proc->partition != NULL)) { + part = proc->partition; + } + else { + part = NULL; + } + if ((proc != NULL) && + (proc->partition != NULL) && + ((proc->partition->flags & (unsigned int)pFlagSpawnAll) == 0U) && + (proc->partition != part)) { + return -EACCES; + } if (argv != NULL) { argv = proc_copyargs(argv); @@ -1203,7 +1222,7 @@ static int proc_spawn(vm_object_t *object, const syspage_prog_t *prog, vm_map_t hal_spinlockCreate(&spawn.sl, "spawnsl"); - pid = proc_start(proc_spawnThread, &spawn, path); + pid = proc_start(proc_spawnThread, &spawn, path, part); if (pid > 0) { hal_spinlockSet(&spawn.sl, &sc); while (spawn.state == FORKING) { @@ -1454,7 +1473,7 @@ int proc_vfork(void) spawn->parent = current; spawn->prog = NULL; - pid = proc_start(process_vforkThread, spawn, NULL); + pid = proc_start(process_vforkThread, spawn, NULL, (current->process != NULL) ? 
current->process->partition : NULL); if (pid < 0) { hal_spinlockDestroy(&spawn->sl); vm_kfree(spawn); diff --git a/proc/process.h b/proc/process.h index fbf2c217c..7c1ca30fe 100644 --- a/proc/process.h +++ b/proc/process.h @@ -48,6 +48,7 @@ typedef struct _process_t { vm_map_t *mapp; vm_map_t *imapp; pmap_t *pmapp; + syspage_part_t *partition; int exit; unsigned int lazy : 1; @@ -93,7 +94,7 @@ void proc_kill(process_t *proc); void proc_reap(void); -int proc_start(startFn_t start, void *arg, const char *path); +int proc_start(startFn_t start, void *arg, const char *path, syspage_part_t *partition); int proc_fileSpawn(const char *path, char **argv, char **envp); diff --git a/syspage.c b/syspage.c index 795c36355..f4e176931 100644 --- a/syspage.c +++ b/syspage.c @@ -186,6 +186,7 @@ void syspage_progShow(void) void syspage_init(void) { syspage_prog_t *prog; + syspage_part_t *part; syspage_map_t *map; mapent_t *entry; @@ -225,7 +226,24 @@ void syspage_init(void) prog->dmaps = hal_syspageRelocate(prog->dmaps); prog->imaps = hal_syspageRelocate(prog->imaps); prog->argv = hal_syspageRelocate(prog->argv); + prog->partition = hal_syspageRelocate(prog->partition); prog = prog->next; } while (prog != syspage_common.syspage->progs); } + + /* Partition's relocation */ + if (syspage_common.syspage->partitions != NULL) { + syspage_common.syspage->partitions = hal_syspageRelocate(syspage_common.syspage->partitions); + part = syspage_common.syspage->partitions; + + do { + part->next = hal_syspageRelocate(part->next); + part->prev = hal_syspageRelocate(part->prev); + + part->allocMaps = hal_syspageRelocate(part->allocMaps); + part->accessMaps = hal_syspageRelocate(part->accessMaps); + part->name = hal_syspageRelocate(part->name); + part = part->next; + } while (part != syspage_common.syspage->partitions); + } } From 1b9652c3dcf09e32e554d743e78aa8f54b77853b Mon Sep 17 00:00:00 2001 From: Jakub Klimek Date: Thu, 18 Dec 2025 11:56:45 +0100 Subject: [PATCH 3/8] syscalls/mmap: multimap 
allocation Iterate over all allocation maps from syspage_part_t struct when allocating memory using mmap to increase allocation flexibility. Allows for uncached map selection eg. for buffers for HW communication. Verify map access inside munmap and mprotect. JIRA: RTOS-1149 --- include/syspage.h | 2 +- syscalls.c | 51 ++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/include/syspage.h b/include/syspage.h index e8860396a..85263bcaa 100644 --- a/include/syspage.h +++ b/include/syspage.h @@ -99,7 +99,7 @@ typedef struct { syspage_map_t *maps; /* Maps list */ syspage_part_t *partitions; /* Partitions list */ - syspage_prog_t *progs; /* Programs list */ + syspage_prog_t *progs; /* Programs list */ unsigned int console; /* Console ID defines in hal */ } __attribute__((packed)) syspage_t; diff --git a/syscalls.c b/syscalls.c index f609af5cc..1fa37a483 100644 --- a/syscalls.c +++ b/syscalls.c @@ -109,8 +109,29 @@ int syscalls_sys_mmap(u8 *ustack) } flags &= ~(MAP_ANONYMOUS | MAP_CONTIGUOUS | MAP_PHYSMEM); - +#ifndef NOMMU (*vaddr) = vm_mmap(proc_current()->process->mapp, *vaddr, NULL, size, PROT_USER | (vm_prot_t)prot, o, (o == NULL) ? -1 : offs, flags); + +#else + unsigned int i; + void *va = NULL; + if (proc->partition == NULL) { + (*vaddr) = vm_mmap(proc_current()->process->mapp, *vaddr, NULL, size, PROT_USER | (vm_prot_t)prot, o, (o == NULL) ? -1 : offs, flags); + } + else { + /* TODO: option for exact map selection */ + for (i = 0; i < proc->partition->allocMapSz; i++) { + if (((flags & MAP_UNCACHED) != 0U) && ((syspage_mapIdResolve(proc->partition->allocMaps[i])->attr & (unsigned int)mAttrCacheable) != 0U)) { + continue; + } + va = vm_mmap(vm_getSharedMap((int)proc->partition->allocMaps[i]), *vaddr, NULL, size, PROT_USER | (vm_prot_t)prot, o, (o == NULL) ? 
-1 : offs, flags); + if (va != NULL) { + break; + } + } + (*vaddr) = va; + } +#endif (void)vm_objectPut(o); if ((*vaddr) == NULL) { @@ -124,7 +145,6 @@ int syscalls_sys_mmap(u8 *ustack) int syscalls_sys_munmap(u8 *ustack) { - process_t *proc = proc_current()->process; void *vaddr; size_t size; int err; @@ -133,7 +153,18 @@ int syscalls_sys_munmap(u8 *ustack) GETFROMSTACK(ustack, size_t, size, 1U); size = round_page(size); - err = vm_munmap(proc->mapp, vaddr, size); +#ifndef NOMMU + err = vm_munmap(proc_current()->process->mapp, vaddr, size); +#else + const syspage_map_t *smap = syspage_mapAddrResolve((addr_t)vaddr); + if (smap == NULL) { + return -EINVAL; + } + if (pmap_isAllowed(proc_current()->process->pmapp, vaddr, size) == 0) { + return -EACCES; + } + err = vm_munmap(vm_getSharedMap((int)smap->id), vaddr, size); +#endif if (err < 0) { return err; } @@ -143,7 +174,6 @@ int syscalls_sys_munmap(u8 *ustack) int syscalls_sys_mprotect(u8 *ustack) { - process_t *proc = proc_current()->process; void *vaddr; size_t len; int prot, err; @@ -152,7 +182,18 @@ int syscalls_sys_mprotect(u8 *ustack) GETFROMSTACK(ustack, size_t, len, 1U); GETFROMSTACK(ustack, int, prot, 2U); - err = vm_mprotect(proc->mapp, vaddr, len, PROT_USER | (vm_prot_t)prot); +#ifndef NOMMU + err = vm_mprotect(proc_current()->process->mapp, vaddr, len, PROT_USER | (vm_prot_t)prot); +#else + const syspage_map_t *smap = syspage_mapAddrResolve((addr_t)vaddr); + if (smap == NULL) { + return -EINVAL; + } + if (pmap_isAllowed(proc_current()->process->pmapp, vaddr, len) == 0) { + return -EACCES; + } + err = vm_mprotect(vm_getSharedMap((int)smap->id), vaddr, len, PROT_USER | (vm_prot_t)prot); +#endif if (err < 0) { return err; } From 5b908a74027d86223d8c59c168234620afabb15a Mon Sep 17 00:00:00 2001 From: Jakub Klimek Date: Thu, 12 Feb 2026 14:34:02 +0100 Subject: [PATCH 4/8] proc/threads: temporal partitioning Introduce scheduler windows to allow for partitions temporal separation. 
Move timer update to _threads_schedule on all cores to reduce the use of threads_common.spinlock and make wakeup calculation atomic with schedule JIRA: RTOS-1149 --- include/syspage.h | 11 +++ main.c | 2 +- proc/threads.c | 189 +++++++++++++++++++++++++++++++++++----------- syspage.c | 25 ++++++ syspage.h | 10 +++ test/proc.c | 2 +- 6 files changed, 194 insertions(+), 45 deletions(-) diff --git a/include/syspage.h b/include/syspage.h index 85263bcaa..ff8c0f1da 100644 --- a/include/syspage.h +++ b/include/syspage.h @@ -42,6 +42,14 @@ typedef struct _mapent_t { } __attribute__((packed)) mapent_t; +typedef struct _syspage_sched_window_t { + struct _syspage_sched_window_t *next, *prev; + + time_t stop; + unsigned char id; +} __attribute__((packed)) syspage_sched_window_t; + + typedef struct _syspage_part_t { struct _syspage_part_t *next, *prev; @@ -54,6 +62,8 @@ typedef struct _syspage_part_t { size_t accessMapSz; unsigned char *accessMaps; + unsigned int schedWindowsMask; + hal_syspage_part_t hal; } syspage_part_t; @@ -99,6 +109,7 @@ typedef struct { syspage_map_t *maps; /* Maps list */ syspage_part_t *partitions; /* Partitions list */ + syspage_sched_window_t *schedWindows; syspage_prog_t *progs; /* Programs list */ unsigned int console; /* Console ID defines in hal */ diff --git a/main.c b/main.c index 84dace1b8..825bfa44f 100644 --- a/main.c +++ b/main.c @@ -131,7 +131,7 @@ int main(void) test_proc_exit(); #endif - (void)proc_start(main_initthr, NULL, (const char *)"init"); + (void)proc_start(main_initthr, NULL, (const char *)"init", NULL); /* Start scheduling, leave current stack */ hal_cpuEnableInterrupts(); diff --git a/proc/threads.c b/proc/threads.c index 67935baaa..cbf3510e5 100644 --- a/proc/threads.c +++ b/proc/threads.c @@ -35,16 +35,23 @@ const struct lockAttr proc_lockAttrDefault = { .type = PH_LOCK_NORMAL }; /* Special empty queue value used to wakeup next enqueued thread. 
This is used to implement sticky conditions */ static thread_t *const wakeupPending = (void *)-1; +#define NUM_PRIO 8U + + static struct { vm_map_t *kmap; spinlock_t spinlock; lock_t lock; - thread_t *ready[8]; + + thread_t ***ready; /* [schedwindow][priority], schedwindow 0 is always scheduled */ + syspage_sched_window_t **actWindow; thread_t **current; time_t utcoffs; /* Synchronized by spinlock */ rbtree_t sleeping; + time_t sleepMin; + time_t *windowStart; /* Synchronized by mutex */ unsigned int idcounter; @@ -65,9 +72,7 @@ static struct { } threads_common; -_Static_assert(sizeof(threads_common.ready) / sizeof(threads_common.ready[0]) <= (u8)-1, "queue size must fit into priority type"); - -#define MAX_PRIO ((u8)(sizeof(threads_common.ready) / sizeof(threads_common.ready[0])) - 1U) +_Static_assert(NUM_PRIO <= (u8)-1, "queue size must fit into priority type"); static thread_t *_proc_current(void); @@ -168,7 +173,6 @@ static void _threads_waking(thread_t *t) static void _threads_updateWakeup(time_t now, thread_t *minimum) { thread_t *t; - time_t wakeup; if (minimum != NULL) { t = minimum; @@ -179,21 +183,15 @@ static void _threads_updateWakeup(time_t now, thread_t *minimum) if (t != NULL) { if (now >= t->wakeup) { - wakeup = 1; + threads_common.sleepMin = now; } else { - wakeup = t->wakeup - now; + threads_common.sleepMin = t->wakeup; } } else { - wakeup = SYSTICK_INTERVAL; - } - - if (wakeup > SYSTICK_INTERVAL + SYSTICK_INTERVAL / 8) { - wakeup = SYSTICK_INTERVAL; + threads_common.sleepMin = -1; } - - hal_timerSetWakeup((unsigned int)wakeup); } @@ -337,6 +335,17 @@ __attribute__((noreturn)) void proc_longjmp(cpu_context_t *ctx) static int _threads_checkSignal(thread_t *selected, process_t *proc, cpu_context_t *signalCtx, unsigned int oldmask, const int src); +static thread_t **proc_getReadyQueues(const process_t *process) +{ + if ((process != NULL) && (process->partition != NULL)) { + return 
threads_common.ready[hal_cpuGetFirstBit(process->partition->schedWindowsMask)]; + } + else { + return threads_common.ready[0]; + } +} + + /* parasoft-suppress-next-line MISRAC2012-RULE_8_4 "Function is used externally within assembler code" */ int _threads_schedule(unsigned int n, cpu_context_t *context, void *arg) { @@ -345,6 +354,10 @@ int _threads_schedule(unsigned int n, cpu_context_t *context, void *arg) process_t *proc; cpu_context_t *signalCtx, *selCtx; unsigned int cpuId = hal_cpuGetID(); + syspage_sched_window_t **window = &threads_common.actWindow[cpuId]; + thread_t **actReady; + time_t wakeup; + time_t now = _proc_gettimeRaw(); (void)arg; (void)n; @@ -352,6 +365,17 @@ int _threads_schedule(unsigned int n, cpu_context_t *context, void *arg) trace_eventSchedEnter(cpuId); + while (now - threads_common.windowStart[cpuId] >= (*window)->stop) { + (*window) = (*window)->next; + if ((*window) == syspage_schedulerWindowList()) { + threads_common.windowStart[cpuId] = now; + (*window) = (*window)->next; /* Skip background window */ + break; + } + } + + actReady = threads_common.ready[(*window)->id]; + current = _proc_current(); threads_common.current[cpuId] = NULL; @@ -361,21 +385,28 @@ int _threads_schedule(unsigned int n, cpu_context_t *context, void *arg) /* Move thread to the end of queue */ if (current->state == READY) { - LIST_ADD(&threads_common.ready[current->priority], current); + LIST_ADD(&proc_getReadyQueues(current->process)[current->priority], current); _threads_preempted(current); } } /* Get next thread */ i = 0; - while (i < sizeof(threads_common.ready) / sizeof(thread_t *)) { - selected = threads_common.ready[i]; - if (selected == NULL) { - i++; - continue; + while (i < NUM_PRIO) { + selected = threads_common.ready[0][i]; + if (selected != NULL) { + LIST_REMOVE(&threads_common.ready[0][i], selected); } + else { + selected = actReady[i]; + + if (selected == NULL) { + i++; + continue; + } - LIST_REMOVE(&threads_common.ready[i], selected); + 
LIST_REMOVE(&actReady[i], selected); + } if (selected->exit == 0U) { break; @@ -445,6 +476,19 @@ int _threads_schedule(unsigned int n, cpu_context_t *context, void *arg) /* Update CPU usage */ _threads_cpuTimeCalc(current, selected); + wakeup = threads_common.actWindow[cpuId]->stop - (now - threads_common.windowStart[cpuId]); + if (((*window)->id == 0U) || (wakeup > SYSTICK_INTERVAL + SYSTICK_INTERVAL / 8)) { + wakeup = SYSTICK_INTERVAL; + } + if ((cpuId == 0U) && (threads_common.sleepMin != -1) && (threads_common.sleepMin < now + wakeup)) { + wakeup = threads_common.sleepMin - now; + } + if (wakeup <= 0) { + wakeup = 1; + } + + hal_timerSetWakeup((unsigned int)wakeup); + trace_eventSchedExit(cpuId); return EOK; @@ -535,7 +579,7 @@ int proc_threadCreate(process_t *process, startFn_t start, int *id, u8 priority, spinlock_ctx_t sc; int err; - if (priority >= sizeof(threads_common.ready) / sizeof(thread_t *)) { + if (priority >= NUM_PRIO) { return -EINVAL; } @@ -619,7 +663,7 @@ int proc_threadCreate(process_t *process, startFn_t start, int *id, u8 priority, /* Insert thread to scheduler queue */ _threads_waking(t); - LIST_ADD(&threads_common.ready[priority], t); + LIST_ADD(&proc_getReadyQueues(process)[priority], t); hal_spinlockClear(&threads_common.spinlock, &sc); @@ -629,7 +673,7 @@ int proc_threadCreate(process_t *process, startFn_t start, int *id, u8 priority, static u8 _proc_lockGetPriority(lock_t *lock) { - u8 priority = MAX_PRIO; + u8 priority = NUM_PRIO - 1U; thread_t *thread = lock->queue; if (thread != NULL) { @@ -647,7 +691,7 @@ static u8 _proc_lockGetPriority(lock_t *lock) static u8 _proc_threadGetLockPriority(thread_t *thread) { - u8 ret, priority = MAX_PRIO; + u8 ret, priority = NUM_PRIO - 1U; lock_t *lock = thread->locks; if (lock != NULL) { @@ -674,6 +718,8 @@ static u8 _proc_threadGetPriority(thread_t *thread) static void _proc_threadSetPriority(thread_t *thread, u8 priority) { unsigned int i; + thread_t **readyQueues; + /* Don't allow decreasing the 
priority below base level */ if (priority > thread->priorityBase) { @@ -688,11 +734,12 @@ static void _proc_threadSetPriority(thread_t *thread, u8 priority) } if (i == hal_cpuGetCount()) { - LIB_ASSERT(LIST_BELONGS(&threads_common.ready[thread->priority], thread) != 0, + readyQueues = proc_getReadyQueues(thread->process); + LIB_ASSERT(LIST_BELONGS(&readyQueues[thread->priority], thread) != 0, "thread: 0x%p, tid: %d, priority: %d, is not on the ready list", thread, proc_getTid(thread), thread->priority); - LIST_REMOVE(&threads_common.ready[thread->priority], thread); - LIST_ADD(&threads_common.ready[priority], thread); + LIST_REMOVE(&readyQueues[thread->priority], thread); + LIST_ADD(&readyQueues[priority], thread); } } @@ -712,7 +759,7 @@ int proc_threadPriority(int signedPriority) return -EINVAL; } - if ((signedPriority >= 0) && ((size_t)signedPriority >= sizeof(threads_common.ready) / sizeof(threads_common.ready[0]))) { + if ((signedPriority >= 0) && ((size_t)signedPriority >= NUM_PRIO)) { return -EINVAL; } @@ -888,7 +935,7 @@ static void _proc_threadDequeue(thread_t *t) } if (i == hal_cpuGetCount()) { - LIST_ADD(&threads_common.ready[t->priority], t); + LIST_ADD(&proc_getReadyQueues(t->process)[t->priority], t); } } @@ -1853,6 +1900,7 @@ void proc_threadsDump(u8 priority) { thread_t *t; spinlock_ctx_t sc; + syspage_sched_window_t *window; /* Strictly needed - no lock can be taken * while threads_common.spinlock is being @@ -1862,16 +1910,21 @@ void proc_threadsDump(u8 priority) lib_printf("threads: "); hal_spinlockSet(&threads_common.spinlock, &sc); - t = threads_common.ready[priority]; + window = syspage_schedulerWindowList(); do { - lib_printf("[%p] ", t); + t = threads_common.ready[window->id][priority]; + do { + lib_printf("[%p(%u)] ", t, window->id); - if (t == NULL) { - break; - } + if (t == NULL) { + break; + } + + t = t->next; + } while (t != threads_common.ready[window->id][priority]); - t = t->next; - } while (t != threads_common.ready[priority]); + 
window = window->next; + } while (window != syspage_schedulerWindowList()); hal_spinlockClear(&threads_common.spinlock, &sc); lib_printf("\n"); @@ -2007,7 +2060,10 @@ int proc_threadsList(int n, threadinfo_t *info) int _threads_init(vm_map_t *kmap, vm_object_t *kernel) { - unsigned int i; + syspage_sched_window_t *schedWindow; + syspage_part_t *part; + unsigned int i, j, cnt; + u32 mask; threads_common.kmap = kmap; threads_common.ghosts = NULL; threads_common.reaper = NULL; @@ -2021,15 +2077,45 @@ int _threads_init(vm_map_t *kmap, vm_object_t *kernel) threads_common.stackCanary[i] = ((i & 1U) != 0U) ? 0xaaU : 0x55U; } - /* Initiaizlie scheduler queue */ - for (i = 0; i < sizeof(threads_common.ready) / sizeof(thread_t *); i++) { - threads_common.ready[i] = NULL; + /* Initialize scheduler queue */ + schedWindow = syspage_schedulerWindowList()->next; + cnt = 1; + while (schedWindow != syspage_schedulerWindowList()) { + schedWindow = schedWindow->next; + cnt++; + } + threads_common.ready = vm_kmalloc(sizeof(thread_t **) * cnt); + if (threads_common.ready == NULL) { + return -ENOMEM; + } + for (i = 0; i < cnt; i++) { + mask = (u32)(1UL << i); + part = syspage_partitionList(); + do { + if ((part->schedWindowsMask & mask) != 0U) { + mask = part->schedWindowsMask; + break; + } + part = part->next; + } while (part != syspage_partitionList()); + if (hal_cpuGetFirstBit(mask) != i) { + threads_common.ready[i] = threads_common.ready[hal_cpuGetFirstBit(mask)]; + continue; + } + + threads_common.ready[i] = vm_kmalloc(sizeof(thread_t *) * NUM_PRIO); + if (threads_common.ready[i] == NULL) { + return -ENOMEM; + } + for (j = 0; j < NUM_PRIO; j++) { + threads_common.ready[i][j] = NULL; + } } lib_rbInit(&threads_common.sleeping, threads_sleepcmp, NULL); lib_idtreeInit(&threads_common.id); - lib_printf("proc: Initializing thread scheduler, priorities=%d\n", sizeof(threads_common.ready) / sizeof(thread_t *)); + lib_printf("proc: Initializing thread scheduler, priorities=%d\n", NUM_PRIO); 
hal_spinlockCreate(&threads_common.spinlock, "threads.spinlock"); @@ -2043,9 +2129,24 @@ int _threads_init(vm_map_t *kmap, vm_object_t *kernel) /* Run idle thread on every cpu */ for (i = 0; i < hal_cpuGetCount(); i++) { threads_common.current[i] = NULL; - (void)proc_threadCreate(NULL, threads_idlethr, NULL, MAX_PRIO, (size_t)SIZE_KSTACK, NULL, 0, NULL); + (void)proc_threadCreate(NULL, threads_idlethr, NULL, NUM_PRIO - 1U, (size_t)SIZE_KSTACK, NULL, 0, NULL); } + threads_common.windowStart = vm_kmalloc(sizeof(*threads_common.windowStart) * hal_cpuGetCount()); + if (threads_common.windowStart == NULL) { + return -ENOMEM; + } + threads_common.actWindow = vm_kmalloc(sizeof(*threads_common.actWindow) * hal_cpuGetCount()); + if (threads_common.actWindow == NULL) { + return -ENOMEM; + } + for (i = 0; i < hal_cpuGetCount(); i++) { + threads_common.actWindow[i] = syspage_schedulerWindowList(); + threads_common.windowStart[i] = 0; + } + + threads_common.sleepMin = -1; + /* Install scheduler on clock interrupt */ #ifdef PENDSV_IRQ hal_memset(&threads_common.pendsvHandler, 0, sizeof(threads_common.pendsvHandler)); @@ -2057,5 +2158,7 @@ int _threads_init(vm_map_t *kmap, vm_object_t *kernel) hal_memset(&threads_common.timeintrHandler, 0, sizeof(threads_common.timeintrHandler)); (void)hal_timerRegister(threads_timeintr, NULL, &threads_common.timeintrHandler); + /* workaround for sparcv8leon which does not set up SYSTICK timer */ + hal_timerSetWakeup((unsigned int)SYSTICK_INTERVAL); return EOK; } diff --git a/syspage.c b/syspage.c index f4e176931..0fda1e999 100644 --- a/syspage.c +++ b/syspage.c @@ -183,11 +183,24 @@ void syspage_progShow(void) } +syspage_sched_window_t *syspage_schedulerWindowList(void) +{ + return syspage_common.syspage->schedWindows; +} + + +syspage_part_t *syspage_partitionList(void) +{ + return syspage_common.syspage->partitions; +} + + void syspage_init(void) { syspage_prog_t *prog; syspage_part_t *part; syspage_map_t *map; + syspage_sched_window_t 
*schedWindow; mapent_t *entry; syspage_common.syspage = (syspage_t *)hal_syspageAddr(); @@ -246,4 +259,16 @@ void syspage_init(void) part = part->next; } while (part != syspage_common.syspage->partitions); } + + /* SchedWindow's relocation */ + if (syspage_common.syspage->schedWindows != NULL) { + syspage_common.syspage->schedWindows = hal_syspageRelocate(syspage_common.syspage->schedWindows); + schedWindow = syspage_common.syspage->schedWindows; + + do { + schedWindow->next = hal_syspageRelocate(schedWindow->next); + schedWindow->prev = hal_syspageRelocate(schedWindow->prev); + schedWindow = schedWindow->next; + } while (schedWindow != syspage_common.syspage->schedWindows); + } } diff --git a/syspage.h b/syspage.h index f587ae7d4..575532fe8 100644 --- a/syspage.h +++ b/syspage.h @@ -51,6 +51,16 @@ const syspage_prog_t *syspage_progIdResolve(unsigned int id); const syspage_prog_t *syspage_progNameResolve(const char *name); +/* Scheduler configuration */ + +syspage_sched_window_t *syspage_schedulerWindowList(void); + + +/* Partition configuration */ + +syspage_part_t *syspage_partitionList(void); + + /* General functions */ void syspage_progShow(void); diff --git a/test/proc.c b/test/proc.c index eb5f743b2..b86d473d4 100644 --- a/test/proc.c +++ b/test/proc.c @@ -202,7 +202,7 @@ static void test_proc_initthr(void *arg) void test_proc_exit(void) { - proc_start(test_proc_initthr, NULL, (const char *)"init"); + proc_start(test_proc_initthr, NULL, (const char *)"init", NULL); hal_cpuEnableInterrupts(); hal_cpuReschedule(NULL, NULL); From 87a64a401c3587b85fdc345a1c987117ba84bef5 Mon Sep 17 00:00:00 2001 From: Jakub Klimek Date: Thu, 19 Feb 2026 16:36:47 +0100 Subject: [PATCH 5/8] vm/page: add partition memory limit on MMU targets Introduce accounting mechanism for partition allocated pages to provide resource safety for critical partitions, as there is no other mechanism for separating physical maps for targets with MMU. 
JIRA: RTOS-1149 --- include/syspage.h | 2 ++ perf/buffer-mem.c | 4 ++-- proc/msg.c | 12 ++++++------ proc/process.c | 5 +++-- syscalls.c | 2 +- test/vm.c | 4 ++-- vm/amap.c | 15 ++++++++------- vm/amap.h | 3 ++- vm/map.c | 28 ++++++++++++++++------------ vm/object.c | 46 ++++++++++++++++++++++++++-------------------- vm/object.h | 3 ++- vm/page-nommu.c | 4 ++-- vm/page.c | 29 +++++++++++++++++++++-------- vm/page.h | 4 ++-- vm/zone.c | 6 +++--- 15 files changed, 98 insertions(+), 69 deletions(-) diff --git a/include/syspage.h b/include/syspage.h index ff8c0f1da..7453f258e 100644 --- a/include/syspage.h +++ b/include/syspage.h @@ -62,6 +62,8 @@ typedef struct _syspage_part_t { size_t accessMapSz; unsigned char *accessMaps; + size_t availableMem; + size_t usedMem; unsigned int schedWindowsMask; hal_syspage_part_t hal; diff --git a/perf/buffer-mem.c b/perf/buffer-mem.c index ee2871c9e..8b90a0464 100644 --- a/perf/buffer-mem.c +++ b/perf/buffer-mem.c @@ -57,7 +57,7 @@ static void _bufferFree(void *data, page_t **pages) while (p != NULL) { *pages = p->next; - vm_pageFree(p); + vm_pageFree(p, NULL); sz += SIZE_PAGE; p = *pages; } @@ -80,7 +80,7 @@ static void *_bufferAlloc(page_t **pages, size_t sz) } for (v = data; (ptr_t)v < (ptr_t)data + sz; v += SIZE_PAGE) { - p = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP); + p = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP, NULL); if (p == NULL) { err = -ENOMEM; diff --git a/proc/msg.c b/proc/msg.c index 65a826d03..aa745cc68 100644 --- a/proc/msg.c +++ b/proc/msg.c @@ -113,7 +113,7 @@ static void *msg_map(int dir, kmsg_t *kmsg, void *data, size_t size, process_t * ml->boffs = boffs; bpa = pmap_resolve(&srcmap->pmap, data) & ~(SIZE_PAGE - 1U); - nbp = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP); + nbp = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP, (from == NULL) ? 
NULL : from->partition); ml->bp = nbp; if (nbp == NULL) { return NULL; @@ -154,7 +154,7 @@ static void *msg_map(int dir, kmsg_t *kmsg, void *data, size_t size, process_t * epa = pmap_resolve(&srcmap->pmap, vaddr) & ~(SIZE_PAGE - 1U); if ((boffs == 0U) || (eoffs >= boffs)) { - nep = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP); + nep = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP, (from == NULL) ? NULL : from->partition); ml->ep = nep; if (nep == NULL) { return NULL; @@ -192,14 +192,14 @@ static void msg_release(kmsg_t *kmsg) vm_map_t *map; if (kmsg->i.bp != NULL) { - vm_pageFree(kmsg->i.bp); + vm_pageFree(kmsg->i.bp, (kmsg->src == NULL) ? NULL : kmsg->src->partition); (void)vm_munmap(msg_common.kmap, kmsg->i.bvaddr, SIZE_PAGE); kmsg->i.bp = NULL; } if (kmsg->i.eoffs != 0U) { if (kmsg->i.ep != NULL) { - vm_pageFree(kmsg->i.ep); + vm_pageFree(kmsg->i.ep, (kmsg->src == NULL) ? NULL : kmsg->src->partition); } (void)vm_munmap(msg_common.kmap, kmsg->i.evaddr, SIZE_PAGE); kmsg->i.eoffs = 0; @@ -220,14 +220,14 @@ static void msg_release(kmsg_t *kmsg) } if (kmsg->o.bp != NULL) { - vm_pageFree(kmsg->o.bp); + vm_pageFree(kmsg->o.bp, (kmsg->src == NULL) ? NULL : kmsg->src->partition); (void)vm_munmap(msg_common.kmap, kmsg->o.bvaddr, SIZE_PAGE); kmsg->o.bp = NULL; } if (kmsg->o.eoffs != 0U) { if (kmsg->o.ep != NULL) { - vm_pageFree(kmsg->o.ep); + vm_pageFree(kmsg->o.ep, (kmsg->src == NULL) ? NULL : kmsg->src->partition); } (void)vm_munmap(msg_common.kmap, kmsg->o.evaddr, SIZE_PAGE); kmsg->o.eoffs = 0; diff --git a/proc/process.c b/proc/process.c index e7536f32b..affe8b0d2 100644 --- a/proc/process.c +++ b/proc/process.c @@ -1246,13 +1246,14 @@ int proc_fileSpawn(const char *path, char **argv, char **envp) int err; oid_t oid; vm_object_t *object; + process_t *process = proc_current()->process; err = proc_lookup(path, NULL, &oid); if (err < 0) { return err; } - err = vm_objectGet(&object, oid); + err = vm_objectGet(&object, oid, (process == NULL) ? 
NULL : process->partition); if (err < 0) { return err; } @@ -1722,7 +1723,7 @@ int proc_execve(const char *path, char **argv, char **envp) return err; } - err = vm_objectGet(&object, oid); + err = vm_objectGet(&object, oid, (current->process == NULL) ? NULL : current->process->partition); if (err < 0) { vm_kfree(kpath); vm_kfree(argv); diff --git a/syscalls.c b/syscalls.c index 1fa37a483..512eb58bd 100644 --- a/syscalls.c +++ b/syscalls.c @@ -102,7 +102,7 @@ int syscalls_sys_mmap(u8 *ustack) if (err < 0) { return err; } - err = vm_objectGet(&o, oid); + err = vm_objectGet(&o, oid, proc->partition); if (err < 0) { return err; } diff --git a/test/vm.c b/test/vm.c index 120ea9a9a..47b83d156 100644 --- a/test/vm.c +++ b/test/vm.c @@ -44,7 +44,7 @@ void test_vm_alloc(void) maxsize = max(maxsize, size); hal_cpuGetCycles(&b); - p = vm_pageAlloc(size, PAGE_OWNER_KERNEL | PAGE_KERNEL_HEAP); + p = vm_pageAlloc(size, PAGE_OWNER_KERNEL | PAGE_KERNEL_HEAP, NULL); hal_cpuGetCycles(&e); if (p == NULL) { @@ -52,7 +52,7 @@ void test_vm_alloc(void) break; } - vm_pageFree(p); + vm_pageFree(p, NULL); lib_printf("\rtest: size=%d, n=%d", size, n); diff --git a/vm/amap.c b/vm/amap.c index b9bd1e1a8..db66c7f39 100644 --- a/vm/amap.c +++ b/vm/amap.c @@ -28,7 +28,7 @@ static struct { } amap_common; -static anon_t *amap_putanon(anon_t *a) +static anon_t *amap_putanon(anon_t *a, syspage_part_t *part) { if (a == NULL) { return NULL; @@ -39,7 +39,7 @@ static anon_t *amap_putanon(anon_t *a) return a; } - vm_pageFree(a->page); + vm_pageFree(a->page, part); (void)proc_lockClear(&a->lock); (void)proc_lockDone(&a->lock); vm_kfree(a); @@ -57,7 +57,7 @@ void amap_putanons(amap_t *amap, size_t offset, size_t size) (void)proc_lockSet(&amap->lock); for (i = offset / SIZE_PAGE; i < (offset + size) / SIZE_PAGE; ++i) { - (void)amap_putanon(amap->anons[i]); + (void)amap_putanon(amap->anons[i], amap->partition); } (void)proc_lockClear(&amap->lock); } @@ -107,7 +107,7 @@ amap_t *amap_ref(amap_t *amap) } -amap_t 
*amap_create(amap_t *amap, size_t *offset, size_t size) +amap_t *amap_create(amap_t *amap, size_t *offset, size_t size, syspage_part_t *part) { size_t i = size / SIZE_PAGE; amap_t *new; @@ -139,6 +139,7 @@ amap_t *amap_create(amap_t *amap, size_t *offset, size_t size) (void)proc_lockInit(&new->lock, &proc_lockAttrDefault, "amap.map"); new->size = i; new->refs = 1; + new->partition = part; *offset = *offset / SIZE_PAGE; @@ -274,7 +275,7 @@ page_t *amap_page(vm_map_t *map, amap_t *amap, vm_object_t *o, void *vaddr, size if (a != NULL || o != NULL) { /* Copy from object or shared anon */ - p = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP); + p = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP, amap->partition); if (p == NULL) { (void)amap_unmap(map, v); if (a != NULL) { @@ -285,7 +286,7 @@ page_t *amap_page(vm_map_t *map, amap_t *amap, vm_object_t *o, void *vaddr, size } w = amap_map(map, p); if (w == NULL) { - vm_pageFree(p); + vm_pageFree(p, amap->partition); (void)amap_unmap(map, v); if (a != NULL) { (void)proc_lockClear(&a->lock); @@ -308,7 +309,7 @@ page_t *amap_page(vm_map_t *map, amap_t *amap, vm_object_t *o, void *vaddr, size amap->anons[aoffs / SIZE_PAGE] = anon_new(p); if (amap->anons[aoffs / SIZE_PAGE] == NULL) { - vm_pageFree(p); + vm_pageFree(p, amap->partition); p = NULL; } (void)proc_lockClear(&amap->lock); diff --git a/vm/amap.h b/vm/amap.h index 3fd2244a3..ab4faacab 100644 --- a/vm/amap.h +++ b/vm/amap.h @@ -32,6 +32,7 @@ typedef struct _anon_t { typedef struct _amap_t { lock_t lock; + syspage_part_t *partition; size_t size; int refs; anon_t *anons[]; @@ -50,7 +51,7 @@ void amap_putanons(amap_t *amap, size_t offset, size_t size); void amap_getanons(amap_t *amap, size_t offset, size_t size); -amap_t *amap_create(amap_t *amap, size_t *offset, size_t size); +amap_t *amap_create(amap_t *amap, size_t *offset, size_t size, syspage_part_t *part); void amap_put(amap_t *amap); diff --git a/vm/map.c b/vm/map.c index fbde7a338..313cd5ada 100644 --- a/vm/map.c +++ b/vm/map.c @@ 
-51,7 +51,7 @@ static map_entry_t *map_allocN(size_t n); void map_free(map_entry_t *entry); -static int _map_force(vm_map_t *map, map_entry_t *e, void *paddr, vm_prot_t prot); +static int _map_force(vm_map_t *map, map_entry_t *e, void *paddr, vm_prot_t prot, syspage_part_t *part); static int map_cmp(rbnode_t *n1, rbnode_t *n2) @@ -623,7 +623,7 @@ void *_vm_mmap(vm_map_t *map, void *vaddr, page_t *p, size_t size, vm_prot_t pro } for (w = vaddr; w < vaddr + size; w += SIZE_PAGE) { - if (_map_force(map, e, w, prot) != 0) { + if (_map_force(map, e, w, prot, process != NULL ? process->partition : NULL) != 0) { amap_putanons(e->amap, e->aoffs, (ptr_t)w - (ptr_t)vaddr); (void)pmap_remove(&map->pmap, vaddr, (void *)((ptr_t)w + SIZE_PAGE)); @@ -709,6 +709,10 @@ int vm_mapForce(vm_map_t *map, void *paddr, vm_prot_t prot) { map_entry_t t, *e; int err; + syspage_part_t *part = NULL; + if (proc_current()->process != NULL) { + part = proc_current()->process->partition; + } (void)proc_lockSet(&map->lock); @@ -722,7 +726,7 @@ int vm_mapForce(vm_map_t *map, void *paddr, vm_prot_t prot) return -EFAULT; } - err = _map_force(map, e, paddr, prot); + err = _map_force(map, e, paddr, prot, part); (void)proc_lockClear(&map->lock); return err; } @@ -734,7 +738,7 @@ static vm_prot_t map_checkProt(vm_prot_t baseProt, vm_prot_t newProt) } -static int _map_force(vm_map_t *map, map_entry_t *e, void *paddr, vm_prot_t prot) +static int _map_force(vm_map_t *map, map_entry_t *e, void *paddr, vm_prot_t prot, syspage_part_t *part) { vm_attr_t attr; size_t offs; @@ -746,7 +750,7 @@ static int _map_force(vm_map_t *map, map_entry_t *e, void *paddr, vm_prot_t prot return -EINVAL; } if ((((prot & PROT_WRITE) != 0U) && ((e->flags & MAP_NEEDSCOPY) != 0U)) || ((e->object == NULL) && (e->amap == NULL))) { - e->amap = amap_create(e->amap, &e->aoffs, e->size); + e->amap = amap_create(e->amap, &e->aoffs, e->size, part); if (e->amap == NULL) { return -ENOMEM; } @@ -963,7 +967,7 @@ int vm_mprotect(vm_map_t *map, 
void *vaddr, size_t len, vm_prot_t prot) } } else { - result = _map_force(map, e, currVaddr, prot); + result = _map_force(map, e, currVaddr, prot, p != NULL ? p->partition : NULL); } } @@ -998,14 +1002,14 @@ int vm_mapCreate(vm_map_t *map, void *start, void *stop) map->pmap.end = stop; #ifndef NOMMU - map->pmap.pmapp = vm_pageAlloc(SIZE_PDIR, PAGE_OWNER_KERNEL | PAGE_KERNEL_PTABLE); + map->pmap.pmapp = vm_pageAlloc(SIZE_PDIR, PAGE_OWNER_KERNEL | PAGE_KERNEL_PTABLE, NULL); if (map->pmap.pmapp == NULL) { return -ENOMEM; } map->pmap.pmapv = vm_mmap(map_common.kmap, NULL, map->pmap.pmapp, 1UL << map->pmap.pmapp->idx, PROT_READ | PROT_WRITE, map_common.kernel, -1, MAP_NONE); if (map->pmap.pmapv == NULL) { - vm_pageFree(map->pmap.pmapp); + vm_pageFree(map->pmap.pmapp, NULL); return -ENOMEM; } @@ -1050,11 +1054,11 @@ void vm_mapDestroy(process_t *p, vm_map_t *map) if (a == 0U) { break; } - vm_pageFree(_page_get(a)); + vm_pageFree(_page_get(a), NULL); } (void)vm_munmap(map_common.kmap, map->pmap.pmapv, SIZE_PDIR); - vm_pageFree(map->pmap.pmapp); + vm_pageFree(map->pmap.pmapp, NULL); for (n = map->tree.root; n != NULL; n = map->tree.root) { e = lib_treeof(map_entry_t, linkage, n); @@ -1153,13 +1157,13 @@ int vm_mapCopy(process_t *proc, vm_map_t *dst, vm_map_t *src) if ((proc == NULL) || (proc->lazy == 0U)) { for (offs = 0; offs < f->size; offs += SIZE_PAGE) { - if (_map_force(dst, f, (void *)((ptr_t)f->vaddr + offs), f->prot) != 0) { + if (_map_force(dst, f, (void *)((ptr_t)f->vaddr + offs), f->prot, proc != NULL ? proc->partition : NULL) != 0) { (void)proc_lockClear(&dst->lock); (void)proc_lockClear(&src->lock); return -ENOMEM; } - if (_map_force(src, e, (void *)((ptr_t)e->vaddr + offs), e->prot) != 0) { + if (_map_force(src, e, (void *)((ptr_t)e->vaddr + offs), e->prot, proc != NULL ? 
proc->partition : NULL) != 0) { (void)proc_lockClear(&dst->lock); (void)proc_lockClear(&src->lock); return -ENOMEM; diff --git a/vm/object.c b/vm/object.c index 2be97ba92..44d04c3d8 100644 --- a/vm/object.c +++ b/vm/object.c @@ -56,7 +56,7 @@ static int object_cmp(rbnode_t *n1, rbnode_t *n2) } -int vm_objectGet(vm_object_t **o, oid_t oid) +int vm_objectGet(vm_object_t **o, oid_t oid, syspage_part_t *part) { vm_object_t t, *no = NULL; size_t i, n; @@ -102,6 +102,7 @@ int vm_objectGet(vm_object_t **o, oid_t oid) /* Safe to cast - sz fits into size_t from above checks */ (*o)->size = (size_t)sz; (*o)->refs = 0; + (*o)->part = part; for (i = 0; i < n; ++i) { (*o)->pages[i] = NULL; @@ -155,12 +156,12 @@ int vm_objectPut(vm_object_t *o) /* Contiguous object 'holds' all pages in pages[0] */ if ((o->oid.port == (u32)(-1)) && (o->oid.id == (id_t)(-1))) { - vm_pageFree(o->pages[0]); + vm_pageFree(o->pages[0], o->part); } else { for (i = 0; i < round_page(o->size) / SIZE_PAGE; ++i) { if (o->pages[i] != NULL) { - vm_pageFree(o->pages[i]); + vm_pageFree(o->pages[i], o->part); } } } @@ -171,37 +172,37 @@ int vm_objectPut(vm_object_t *o) } -static page_t *object_fetch(oid_t oid, u64 offs) +static page_t *object_fetch(vm_object_t *o, u64 offs) { page_t *p; void *v; - if (proc_open(oid, 0) < 0) { + if (proc_open(o->oid, 0) < 0) { return NULL; } - p = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP); + p = vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP, o->part); if (p == NULL) { - (void)proc_close(oid, 0); + (void)proc_close(o->oid, 0); return NULL; } v = vm_mmap(object_common.kmap, NULL, p, SIZE_PAGE, PROT_WRITE | PROT_USER, object_common.kernel, 0, MAP_NONE); if (v == NULL) { - vm_pageFree(p); - (void)proc_close(oid, 0); + vm_pageFree(p, o->part); + (void)proc_close(o->oid, 0); return NULL; } - if (proc_read(oid, (off_t)offs, v, SIZE_PAGE, 0) < 0) { + if (proc_read(o->oid, (off_t)offs, v, SIZE_PAGE, 0) < 0) { (void)vm_munmap(object_common.kmap, v, SIZE_PAGE); - vm_pageFree(p); - 
(void)proc_close(oid, 0); + vm_pageFree(p, o->part); + (void)proc_close(o->oid, 0); return NULL; } (void)vm_munmap(object_common.kmap, v, SIZE_PAGE); - (void)proc_close(oid, 0); + (void)proc_close(o->oid, 0); return p; } @@ -212,7 +213,10 @@ page_t *vm_objectPage(vm_map_t *map, amap_t **amap, vm_object_t *o, void *vaddr, page_t *p; if (o == NULL) { - return vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP); + if (amap != NULL && *amap != NULL) { + return vm_pageAlloc(SIZE_PAGE, PAGE_OWNER_APP, (*amap)->partition); + } + return NULL; } if (o == VM_OBJ_PHYSMEM) { @@ -246,11 +250,11 @@ page_t *vm_objectPage(vm_map_t *map, amap_t **amap, vm_object_t *o, void *vaddr, (void)proc_lockClear(&map->lock); - p = object_fetch(o->oid, offs); + p = object_fetch(o, offs); if (vm_lockVerify(map, amap, o, vaddr, offs) != 0) { if (p != NULL) { - vm_pageFree(p); + vm_pageFree(p, o->part); } return NULL; @@ -261,7 +265,7 @@ page_t *vm_objectPage(vm_map_t *map, amap_t **amap, vm_object_t *o, void *vaddr, if (o->pages[offs / SIZE_PAGE] != NULL) { /* Someone loaded a page in the meantime, use it */ if (p != NULL) { - vm_pageFree(p); + vm_pageFree(p, o->part); } p = o->pages[offs / SIZE_PAGE]; @@ -280,8 +284,9 @@ vm_object_t *vm_objectContiguous(size_t size) vm_object_t *o; page_t *p; size_t i, n; + syspage_part_t *part = (proc_current()->process != NULL) ? 
proc_current()->process->partition : NULL; - p = vm_pageAlloc(size, PAGE_OWNER_APP); + p = vm_pageAlloc(size, PAGE_OWNER_APP, part); if (p == NULL) { return NULL; } @@ -291,7 +296,7 @@ vm_object_t *vm_objectContiguous(size_t size) o = vm_kmalloc(sizeof(vm_object_t) + n * sizeof(page_t *)); if (o == NULL) { - vm_pageFree(p); + vm_pageFree(p, part); return NULL; } @@ -301,6 +306,7 @@ vm_object_t *vm_objectContiguous(size_t size) o->oid.id = (id_t)(-1); o->refs = 1; o->size = size; + o->part = part; for (i = 0; i < n; ++i) { o->pages[i] = p + i; @@ -327,7 +333,7 @@ int _object_init(vm_map_t *kmap, vm_object_t *kernel) kernel->oid.id = 0; (void)lib_rbInsert(&object_common.tree, &kernel->linkage); - (void)vm_objectGet(&o, kernel->oid); + (void)vm_objectGet(&o, kernel->oid, NULL); return EOK; } diff --git a/vm/object.h b/vm/object.h index 9faae0a42..efd3ee0f7 100644 --- a/vm/object.h +++ b/vm/object.h @@ -28,6 +28,7 @@ typedef struct _vm_object_t { rbnode_t linkage; oid_t oid; int refs; + syspage_part_t *part; size_t size; page_t *pages[]; } vm_object_t; @@ -39,7 +40,7 @@ typedef struct _vm_object_t { vm_object_t *vm_objectRef(vm_object_t *o); -int vm_objectGet(vm_object_t **o, oid_t oid); +int vm_objectGet(vm_object_t **o, oid_t oid, syspage_part_t *part); int vm_objectPut(vm_object_t *o); diff --git a/vm/page-nommu.c b/vm/page-nommu.c index 726340051..f929a760a 100644 --- a/vm/page-nommu.c +++ b/vm/page-nommu.c @@ -63,7 +63,7 @@ static page_t *_page_alloc(size_t size, vm_flags_t flags) } -page_t *vm_pageAlloc(size_t size, vm_flags_t flags) +page_t *vm_pageAlloc(size_t size, vm_flags_t flags, syspage_part_t *part) { page_t *p; @@ -75,7 +75,7 @@ page_t *vm_pageAlloc(size_t size, vm_flags_t flags) } -void vm_pageFree(page_t *p) +void vm_pageFree(page_t *p, syspage_part_t *part) { (void)proc_lockSet(&pages.lock); diff --git a/vm/page.c b/vm/page.c index 178950714..533594c7e 100644 --- a/vm/page.c +++ b/vm/page.c @@ -38,7 +38,7 @@ static struct { } pages_info; -static 
page_t *_page_alloc(size_t size, vm_flags_t flags) +static page_t *_page_alloc(size_t size, vm_flags_t flags, syspage_part_t *part) { unsigned int start, stop, i; page_t *lh, *rh; @@ -52,6 +52,10 @@ static page_t *_page_alloc(size_t size, vm_flags_t flags) start++; } + if ((part != NULL) && ((part->usedMem + (1UL << start)) > part->availableMem)) { + return NULL; + } + /* Find segment */ stop = start; @@ -87,22 +91,26 @@ static page_t *_page_alloc(size_t size, vm_flags_t flags) pages_info.allocsz += SIZE_PAGE; } + if (part != NULL) { + part->usedMem += (1UL << lh->idx); + } + return lh; } -page_t *vm_pageAlloc(size_t size, vm_flags_t flags) +page_t *vm_pageAlloc(size_t size, vm_flags_t flags, syspage_part_t *part) { page_t *p; (void)proc_lockSet(&pages_info.lock); - p = _page_alloc(size, flags); + p = _page_alloc(size, flags, part); (void)proc_lockClear(&pages_info.lock); return p; } -void vm_pageFree(page_t *p) +void vm_pageFree(page_t *p, syspage_part_t *part) { unsigned int idx, i; page_t *lh = p, *rh = p; @@ -123,6 +131,11 @@ void vm_pageFree(page_t *p) idx = p->idx; + if (part != NULL) { + LIB_ASSERT_ALWAYS(part->usedMem >= (1UL << idx), "partition invalid free page.c"); + part->usedMem -= (1UL << idx); + } + /* Mark free pages */ for (i = 0; i < ((u64)1 << idx) / SIZE_PAGE; i++) { (p + i)->flags |= PAGE_FREE; @@ -331,7 +344,7 @@ static int _page_map(pmap_t *pmap, void *vaddr, addr_t pa, vm_attr_t attr) page_t *ap = NULL; while (pmap_enter(pmap, pa, vaddr, attr, ap) < 0) { - ap = _page_alloc(SIZE_PAGE, PAGE_OWNER_KERNEL | PAGE_KERNEL_PTABLE); + ap = _page_alloc(SIZE_PAGE, PAGE_OWNER_KERNEL | PAGE_KERNEL_PTABLE, NULL); if (/*vaddr > (void *)VADDR_KERNEL ||*/ ap == NULL) { return -ENOMEM; } @@ -355,13 +368,13 @@ int page_map(pmap_t *pmap, void *vaddr, addr_t pa, vm_attr_t attr) int _page_sbrk(pmap_t *pmap, void **start, void **end) { page_t *np, *ap = NULL; - np = _page_alloc(SIZE_PAGE, PAGE_OWNER_KERNEL | PAGE_KERNEL_HEAP); + np = _page_alloc(SIZE_PAGE, 
PAGE_OWNER_KERNEL | PAGE_KERNEL_HEAP, NULL); if (np == NULL) { return -ENOMEM; } while (pmap_enter(pmap, np->addr, (*end), PGHD_READ | PGHD_WRITE | PGHD_PRESENT, ap) < 0) { - ap = _page_alloc(SIZE_PAGE, PAGE_OWNER_KERNEL | PAGE_KERNEL_PTABLE); + ap = _page_alloc(SIZE_PAGE, PAGE_OWNER_KERNEL | PAGE_KERNEL_PTABLE, NULL); if (ap == NULL) { return -ENOMEM; } @@ -493,7 +506,7 @@ void _page_init(pmap_t *pmap, void **bss, void **top) if (_pmap_kernelSpaceExpand(pmap, &vaddr, (*top) + max((pages_info.freesz + pages_info.allocsz) / 4U, (1UL << 23)), p) == 0) { break; } - p = _page_alloc(SIZE_PAGE, PAGE_OWNER_KERNEL | PAGE_KERNEL_PTABLE); + p = _page_alloc(SIZE_PAGE, PAGE_OWNER_KERNEL | PAGE_KERNEL_PTABLE, NULL); if (p == NULL) { return; } diff --git a/vm/page.h b/vm/page.h index 9b1841379..85eb81d46 100644 --- a/vm/page.h +++ b/vm/page.h @@ -22,10 +22,10 @@ #include "types.h" -page_t *vm_pageAlloc(size_t size, vm_flags_t flags); +page_t *vm_pageAlloc(size_t size, vm_flags_t flags, syspage_part_t *part); -void vm_pageFree(page_t *p); +void vm_pageFree(page_t *p, syspage_part_t *part); page_t *_page_get(addr_t addr); diff --git a/vm/zone.c b/vm/zone.c index afed4e0b3..474579cc5 100644 --- a/vm/zone.c +++ b/vm/zone.c @@ -40,14 +40,14 @@ int _vm_zoneCreate(vm_zone_t *zone, size_t blocksz, unsigned int blocks) return -EINVAL; } - zone->pages = vm_pageAlloc(blocks * blocksz, PAGE_OWNER_KERNEL | PAGE_KERNEL_HEAP); + zone->pages = vm_pageAlloc(blocks * blocksz, PAGE_OWNER_KERNEL | PAGE_KERNEL_HEAP, NULL); if (zone->pages == NULL) { return -ENOMEM; } zone->vaddr = vm_mmap(zone_common.kmap, zone_common.kmap->start, zone->pages, (size_t)1U << zone->pages->idx, PROT_READ | PROT_WRITE, zone_common.kernel, -1, MAP_NONE); if (zone->vaddr == NULL) { - vm_pageFree(zone->pages); + vm_pageFree(zone->pages, NULL); return -ENOMEM; } @@ -77,7 +77,7 @@ int _vm_zoneDestroy(vm_zone_t *zone) } (void)vm_munmap(zone_common.kmap, zone->vaddr, (size_t)1U << zone->pages->idx); - vm_pageFree(zone->pages); 
+ vm_pageFree(zone->pages, NULL); zone->vaddr = NULL; zone->first = NULL; From e9350f59e6a103bf845c1e806056d1aea04949c4 Mon Sep 17 00:00:00 2001 From: Jakub Klimek Date: Thu, 19 Feb 2026 16:38:18 +0100 Subject: [PATCH 6/8] proc/msg: disable inter-partition messages Standard, synchronous messaging system is unsuitable for inter-partition communication, especially without timeouts which are not supported yet. For Inter-Partition Communication non-blocking, shared-memory based communication is recommended. JIRA: RTOS-1149 --- include/syspage.h | 2 +- proc/msg-nommu.c | 25 +++++++++++++++++++++++++ proc/msg.c | 26 ++++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 1 deletion(-) diff --git a/include/syspage.h b/include/syspage.h index 7453f258e..6636f8329 100644 --- a/include/syspage.h +++ b/include/syspage.h @@ -27,7 +27,7 @@ enum { console_default = 0, console_com0, console_com1, console_com2, console_co console_com15, console_vga0 }; -enum { pFlagSpawnAll = 0x01 }; +enum { pFlagSpawnAll = 0x01, pFlagIPCAll = 0x02 }; /* clang-format on */ diff --git a/proc/msg-nommu.c b/proc/msg-nommu.c index b08c754d9..aa64482bd 100644 --- a/proc/msg-nommu.c +++ b/proc/msg-nommu.c @@ -29,6 +29,21 @@ static struct { } msg_common; +static int msg_isAllowed(process_t *proc, port_t *p) +{ + if ((proc == NULL) || (p->owner == NULL) || + (proc->partition == NULL) || (p->owner->partition == NULL) || + ((proc->partition->flags & (unsigned int)pFlagIPCAll) != 0U) || + ((p->owner->partition->flags & (unsigned int)pFlagIPCAll) != 0U)) { + return EOK; + } + if (p->owner->partition != proc->partition) { + return -EACCES; + } + return EOK; +} + + int proc_send(u32 port, msg_t *msg) { port_t *p; @@ -44,6 +59,11 @@ int proc_send(u32 port, msg_t *msg) } sender = proc_current(); + err = msg_isAllowed(sender->process, p); + if (err != EOK) { + port_put(p, 0); + return err; + } kmsg.msg = msg; kmsg.src = sender->process; @@ -131,6 +151,11 @@ int proc_recv(u32 port, msg_t *msg, msg_rid_t *rid) 
if (p == NULL) { return -EINVAL; } + err = msg_isAllowed(proc_current()->process, p); + if (err != EOK) { + port_put(p, 0); + return err; + } hal_spinlockSet(&p->spinlock, &sc); diff --git a/proc/msg.c b/proc/msg.c index aa745cc68..8a78cfa84 100644 --- a/proc/msg.c +++ b/proc/msg.c @@ -347,6 +347,21 @@ static int msg_opack(kmsg_t *kmsg) } +static int msg_isAllowed(process_t *proc, port_t *p) +{ + if ((proc == NULL) || (p->owner == NULL) || + (proc->partition == NULL) || (p->owner->partition == NULL) || + ((proc->partition->flags & (unsigned int)pFlagIPCAll) != 0U) || + ((p->owner->partition->flags & (unsigned int)pFlagIPCAll) != 0U)) { + return EOK; + } + if (p->owner->partition != proc->partition) { + return -EACCES; + } + return EOK; +} + + int proc_send(u32 port, msg_t *msg) { port_t *p; @@ -367,6 +382,11 @@ int proc_send(u32 port, msg_t *msg) } sender = proc_current(); + err = msg_isAllowed(sender->process, p); + if (err != EOK) { + port_put(p, 0); + return err; + } hal_memcpy(&kmsg.msg, msg, sizeof(msg_t)); kmsg.src = sender->process; @@ -451,6 +471,12 @@ int proc_recv(u32 port, msg_t *msg, msg_rid_t *rid) return -EINVAL; } + err = msg_isAllowed(proc_current()->process, p); + if (err != EOK) { + port_put(p, 0); + return err; + } + hal_spinlockSet(&p->spinlock, &sc); while ((p->kmessages == NULL) && (p->closed == 0) && (err != -EINTR)) { From 4cdb0c3b4450d91bc84402e10dade5b4085662d7 Mon Sep 17 00:00:00 2001 From: Jakub Klimek Date: Thu, 19 Feb 2026 18:15:00 +0100 Subject: [PATCH 7/8] proc/threads: separate partition sleeping trees Reduce inter-partition interference by separating partition sleeping trees. 
JIRA: RTOS-1149 --- proc/threads.c | 149 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 115 insertions(+), 34 deletions(-) diff --git a/proc/threads.c b/proc/threads.c index cbf3510e5..77f018269 100644 --- a/proc/threads.c +++ b/proc/threads.c @@ -37,6 +37,8 @@ static thread_t *const wakeupPending = (void *)-1; #define NUM_PRIO 8U +#define NO_WAKEUP ((time_t) - 1) + static struct { vm_map_t *kmap; @@ -49,8 +51,8 @@ static struct { time_t utcoffs; /* Synchronized by spinlock */ - rbtree_t sleeping; - time_t sleepMin; + rbtree_t **sleeping; + time_t **sleepMin; time_t *windowStart; /* Synchronized by mutex */ @@ -170,7 +172,7 @@ static void _threads_waking(thread_t *t) */ -static void _threads_updateWakeup(time_t now, thread_t *minimum) +static void _threads_updateWakeup(time_t now, thread_t *minimum, size_t windowId) { thread_t *t; @@ -178,19 +180,19 @@ static void _threads_updateWakeup(time_t now, thread_t *minimum) t = minimum; } else { - t = lib_treeof(thread_t, sleeplinkage, lib_rbMinimum(threads_common.sleeping.root)); + t = lib_treeof(thread_t, sleeplinkage, lib_rbMinimum(threads_common.sleeping[windowId]->root)); } if (t != NULL) { if (now >= t->wakeup) { - threads_common.sleepMin = now; + *(threads_common.sleepMin[windowId]) = now; } else { - threads_common.sleepMin = t->wakeup; + *(threads_common.sleepMin[windowId]) = t->wakeup; } } else { - threads_common.sleepMin = -1; + *(threads_common.sleepMin[windowId]) = NO_WAKEUP; } } @@ -213,7 +215,7 @@ static int threads_timeintr(unsigned int n, cpu_context_t *context, void *arg) now = _proc_gettimeRaw(); for (;;) { - t = lib_treeof(thread_t, sleeplinkage, lib_rbMinimum(threads_common.sleeping.root)); + t = lib_treeof(thread_t, sleeplinkage, lib_rbMinimum(threads_common.sleeping[threads_common.actWindow[hal_cpuGetID()]->id]->root)); if (t == NULL || t->wakeup > now) { break; @@ -223,7 +225,21 @@ static int threads_timeintr(unsigned int n, cpu_context_t *context, void *arg) 
hal_cpuSetReturnValue(t->context, (void *)-ETIME); } - _threads_updateWakeup(now, t); + _threads_updateWakeup(now, t, threads_common.actWindow[hal_cpuGetID()]->id); + + /* Update wakeup time for the background window */ + for (;;) { + t = lib_treeof(thread_t, sleeplinkage, lib_rbMinimum(threads_common.sleeping[0]->root)); + + if (t == NULL || t->wakeup > now) { + break; + } + + _proc_threadDequeue(t); + hal_cpuSetReturnValue(t->context, (void *)-ETIME); + } + + _threads_updateWakeup(now, t, 0); hal_spinlockClear(&threads_common.spinlock, &sc); @@ -335,17 +351,23 @@ __attribute__((noreturn)) void proc_longjmp(cpu_context_t *ctx) static int _threads_checkSignal(thread_t *selected, process_t *proc, cpu_context_t *signalCtx, unsigned int oldmask, const int src); -static thread_t **proc_getReadyQueues(const process_t *process) +static size_t proc_getSchedWindowId(const process_t *process) { if ((process != NULL) && (process->partition != NULL)) { - return threads_common.ready[hal_cpuGetFirstBit(process->partition->schedWindowsMask)]; + return hal_cpuGetFirstBit(process->partition->schedWindowsMask); } else { - return threads_common.ready[0]; + return 0; } } +static thread_t **proc_getReadyQueues(const process_t *process) +{ + return threads_common.ready[proc_getSchedWindowId(process)]; +} + + /* parasoft-suppress-next-line MISRAC2012-RULE_8_4 "Function is used externally within assembler code" */ int _threads_schedule(unsigned int n, cpu_context_t *context, void *arg) { @@ -480,8 +502,8 @@ int _threads_schedule(unsigned int n, cpu_context_t *context, void *arg) if (((*window)->id == 0U) || (wakeup > SYSTICK_INTERVAL + SYSTICK_INTERVAL / 8)) { wakeup = SYSTICK_INTERVAL; } - if ((cpuId == 0U) && (threads_common.sleepMin != -1) && (threads_common.sleepMin < now + wakeup)) { - wakeup = threads_common.sleepMin - now; + if ((cpuId == 0U) && (*(threads_common.sleepMin[(*window)->id]) != NO_WAKEUP) && (*(threads_common.sleepMin[(*window)->id]) < now + wakeup)) { + wakeup = 
*(threads_common.sleepMin[(*window)->id]) - now; } if (wakeup <= 0) { wakeup = 1; @@ -919,7 +941,7 @@ static void _proc_threadDequeue(thread_t *t) } if (t->wakeup != 0) { - lib_rbRemove(&threads_common.sleeping, &t->sleeplinkage); + lib_rbRemove(threads_common.sleeping[proc_getSchedWindowId(t->process)], &t->sleeplinkage); } t->wakeup = 0; @@ -960,8 +982,8 @@ static void _proc_threadEnqueue(thread_t **queue, time_t timeout, u8 interruptib if (timeout != 0) { current->wakeup = timeout; - (void)lib_rbInsert(&threads_common.sleeping, ¤t->sleeplinkage); - _threads_updateWakeup(_proc_gettimeRaw(), NULL); + (void)lib_rbInsert(threads_common.sleeping[proc_getSchedWindowId(current->process)], ¤t->sleeplinkage); + _threads_updateWakeup(_proc_gettimeRaw(), NULL, proc_getSchedWindowId(current->process)); } _threads_enqueued(current); @@ -987,19 +1009,23 @@ static int _proc_threadWait(thread_t **queue, time_t timeout, spinlock_ctx_t *sc static int _proc_threadSleepAbs(time_t abs, time_t now, spinlock_ctx_t *sc) { + size_t schedWindowId; + thread_t *current; + /* Handle usleep(0) (yield) */ if (abs > now) { - thread_t *current = _proc_current(); + current = _proc_current(); + schedWindowId = proc_getSchedWindowId(current->process); current->state = SLEEP; current->wait = NULL; current->wakeup = abs; current->interruptible = 1; - (void)lib_rbInsert(&threads_common.sleeping, ¤t->sleeplinkage); + (void)lib_rbInsert(threads_common.sleeping[schedWindowId], ¤t->sleeplinkage); _threads_enqueued(current); - _threads_updateWakeup(now, NULL); + _threads_updateWakeup(now, NULL, schedWindowId); } return hal_cpuReschedule(&threads_common.spinlock, sc); @@ -1295,21 +1321,59 @@ int proc_settime(time_t offs) static time_t _proc_nextWakeup(void) { - thread_t *thread; - time_t wakeup = 0; - time_t now; + /* Idle means currently nothing to schedule, start from next */ + syspage_sched_window_t *window; + thread_t **windowReady; + unsigned int i; + time_t now, windowDelay, wakeup = NO_WAKEUP; - 
thread = lib_treeof(thread_t, sleeplinkage, lib_rbMinimum(threads_common.sleeping.root)); - if (thread != NULL) { - now = _proc_gettimeRaw(); - if (now >= thread->wakeup) { - wakeup = 0; - } - else { - wakeup = thread->wakeup - now; + now = _proc_gettimeRaw(); + window = threads_common.actWindow[hal_cpuGetID()]->next; + windowDelay = threads_common.windowStart[hal_cpuGetID()] + window->prev->stop - now; + + if (*(threads_common.sleepMin[0]) != NO_WAKEUP) { + wakeup = *(threads_common.sleepMin[0]) - now; + if (wakeup <= 0) { + wakeup = 1; } } + if (window->next == window) { + return wakeup; + } + + do { + if (window == syspage_schedulerWindowList()) { + /* Already checked */ + window = window->next; + } + if ((wakeup != NO_WAKEUP) && (windowDelay > wakeup)) { + break; + } + + windowReady = threads_common.ready[window->id]; + for (i = 0; i < NUM_PRIO; ++i) { + if (windowReady[i] != NULL) { + wakeup = windowDelay; + break; + } + } + if (i < NUM_PRIO) { + break; + } + + if (*(threads_common.sleepMin[window->id]) != NO_WAKEUP) { + wakeup = *(threads_common.sleepMin[window->id]) - now; + if (wakeup <= windowDelay) { + wakeup = windowDelay; + } + break; + } + + windowDelay += window->stop - window->prev->stop; + window = window->next; + } while (window != threads_common.actWindow[hal_cpuGetID()]); + return wakeup; } @@ -2088,6 +2152,14 @@ int _threads_init(vm_map_t *kmap, vm_object_t *kernel) if (threads_common.ready == NULL) { return -ENOMEM; } + threads_common.sleeping = vm_kmalloc(sizeof(rbtree_t *) * cnt); + if (threads_common.sleeping == NULL) { + return -ENOMEM; + } + threads_common.sleepMin = vm_kmalloc(sizeof(time_t *) * cnt); + if (threads_common.sleepMin == NULL) { + return -ENOMEM; + } for (i = 0; i < cnt; i++) { mask = (u32)(1UL << i); part = syspage_partitionList(); @@ -2100,6 +2172,8 @@ int _threads_init(vm_map_t *kmap, vm_object_t *kernel) } while (part != syspage_partitionList()); if (hal_cpuGetFirstBit(mask) != i) { threads_common.ready[i] = 
threads_common.ready[hal_cpuGetFirstBit(mask)]; + threads_common.sleeping[i] = threads_common.sleeping[hal_cpuGetFirstBit(mask)]; + threads_common.sleepMin[i] = threads_common.sleepMin[hal_cpuGetFirstBit(mask)]; continue; } @@ -2110,9 +2184,18 @@ int _threads_init(vm_map_t *kmap, vm_object_t *kernel) for (j = 0; j < NUM_PRIO; j++) { threads_common.ready[i][j] = NULL; } + threads_common.sleeping[i] = vm_kmalloc(sizeof(rbtree_t)); + if (threads_common.sleeping[i] == NULL) { + return -ENOMEM; + } + lib_rbInit(threads_common.sleeping[i], threads_sleepcmp, NULL); + threads_common.sleepMin[i] = vm_kmalloc(sizeof(time_t)); + if (threads_common.sleepMin[i] == NULL) { + return -ENOMEM; + } + *(threads_common.sleepMin[i]) = NO_WAKEUP; } - lib_rbInit(&threads_common.sleeping, threads_sleepcmp, NULL); lib_idtreeInit(&threads_common.id); lib_printf("proc: Initializing thread scheduler, priorities=%d\n", NUM_PRIO); @@ -2145,8 +2228,6 @@ int _threads_init(vm_map_t *kmap, vm_object_t *kernel) threads_common.windowStart[i] = 0; } - threads_common.sleepMin = -1; - /* Install scheduler on clock interrupt */ #ifdef PENDSV_IRQ hal_memset(&threads_common.pendsvHandler, 0, sizeof(threads_common.pendsvHandler)); From 1ef56c6859cca5b997db550e46fcf8b46f025df6 Mon Sep 17 00:00:00 2001 From: Jakub Klimek Date: Thu, 12 Feb 2026 14:34:17 +0100 Subject: [PATCH 8/8] hal/riscv64: increase syspageCopied size Increase syspageCopied to fit partitions and scheduling windows in syspage space. JIRA: RTOS-1149 --- hal/riscv64/_init.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hal/riscv64/_init.S b/hal/riscv64/_init.S index a00a326ef..ec3c88699 100644 --- a/hal/riscv64/_init.S +++ b/hal/riscv64/_init.S @@ -176,5 +176,5 @@ _init_core: .section ".bss" .align 3 _hal_syspageCopied: - .zero 0x600 + .zero 0x700 .size _hal_syspageCopied, .-_hal_syspageCopied