From f8d1b2d12d37098d20a572ba84d9b0e7f4281ae2 Mon Sep 17 00:00:00 2001
From: sagi
Date: Wed, 24 Dec 2025 18:12:46 +0200
Subject: [PATCH 01/78] fixed find_available_page function

---
 .../allocators/page_allocator/extensions.rs   | 55 ++++++++++++++
 .../src/structures/paging/page_table.rs       | 74 +++++--------------
 2 files changed, 75 insertions(+), 54 deletions(-)

diff --git a/kernel/src/memory/allocators/page_allocator/extensions.rs b/kernel/src/memory/allocators/page_allocator/extensions.rs
index a9603d6..1ef2370 100644
--- a/kernel/src/memory/allocators/page_allocator/extensions.rs
+++ b/kernel/src/memory/allocators/page_allocator/extensions.rs
@@ -13,6 +13,10 @@ use cpu_utils::structures::paging::{
 };
 use extend::ext;
 use strum::VariantArray;
+
+use common::error::TableError;
+use cpu_utils::structures::paging::EntryIndex;
+
 #[ext]
 pub impl PhysicalAddress {
     fn map(
@@ -139,6 +143,57 @@ pub impl VirtualAddress {

 #[ext]
 pub impl PageTable {
+    /// Find an available page of the given size.
+    // ANCHOR: page_table_find_available_page
+    #[cfg(target_arch = "x86_64")]
+    fn find_available_page(
+        page_size: PageSize,
+    ) -> Result<VirtualAddress, TableError> {
+        const TOTAL_LEVELS: usize = PageTableLevel::VARIANTS.len();
+        let mut level_indices = [0usize; TOTAL_LEVELS];
+        let mut page_tables = [Self::current_table(); TOTAL_LEVELS];
+        let mut current_level = PageTableLevel::PML4;
+        loop {
+            let current_table =
+                page_tables[TOTAL_LEVELS - current_level as usize];
+
+            let ti = current_table.try_fetch_table(
+                level_indices[TOTAL_LEVELS - current_level as usize],
+                current_level,
+                page_size,
+            );
+
+            let next_table = match ti {
+                EntryIndex::OutOfEntries | EntryIndex::PageDoesNotFit => {
+                    current_level = current_level.prev()?;
+                    level_indices
+                        [TOTAL_LEVELS - current_level as usize] += 1;
+                    continue;
+                }
+                EntryIndex::Entry(entry) => {
+                    level_indices[TOTAL_LEVELS - current_level as usize] =
+                        entry.table_index();
+                    unsafe {
+                        &*entry.mapped_unchecked().as_ptr::<PageTable>()
+                    }
+                }
+                EntryIndex::Index(i) => {
+                    level_indices[TOTAL_LEVELS - current_level as usize] =
+                        i;
+                    return Ok(VirtualAddress::from_indices(
+                        level_indices,
+                    ));
+                }
+            };
+            let next_level = current_level
+                .next()
+                .expect("Can't go next on a first level table");
+            page_tables[TOTAL_LEVELS - next_level as usize] = next_table;
+            current_level = next_level;
+        }
+    }
+    // ANCHOR_END: page_table_find_available_page
+
     /// Map the region of memory from 0 to `mem_size_bytes`
     /// at the top of the page table so that ```rust
     /// VirtualAddress(0xffff800000000000) ->
diff --git a/shared/cpu_utils/src/structures/paging/page_table.rs b/shared/cpu_utils/src/structures/paging/page_table.rs
index 5d597f3..fee1d80 100644
--- a/shared/cpu_utils/src/structures/paging/page_table.rs
+++ b/shared/cpu_utils/src/structures/paging/page_table.rs
@@ -5,7 +5,7 @@ use common::{
     address_types::VirtualAddress,
     constants::{PAGE_DIRECTORY_ENTRIES, REGULAR_PAGE_ALIGNMENT},
     enums::{PageSize, PageTableLevel},
-    error::{EntryError, TableError},
+    error::EntryError,
 };

 // ANCHOR: page_table
@@ -17,6 +17,14 @@ pub struct PageTable {
 }
 // ANCHOR_END: page_table

+#[derive(Debug)]
+pub enum EntryIndex {
+    Entry(&'static PageTableEntry),
+    Index(usize),
+    PageDoesNotFit,
+    OutOfEntries,
+}
+
 // ANCHOR: page_table_impl
 impl PageTable {
     // ANCHOR: page_table_empty
@@ -88,70 +96,28 @@ impl PageTable {
     /// Returns the index of the found entry and the page table if found.
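     ///
     /// Illustrative sketch of handling each variant (hedged; the
     /// `table` and `page_size` bindings are assumed, mirroring the
     /// caller in `find_available_page`):
     /// ```rust,ignore
     /// match table.try_fetch_table(0, PageTableLevel::PML4, page_size) {
     ///     EntryIndex::Entry(entry) => { /* a child table is mapped here */ }
     ///     EntryIndex::Index(i) => { /* entry `i` is free at this level */ }
     ///     EntryIndex::PageDoesNotFit => { /* size not allocatable here */ }
     ///     EntryIndex::OutOfEntries => { /* exhausted, backtrack a level */ }
     /// }
     /// ```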
    // ANCHOR: page_table_try_fetch_table
     #[cfg(target_arch = "x86_64")]
-    fn try_fetch_table(
-        &self,
+    pub fn try_fetch_table(
+        &'static self,
         start_at: usize,
         table_level: PageTableLevel,
         page_size: PageSize,
-    ) -> (usize, Option<&PageTable>) {
+    ) -> EntryIndex {
+        if !page_size.allocatable_at(table_level) {
+            return EntryIndex::PageDoesNotFit;
+        }
+
         for (i, entry) in self.entries.iter().enumerate().skip(start_at)
         {
             match entry.mapped_table() {
-                Ok(v) => {
-                    if page_size.exceeds(table_level) {
-                        continue;
-                    }
-                    return (i, Some(v));
+                Ok(_) => {
+                    return EntryIndex::Entry(entry);
                 }
                 Err(EntryError::NoMapping) => {
-                    return (i, None);
+                    return EntryIndex::Index(i);
                 }
                 Err(EntryError::NotATable) => continue,
             }
         }
-        (PAGE_DIRECTORY_ENTRIES, None)
-    }
-
-    /// Find an avavilable page in the given size.
-    // ANCHOR: page_table_find_available_page
-    #[cfg(target_arch = "x86_64")]
-    pub fn find_available_page(
-        page_size: PageSize,
-    ) -> Result<VirtualAddress, TableError> {
-        const LEVELS: usize = 4;
-        let mut level_indices = [0usize; LEVELS];
-        let mut page_tables = [Self::current_table(); LEVELS];
-        let mut current_level = PageTableLevel::PML4;
-        loop {
-            let current_table = page_tables[current_level as usize];
-
-            let next_table = match current_table.try_fetch_table(
-                level_indices[current_level as usize],
-                current_level,
-                page_size,
-            ) {
-                (PAGE_DIRECTORY_ENTRIES, None) => {
-                    current_level = current_level.prev()?;
-                    level_indices[current_level as usize] += 1;
-                    continue;
-                }
-                (i, Some(table)) => {
-                    level_indices[current_level as usize] = i;
-                    table
-                }
-                (i, None) => {
-                    level_indices[current_level as usize] = i;
-                    return Ok(VirtualAddress::from_indices(
-                        level_indices,
-                    ));
-                }
-            };
-            let next_level = current_level
-                .next()
-                .expect("Can't go next on a first level table");
-            page_tables[next_level as usize] = next_table;
-            level_indices[next_level as usize] += 1;
-        }
+        EntryIndex::OutOfEntries
     }
-    // ANCHOR_END: page_table_find_available_page
 }
 // ANCHOR_END: page_table_impl

From 768ed9880802ad9bdf0196e2d5589538a13a360c Mon Sep 17 00:00:00 2001
From: sagi
Date: Wed, 24 Dec 2025 18:12:56 +0200
Subject: [PATCH 02/78] renamed and added doc

---
 shared/common/src/enums/paging.rs | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/shared/common/src/enums/paging.rs b/shared/common/src/enums/paging.rs
index 8e41257..000c964 100644
--- a/shared/common/src/enums/paging.rs
+++ b/shared/common/src/enums/paging.rs
@@ -67,7 +67,13 @@ impl PageSize {
         }
     }

-    pub fn exceeds(&self, table_level: PageTableLevel) -> bool {
+    /// Conclude whether a page can be allocated at the given PageTableLevel.
+    ///
+    /// # Example
+    /// A huge (2 MiB) page can be allocated on PML4, PDPT and PD, so it
+    /// will return `true` for those; it cannot be allocated on PT, so for
+    /// PT it will return `false`.
+    pub fn allocatable_at(&self, table_level: PageTableLevel) -> bool {
         (3 - *self as usize) <= table_level as usize
     }

From bef56114021df27ebf436477fb2aa57259817e9f Mon Sep 17 00:00:00 2001
From: sagi
Date: Wed, 24 Dec 2025 18:13:11 +0200
Subject: [PATCH 03/78] added a function to get the index in the table of
 an entry

---
 shared/cpu_utils/src/structures/paging/page_table_entry.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/shared/cpu_utils/src/structures/paging/page_table_entry.rs b/shared/cpu_utils/src/structures/paging/page_table_entry.rs
index dbbaac2..4769bec 100644
--- a/shared/cpu_utils/src/structures/paging/page_table_entry.rs
+++ b/shared/cpu_utils/src/structures/paging/page_table_entry.rs
@@ -172,4 +172,9 
@@ impl PageTableEntry { } } // ANCHOR_END: page_table_entry_mapped_table + + pub fn table_index(&self) -> usize { + let table_offset = self as *const _ as usize & ((1 << 12) - 1); + table_offset / size_of::() + } } From 16dec01bcaf7eeb8efa6003d5ebd541629438f91 Mon Sep 17 00:00:00 2001 From: sagi Date: Fri, 26 Dec 2025 17:25:02 +0200 Subject: [PATCH 04/78] dump commit of working command sending --- .vscode/settings.json | 3 +- build/runner/runner.bat | 2 +- build/targets/64bit_target.json | 7 +- kernel/src/drivers/ata/ahci/fis.rs | 254 +++++--- kernel/src/drivers/ata/ahci/hba.rs | 559 ++++++++++-------- kernel/src/drivers/ata/ahci/mod.rs | 6 +- kernel/src/drivers/pci.rs | 1 - kernel/src/main.rs | 132 ++--- kernel/src/memory/allocators/mod.rs | 1 + .../allocators/page_allocator/allocator.rs | 17 +- .../allocators/page_allocator/extensions.rs | 5 +- .../memory/allocators/page_allocator/mod.rs | 2 +- .../src/memory/allocators/slab_allocator.rs | 12 + learnix-macros/src/lib.rs | 14 + shared/common/src/enums/ata.rs | 1 + shared/common/src/enums/pic8259.rs | 2 +- shared/common/src/lib.rs | 2 + shared/common/src/volatile.rs | 49 ++ shared/cpu_utils/Cargo.toml | 2 + .../cpu_utils/src/structures/paging/init.rs | 2 +- 20 files changed, 651 insertions(+), 422 deletions(-) create mode 100644 kernel/src/memory/allocators/slab_allocator.rs create mode 100644 shared/common/src/volatile.rs diff --git a/.vscode/settings.json b/.vscode/settings.json index 58834f2..a30e23b 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -157,6 +157,7 @@ "tfee", "tfes", "thiserror", + "udma", "ufis", "UHCI", "Uninit", @@ -171,7 +172,7 @@ ], "rust-analyzer.inlayHints.chainingHints.enable": true, "rust-analyzer.check.command": "clippy", - "rust-analyzer.cargo.extraArgs": [ + "rust-analyzer.check.extraArgs": [ "--release" ], "rust-analyzer.cargo.extraEnv": { diff --git a/build/runner/runner.bat b/build/runner/runner.bat index 3845e2e..fba6a2b 100644 --- a/build/runner/runner.bat +++ b/build/runner/runner.bat @@ -4,5 +4,5 @@ qemu-system-x86_64 ^ -M q35 ^ -drive id=disk0,file=build/image.bin,if=none,format=raw ^ - -device ide-hd,drive=disk0,bus=ide.0 ^ + -device ide-hd,drive=disk0,bus=ide.0,rotation_rate=1 ^ -monitor stdio diff --git a/build/targets/64bit_target.json b/build/targets/64bit_target.json index 1ae7434..7d8c39d 100644 --- a/build/targets/64bit_target.json +++ b/build/targets/64bit_target.json @@ -13,7 +13,12 @@ "position-independent-executables": true, "relro-level": "off", "stack-probes": { - "kind": "call" + "kind": "inline-or-call", + "min-llvm-version-for-inline": [ + 16, + 0, + 0 + ] }, "static-position-independent-executables": true, "target-pointer-width": 64, diff --git a/kernel/src/drivers/ata/ahci/fis.rs b/kernel/src/drivers/ata/ahci/fis.rs index 26dbd98..3542819 100644 --- a/kernel/src/drivers/ata/ahci/fis.rs +++ b/kernel/src/drivers/ata/ahci/fis.rs @@ -1,24 +1,30 @@ -use common::enums::{AtaCommand, FisType}; +use core::{ascii::Char, fmt::Debug, num::NonZero}; + +use common::{ + enums::{AtaCommand, FisType}, + volatile::Volatile, +}; +use learnix_macros::{flag, ro_flag}; #[repr(C, align(4))] #[derive(Clone, Copy, Debug)] pub struct RegisterH2D { - fis_type: FisType, - pm_flags: u8, - command: AtaCommand, - features: u8, - lba1: u8, - lba2: u8, - lba3: u8, - device: u8, - lba4: u8, - lba5: u8, - lba6: u8, - features_ext: u8, - sector_count: u8, - sector_count_ext: u8, + fis_type: Volatile, + pm_flags: Volatile, + command: Volatile, + features: Volatile, + lba1: Volatile, + lba2: Volatile, + 
lba3: Volatile, + device: Volatile, + lba4: Volatile, + lba5: Volatile, + lba6: Volatile, + features_ext: Volatile, + sector_count: Volatile, + sector_count_ext: Volatile, _reserved0: u8, - control: u8, + control: Volatile, _reserved1: [u8; 4], } @@ -32,25 +38,25 @@ impl RegisterH2D { sector_count: u16, control: u8, ) -> RegisterH2D { - let features_low = features as u8; - let features_ext = (features >> 8) as u8; - let lba1 = lba as u8; - let lba2 = (lba >> 8) as u8; - let lba3 = (lba >> 16) as u8; - let lba4 = (lba >> 24) as u8; - let lba5 = (lba >> 32) as u8; - let lba6 = (lba >> 40) as u8; - let sector_count_low = sector_count as u8; - let sector_count_ext = (sector_count >> 8) as u8; + let features_low = Volatile::new(features as u8); + let features_ext = Volatile::new((features >> 8) as u8); + let lba1 = Volatile::new(lba as u8); + let lba2 = Volatile::new((lba >> 8) as u8); + let lba3 = Volatile::new((lba >> 16) as u8); + let lba4 = Volatile::new((lba >> 24) as u8); + let lba5 = Volatile::new((lba >> 32) as u8); + let lba6 = Volatile::new((lba >> 40) as u8); + let sector_count_low = Volatile::new(sector_count as u8); + let sector_count_ext = Volatile::new((sector_count >> 8) as u8); RegisterH2D { - fis_type: FisType::RegisterFisHost2Device, - pm_flags, - command, + fis_type: Volatile::new(FisType::RegisterFisHost2Device), + pm_flags: Volatile::new(pm_flags), + command: Volatile::new(command), features: features_low, lba1, lba2, lba3, - device, + device: Volatile::new(device), lba4, lba5, lba6, @@ -58,7 +64,7 @@ impl RegisterH2D { sector_count: sector_count_low, sector_count_ext, _reserved0: 0, - control, + control: Volatile::new(control), _reserved1: [0; 4], } } @@ -67,20 +73,20 @@ impl RegisterH2D { #[repr(C, align(4))] #[derive(Clone, Copy, Debug)] pub struct RegisterD2H { - fis_type: FisType, - pm_flags: u8, - status: u8, - error: u8, - lba1: u8, - lba2: u8, - lba3: u8, - device: u8, - lba4: u8, - lba5: u8, - lba6: u8, + fis_type: Volatile, + pm_flags: Volatile, + status: Volatile, + error: Volatile, + lba1: Volatile, + lba2: Volatile, + lba3: Volatile, + device: Volatile, + lba4: Volatile, + lba5: Volatile, + lba6: Volatile, _reserved0: u8, - sector_count: u8, - sector_count_ext: u8, + sector_count: Volatile, + sector_count_ext: Volatile, _reserved1: [u8; 6], } @@ -89,8 +95,8 @@ impl RegisterD2H {} #[repr(C, align(4))] #[derive(Clone, Copy, Debug)] pub struct DmaActivateD2H { - fis_type: FisType, - pm_flags: u8, + fis_type: Volatile, + pm_flags: Volatile, _reserved: [u8; 2], } @@ -98,14 +104,14 @@ pub struct DmaActivateD2H { #[repr(C, align(4))] #[derive(Clone, Copy, Debug)] pub struct DmaSetup { - fis_type: FisType, - pm_flags: u8, + fis_type: Volatile, + pm_flags: Volatile, _reserved0: [u8; 2], - dma_buffer_id_lower: u32, - dma_buffer_id_upper: u32, + dma_buffer_id_lower: Volatile, + dma_buffer_id_upper: Volatile, _reserved1: u32, - dma_buffer_offset: u32, - dma_transfer_count: u32, + dma_buffer_offset: Volatile, + dma_transfer_count: Volatile, _reserved: u32, } @@ -113,65 +119,65 @@ pub struct DmaSetup { #[repr(C)] #[derive(Clone, Copy, Debug)] pub struct BistActivate { - fis_type: FisType, - pm_flags: u8, - pattern_def: u8, + fis_type: Volatile, + pm_flags: Volatile, + pattern_def: Volatile, _reserved: u8, - data1: u8, - data2: u8, - data3: u8, - data4: u8, + data1: Volatile, + data2: Volatile, + data3: Volatile, + data4: Volatile, } #[repr(C)] #[derive(Clone, Copy, Debug)] pub struct PioSetupD2H { - fis_type: FisType, - pm_flags: u8, - status: u8, - error: u8, - lba1: u8, - 
lba2: u8, - lba3: u8, - device: u8, - lba4: u8, - lba5: u8, - lba6: u8, + fis_type: Volatile, + pm_flags: Volatile, + status: Volatile, + error: Volatile, + lba1: Volatile, + lba2: Volatile, + lba3: Volatile, + device: Volatile, + lba4: Volatile, + lba5: Volatile, + lba6: Volatile, _reserved0: u8, - sector_count: u8, - sector_count_exp: u8, + sector_count: Volatile, + sector_count_exp: Volatile, _reserved1: u8, - estatus: u8, - transfer_count: u16, + estatus: Volatile, + transfer_count: Volatile, _reserved2: u16, } #[repr(C)] #[derive(Clone, Copy, Debug)] pub struct Data { - fis_type: u8, - pm_port: u8, + fis_type: Volatile, + pm_port: Volatile, _reserved0: [u8; 2], - data: [u32; SIZE], + data: Volatile<[u32; SIZE]>, } #[repr(C)] #[derive(Clone, Copy, Debug)] pub struct SetDeviceBits { - fis_type: FisType, - pm_port: u8, - status: u8, - error: u8, + fis_type: Volatile, + pm_port: Volatile, + status: Volatile, + error: Volatile, _reserved: u32, } impl SetDeviceBits { pub fn status_low(&self) -> u8 { - self.status & !0x7 + self.status.read() & !0x7 } pub fn status_high(&self) -> u8 { - (self.status >> 4) & !0x7 + (self.status.read() >> 4) & !0x7 } } @@ -193,7 +199,87 @@ impl Default for Fis { } } +pub struct GeneralInfo(u16); + +impl GeneralInfo { + ro_flag!(non_magnetic, 15); + ro_flag!(removable_media, 7); + ro_flag!(not_removable_media, 6); +} + +impl Debug for GeneralInfo { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + writeln!(f, "Non Magnetic: {:?}", self.is_non_magnetic())?; + writeln!(f, "Removable Media: {:?}", self.is_removable_media())?; + writeln!( + f, + "Not Removable Media: {:?}", + self.is_not_removable_media() + ) + } +} + +pub struct DeviceCapabilities(u16); + +impl DeviceCapabilities { + ro_flag!(lba_dma_support, 10); +} + +impl Debug for DeviceCapabilities { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + writeln!(f, "LBA & DMA Support: {:?},", self.is_lba_dma_support()) + } +} + +pub struct ValidFields(u16); + +impl ValidFields { + ro_flag!(valid_54_58, 0); + ro_flag!(valid_64_70, 1); +} + +#[derive(Debug)] #[repr(C, align(512))] pub struct IdentityPacketData { - pub data: [u16; 0x100], + pub info: GeneralInfo, + pub cylinders: u16, + _reserved0: u16, + pub heads: u16, + _vendor0: [u16; 2], + pub sectors: u16, + _vendor1: [u16; 3], + pub serial_number: [Char; 20], + _vendor2: [u16; 3], + /// Firmware revision in ASCII Characters + pub firmware_rev: [Char; 8], + /// Model number in ASCII Characters + pub model_num: [Char; 40], + pub max_sectors_rw_multiple: u8, + pub _vendor3: u8, + _reserved1: u16, + pub capabilities: u16, + _reserved9: u16, + pub pio_data_transfer_time: u16, + pub dma_data_transfer_time: u16, + pub valid_fields: u16, + pub cur_cylinders: u16, + pub cur_heads: u16, + pub cur_sectors: u16, + pub capacity_sectors: [u16; 2], + pub _reserved10: u16, + pub lba_total_sectors_28: [u16; 2], + // _reserved2: [u16; 19], + // pub major_version: u16, + // pub minor_version: u16, + + // pub command_sets_supported: [u16; 3], + // pub command_sets_enabled: [u16; 3], + // pub udma_modes: u16, + // pub lba_total_sectors_48: u64, + // _reserved4: [u16; 113], // Words 169-206 + // pub physical_logical_sector_size: u16, // Word 209 + // _reserved5: [u16; 7], // Words 210-216 + // pub nominal_media_rotation_rate: u16, /* Word 217 (The SSD vs + // HDDkey) + // * _reserved6: [u16; 40], */ } diff --git a/kernel/src/drivers/ata/ahci/hba.rs b/kernel/src/drivers/ata/ahci/hba.rs index 5173b0f..bd42796 100644 --- 
a/kernel/src/drivers/ata/ahci/hba.rs +++ b/kernel/src/drivers/ata/ahci/hba.rs @@ -3,10 +3,10 @@ /// Implemented directly from https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/serial-ata-ahci-spec-rev1-3-1.pdf extern crate alloc; -use core::{mem::MaybeUninit, num::NonZero}; +use core::{fmt::Debug, num::NonZero, panic}; use common::{ - address_types::{PhysicalAddress, VirtualAddress}, + address_types::PhysicalAddress, constants::{ PHYSICAL_MEMORY_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, }, @@ -14,11 +14,14 @@ use common::{ AtaCommand, Color, DeviceDetection, DeviceType, InterfaceCommunicationControl, InterfaceInitialization, InterfacePowerManagement, InterfaceSpeed, - InterfaceSpeedRestriction, PageSize, PicInterruptVectorOffset, + InterfaceSpeedRestriction, PageSize, }, error::{AhciError, ConversionError, DiagnosticError, HbaError}, + read_volatile, + volatile::Volatile, + write_volatile, }; -use cpu_utils::{instructions::port, structures::paging::PageEntryFlags}; +use cpu_utils::structures::paging::PageEntryFlags; use learnix_macros::{flag, ro_flag, rw1_flag, rwc_flag}; use num_enum::UnsafeFromPrimitive; use strum::IntoEnumIterator; @@ -32,18 +35,17 @@ use crate::{ }, vga_display::color_code::ColorCode, }, - memory::allocators::page_allocator::{ - allocator::PhysicalPageAllocator, extensions::PhysicalAddressExt, - }, + eprintln, + memory::allocators::page_allocator::extensions::PhysicalAddressExt, print, println, }; -use alloc::vec::Vec; - +#[repr(transparent)] #[derive(Copy, Clone)] pub struct AHCIBaseAddress(pub u32); /// CAP +#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct HBACapabilities(pub u32); @@ -70,7 +72,11 @@ impl HBACapabilities { ro_flag!(sal, 25); pub fn interface_speed(&self) -> InterfaceSpeed { - unsafe { core::mem::transmute(((self.0 >> 20) & 0xf) as u8) } + unsafe { + core::mem::transmute( + (((read_volatile!(self.0)) >> 20) & 0xf) as u8, + ) + } } // Support AHCI mode only @@ -93,7 +99,7 @@ impl HBACapabilities { // This value is between 1 and 32 pub fn number_of_commands(&self) -> u8 { - ((self.0 >> 8) & 0x1f) as u8 + (((read_volatile!(self.0)) >> 8) & 0x1f) as u8 } // Command completion coalescing supported @@ -107,11 +113,12 @@ impl HBACapabilities { /// Returns the number of ports implemented pub fn number_of_ports(&self) -> u8 { - (self.0 & 0x1f) as u8 + (read_volatile!(self.0) & 0x1f) as u8 } } /// GHC +#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct GlobalHostControl(pub u32); @@ -131,6 +138,7 @@ impl GlobalHostControl { } /// IS +#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct InterruptStatus(pub u32); @@ -138,11 +146,15 @@ impl InterruptStatus { // Port Interrupt Pending Status. Corresponds to bits of the PI // register. Cleared by writing a '1' to the corresponding bit. 
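    // A typical top-level handler drains this register by checking each
    // implemented port and acknowledging it afterwards; an illustrative
    // sketch (hedged, assuming an `hba: &mut HBAMemoryRegisters` binding
    // and the 32 possible ports):
    //
    //     for port_num in 0..32u8 {
    //         if hba.ghc.is.is_port_pending(port_num) {
    //             /* service the port's own PxIS first */
    //             hba.ghc.is.clear(port_num);
    //         }
    //     }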
pub fn is_port_pending(&self, port_num: u8) -> bool { - (self.0 & (1 << port_num)) != 0 + (read_volatile!(self.0) & (1 << port_num)) != 0 } pub fn clear(&mut self, port_num: u8) { - self.0 |= 1 << port_num; + write_volatile!(self.0, read_volatile!(self.0) | (1 << port_num)); + } + + pub fn clear_all(&mut self) { + write_volatile!(self.0, 0); } // RWC flag for Port 0 Interrupt Pending Status @@ -180,59 +192,61 @@ impl InterruptStatus { } // PI +#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct PortsImplemented(pub u32); impl PortsImplemented { // Port i is Implemented (P[i]) pub fn is_port_implemented(&self, port_num: u8) -> bool { - (self.0 & (1 << port_num)) != 0 + (read_volatile!(self.0) & (1 << port_num)) != 0 } } // VS +#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct Version(pub u32); impl Version { // Major Version Number (Bits 31:16) pub fn major_version(&self) -> u16 { - (self.0 >> 16) as u16 + (read_volatile!(self.0) >> 16) as u16 } // Minor Version Number (Bits 15:0) pub fn minor_version(&self) -> u16 { - (self.0 & 0xFFFF) as u16 + (read_volatile!(self.0) & 0xffff) as u16 } } /// CCC_CTL +#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct CommandCompletionCoalescingControl(pub u32); impl CommandCompletionCoalescingControl { pub fn interrupt_time_ms(&self) -> u16 { - const MASK: u32 = 0xFFFF; - ((self.0 >> 16) & MASK) as u16 + ((read_volatile!(self.0) >> 16) & 0xffff) as u16 } // Command Completions (CC): Number of command completions necessary to // cause a CCC interrupt pub fn command_completions(&self) -> u8 { - const MASK: u32 = 0xFF; - ((self.0 >> 8) & MASK) as u8 + ((read_volatile!(self.0) >> 8) & 0xff) as u8 } flag!(enable, 0); } /// CCC_PORTS +#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct CommandCompletionCoalescingPorts(pub u32); impl CommandCompletionCoalescingPorts { pub fn set_port(&mut self, port_num: u8) { - self.0 |= 1 << port_num + write_volatile!(self.0, read_volatile!(self.0) | (1 << port_num)) } pub fn unset(&mut self, port_num: u8) { @@ -273,22 +287,24 @@ impl CommandCompletionCoalescingPorts { } /// EM_LOC +#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct EnclosureManagementLocation(pub u32); impl EnclosureManagementLocation { pub fn dword_offset_from_abar(&self) -> usize { - (self.0 >> 16) as usize + (read_volatile!(self.0) >> 16) as usize } /// ZERO is invalid /// TODO understand how to check if i have both receive and transmit pub fn buffet_size(&self) -> Option> { - NonZero::new((self.0 & 0xffff) as usize) + NonZero::new((read_volatile!(self.0) & 0xffff) as usize) } } /// EM_CTL +#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct EnclosureManagementControl(pub u32); @@ -328,6 +344,7 @@ impl EnclosureManagementControl { } /// CAP2 +#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct HostCapabilitiesExtended(pub u32); @@ -352,6 +369,7 @@ impl HostCapabilitiesExtended { } // BOHC +#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct BiosOsControlStatus(pub u32); @@ -394,6 +412,7 @@ pub struct VendorSpecificRegisters { } /// Port X Interrupt status +#[repr(transparent)] pub struct PortInterruptStatus(pub u32); impl PortInterruptStatus { @@ -449,11 +468,12 @@ impl PortInterruptStatus { rwc_flag!(dhrs, 0); pub fn clear_pending_interrupts(&mut self) { - self.0 = 0; + write_volatile!(self.0, u32::MAX); } } /// Port X Interrupt Enable +#[repr(transparent)] pub struct InterruptEnable(pub u32); impl InterruptEnable { @@ -510,12 +530,16 @@ impl InterruptEnable { } /// Port X 
Command and status +#[repr(transparent)] pub struct CmdStatus(pub u32); impl CmdStatus { pub fn set_icc(&mut self, icc: InterfaceCommunicationControl) { - self.0 &= !(0xf << 28); - self.0 |= (icc as u32) << 28; + write_volatile!(self.0, read_volatile!(self.0) & !(0xf << 28)); + write_volatile!( + self.0, + read_volatile!(self.0) | (icc as u32) << 28 + ); } // Aggressive Slumber / Partial @@ -563,14 +587,11 @@ impl CmdStatus { // Mechanical Presence Switch State ro_flag!(mpss, 13); - pub fn set_current_cmd(&mut self, ccs: u8) { + pub fn get_current_cmd(&mut self) -> u32 { if !self.is_st() { - return; + return 0; } - (0x0u8..=0x1fu8).contains(&ccs).then(|| { - self.0 &= !(0x1f << 8); - self.0 |= (ccs as u32) << 8; - }); + (read_volatile!(self.0) >> 8) & 0x1f } // FIS Receive Enable @@ -625,6 +646,7 @@ impl CmdStatus { } /// Port x Task File Data +#[repr(transparent)] pub struct TaskFileData(pub u32); impl TaskFileData { @@ -638,7 +660,7 @@ impl TaskFileData { ro_flag!(bsy, 7); pub fn error(&self) -> u8 { - (self.0 >> 8) as u8 + (read_volatile!(self.0) >> 8) as u8 } } @@ -663,39 +685,41 @@ impl Signature { } /// Port X SATA Status +#[repr(transparent)] pub struct SataStatus(pub u32); impl SataStatus { pub fn power( &self, ) -> Result> { - let power = ((self.0 >> 8) & 0xf) as u8; + let power = ((read_volatile!(self.0) >> 8) & 0xf) as u8; InterfacePowerManagement::try_from(power) } pub fn speed(&self) -> InterfaceSpeed { - let speed = ((self.0 >> 4) & 0xf) as u8; + let speed = ((read_volatile!(self.0) >> 4) & 0xf) as u8; unsafe { InterfaceSpeed::unchecked_transmute_from(speed) } } pub fn detection( &self, ) -> Result> { - let detection = (self.0 & 0xf) as u8; + let detection = (read_volatile!(self.0) & 0xf) as u8; DeviceDetection::try_from(detection) } } /// Port X SATA control +#[repr(transparent)] pub struct SataControl(pub u32); impl SataControl { pub fn port_multiplier(&self) -> u8 { - ((self.0 >> 16) & 0xf) as u8 + ((read_volatile!(self.0) >> 16) & 0xf) as u8 } pub fn select_power_management(&self) -> u8 { - ((self.0 >> 12) & 0xf) as u8 + ((read_volatile!(self.0) >> 12) & 0xf) as u8 } flag!(devslp_disabled, 10); @@ -703,7 +727,7 @@ impl SataControl { flag!(partial_disabled, 8); pub fn max_speed(&self) -> InterfaceSpeedRestriction { - let speed = ((self.0 >> 4) & 0xf) as u8; + let speed = ((read_volatile!(self.0) >> 4) & 0xf) as u8; unsafe { InterfaceSpeedRestriction::unchecked_transmute_from(speed) } @@ -711,15 +735,20 @@ impl SataControl { pub fn set_max_speed(&mut self, speed: InterfaceSpeed) { if speed != InterfaceSpeed::DevNotPresent { - self.0 &= !(0xf << 4); - self.0 |= (speed as u32) << 4; + write_volatile!(self.0, read_volatile!(self.0) & !(0xf << 4)); + write_volatile!( + self.0, + read_volatile!(self.0) | (speed as u32) << 4 + ); } } pub fn device_initialization( &self, ) -> Result> { - InterfaceInitialization::try_from((self.0 & 0xf) as u8) + InterfaceInitialization::try_from( + (read_volatile!(self.0) & 0xf) as u8, + ) } // TODO THIS COMMAND ANY MAYBE OTHER SHOULD PROBABLY MOVE TO THE PORT @@ -729,48 +758,66 @@ impl SataControl { &mut self, init: InterfaceInitialization, ) { - self.0 &= !0xf; - self.0 |= init as u32; + write_volatile!(self.0, read_volatile!(self.0) & !0xf); + write_volatile!(self.0, read_volatile!(self.0) | init as u32); } } /// Port X SATA error +#[repr(transparent)] pub struct SataError(pub u32); impl SataError { pub fn diagnostic(&self) -> impl Iterator { - let diagnostic_errors = ((self.0 >> 16) & 0xffff) as u16; + let diagnostic_errors = + 
            ((read_volatile!(self.0) >> 16) & 0xffff) as u16;
         DiagnosticError::iter()
             .filter(move |n| *n as u16 & diagnostic_errors != 0)
     }

     pub fn error(&self) -> impl Iterator<Item = AhciError> {
-        let ahci_error = (self.0 & 0xffff) as u16;
+        let ahci_error = (read_volatile!(self.0) & 0xffff) as u16;
         AhciError::iter().filter(move |n| *n as u16 & ahci_error != 0)
     }
+
+    pub fn zero_error(&mut self) {
+        write_volatile!(self.0, read_volatile!(self.0) & !0xffff)
+    }
 }

 /// Port X Sata Active
+#[repr(transparent)]
 pub struct SataActive(pub u32);

 /// Port X Command issue
-pub struct CmdIssue(pub u32);
+#[repr(transparent)]
+pub struct CmdIssue(pub Volatile<u32>);
+
+impl CmdIssue {
+    pub fn issue_cmd(&mut self, cmd: u8) {
+        self.0.write(self.0.read() | 1 << cmd);
+    }
+}

 /// Port X SATA Notification
+#[repr(transparent)]
 pub struct SataNotification(pub u32);

 impl SataNotification {
-    /// Set port multiplier notification
+    /// Set port multiplier notification
     pub fn set_pm_notif(&mut self, pm_port: u8) {
-        (0x0..0xf)
-            .contains(&pm_port)
-            .then(|| self.0 |= pm_port as u32);
+        (0x0..0xf).contains(&pm_port).then(|| {
+            write_volatile!(
+                self.0,
+                read_volatile!(self.0) | pm_port as u32
+            )
+        });
     }

     /// Get port multiplier notification
     pub fn get_pm_notif(&self, pm_port: u8) -> bool {
         if (0x0..0xf).contains(&pm_port) {
-            (self.0 & !0xffff) & (1 << pm_port) != 0
+            (read_volatile!(self.0) & !0xffff) & (1 << pm_port) != 0
         } else {
             false
         }
     }
 }

 /// Port X Frame Information Structure based switching control
+#[repr(transparent)]
 pub struct FisSwitchControl(pub u32);

 impl FisSwitchControl {
     /// Port multiplier device that experienced fatal error
     pub fn device_with_error(&self) -> u8 {
-        ((self.0 >> 16) & 0xf) as u8
+        ((read_volatile!(self.0) >> 16) & 0xf) as u8
     }

     /// The number of devices that FIS-Based switching has been optimized
     /// for. The minimum value for this field should be 0x2.
pub fn active_device_optimization(&self) -> u8 { - ((self.0 >> 12) & 0xf) as u8 + ((read_volatile!(self.0) >> 12) & 0xf) as u8 } /// Set the port multiplier port number, that should receive the next /// command pub fn device_to_issue(&mut self, dev_num: u8) { - self.0 &= !(0xf << 8); - self.0 |= (dev_num as u32) << 8; + write_volatile!(self.0, read_volatile!(self.0) & !(0xf << 8)); + write_volatile!( + self.0, + read_volatile!(self.0) | (dev_num as u32) << 8 + ); } // Single device error @@ -810,19 +861,20 @@ impl FisSwitchControl { } /// Port x Device sleep +#[repr(transparent)] pub struct DeviceSleep(pub u32); impl DeviceSleep { /// Device Sleep Idle Timeout Multiplier pub fn dito_multiplier(&self) -> u8 { - ((self.0 >> 25) & 0xf) as u8 + ((read_volatile!(self.0) >> 25) & 0xf) as u8 } /// Raw dito value /// /// **Use [`dito_actual`] for the actual wait time** pub fn dito_ms(&self) -> u16 { - ((self.0 >> 15) & 0x3ff) as u16 + ((read_volatile!(self.0) >> 15) & 0x3ff) as u16 } /// The actual timeout, which is dito * (dito_multiplier + 1) @@ -835,7 +887,7 @@ impl DeviceSleep { /// TODO: currently only read only, if write needed, check /// documentation about extended cap and writing to this offset pub fn mdat(&self) -> u8 { - ((self.0 >> 10) & 0x1f) as u8 + ((read_volatile!(self.0) >> 10) & 0x1f) as u8 } /// Device sleep exit timeout @@ -843,7 +895,7 @@ impl DeviceSleep { /// TODO: currently only read only, if write needed, check /// documentation about extended cap and writing to this offset pub fn deto_ms(&self) -> u8 { - ((self.0 >> 2) & 0xff) as u8 + ((read_volatile!(self.0) >> 2) & 0xff) as u8 } // Device sleep present @@ -854,18 +906,19 @@ impl DeviceSleep { } /// Port X Vendor specific +#[repr(transparent)] pub struct VendorSpecific(pub u32); #[repr(C)] pub struct PortControlRegisters { /// Port X Command list base address low - pub clb: u32, + pub clb: Volatile, /// Port X Command list base address high - pub clbu: u32, + pub clbu: Volatile, /// Port X frame information structure base address low - pub fb: u32, + pub fb: Volatile, /// Port X frame information structure base address high - pub fbu: u32, + pub fbu: Volatile, pub is: PortInterruptStatus, pub ie: InterruptEnable, pub cmd: CmdStatus, @@ -887,30 +940,30 @@ pub struct PortControlRegisters { impl PortControlRegisters { /// Return the full command list address by combining the low and high /// 32bit parts - pub fn cmd_list(&mut self) -> &mut CommandList { - let cmd_list_addr = ((self.clbu as usize) << 32) - | (self.clb as usize & !((1 << 10) - 1)); - unsafe { &mut *(cmd_list_addr as *mut CommandList) } + pub fn cmd_list(&mut self) -> &mut CmdList { + let cmd_list_addr = ((self.clbu.read() as usize) << 32) + | (self.clb.read() as usize & !((1 << 10) - 1)); + unsafe { &mut *(cmd_list_addr as *mut CmdList) } } - pub fn set_cmd_list(&mut self, clb: &CommandList) { - let ptr = clb as *const _ as usize; - self.clb = (ptr & 0xffffffff) as u32; - self.clbu = (ptr >> 32) as u32; + pub fn set_cmd_list_address(&mut self, ptr: usize) { + println!("CLB: {:x?}", ptr); + self.clb.write((ptr & 0xffffffff) as u32); + self.clbu.write((ptr >> 32) as u32); } /// Return the full frame information structure address by combining /// the low and high 32bit parts pub fn received_fis(&self) -> &ReceivedFis { - let rfis_addr = ((self.fbu as usize) << 32) - | (self.fb as usize & !((1 << 8) - 1)); + let rfis_addr = ((self.fbu.read() as usize) << 32) + | (self.fb.read() as usize & !((1 << 8) - 1)); unsafe { &*(rfis_addr as *const ReceivedFis) } } - pub fn 
set_received_fis(&mut self, fis: &ReceivedFis) {
-        let ptr = fis as *const _ as usize;
-        self.fb = (ptr & 0xffffffff) as u32;
-        self.fbu = (ptr >> 32) as u32;
+    pub fn set_received_fis_address(&mut self, ptr: usize) {
+        println!("FB: {:x?}", ptr);
+        self.fb.write((ptr & 0xffffffff) as u32);
+        self.fbu.write((ptr >> 32) as u32);
     }

     pub fn set_status(&mut self, port: u8) {
@@ -923,7 +976,7 @@ impl PortControlRegisters {

     /// Return the index of an available command slot if one exists
     pub fn find_cmd_slot(&self) -> Option<usize> {
-        let mut slots = self.ci.0 | self.sact.0;
+        let mut slots = self.ci.0.read() | self.sact.0;
         for i in 0usize..32 {
             if slots & 1 == 0 {
                 return Some(i);
             }
             slots >>= 1;
         }
         None
     }
+
+    pub fn identity_packet(&mut self, buf: *mut IdentityPacketData) {
+        let fis = RegisterH2D::new(
+            1 << 7,
+            AtaCommand::IdentifyDevice,
+            0,
+            0,
+            0,
+            0,
+            0,
+        );
+        let cmd = &mut self.cmd_list().entries[0];
+        let cmd_table = &mut cmd.cmd_table::<8>();
+        let prdt_ent = &mut cmd_table.table[0];
+        write_volatile!(cmd_table.cfis, Fis { h2d: fis });
+        prdt_ent.set_buffer(buf);
+        prdt_ent.dbc.set_dbc(511);
+        cmd.info.set_command_fis_len(size_of::<RegisterH2D>());
+        cmd.info.set_prdtl(1);
+        println!("Sending command!");
+        self.ci.issue_cmd(0);
+
+        let mut timeout = 0xfffff;
+        loop {
+            if self.is.0 != 0 {
+                if self.is.is_tfes() {
+                    eprintln!("ERROR READING FROM DISK");
+                    for error in self.serr.error() {
+                        println!("{:?}", error);
+                    }
+                    if self.tfd.is_err() {
+                        println!(
+                            "TASK FILE DATA ERROR STATE\nERROR: {:08b}",
+                            self.tfd.error()
+                        );
+                    }
+                }
+                println!("Finished!");
+                println!("{:032b}", self.is.0);
+                break;
+            } else {
+                timeout -= 1
+            }
+
+            if timeout == 0 {
+                panic!("Timeout on identity packet read")
+            }
+        }
+        unsafe {
+            for w in (&mut *buf).serial_number.chunks_exact_mut(2) {
+                w.swap(0, 1);
+            }
+            for w in (&mut *buf).model_num.chunks_exact_mut(2) {
+                w.swap(0, 1);
+            }
+            for w in (&mut *buf).firmware_rev.chunks_exact_mut(2) {
+                w.swap(0, 1);
+            }
+        }
+    }
 }

 /// TODO, DECIDE IF ITS OK THAT THIS IS ONE BYTE GREATER IN SIZE
 #[repr(C, align(256))]
 pub struct ReceivedFis {
-    pub dsfis: DmaSetup,
+    pub dsfis: Volatile<DmaSetup>,
     _reserved0: u32,
-    pub psfis: PioSetupD2H,
+    pub psfis: Volatile<PioSetupD2H>,
     _reserved1: [u32; 3],
-    pub rfis: RegisterD2H,
+    pub rfis: Volatile<RegisterD2H>,
     _reserved2: u32,
-    pub sdbfis: SetDeviceBits,
-    pub ufis: [u8; 64],
+    pub sdbfis: Volatile<SetDeviceBits>,
+    pub ufis: Volatile<[u8; 64]>,
     _reserved3: [u32; 24],
 }

@@ -955,12 +1068,18 @@ pub struct CmdListDescriptionInfo(pub u32);

 impl CmdListDescriptionInfo {
     /// Set the Physical region descriptor table length
     pub fn set_prdtl(&mut self, size: u16) {
-        self.0 |= (size as u32) << 16;
+        write_volatile!(
+            self.0,
+            read_volatile!(self.0) | (size as u32) << 16
+        );
     }

     /// Set the port multiplier port
     pub fn set_pm_port(&mut self, pm_port: u8) {
-        self.0 |= ((pm_port & 0xf) as u32) << 12
+        write_volatile!(
+            self.0,
+            read_volatile!(self.0) | ((pm_port & 0xf) as u32) << 12
+        );
     }

     // Clear busy upon R_OK
@@ -981,47 +1100,47 @@ impl CmdListDescriptionInfo {
     // ATAPI
     flag!(a, 5);

-    /// Length of command FIS in dwords
-    pub fn set_command_fis_len_dw(&mut self, len: u8) {
-        assert!(len < 2, "Len must be smaller then 2");
-        assert!(len > 16, "Len must be greater then 16 ");
-        self.0 |= len as u32;
+    /// Length of the command FIS in bytes (converted internally to dwords)
+    pub fn set_command_fis_len(&mut self, len: usize) {
+        assert!(len <= 64, "Len must be no greater than 64");
+        assert!(len >= 8, "Len must be at least 8");
+        write_volatile!(
+            self.0,
+
            read_volatile!(self.0) | (len / size_of::<u32>()) as u32
+        );
     }
 }

 #[repr(C)]
-pub struct CommandHeader {
+pub struct CmdHeader {
     info: CmdListDescriptionInfo,
-    prdb_byte_count: u32,
+    prdb_byte_count: Volatile<u32>,
     /// Command table descriptor base address
-    ctba: u32,
+    ctba: Volatile<u32>,
     /// Command table descriptor base address upper
-    ctbau: u32,
+    ctbau: Volatile<u32>,
     _reserved: [u32; 4],
 }

-impl CommandHeader {
+impl CmdHeader {
     pub fn cmd_table<const ENTRIES: usize>(
         &mut self,
-    ) -> &mut CommandTable<ENTRIES> {
-        let cmd_table_addr =
-            ((self.ctbau as usize) << 32) | (self.ctba as usize);
-        unsafe { &mut *(cmd_table_addr as *mut CommandTable<ENTRIES>) }
+    ) -> &mut CmdTable<ENTRIES> {
+        let cmd_table_addr = ((self.ctbau.read() as usize) << 32)
+            | (self.ctba.read() as usize);
+        unsafe { &mut *(cmd_table_addr as *mut CmdTable<ENTRIES>) }
     }

-    pub fn set_cmd_table<const ENTRIES: usize>(
-        &mut self,
-        table: &CommandTable<ENTRIES>,
-    ) {
-        let ptr = table as *const _ as usize;
-        self.ctba = (ptr & 0xffffffff) as u32;
-        self.ctbau = (ptr >> 32) as u32;
+    pub fn set_cmd_table(&mut self, ptr: usize) {
+        println!("CMD TBL: {:x?}", ptr);
+        self.ctba.write((ptr & 0xffffffff) as u32);
+        self.ctbau.write((ptr >> 32) as u32);
     }
 }

 #[repr(C, align(1024))]
-pub struct CommandList {
-    pub entries: [CommandHeader; 32],
+pub struct CmdList {
+    pub entries: [CmdHeader; 32],
 }

 pub struct PrdtDescriptionInfo(pub u32);

@@ -1034,67 +1153,36 @@ impl PrdtDescriptionInfo {
     pub fn set_dbc(&mut self, dbc: u32) {
         const MB: u32 = 1 << 20;
         assert!(dbc < 4 * MB, "DBC should be smaller than 4 MiB");
+        write_volatile!(self.0, read_volatile!(self.0) | dbc | 1);
     }
 }

-#[repr(C, align(128))]
-pub struct CommandTableEntry {
+#[repr(C)]
+pub struct CmdTableEntry {
     /// Data base address buffer
-    dba: u32,
+    dba: Volatile<u32>,
     /// Data base address buffer upper
-    dbau: u32,
+    dbau: Volatile<u32>,
     _reserved: u32,
     /// Data byte count (A maximum of 4 MB is available)
     dbc: PrdtDescriptionInfo,
 }

-impl CommandTableEntry {
+impl CmdTableEntry {
     pub fn set_buffer<T>(&mut self, buf: *mut T) {
         let ptr = buf as usize;
-        self.dba = (ptr & 0xffffffff) as u32;
-        self.dbau = (ptr >> 32) as u32;
+        self.dba.write((ptr & 0xffffffff) as u32);
+        self.dbau.write((ptr >> 32) as u32);
     }
 }

-#[repr(C)]
-pub struct CommandTable<const ENTRIES: usize> {
+#[repr(C, align(256))]
+pub struct CmdTable<const ENTRIES: usize> {
     cfis: Fis,
     /// TODO
     acmd: [u8; 0x10],
     _reserved: [u8; 0x30],
-    table: [CommandTableEntry; ENTRIES],
-}
-
-#[repr(C, align(4096))]
-pub struct PortCommands<const ENTRIES: usize> {
-    pub fis: ReceivedFis,
-    pub cmd_list: CommandList,
-    pub cmd_table: [CommandTable<ENTRIES>; 32],
-    _reserved: [u8; 0x100],
-}
-
-impl<const ENTRIES: usize> PortCommands<ENTRIES> {
-    pub fn empty() -> &'static mut PortCommands<ENTRIES> {
-        // TODO CREATE EXTERNAL UTIL FUNCTION FOR THIS AND USE ALSO ON PAGE
-        // TABLE CREATION
-        let zeroed = unsafe {
-            core::slice::from_raw_parts_mut(
-                alloc_pages!(
-                    size_of::<PortCommands<ENTRIES>>()
-                        / REGULAR_PAGE_SIZE
-                ) as *mut usize,
-                size_of::<PortCommands<ENTRIES>>() / size_of::<usize>(),
-            )
-        };
-        zeroed.fill(0);
-
-        // TODO MAKE LESS SKEYTCHY
-        let port_cmd_ptr = (zeroed.as_mut_ptr() as usize
-            - PHYSICAL_MEMORY_OFFSET)
-            as *mut PortCommands<ENTRIES>;
-
-        unsafe { &mut *port_cmd_ptr }
-    }
+    table: [CmdTableEntry; ENTRIES],
 }

 #[repr(C)]
@@ -1124,28 +1212,38 @@ impl HBAMemoryRegisters {
         let hba: &'static mut HBAMemoryRegisters =
             unsafe { &mut *a.translate().as_mut_ptr() };

+        hba.ghc.ghc.set_ae();
+        hba.ghc.ghc.set_ie();
+
         if hba.ghc.pi.0 >= (1 << 31) {
             panic!("There is no support for HBAs with more than 30 ports")
         }
-        hba.ghc.ghc.set_ae();
-        hba.ghc.ghc.set_ie();
-        println!("BIOS / OS Handoff: {}", hba.ghc.cap_ext.is_boh());
-        println!("Interrupts: {}", hba.ghc.ghc.is_ie());
+
+        if 
hba.ghc.cap_ext.is_boh() {
+            unimplemented!("BIOS/OS handoff is not implemented")
+        }

         Ok(hba)
     }

-    /// Returns the amount of active devices found
-    pub fn probe(&self) -> usize {
+    /// Returns the number of active devices found and sets them into an
+    /// idle state.
+    pub fn probe_init(&mut self) -> usize {
         println!(
             "Detected {} implemented ports",
             self.ghc.cap.number_of_ports()
         );

+        println!(
+            "Supported command slots: {}, Supported 64bit addresses: {}",
+            self.ghc.cap.number_of_commands(),
+            self.ghc.cap.is_s64a()
+        );
+
         let mut count = 0;
-        for (i, port) in self.ports.iter().enumerate() {
+        for (i, port) in self.ports.iter_mut().enumerate() {
             if self.ghc.pi.is_port_implemented(i as u8)
                 && let Ok(power) = port.ssts.power()
                 && let InterfacePowerManagement::Active = power
@@ -1165,105 +1263,62 @@ impl HBAMemoryRegisters {
                     println!("{:?}", e ; color = ColorCode::new(Color::Red, Color::Black) )
                 }
             }
-            }
-        }
-        count
-    }
-
-    pub fn map_device<const ENTRIES: usize>(
-        &'static mut self,
-        port_number: usize,
-    ) -> AhciDeviceController<ENTRIES> {
-        AhciDeviceController::<ENTRIES>::new(
-            &mut self.ports[port_number],
-            PortCommands::empty(),
-        )
-    }
-}
-
-pub struct AhciDeviceController<const ENTRIES: usize> {
-    pub port: &'static mut PortControlRegisters,
-    pub port_cmds: &'static mut PortCommands<ENTRIES>,
-}
-
-impl<const ENTRIES: usize> AhciDeviceController<ENTRIES> {
-    pub fn new(
-        port: &'static mut PortControlRegisters,
-        port_cmds: &'static mut PortCommands<ENTRIES>,
-    ) -> AhciDeviceController<ENTRIES> {
-        println!("port address: {:x?}", port as *const _ as usize);
-        println!(
-            "Port commands address: {:x?}",
-            port_cmds as *const _ as usize
-        );
-
-        port.cmd.stop();
-        port.set_cmd_list(&port_cmds.cmd_list);
-        port.set_received_fis(&port_cmds.fis);
-        for (header, table) in port_cmds
-            .cmd_list
-            .entries
-            .iter_mut()
-            .zip(port_cmds.cmd_table.iter())
-        {
-            header.info.set_prdtl(ENTRIES as u16);
-            header.set_cmd_table(table);
-        }
-        port.cmd.start();
-
-        AhciDeviceController { port, port_cmds }
-    }
-
-    pub fn identity_packet(
-        &mut self,
-        buf: *mut IdentityPacketData,
-    ) -> Option<()> {
-        self.port.is.clear_pending_interrupts();
-        let slot = self.port.find_cmd_slot()?;
-        let header = &mut self.port.cmd_list().entries[slot];
-        header.info.unset_w();
-        header.info.set_p();
-        header.info.set_prdtl(ENTRIES as u16);
-
-        let table = header.cmd_table::<ENTRIES>();
-        table.table[0].dbc.set_i();
-        table.table[0].dbc.set_dbc(511); // 256 words - 1
-        table.table[0].set_buffer(buf);
-
-        let fis = RegisterH2D::new(
-            0x80,
-            AtaCommand::IdentifyDevice,
-            0,
-            0,
-            0,
-            0,
-            0,
-        );
-        table.cfis = Fis { h2d: fis };
-        unsafe {
-            let v = core::ptr::read_volatile(&self.port.ci.0);
-            core::ptr::write_volatile(
-                &mut self.port.ci.0,
-                v | (1 << slot),
-            );
-
-            let mut timeout = 0xffffffu32;
-            loop {
-                let v = core::ptr::read_volatile(&self.port.ci.0);
-                if v & (1 << slot) == 0 {
-                    break;
-                }
-                timeout -= 1;
-                if timeout == 0 {
-                    panic!("TIME EXCEEDED ON IDENTITY READ")
-                }
-                if self.port.is.0 != 0 {
-                    panic!("ERROR ON IDENTITY READ, {}", self.port.is.0);
-                }
+            port.cmd.stop();
+
+            let clb_fbu_table = unsafe { alloc_pages!(1) };
+            for i in (0..4096).step_by(size_of::<usize>()) {
+                unsafe {
+                    core::ptr::write_volatile(
+                        ((clb_fbu_table + i) + PHYSICAL_MEMORY_OFFSET)
+                            as *mut usize,
+                        0,
+                    );
+                }
             }
+            port.set_cmd_list_address(clb_fbu_table);
+            port.set_received_fis_address(
+                clb_fbu_table + size_of::<CmdList>(),
+            );
+
+            // Map the first command header to a table with 8 PRDT
+            // entries (0x100 bytes of table in total)
+            let cmd_list = port.cmd_list();
+            cmd_list.entries[0].set_cmd_table(
+                clb_fbu_table
+                    + size_of::<CmdList>()
+                    + size_of::<ReceivedFis>(),
+            );
+
+            port.cmd.set_fre();
+
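            // Hedged note on the ordering used here (reading of the
+            // AHCI spec's port-initialization sequence): FRE is enabled
+            // only once CLB/FB hold valid buffer addresses; stale errors
+            // and interrupts are cleared next; ST is set last, after the
+            // port reports ready (BSY and DRQ clear, PHY active).
+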
port.serr.zero_error(); + // port.ie.set_dhre(); + // port.ie.set_pse(); + // port.ie.set_dse(); + // port.ie.set_tfee(); + port.is.clear_pending_interrupts(); + self.ghc.is.clear_all(); + + port.cmd.set_sud(); + port.cmd.set_pod(); + port.cmd.set_icc(InterfaceCommunicationControl::Active); + + loop { + if !port.tfd.is_bsy() + && !port.tfd.is_drq() + && matches!( + port.ssts.power().unwrap(), + InterfacePowerManagement::Active + ) + { + break; + } } + port.cmd.start(); + println!("Started port number: {}", i) } } - Some(()) + count } } diff --git a/kernel/src/drivers/ata/ahci/mod.rs b/kernel/src/drivers/ata/ahci/mod.rs index 9d86c20..397d27c 100644 --- a/kernel/src/drivers/ata/ahci/mod.rs +++ b/kernel/src/drivers/ata/ahci/mod.rs @@ -1,12 +1,16 @@ pub mod fis; pub mod hba; +use common::enums::CascadedPicInterruptLine; use cpu_utils::structures::interrupt_descriptor_table::InterruptStackFrame; pub use fis::*; pub use hba::*; +use crate::{drivers::pic8259::PIC, println}; + pub extern "x86-interrupt" fn ahci_interrupt( _stack_frame: InterruptStackFrame, ) { - panic!("AHCI Interrupts!"); + println!("AHCI Interrupts!"); + unsafe { PIC.end_of_interrupt(CascadedPicInterruptLine::Ahci) }; } diff --git a/kernel/src/drivers/pci.rs b/kernel/src/drivers/pci.rs index 3f20657..d1de4da 100644 --- a/kernel/src/drivers/pci.rs +++ b/kernel/src/drivers/pci.rs @@ -430,7 +430,6 @@ pub fn scan_pci() -> Vec { continue; } for function in 1..8 { - println!("{}", function); let common = PciConfigurationCycle::read_common_header( bus, device, function, ); diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 81d75a3..868c3ea 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -30,8 +30,7 @@ use core::{ use crate::{ drivers::{ ata::ahci::{ - AhciDeviceController, GenericHostControl, HBAMemoryRegisters, - IdentityPacketData, + GenericHostControl, HBAMemoryRegisters, IdentityPacketData, }, interrupt_handlers, keyboard::{KEYBOARD, ps2_keyboard::Keyboard}, @@ -40,30 +39,23 @@ use crate::{ vga_display::color_code::ColorCode, }, memory::{ - allocators::page_allocator::{ - allocator::PhysicalPageAllocator, - extensions::{PhysicalAddressExt, VirtualAddressExt}, - }, + allocators::page_allocator::allocator::PhysicalPageAllocator, memory_map::{ParsedMapDisplay, parse_map}, }, }; use common::{ - address_types::{PhysicalAddress, VirtualAddress}, + address_types::PhysicalAddress, constants::{ - BIG_PAGE_ALIGNMENT, PHYSICAL_MEMORY_OFFSET, - REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, - }, - enums::{ - Color, DeviceDetection, DeviceType, InterfacePowerManagement, - PS2ScanCode, PageSize, PciDeviceType, + PHYSICAL_MEMORY_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, }, + enums::{CascadedPicInterruptLine, Color, PS2ScanCode}, }; use cpu_utils::{ instructions::interrupts::{self}, structures::{ interrupt_descriptor_table::{IDT, InterruptDescriptorTable}, - paging::PageEntryFlags, + paging::{PageEntryFlags, PageTable}, }, }; @@ -80,6 +72,9 @@ pub unsafe extern "C" fn _start() -> ! { okprintln!("Obtained Memory Map"); println!("{}", ParsedMapDisplay(parsed_memory_map!())); PhysicalPageAllocator::init(unsafe { &mut ALLOCATOR }); + println!("ALLOCATOR MAP LEN: {}", unsafe { + ALLOCATOR.assume_init_mut().map_mut().map.len() + }); okprintln!("Allocator Initialized"); unsafe { let idt_address = alloc_pages!(1).into(); @@ -112,58 +107,63 @@ pub unsafe extern "C" fn _start() -> ! 
{ } } - for device in pci_devices.iter_mut() { - // println!("{:#?}", unsafe { device.common.vendor_device }); - // println!("{:#?}", unsafe { device.common.header_type }); - // println!("{:#?}\n", unsafe { device.common.device_type }); - - if device.header.common().device_type.is_ahci() { - let a = unsafe { - PhysicalAddress::new_unchecked( - device.header.general_device.bar5.address(), - ) - }; - - println!( - "Bus Master: {}, Interrupts Disable {}, I/O Space: {}, \ - Memory Space: {}", - device.header.common().command.is_bus_master(), - device.header.common().command.is_interrupt_disable(), - device.header.common().command.is_io_space(), - device.header.common().command.is_memory_space() - ); - - println!( - "Interrupt Line: {}, Interrupt Pin: {}", - unsafe { device.header.general_device.interrupt_line }, - unsafe { device.header.general_device.interrupt_pin } - ); - - let aligned = a.align_down(REGULAR_PAGE_ALIGNMENT); - let hba = HBAMemoryRegisters::new(aligned).unwrap(); - let _ = hba.probe(); - let mut controller = hba.map_device::<13>(0); - let b = unsafe { alloc_pages!(1) - PHYSICAL_MEMORY_OFFSET }; - - println!("b: {:x?}", &b as *const _ as usize); - let rfis = controller.port_cmds.fis.rfis; - println!("rfis: {:?}", rfis); - controller.identity_packet(b as *mut IdentityPacketData); - - let rfis = controller.port_cmds.fis.rfis; - println!("rfis: {:?}", rfis); - - let d = unsafe { - core::ptr::read_volatile(b as *const IdentityPacketData) - }; - - println!("Data Address: {:x?}", b); - - println!("Data: {:?}", d.data); - - // println!("{:x?}", controller.port_cmds as *const _ as usize) - } - } + unsafe { PIC.enable_irq(CascadedPicInterruptLine::Ahci) }; + // for device in pci_devices.iter_mut() { + // // println!("{:#?}", unsafe { device.common.vendor_device }); + // // println!("{:#?}", unsafe { device.common.header_type }); + // // println!("{:#?}\n", unsafe { device.common.device_type }); + + // if device.header.common().device_type.is_ahci() { + // let a = unsafe { + // PhysicalAddress::new_unchecked( + // device.header.general_device.bar5.address(), + // ) + // }; + + // println!( + // "Bus Master: {}, Interrupts Disable {}, I/O Space: {}, \ + // Memory Space: {}", + // device.header.common().command.is_bus_master(), + // device.header.common().command.is_interrupt_disable(), + // device.header.common().command.is_io_space(), + // device.header.common().command.is_memory_space() + // ); + + // println!( + // "Interrupt Line: {}, Interrupt Pin: {}", + // unsafe { device.header.general_device.interrupt_line }, + // unsafe { device.header.general_device.interrupt_pin } + // ); + + // let aligned = a.align_down(REGULAR_PAGE_ALIGNMENT); + // let hba = HBAMemoryRegisters::new(aligned).unwrap(); + // let _ = hba.probe_init(); + // let p = &mut hba.ports[0]; + + // let buf = + // unsafe { alloc_pages!(1) as *mut IdentityPacketData }; + + // p.identity_packet(buf); + + // let id = unsafe { + // core::ptr::read_volatile( + // (buf as usize + PHYSICAL_MEMORY_OFFSET) + // as *mut IdentityPacketData, + // ) + // }; + + // println!("{:?}", id); + + // println!("Cylinders: {}", id.cylinders); + // println!("Heads: {}", id.heads); + // println!("Sectors: {}", id.sectors); + + // println!("Serial: {:?}", &id.serial_number); + // println!("Model: {:?}", &id.model_num); + // println!("Firmware: {:?}", &id.firmware_rev); + // } + // } + loop { unsafe { print!("{}", KEYBOARD.assume_init_mut().read_char() ; color = ColorCode::new(Color::Green, Color::Black)); diff --git 
a/kernel/src/memory/allocators/mod.rs b/kernel/src/memory/allocators/mod.rs index e6b41e5..24a624b 100644 --- a/kernel/src/memory/allocators/mod.rs +++ b/kernel/src/memory/allocators/mod.rs @@ -1 +1,2 @@ pub mod page_allocator; +pub mod slab_allocator; diff --git a/kernel/src/memory/allocators/page_allocator/allocator.rs b/kernel/src/memory/allocators/page_allocator/allocator.rs index 826cef7..29f77c6 100644 --- a/kernel/src/memory/allocators/page_allocator/allocator.rs +++ b/kernel/src/memory/allocators/page_allocator/allocator.rs @@ -9,8 +9,8 @@ use common::{ address_types::{PhysicalAddress, VirtualAddress}, bitmap::{BitMap, ContiguousBlockLayout, Position}, constants::{ - FIRST_STAGE_OFFSET, PAGE_ALLOCATOR_OFFSET, PHYSICAL_MEMORY_OFFSET, - REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, + FIRST_STAGE_OFFSET, PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, + REGULAR_PAGE_SIZE, }, enums::MemoryRegionType, }; @@ -71,7 +71,7 @@ impl PhysicalPageAllocator { } #[allow(clippy::mut_from_ref)] - unsafe fn map_mut(&self) -> &mut BitMap { + pub unsafe fn map_mut(&self) -> &mut BitMap { unsafe { self.0.as_mut_unchecked() } } @@ -203,9 +203,7 @@ unsafe impl Allocator for PhysicalPageAllocator { self.map_mut().set_contiguous_block(&p, &block); return Ok(NonNull::slice_from_raw_parts( NonNull::new_unchecked( - Self::resolve_position(&p) - .translate() - .as_mut_ptr::(), + Self::resolve_position(&p).as_mut_ptr::(), ), layout.size(), )); @@ -219,10 +217,9 @@ unsafe impl Allocator for PhysicalPageAllocator { if let Ok(layout) = layout.align_to(REGULAR_PAGE_ALIGNMENT.as_usize()) { - let start_position = - Self::resolve_address(PhysicalAddress::new_unchecked( - ptr.as_ptr() as usize - PHYSICAL_MEMORY_OFFSET, - )); + let start_position = Self::resolve_address( + PhysicalAddress::new_unchecked(ptr.as_ptr() as usize), + ); let block = ContiguousBlockLayout::from_start_size( &start_position, layout.size() / REGULAR_PAGE_SIZE, diff --git a/kernel/src/memory/allocators/page_allocator/extensions.rs b/kernel/src/memory/allocators/page_allocator/extensions.rs index 1ef2370..57e15f5 100644 --- a/kernel/src/memory/allocators/page_allocator/extensions.rs +++ b/kernel/src/memory/allocators/page_allocator/extensions.rs @@ -100,8 +100,9 @@ pub impl VirtualAddress { { let index = self.index_of(*level); let entry = &mut table.entries[index]; - let resolved_table = - entry.force_resolve_table_mut().unwrap(); + let resolved_table = entry + .force_resolve_table_mut() + .expect("Tried to create table on a mapped entry"); table = resolved_table; } unsafe { diff --git a/kernel/src/memory/allocators/page_allocator/mod.rs b/kernel/src/memory/allocators/page_allocator/mod.rs index 1d7d882..3c799a4 100644 --- a/kernel/src/memory/allocators/page_allocator/mod.rs +++ b/kernel/src/memory/allocators/page_allocator/mod.rs @@ -8,7 +8,7 @@ pub static mut ALLOCATOR: MaybeUninit = MaybeUninit::uninit(); #[macro_export] -/// Allocate the amout of pages specified, and return the address +/// Allocate the amount of pages specified, and return the address macro_rules! alloc_pages { ($page_number: expr) => {{ use core::alloc::{Allocator, Layout}; diff --git a/kernel/src/memory/allocators/slab_allocator.rs b/kernel/src/memory/allocators/slab_allocator.rs new file mode 100644 index 0000000..d0c8286 --- /dev/null +++ b/kernel/src/memory/allocators/slab_allocator.rs @@ -0,0 +1,12 @@ +use common::address_types::PhysicalAddress; + + + +pub struct Block { + address: [PhysicalAddress], // address of the page. 
+ next: Option<&'static Block> +} + +pub struct SlabAllocator { + pages: [&] +} \ No newline at end of file diff --git a/learnix-macros/src/lib.rs b/learnix-macros/src/lib.rs index 696256a..ed1cbb1 100644 --- a/learnix-macros/src/lib.rs +++ b/learnix-macros/src/lib.rs @@ -185,6 +185,7 @@ pub fn rwc_flag(input: TokenStream) -> TokenStream { // build identifiers let name_str = name.to_string(); let clear_ident = format_ident!("clear_{}", name_str); + let support_ident = format_ident!("is_{}", name_str); let expanded = quote! { #[inline] @@ -194,6 +195,19 @@ pub fn rwc_flag(input: TokenStream) -> TokenStream { pub const fn #clear_ident(&mut self) { self.0 |= 1 << #bit; } + + + #[inline] + #[allow(dead_code)] + #[allow(unused_attributes)] + /// Checks if the corresponding flag is set + pub fn #support_ident(&self) -> bool { + unsafe { + core::ptr::read_volatile( + self as *const _ as *mut usize + ) & ((1<< #bit) as usize) != 0 + } + } }; expanded.into() diff --git a/shared/common/src/enums/ata.rs b/shared/common/src/enums/ata.rs index fe45d37..89fc0e2 100644 --- a/shared/common/src/enums/ata.rs +++ b/shared/common/src/enums/ata.rs @@ -8,5 +8,6 @@ use crate::error::ConversionError; pub enum AtaCommand { Nop = 0, ReadDmaExt = 0x25, + IdentifyPacketDevice = 0xa1, IdentifyDevice = 0xec, } diff --git a/shared/common/src/enums/pic8259.rs b/shared/common/src/enums/pic8259.rs index bace8f9..d701e76 100644 --- a/shared/common/src/enums/pic8259.rs +++ b/shared/common/src/enums/pic8259.rs @@ -34,7 +34,7 @@ pub enum CascadedPicInterruptLine { Irq7 = 1 << 7, Irq8 = 1 << 8, Irq9 = 1 << 9, - Irq10 = 1 << 10, + Ahci = 1 << 10, Irq11 = 1 << 11, Irq12 = 1 << 12, Irq13 = 1 << 13, diff --git a/shared/common/src/lib.rs b/shared/common/src/lib.rs index 7bbbbb8..746f121 100644 --- a/shared/common/src/lib.rs +++ b/shared/common/src/lib.rs @@ -14,6 +14,8 @@ pub mod constants; pub mod enums; pub mod error; pub mod ring_buffer; +pub mod volatile; + struct FakeAllocator; unsafe impl core::alloc::GlobalAlloc for FakeAllocator { diff --git a/shared/common/src/volatile.rs b/shared/common/src/volatile.rs new file mode 100644 index 0000000..296a209 --- /dev/null +++ b/shared/common/src/volatile.rs @@ -0,0 +1,49 @@ +use core::fmt::Debug; + +#[derive(Copy)] +#[repr(transparent)] +pub struct Volatile(T); + +impl Volatile { + pub fn new(vol: T) -> Volatile { + Volatile(vol) + } + + /// Read from the hardware register + pub fn read(&self) -> T { + unsafe { core::ptr::read_volatile(&self.0) } + } + + /// Write to the hardware register + pub fn write(&mut self, value: T) { + unsafe { core::ptr::write_volatile(&mut self.0 as *mut T, value) } + } +} + +impl Clone for Volatile { + fn clone(&self) -> Self { + Volatile(self.read()) + } +} + +impl Debug for Volatile { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.write_fmt(format_args!("{:?}", &self.0 as *const T)) + } +} + +#[macro_export] +macro_rules! read_volatile { + ($arg: expr) => { + unsafe { core::ptr::read_volatile(core::ptr::addr_of!($arg)) } + }; +} + +#[macro_export] +macro_rules! 
write_volatile { + ($arg: expr, $val: expr) => { + unsafe { + core::ptr::write_volatile(core::ptr::addr_of_mut!($arg), $val) + } + }; +} diff --git a/shared/cpu_utils/Cargo.toml b/shared/cpu_utils/Cargo.toml index 97d0fca..d6e005f 100644 --- a/shared/cpu_utils/Cargo.toml +++ b/shared/cpu_utils/Cargo.toml @@ -11,3 +11,5 @@ derive_more = { version = "2.0.1", default-features = false, features = [ thiserror = { version = "2.0.12", default-features = false } extend = "1.2.0" learnix-macros = { path = "../../learnix-macros" } +strum_macros = { version = "0.27", default-features = false } +strum = { version = "0.27", default-features = false } diff --git a/shared/cpu_utils/src/structures/paging/init.rs b/shared/cpu_utils/src/structures/paging/init.rs index 9a0d3c8..18e2176 100644 --- a/shared/cpu_utils/src/structures/paging/init.rs +++ b/shared/cpu_utils/src/structures/paging/init.rs @@ -82,7 +82,7 @@ pub fn enable() -> Option<()> { ); top_identity_page_table_l2.entries[0].map_unchecked( PhysicalAddress::new_unchecked(0), - PageEntryFlags::huge_io_page_flags(), + PageEntryFlags::huge_page_flags(), ); } // ANCHOR_END: setup_top_page_tables From caa31a804bb2296db17219cb2f003e6a5e41a76a Mon Sep 17 00:00:00 2001 From: sagi Date: Tue, 30 Dec 2025 22:59:59 +0200 Subject: [PATCH 05/78] commited changes before rebase --- build/build.rs | 2 +- build/src/main.rs | 2 + kernel/src/main.rs | 127 +++++++++--------- .../allocators/page_allocator/allocator.rs | 13 +- .../src/memory/allocators/slab_allocator.rs | 15 +-- kernel/src/memory/mod.rs | 1 + kernel/src/memory/page_descriptor.rs | 39 ++++++ 7 files changed, 110 insertions(+), 89 deletions(-) create mode 100644 kernel/src/memory/page_descriptor.rs diff --git a/build/build.rs b/build/build.rs index 1124c9d..3c7866b 100644 --- a/build/build.rs +++ b/build/build.rs @@ -56,7 +56,7 @@ fn main() -> io::Result<()> { "targets/32bit_target.json", "release", ); - build_stage("../kernel", "targets/64bit_target.json", &profile); + build_stage("../kernel", "targets/64bit_target.json", "release"); // Combine binaries into one image let input_dir = PathBuf::from("bin"); diff --git a/build/src/main.rs b/build/src/main.rs index f328e4d..21638f7 100644 --- a/build/src/main.rs +++ b/build/src/main.rs @@ -1 +1,3 @@ +#![no_std] + fn main() {} diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 868c3ea..ab4fa5f 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -17,21 +17,15 @@ #![feature(ascii_char_variants)] #![feature(ascii_char)] #![feature(const_convert)] +#![feature(core_intrinsics)] #![deny(clippy::all)] mod drivers; mod memory; -use core::{ - alloc::{Allocator, Layout}, - mem::MaybeUninit, - num::NonZero, - panic::PanicInfo, -}; +use core::{num::NonZero, panic::PanicInfo}; use crate::{ drivers::{ - ata::ahci::{ - GenericHostControl, HBAMemoryRegisters, IdentityPacketData, - }, + ata::ahci::{HBAMemoryRegisters, IdentityPacketData}, interrupt_handlers, keyboard::{KEYBOARD, ps2_keyboard::Keyboard}, pci::{self}, @@ -53,9 +47,8 @@ use common::{ }; use cpu_utils::{ instructions::interrupts::{self}, - structures::{ - interrupt_descriptor_table::{IDT, InterruptDescriptorTable}, - paging::{PageEntryFlags, PageTable}, + structures::interrupt_descriptor_table::{ + IDT, InterruptDescriptorTable, }, }; @@ -108,61 +101,61 @@ pub unsafe extern "C" fn _start() -> ! 
{ } unsafe { PIC.enable_irq(CascadedPicInterruptLine::Ahci) }; - // for device in pci_devices.iter_mut() { - // // println!("{:#?}", unsafe { device.common.vendor_device }); - // // println!("{:#?}", unsafe { device.common.header_type }); - // // println!("{:#?}\n", unsafe { device.common.device_type }); - - // if device.header.common().device_type.is_ahci() { - // let a = unsafe { - // PhysicalAddress::new_unchecked( - // device.header.general_device.bar5.address(), - // ) - // }; - - // println!( - // "Bus Master: {}, Interrupts Disable {}, I/O Space: {}, \ - // Memory Space: {}", - // device.header.common().command.is_bus_master(), - // device.header.common().command.is_interrupt_disable(), - // device.header.common().command.is_io_space(), - // device.header.common().command.is_memory_space() - // ); - - // println!( - // "Interrupt Line: {}, Interrupt Pin: {}", - // unsafe { device.header.general_device.interrupt_line }, - // unsafe { device.header.general_device.interrupt_pin } - // ); - - // let aligned = a.align_down(REGULAR_PAGE_ALIGNMENT); - // let hba = HBAMemoryRegisters::new(aligned).unwrap(); - // let _ = hba.probe_init(); - // let p = &mut hba.ports[0]; - - // let buf = - // unsafe { alloc_pages!(1) as *mut IdentityPacketData }; - - // p.identity_packet(buf); - - // let id = unsafe { - // core::ptr::read_volatile( - // (buf as usize + PHYSICAL_MEMORY_OFFSET) - // as *mut IdentityPacketData, - // ) - // }; - - // println!("{:?}", id); - - // println!("Cylinders: {}", id.cylinders); - // println!("Heads: {}", id.heads); - // println!("Sectors: {}", id.sectors); - - // println!("Serial: {:?}", &id.serial_number); - // println!("Model: {:?}", &id.model_num); - // println!("Firmware: {:?}", &id.firmware_rev); - // } - // } + for device in pci_devices.iter_mut() { + // println!("{:#?}", unsafe { device.common.vendor_device }); + // println!("{:#?}", unsafe { device.common.header_type }); + // println!("{:#?}\n", unsafe { device.common.device_type }); + + if device.header.common().device_type.is_ahci() { + let a = unsafe { + PhysicalAddress::new_unchecked( + device.header.general_device.bar5.address(), + ) + }; + + println!( + "Bus Master: {}, Interrupts Disable {}, I/O Space: {}, \ + Memory Space: {}", + device.header.common().command.is_bus_master(), + device.header.common().command.is_interrupt_disable(), + device.header.common().command.is_io_space(), + device.header.common().command.is_memory_space() + ); + + println!( + "Interrupt Line: {}, Interrupt Pin: {}", + unsafe { device.header.general_device.interrupt_line }, + unsafe { device.header.general_device.interrupt_pin } + ); + + let aligned = a.align_down(REGULAR_PAGE_ALIGNMENT); + let hba = HBAMemoryRegisters::new(aligned).unwrap(); + let _ = hba.probe_init(); + let p = &mut hba.ports[0]; + + let buf = + unsafe { alloc_pages!(1) as *mut IdentityPacketData }; + + p.identity_packet(buf); + + let id = unsafe { + core::ptr::read_volatile( + (buf as usize + PHYSICAL_MEMORY_OFFSET) + as *mut IdentityPacketData, + ) + }; + + println!("{:?}", id); + + println!("Cylinders: {}", id.cylinders); + println!("Heads: {}", id.heads); + println!("Sectors: {}", id.sectors); + + println!("Serial: {:?}", &id.serial_number); + println!("Model: {:?}", &id.model_num); + println!("Firmware: {:?}", &id.firmware_rev); + } + } loop { unsafe { diff --git a/kernel/src/memory/allocators/page_allocator/allocator.rs b/kernel/src/memory/allocators/page_allocator/allocator.rs index 29f77c6..49dc787 100644 --- 
a/kernel/src/memory/allocators/page_allocator/allocator.rs +++ b/kernel/src/memory/allocators/page_allocator/allocator.rs @@ -9,8 +9,7 @@ use common::{ address_types::{PhysicalAddress, VirtualAddress}, bitmap::{BitMap, ContiguousBlockLayout, Position}, constants::{ - FIRST_STAGE_OFFSET, PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, - REGULAR_PAGE_SIZE, + PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, }, enums::MemoryRegionType, }; @@ -88,15 +87,7 @@ impl PhysicalPageAllocator { )); let initialized = uninit.assume_init_mut(); - // Set the null page - initialized - .map_mut() - .set_bit(&Position::new_unchecked(0, 0)); - - let start_address = const { - PhysicalAddress::new_unchecked(FIRST_STAGE_OFFSET as usize) - .align_down(REGULAR_PAGE_ALIGNMENT) - }; + let start_address = PhysicalAddress::new_unchecked(0); let start_position = Self::address_position(start_address).unwrap(); // Allocate the addresses that are used for the diff --git a/kernel/src/memory/allocators/slab_allocator.rs b/kernel/src/memory/allocators/slab_allocator.rs index d0c8286..5f25966 100644 --- a/kernel/src/memory/allocators/slab_allocator.rs +++ b/kernel/src/memory/allocators/slab_allocator.rs @@ -1,12 +1,7 @@ -use common::address_types::PhysicalAddress; +use core::alloc::Layout; - - -pub struct Block { - address: [PhysicalAddress], // address of the page. - next: Option<&'static Block> +pub struct SlabCache { + // TODO ADD LOCK + pub layout: Layout, + pub objects: &'static mut [T], } - -pub struct SlabAllocator { - pages: [&] -} \ No newline at end of file diff --git a/kernel/src/memory/mod.rs b/kernel/src/memory/mod.rs index 17bb0dc..3f8cfb0 100644 --- a/kernel/src/memory/mod.rs +++ b/kernel/src/memory/mod.rs @@ -1,2 +1,3 @@ pub mod allocators; pub mod memory_map; +pub mod page_descriptor; diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs new file mode 100644 index 0000000..9a59320 --- /dev/null +++ b/kernel/src/memory/page_descriptor.rs @@ -0,0 +1,39 @@ +use core::{intrinsics::size_of, mem::MaybeUninit}; + +use common::constants::{REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE}; + +use crate::{alloc_pages, memory::allocators::slab_allocator::SlabCache}; + +pub struct Unassigned; + +pub type UnassignedPage = Page; + +impl UnassignedPage { + pub fn assign(&self) -> &Page { + let ptr = self as *const _ as usize; + unsafe { &*(ptr as *const Page) } + } + + pub fn assign_mut(&mut self) -> &mut Page { + let ptr = self as *const _ as usize; + unsafe { &mut *(ptr as *mut Page) } + } +} + +pub static PAGES: MaybeUninit<&'static mut [UnassignedPage]> = + MaybeUninit::uninit(); + +pub struct Page { + pub owner: Option<&'static SlabCache>, + pub counter: u64, +} + +pub fn pages_init(usable_mem: usize) { + let num_pages = usable_mem / REGULAR_PAGE_SIZE; + + let capacity = (num_pages * size_of::()) + .next_multiple_of(REGULAR_PAGE_SIZE); + + let array_address = + unsafe { alloc_pages!(capacity / REGULAR_PAGE_SIZE) }; +} From 94905e1f40393d8fcd1354b61fe54da3e648dc70 Mon Sep 17 00:00:00 2001 From: sagi Date: Tue, 30 Dec 2025 23:49:46 +0200 Subject: [PATCH 06/78] started writing buddy allocator --- kernel/src/memory/page_descriptor.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index 9a59320..1e42991 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -1,4 +1,4 @@ -use core::{intrinsics::size_of, mem::MaybeUninit}; +use 
core::{array, intrinsics::size_of, mem::MaybeUninit}; use common::constants::{REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE}; @@ -23,9 +23,19 @@ impl UnassignedPage { pub static PAGES: MaybeUninit<&'static mut [UnassignedPage]> = MaybeUninit::uninit(); +pub struct BuddyBlockMeta { + next: Option<&'static UnassignedPage>, +} + +pub const BUDDY_MAX_ORDER: usize = 10; + +pub struct BuddyAllocator { + freelist: [BuddyBlockMeta; BUDDY_MAX_ORDER], +} + pub struct Page { pub owner: Option<&'static SlabCache>, - pub counter: u64, + pub block: BuddyBlockMeta, } pub fn pages_init(usable_mem: usize) { From ebb3c7eccf22ee4967a0434b8909baece11785b0 Mon Sep 17 00:00:00 2001 From: sagi Date: Thu, 1 Jan 2026 19:14:45 +0200 Subject: [PATCH 07/78] added some buddy allocator data --- build/src/main.rs | 2 - kernel/src/memory/page_descriptor.rs | 64 ++++++++++++++++++++++------ 2 files changed, 52 insertions(+), 14 deletions(-) diff --git a/build/src/main.rs b/build/src/main.rs index 21638f7..f328e4d 100644 --- a/build/src/main.rs +++ b/build/src/main.rs @@ -1,3 +1 @@ -#![no_std] - fn main() {} diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index 1e42991..c6d7903 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -1,8 +1,13 @@ -use core::{array, intrinsics::size_of, mem::MaybeUninit}; - +use crate::{ + alloc_pages, + memory::{ + allocators::slab_allocator::SlabCache, memory_map::MemoryRegion, + }, +}; use common::constants::{REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE}; - -use crate::{alloc_pages, memory::allocators::slab_allocator::SlabCache}; +use core::{intrinsics::size_of, mem::MaybeUninit}; +use strum::VariantArray; +use strum_macros::VariantArray; pub struct Unassigned; @@ -23,27 +28,62 @@ impl UnassignedPage { pub static PAGES: MaybeUninit<&'static mut [UnassignedPage]> = MaybeUninit::uninit(); +#[derive(Default)] pub struct BuddyBlockMeta { next: Option<&'static UnassignedPage>, + prev: Option<&'static UnassignedPage>, + order: BuddyOrder, +} + +pub const BUDDY_MAX_ORDER: usize = BuddyOrder::VARIANTS.len(); + +#[derive(VariantArray, Clone, Copy, PartialEq, Eq)] +pub enum BuddyOrder { + Order0 = 0, + Order1 = 1, + Order2 = 2, + Order3 = 3, + Order4 = 4, + Order5 = 5, + Order6 = 6, + Order7 = 7, + Order8 = 8, + Order9 = 9, +} + +impl BuddyOrder { + pub const MAX: BuddyOrder = BuddyOrder::Order9; + pub const MIN: BuddyOrder = BuddyOrder::Order0; } -pub const BUDDY_MAX_ORDER: usize = 10; +impl Default for BuddyOrder { + fn default() -> Self { + BuddyOrder::MAX + } +} +#[derive(Default)] pub struct BuddyAllocator { freelist: [BuddyBlockMeta; BUDDY_MAX_ORDER], } +impl BuddyAllocator { + pub fn alloc_pages() -> usize { + unimplemented!() + } +} + pub struct Page { pub owner: Option<&'static SlabCache>, - pub block: BuddyBlockMeta, + pub buddy: BuddyBlockMeta, } -pub fn pages_init(usable_mem: usize) { - let num_pages = usable_mem / REGULAR_PAGE_SIZE; +pub fn pages_init(map: &mut [MemoryRegion]) { + // let num_pages = usable_mem / REGULAR_PAGE_SIZE; - let capacity = (num_pages * size_of::()) - .next_multiple_of(REGULAR_PAGE_SIZE); + // let capacity = (num_pages * size_of::()) + // .next_multiple_of(REGULAR_PAGE_SIZE); - let array_address = - unsafe { alloc_pages!(capacity / REGULAR_PAGE_SIZE) }; + // let array_address = + // unsafe { alloc_pages!(capacity / REGULAR_PAGE_SIZE) }; } From 93abeb65596af71695f12c4842542127df240c48 Mon Sep 17 00:00:00 2001 From: sagi Date: Thu, 1 Jan 2026 23:25:35 +0200 Subject: [PATCH 08/78] implement 
initialization for page map --- kernel/src/memory/page_descriptor.rs | 84 +++++++++++++++++++--------- 1 file changed, 57 insertions(+), 27 deletions(-) diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index c6d7903..a5ea6fa 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -1,14 +1,17 @@ -use crate::{ - alloc_pages, - memory::{ - allocators::slab_allocator::SlabCache, memory_map::MemoryRegion, - }, +use crate::memory::{ + allocators::slab_allocator::SlabCache, memory_map::ParsedMemoryMap, +}; +use common::constants::{ + PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, +}; +use core::{ + mem::MaybeUninit, + ops::{Deref, DerefMut}, }; -use common::constants::{REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE}; -use core::{intrinsics::size_of, mem::MaybeUninit}; use strum::VariantArray; use strum_macros::VariantArray; +#[derive(Default)] pub struct Unassigned; pub type UnassignedPage = Page; @@ -25,14 +28,14 @@ impl UnassignedPage { } } -pub static PAGES: MaybeUninit<&'static mut [UnassignedPage]> = - MaybeUninit::uninit(); +pub static mut PAGES: LateInit<&'static mut [UnassignedPage]> = + LateInit::uninit(); #[derive(Default)] pub struct BuddyBlockMeta { next: Option<&'static UnassignedPage>, prev: Option<&'static UnassignedPage>, - order: BuddyOrder, + order: Option, } pub const BUDDY_MAX_ORDER: usize = BuddyOrder::VARIANTS.len(); @@ -51,17 +54,6 @@ pub enum BuddyOrder { Order9 = 9, } -impl BuddyOrder { - pub const MAX: BuddyOrder = BuddyOrder::Order9; - pub const MIN: BuddyOrder = BuddyOrder::Order0; -} - -impl Default for BuddyOrder { - fn default() -> Self { - BuddyOrder::MAX - } -} - #[derive(Default)] pub struct BuddyAllocator { freelist: [BuddyBlockMeta; BUDDY_MAX_ORDER], @@ -73,17 +65,55 @@ impl BuddyAllocator { } } +#[derive(Default)] pub struct Page { pub owner: Option<&'static SlabCache>, pub buddy: BuddyBlockMeta, } -pub fn pages_init(map: &mut [MemoryRegion]) { - // let num_pages = usable_mem / REGULAR_PAGE_SIZE; +pub struct LateInit(MaybeUninit); + +impl LateInit { + pub const fn uninit() -> LateInit { + LateInit::(MaybeUninit::uninit()) + } - // let capacity = (num_pages * size_of::()) - // .next_multiple_of(REGULAR_PAGE_SIZE); + pub const fn write(&mut self, val: T) { + self.0.write(val); + } +} - // let array_address = - // unsafe { alloc_pages!(capacity / REGULAR_PAGE_SIZE) }; +impl Deref for LateInit { + type Target = T; + + fn deref(&self) -> &Self::Target { + unsafe { self.0.assume_init_ref() } + } +} + +impl DerefMut for LateInit { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { self.0.assume_init_mut() } + } +} + +pub fn pages_init(map: &ParsedMemoryMap) -> usize { + let last = map.last().unwrap(); + let last_page = (last.base_address + last.length) as usize + & REGULAR_PAGE_ALIGNMENT.as_usize(); + + let total_pages = last_page / REGULAR_PAGE_SIZE; + + unsafe { + PAGES.write(core::slice::from_raw_parts_mut( + PAGE_ALLOCATOR_OFFSET as *mut UnassignedPage, + total_pages, + )); + + PAGES + .iter_mut() + .for_each(|p| *p = UnassignedPage::default()); + + PAGES.as_ptr_range().end as usize + } } From dbc2b614731036395a4cb04903a383fffcc7b48d Mon Sep 17 00:00:00 2001 From: sagi Date: Thu, 1 Jan 2026 23:25:46 +0200 Subject: [PATCH 09/78] implemented deref and derefmut --- kernel/src/memory/memory_map.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/kernel/src/memory/memory_map.rs b/kernel/src/memory/memory_map.rs index f84672b..18419a4 
100644 --- a/kernel/src/memory/memory_map.rs +++ b/kernel/src/memory/memory_map.rs @@ -3,6 +3,7 @@ use common::{ enums::MemoryRegionType, }; use core::fmt::{self, Display, Formatter}; +use derive_more::{Deref, DerefMut}; #[macro_export] macro_rules! parsed_memory_map { @@ -104,11 +105,13 @@ impl MemoryRegionTrait for MemoryRegionExtended { } } -pub struct ParsedMapDisplay( - pub &'static [T], -); +#[derive(Deref, DerefMut)] +pub struct MemoryMap(pub &'static [T]); -impl Display for ParsedMapDisplay { +pub type RawMemoryMap = MemoryMap; +pub type ParsedMemoryMap = MemoryMap; + +impl Display for MemoryMap { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let mut usable = 0u64; let mut reserved = 0u64; @@ -120,10 +123,10 @@ impl Display for ParsedMapDisplay { write!( f, - "[0x{:0>9x} - 0x{:0>9x}]: type: {}", + "[0x{:0>9x} - 0x{:0>9x}]: type: {:?}", entry.base_address(), entry.base_address() + entry.length(), - entry.region_type() as u32 + entry.region_type() )?; match entry.region_type() { From 61377746b70013a104da90f895e9ea7f856fc508 Mon Sep 17 00:00:00 2001 From: sagi Date: Sat, 3 Jan 2026 17:59:25 +0200 Subject: [PATCH 10/78] added simple init to the buddy allocator --- kernel/src/memory/page_descriptor.rs | 44 +++++++++++++++++++++------- 1 file changed, 34 insertions(+), 10 deletions(-) diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index a5ea6fa..24638bc 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -1,5 +1,8 @@ -use crate::memory::{ - allocators::slab_allocator::SlabCache, memory_map::ParsedMemoryMap, +use crate::{ + memory::{ + allocators::slab_allocator::SlabCache, memory_map::ParsedMemoryMap, + }, + println, }; use common::constants::{ PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, @@ -31,10 +34,10 @@ impl UnassignedPage { pub static mut PAGES: LateInit<&'static mut [UnassignedPage]> = LateInit::uninit(); -#[derive(Default)] +#[derive(Default, Clone, Copy)] pub struct BuddyBlockMeta { - next: Option<&'static UnassignedPage>, - prev: Option<&'static UnassignedPage>, + next: Option<*mut UnassignedPage>, + prev: Option<*mut UnassignedPage>, order: Option, } @@ -60,15 +63,36 @@ pub struct BuddyAllocator { } impl BuddyAllocator { - pub fn alloc_pages() -> usize { - unimplemented!() + pub fn alloc_pages(&self, num_pages: usize) -> usize { + assert!( + num_pages < (1 << BUDDY_MAX_ORDER), + "Size cannot be greater then: {}", + 1 << BUDDY_MAX_ORDER + ); + let order = num_pages.next_power_of_two().leading_zeros(); + } + + pub fn init(&'static mut self) { + self.freelist[BUDDY_MAX_ORDER - 1] = + unsafe { PAGES[0].buddy_meta }; + + let mut iter = unsafe { PAGES.iter_mut().peekable() }; + let mut prev = None; + + while let Some(curr) = iter.next() { + curr.buddy_meta.next = iter.peek().map(|v| { + *v as *const UnassignedPage as *mut UnassignedPage + }); + curr.buddy_meta.prev = prev; + prev = Some(curr) + } } } #[derive(Default)] pub struct Page { pub owner: Option<&'static SlabCache>, - pub buddy: BuddyBlockMeta, + pub buddy_meta: BuddyBlockMeta, } pub struct LateInit(MaybeUninit); @@ -100,9 +124,9 @@ impl DerefMut for LateInit { pub fn pages_init(map: &ParsedMemoryMap) -> usize { let last = map.last().unwrap(); let last_page = (last.base_address + last.length) as usize - & REGULAR_PAGE_ALIGNMENT.as_usize(); - + & !REGULAR_PAGE_ALIGNMENT.as_usize(); let total_pages = last_page / REGULAR_PAGE_SIZE; + println!("Last Page: {}, Total Pages: {}", last_page, total_pages); unsafe { 
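+        // One `Page` descriptor per physical frame, built in place at
+        // PAGE_ALLOCATOR_OFFSET; this assumes the whole descriptor range
+        // is already mapped and writable by the time pages_init runs.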
PAGES.write(core::slice::from_raw_parts_mut( From ee9d9d9c8aeb0658f2ed8052db6db9bf5bfe3a18 Mon Sep 17 00:00:00 2001 From: sagi Date: Sat, 3 Jan 2026 22:13:35 +0200 Subject: [PATCH 11/78] added multiple util functions for the page allocator --- kernel/src/memory/page_descriptor.rs | 153 +++++++++++++++++++++++---- 1 file changed, 130 insertions(+), 23 deletions(-) diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index 24638bc..e345920 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -1,18 +1,17 @@ use crate::{ - memory::{ - allocators::slab_allocator::SlabCache, memory_map::ParsedMemoryMap, - }, + memory::{allocators::slab::SlabCache, memory_map::ParsedMemoryMap}, println, }; -use common::constants::{ - PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, +use common::{ + constants::{ + PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, + }, + enums::{BUDDY_MAX_ORDER, BuddyOrder}, }; use core::{ mem::MaybeUninit, ops::{Deref, DerefMut}, }; -use strum::VariantArray; -use strum_macros::VariantArray; #[derive(Default)] pub struct Unassigned; @@ -41,20 +40,19 @@ pub struct BuddyBlockMeta { order: Option, } -pub const BUDDY_MAX_ORDER: usize = BuddyOrder::VARIANTS.len(); - -#[derive(VariantArray, Clone, Copy, PartialEq, Eq)] -pub enum BuddyOrder { - Order0 = 0, - Order1 = 1, - Order2 = 2, - Order3 = 3, - Order4 = 4, - Order5 = 5, - Order6 = 6, - Order7 = 7, - Order8 = 8, - Order9 = 9, +impl BuddyBlockMeta { + pub fn detach(&mut self) -> Option<*mut Page> { + let detached = self.next? as *mut Page; // None if there is no page to detach + self.next = unsafe { (*detached).buddy_meta.next }; + Some(detached) + } + + pub fn attach(&mut self, attachment: *mut Page) { + let attachment_ref = + unsafe { &mut *attachment }.as_unassigned_mut(); + attachment_ref.buddy_meta.next = self.next; + self.next = Some(attachment_ref as *mut UnassignedPage) + } } #[derive(Default)] @@ -69,14 +67,89 @@ impl BuddyAllocator { "Size cannot be greater then: {}", 1 << BUDDY_MAX_ORDER ); - let order = num_pages.next_power_of_two().leading_zeros(); + let order = num_pages.next_power_of_two().leading_zeros() as usize; + + let page = self.freelist[order].next.unwrap_or_else(|| { + self.split_until(order) + .expect("Out of memory, swap is not implemented") + }); + + get_page_address(page) + } + + pub fn free_pages(&self, address: usize) { + unimplemented!() + } + + /// This function assumes that `wanted_order` is empty, and won't check + /// it. + pub fn split_until( + &self, + wanted_order: usize, + ) -> Option<*mut UnassignedPage> { + let closet_order = ((wanted_order + 1)..BUDDY_MAX_ORDER) + .find(|i| self.freelist[*i].next.is_some())?; + + let mut next_split = &self.freelist[closet_order]; + + // for current_split in + // ((wanted_order + 1)..closet_order).rev().peekable() + // { + // let page = self.freelist[current_split] + // .detach::() + // .expect("Error in logic"); + // } + None + } + + /// TODO: Make an unsafe split if relevant + /// + /// # Safety + /// This function does not attach the new references! + pub unsafe fn split( + &mut self, + order: usize, + ) -> Option<(&mut UnassignedPage, &mut UnassignedPage)> { + let meta = &mut self.freelist[order]; + + // Detach the page from it's order list. + if let Some(page) = meta.detach::() { + let page_ref = unsafe { &mut (*page) }; + + // Reduce it's order to find it's order. 
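+            // Splitting a block of order `k` produces two buddies of
+            // order `k - 1`: e.g. one 8-page block becomes two 4-page
+            // blocks that differ only in a single bit of their index.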
+ let prev_order = + BuddyOrder::try_from(order as u8 - 1).unwrap(); + page_ref.buddy_meta.order = Some(prev_order); + + // Find it's buddy new buddy. + let buddy = unsafe { + &mut (*page_ref + .get_buddy() + .expect("Buddy order given is the max order")) + }; + + // Set the order of the buddy. + buddy.buddy_meta.order = Some(prev_order); + + return Some((page_ref, buddy)); + } + None + } + + pub fn merge(&self) { + unimplemented!() } pub fn init(&'static mut self) { self.freelist[BUDDY_MAX_ORDER - 1] = unsafe { PAGES[0].buddy_meta }; - let mut iter = unsafe { PAGES.iter_mut().peekable() }; + let mut iter = unsafe { + PAGES + .iter_mut() + .step_by(BuddyOrder::MAX as usize) + .peekable() + }; let mut prev = None; while let Some(curr) = iter.next() { @@ -84,6 +157,7 @@ impl BuddyAllocator { *v as *const UnassignedPage as *mut UnassignedPage }); curr.buddy_meta.prev = prev; + curr.buddy_meta.order = Some(BuddyOrder::MAX); prev = Some(curr) } } @@ -95,6 +169,32 @@ pub struct Page { pub buddy_meta: BuddyBlockMeta, } +impl Page { + pub fn as_unassigned(&self) -> &UnassignedPage { + let ptr = self as *const _ as usize; + unsafe { &*(ptr as *const UnassignedPage) } + } + + pub fn as_unassigned_mut(&mut self) -> &mut UnassignedPage { + let ptr = self as *const _ as usize; + unsafe { &mut *(ptr as *mut UnassignedPage) } + } + + pub fn get_buddy(&self) -> Option<*mut Page> { + if let Some(order) = self.buddy_meta.order { + if let BuddyOrder::MAX = order { + return None; + } else { + return Some( + (self as *const _ as usize ^ (1 << order as usize)) + as *mut Page, + ); + } + } + None + } +} + pub struct LateInit(MaybeUninit); impl LateInit { @@ -121,6 +221,13 @@ impl DerefMut for LateInit { } } +pub fn get_page_address(page: *const Page) -> usize { + let index = unsafe { + PAGES.as_ptr().offset_from((&*page).as_unassigned()) as usize + }; + index * REGULAR_PAGE_SIZE +} + pub fn pages_init(map: &ParsedMemoryMap) -> usize { let last = map.last().unwrap(); let last_page = (last.base_address + last.length) as usize From 3cdd0ac2bfa9a8808abb784b72492f802255503b Mon Sep 17 00:00:00 2001 From: sagi Date: Sat, 3 Jan 2026 22:13:56 +0200 Subject: [PATCH 12/78] renamed --- kernel/src/memory/allocators/mod.rs | 2 +- kernel/src/memory/allocators/{slab_allocator.rs => slab.rs} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename kernel/src/memory/allocators/{slab_allocator.rs => slab.rs} (100%) diff --git a/kernel/src/memory/allocators/mod.rs b/kernel/src/memory/allocators/mod.rs index 24a624b..af89e2e 100644 --- a/kernel/src/memory/allocators/mod.rs +++ b/kernel/src/memory/allocators/mod.rs @@ -1,2 +1,2 @@ pub mod page_allocator; -pub mod slab_allocator; +pub mod slab; diff --git a/kernel/src/memory/allocators/slab_allocator.rs b/kernel/src/memory/allocators/slab.rs similarity index 100% rename from kernel/src/memory/allocators/slab_allocator.rs rename to kernel/src/memory/allocators/slab.rs From 5725b3d87776fb6833f3a302531bce5a4237a5da Mon Sep 17 00:00:00 2001 From: sagi Date: Sat, 3 Jan 2026 22:14:14 +0200 Subject: [PATCH 13/78] added a file for the buddy order enum --- shared/common/src/enums/buddy.rs | 36 ++++++++++++++++++++++++++++++++ shared/common/src/enums/mod.rs | 2 ++ 2 files changed, 38 insertions(+) create mode 100644 shared/common/src/enums/buddy.rs diff --git a/shared/common/src/enums/buddy.rs b/shared/common/src/enums/buddy.rs new file mode 100644 index 0000000..bb935db --- /dev/null +++ b/shared/common/src/enums/buddy.rs @@ -0,0 +1,36 @@ +use crate::error::ConversionError; +use 
num_enum::{TryFromPrimitive, UnsafeFromPrimitive}; +use strum::VariantArray; +use strum_macros::VariantArray; + +pub const BUDDY_MAX_ORDER: usize = BuddyOrder::VARIANTS.len(); + +#[repr(u8)] +#[derive( + VariantArray, + Clone, + Copy, + PartialEq, + Eq, + TryFromPrimitive, + UnsafeFromPrimitive, +)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum BuddyOrder { + Order0 = 0, + Order1 = 1, + Order2 = 2, + Order3 = 3, + Order4 = 4, + Order5 = 5, + Order6 = 6, + Order7 = 7, + Order8 = 8, + Order9 = 9, + Order10 = 10, +} + +impl BuddyOrder { + pub const MIN: BuddyOrder = *BuddyOrder::VARIANTS.first().unwrap(); + pub const MAX: BuddyOrder = *BuddyOrder::VARIANTS.last().unwrap(); +} diff --git a/shared/common/src/enums/mod.rs b/shared/common/src/enums/mod.rs index 1466b58..2149e19 100644 --- a/shared/common/src/enums/mod.rs +++ b/shared/common/src/enums/mod.rs @@ -1,6 +1,7 @@ pub mod ahci; pub mod ata; pub mod bios_interrupts; +pub mod buddy; pub mod cpuid; pub mod general; pub mod global_descriptor_table; @@ -16,6 +17,7 @@ pub mod vga; pub use ahci::*; pub use ata::*; pub use bios_interrupts::*; +pub use buddy::*; pub use cpuid::*; pub use general::*; pub use global_descriptor_table::*; From ac8aafd3e5035e566712d41e71db66e364375a21 Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 4 Jan 2026 22:42:41 +0200 Subject: [PATCH 14/78] finished untested implementation of buddy allocator --- kernel/src/memory/page_descriptor.rs | 91 ++++++++++++++-------------- 1 file changed, 47 insertions(+), 44 deletions(-) diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index e345920..6dfde14 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -61,7 +61,7 @@ pub struct BuddyAllocator { } impl BuddyAllocator { - pub fn alloc_pages(&self, num_pages: usize) -> usize { + pub fn alloc_pages(&mut self, num_pages: usize) -> usize { assert!( num_pages < (1 << BUDDY_MAX_ORDER), "Size cannot be greater then: {}", @@ -84,56 +84,32 @@ impl BuddyAllocator { /// This function assumes that `wanted_order` is empty, and won't check /// it. pub fn split_until( - &self, + &mut self, wanted_order: usize, ) -> Option<*mut UnassignedPage> { - let closet_order = ((wanted_order + 1)..BUDDY_MAX_ORDER) + let mut closet_order = ((wanted_order + 1)..BUDDY_MAX_ORDER) .find(|i| self.freelist[*i].next.is_some())?; - let mut next_split = &self.freelist[closet_order]; + let initial_page = unsafe { + &mut *self.freelist[closet_order] + .detach::() + .unwrap() + }; - // for current_split in - // ((wanted_order + 1)..closet_order).rev().peekable() - // { - // let page = self.freelist[current_split] - // .detach::() - // .expect("Error in logic"); - // } - None - } + let (mut lhs, mut rhs) = unsafe { initial_page.split() }.unwrap(); - /// TODO: Make an unsafe split if relevant - /// - /// # Safety - /// This function does not attach the new references! - pub unsafe fn split( - &mut self, - order: usize, - ) -> Option<(&mut UnassignedPage, &mut UnassignedPage)> { - let meta = &mut self.freelist[order]; - - // Detach the page from it's order list. - if let Some(page) = meta.detach::() { - let page_ref = unsafe { &mut (*page) }; - - // Reduce it's order to find it's order. - let prev_order = - BuddyOrder::try_from(order as u8 - 1).unwrap(); - page_ref.buddy_meta.order = Some(prev_order); - - // Find it's buddy new buddy. 
- let buddy = unsafe { - &mut (*page_ref - .get_buddy() - .expect("Buddy order given is the max order")) - }; - - // Set the order of the buddy. - buddy.buddy_meta.order = Some(prev_order); - - return Some((page_ref, buddy)); + while closet_order != wanted_order { + closet_order -= 1; + + self.freelist[closet_order].attach(rhs); + + let split_ref = unsafe { &mut *lhs }; + + (lhs, rhs) = unsafe { split_ref.split().unwrap() }; } - None + + self.freelist[closet_order].attach(rhs); + Some(lhs) } pub fn merge(&self) { @@ -193,6 +169,33 @@ impl Page { } None } + + /// TODO: Make an unsafe split if relevant + /// + /// # Safety + /// This function does not attach the new references! + pub unsafe fn split( + &mut self, + ) -> Option<(*mut Page, *mut Page)> { + // Reduce it's order to find it's order. + let prev_order = + BuddyOrder::try_from(self.buddy_meta.order? as u8 - 1) + .unwrap(); + + self.buddy_meta.order = Some(prev_order); + + // Find it's buddy new buddy. + let buddy = unsafe { + &mut (*self + .get_buddy() + .expect("Buddy order given is the max order")) + }; + + // Set the order of the buddy. + buddy.buddy_meta.order = Some(prev_order); + + Some((self as *mut Page, buddy as *mut Page)) + } } pub struct LateInit(MaybeUninit); From 648868c1fd8662ff2e378a941a6645dac0492923 Mon Sep 17 00:00:00 2001 From: sagi Date: Mon, 5 Jan 2026 23:51:56 +0200 Subject: [PATCH 15/78] fixed some bugs on buddy allocator, still has some to fix --- kernel/src/memory/allocators/slab.rs | 1 + kernel/src/memory/page_descriptor.rs | 61 ++++++++++++++++++---------- shared/common/src/enums/buddy.rs | 1 + 3 files changed, 41 insertions(+), 22 deletions(-) diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index 5f25966..ddb7cd4 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -1,5 +1,6 @@ use core::alloc::Layout; +#[derive(Debug)] pub struct SlabCache { // TODO ADD LOCK pub layout: Layout, diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index 6dfde14..a8f7db0 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -13,7 +13,15 @@ use core::{ ops::{Deref, DerefMut}, }; -#[derive(Default)] +pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { + freelist: [BuddyBlockMeta { + next: None, + prev: None, + order: None, + }; BUDDY_MAX_ORDER], +}; + +#[derive(Default, Debug)] pub struct Unassigned; pub type UnassignedPage = Page; @@ -33,7 +41,7 @@ impl UnassignedPage { pub static mut PAGES: LateInit<&'static mut [UnassignedPage]> = LateInit::uninit(); -#[derive(Default, Clone, Copy)] +#[derive(Default, Clone, Copy, Debug)] pub struct BuddyBlockMeta { next: Option<*mut UnassignedPage>, prev: Option<*mut UnassignedPage>, @@ -55,7 +63,6 @@ impl BuddyBlockMeta { } } -#[derive(Default)] pub struct BuddyAllocator { freelist: [BuddyBlockMeta; BUDDY_MAX_ORDER], } @@ -67,14 +74,17 @@ impl BuddyAllocator { "Size cannot be greater then: {}", 1 << BUDDY_MAX_ORDER ); - let order = num_pages.next_power_of_two().leading_zeros() as usize; + let order = (usize::BITS + - 1 + - num_pages.next_power_of_two().leading_zeros()) + as usize; let page = self.freelist[order].next.unwrap_or_else(|| { self.split_until(order) .expect("Out of memory, swap is not implemented") }); - get_page_address(page) + (unsafe { &*page }).physical_address() } pub fn free_pages(&self, address: usize) { @@ -88,7 +98,10 @@ impl BuddyAllocator { wanted_order: usize, ) -> Option<*mut 
UnassignedPage> { let mut closet_order = ((wanted_order + 1)..BUDDY_MAX_ORDER) - .find(|i| self.freelist[*i].next.is_some())?; + .find(|i| self.freelist[*i].next.is_some()) + .unwrap(); + + println!("closet: {}, wanted: {}", closet_order, wanted_order); let initial_page = unsafe { &mut *self.freelist[closet_order] @@ -117,15 +130,13 @@ impl BuddyAllocator { } pub fn init(&'static mut self) { - self.freelist[BUDDY_MAX_ORDER - 1] = - unsafe { PAGES[0].buddy_meta }; - let mut iter = unsafe { PAGES .iter_mut() .step_by(BuddyOrder::MAX as usize) .peekable() }; + let mut prev = None; while let Some(curr) = iter.next() { @@ -136,10 +147,12 @@ impl BuddyAllocator { curr.buddy_meta.order = Some(BuddyOrder::MAX); prev = Some(curr) } + self.freelist[BUDDY_MAX_ORDER - 1] = + unsafe { PAGES[0].buddy_meta }; } } -#[derive(Default)] +#[derive(Default, Debug)] pub struct Page { pub owner: Option<&'static SlabCache>, pub buddy_meta: BuddyBlockMeta, @@ -156,6 +169,13 @@ impl Page { unsafe { &mut *(ptr as *mut UnassignedPage) } } + pub fn physical_address(&self) -> usize { + let index = unsafe { + PAGES.as_ptr().offset_from(self.as_unassigned()) as usize + }; + index * REGULAR_PAGE_SIZE + } + pub fn get_buddy(&self) -> Option<*mut Page> { if let Some(order) = self.buddy_meta.order { if let BuddyOrder::MAX = order { @@ -224,30 +244,27 @@ impl DerefMut for LateInit { } } -pub fn get_page_address(page: *const Page) -> usize { - let index = unsafe { - PAGES.as_ptr().offset_from((&*page).as_unassigned()) as usize - }; - index * REGULAR_PAGE_SIZE -} - pub fn pages_init(map: &ParsedMemoryMap) -> usize { let last = map.last().unwrap(); let last_page = (last.base_address + last.length) as usize & !REGULAR_PAGE_ALIGNMENT.as_usize(); let total_pages = last_page / REGULAR_PAGE_SIZE; - println!("Last Page: {}, Total Pages: {}", last_page, total_pages); + println!( + "Last Page: {}, Total Pages: {}, size_of_array: {:x?} Kib", + last_page, + total_pages, + total_pages * size_of::>() / 1024 + ); unsafe { PAGES.write(core::slice::from_raw_parts_mut( PAGE_ALLOCATOR_OFFSET as *mut UnassignedPage, total_pages, )); - PAGES - .iter_mut() - .for_each(|p| *p = UnassignedPage::default()); - + for p in PAGES.iter_mut() { + *p = UnassignedPage::default(); + } PAGES.as_ptr_range().end as usize } } diff --git a/shared/common/src/enums/buddy.rs b/shared/common/src/enums/buddy.rs index bb935db..5a50fda 100644 --- a/shared/common/src/enums/buddy.rs +++ b/shared/common/src/enums/buddy.rs @@ -11,6 +11,7 @@ pub const BUDDY_MAX_ORDER: usize = BuddyOrder::VARIANTS.len(); Clone, Copy, PartialEq, + Debug, Eq, TryFromPrimitive, UnsafeFromPrimitive, From 95c390e76d5dc852c391d0579543b57cbcf14c48 Mon Sep 17 00:00:00 2001 From: sagi Date: Mon, 5 Jan 2026 23:52:32 +0200 Subject: [PATCH 16/78] mapped more memory so the buddy allocator would be happy. 
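The page-descriptor array starts at PAGE_ALLOCATOR_OFFSET and grows with
physical memory, so it quickly outgrows the single identity-mapped 2 MiB
huge page; this patch therefore maps a second huge page at 0x200000. A
rough sizing sketch (the 128 MiB figure and the helper below are
illustrative assumptions, not code from this series):

    use core::mem::size_of;
    use common::constants::REGULAR_PAGE_SIZE;
    use crate::memory::page_descriptor::{Page, Unassigned};

    // Hypothetical machine with 128 MiB of physical memory.
    const PHYS_MEM_BYTES: usize = 128 * 1024 * 1024;

    fn descriptor_array_bytes() -> usize {
        // One `Page` descriptor per 4 KiB frame: 32_768 frames here,
        // i.e. on the order of a megabyte of descriptors before the
        // kernel's own code and data are even counted.
        let frames = PHYS_MEM_BYTES / REGULAR_PAGE_SIZE;
        frames * size_of::<Page<Unassigned>>()
    }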
--- shared/cpu_utils/src/structures/paging/init.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/shared/cpu_utils/src/structures/paging/init.rs b/shared/cpu_utils/src/structures/paging/init.rs index 18e2176..290d304 100644 --- a/shared/cpu_utils/src/structures/paging/init.rs +++ b/shared/cpu_utils/src/structures/paging/init.rs @@ -54,8 +54,11 @@ pub fn enable() -> Option<()> { ); identity_page_table_l2.entries[0].map_unchecked( PhysicalAddress::new_unchecked(0), - /// TODO PATCH REMOVE LATER - PageEntryFlags::huge_io_page_flags(), + PageEntryFlags::huge_page_flags(), + ); + identity_page_table_l2.entries[1].map_unchecked( + PhysicalAddress::new_unchecked(0x200000), + PageEntryFlags::huge_page_flags(), ); } // ANCHOR_END: setup_page_tables @@ -82,7 +85,7 @@ pub fn enable() -> Option<()> { ); top_identity_page_table_l2.entries[0].map_unchecked( PhysicalAddress::new_unchecked(0), - PageEntryFlags::huge_page_flags(), + PageEntryFlags::huge_io_page_flags(), ); } // ANCHOR_END: setup_top_page_tables From f35c51d0041a12ed25dff15d5952fbd6a1a11e48 Mon Sep 17 00:00:00 2001 From: sagi Date: Thu, 8 Jan 2026 00:48:07 +0200 Subject: [PATCH 17/78] fixed stupid bugs on the buddy allocation, seems to work now. --- kernel/src/memory/page_descriptor.rs | 47 +++++++++++++++++++++------- 1 file changed, 35 insertions(+), 12 deletions(-) diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index a8f7db0..80622ef 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -7,6 +7,7 @@ use common::{ PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, }, enums::{BUDDY_MAX_ORDER, BuddyOrder}, + write_volatile, }; use core::{ mem::MaybeUninit, @@ -79,7 +80,7 @@ impl BuddyAllocator { - num_pages.next_power_of_two().leading_zeros()) as usize; - let page = self.freelist[order].next.unwrap_or_else(|| { + let page = self.freelist[order].detach().unwrap_or_else(|| { self.split_until(order) .expect("Out of memory, swap is not implemented") }); @@ -109,18 +110,23 @@ impl BuddyAllocator { .unwrap() }; + println!("Initial Page: {:?}", initial_page); + let (mut lhs, mut rhs) = unsafe { initial_page.split() }.unwrap(); - while closet_order != wanted_order { - closet_order -= 1; + closet_order -= 1; + while closet_order != wanted_order { + println!("Left address: {:?}, right address: {:?}", lhs, rhs); self.freelist[closet_order].attach(rhs); let split_ref = unsafe { &mut *lhs }; (lhs, rhs) = unsafe { split_ref.split().unwrap() }; + closet_order -= 1; } + println!("Left address: {:?}, right address: {:?}", lhs, rhs); self.freelist[closet_order].attach(rhs); Some(lhs) } @@ -133,7 +139,7 @@ impl BuddyAllocator { let mut iter = unsafe { PAGES .iter_mut() - .step_by(BuddyOrder::MAX as usize) + .step_by(1 << BuddyOrder::MAX as usize) .peekable() }; @@ -147,12 +153,15 @@ impl BuddyAllocator { curr.buddy_meta.order = Some(BuddyOrder::MAX); prev = Some(curr) } - self.freelist[BUDDY_MAX_ORDER - 1] = - unsafe { PAGES[0].buddy_meta }; + self.freelist[BUDDY_MAX_ORDER - 1] = BuddyBlockMeta { + next: Some(unsafe { (&mut PAGES[0]) as *mut UnassignedPage }), + prev: None, + order: Some(BuddyOrder::MAX), + }; } } -#[derive(Default, Debug)] +#[derive(Debug)] pub struct Page { pub owner: Option<&'static SlabCache>, pub buddy_meta: BuddyBlockMeta, @@ -171,7 +180,8 @@ impl Page { pub fn physical_address(&self) -> usize { let index = unsafe { - PAGES.as_ptr().offset_from(self.as_unassigned()) as usize + (self.as_unassigned() as *const 
UnassignedPage) + .offset_from(PAGES.as_ptr()) as usize }; index * REGULAR_PAGE_SIZE } @@ -182,7 +192,9 @@ impl Page { return None; } else { return Some( - (self as *const _ as usize ^ (1 << order as usize)) + (self as *const _ as usize + ^ ((1 << order as usize) + * size_of::())) as *mut Page, ); } @@ -198,11 +210,12 @@ impl Page { &mut self, ) -> Option<(*mut Page, *mut Page)> { // Reduce it's order to find it's order. + let prev_order = BuddyOrder::try_from(self.buddy_meta.order? as u8 - 1) .unwrap(); - self.buddy_meta.order = Some(prev_order); + write_volatile!(self.buddy_meta.order, Some(prev_order)); // Find it's buddy new buddy. let buddy = unsafe { @@ -212,7 +225,7 @@ impl Page { }; // Set the order of the buddy. - buddy.buddy_meta.order = Some(prev_order); + write_volatile!(buddy.buddy_meta.order, Some(prev_order)); Some((self as *mut Page, buddy as *mut Page)) } @@ -263,7 +276,17 @@ pub fn pages_init(map: &ParsedMemoryMap) -> usize { )); for p in PAGES.iter_mut() { - *p = UnassignedPage::default(); + core::ptr::write_volatile( + p as *mut UnassignedPage, + UnassignedPage { + buddy_meta: BuddyBlockMeta { + next: None, + order: None, + prev: None, + }, + owner: None, + }, + ); } PAGES.as_ptr_range().end as usize } From 40415ee724eb87ee6ff70af10b4098ea9ef5aef5 Mon Sep 17 00:00:00 2001 From: sagi Date: Thu, 8 Jan 2026 18:36:24 +0200 Subject: [PATCH 18/78] slight changes --- kernel/src/memory/page_descriptor.rs | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index 80622ef..a918ab2 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -89,7 +89,7 @@ impl BuddyAllocator { } pub fn free_pages(&self, address: usize) { - unimplemented!() + let page_index = address / REGULAR_PAGE_SIZE; } /// This function assumes that `wanted_order` is empty, and won't check @@ -102,22 +102,17 @@ impl BuddyAllocator { .find(|i| self.freelist[*i].next.is_some()) .unwrap(); - println!("closet: {}, wanted: {}", closet_order, wanted_order); - let initial_page = unsafe { &mut *self.freelist[closet_order] .detach::() .unwrap() }; - println!("Initial Page: {:?}", initial_page); - let (mut lhs, mut rhs) = unsafe { initial_page.split() }.unwrap(); closet_order -= 1; while closet_order != wanted_order { - println!("Left address: {:?}, right address: {:?}", lhs, rhs); self.freelist[closet_order].attach(rhs); let split_ref = unsafe { &mut *lhs }; @@ -126,7 +121,6 @@ impl BuddyAllocator { closet_order -= 1; } - println!("Left address: {:?}, right address: {:?}", lhs, rhs); self.freelist[closet_order].attach(rhs); Some(lhs) } @@ -179,10 +173,10 @@ impl Page { } pub fn physical_address(&self) -> usize { - let index = unsafe { - (self.as_unassigned() as *const UnassignedPage) - .offset_from(PAGES.as_ptr()) as usize - }; + let index = (self.as_unassigned() as *const _ as usize + - PAGE_ALLOCATOR_OFFSET) + / size_of::(); + index * REGULAR_PAGE_SIZE } From 1d0104575dbdba9417eb2a9b1efe24a0d18335da Mon Sep 17 00:00:00 2001 From: sagi Date: Fri, 9 Jan 2026 13:14:04 +0200 Subject: [PATCH 19/78] fixed a bug where the found half of a page would be wrong on a split --- kernel/src/memory/page_descriptor.rs | 42 +++++++++++++--------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index a918ab2..cabe3a1 100644 --- a/kernel/src/memory/page_descriptor.rs +++ 
b/kernel/src/memory/page_descriptor.rs @@ -71,9 +71,9 @@ pub struct BuddyAllocator { impl BuddyAllocator { pub fn alloc_pages(&mut self, num_pages: usize) -> usize { assert!( - num_pages < (1 << BUDDY_MAX_ORDER), + num_pages <= (1 << BuddyOrder::MAX as usize), "Size cannot be greater then: {}", - 1 << BUDDY_MAX_ORDER + 1 << BuddyOrder::MAX as usize ); let order = (usize::BITS - 1 @@ -99,8 +99,7 @@ impl BuddyAllocator { wanted_order: usize, ) -> Option<*mut UnassignedPage> { let mut closet_order = ((wanted_order + 1)..BUDDY_MAX_ORDER) - .find(|i| self.freelist[*i].next.is_some()) - .unwrap(); + .find(|i| self.freelist[*i].next.is_some())?; let initial_page = unsafe { &mut *self.freelist[closet_order] @@ -109,7 +108,6 @@ impl BuddyAllocator { }; let (mut lhs, mut rhs) = unsafe { initial_page.split() }.unwrap(); - closet_order -= 1; while closet_order != wanted_order { @@ -181,19 +179,17 @@ impl Page { } pub fn get_buddy(&self) -> Option<*mut Page> { - if let Some(order) = self.buddy_meta.order { - if let BuddyOrder::MAX = order { - return None; - } else { - return Some( - (self as *const _ as usize - ^ ((1 << order as usize) - * size_of::())) - as *mut Page, - ); - } + let order = self.buddy_meta.order?; + if let BuddyOrder::MAX = order { + None + } else { + Some( + (self as *const _ as usize + ^ ((1 << order as usize) + * size_of::())) + as *mut Page, + ) } - None } /// TODO: Make an unsafe split if relevant @@ -210,13 +206,13 @@ impl Page { .unwrap(); write_volatile!(self.buddy_meta.order, Some(prev_order)); + let index = ((self.as_unassigned() as *const _ as usize + - PAGE_ALLOCATOR_OFFSET) + / size_of::()) + + (1 << prev_order as usize); - // Find it's buddy new buddy. - let buddy = unsafe { - &mut (*self - .get_buddy() - .expect("Buddy order given is the max order")) - }; + // Find it's half + let buddy = unsafe { PAGES[index].assign_mut::() }; // Set the order of the buddy. 
write_volatile!(buddy.buddy_meta.order, Some(prev_order)); From 914c17425d006bb20497b2f7283ebb3e737461cf Mon Sep 17 00:00:00 2001 From: sagi Date: Fri, 9 Jan 2026 15:25:34 +0200 Subject: [PATCH 20/78] currently disabled scan because allocator is not implemented on the buddy allocator --- kernel/src/drivers/pci.rs | 91 +++++++++++++++++++-------------------- 1 file changed, 44 insertions(+), 47 deletions(-) diff --git a/kernel/src/drivers/pci.rs b/kernel/src/drivers/pci.rs index d1de4da..8378527 100644 --- a/kernel/src/drivers/pci.rs +++ b/kernel/src/drivers/pci.rs @@ -2,10 +2,7 @@ extern crate alloc; use crate::{ drivers::ata::ahci::AHCIBaseAddress, - memory::allocators::page_allocator::{ - ALLOCATOR, allocator::PhysicalPageAllocator, - }, - println, + memory::allocators::page_allocator::allocator::PhysicalPageAllocator, }; use alloc::vec::Vec; use common::enums::{ @@ -406,46 +403,46 @@ impl PciDevice { pub fn enable_interrupts(&self, irq: CascadedPicInterruptLine) {} } -pub fn scan_pci() -> Vec { - let mut v: Vec = - Vec::with_capacity_in(64, unsafe { - ALLOCATOR.assume_init_ref().clone() - }); - for bus in 0..=255 { - for device in 0..32 { - let common = - PciConfigurationCycle::read_common_header(bus, device, 0); - if common.vendor_device.vendor == VendorID::NonExistent { - continue; - } - v.push_within_capacity( - PciConfigurationCycle::read_pci_device( - bus, device, 0, common, - ), - ) - .unwrap_or_else(|_| { - panic!("PCI Vec cannot push any more items") - }); - if !common.header_type.is_multifunction() { - continue; - } - for function in 1..8 { - let common = PciConfigurationCycle::read_common_header( - bus, device, function, - ); - if common.vendor_device.vendor == VendorID::NonExistent { - continue; - } - v.push_within_capacity( - PciConfigurationCycle::read_pci_device( - bus, device, function, common, - ), - ) - .unwrap_or_else(|_| { - panic!("PCI Vec cannot push any more items") - }); - } - } - } - v -} +// pub fn scan_pci() -> Vec { +// let mut v: Vec = +// Vec::with_capacity_in(64, unsafe { +// ALLOCATOR.assume_init_ref().clone() +// }); +// for bus in 0..=255 { +// for device in 0..32 { +// let common = +// PciConfigurationCycle::read_common_header(bus, device, +// 0); if common.vendor_device.vendor == VendorID::NonExistent +// { continue; +// } +// v.push_within_capacity( +// PciConfigurationCycle::read_pci_device( +// bus, device, 0, common, +// ), +// ) +// .unwrap_or_else(|_| { +// panic!("PCI Vec cannot push any more items") +// }); +// if !common.header_type.is_multifunction() { +// continue; +// } +// for function in 1..8 { +// let common = PciConfigurationCycle::read_common_header( +// bus, device, function, +// ); +// if common.vendor_device.vendor == VendorID::NonExistent +// { continue; +// } +// v.push_within_capacity( +// PciConfigurationCycle::read_pci_device( +// bus, device, function, common, +// ), +// ) +// .unwrap_or_else(|_| { +// panic!("PCI Vec cannot push any more items") +// }); +// } +// } +// } +// v +// } From 4ec0badb68f8dc07b9870b0ef0f6dcb77413a72f Mon Sep 17 00:00:00 2001 From: sagi Date: Fri, 9 Jan 2026 15:26:25 +0200 Subject: [PATCH 21/78] separated buddy allocator to a different file --- .../memory/allocators/page_allocator/buddy.rs | 144 +++++++++++++++ kernel/src/memory/page_descriptor.rs | 165 +----------------- 2 files changed, 153 insertions(+), 156 deletions(-) create mode 100644 kernel/src/memory/allocators/page_allocator/buddy.rs diff --git a/kernel/src/memory/allocators/page_allocator/buddy.rs 
b/kernel/src/memory/allocators/page_allocator/buddy.rs new file mode 100644 index 0000000..280fb74 --- /dev/null +++ b/kernel/src/memory/allocators/page_allocator/buddy.rs @@ -0,0 +1,144 @@ +use core::ptr; + +use common::{ + address_types::PhysicalAddress, + enums::{BUDDY_MAX_ORDER, BuddyOrder}, +}; +use cpu_utils::structures::paging::PageTable; + +use crate::memory::page_descriptor::{ + PAGES, Page, Unassigned, UnassignedPage, +}; + +pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { + freelist: [BuddyBlockMeta { + next: None, + prev: None, + order: None, + }; BUDDY_MAX_ORDER], +}; + +#[derive(Default, Clone, Copy, Debug)] +pub struct BuddyBlockMeta { + pub next: Option<*mut UnassignedPage>, + pub prev: Option<*mut UnassignedPage>, + pub order: Option, +} + +impl BuddyBlockMeta { + pub fn detach(&mut self) -> Option<*mut Page> { + let detached = self.next? as *mut Page; // None if there is no page to detach + self.next = unsafe { (*detached).buddy_meta.next }; + Some(detached) + } + + pub fn attach(&mut self, attachment: *mut Page) { + let attachment_ref = + unsafe { &mut *attachment }.as_unassigned_mut(); + attachment_ref.buddy_meta.next = self.next; + self.next = Some(attachment_ref as *mut UnassignedPage) + } +} + +pub struct BuddyAllocator { + freelist: [BuddyBlockMeta; BUDDY_MAX_ORDER], +} + +impl BuddyAllocator { + pub fn alloc_pages(&mut self, num_pages: usize) -> usize { + assert!( + num_pages <= (1 << BuddyOrder::MAX as usize), + "Size cannot be greater then: {}", + 1 << BuddyOrder::MAX as usize + ); + let order = (usize::BITS + - 1 + - num_pages.next_power_of_two().leading_zeros()) + as usize; + + let page = self.freelist[order].detach().unwrap_or_else(|| { + self.split_until(order) + .expect("Out of memory, swap is not implemented") + }); + + (unsafe { &*page }).physical_address() + } + + // pub fn free_pages(&self, address: usize) { + // let page_index = address / REGULAR_PAGE_SIZE; + // } + + /// This function assumes that `wanted_order` is empty, and won't check + /// it. 
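+    ///
+    /// Rough shape of the walk: find the closest populated order above
+    /// `wanted_order`, detach one block there, then split downwards,
+    /// handing one half back to the freelist at every level on the way.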
+ pub fn split_until( + &mut self, + wanted_order: usize, + ) -> Option<*mut UnassignedPage> { + let mut closet_order = ((wanted_order + 1)..BUDDY_MAX_ORDER) + .find(|i| self.freelist[*i].next.is_some())?; + + let initial_page = unsafe { + &mut *self.freelist[closet_order] + .detach::() + .unwrap() + }; + + let (mut lhs, mut rhs) = unsafe { initial_page.split() }.unwrap(); + closet_order -= 1; + + while closet_order != wanted_order { + self.freelist[closet_order].attach(rhs); + + let split_ref = unsafe { &mut *lhs }; + + (lhs, rhs) = unsafe { split_ref.split().unwrap() }; + closet_order -= 1; + } + + self.freelist[closet_order].attach(rhs); + Some(lhs) + } + + pub fn merge(&self) { + unimplemented!() + } + + pub fn alloc_table(&mut self) -> &'static mut PageTable { + unsafe { + let address = + { PhysicalAddress::new_unchecked(self.alloc_pages(1)) }; + ptr::write_volatile( + address.as_mut_ptr::(), + PageTable::empty(), + ); + &mut *address.as_mut_ptr::() + } + } + + pub fn init(&'static mut self) { + let mut iter = unsafe { + PAGES + .iter_mut() + .step_by(1 << BuddyOrder::MAX as usize) + .peekable() + }; + + let mut prev = None; + + while let Some(curr) = iter.next() { + curr.buddy_meta.next = iter.peek().map(|v| { + *v as *const UnassignedPage as *mut UnassignedPage + }); + curr.buddy_meta.prev = prev; + curr.buddy_meta.order = Some(BuddyOrder::MAX); + prev = Some(curr) + } + self.freelist[BUDDY_MAX_ORDER - 1] = BuddyBlockMeta { + next: Some(unsafe { (&mut PAGES[0]) as *mut UnassignedPage }), + prev: None, + order: Some(BuddyOrder::MAX), + }; + // Allocate initial MB + self.alloc_pages(256); + } +} diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index cabe3a1..8a73196 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -1,26 +1,20 @@ use crate::{ - memory::{allocators::slab::SlabCache, memory_map::ParsedMemoryMap}, + memory::{ + allocators::{ + page_allocator::buddy::BuddyBlockMeta, slab::SlabCache, + }, + memory_map::ParsedMemoryMap, + }, println, }; use common::{ constants::{ PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, }, - enums::{BUDDY_MAX_ORDER, BuddyOrder}, + enums::BuddyOrder, + late_init::LateInit, write_volatile, }; -use core::{ - mem::MaybeUninit, - ops::{Deref, DerefMut}, -}; - -pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { - freelist: [BuddyBlockMeta { - next: None, - prev: None, - order: None, - }; BUDDY_MAX_ORDER], -}; #[derive(Default, Debug)] pub struct Unassigned; @@ -42,117 +36,6 @@ impl UnassignedPage { pub static mut PAGES: LateInit<&'static mut [UnassignedPage]> = LateInit::uninit(); -#[derive(Default, Clone, Copy, Debug)] -pub struct BuddyBlockMeta { - next: Option<*mut UnassignedPage>, - prev: Option<*mut UnassignedPage>, - order: Option, -} - -impl BuddyBlockMeta { - pub fn detach(&mut self) -> Option<*mut Page> { - let detached = self.next? 
as *mut Page; // None if there is no page to detach - self.next = unsafe { (*detached).buddy_meta.next }; - Some(detached) - } - - pub fn attach(&mut self, attachment: *mut Page) { - let attachment_ref = - unsafe { &mut *attachment }.as_unassigned_mut(); - attachment_ref.buddy_meta.next = self.next; - self.next = Some(attachment_ref as *mut UnassignedPage) - } -} - -pub struct BuddyAllocator { - freelist: [BuddyBlockMeta; BUDDY_MAX_ORDER], -} - -impl BuddyAllocator { - pub fn alloc_pages(&mut self, num_pages: usize) -> usize { - assert!( - num_pages <= (1 << BuddyOrder::MAX as usize), - "Size cannot be greater then: {}", - 1 << BuddyOrder::MAX as usize - ); - let order = (usize::BITS - - 1 - - num_pages.next_power_of_two().leading_zeros()) - as usize; - - let page = self.freelist[order].detach().unwrap_or_else(|| { - self.split_until(order) - .expect("Out of memory, swap is not implemented") - }); - - (unsafe { &*page }).physical_address() - } - - pub fn free_pages(&self, address: usize) { - let page_index = address / REGULAR_PAGE_SIZE; - } - - /// This function assumes that `wanted_order` is empty, and won't check - /// it. - pub fn split_until( - &mut self, - wanted_order: usize, - ) -> Option<*mut UnassignedPage> { - let mut closet_order = ((wanted_order + 1)..BUDDY_MAX_ORDER) - .find(|i| self.freelist[*i].next.is_some())?; - - let initial_page = unsafe { - &mut *self.freelist[closet_order] - .detach::() - .unwrap() - }; - - let (mut lhs, mut rhs) = unsafe { initial_page.split() }.unwrap(); - closet_order -= 1; - - while closet_order != wanted_order { - self.freelist[closet_order].attach(rhs); - - let split_ref = unsafe { &mut *lhs }; - - (lhs, rhs) = unsafe { split_ref.split().unwrap() }; - closet_order -= 1; - } - - self.freelist[closet_order].attach(rhs); - Some(lhs) - } - - pub fn merge(&self) { - unimplemented!() - } - - pub fn init(&'static mut self) { - let mut iter = unsafe { - PAGES - .iter_mut() - .step_by(1 << BuddyOrder::MAX as usize) - .peekable() - }; - - let mut prev = None; - - while let Some(curr) = iter.next() { - curr.buddy_meta.next = iter.peek().map(|v| { - *v as *const UnassignedPage as *mut UnassignedPage - }); - curr.buddy_meta.prev = prev; - curr.buddy_meta.order = Some(BuddyOrder::MAX); - prev = Some(curr) - } - self.freelist[BUDDY_MAX_ORDER - 1] = BuddyBlockMeta { - next: Some(unsafe { (&mut PAGES[0]) as *mut UnassignedPage }), - prev: None, - order: Some(BuddyOrder::MAX), - }; - } -} - #[derive(Debug)] pub struct Page { pub owner: Option<&'static SlabCache>, @@ -221,32 +104,6 @@ impl Page { } } -pub struct LateInit(MaybeUninit); - -impl LateInit { - pub const fn uninit() -> LateInit { - LateInit::(MaybeUninit::uninit()) - } - - pub const fn write(&mut self, val: T) { - self.0.write(val); - } -} - -impl Deref for LateInit { - type Target = T; - - fn deref(&self) -> &Self::Target { - unsafe { self.0.assume_init_ref() } - } -} - -impl DerefMut for LateInit { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { self.0.assume_init_mut() } - } -} - pub fn pages_init(map: &ParsedMemoryMap) -> usize { let last = map.last().unwrap(); let last_page = (last.base_address + last.length) as usize @@ -269,11 +126,7 @@ pub fn pages_init(map: &ParsedMemoryMap) -> usize { core::ptr::write_volatile( p as *mut UnassignedPage, UnassignedPage { - buddy_meta: BuddyBlockMeta { - next: None, - order: None, - prev: None, - }, + buddy_meta: BuddyBlockMeta::default(), owner: None, }, ); From 8a21efa47e9028a23f4b56f068590e311e3d59eb Mon Sep 17 00:00:00 2001 From: sagi Date: Fri, 9 
Jan 2026 15:26:57 +0200 Subject: [PATCH 22/78] used buddy allocator instead of the bitmap allocator --- .../allocators/page_allocator/extensions.rs | 36 +++++++++---------- .../memory/allocators/page_allocator/mod.rs | 20 ++--------- 2 files changed, 19 insertions(+), 37 deletions(-) diff --git a/kernel/src/memory/allocators/page_allocator/extensions.rs b/kernel/src/memory/allocators/page_allocator/extensions.rs index 57e15f5..17ba98a 100644 --- a/kernel/src/memory/allocators/page_allocator/extensions.rs +++ b/kernel/src/memory/allocators/page_allocator/extensions.rs @@ -1,4 +1,3 @@ -use super::ALLOCATOR; use common::{ address_types::{PhysicalAddress, VirtualAddress}, constants::{ @@ -17,6 +16,8 @@ use strum::VariantArray; use common::error::TableError; use cpu_utils::structures::paging::EntryIndex; +use crate::memory::allocators::page_allocator::buddy::BUDDY_ALLOCATOR; + #[ext] pub impl PhysicalAddress { fn map( @@ -49,25 +50,20 @@ pub impl PageTableEntry { match self.mapped_table_mut() { Ok(table) => Some(table), Err(EntryError::NotATable) => None, - Err(EntryError::NoMapping) => { - let resolved_table = - unsafe { ALLOCATOR.assume_init_ref().alloc_table() }; - unsafe { - self.map_unchecked( - PhysicalAddress::new_unchecked( - resolved_table.address().as_usize(), - ), - PageEntryFlags::table_flags(), - ); - } - unsafe { - Some( - &mut *self - .mapped_unchecked() - .as_mut_ptr::(), - ) - } - } + Err(EntryError::NoMapping) => unsafe { + let resolved_table = BUDDY_ALLOCATOR.alloc_table(); + self.map_unchecked( + PhysicalAddress::new_unchecked( + resolved_table.address().as_usize(), + ), + PageEntryFlags::table_flags(), + ); + Some( + &mut *self + .mapped_unchecked() + .as_mut_ptr::(), + ) + }, } } } diff --git a/kernel/src/memory/allocators/page_allocator/mod.rs b/kernel/src/memory/allocators/page_allocator/mod.rs index 3c799a4..ac555f4 100644 --- a/kernel/src/memory/allocators/page_allocator/mod.rs +++ b/kernel/src/memory/allocators/page_allocator/mod.rs @@ -1,26 +1,12 @@ pub mod allocator; +pub mod buddy; pub mod extensions; -use allocator::PhysicalPageAllocator; -use core::mem::MaybeUninit; - -pub static mut ALLOCATOR: MaybeUninit = - MaybeUninit::uninit(); - #[macro_export] /// Allocate the amount of pages specified, and return the address macro_rules! 
alloc_pages { ($page_number: expr) => {{ - use core::alloc::{Allocator, Layout}; - use $crate::memory::allocators::page_allocator::ALLOCATOR; - ALLOCATOR - .assume_init_ref() - .allocate(Layout::from_size_align_unchecked( - REGULAR_PAGE_SIZE * $page_number, - REGULAR_PAGE_ALIGNMENT.as_usize(), - )) - .unwrap() - .addr() - .get() + use $crate::memory::allocators::page_allocator::buddy::BUDDY_ALLOCATOR; + BUDDY_ALLOCATOR.alloc_pages($page_number) }}; } From 29a186fc3dc1b52bf78d288e3368693c90460404 Mon Sep 17 00:00:00 2001 From: sagi Date: Fri, 9 Jan 2026 15:27:33 +0200 Subject: [PATCH 23/78] separated late init into a different file --- shared/common/src/late_init.rs | 30 ++++++++++++++++++++++++++++++ shared/common/src/lib.rs | 1 + 2 files changed, 31 insertions(+) create mode 100644 shared/common/src/late_init.rs diff --git a/shared/common/src/late_init.rs b/shared/common/src/late_init.rs new file mode 100644 index 0000000..a2ba4a5 --- /dev/null +++ b/shared/common/src/late_init.rs @@ -0,0 +1,30 @@ +use core::{ + mem::MaybeUninit, + ops::{Deref, DerefMut}, +}; + +pub struct LateInit(MaybeUninit); + +impl LateInit { + pub const fn uninit() -> LateInit { + LateInit::(MaybeUninit::uninit()) + } + + pub const fn write(&mut self, val: T) { + self.0.write(val); + } +} + +impl Deref for LateInit { + type Target = T; + + fn deref(&self) -> &Self::Target { + unsafe { self.0.assume_init_ref() } + } +} + +impl DerefMut for LateInit { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { self.0.assume_init_mut() } + } +} diff --git a/shared/common/src/lib.rs b/shared/common/src/lib.rs index 746f121..4923b17 100644 --- a/shared/common/src/lib.rs +++ b/shared/common/src/lib.rs @@ -13,6 +13,7 @@ pub mod bitmap; pub mod constants; pub mod enums; pub mod error; +pub mod late_init; pub mod ring_buffer; pub mod volatile; From 69cf55ff02a3a23e9a886d33931bc78282bfe417 Mon Sep 17 00:00:00 2001 From: sagi Date: Sat, 10 Jan 2026 16:56:42 +0200 Subject: [PATCH 24/78] added some of the slab allocator structures --- kernel/src/main.rs | 212 ++++++++++-------- .../memory/allocators/page_allocator/buddy.rs | 1 + kernel/src/memory/allocators/slab.rs | 32 ++- 3 files changed, 146 insertions(+), 99 deletions(-) diff --git a/kernel/src/main.rs b/kernel/src/main.rs index ab4fa5f..f19a3da 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -33,8 +33,11 @@ use crate::{ vga_display::color_code::ColorCode, }, memory::{ - allocators::page_allocator::allocator::PhysicalPageAllocator, - memory_map::{ParsedMapDisplay, parse_map}, + allocators::page_allocator::{ + allocator::PhysicalPageAllocator, buddy::BUDDY_ALLOCATOR, + }, + memory_map::{MemoryMap, MemoryRegion, parse_map}, + page_descriptor::pages_init, }, }; @@ -52,8 +55,6 @@ use cpu_utils::{ }, }; -use memory::allocators::page_allocator::ALLOCATOR; - #[unsafe(no_mangle)] #[unsafe(link_section = ".start")] #[allow(clippy::missing_safety_doc)] @@ -63,105 +64,124 @@ pub unsafe extern "C" fn _start() -> ! 
{ okprintln!("Entered Long Mode"); parse_map(); okprintln!("Obtained Memory Map"); - println!("{}", ParsedMapDisplay(parsed_memory_map!())); - PhysicalPageAllocator::init(unsafe { &mut ALLOCATOR }); - println!("ALLOCATOR MAP LEN: {}", unsafe { - ALLOCATOR.assume_init_mut().map_mut().map.len() - }); - okprintln!("Allocator Initialized"); + println!("{}", MemoryMap(parsed_memory_map!())); + + // loop {} + // PhysicalPageAllocator::init(unsafe { &mut ALLOCATOR }); + // println!("ALLOCATOR MAP LEN: {}", unsafe { + // ALLOCATOR.assume_init_mut().map_mut().map.len() + // }); + // okprintln!("Allocator Initialized"); unsafe { - let idt_address = alloc_pages!(1).into(); - InterruptDescriptorTable::init(&mut IDT, idt_address); + // let idt_address = alloc_pages!(1).into(); + InterruptDescriptorTable::init(&mut IDT, 0xc0000.into()); okprintln!("Initialized interrupt descriptor table"); interrupt_handlers::init(IDT.assume_init_mut()); okprintln!("Initialized interrupts handlers"); CascadedPIC::init(&mut PIC); okprintln!("Initialized Programmable Interrupt Controller"); - let keyboard_buffer_address = alloc_pages!(1).into(); - Keyboard::init( - &mut KEYBOARD, - keyboard_buffer_address, - NonZero::new(REGULAR_PAGE_SIZE).unwrap(), - ); - okprintln!("Initialized Keyboard"); - interrupts::enable(); - } - let mut pci_devices = pci::scan_pci(); - println!("Press ENTER to enumerate PCI devices!"); - let a = pci_devices.as_ptr() as usize; - println!("pci_devices address: {:x}", a); - - loop { - let c = unsafe { KEYBOARD.assume_init_mut().read_raw_scancode() }; - if let Some(e) = c - && PS2ScanCode::from_scancode(e) == PS2ScanCode::Enter - { - break; - } - } - - unsafe { PIC.enable_irq(CascadedPicInterruptLine::Ahci) }; - for device in pci_devices.iter_mut() { - // println!("{:#?}", unsafe { device.common.vendor_device }); - // println!("{:#?}", unsafe { device.common.header_type }); - // println!("{:#?}\n", unsafe { device.common.device_type }); - - if device.header.common().device_type.is_ahci() { - let a = unsafe { - PhysicalAddress::new_unchecked( - device.header.general_device.bar5.address(), - ) - }; - - println!( - "Bus Master: {}, Interrupts Disable {}, I/O Space: {}, \ - Memory Space: {}", - device.header.common().command.is_bus_master(), - device.header.common().command.is_interrupt_disable(), - device.header.common().command.is_io_space(), - device.header.common().command.is_memory_space() - ); - - println!( - "Interrupt Line: {}, Interrupt Pin: {}", - unsafe { device.header.general_device.interrupt_line }, - unsafe { device.header.general_device.interrupt_pin } - ); - - let aligned = a.align_down(REGULAR_PAGE_ALIGNMENT); - let hba = HBAMemoryRegisters::new(aligned).unwrap(); - let _ = hba.probe_init(); - let p = &mut hba.ports[0]; - - let buf = - unsafe { alloc_pages!(1) as *mut IdentityPacketData }; - - p.identity_packet(buf); - - let id = unsafe { - core::ptr::read_volatile( - (buf as usize + PHYSICAL_MEMORY_OFFSET) - as *mut IdentityPacketData, - ) - }; - - println!("{:?}", id); - - println!("Cylinders: {}", id.cylinders); - println!("Heads: {}", id.heads); - println!("Sectors: {}", id.sectors); - - println!("Serial: {:?}", &id.serial_number); - println!("Model: {:?}", &id.model_num); - println!("Firmware: {:?}", &id.firmware_rev); - } - } - - loop { - unsafe { - print!("{}", KEYBOARD.assume_init_mut().read_char() ; color = ColorCode::new(Color::Green, Color::Black)); - } + // let keyboard_buffer_address = alloc_pages!(1).into(); + // Keyboard::init( + // &mut KEYBOARD, + // 
keyboard_buffer_address, + // NonZero::new(REGULAR_PAGE_SIZE).unwrap(), + // ); + // okprintln!("Initialized Keyboard"); + // interrupts::enable(); } + pages_init(&MemoryMap(parsed_memory_map!())); + unsafe { BUDDY_ALLOCATOR.init() }; + let a = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; + println!("{:x?}", a); + let a = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; + println!("{:x?}", a); + let a = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; + println!("{:x?}", a); + let a = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; + println!("{:x?}", a); + let a = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; + println!("{:x?}", a); + let b = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; + println!("{:x?}", b); + let c = unsafe { BUDDY_ALLOCATOR.alloc_pages(32) }; + println!("{:x?}", c); + panic!("") + // let mut pci_devices = pci::scan_pci(); + // println!("Press ENTER to enumerate PCI devices!"); + // let a = pci_devices.as_ptr() as usize; + // println!("pci_devices address: {:x}", a); + + // loop { + // let c = unsafe { KEYBOARD.assume_init_mut().read_raw_scancode() + // }; if let Some(e) = c + // && PS2ScanCode::from_scancode(e) == PS2ScanCode::Enter + // { + // break; + // } + // } + + // unsafe { PIC.enable_irq(CascadedPicInterruptLine::Ahci) }; + // for device in pci_devices.iter_mut() { + // // println!("{:#?}", unsafe { device.common.vendor_device }); + // // println!("{:#?}", unsafe { device.common.header_type }); + // // println!("{:#?}\n", unsafe { device.common.device_type }); + + // if device.header.common().device_type.is_ahci() { + // let a = unsafe { + // PhysicalAddress::new_unchecked( + // device.header.general_device.bar5.address(), + // ) + // }; + + // println!( + // "Bus Master: {}, Interrupts Disable {}, I/O Space: {}, \ + // Memory Space: {}", + // device.header.common().command.is_bus_master(), + // device.header.common().command.is_interrupt_disable(), + // device.header.common().command.is_io_space(), + // device.header.common().command.is_memory_space() + // ); + + // println!( + // "Interrupt Line: {}, Interrupt Pin: {}", + // unsafe { device.header.general_device.interrupt_line }, + // unsafe { device.header.general_device.interrupt_pin } + // ); + + // let aligned = a.align_down(REGULAR_PAGE_ALIGNMENT); + // let hba = HBAMemoryRegisters::new(aligned).unwrap(); + // let _ = hba.probe_init(); + // let p = &mut hba.ports[0]; + + // let buf = + // unsafe { alloc_pages!(1) as *mut IdentityPacketData }; + + // p.identity_packet(buf); + + // let id = unsafe { + // core::ptr::read_volatile( + // (buf as usize + PHYSICAL_MEMORY_OFFSET) + // as *mut IdentityPacketData, + // ) + // }; + + // println!("{:?}", id); + + // println!("Cylinders: {}", id.cylinders); + // println!("Heads: {}", id.heads); + // println!("Sectors: {}", id.sectors); + + // println!("Serial: {:?}", &id.serial_number); + // println!("Model: {:?}", &id.model_num); + // println!("Firmware: {:?}", &id.firmware_rev); + // } + // } + + // loop { + // unsafe { + // print!("{}", KEYBOARD.assume_init_mut().read_char() ; color + // = ColorCode::new(Color::Green, Color::Black)); } + // } } /// This function is called on panic. 
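The repeated `alloc_pages(2)` calls and the final `alloc_pages(32)` above smoke-test the order computation in `BuddyAllocator::alloc_pages` (shown earlier in the series): the requested page count is rounded up to the next power of two and mapped to a free-list order, and an empty free list falls through to `split_until`, which carves a block out of the closest larger order. A minimal standalone sketch of the size-to-order step (`order_for` is an illustrative helper, not a function in this tree; only the rounding logic mirrors the kernel code):

```rust
/// Map a page-count request to a buddy free-list order by rounding the
/// count up to the next power of two, the same computation performed in
/// `BuddyAllocator::alloc_pages`. Illustrative sketch only.
fn order_for(num_pages: usize) -> usize {
    let rounded = num_pages.next_power_of_two();
    // log2 of a power of two: the position of its single set bit.
    (usize::BITS - 1 - rounded.leading_zeros()) as usize
}

fn main() {
    assert_eq!(order_for(1), 0);  // 1 page          -> order 0
    assert_eq!(order_for(2), 1);  // 2 pages         -> order 1
    assert_eq!(order_for(3), 2);  // rounds up to 4  -> order 2
    assert_eq!(order_for(32), 5); // 32 pages        -> order 5
}
```

So each `alloc_pages(2)` above draws a fresh two-page block from the order-1 free list.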
diff --git a/kernel/src/memory/allocators/page_allocator/buddy.rs b/kernel/src/memory/allocators/page_allocator/buddy.rs index 280fb74..2238ab7 100644 --- a/kernel/src/memory/allocators/page_allocator/buddy.rs +++ b/kernel/src/memory/allocators/page_allocator/buddy.rs @@ -20,6 +20,7 @@ pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { #[derive(Default, Clone, Copy, Debug)] pub struct BuddyBlockMeta { + // TODO CHANGE INTO REF BECAUSE IT CONSUMES LESS MEMORY pub next: Option<*mut UnassignedPage>, pub prev: Option<*mut UnassignedPage>, pub order: Option, diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index ddb7cd4..a0285bc 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -1,8 +1,34 @@ -use core::alloc::Layout; +use core::{fmt::Debug, mem::ManuallyDrop}; + +/// Preallocated object in the slab allocator. +/// +/// When a slab is initialized, each position will include the index of the +/// next free object, when the object is allocated this index will be +/// overwrite by the objects data thus wasting no space on the freelist. +pub union PreallocatedObject { + allocated: ManuallyDrop, + next_free: usize, +} + +impl Debug for PreallocatedObject { + fn fmt(&self, _f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + Ok(()) + } +} + +#[derive(Debug)] +pub struct SlabDescriptor { + /// The index in the objects array of the next free objet + pub next_free: usize, + pub objects: &'static mut [PreallocatedObject], + pub next: Option<&'static mut SlabDescriptor>, +} #[derive(Debug)] pub struct SlabCache { // TODO ADD LOCK - pub layout: Layout, - pub objects: &'static mut [T], + pub buddy_order: usize, + pub partial: SlabDescriptor, + pub full: SlabDescriptor, + pub free: SlabDescriptor, } From 143b456104417a68170ef7211c68844901562175 Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 11 Jan 2026 20:37:09 +0200 Subject: [PATCH 25/78] added slab structs --- kernel/src/memory/allocators/slab.rs | 67 ++++++++++++++++++++++++---- 1 file changed, 59 insertions(+), 8 deletions(-) diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index a0285bc..f83c28a 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -1,13 +1,25 @@ -use core::{fmt::Debug, mem::ManuallyDrop}; +use core::{fmt::Debug, mem::ManuallyDrop, num::NonZero}; + +use common::constants::REGULAR_PAGE_SIZE; + +use crate::alloc_pages; + +pub union SlabCaches { + pub generic4: ManuallyDrop>, +} + +pub static SLABS: [SlabCaches; 1] = [SlabCaches { + generic4: ManuallyDrop::new(SlabCache::new()), +}]; /// Preallocated object in the slab allocator. /// /// When a slab is initialized, each position will include the index of the /// next free object, when the object is allocated this index will be -/// overwrite by the objects data thus wasting no space on the freelist. -pub union PreallocatedObject { - allocated: ManuallyDrop, - next_free: usize, +/// overwrite by the objects data thus wasting no space on the free list. 
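+///
+/// For example, a freshly initialized slab of four slots stores the
+/// indices `1`, `2`, `3` in its first three slots and a terminator in
+/// the last, so slot 0 links to slot 1 and so on; the free list costs
+/// no storage beyond the slab itself.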
+pub union PreallocatedObject { + pub allocated: ManuallyDrop, + pub next_free_idx: Option>, } impl Debug for PreallocatedObject { @@ -17,18 +29,57 @@ impl Debug for PreallocatedObject { } #[derive(Debug)] -pub struct SlabDescriptor { +pub struct SlabDescriptor { /// The index in the objects array of the next free objet - pub next_free: usize, + pub next_free_idx: usize, pub objects: &'static mut [PreallocatedObject], pub next: Option<&'static mut SlabDescriptor>, } +impl SlabDescriptor { + pub fn new( + order: usize, + next: Option<&'static mut SlabDescriptor>, + ) -> SlabDescriptor { + let address = unsafe { alloc_pages!(1 << order) }; + let objects = unsafe { + core::slice::from_raw_parts_mut( + address as *mut PreallocatedObject, + ((1 << order) * REGULAR_PAGE_SIZE) / size_of::(), + ) + }; + + for (i, object) in objects.iter_mut().enumerate() { + *object = PreallocatedObject { + next_free_idx: Some(NonZero < i as u16 + 1), + } + } + + unsafe { objects.last().unwrap().next_free_idx } + + SlabDescriptor { + next_free_idx: 0, + objects, + next, + } + } +} + #[derive(Debug)] -pub struct SlabCache { +pub struct SlabCache { // TODO ADD LOCK pub buddy_order: usize, pub partial: SlabDescriptor, pub full: SlabDescriptor, pub free: SlabDescriptor, } + +impl SlabCache { + pub const fn new() -> SlabCache { + unimplemented!() + } +} + +const trait SlabPosition { + fn get_position() -> usize; +} From 5c73aa532b82fc7065723dbfb51c0e2d576fa537 Mon Sep 17 00:00:00 2001 From: sagi Date: Mon, 12 Jan 2026 00:11:29 +0200 Subject: [PATCH 26/78] added an alloc function and used nonmax crate --- kernel/Cargo.toml | 1 + kernel/src/memory/allocators/slab.rs | 41 ++++++++++++++++++++++------ 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index c9584e8..36e6781 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -16,3 +16,4 @@ extend = "1.2.0" learnix-macros = { path = "../learnix-macros" } strum_macros = { version = "0.27", default-features = false } strum = { version = "0.27", default-features = false } +nonmax = { version = "0.5.5", default-features = false } diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index f83c28a..1a8f028 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -1,15 +1,19 @@ -use core::{fmt::Debug, mem::ManuallyDrop, num::NonZero}; +use core::{fmt::Debug, mem::ManuallyDrop}; -use common::constants::REGULAR_PAGE_SIZE; +use common::{constants::REGULAR_PAGE_SIZE, write_volatile}; -use crate::alloc_pages; +use nonmax::NonMaxU16; + +use crate::{alloc_pages, memory::page_descriptor::Unassigned}; pub union SlabCaches { pub generic4: ManuallyDrop>, + pub slab_descriptor: ManuallyDrop>, + pub slab_cache: ManuallyDrop>, } pub static SLABS: [SlabCaches; 1] = [SlabCaches { - generic4: ManuallyDrop::new(SlabCache::new()), + slab_descriptor: ManuallyDrop::new(SlabCache::new()), }]; /// Preallocated object in the slab allocator. @@ -19,7 +23,7 @@ pub static SLABS: [SlabCaches; 1] = [SlabCaches { /// overwrite by the objects data thus wasting no space on the free list. 
pub union PreallocatedObject { pub allocated: ManuallyDrop, - pub next_free_idx: Option>, + pub next_free_idx: Option, } impl Debug for PreallocatedObject { @@ -31,7 +35,7 @@ impl Debug for PreallocatedObject { #[derive(Debug)] pub struct SlabDescriptor { /// The index in the objects array of the next free objet - pub next_free_idx: usize, + pub next_free_idx: Option, pub objects: &'static mut [PreallocatedObject], pub next: Option<&'static mut SlabDescriptor>, } @@ -51,18 +55,37 @@ impl SlabDescriptor { for (i, object) in objects.iter_mut().enumerate() { *object = PreallocatedObject { - next_free_idx: Some(NonZero < i as u16 + 1), + next_free_idx: Some(unsafe { + NonMaxU16::new_unchecked(i as u16 + 1) + }), } } - unsafe { objects.last().unwrap().next_free_idx } + objects.last_mut().unwrap().next_free_idx = None; SlabDescriptor { - next_free_idx: 0, + next_free_idx: Some(unsafe { NonMaxU16::new_unchecked(0) }), objects, next, } } + + pub fn alloc_obj(&mut self, obj: T) -> &mut T { + debug_assert!( + self.next_free_idx.is_some(), + "Should always be some, because if not, slab is full" + ); + + let preallocated = + &mut self.objects[self.next_free_idx.unwrap().get() as usize]; + self.next_free_idx = unsafe { preallocated.next_free_idx }; + + write_volatile!(preallocated.allocated, ManuallyDrop::new(obj)); + + unsafe { &mut preallocated.allocated } + } + + pub unsafe fn dealloc_obj(&self, obj: &T) {} } #[derive(Debug)] From ecf0701d543020a2ed2d0df41a7c355c6b999cdb Mon Sep 17 00:00:00 2001 From: sagi Date: Mon, 12 Jan 2026 20:33:25 +0200 Subject: [PATCH 27/78] added default trait implementation --- kernel/src/main.rs | 59 +++++-------- kernel/src/memory/allocators/slab.rs | 127 +++++++++++++++++++-------- 2 files changed, 113 insertions(+), 73 deletions(-) diff --git a/kernel/src/main.rs b/kernel/src/main.rs index f19a3da..091d82b 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -18,6 +18,7 @@ #![feature(ascii_char)] #![feature(const_convert)] #![feature(core_intrinsics)] +#![feature(min_specialization)] #![deny(clippy::all)] mod drivers; mod memory; @@ -66,46 +67,27 @@ pub unsafe extern "C" fn _start() -> ! 
{ okprintln!("Obtained Memory Map"); println!("{}", MemoryMap(parsed_memory_map!())); - // loop {} - // PhysicalPageAllocator::init(unsafe { &mut ALLOCATOR }); - // println!("ALLOCATOR MAP LEN: {}", unsafe { - // ALLOCATOR.assume_init_mut().map_mut().map.len() - // }); - // okprintln!("Allocator Initialized"); + pages_init(&MemoryMap(parsed_memory_map!())); + unsafe { BUDDY_ALLOCATOR.init() }; + + okprintln!("Allocator Initialized"); unsafe { - // let idt_address = alloc_pages!(1).into(); - InterruptDescriptorTable::init(&mut IDT, 0xc0000.into()); + InterruptDescriptorTable::init(&mut IDT, alloc_pages!(1).into()); okprintln!("Initialized interrupt descriptor table"); interrupt_handlers::init(IDT.assume_init_mut()); okprintln!("Initialized interrupts handlers"); CascadedPIC::init(&mut PIC); okprintln!("Initialized Programmable Interrupt Controller"); - // let keyboard_buffer_address = alloc_pages!(1).into(); - // Keyboard::init( - // &mut KEYBOARD, - // keyboard_buffer_address, - // NonZero::new(REGULAR_PAGE_SIZE).unwrap(), - // ); - // okprintln!("Initialized Keyboard"); - // interrupts::enable(); + let keyboard_buffer_address = alloc_pages!(1).into(); + Keyboard::init( + &mut KEYBOARD, + keyboard_buffer_address, + NonZero::new(REGULAR_PAGE_SIZE).unwrap(), + ); + okprintln!("Initialized Keyboard"); + interrupts::enable(); } - pages_init(&MemoryMap(parsed_memory_map!())); - unsafe { BUDDY_ALLOCATOR.init() }; - let a = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; - println!("{:x?}", a); - let a = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; - println!("{:x?}", a); - let a = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; - println!("{:x?}", a); - let a = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; - println!("{:x?}", a); - let a = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; - println!("{:x?}", a); - let b = unsafe { BUDDY_ALLOCATOR.alloc_pages(2) }; - println!("{:x?}", b); - let c = unsafe { BUDDY_ALLOCATOR.alloc_pages(32) }; - println!("{:x?}", c); - panic!("") + // panic!("") // let mut pci_devices = pci::scan_pci(); // println!("Press ENTER to enumerate PCI devices!"); // let a = pci_devices.as_ptr() as usize; @@ -177,11 +159,12 @@ pub unsafe extern "C" fn _start() -> ! { // } // } - // loop { - // unsafe { - // print!("{}", KEYBOARD.assume_init_mut().read_char() ; color - // = ColorCode::new(Color::Green, Color::Black)); } - // } + loop { + unsafe { + print!("{}", KEYBOARD.assume_init_mut().read_char() ; color + = ColorCode::new(Color::Green, Color::Black)); + } + } } /// This function is called on panic. 
diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index 1a8f028..92d8e95 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -6,10 +6,62 @@ use nonmax::NonMaxU16; use crate::{alloc_pages, memory::page_descriptor::Unassigned}; +trait SlabConstructor { + fn new( + order: usize, + next: Option<&'static mut SlabDescriptor>, + ) -> Self; +} + +impl SlabConstructor for SlabDescriptor { + default fn new( + order: usize, + next: Option<&'static mut SlabDescriptor>, + ) -> SlabDescriptor { + let address = unsafe { alloc_pages!(1 << order) }; + let objects = unsafe { + core::slice::from_raw_parts_mut( + address as *mut PreallocatedObject, + ((1 << order) * REGULAR_PAGE_SIZE) / size_of::(), + ) + }; + + for (i, object) in objects.iter_mut().enumerate() { + *object = PreallocatedObject { + next_free_idx: Some(unsafe { + NonMaxU16::new_unchecked(i as u16 + 1) + }), + } + } + + objects.last_mut().unwrap().next_free_idx = None; + + SlabDescriptor { + next_free_idx: Some(unsafe { NonMaxU16::new_unchecked(0) }), + objects, + next, + } + } +} + +impl SlabConstructor> + for SlabDescriptor +{ + fn new( + _order: usize, + _next: Option< + &'static mut SlabDescriptor>, + >, + ) -> Self { + unimplemented!() + } +} + pub union SlabCaches { pub generic4: ManuallyDrop>, - pub slab_descriptor: ManuallyDrop>, - pub slab_cache: ManuallyDrop>, + pub slab_descriptor: + ManuallyDrop>>, + pub slab_cache: ManuallyDrop>>, } pub static SLABS: [SlabCaches; 1] = [SlabCaches { @@ -32,6 +84,10 @@ impl Debug for PreallocatedObject { } } +impl SlabDescriptor> { + pub fn new() {} +} + #[derive(Debug)] pub struct SlabDescriptor { /// The index in the objects array of the next free objet @@ -41,35 +97,6 @@ pub struct SlabDescriptor { } impl SlabDescriptor { - pub fn new( - order: usize, - next: Option<&'static mut SlabDescriptor>, - ) -> SlabDescriptor { - let address = unsafe { alloc_pages!(1 << order) }; - let objects = unsafe { - core::slice::from_raw_parts_mut( - address as *mut PreallocatedObject, - ((1 << order) * REGULAR_PAGE_SIZE) / size_of::(), - ) - }; - - for (i, object) in objects.iter_mut().enumerate() { - *object = PreallocatedObject { - next_free_idx: Some(unsafe { - NonMaxU16::new_unchecked(i as u16 + 1) - }), - } - } - - objects.last_mut().unwrap().next_free_idx = None; - - SlabDescriptor { - next_free_idx: Some(unsafe { NonMaxU16::new_unchecked(0) }), - objects, - next, - } - } - pub fn alloc_obj(&mut self, obj: T) -> &mut T { debug_assert!( self.next_free_idx.is_some(), @@ -85,16 +112,31 @@ impl SlabDescriptor { unsafe { &mut preallocated.allocated } } - pub unsafe fn dealloc_obj(&self, obj: &T) {} + /// Deallocate an object from this slab + /// + /// # Safety + /// This function assumes that the object address is in this slab. 
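+    /// The caller must also treat `obj` as dead after this call: its
+    /// slot is immediately reused to store the free-list index of the
+    /// next free object.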
+ pub unsafe fn dealloc_obj(&mut self, obj: &T) { + let freed_index = unsafe { + self.objects.as_ptr().offset_from( + obj as *const _ as *const PreallocatedObject, + ) as usize + }; + + self.objects[freed_index].next_free_idx = self.next_free_idx; + + self.next_free_idx = + unsafe { Some(NonMaxU16::new_unchecked(freed_index as u16)) }; + } } #[derive(Debug)] pub struct SlabCache { // TODO ADD LOCK pub buddy_order: usize, - pub partial: SlabDescriptor, - pub full: SlabDescriptor, - pub free: SlabDescriptor, + pub free: Option<&'static mut SlabDescriptor>, + pub partial: Option<&'static mut SlabDescriptor>, + pub full: Option<&'static mut SlabDescriptor>, } impl SlabCache { @@ -103,6 +145,21 @@ impl SlabCache { } } +impl SlabCache> { + pub fn initial( + buddy_order: usize, + ) -> SlabCache> { + let partial = SlabDescriptor::>::new( + buddy_order, + None, + ); + let full = SlabDescriptor::>::new( + buddy_order, + None, + ); + } +} + const trait SlabPosition { fn get_position() -> usize; } From 89638577ba9ac9dd3232ce46458baf27b114e43c Mon Sep 17 00:00:00 2001 From: sagi Date: Mon, 12 Jan 2026 23:44:43 +0200 Subject: [PATCH 28/78] derived copy and clone for unassigned --- kernel/src/memory/page_descriptor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index 8a73196..a0fa278 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -16,7 +16,7 @@ use common::{ write_volatile, }; -#[derive(Default, Debug)] +#[derive(Default, Clone, Copy, Debug)] pub struct Unassigned; pub type UnassignedPage = Page; From c717c9a19ebea0dd1f901d5a3f7f1acea25e2742 Mon Sep 17 00:00:00 2001 From: sagi Date: Mon, 12 Jan 2026 23:45:16 +0200 Subject: [PATCH 29/78] added utility function and moved references into nonnull pointers --- kernel/src/memory/allocators/slab.rs | 153 ++++++++++++++++++--------- 1 file changed, 102 insertions(+), 51 deletions(-) diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index 92d8e95..461bc89 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -1,22 +1,22 @@ -use core::{fmt::Debug, mem::ManuallyDrop}; +use core::{ + fmt::Debug, + mem::ManuallyDrop, + num::{NonZeroIsize, NonZeroUsize}, + ptr::NonNull, +}; -use common::{constants::REGULAR_PAGE_SIZE, write_volatile}; +use common::{ + constants::REGULAR_PAGE_SIZE, enums::ProcessorSubClass, write_volatile, +}; use nonmax::NonMaxU16; use crate::{alloc_pages, memory::page_descriptor::Unassigned}; -trait SlabConstructor { - fn new( - order: usize, - next: Option<&'static mut SlabDescriptor>, - ) -> Self; -} - -impl SlabConstructor for SlabDescriptor { - default fn new( +impl SlabDescriptor { + pub fn new( order: usize, - next: Option<&'static mut SlabDescriptor>, + next: Option>>, ) -> SlabDescriptor { let address = unsafe { alloc_pages!(1 << order) }; let objects = unsafe { @@ -38,22 +38,48 @@ impl SlabConstructor for SlabDescriptor { SlabDescriptor { next_free_idx: Some(unsafe { NonMaxU16::new_unchecked(0) }), - objects, + objects: NonNull::from_mut(objects.first_mut().unwrap()), + size: unsafe { NonZeroUsize::new_unchecked(objects.len()) }, next, } } + + pub fn as_unassigned(&self) -> &SlabDescriptor { + unsafe { + &*(self as *const _ as *const SlabDescriptor) + } + } + + pub fn as_unassigned_mut( + &mut self, + ) -> &mut SlabDescriptor { + unsafe { + &mut *(self as *mut _ as *mut SlabDescriptor) + } + } } -impl SlabConstructor> 
- for SlabDescriptor -{ - fn new( - _order: usize, - _next: Option< - &'static mut SlabDescriptor>, - >, - ) -> Self { - unimplemented!() +impl SlabDescriptor { + pub fn assign(&self) -> &SlabDescriptor { + unsafe { &*(self as *const _ as *const SlabDescriptor) } + } + + pub fn assign_mut(&mut self) -> &mut SlabDescriptor { + unsafe { &mut *(self as *mut _ as *mut SlabDescriptor) } + } +} + +impl SlabDescriptor> { + pub fn initial( + order: usize, + ) -> &'static mut SlabDescriptor> { + let mut descriptor = + SlabDescriptor::>::new(order, None); + + let mut d = + descriptor.alloc_obj(descriptor.as_unassigned().clone()); + + unsafe { d.as_mut().assign_mut::>() } } } @@ -84,46 +110,58 @@ impl Debug for PreallocatedObject { } } -impl SlabDescriptor> { - pub fn new() {} -} - -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct SlabDescriptor { /// The index in the objects array of the next free objet pub next_free_idx: Option, - pub objects: &'static mut [PreallocatedObject], - pub next: Option<&'static mut SlabDescriptor>, + pub objects: NonNull>, + pub size: NonZeroUsize, + pub next: Option>>, } impl SlabDescriptor { - pub fn alloc_obj(&mut self, obj: T) -> &mut T { + pub fn object_at(&self, idx: usize) -> NonNull> { + if idx * size_of::() > self.size.get() { + panic!("Out of bounds"); + } + + unsafe { self.objects.add(idx) } + } + + pub fn alloc_obj(&mut self, obj: T) -> NonNull { debug_assert!( self.next_free_idx.is_some(), "Should always be some, because if not, slab is full" ); - let preallocated = - &mut self.objects[self.next_free_idx.unwrap().get() as usize]; + let preallocated = unsafe { + self.object_at(self.next_free_idx.unwrap().get() as usize) + .as_mut() + }; + self.next_free_idx = unsafe { preallocated.next_free_idx }; write_volatile!(preallocated.allocated, ManuallyDrop::new(obj)); - unsafe { &mut preallocated.allocated } + unsafe { NonNull::from_mut(&mut preallocated.allocated) } } /// Deallocate an object from this slab /// /// # Safety /// This function assumes that the object address is in this slab. 
- pub unsafe fn dealloc_obj(&mut self, obj: &T) { + pub unsafe fn dealloc_obj(&mut self, obj: *const T) { let freed_index = unsafe { - self.objects.as_ptr().offset_from( - obj as *const _ as *const PreallocatedObject, - ) as usize + self.objects + .as_ptr() + .offset_from(obj as *const PreallocatedObject) + as usize }; - self.objects[freed_index].next_free_idx = self.next_free_idx; + unsafe { + self.object_at(freed_index).as_mut().next_free_idx = + self.next_free_idx + }; self.next_free_idx = unsafe { Some(NonMaxU16::new_unchecked(freed_index as u16)) }; @@ -139,27 +177,40 @@ pub struct SlabCache { pub full: Option<&'static mut SlabDescriptor>, } -impl SlabCache { - pub const fn new() -> SlabCache { +impl SlabCacheConstructor for SlabCache { + default fn new(buddy_order: usize) -> SlabCache { unimplemented!() } } -impl SlabCache> { - pub fn initial( +impl SlabCacheConstructor for SlabCache> { + fn new(buddy_order: usize) -> SlabCache> { + unimplemented!() + } +} + +impl SlabCache> { + pub fn initial_cache( buddy_order: usize, - ) -> SlabCache> { - let partial = SlabDescriptor::>::new( - buddy_order, - None, - ); - let full = SlabDescriptor::>::new( + ) -> SlabCache> { + let partial = + SlabDescriptor::>::initial( + buddy_order, + ); + + SlabCache { buddy_order, - None, - ); + free: None, + partial: Some(partial), + full: None, + } } } +trait SlabCacheConstructor { + fn new(buddy_order: usize) -> Self; +} + const trait SlabPosition { fn get_position() -> usize; } From 9af1cc86cfb9644556dce49f34d78d4b02d4fcec Mon Sep 17 00:00:00 2001 From: sagi Date: Wed, 14 Jan 2026 00:18:44 +0200 Subject: [PATCH 30/78] added a macro to generate slabs, slab array, and position trait. also changed static reference into NonNull --- kernel/src/memory/allocators/slab.rs | 132 +++++++++++++++++++-------- 1 file changed, 92 insertions(+), 40 deletions(-) diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index 461bc89..9d34f64 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -6,7 +6,8 @@ use core::{ }; use common::{ - constants::REGULAR_PAGE_SIZE, enums::ProcessorSubClass, write_volatile, + constants::REGULAR_PAGE_SIZE, enums::ProcessorSubClass, + late_init::LateInit, write_volatile, }; use nonmax::NonMaxU16; @@ -19,14 +20,18 @@ impl SlabDescriptor { next: Option>>, ) -> SlabDescriptor { let address = unsafe { alloc_pages!(1 << order) }; - let objects = unsafe { - core::slice::from_raw_parts_mut( - address as *mut PreallocatedObject, + let mut objects = unsafe { + NonNull::slice_from_raw_parts( + NonNull::new_unchecked( + address as *mut PreallocatedObject, + ), ((1 << order) * REGULAR_PAGE_SIZE) / size_of::(), ) }; - for (i, object) in objects.iter_mut().enumerate() { + for (i, object) in + unsafe { objects.as_mut() }.iter_mut().enumerate() + { *object = PreallocatedObject { next_free_idx: Some(unsafe { NonMaxU16::new_unchecked(i as u16 + 1) @@ -34,12 +39,13 @@ impl SlabDescriptor { } } - objects.last_mut().unwrap().next_free_idx = None; + unsafe { + objects.as_mut().last_mut().unwrap().next_free_idx = None + }; SlabDescriptor { next_free_idx: Some(unsafe { NonMaxU16::new_unchecked(0) }), - objects: NonNull::from_mut(objects.first_mut().unwrap()), - size: unsafe { NonZeroUsize::new_unchecked(objects.len()) }, + objects, next, } } @@ -70,29 +76,84 @@ impl SlabDescriptor { } impl SlabDescriptor> { - pub fn initial( + pub fn initial_descriptor( order: usize, - ) -> &'static mut SlabDescriptor> { + ) -> NonNull>> { let mut 
descriptor = SlabDescriptor::>::new(order, None); let mut d = descriptor.alloc_obj(descriptor.as_unassigned().clone()); - unsafe { d.as_mut().assign_mut::>() } + unsafe { + NonNull::from_mut( + d.as_mut().assign_mut::>(), + ) + } } } pub union SlabCaches { pub generic4: ManuallyDrop>, pub slab_descriptor: - ManuallyDrop>>, + ManuallyDrop>>, pub slab_cache: ManuallyDrop>>, + pub uninit: (), +} + +macro_rules! define_slab_system { + ($($t:ty),* $(,)?) => { + // 1. Implement the trait for each type + register_slabs!($($t),*); + + // 2. Calculate count + const COUNT: usize = [$(stringify!($t)),*].len(); + + // 3. Create the static array + pub static SLABS: [SlabCaches; COUNT] = [ + $( + // We mention $t inside a block but don't actually use it. + // This tells Rust: "Repeat this block for every type in $t" + { + stringify!($t); + SlabCaches { uninit: () } + } + ),* + ]; + } +} + +macro_rules! register_slabs { + // 1. Entry point: handle trailing commas by calling the internal @step + ($($t:ty),* $(,)?) => { + register_slabs!(@step 0; $($t),*); + }; + + // 2. The recursive step: Matches a type, a comma, and at least one more type + (@step $idx:expr; $head:ty, $($tail:ty),+) => { + impl SlabPosition for $head { + const POSITION: usize = $idx; + } + register_slabs!(@step $idx + 1; $($tail),*); + }; + + // 3. The base case: Matches exactly one last type (no trailing comma) + (@step $idx:expr; $head:ty) => { + impl SlabPosition for $head { + const POSITION: usize = $idx; + } + }; + + // 4. The empty case: If someone calls it with nothing + (@step $idx:expr; ) => {}; } +define_slab_system!(SlabDescriptor,); + +unsafe impl Send for SlabDescriptor {} +unsafe impl Sync for SlabDescriptor {} -pub static SLABS: [SlabCaches; 1] = [SlabCaches { - slab_descriptor: ManuallyDrop::new(SlabCache::new()), -}]; +unsafe impl Send for SlabCaches {} +unsafe impl Sync for SlabCaches {} /// Preallocated object in the slab allocator. /// @@ -114,20 +175,11 @@ impl Debug for PreallocatedObject { pub struct SlabDescriptor { /// The index in the objects array of the next free objet pub next_free_idx: Option, - pub objects: NonNull>, - pub size: NonZeroUsize, + pub objects: NonNull<[PreallocatedObject]>, pub next: Option>>, } impl SlabDescriptor { - pub fn object_at(&self, idx: usize) -> NonNull> { - if idx * size_of::() > self.size.get() { - panic!("Out of bounds"); - } - - unsafe { self.objects.add(idx) } - } - pub fn alloc_obj(&mut self, obj: T) -> NonNull { debug_assert!( self.next_free_idx.is_some(), @@ -135,8 +187,8 @@ impl SlabDescriptor { ); let preallocated = unsafe { - self.object_at(self.next_free_idx.unwrap().get() as usize) - .as_mut() + &mut self.objects.as_mut() + [self.next_free_idx.unwrap().get() as usize] }; self.next_free_idx = unsafe { preallocated.next_free_idx }; @@ -151,15 +203,11 @@ impl SlabDescriptor { /// # Safety /// This function assumes that the object address is in this slab. 
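    /// Passing a pointer that belongs to a different slab computes a
    /// bogus slot index (the byte offset from `objects` divided by the
    /// slot size) and silently corrupts the free list.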
pub unsafe fn dealloc_obj(&mut self, obj: *const T) { - let freed_index = unsafe { - self.objects - .as_ptr() - .offset_from(obj as *const PreallocatedObject) - as usize - }; + let freed_index = + (obj.addr() - self.objects.as_ptr().addr()) / size_of::(); unsafe { - self.object_at(freed_index).as_mut().next_free_idx = + self.objects.as_mut()[freed_index].next_free_idx = self.next_free_idx }; @@ -172,9 +220,9 @@ impl SlabDescriptor { pub struct SlabCache { // TODO ADD LOCK pub buddy_order: usize, - pub free: Option<&'static mut SlabDescriptor>, - pub partial: Option<&'static mut SlabDescriptor>, - pub full: Option<&'static mut SlabDescriptor>, + pub free: Option>>, + pub partial: Option>>, + pub full: Option>>, } impl SlabCacheConstructor for SlabCache { @@ -194,7 +242,7 @@ impl SlabCache> { buddy_order: usize, ) -> SlabCache> { let partial = - SlabDescriptor::>::initial( + SlabDescriptor::>::initial_descriptor( buddy_order, ); @@ -211,6 +259,10 @@ trait SlabCacheConstructor { fn new(buddy_order: usize) -> Self; } -const trait SlabPosition { - fn get_position() -> usize; +/// Get the position on the slab array, for a slab of the given type. +/// +/// Shouldn't implement this trait manually, and it is implemented once +/// with a macro. +pub const trait SlabPosition { + const POSITION: usize; } From d7b295e942f8f5ad755605a9319d771a24388abc Mon Sep 17 00:00:00 2001 From: sagi Date: Wed, 14 Jan 2026 00:57:18 +0200 Subject: [PATCH 31/78] remove slab caches union as it is not needed, and added assign and unassign function for the slab --- kernel/src/memory/allocators/slab.rs | 50 ++++++++++++++++------------ 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index 9d34f64..2a5cf48 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -93,14 +93,6 @@ impl SlabDescriptor> { } } -pub union SlabCaches { - pub generic4: ManuallyDrop>, - pub slab_descriptor: - ManuallyDrop>>, - pub slab_cache: ManuallyDrop>>, - pub uninit: (), -} - macro_rules! define_slab_system { ($($t:ty),* $(,)?) => { // 1. Implement the trait for each type @@ -110,13 +102,13 @@ macro_rules! define_slab_system { const COUNT: usize = [$(stringify!($t)),*].len(); // 3. Create the static array - pub static SLABS: [SlabCaches; COUNT] = [ + pub static mut SLABS: [LateInit>; COUNT] = [ $( // We mention $t inside a block but don't actually use it. // This tells Rust: "Repeat this block for every type in $t" { stringify!($t); - SlabCaches { uninit: () } + LateInit::uninit() } ),* ]; @@ -152,8 +144,8 @@ define_slab_system!(SlabDescriptor,); unsafe impl Send for SlabDescriptor {} unsafe impl Sync for SlabDescriptor {} -unsafe impl Send for SlabCaches {} -unsafe impl Sync for SlabCaches {} +unsafe impl Send for SlabCache {} +unsafe impl Sync for SlabCache {} /// Preallocated object in the slab allocator. 
/// @@ -225,22 +217,34 @@ pub struct SlabCache { pub full: Option>>, } -impl SlabCacheConstructor for SlabCache { - default fn new(buddy_order: usize) -> SlabCache { - unimplemented!() +impl SlabCache { + pub fn as_unassigned(&self) -> &SlabCache { + unsafe { &*(self as *const _ as *const SlabCache) } + } + + pub fn as_unassigned_mut(&mut self) -> &mut SlabCache { + unsafe { &mut *(self as *mut _ as *mut SlabCache) } + } +} + +impl SlabCache { + pub fn assign(&self) -> &SlabCache { + unsafe { &*(self as *const _ as *const SlabCache) } + } + + pub fn assign_mut(&mut self) -> &mut SlabCache { + unsafe { &mut *(self as *mut _ as *mut SlabCache) } } } -impl SlabCacheConstructor for SlabCache> { - fn new(buddy_order: usize) -> SlabCache> { +impl SlabCacheConstructor for SlabCache { + default fn new(buddy_order: usize) -> SlabCache { unimplemented!() } } -impl SlabCache> { - pub fn initial_cache( - buddy_order: usize, - ) -> SlabCache> { +impl SlabCacheConstructor for SlabCache> { + fn new(buddy_order: usize) -> SlabCache> { let partial = SlabDescriptor::>::initial_descriptor( buddy_order, @@ -266,3 +270,7 @@ trait SlabCacheConstructor { pub const trait SlabPosition { const POSITION: usize; } + +pub fn slab_of() -> &'static mut SlabCache { + unsafe { SLABS[T::POSITION].assign_mut() } +} From fd8775fd7342592e8d8157ddc67e2397fd0960a0 Mon Sep 17 00:00:00 2001 From: sagi Date: Wed, 14 Jan 2026 16:31:08 +0200 Subject: [PATCH 32/78] Changed T to implement SlabPosition --- .../src/memory/allocators/page_allocator/buddy.rs | 9 +++++---- kernel/src/memory/page_descriptor.rs | 15 +++++++++------ 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/kernel/src/memory/allocators/page_allocator/buddy.rs b/kernel/src/memory/allocators/page_allocator/buddy.rs index 2238ab7..46e8fc2 100644 --- a/kernel/src/memory/allocators/page_allocator/buddy.rs +++ b/kernel/src/memory/allocators/page_allocator/buddy.rs @@ -6,8 +6,9 @@ use common::{ }; use cpu_utils::structures::paging::PageTable; -use crate::memory::page_descriptor::{ - PAGES, Page, Unassigned, UnassignedPage, +use crate::memory::{ + allocators::slab::SlabPosition, + page_descriptor::{PAGES, Page, Unassigned, UnassignedPage}, }; pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { @@ -27,13 +28,13 @@ pub struct BuddyBlockMeta { } impl BuddyBlockMeta { - pub fn detach(&mut self) -> Option<*mut Page> { + pub fn detach(&mut self) -> Option<*mut Page> { let detached = self.next? 
as *mut Page; // None if there is no page to detach self.next = unsafe { (*detached).buddy_meta.next }; Some(detached) } - pub fn attach(&mut self, attachment: *mut Page) { + pub fn attach(&mut self, attachment: *mut Page) { let attachment_ref = unsafe { &mut *attachment }.as_unassigned_mut(); attachment_ref.buddy_meta.next = self.next; diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index a0fa278..4ba539f 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -1,7 +1,10 @@ +use core::ptr::NonNull; + use crate::{ memory::{ allocators::{ - page_allocator::buddy::BuddyBlockMeta, slab::SlabCache, + page_allocator::buddy::BuddyBlockMeta, + slab::{SlabCache, SlabPosition}, }, memory_map::ParsedMemoryMap, }, @@ -22,12 +25,12 @@ pub struct Unassigned; pub type UnassignedPage = Page; impl UnassignedPage { - pub fn assign(&self) -> &Page { + pub fn assign(&self) -> &Page { let ptr = self as *const _ as usize; unsafe { &*(ptr as *const Page) } } - pub fn assign_mut(&mut self) -> &mut Page { + pub fn assign_mut(&mut self) -> &mut Page { let ptr = self as *const _ as usize; unsafe { &mut *(ptr as *mut Page) } } @@ -37,12 +40,12 @@ pub static mut PAGES: LateInit<&'static mut [UnassignedPage]> = LateInit::uninit(); #[derive(Debug)] -pub struct Page { - pub owner: Option<&'static SlabCache>, +pub struct Page { + pub owner: Option>>, pub buddy_meta: BuddyBlockMeta, } -impl Page { +impl Page { pub fn as_unassigned(&self) -> &UnassignedPage { let ptr = self as *const _ as usize; unsafe { &*(ptr as *const UnassignedPage) } From 8c2a24933e461f2d9bd1bd5989f1fb58da1044f0 Mon Sep 17 00:00:00 2001 From: sagi Date: Wed, 14 Jan 2026 16:31:28 +0200 Subject: [PATCH 33/78] more progress! --- kernel/src/memory/allocators/slab.rs | 84 +++++++++++++++++----------- 1 file changed, 51 insertions(+), 33 deletions(-) diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index 2a5cf48..b311b15 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -1,20 +1,15 @@ -use core::{ - fmt::Debug, - mem::ManuallyDrop, - num::{NonZeroIsize, NonZeroUsize}, - ptr::NonNull, -}; +use core::{fmt::Debug, mem::ManuallyDrop, ptr::NonNull}; use common::{ - constants::REGULAR_PAGE_SIZE, enums::ProcessorSubClass, - late_init::LateInit, write_volatile, + constants::REGULAR_PAGE_SIZE, late_init::LateInit, write_volatile, }; +use extend::ext; use nonmax::NonMaxU16; use crate::{alloc_pages, memory::page_descriptor::Unassigned}; -impl SlabDescriptor { +impl SlabDescriptor { pub fn new( order: usize, next: Option>>, @@ -65,13 +60,20 @@ impl SlabDescriptor { } } -impl SlabDescriptor { - pub fn assign(&self) -> &SlabDescriptor { - unsafe { &*(self as *const _ as *const SlabDescriptor) } +#[ext] +impl NonNull> { + fn assign(self) -> NonNull> { + unsafe { self.as_ref().assign::() } } +} - pub fn assign_mut(&mut self) -> &mut SlabDescriptor { - unsafe { &mut *(self as *mut _ as *mut SlabDescriptor) } +impl SlabDescriptor { + pub fn assign(&self) -> NonNull> { + unsafe { + NonNull::new_unchecked( + self as *const _ as *mut SlabDescriptor, + ) + } } } @@ -82,14 +84,9 @@ impl SlabDescriptor> { let mut descriptor = SlabDescriptor::>::new(order, None); - let mut d = - descriptor.alloc_obj(descriptor.as_unassigned().clone()); + let d = descriptor.alloc_obj(descriptor.as_unassigned().clone()); - unsafe { - NonNull::from_mut( - d.as_mut().assign_mut::>(), - ) - } + unsafe { d.as_ref().assign::>() } } } @@ 
-139,13 +136,14 @@ macro_rules! register_slabs { // 4. The empty case: If someone calls it with nothing (@step $idx:expr; ) => {}; } + define_slab_system!(SlabDescriptor,); -unsafe impl Send for SlabDescriptor {} -unsafe impl Sync for SlabDescriptor {} +unsafe impl Send for SlabDescriptor {} +unsafe impl Sync for SlabDescriptor {} -unsafe impl Send for SlabCache {} -unsafe impl Sync for SlabCache {} +unsafe impl Send for SlabCache {} +unsafe impl Sync for SlabCache {} /// Preallocated object in the slab allocator. /// @@ -164,14 +162,14 @@ impl Debug for PreallocatedObject { } #[derive(Debug, Clone)] -pub struct SlabDescriptor { +pub struct SlabDescriptor { /// The index in the objects array of the next free objet pub next_free_idx: Option, pub objects: NonNull<[PreallocatedObject]>, pub next: Option>>, } -impl SlabDescriptor { +impl SlabDescriptor { pub fn alloc_obj(&mut self, obj: T) -> NonNull { debug_assert!( self.next_free_idx.is_some(), @@ -209,7 +207,7 @@ impl SlabDescriptor { } #[derive(Debug)] -pub struct SlabCache { +pub struct SlabCache { // TODO ADD LOCK pub buddy_order: usize, pub free: Option>>, @@ -217,7 +215,7 @@ pub struct SlabCache { pub full: Option>>, } -impl SlabCache { +impl SlabCache { pub fn as_unassigned(&self) -> &SlabCache { unsafe { &*(self as *const _ as *const SlabCache) } } @@ -225,21 +223,37 @@ impl SlabCache { pub fn as_unassigned_mut(&mut self) -> &mut SlabCache { unsafe { &mut *(self as *mut _ as *mut SlabCache) } } + + pub fn alloc(&self, obj: T) -> NonNull { + unimplemented!() + } + + pub fn dealloc(&self, obj: NonNull) { + unimplemented!() + } } impl SlabCache { - pub fn assign(&self) -> &SlabCache { + pub fn assign(&self) -> &SlabCache { unsafe { &*(self as *const _ as *const SlabCache) } } - pub fn assign_mut(&mut self) -> &mut SlabCache { + pub fn assign_mut(&mut self) -> &mut SlabCache { unsafe { &mut *(self as *mut _ as *mut SlabCache) } } } -impl SlabCacheConstructor for SlabCache { +impl SlabCacheConstructor for SlabCache { default fn new(buddy_order: usize) -> SlabCache { - unimplemented!() + let free = slab_of::>() + .alloc(SlabDescriptor::new(buddy_order, None)); + + SlabCache { + buddy_order, + free: Some(unsafe { free.as_ref().assign::() }), + partial: None, + full: None, + } } } @@ -274,3 +288,7 @@ pub const trait SlabPosition { pub fn slab_of() -> &'static mut SlabCache { unsafe { SLABS[T::POSITION].assign_mut() } } + +impl SlabPosition for Unassigned { + const POSITION: usize = usize::MAX; +} From c69a8bb3da2033803af7d32bebd111aba2c4bd2a Mon Sep 17 00:00:00 2001 From: sagi Date: Wed, 14 Jan 2026 18:44:37 +0200 Subject: [PATCH 34/78] split slab allocation into files for organization and fixed paths that changed because of that. 
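The slab allocator becomes a directory: `slab.rs` keeps the global `SLABS` array, the `slab_of` accessor, the marker extension, and the `Send`/`Sync` impls, while `cache.rs`, `descriptor.rs`, `macros.rs`, and `traits.rs` take the `SlabCache`, the `SlabDescriptor` with `PreallocatedObject`, the registration macros, and the `SlabPosition`/`SlabCacheConstructor` traits respectively; `preallocated.rs` is created empty for now.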
--- .../memory/allocators/page_allocator/buddy.rs | 2 +- kernel/src/memory/allocators/slab.rs | 300 ++---------------- kernel/src/memory/allocators/slab/cache.rs | 66 ++++ .../src/memory/allocators/slab/descriptor.rs | 125 ++++++++ kernel/src/memory/allocators/slab/macros.rs | 42 +++ .../memory/allocators/slab/preallocated.rs | 0 kernel/src/memory/allocators/slab/traits.rs | 17 + kernel/src/memory/page_descriptor.rs | 2 +- 8 files changed, 274 insertions(+), 280 deletions(-) create mode 100644 kernel/src/memory/allocators/slab/cache.rs create mode 100644 kernel/src/memory/allocators/slab/descriptor.rs create mode 100644 kernel/src/memory/allocators/slab/macros.rs create mode 100644 kernel/src/memory/allocators/slab/preallocated.rs create mode 100644 kernel/src/memory/allocators/slab/traits.rs diff --git a/kernel/src/memory/allocators/page_allocator/buddy.rs b/kernel/src/memory/allocators/page_allocator/buddy.rs index 46e8fc2..5a00c78 100644 --- a/kernel/src/memory/allocators/page_allocator/buddy.rs +++ b/kernel/src/memory/allocators/page_allocator/buddy.rs @@ -7,7 +7,7 @@ use common::{ use cpu_utils::structures::paging::PageTable; use crate::memory::{ - allocators::slab::SlabPosition, + allocators::slab::traits::SlabPosition, page_descriptor::{PAGES, Page, Unassigned, UnassignedPage}, }; diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index b311b15..6aac599 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -1,65 +1,29 @@ -use core::{fmt::Debug, mem::ManuallyDrop, ptr::NonNull}; - -use common::{ - constants::REGULAR_PAGE_SIZE, late_init::LateInit, write_volatile, +pub mod cache; +pub mod descriptor; +pub mod macros; +pub mod traits; + +use crate::{ + define_slab_system, + memory::{ + allocators::slab::{ + cache::SlabCache, descriptor::SlabDescriptor, + traits::SlabPosition, + }, + page_descriptor::Unassigned, + }, }; +use core::ptr::NonNull; -use extend::ext; -use nonmax::NonMaxU16; - -use crate::{alloc_pages, memory::page_descriptor::Unassigned}; - -impl SlabDescriptor { - pub fn new( - order: usize, - next: Option>>, - ) -> SlabDescriptor { - let address = unsafe { alloc_pages!(1 << order) }; - let mut objects = unsafe { - NonNull::slice_from_raw_parts( - NonNull::new_unchecked( - address as *mut PreallocatedObject, - ), - ((1 << order) * REGULAR_PAGE_SIZE) / size_of::(), - ) - }; - - for (i, object) in - unsafe { objects.as_mut() }.iter_mut().enumerate() - { - *object = PreallocatedObject { - next_free_idx: Some(unsafe { - NonMaxU16::new_unchecked(i as u16 + 1) - }), - } - } - - unsafe { - objects.as_mut().last_mut().unwrap().next_free_idx = None - }; - - SlabDescriptor { - next_free_idx: Some(unsafe { NonMaxU16::new_unchecked(0) }), - objects, - next, - } - } - - pub fn as_unassigned(&self) -> &SlabDescriptor { - unsafe { - &*(self as *const _ as *const SlabDescriptor) - } - } +// Global Slabs Array Definition +define_slab_system!(SlabDescriptor,); - pub fn as_unassigned_mut( - &mut self, - ) -> &mut SlabDescriptor { - unsafe { - &mut *(self as *mut _ as *mut SlabDescriptor) - } - } +pub fn slab_of() -> &'static mut SlabCache { + unsafe { SLABS[T::POSITION].assign_mut() } } +// Marker Extensions +use extend::ext; #[ext] impl NonNull> { fn assign(self) -> NonNull> { @@ -67,228 +31,8 @@ impl NonNull> { } } -impl SlabDescriptor { - pub fn assign(&self) -> NonNull> { - unsafe { - NonNull::new_unchecked( - self as *const _ as *mut SlabDescriptor, - ) - } - } -} - -impl SlabDescriptor> { - pub fn 
initial_descriptor( - order: usize, - ) -> NonNull>> { - let mut descriptor = - SlabDescriptor::>::new(order, None); - - let d = descriptor.alloc_obj(descriptor.as_unassigned().clone()); - - unsafe { d.as_ref().assign::>() } - } -} - -macro_rules! define_slab_system { - ($($t:ty),* $(,)?) => { - // 1. Implement the trait for each type - register_slabs!($($t),*); - - // 2. Calculate count - const COUNT: usize = [$(stringify!($t)),*].len(); - - // 3. Create the static array - pub static mut SLABS: [LateInit>; COUNT] = [ - $( - // We mention $t inside a block but don't actually use it. - // This tells Rust: "Repeat this block for every type in $t" - { - stringify!($t); - LateInit::uninit() - } - ),* - ]; - } -} - -macro_rules! register_slabs { - // 1. Entry point: handle trailing commas by calling the internal @step - ($($t:ty),* $(,)?) => { - register_slabs!(@step 0; $($t),*); - }; - - // 2. The recursive step: Matches a type, a comma, and at least one more type - (@step $idx:expr; $head:ty, $($tail:ty),+) => { - impl SlabPosition for $head { - const POSITION: usize = $idx; - } - register_slabs!(@step $idx + 1; $($tail),*); - }; - - // 3. The base case: Matches exactly one last type (no trailing comma) - (@step $idx:expr; $head:ty) => { - impl SlabPosition for $head { - const POSITION: usize = $idx; - } - }; - - // 4. The empty case: If someone calls it with nothing - (@step $idx:expr; ) => {}; -} - -define_slab_system!(SlabDescriptor,); - +// Thread safety implementations unsafe impl Send for SlabDescriptor {} unsafe impl Sync for SlabDescriptor {} - unsafe impl Send for SlabCache {} unsafe impl Sync for SlabCache {} - -/// Preallocated object in the slab allocator. -/// -/// When a slab is initialized, each position will include the index of the -/// next free object, when the object is allocated this index will be -/// overwrite by the objects data thus wasting no space on the free list. -pub union PreallocatedObject { - pub allocated: ManuallyDrop, - pub next_free_idx: Option, -} - -impl Debug for PreallocatedObject { - fn fmt(&self, _f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - Ok(()) - } -} - -#[derive(Debug, Clone)] -pub struct SlabDescriptor { - /// The index in the objects array of the next free objet - pub next_free_idx: Option, - pub objects: NonNull<[PreallocatedObject]>, - pub next: Option>>, -} - -impl SlabDescriptor { - pub fn alloc_obj(&mut self, obj: T) -> NonNull { - debug_assert!( - self.next_free_idx.is_some(), - "Should always be some, because if not, slab is full" - ); - - let preallocated = unsafe { - &mut self.objects.as_mut() - [self.next_free_idx.unwrap().get() as usize] - }; - - self.next_free_idx = unsafe { preallocated.next_free_idx }; - - write_volatile!(preallocated.allocated, ManuallyDrop::new(obj)); - - unsafe { NonNull::from_mut(&mut preallocated.allocated) } - } - - /// Deallocate an object from this slab - /// - /// # Safety - /// This function assumes that the object address is in this slab. 
- pub unsafe fn dealloc_obj(&mut self, obj: *const T) { - let freed_index = - (obj.addr() - self.objects.as_ptr().addr()) / size_of::(); - - unsafe { - self.objects.as_mut()[freed_index].next_free_idx = - self.next_free_idx - }; - - self.next_free_idx = - unsafe { Some(NonMaxU16::new_unchecked(freed_index as u16)) }; - } -} - -#[derive(Debug)] -pub struct SlabCache { - // TODO ADD LOCK - pub buddy_order: usize, - pub free: Option>>, - pub partial: Option>>, - pub full: Option>>, -} - -impl SlabCache { - pub fn as_unassigned(&self) -> &SlabCache { - unsafe { &*(self as *const _ as *const SlabCache) } - } - - pub fn as_unassigned_mut(&mut self) -> &mut SlabCache { - unsafe { &mut *(self as *mut _ as *mut SlabCache) } - } - - pub fn alloc(&self, obj: T) -> NonNull { - unimplemented!() - } - - pub fn dealloc(&self, obj: NonNull) { - unimplemented!() - } -} - -impl SlabCache { - pub fn assign(&self) -> &SlabCache { - unsafe { &*(self as *const _ as *const SlabCache) } - } - - pub fn assign_mut(&mut self) -> &mut SlabCache { - unsafe { &mut *(self as *mut _ as *mut SlabCache) } - } -} - -impl SlabCacheConstructor for SlabCache { - default fn new(buddy_order: usize) -> SlabCache { - let free = slab_of::>() - .alloc(SlabDescriptor::new(buddy_order, None)); - - SlabCache { - buddy_order, - free: Some(unsafe { free.as_ref().assign::() }), - partial: None, - full: None, - } - } -} - -impl SlabCacheConstructor for SlabCache> { - fn new(buddy_order: usize) -> SlabCache> { - let partial = - SlabDescriptor::>::initial_descriptor( - buddy_order, - ); - - SlabCache { - buddy_order, - free: None, - partial: Some(partial), - full: None, - } - } -} - -trait SlabCacheConstructor { - fn new(buddy_order: usize) -> Self; -} - -/// Get the position on the slab array, for a slab of the given type. -/// -/// Shouldn't implement this trait manually, and it is implemented once -/// with a macro. 
-pub const trait SlabPosition { - const POSITION: usize; -} - -pub fn slab_of() -> &'static mut SlabCache { - unsafe { SLABS[T::POSITION].assign_mut() } -} - -impl SlabPosition for Unassigned { - const POSITION: usize = usize::MAX; -} diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs new file mode 100644 index 0000000..dffb9f6 --- /dev/null +++ b/kernel/src/memory/allocators/slab/cache.rs @@ -0,0 +1,66 @@ +use super::descriptor::SlabDescriptor; +use super::slab_of; +use super::traits::{SlabCacheConstructor, SlabPosition}; +use crate::memory::page_descriptor::Unassigned; +use core::ptr::NonNull; + +#[derive(Debug)] +pub struct SlabCache { + pub buddy_order: usize, + pub free: Option>>, + pub partial: Option>>, + pub full: Option>>, +} + +impl SlabCache { + pub fn as_unassigned(&self) -> &SlabCache { + unsafe { &*(self as *const _ as *const SlabCache) } + } + + pub fn as_unassigned_mut(&mut self) -> &mut SlabCache { + unsafe { &mut *(self as *mut _ as *mut SlabCache) } + } + + pub fn alloc(&self, _obj: T) -> NonNull { + unimplemented!() + } + pub fn dealloc(&self, _obj: NonNull) { + unimplemented!() + } +} + +impl SlabCache { + pub fn assign(&self) -> &SlabCache { + unsafe { &*(self as *const _ as *const SlabCache) } + } + + pub fn assign_mut(&mut self) -> &mut SlabCache { + unsafe { &mut *(self as *mut _ as *mut SlabCache) } + } +} + +impl SlabCacheConstructor for SlabCache { + default fn new(buddy_order: usize) -> SlabCache { + let free = slab_of::>() + .alloc(SlabDescriptor::new(buddy_order, None)); + + SlabCache { + buddy_order, + free: Some(unsafe { free.as_ref().assign::() }), + partial: None, + full: None, + } + } +} + +impl SlabCacheConstructor for SlabCache> { + fn new(buddy_order: usize) -> SlabCache> { + let partial = SlabDescriptor::>::initial_descriptor(buddy_order); + SlabCache { + buddy_order, + free: None, + partial: Some(partial), + full: None, + } + } +} diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs new file mode 100644 index 0000000..7578e96 --- /dev/null +++ b/kernel/src/memory/allocators/slab/descriptor.rs @@ -0,0 +1,125 @@ +use super::traits::SlabPosition; +use crate::{alloc_pages, memory::page_descriptor::Unassigned}; +use common::{constants::REGULAR_PAGE_SIZE, write_volatile}; +use core::{ + fmt::Debug, + mem::{ManuallyDrop, size_of}, + ptr::NonNull, +}; +use nonmax::NonMaxU16; + +/// Preallocated object in the slab allocator. 
+pub union PreallocatedObject { + pub allocated: ManuallyDrop, + pub next_free_idx: Option, +} + +impl Debug for PreallocatedObject { + fn fmt(&self, _f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + Ok(()) + } +} + +#[derive(Debug, Clone)] +pub struct SlabDescriptor { + pub next_free_idx: Option, + pub objects: NonNull<[PreallocatedObject]>, + pub next: Option>>, +} + +impl SlabDescriptor { + pub fn new( + order: usize, + next: Option>>, + ) -> SlabDescriptor { + let address = unsafe { alloc_pages!(1 << order) }; + let mut objects = unsafe { + NonNull::slice_from_raw_parts( + NonNull::new_unchecked( + address as *mut PreallocatedObject, + ), + ((1 << order) * REGULAR_PAGE_SIZE) + / size_of::>(), + ) + }; + + for (i, object) in + unsafe { objects.as_mut() }.iter_mut().enumerate() + { + *object = PreallocatedObject { + next_free_idx: Some(unsafe { + NonMaxU16::new_unchecked(i as u16 + 1) + }), + } + } + + unsafe { + objects.as_mut().last_mut().unwrap().next_free_idx = None + }; + + SlabDescriptor { + next_free_idx: Some(unsafe { NonMaxU16::new_unchecked(0) }), + objects, + next, + } + } + + pub fn alloc_obj(&mut self, obj: T) -> NonNull { + debug_assert!(self.next_free_idx.is_some(), "Slab is full"); + + let idx = self.next_free_idx.unwrap().get() as usize; + let preallocated = unsafe { &mut self.objects.as_mut()[idx] }; + + self.next_free_idx = unsafe { preallocated.next_free_idx }; + write_volatile!(preallocated.allocated, ManuallyDrop::new(obj)); + + unsafe { NonNull::from_mut(&mut preallocated.allocated) } + } + + pub unsafe fn dealloc_obj(&mut self, obj: *const T) { + let freed_index = (obj.addr() - self.objects.as_ptr().addr()) + / size_of::>(); + + unsafe { + self.objects.as_mut()[freed_index].next_free_idx = + self.next_free_idx; + }; + self.next_free_idx = + unsafe { Some(NonMaxU16::new_unchecked(freed_index as u16)) }; + } + + pub fn as_unassigned(&self) -> &SlabDescriptor { + unsafe { + &*(self as *const _ as *const SlabDescriptor) + } + } + + pub fn as_unassigned_mut( + &mut self, + ) -> &mut SlabDescriptor { + unsafe { + &mut *(self as *mut _ as *mut SlabDescriptor) + } + } +} + +impl SlabDescriptor { + pub fn assign(&self) -> NonNull> { + unsafe { + NonNull::new_unchecked( + self as *const _ as *mut SlabDescriptor, + ) + } + } +} + +impl SlabDescriptor> { + pub fn initial_descriptor( + order: usize, + ) -> NonNull>> { + let mut descriptor = + SlabDescriptor::>::new(order, None); + let d = descriptor.alloc_obj(descriptor.as_unassigned().clone()); + unsafe { d.as_ref().assign::>() } + } +} diff --git a/kernel/src/memory/allocators/slab/macros.rs b/kernel/src/memory/allocators/slab/macros.rs new file mode 100644 index 0000000..4ba8f83 --- /dev/null +++ b/kernel/src/memory/allocators/slab/macros.rs @@ -0,0 +1,42 @@ +#[macro_export] +macro_rules! register_slabs { + ($($t:ty),* $(,)?) => { + $crate::register_slabs!(@step 0; $($t),*); + }; + + (@step $idx:expr; $head:ty, $($tail:ty),+) => { + impl $crate::slab::traits::SlabPosition for $head { + const POSITION: usize = $idx; + } + $crate::register_slabs!(@step $idx + 1; $($tail),*); + }; + + (@step $idx:expr; $head:ty) => { + impl $crate::memory::allocators::slab::traits::SlabPosition for $head { + const POSITION: usize = $idx; + } + }; + + (@step $idx:expr; ) => {}; +} + +#[macro_export] +macro_rules! define_slab_system { + ($($t:ty),* $(,)?) 
=> { + + $crate::register_slabs!($($t),*); + + const COUNT: usize = [$(stringify!($t)),*].len(); + + pub static mut SLABS: [ + + common::late_init::LateInit>; COUNT] = [ + $( + { + stringify!($t); + common::late_init::LateInit::uninit() + } + ),* + ]; + } +} diff --git a/kernel/src/memory/allocators/slab/preallocated.rs b/kernel/src/memory/allocators/slab/preallocated.rs new file mode 100644 index 0000000..e69de29 diff --git a/kernel/src/memory/allocators/slab/traits.rs b/kernel/src/memory/allocators/slab/traits.rs new file mode 100644 index 0000000..ca2f35f --- /dev/null +++ b/kernel/src/memory/allocators/slab/traits.rs @@ -0,0 +1,17 @@ +use crate::memory::page_descriptor::Unassigned; + +/// Get the position on the slab array, for a slab of the given type. +/// +/// Shouldn't implement this trait manually; it is implemented +/// via the `define_slab_system` macro. +pub trait SlabPosition { + const POSITION: usize; +} + +impl SlabPosition for Unassigned { + const POSITION: usize = usize::MAX; +} + +pub trait SlabCacheConstructor { + fn new(buddy_order: usize) -> Self; +} diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index 4ba539f..7014e97 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -4,7 +4,7 @@ use crate::{ memory::{ allocators::{ page_allocator::buddy::BuddyBlockMeta, - slab::{SlabCache, SlabPosition}, + slab::{cache::SlabCache, traits::SlabPosition}, }, memory_map::ParsedMemoryMap, }, From 28ce7b83410ac17530411539c2f60f9b09704dc1 Mon Sep 17 00:00:00 2001 From: sagi Date: Wed, 14 Jan 2026 23:28:44 +0200 Subject: [PATCH 35/78] added generic slab types --- kernel/src/memory/allocators/slab.rs | 50 ++++++++++++++++++++++------ 1 file changed, 40 insertions(+), 10 deletions(-) diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index 6aac599..2321440 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -7,31 +7,61 @@ use crate::{ define_slab_system, memory::{ allocators::slab::{ - cache::SlabCache, descriptor::SlabDescriptor, - traits::SlabPosition, + cache::SlabCache, + descriptor::SlabDescriptor, + traits::{SlabCacheConstructor, SlabPosition}, }, page_descriptor::Unassigned, }, }; use core::ptr::NonNull; -// Global Slabs Array Definition -define_slab_system!(SlabDescriptor,); +pub struct Generic8(pub usize); +pub struct Generic16(pub [usize; 2]); +pub struct Generic32(pub [usize; 4]); +pub struct Generic64(pub [usize; 8]); +pub struct Generic96(pub [usize; 12]); +pub struct Generic128(pub [usize; 16]); +pub struct Generic192(pub [usize; 24]); +pub struct Generic256(pub [usize; 32]); +pub struct Generic512(pub [usize; 64]); +pub struct Generic1024(pub [usize; 128]); +pub struct Generic2048(pub [usize; 256]); +pub struct Generic4096(pub [usize; 512]); +pub struct Generic8192(pub [usize; 1024]); -pub fn slab_of() -> &'static mut SlabCache { - unsafe { SLABS[T::POSITION].assign_mut() } +define_slab_system!( + SlabDescriptor, + Generic8, + Generic16, + Generic32, + Generic64, + Generic96, + Generic128, + Generic192, + Generic256, + Generic512, + Generic1024, + Generic2048, + Generic4096, + Generic8192, +); + +pub static mut SLAB_ALLOCATOR: SlabAllocator = SlabAllocator::new(); + +impl SlabAllocator { + pub fn slab_of(&self) -> NonNull> { + self.slabs[T::POSITION].assign::() + } } -// Marker Extensions -use extend::ext; -#[ext] +#[extend::ext] impl NonNull> { fn assign(self) -> NonNull> { unsafe { self.as_ref().assign::() } 
} } -// Thread safety implementations unsafe impl Send for SlabDescriptor {} unsafe impl Sync for SlabDescriptor {} unsafe impl Send for SlabCache {} From f5417a0062fa173fcddbdbcb0d62cc48ba5758b2 Mon Sep 17 00:00:00 2001 From: sagi Date: Wed, 14 Jan 2026 23:28:54 +0200 Subject: [PATCH 36/78] added alloc function --- kernel/src/memory/allocators/slab/cache.rs | 59 +++++++++++++++++----- 1 file changed, 45 insertions(+), 14 deletions(-) diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs index dffb9f6..256f609 100644 --- a/kernel/src/memory/allocators/slab/cache.rs +++ b/kernel/src/memory/allocators/slab/cache.rs @@ -1,10 +1,12 @@ +use common::enums::ProgrammingInterface; + use super::descriptor::SlabDescriptor; -use super::slab_of; use super::traits::{SlabCacheConstructor, SlabPosition}; +use crate::memory::allocators::slab::SLAB_ALLOCATOR; use crate::memory::page_descriptor::Unassigned; use core::ptr::NonNull; -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct SlabCache { pub buddy_order: usize, pub free: Option>>, @@ -21,28 +23,57 @@ impl SlabCache { unsafe { &mut *(self as *mut _ as *mut SlabCache) } } - pub fn alloc(&self, _obj: T) -> NonNull { - unimplemented!() + pub fn alloc(&mut self, obj: T) -> NonNull { + if let Some(mut partial) = self.partial { + let partial = unsafe { partial.as_mut() }; + + let allocation = partial.alloc_obj(obj); + + if partial.next_free_idx.is_none() { + self.partial = partial.next; + partial.next = self.full; + self.full = Some(NonNull::from_mut(partial)); + } + return allocation; + } + if let Some(mut free) = self.free { + let free = unsafe { free.as_mut() }; + + let allocation = free.alloc_obj(obj); + + self.free = free.next; + free.next = self.partial; + self.partial = Some(NonNull::from_mut(free)); + + return allocation; + } + + todo!( + "Handle cases where partial and free are full, and \ + allocation from the page allocator is needed." 
+        )
     }
-    pub fn dealloc(&self, _obj: NonNull) {
-        unimplemented!()
+    pub fn dealloc(&self, ptr: NonNull) {
+        todo!()
     }
 }
 
 impl SlabCache {
-    pub fn assign(&self) -> &SlabCache {
-        unsafe { &*(self as *const _ as *const SlabCache) }
-    }
-
-    pub fn assign_mut(&mut self) -> &mut SlabCache {
-        unsafe { &mut *(self as *mut _ as *mut SlabCache) }
+    pub fn assign(&self) -> NonNull> {
+        unsafe {
+            NonNull::new_unchecked(self as *const _ as *mut SlabCache)
+        }
     }
 }
 
 impl SlabCacheConstructor for SlabCache {
     default fn new(buddy_order: usize) -> SlabCache {
-        let free = slab_of::>()
-            .alloc(SlabDescriptor::new(buddy_order, None));
+        let free = unsafe {
+            SLAB_ALLOCATOR
+                .slab_of::>()
+                .as_mut()
+                .alloc(SlabDescriptor::new(buddy_order, None))
+        };
 
         SlabCache {
             buddy_order,

From 192f31a5cfb6cfe9aedc61c2171a143d82d63c18 Mon Sep 17 00:00:00 2001
From: sagi
Date: Wed, 14 Jan 2026 23:29:13 +0200
Subject: [PATCH 37/78] fixed message on debug print

---
 kernel/src/memory/allocators/slab/descriptor.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs
index 7578e96..575ad1b 100644
--- a/kernel/src/memory/allocators/slab/descriptor.rs
+++ b/kernel/src/memory/allocators/slab/descriptor.rs
@@ -65,7 +65,10 @@ impl SlabDescriptor {
     }
 
     pub fn alloc_obj(&mut self, obj: T) -> NonNull {
-        debug_assert!(self.next_free_idx.is_some(), "Slab is full");
+        debug_assert!(
+            self.next_free_idx.is_some(),
+            "Called allocate on a full slab"
+        );
 
         let idx = self.next_free_idx.unwrap().get() as usize;
         let preallocated = unsafe { &mut self.objects.as_mut()[idx] };

From f628485be344d5cb724d6aa32004ffd2c778ebb1 Mon Sep 17 00:00:00 2001
From: sagi
Date: Wed, 14 Jan 2026 23:30:53 +0200
Subject: [PATCH 38/78] added implementation of the init function for all the
 slabs that are created. This is really useful because now each slab can
 implement its own new function if necessary, and init will call the
 dedicated function instead of a general one.

---
 kernel/src/memory/allocators/slab/macros.rs | 35 +++++++++++++++------
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/kernel/src/memory/allocators/slab/macros.rs b/kernel/src/memory/allocators/slab/macros.rs
index 4ba8f83..39de3b6 100644
--- a/kernel/src/memory/allocators/slab/macros.rs
+++ b/kernel/src/memory/allocators/slab/macros.rs
@@ -5,7 +5,7 @@ macro_rules! register_slabs {
     };
 
     (@step $idx:expr; $head:ty, $($tail:ty),+) => {
-        impl $crate::slab::traits::SlabPosition for $head {
+        impl $crate::memory::allocators::slab::traits::SlabPosition for $head {
             const POSITION: usize = $idx;
         }
         $crate::register_slabs!(@step $idx + 1; $($tail),*);
@@ -23,20 +23,37 @@ macro_rules! register_slabs {
 #[macro_export]
 macro_rules! define_slab_system {
     ($($t:ty),* $(,)?) => {
+        use common::constants::REGULAR_PAGE_SIZE;
 
         $crate::register_slabs!($($t),*);
 
         const COUNT: usize = [$(stringify!($t)),*].len();
 
-        pub static mut SLABS: [
+        pub struct SlabAllocator {
+            slabs: [common::late_init::LateInit>; COUNT]
+        }
 
-        common::late_init::LateInit>; COUNT] = [
-            $(
-                {
-                    stringify!($t);
-                    common::late_init::LateInit::uninit()
+        impl SlabAllocator {
+            pub const fn new() -> Self {
+                Self {
+                    slabs: [
+                        $({
+                            let _ = stringify!($t);
+                            common::late_init::LateInit::uninit()
+                        }),*
                 }
-            ),*
-        ];
+            }
+
+            pub fn init(&'static mut self) {
+                $(
+                    let index = <$t>::POSITION;
+
+                    self.slabs[index].write(SlabCache::<$t>::new(
+                        size_of::<$t>().next_multiple_of(REGULAR_PAGE_SIZE) / REGULAR_PAGE_SIZE
+                    ).as_unassigned().clone());
+                )*
+            }
+        }
     }
 }

From f1723708b860ec584873591bf15ce7b2888a635a Mon Sep 17 00:00:00 2001
From: sagi
Date: Thu, 15 Jan 2026 23:31:45 +0200
Subject: [PATCH 39/78] renamed the function to alloc; it now returns the
 address without taking and writing the object. We now have a seemingly
 working kmalloc function!

---
 kernel/src/memory/allocators/slab.rs            | 18 ++++++++++++++++++
 kernel/src/memory/allocators/slab/cache.rs      | 16 ++++++++--------
 .../src/memory/allocators/slab/descriptor.rs    | 17 ++++++++++-------
 kernel/src/memory/allocators/slab/traits.rs     |  2 +-
 4 files changed, 37 insertions(+), 16 deletions(-)

diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs
index 2321440..73f391b 100644
--- a/kernel/src/memory/allocators/slab.rs
+++ b/kernel/src/memory/allocators/slab.rs
@@ -16,18 +16,31 @@ use crate::{
 };
 use core::ptr::NonNull;
 
+#[derive(Debug)]
 pub struct Generic8(pub usize);
+#[derive(Debug)]
 pub struct Generic16(pub [usize; 2]);
+#[derive(Debug)]
 pub struct Generic32(pub [usize; 4]);
+#[derive(Debug)]
 pub struct Generic64(pub [usize; 8]);
+#[derive(Debug)]
 pub struct Generic96(pub [usize; 12]);
+#[derive(Debug)]
 pub struct Generic128(pub [usize; 16]);
+#[derive(Debug)]
 pub struct Generic192(pub [usize; 24]);
+#[derive(Debug)]
 pub struct Generic256(pub [usize; 32]);
+#[derive(Debug)]
 pub struct Generic512(pub [usize; 64]);
+#[derive(Debug)]
 pub struct Generic1024(pub [usize; 128]);
+#[derive(Debug)]
 pub struct Generic2048(pub [usize; 256]);
+#[derive(Debug)]
 pub struct Generic4096(pub [usize; 512]);
+#[derive(Debug)]
 pub struct Generic8192(pub [usize; 1024]);
 
 define_slab_system!(
@@ -53,6 +66,11 @@ impl SlabAllocator {
     pub fn slab_of(&self) -> NonNull> {
         self.slabs[T::POSITION].assign::()
     }
+
+    pub fn kmalloc(&self) -> NonNull {
+        let mut slab = self.slab_of::();
+        unsafe { slab.as_mut().alloc() }
+    }
 }
 
 #[extend::ext]
diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs
index 256f609..c38a983 100644
--- a/kernel/src/memory/allocators/slab/cache.rs
+++ b/kernel/src/memory/allocators/slab/cache.rs
@@ -1,5 +1,3 @@
-use common::enums::ProgrammingInterface;
-
 use super::descriptor::SlabDescriptor;
 use super::traits::{SlabCacheConstructor, SlabPosition};
 use crate::memory::allocators::slab::SLAB_ALLOCATOR;
@@ -23,11 +21,11 @@ impl SlabCache {
         unsafe { &mut *(self as *mut _ as *mut SlabCache) }
     }
 
-    pub fn alloc(&mut self, obj: T) -> NonNull {
+    pub fn alloc(&mut self) -> NonNull {
         if let Some(mut partial) = self.partial {
             let partial = unsafe { partial.as_mut() };
 
-            let allocation = partial.alloc_obj(obj);
+            let allocation = partial.alloc();
 
             if partial.next_free_idx.is_none() {
                 self.partial = partial.next;
@@ -39,7 +37,7 @@ impl SlabCache {
         if let Some(mut free) = self.free {
             let free =
unsafe { free.as_mut() }; - let allocation = free.alloc_obj(obj); + let allocation = free.alloc(); self.free = free.next; free.next = self.partial; @@ -53,7 +51,7 @@ impl SlabCache { allocation from the page allocator is needed." ) } - pub fn dealloc(&self, ptr: NonNull) { + pub fn dealloc(&self, _ptr: NonNull) { todo!() } } @@ -68,13 +66,15 @@ impl SlabCache { impl SlabCacheConstructor for SlabCache { default fn new(buddy_order: usize) -> SlabCache { - let free = unsafe { + let mut free = unsafe { SLAB_ALLOCATOR .slab_of::>() .as_mut() - .alloc(SlabDescriptor::new(buddy_order, None)) + .alloc() }; + unsafe { *free.as_mut() = SlabDescriptor::new(buddy_order, None) } + SlabCache { buddy_order, free: Some(unsafe { free.as_ref().assign::() }), diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs index 575ad1b..b24b2fa 100644 --- a/kernel/src/memory/allocators/slab/descriptor.rs +++ b/kernel/src/memory/allocators/slab/descriptor.rs @@ -1,6 +1,6 @@ use super::traits::SlabPosition; use crate::{alloc_pages, memory::page_descriptor::Unassigned}; -use common::{constants::REGULAR_PAGE_SIZE, write_volatile}; +use common::constants::REGULAR_PAGE_SIZE; use core::{ fmt::Debug, mem::{ManuallyDrop, size_of}, @@ -64,7 +64,7 @@ impl SlabDescriptor { } } - pub fn alloc_obj(&mut self, obj: T) -> NonNull { + pub fn alloc(&mut self) -> NonNull { debug_assert!( self.next_free_idx.is_some(), "Called allocate on a full slab" @@ -74,13 +74,12 @@ impl SlabDescriptor { let preallocated = unsafe { &mut self.objects.as_mut()[idx] }; self.next_free_idx = unsafe { preallocated.next_free_idx }; - write_volatile!(preallocated.allocated, ManuallyDrop::new(obj)); unsafe { NonNull::from_mut(&mut preallocated.allocated) } } - pub unsafe fn dealloc_obj(&mut self, obj: *const T) { - let freed_index = (obj.addr() - self.objects.as_ptr().addr()) + pub unsafe fn dealloc(&mut self, ptr: *const T) { + let freed_index = (ptr.addr() - self.objects.as_ptr().addr()) / size_of::>(); unsafe { @@ -122,7 +121,11 @@ impl SlabDescriptor> { ) -> NonNull>> { let mut descriptor = SlabDescriptor::>::new(order, None); - let d = descriptor.alloc_obj(descriptor.as_unassigned().clone()); - unsafe { d.as_ref().assign::>() } + + let mut ptr = descriptor.alloc(); + + unsafe { *ptr.as_mut() = descriptor.as_unassigned().clone() } + + unsafe { ptr.as_ref().assign::>() } } } diff --git a/kernel/src/memory/allocators/slab/traits.rs b/kernel/src/memory/allocators/slab/traits.rs index ca2f35f..17d91d5 100644 --- a/kernel/src/memory/allocators/slab/traits.rs +++ b/kernel/src/memory/allocators/slab/traits.rs @@ -4,7 +4,7 @@ use crate::memory::page_descriptor::Unassigned; /// /// Shouldn't implement this trait manually; it is implemented /// via the `define_slab_system` macro. 
-pub trait SlabPosition { +pub trait SlabPosition: 'static + Sized { const POSITION: usize; } From 31961c256bbf8b89cfefaba21a21c4df06135480 Mon Sep 17 00:00:00 2001 From: sagi Date: Sat, 17 Jan 2026 19:33:23 +0200 Subject: [PATCH 40/78] removed regular allocator and switched to using nonnull --- .../allocators/{page_allocator => }/buddy.rs | 75 +++--- kernel/src/memory/allocators/buddy/meta.rs | 31 +++ .../{page_allocator => }/extensions.rs | 2 +- kernel/src/memory/allocators/mod.rs | 3 +- .../allocators/page_allocator/allocator.rs | 224 ------------------ .../memory/allocators/page_allocator/mod.rs | 12 - 6 files changed, 73 insertions(+), 274 deletions(-) rename kernel/src/memory/allocators/{page_allocator => }/buddy.rs (67%) create mode 100644 kernel/src/memory/allocators/buddy/meta.rs rename kernel/src/memory/allocators/{page_allocator => }/extensions.rs (99%) delete mode 100644 kernel/src/memory/allocators/page_allocator/allocator.rs delete mode 100644 kernel/src/memory/allocators/page_allocator/mod.rs diff --git a/kernel/src/memory/allocators/page_allocator/buddy.rs b/kernel/src/memory/allocators/buddy.rs similarity index 67% rename from kernel/src/memory/allocators/page_allocator/buddy.rs rename to kernel/src/memory/allocators/buddy.rs index 5a00c78..728de2e 100644 --- a/kernel/src/memory/allocators/page_allocator/buddy.rs +++ b/kernel/src/memory/allocators/buddy.rs @@ -1,16 +1,22 @@ -use core::ptr; +use core::ptr::{self, NonNull}; use common::{ address_types::PhysicalAddress, + constants::REGULAR_PAGE_SIZE, enums::{BUDDY_MAX_ORDER, BuddyOrder}, }; use cpu_utils::structures::paging::PageTable; -use crate::memory::{ - allocators::slab::traits::SlabPosition, - page_descriptor::{PAGES, Page, Unassigned, UnassignedPage}, +use crate::{ + memory::{ + allocators::buddy::meta::BuddyBlockMeta, + page_descriptor::{PAGES, Page, Unassigned, UnassignedPage}, + }, + println, }; +pub mod meta; + pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { freelist: [BuddyBlockMeta { next: None, @@ -19,29 +25,6 @@ pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { }; BUDDY_MAX_ORDER], }; -#[derive(Default, Clone, Copy, Debug)] -pub struct BuddyBlockMeta { - // TODO CHANGE INTO REF BECAUSE IT CONSUMES LESS MEMORY - pub next: Option<*mut UnassignedPage>, - pub prev: Option<*mut UnassignedPage>, - pub order: Option, -} - -impl BuddyBlockMeta { - pub fn detach(&mut self) -> Option<*mut Page> { - let detached = self.next? 
as *mut Page; // None if there is no page to detach - self.next = unsafe { (*detached).buddy_meta.next }; - Some(detached) - } - - pub fn attach(&mut self, attachment: *mut Page) { - let attachment_ref = - unsafe { &mut *attachment }.as_unassigned_mut(); - attachment_ref.buddy_meta.next = self.next; - self.next = Some(attachment_ref as *mut UnassignedPage) - } -} - pub struct BuddyAllocator { freelist: [BuddyBlockMeta; BUDDY_MAX_ORDER], } @@ -63,7 +46,7 @@ impl BuddyAllocator { .expect("Out of memory, swap is not implemented") }); - (unsafe { &*page }).physical_address() + unsafe { page.as_ref().physical_address() } } // pub fn free_pages(&self, address: usize) { @@ -75,14 +58,15 @@ impl BuddyAllocator { pub fn split_until( &mut self, wanted_order: usize, - ) -> Option<*mut UnassignedPage> { + ) -> Option> { let mut closet_order = ((wanted_order + 1)..BUDDY_MAX_ORDER) .find(|i| self.freelist[*i].next.is_some())?; let initial_page = unsafe { - &mut *self.freelist[closet_order] + self.freelist[closet_order] .detach::() .unwrap() + .as_mut() }; let (mut lhs, mut rhs) = unsafe { initial_page.split() }.unwrap(); @@ -91,7 +75,7 @@ impl BuddyAllocator { while closet_order != wanted_order { self.freelist[closet_order].attach(rhs); - let split_ref = unsafe { &mut *lhs }; + let split_ref = unsafe { lhs.as_mut() }; (lhs, rhs) = unsafe { split_ref.split().unwrap() }; closet_order -= 1; @@ -128,19 +112,38 @@ impl BuddyAllocator { let mut prev = None; while let Some(curr) = iter.next() { - curr.buddy_meta.next = iter.peek().map(|v| { - *v as *const UnassignedPage as *mut UnassignedPage + curr.buddy_meta.next = iter.peek().map(|v| unsafe { + NonNull::new_unchecked( + *v as *const Page as *mut UnassignedPage, + ) }); curr.buddy_meta.prev = prev; curr.buddy_meta.order = Some(BuddyOrder::MAX); - prev = Some(curr) + prev = Some(NonNull::from_mut(curr)) } self.freelist[BUDDY_MAX_ORDER - 1] = BuddyBlockMeta { - next: Some(unsafe { (&mut PAGES[0]) as *mut UnassignedPage }), + next: Some(unsafe { NonNull::new_unchecked(&mut PAGES[0]) }), prev: None, order: Some(BuddyOrder::MAX), }; // Allocate initial MB - self.alloc_pages(256); + + // Allocate pages array + let mem_map_size_pages = unsafe { + (PAGES.len() * size_of::()) / REGULAR_PAGE_SIZE + }; + println!("Mem map pages total: {}", mem_map_size_pages); + println!( + "Mem Map allocation: {:x?}", + self.alloc_pages(256 + mem_map_size_pages) + ); } } +#[macro_export] +/// Allocate the amount of pages specified, and return the address +macro_rules! 
alloc_pages { + ($page_number: expr) => {{ + use $crate::memory::allocators::buddy::BUDDY_ALLOCATOR; + BUDDY_ALLOCATOR.alloc_pages($page_number) + }}; +} diff --git a/kernel/src/memory/allocators/buddy/meta.rs b/kernel/src/memory/allocators/buddy/meta.rs new file mode 100644 index 0000000..78ce50c --- /dev/null +++ b/kernel/src/memory/allocators/buddy/meta.rs @@ -0,0 +1,31 @@ +use core::ptr::NonNull; + +use common::enums::BuddyOrder; + +use crate::memory::{ + allocators::slab::traits::SlabPosition, + page_descriptor::{ + NonNullPageTExt, NonNullPageUnassignedExt, Page, UnassignedPage, + }, +}; + +#[derive(Default, Clone, Copy, Debug)] +pub struct BuddyBlockMeta { + // TODO CHANGE INTO REF BECAUSE IT CONSUMES LESS MEMORY + pub next: Option>, + pub prev: Option>, + pub order: Option, +} + +impl BuddyBlockMeta { + pub fn detach(&mut self) -> Option>> { + let detached = self.next?; // None if there is no page to detach + self.next = unsafe { detached.as_ref().buddy_meta.next }; + Some(detached.assign::()) + } + + pub fn attach(&mut self, mut p: NonNull>) { + unsafe { p.as_mut().buddy_meta.next = self.next }; + self.next = Some(p.as_unassigned()) + } +} diff --git a/kernel/src/memory/allocators/page_allocator/extensions.rs b/kernel/src/memory/allocators/extensions.rs similarity index 99% rename from kernel/src/memory/allocators/page_allocator/extensions.rs rename to kernel/src/memory/allocators/extensions.rs index 17ba98a..164dddc 100644 --- a/kernel/src/memory/allocators/page_allocator/extensions.rs +++ b/kernel/src/memory/allocators/extensions.rs @@ -16,7 +16,7 @@ use strum::VariantArray; use common::error::TableError; use cpu_utils::structures::paging::EntryIndex; -use crate::memory::allocators::page_allocator::buddy::BUDDY_ALLOCATOR; +use crate::memory::allocators::buddy::BUDDY_ALLOCATOR; #[ext] pub impl PhysicalAddress { diff --git a/kernel/src/memory/allocators/mod.rs b/kernel/src/memory/allocators/mod.rs index af89e2e..6b2929b 100644 --- a/kernel/src/memory/allocators/mod.rs +++ b/kernel/src/memory/allocators/mod.rs @@ -1,2 +1,3 @@ -pub mod page_allocator; +pub mod buddy; +pub mod extensions; pub mod slab; diff --git a/kernel/src/memory/allocators/page_allocator/allocator.rs b/kernel/src/memory/allocators/page_allocator/allocator.rs deleted file mode 100644 index 49dc787..0000000 --- a/kernel/src/memory/allocators/page_allocator/allocator.rs +++ /dev/null @@ -1,224 +0,0 @@ -use core::{ - alloc::{AllocError, Allocator, Layout}, - cell::UnsafeCell, - mem::MaybeUninit, - ptr::{self, NonNull}, -}; - -use common::{ - address_types::{PhysicalAddress, VirtualAddress}, - bitmap::{BitMap, ContiguousBlockLayout, Position}, - constants::{ - PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, - }, - enums::MemoryRegionType, -}; -use cpu_utils::structures::paging::PageTable; - -use crate::parsed_memory_map; - -#[derive(Debug)] -// TODO: This is not thread safe, probably should use Mutex -// in the future -/// Physical page allocator implemented with a bitmap, every -/// bit corresponds to a physical page -pub struct PhysicalPageAllocator(UnsafeCell); - -impl Clone for PhysicalPageAllocator { - fn clone(&self) -> Self { - unsafe { - let bitmap = self.map_mut(); - Self(UnsafeCell::new(bitmap.clone())) - } - } -} - -impl PhysicalPageAllocator { - /// Creates a new allocator from the `bitmap_address` - /// and the `memory_size`. 
- /// - /// # Parameters - /// - /// - `bitmap_address`: Virtual address that is identity mapped and - /// will use to store the map - /// - `memory_size`: Memory size in bytes - #[allow(unsafe_op_in_unsafe_fn)] - pub const unsafe fn new( - bitmap_address: VirtualAddress, - memory_size: usize, - ) -> PhysicalPageAllocator { - let size_in_pages = memory_size / REGULAR_PAGE_SIZE; - let map_size = size_in_pages / u64::BITS as usize; - PhysicalPageAllocator(UnsafeCell::new(BitMap::new( - bitmap_address, - map_size, - ))) - } - - pub const fn address_position( - address: PhysicalAddress, - ) -> Option { - if address.is_aligned(REGULAR_PAGE_ALIGNMENT) { - let bit_index = address.as_usize() / REGULAR_PAGE_SIZE; - return Some(Position::from_abs_bit_index(bit_index)); - } - None - } - - unsafe fn map(&self) -> &BitMap { - unsafe { self.0.as_ref_unchecked() } - } - - #[allow(clippy::mut_from_ref)] - pub unsafe fn map_mut(&self) -> &mut BitMap { - unsafe { self.0.as_mut_unchecked() } - } - - pub fn init(uninit: &'static mut MaybeUninit) { - unsafe { - let memory_size = parsed_memory_map!() - .iter() - .map(|x| x.length as usize) - .sum::(); - uninit.write(Self::new( - PhysicalAddress::new_unchecked(PAGE_ALLOCATOR_OFFSET) - .translate(), - memory_size, - )); - let initialized = uninit.assume_init_mut(); - - let start_address = PhysicalAddress::new_unchecked(0); - let start_position = - Self::address_position(start_address).unwrap(); - // Allocate the addresses that are used for the - // code, and for other variables. - let end_address = PhysicalAddress::new_unchecked( - PAGE_ALLOCATOR_OFFSET - + core::mem::size_of_val(initialized.map().map), - ) - .align_up(REGULAR_PAGE_ALIGNMENT); - let size_bits = ((end_address - start_address) - / REGULAR_PAGE_SIZE) - .as_usize(); - let block = ContiguousBlockLayout::from_start_size( - &start_position, - size_bits, - ); - initialized - .map_mut() - .set_contiguous_block(&start_position, &block); - for region in parsed_memory_map!() { - if region.region_type != MemoryRegionType::Usable { - let start_address_aligned = - PhysicalAddress::new_unchecked( - region.base_address as usize - & (u64::MAX - ^ (REGULAR_PAGE_SIZE as u64 - 1)) - as usize, - ); - let start_position = - Self::address_position(start_address_aligned) - .unwrap(); - let size_bits = - region.length as usize / REGULAR_PAGE_SIZE; - let block = ContiguousBlockLayout::from_start_size( - &start_position, - size_bits, - ); - initialized - .map_mut() - .set_contiguous_block(&start_position, &block); - } - } - }; - } - - /// Resolves `map_index` and `bit_index` into actual - /// physical address - pub fn resolve_position(p: &Position) -> PhysicalAddress { - unsafe { - PhysicalAddress::new_unchecked( - ((p.map_index * (u64::BITS as usize)) + p.bit_index) - * REGULAR_PAGE_SIZE, - ) - } - } - - pub fn resolve_address(address: PhysicalAddress) -> Position { - let starting_bit_idx = address.as_usize() / REGULAR_PAGE_SIZE; - Position::from_abs_bit_index(starting_bit_idx) - } - - pub fn available_memory(&self) -> usize { - unsafe { self.map().count_zeros() * REGULAR_PAGE_SIZE } - } - - /// Return the physical address of this table - pub(super) fn alloc_table(&self) -> &'static mut PageTable { - let free_block = unsafe { self.map().find_free_block(1) }; - - match free_block { - Some((p, _)) => unsafe { - let physical_address = Self::resolve_position(&p); - - ptr::write( - physical_address.translate().as_mut_ptr::(), - PageTable::empty(), - ); - - self.map_mut().set_bit(&p); - - &mut *physical_address.as_mut_ptr::() - 
},
-
-            None => panic!(
-                "No physical memory is available to allocate this table"
-            ),
-        }
-    }
-}
-
-#[allow(unsafe_op_in_unsafe_fn)]
-unsafe impl Allocator for PhysicalPageAllocator {
-    fn allocate(
-        &self,
-        layout: Layout,
-    ) -> Result, AllocError> {
-        unsafe {
-            if let Ok(layout) =
-                layout.align_to(REGULAR_PAGE_ALIGNMENT.as_usize())
-                && let Some((p, block)) = self
-                    .map()
-                    .find_free_block(layout.size() / REGULAR_PAGE_SIZE)
-            {
-                self.map_mut().set_contiguous_block(&p, &block);
-                return Ok(NonNull::slice_from_raw_parts(
-                    NonNull::new_unchecked(
-                        Self::resolve_position(&p).as_mut_ptr::(),
-                    ),
-                    layout.size(),
-                ));
-            }
-            Err(AllocError)
-        }
-    }
-
-    /// TODO USE INVAL PAGE HERE ON THE ADDRESS
-    unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) {
-        if let Ok(layout) =
-            layout.align_to(REGULAR_PAGE_ALIGNMENT.as_usize())
-        {
-            let start_position = Self::resolve_address(
-                PhysicalAddress::new_unchecked(ptr.as_ptr() as usize),
-            );
-            let block = ContiguousBlockLayout::from_start_size(
-                &start_position,
-                layout.size() / REGULAR_PAGE_SIZE,
-            );
-            self.map_mut()
-                .unset_contiguous_block(&start_position, &block);
-        }
-    }
-}
-
-unsafe impl Sync for PhysicalPageAllocator {}
diff --git a/kernel/src/memory/allocators/page_allocator/mod.rs b/kernel/src/memory/allocators/page_allocator/mod.rs
deleted file mode 100644
index ac555f4..0000000
--- a/kernel/src/memory/allocators/page_allocator/mod.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-pub mod allocator;
-pub mod buddy;
-pub mod extensions;
-
-#[macro_export]
-/// Allocate the amount of pages specified, and return the address
-macro_rules! alloc_pages {
-    ($page_number: expr) => {{
-        use $crate::memory::allocators::page_allocator::buddy::BUDDY_ALLOCATOR;
-        BUDDY_ALLOCATOR.alloc_pages($page_number)
-    }};
-}

From 7fd37f61f8a830e1b15757031cba8023a34c8950 Mon Sep 17 00:00:00 2001
From: sagi
Date: Sat, 17 Jan 2026 19:34:13 +0200
Subject: [PATCH 41/78] fixed a bug where prev was not updated by attach and
 detach

---
 kernel/src/memory/allocators/buddy/meta.rs | 34 ++++++++++++++++++----
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/kernel/src/memory/allocators/buddy/meta.rs b/kernel/src/memory/allocators/buddy/meta.rs
index 78ce50c..4b8e667 100644
--- a/kernel/src/memory/allocators/buddy/meta.rs
+++ b/kernel/src/memory/allocators/buddy/meta.rs
@@ -1,4 +1,4 @@
-use core::ptr::NonNull;
+use core::{iter::ByRefSized, ptr::NonNull};
 
 use common::enums::BuddyOrder;
 
@@ -9,23 +9,45 @@ use crate::memory::{
     },
 };
 
-#[derive(Default, Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug)]
 pub struct BuddyBlockMeta {
-    // TODO CHANGE INTO REF BECAUSE IT CONSUMES LESS MEMORY
-    pub next: Option>,
-    pub prev: Option>,
-    pub order: Option,
+    next: Option>,
+    prev: Option>,
+    order: Option,
+}
+
+impl const Default for BuddyBlockMeta {
+    fn default() -> Self {
+        Self {
+            next: None,
+            prev: None,
+            order: None,
+        }
+    }
 }
 
 impl BuddyBlockMeta {
     pub fn detach(&mut self) -> Option>> {
         let detached = self.next?; // None if there is no page to detach
+
         self.next = unsafe { detached.as_ref().buddy_meta.next };
+
+        if let Some(mut next) = self.next {
+            unsafe { next.as_mut().buddy_meta.prev = None }
+        }
+
         Some(detached.assign::())
     }
 
     pub fn attach(&mut self, mut p: NonNull>) {
         unsafe { p.as_mut().buddy_meta.next = self.next };
+
+        if let Some(mut next) = self.next {
+            unsafe {
+                next.as_mut().buddy_meta.prev = Some(p.as_unassigned())
+            };
+        }
+
         self.next = Some(p.as_unassigned())
    }
}

From 19d4f8bad97c6df235f3fed934b7e38544fc8359 Mon Sep 17 00:00:00 2001
From: sagi
Date: Sat, 17 Jan 2026 22:38:19 +0200
Subject: [PATCH 42/78] initial draft of fixed buddy allocator initialization
 that ignores non-usable memory from the BIOS map. Currently not working.

---
 kernel/src/drivers/ata/ahci/hba.rs            | 182 +++++++++---------
 kernel/src/drivers/pci.rs                     |   5 +-
 kernel/src/main.rs                            |  32 ++++-
 kernel/src/memory/allocators/buddy.rs         |  75 ++++----
 kernel/src/memory/allocators/buddy/meta.rs    |   8 +-
 .../src/memory/allocators/slab/descriptor.rs  |   2 +-
 kernel/src/memory/page_descriptor.rs          |  50 +++--
 .../structures/interrupt_descriptor_table.rs  |   4 +-
 8 files changed, 200 insertions(+), 158 deletions(-)

diff --git a/kernel/src/drivers/ata/ahci/hba.rs b/kernel/src/drivers/ata/ahci/hba.rs
index bd42796..7eccf45 100644
--- a/kernel/src/drivers/ata/ahci/hba.rs
+++ b/kernel/src/drivers/ata/ahci/hba.rs
@@ -36,7 +36,7 @@ use crate::{
         vga_display::color_code::ColorCode,
     },
     eprintln,
-    memory::allocators::page_allocator::extensions::PhysicalAddressExt,
+    memory::allocators::extensions::PhysicalAddressExt,
     print, println,
 };

@@ -1231,94 +1231,96 @@ impl HBAMemoryRegisters {
     /// Returns the amount of active devices found and set them into idle
     /// state.
     pub fn probe_init(&mut self) -> usize {
-        println!(
-            "Detected {} implemented ports",
-            self.ghc.cap.number_of_ports()
-        );
-
-        println!(
-            "Supported command slots: {}, Supported 64bit addresses: {}",
-            self.ghc.cap.number_of_commands(),
-            self.ghc.cap.is_s64a()
-        );
-
-        let mut count = 0;
-        for (i, port) in self.ports.iter_mut().enumerate() {
-            if self.ghc.pi.is_port_implemented(i as u8)
-                && let Ok(power) = port.ssts.power()
-                && let InterfacePowerManagement::Active = power
-            {
-                count += 1;
-                println!("\nDetected device at port number: {}", i);
-                print!(" Device Power: ");
-                println!("{:?}", power ; color = ColorCode::new(Color::Green, Color::Black));
-                print!(" Device Speed: ");
-                println!("{}", port.ssts.speed() ; color = ColorCode::new(Color::Green, Color::Black));
-                print!(" Device type: ");
-                match port.sig.device_type() {
-                    Ok(t) => {
-                        println!("{:?}", t ; color = ColorCode::new(Color::Green, Color::Black) )
-                    }
-                    Err(e) => {
-                        println!("{:?}", e ; color = ColorCode::new(Color::Red, Color::Black) )
-                    }
-                }
-                port.cmd.stop();
-
-                let clb_fbu_table = unsafe { alloc_pages!(1) };
-                for i in (0..4096).step_by(size_of::()) {
-                    unsafe {
-                        core::ptr::write_volatile(
-                            ((clb_fbu_table + i) + PHYSICAL_MEMORY_OFFSET)
-                                as *mut usize,
-                            0,
-                        );
-                    }
-                }
-
-                port.set_cmd_list_address(clb_fbu_table);
-                port.set_received_fis_address(
-                    clb_fbu_table + size_of::(),
-                );
-
-                // MAPPING the first header with 8 entries (0x100 in total
-                // table size)
-                let cmd_list = port.cmd_list();
-                cmd_list.entries[0].set_cmd_table(
-                    clb_fbu_table
-                        + size_of::()
-                        + size_of::(),
-                );
-
-                port.cmd.set_fre();
-                port.serr.zero_error();
-                // port.ie.set_dhre();
-                // port.ie.set_pse();
-                // port.ie.set_dse();
-                // port.ie.set_tfee();
-                port.is.clear_pending_interrupts();
-                self.ghc.is.clear_all();
-
-                port.cmd.set_sud();
-                port.cmd.set_pod();
-                port.cmd.set_icc(InterfaceCommunicationControl::Active);
-
-                loop {
-                    if !port.tfd.is_bsy()
-                        && !port.tfd.is_drq()
-                        && matches!(
-                            port.ssts.power().unwrap(),
-                            InterfacePowerManagement::Active
-                        )
-                    {
-                        break;
-                    }
-                }
-                port.cmd.start();
-                println!("Started port number: {}", i)
-            }
-        }
-
-        count
+        // println!(
+        //     "Detected {} implemented ports",
+        //     self.ghc.cap.number_of_ports()
+        // );

+        // println!(
+        //     "Supported command slots: {}, Supported 64bit addresses:
+        // {}", self.ghc.cap.number_of_commands(),
// self.ghc.cap.is_s64a() + // ); + + // let mut count = 0; + // for (i, port) in self.ports.iter_mut().enumerate() { + // if self.ghc.pi.is_port_implemented(i as u8) + // && let Ok(power) = port.ssts.power() + // && let InterfacePowerManagement::Active = power + // { + // count += 1; + // println!("\nDetected device at port number: {}", i); + // print!(" Device Power: "); + // println!("{:?}", power ; color = + // ColorCode::new(Color::Green, Color::Black)); + // print!(" Device Speed: "); println!("{}", + // port.ssts.speed() ; color = ColorCode::new(Color::Green, + // Color::Black)); print!(" Device type: "); + // match port.sig.device_type() { + // Ok(t) => { + // println!("{:?}", t ; color = + // ColorCode::new(Color::Green, Color::Black) ) + // } Err(e) => { + // println!("{:?}", e ; color = + // ColorCode::new(Color::Red, Color::Black) ) + // } } + // port.cmd.stop(); + + // let clb_fbu_table = unsafe { alloc_pages!(1) }; + // for i in (0..4096).step_by(size_of::()) { + // unsafe { + // core::ptr::write_volatile( + // ((clb_fbu_table + i) + + // PHYSICAL_MEMORY_OFFSET) as + // *mut usize, 0, + // ); + // } + // } + + // port.set_cmd_list_address(clb_fbu_table); + // port.set_received_fis_address( + // clb_fbu_table + size_of::(), + // ); + + // // MAPPING the first header with 8 entries (0x100 in + // total // table size) + // let cmd_list = port.cmd_list(); + // cmd_list.entries[0].set_cmd_table( + // clb_fbu_table + // + size_of::() + // + size_of::(), + // ); + + // port.cmd.set_fre(); + // port.serr.zero_error(); + // // port.ie.set_dhre(); + // // port.ie.set_pse(); + // // port.ie.set_dse(); + // // port.ie.set_tfee(); + // port.is.clear_pending_interrupts(); + // self.ghc.is.clear_all(); + + // port.cmd.set_sud(); + // port.cmd.set_pod(); + // + // port.cmd.set_icc(InterfaceCommunicationControl::Active); + + // loop { + // if !port.tfd.is_bsy() + // && !port.tfd.is_drq() + // && matches!( + // port.ssts.power().unwrap(), + // InterfacePowerManagement::Active + // ) + // { + // break; + // } + // } + // port.cmd.start(); + // println!("Started port number: {}", i) + // } + // } + todo!() + // count } } diff --git a/kernel/src/drivers/pci.rs b/kernel/src/drivers/pci.rs index 8378527..8bf8a18 100644 --- a/kernel/src/drivers/pci.rs +++ b/kernel/src/drivers/pci.rs @@ -1,9 +1,6 @@ extern crate alloc; -use crate::{ - drivers::ata::ahci::AHCIBaseAddress, - memory::allocators::page_allocator::allocator::PhysicalPageAllocator, -}; +use crate::drivers::ata::ahci::AHCIBaseAddress; use alloc::vec::Vec; use common::enums::{ CascadedPicInterruptLine, ClassCode, DeviceID, HeaderType, diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 091d82b..cb1d39d 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -34,11 +34,14 @@ use crate::{ vga_display::color_code::ColorCode, }, memory::{ - allocators::page_allocator::{ - allocator::PhysicalPageAllocator, buddy::BUDDY_ALLOCATOR, + allocators::{ + buddy::BUDDY_ALLOCATOR, + slab::{ + Generic512, SLAB_ALLOCATOR, descriptor::SlabDescriptor, + }, }, memory_map::{MemoryMap, MemoryRegion, parse_map}, - page_descriptor::pages_init, + page_descriptor::{Unassigned, pages_init}, }, }; @@ -67,18 +70,17 @@ pub unsafe extern "C" fn _start() -> ! 
{ okprintln!("Obtained Memory Map"); println!("{}", MemoryMap(parsed_memory_map!())); - pages_init(&MemoryMap(parsed_memory_map!())); - unsafe { BUDDY_ALLOCATOR.init() }; - + pages_init(MemoryMap(parsed_memory_map!())); + unsafe { BUDDY_ALLOCATOR.init(MemoryMap(parsed_memory_map!())) }; okprintln!("Allocator Initialized"); unsafe { - InterruptDescriptorTable::init(&mut IDT, alloc_pages!(1).into()); + InterruptDescriptorTable::init(&mut IDT, alloc_pages!(1)); okprintln!("Initialized interrupt descriptor table"); interrupt_handlers::init(IDT.assume_init_mut()); okprintln!("Initialized interrupts handlers"); CascadedPIC::init(&mut PIC); okprintln!("Initialized Programmable Interrupt Controller"); - let keyboard_buffer_address = alloc_pages!(1).into(); + let keyboard_buffer_address = alloc_pages!(1).translate(); Keyboard::init( &mut KEYBOARD, keyboard_buffer_address, @@ -87,6 +89,20 @@ pub unsafe extern "C" fn _start() -> ! { okprintln!("Initialized Keyboard"); interrupts::enable(); } + + unsafe { SLAB_ALLOCATOR.init() } + + println!("{:?}", unsafe { + SLAB_ALLOCATOR.slab_of::().as_ref() + }); + + let generic512 = unsafe { SLAB_ALLOCATOR.kmalloc::() }; + println!("{:?}", generic512); + + println!("{:?}", unsafe { + SLAB_ALLOCATOR.slab_of::().as_ref() + }); + // panic!("") // let mut pci_devices = pci::scan_pci(); // println!("Press ENTER to enumerate PCI devices!"); diff --git a/kernel/src/memory/allocators/buddy.rs b/kernel/src/memory/allocators/buddy.rs index 728de2e..9c9dd43 100644 --- a/kernel/src/memory/allocators/buddy.rs +++ b/kernel/src/memory/allocators/buddy.rs @@ -3,13 +3,14 @@ use core::ptr::{self, NonNull}; use common::{ address_types::PhysicalAddress, constants::REGULAR_PAGE_SIZE, - enums::{BUDDY_MAX_ORDER, BuddyOrder}, + enums::{BUDDY_MAX_ORDER, BuddyOrder, MemoryRegionType}, }; use cpu_utils::structures::paging::PageTable; use crate::{ memory::{ allocators::buddy::meta::BuddyBlockMeta, + memory_map::{MemoryRegion, ParsedMemoryMap}, page_descriptor::{PAGES, Page, Unassigned, UnassignedPage}, }, println, @@ -18,11 +19,7 @@ use crate::{ pub mod meta; pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { - freelist: [BuddyBlockMeta { - next: None, - prev: None, - order: None, - }; BUDDY_MAX_ORDER], + freelist: [BuddyBlockMeta::default(); BUDDY_MAX_ORDER], }; pub struct BuddyAllocator { @@ -30,7 +27,7 @@ pub struct BuddyAllocator { } impl BuddyAllocator { - pub fn alloc_pages(&mut self, num_pages: usize) -> usize { + pub fn alloc_pages(&mut self, num_pages: usize) -> PhysicalAddress { assert!( num_pages <= (1 << BuddyOrder::MAX as usize), "Size cannot be greater then: {}", @@ -85,14 +82,11 @@ impl BuddyAllocator { Some(lhs) } - pub fn merge(&self) { - unimplemented!() - } + pub fn merge(&self, page: NonNull) {} pub fn alloc_table(&mut self) -> &'static mut PageTable { unsafe { - let address = - { PhysicalAddress::new_unchecked(self.alloc_pages(1)) }; + let address = self.alloc_pages(1); ptr::write_volatile( address.as_mut_ptr::(), PageTable::empty(), @@ -101,31 +95,46 @@ impl BuddyAllocator { } } - pub fn init(&'static mut self) { - let mut iter = unsafe { - PAGES - .iter_mut() - .step_by(1 << BuddyOrder::MAX as usize) - .peekable() - }; + pub fn init(&'static mut self, map: ParsedMemoryMap) { + for area in map + .iter() + .filter(|a| a.region_type == MemoryRegionType::Usable) + { + let mut start = UnassignedPage::index_of_page( + (area.base_address as usize).into(), + ); + let end = UnassignedPage::index_of_page( + ((area.base_address + area.length) as usize).into(), + 
); - let mut prev = None; + let mut prev = None; - while let Some(curr) = iter.next() { - curr.buddy_meta.next = iter.peek().map(|v| unsafe { - NonNull::new_unchecked( - *v as *const Page as *mut UnassignedPage, + while start < end { + let largest_order = BuddyOrder::try_from( + ((end - start).ilog2().min(BuddyOrder::MAX as u32)) + as u8, ) - }); - curr.buddy_meta.prev = prev; - curr.buddy_meta.order = Some(BuddyOrder::MAX); - prev = Some(NonNull::from_mut(curr)) + .unwrap(); + + println!("{:?}", largest_order); + + let curr = unsafe { &mut PAGES[start] }; + let next = unsafe { + &mut PAGES[start + (1 << largest_order as usize)] + }; + + curr.buddy_meta.next = Some(NonNull::from_mut(next)); + curr.buddy_meta.prev = prev; + curr.buddy_meta.order = Some(largest_order); + prev = Some(NonNull::from_mut(curr)); + + self.freelist[largest_order as usize] + .attach(NonNull::from_mut(curr)); + + start += largest_order as usize; + } } - self.freelist[BUDDY_MAX_ORDER - 1] = BuddyBlockMeta { - next: Some(unsafe { NonNull::new_unchecked(&mut PAGES[0]) }), - prev: None, - order: Some(BuddyOrder::MAX), - }; + // Allocate initial MB // Allocate pages array diff --git a/kernel/src/memory/allocators/buddy/meta.rs b/kernel/src/memory/allocators/buddy/meta.rs index 4b8e667..31a3621 100644 --- a/kernel/src/memory/allocators/buddy/meta.rs +++ b/kernel/src/memory/allocators/buddy/meta.rs @@ -1,4 +1,4 @@ -use core::{iter::ByRefSized, ptr::NonNull}; +use core::ptr::NonNull; use common::enums::BuddyOrder; @@ -11,9 +11,9 @@ use crate::memory::{ #[derive(Clone, Copy, Debug)] pub struct BuddyBlockMeta { - next: Option>, - prev: Option>, - order: Option, + pub next: Option>, + pub prev: Option>, + pub order: Option, } impl const Default for BuddyBlockMeta { diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs index b24b2fa..5a43cba 100644 --- a/kernel/src/memory/allocators/slab/descriptor.rs +++ b/kernel/src/memory/allocators/slab/descriptor.rs @@ -36,7 +36,7 @@ impl SlabDescriptor { let mut objects = unsafe { NonNull::slice_from_raw_parts( NonNull::new_unchecked( - address as *mut PreallocatedObject, + address.as_mut_ptr::>(), ), ((1 << order) * REGULAR_PAGE_SIZE) / size_of::>(), diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index 7014e97..2a7f661 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -3,7 +3,7 @@ use core::ptr::NonNull; use crate::{ memory::{ allocators::{ - page_allocator::buddy::BuddyBlockMeta, + buddy::meta::BuddyBlockMeta, slab::{cache::SlabCache, traits::SlabPosition}, }, memory_map::ParsedMemoryMap, @@ -11,6 +11,7 @@ use crate::{ println, }; use common::{ + address_types::PhysicalAddress, constants::{ PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, }, @@ -24,15 +25,25 @@ pub struct Unassigned; pub type UnassignedPage = Page; -impl UnassignedPage { - pub fn assign(&self) -> &Page { - let ptr = self as *const _ as usize; - unsafe { &*(ptr as *const Page) } +#[extend::ext] +pub impl NonNull> { + fn assign(&self) -> NonNull> { + unsafe { NonNull::new_unchecked(self.as_ptr() as *mut Page) } } +} - pub fn assign_mut(&mut self) -> &mut Page { - let ptr = self as *const _ as usize; - unsafe { &mut *(ptr as *mut Page) } +#[extend::ext] +pub impl NonNull> { + fn as_unassigned(&self) -> NonNull> { + unsafe { + NonNull::new_unchecked(self.as_ptr() as *mut Page) + } + } +} + +impl UnassignedPage { + pub fn assign(&self) -> NonNull> { + unsafe { 
NonNull::new_unchecked(self as *const _ as *mut Page) } } } @@ -56,12 +67,14 @@ impl Page { unsafe { &mut *(ptr as *mut UnassignedPage) } } - pub fn physical_address(&self) -> usize { + pub fn physical_address(&self) -> PhysicalAddress { let index = (self.as_unassigned() as *const _ as usize - PAGE_ALLOCATOR_OFFSET) / size_of::(); - index * REGULAR_PAGE_SIZE + unsafe { + PhysicalAddress::new_unchecked(index * REGULAR_PAGE_SIZE) + } } pub fn get_buddy(&self) -> Option<*mut Page> { @@ -82,9 +95,10 @@ impl Page { /// /// # Safety /// This function does not attach the new references! + #[allow(clippy::type_complexity)] pub unsafe fn split( &mut self, - ) -> Option<(*mut Page, *mut Page)> { + ) -> Option<(NonNull>, NonNull>)> { // Reduce it's order to find it's order. let prev_order = @@ -98,17 +112,21 @@ impl Page { + (1 << prev_order as usize); // Find it's half - let buddy = unsafe { PAGES[index].assign_mut::() }; + let mut buddy = unsafe { PAGES[index].assign::() }; // Set the order of the buddy. - write_volatile!(buddy.buddy_meta.order, Some(prev_order)); + write_volatile!(buddy.as_mut().buddy_meta.order, Some(prev_order)); + + Some((NonNull::from_mut(self), buddy)) + } - Some((self as *mut Page, buddy as *mut Page)) + pub const fn index_of_page(address: PhysicalAddress) -> usize { + address.as_usize() / REGULAR_PAGE_SIZE } } -pub fn pages_init(map: &ParsedMemoryMap) -> usize { - let last = map.last().unwrap(); +pub fn pages_init(mmap: ParsedMemoryMap) -> usize { + let last = mmap.last().unwrap(); let last_page = (last.base_address + last.length) as usize & !REGULAR_PAGE_ALIGNMENT.as_usize(); let total_pages = last_page / REGULAR_PAGE_SIZE; diff --git a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs index 07c727f..edc1004 100644 --- a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs +++ b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs @@ -1,5 +1,5 @@ use common::{ - address_types::VirtualAddress, + address_types::{PhysicalAddress, VirtualAddress}, enums::{ ProtectionLevel, SystemSegmentType, interrupts::{Interrupt, InterruptStackTable, InterruptType}, @@ -74,7 +74,7 @@ impl InterruptDescriptorTable { /// - `base_address`: A virtual address that the IDT will be placed on. 
pub fn init( uninit: &'static mut MaybeUninit<&mut Self>, - base_address: VirtualAddress, + base_address: PhysicalAddress, ) { let mut gdt_register: MaybeUninit = MaybeUninit::uninit(); From 724c2e782f7abee0f4d3e8753e3503982ee4d821 Mon Sep 17 00:00:00 2001 From: sagi Date: Mon, 19 Jan 2026 19:58:07 +0200 Subject: [PATCH 43/78] tests and fixes --- kernel/src/drivers/interrupt_handlers.rs | 4 +- kernel/src/main.rs | 20 +++++++-- kernel/src/memory/allocators/buddy.rs | 45 +++++++++++++------ kernel/src/memory/allocators/extensions.rs | 19 ++++---- kernel/src/memory/page_descriptor.rs | 9 ++-- .../structures/interrupt_descriptor_table.rs | 2 +- 6 files changed, 65 insertions(+), 34 deletions(-) diff --git a/kernel/src/drivers/interrupt_handlers.rs b/kernel/src/drivers/interrupt_handlers.rs index 8178ff1..110b561 100644 --- a/kernel/src/drivers/interrupt_handlers.rs +++ b/kernel/src/drivers/interrupt_handlers.rs @@ -168,8 +168,8 @@ pub extern "x86-interrupt" fn page_fault_handler( error_code: u64, ) { println!("Interrupt: PageFault"); - println!("Stack frame: {:#?}", stack_frame); - println!("Error code: {:#x}", error_code); + // println!("Stack frame: {:#?}", stack_frame); + // println!("Error code: {:#x}", error_code); println!("Faulting address: {:x}", cr2::read()); } diff --git a/kernel/src/main.rs b/kernel/src/main.rs index cb1d39d..cb6a3c7 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -36,6 +36,7 @@ use crate::{ memory::{ allocators::{ buddy::BUDDY_ALLOCATOR, + extensions::PageTableExt, slab::{ Generic512, SLAB_ALLOCATOR, descriptor::SlabDescriptor, }, @@ -54,8 +55,9 @@ use common::{ }; use cpu_utils::{ instructions::interrupts::{self}, - structures::interrupt_descriptor_table::{ - IDT, InterruptDescriptorTable, + structures::{ + interrupt_descriptor_table::{IDT, InterruptDescriptorTable}, + paging::PageTable, }, }; @@ -72,15 +74,25 @@ pub unsafe extern "C" fn _start() -> ! 
{ pages_init(MemoryMap(parsed_memory_map!())); unsafe { BUDDY_ALLOCATOR.init(MemoryMap(parsed_memory_map!())) }; + + let last = MemoryMap(parsed_memory_map!()).last().unwrap(); + + PageTable::current_table_mut() + .map_physical_memory((last.base_address + last.length) as usize); okprintln!("Allocator Initialized"); + println!("Address: {:x?}", unsafe { alloc_pages!(1).translate() }); unsafe { - InterruptDescriptorTable::init(&mut IDT, alloc_pages!(1)); + InterruptDescriptorTable::init( + &mut IDT, + alloc_pages!(1).translate(), + ); okprintln!("Initialized interrupt descriptor table"); interrupt_handlers::init(IDT.assume_init_mut()); okprintln!("Initialized interrupts handlers"); CascadedPIC::init(&mut PIC); + okprintln!("Initialized Programmable Interrupt Controller"); - let keyboard_buffer_address = alloc_pages!(1).translate(); + let keyboard_buffer_address: common::address_types::VirtualAddress = alloc_pages!(1).translate(); Keyboard::init( &mut KEYBOARD, keyboard_buffer_address, diff --git a/kernel/src/memory/allocators/buddy.rs b/kernel/src/memory/allocators/buddy.rs index 9c9dd43..fbf3bbe 100644 --- a/kernel/src/memory/allocators/buddy.rs +++ b/kernel/src/memory/allocators/buddy.rs @@ -3,17 +3,18 @@ use core::ptr::{self, NonNull}; use common::{ address_types::PhysicalAddress, constants::REGULAR_PAGE_SIZE, - enums::{BUDDY_MAX_ORDER, BuddyOrder, MemoryRegionType}, + enums::{BUDDY_MAX_ORDER, BuddyOrder, Color, MemoryRegionType}, }; use cpu_utils::structures::paging::PageTable; use crate::{ + drivers::vga_display::color_code::ColorCode, memory::{ allocators::buddy::meta::BuddyBlockMeta, memory_map::{MemoryRegion, ParsedMemoryMap}, page_descriptor::{PAGES, Page, Unassigned, UnassignedPage}, }, - println, + print, println, }; pub mod meta; @@ -86,7 +87,7 @@ impl BuddyAllocator { pub fn alloc_table(&mut self) -> &'static mut PageTable { unsafe { - let address = self.alloc_pages(1); + let address = self.alloc_pages(1).translate(); ptr::write_volatile( address.as_mut_ptr::(), PageTable::empty(), @@ -116,11 +117,13 @@ impl BuddyAllocator { ) .unwrap(); + println!("Start: {}, end: {}", start, end); + println!("{:?}", largest_order); let curr = unsafe { &mut PAGES[start] }; let next = unsafe { - &mut PAGES[start + (1 << largest_order as usize)] + &mut PAGES[start + ((1 << largest_order as usize) - 1)] }; curr.buddy_meta.next = Some(NonNull::from_mut(next)); @@ -131,21 +134,37 @@ impl BuddyAllocator { self.freelist[largest_order as usize] .attach(NonNull::from_mut(curr)); - start += largest_order as usize; + start += 1 << largest_order as usize; } } + for (i, meta) in self.freelist.iter().enumerate() { + let mut next = meta.next; + + if next.is_some() { + println!("Order: {:?}", i ; color = ColorCode::new(Color::Blue, Color::Black)); + } else { + continue; + } + + while let Some(node) = next { + print!("{:?} -> ", node); + unsafe { next = node.as_ref().buddy_meta.next }; + } + + println!(""); + } // Allocate initial MB // Allocate pages array - let mem_map_size_pages = unsafe { - (PAGES.len() * size_of::()) / REGULAR_PAGE_SIZE - }; - println!("Mem map pages total: {}", mem_map_size_pages); - println!( - "Mem Map allocation: {:x?}", - self.alloc_pages(256 + mem_map_size_pages) - ); + // let mem_map_size_pages = unsafe { + // (PAGES.len() * size_of::()) / + // REGULAR_PAGE_SIZE }; + // println!("Mem map pages total: {}", mem_map_size_pages); + // println!( + // "Mem Map allocation: {:x?}", + // self.alloc_pages(256 + mem_map_size_pages) + // ); } } #[macro_export] diff --git 
a/kernel/src/memory/allocators/extensions.rs b/kernel/src/memory/allocators/extensions.rs index 164dddc..3972fa4 100644 --- a/kernel/src/memory/allocators/extensions.rs +++ b/kernel/src/memory/allocators/extensions.rs @@ -1,8 +1,8 @@ use common::{ address_types::{PhysicalAddress, VirtualAddress}, constants::{ - BIG_PAGE_SIZE, PAGE_ALLOCATOR_OFFSET, PAGE_DIRECTORY_ENTRIES, - PHYSICAL_MEMORY_OFFSET, + BIG_PAGE_SIZE, HUGE_PAGE_SIZE, PAGE_ALLOCATOR_OFFSET, + PAGE_DIRECTORY_ENTRIES, PHYSICAL_MEMORY_OFFSET, }, enums::{PageSize, PageTableLevel}, error::EntryError, @@ -192,18 +192,19 @@ pub impl PageTable { // ANCHOR_END: page_table_find_available_page /// Map the region of memory from 0 to `mem_size_bytes` - /// at the top of the page table so that ```rust - /// VirtualAddress(0xffff800000000000) -> - /// PhysicalAddress(0) ``` + /// at the top of the page table so that + /// + /// ```rust + /// VirtualAddress(0xffff800000000000) -> PhysicalAddress(0) + /// ``` /// /// TODO: ADD SUPPORT FOR FULL FLAG #[allow(unsafe_op_in_unsafe_fn)] fn map_physical_memory(&mut self, mem_size_bytes: usize) { let mut second_level_entries_count = - (mem_size_bytes / BIG_PAGE_SIZE).max(1); - let mut third_level_entries_count = second_level_entries_count - .div_ceil(PAGE_ALLOCATOR_OFFSET) - .max(1); + (mem_size_bytes / BIG_PAGE_SIZE) + 1; + let mut third_level_entries_count = + second_level_entries_count.div_ceil(HUGE_PAGE_SIZE) + 1; let forth_level_entries_count = third_level_entries_count .div_ceil(PAGE_DIRECTORY_ENTRIES) .clamp(1, 256); diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index 2a7f661..73486d0 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -127,13 +127,12 @@ impl Page { pub fn pages_init(mmap: ParsedMemoryMap) -> usize { let last = mmap.last().unwrap(); - let last_page = (last.base_address + last.length) as usize - & !REGULAR_PAGE_ALIGNMENT.as_usize(); - let total_pages = last_page / REGULAR_PAGE_SIZE; + let last_address = (last.base_address + last.length) as usize; + let total_pages = last_address / REGULAR_PAGE_SIZE; println!( - "Last Page: {}, Total Pages: {}, size_of_array: {:x?} Kib", - last_page, + "Last address: {}, Total Pages: {}, size_of_array: {:x?} Kib", + last_address, total_pages, total_pages * size_of::>() / 1024 ); diff --git a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs index edc1004..5e22d7e 100644 --- a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs +++ b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs @@ -74,7 +74,7 @@ impl InterruptDescriptorTable { /// - `base_address`: A virtual address that the IDT will be placed on. pub fn init( uninit: &'static mut MaybeUninit<&mut Self>, - base_address: PhysicalAddress, + base_address: VirtualAddress, ) { let mut gdt_register: MaybeUninit = MaybeUninit::uninit(); From 399066f5fb9db2a8a8d8214c90b80fcbe9795ba1 Mon Sep 17 00:00:00 2001 From: sagi Date: Wed, 21 Jan 2026 00:47:29 +0200 Subject: [PATCH 44/78] added some function templates that should be implemented. 
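
Editor's note (not part of the original commit): the most interesting template this patch adds is the `unsafe impl Allocator for SlabCache` stub. Once `allocate`/`deallocate` are filled in (the series later moves the impl onto `SlabAllocator` itself), any allocator-aware container can borrow the kernel allocator directly. A minimal sketch of that end state, assuming nightly's `allocator_api` feature and the `SLAB_ALLOCATOR` static from this series:

```rust
#![feature(allocator_api)]
extern crate alloc;
use alloc::{boxed::Box, vec::Vec};

fn sketch() {
    // `new_in` routes storage through `Allocator::allocate`; the matching
    // `deallocate` runs when the container is dropped.
    let boxed = Box::new_in(0u64, unsafe { &SLAB_ALLOCATOR });
    let mut bytes: Vec<u8, _> = Vec::new_in(unsafe { &SLAB_ALLOCATOR });
    bytes.push(42);
    drop(boxed);
}
```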
--- .vscode/settings.json | 6 ++ kernel/src/main.rs | 22 ++----- kernel/src/memory/allocators/buddy.rs | 57 +++++-------------- kernel/src/memory/allocators/extensions.rs | 11 +++- kernel/src/memory/allocators/slab/cache.rs | 18 ++++++ .../src/memory/allocators/slab/descriptor.rs | 2 +- kernel/src/memory/allocators/slab/macros.rs | 6 +- kernel/src/memory/page_descriptor.rs | 45 ++++++++++++++- 8 files changed, 102 insertions(+), 65 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index a30e23b..52df39e 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -59,6 +59,7 @@ "fbss", "FDDI", "firewire", + "freelist", "FXSR", "Gameport", "GPIB", @@ -69,6 +70,7 @@ "Hotplug", "hpcp", "HPET", + "ilog", "infe", "infs", "inval", @@ -78,6 +80,7 @@ "ipms", "IRDA", "ISDN", + "kmalloc", "lctrl", "Learnix", "lgdt", @@ -85,6 +88,7 @@ "lshift", "mdat", "metavar", + "mmap", "MOVBE", "mpsp", "mpss", @@ -92,6 +96,7 @@ "MTRR", "Multiport", "nomem", + "nonmax", "nostack", "notif", "NVME", @@ -106,6 +111,7 @@ "PCLMUL", "PDCM", "PDPT", + "peekable", "PICMG", "PICPI", "PIIX", diff --git a/kernel/src/main.rs b/kernel/src/main.rs index cb6a3c7..7da1b28 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -38,11 +38,12 @@ use crate::{ buddy::BUDDY_ALLOCATOR, extensions::PageTableExt, slab::{ - Generic512, SLAB_ALLOCATOR, descriptor::SlabDescriptor, + Generic512, Generic8192, SLAB_ALLOCATOR, + descriptor::SlabDescriptor, }, }, memory_map::{MemoryMap, MemoryRegion, parse_map}, - page_descriptor::{Unassigned, pages_init}, + page_descriptor::{PAGES, Unassigned, UnassignedPage, pages_init}, }, }; @@ -73,14 +74,13 @@ pub unsafe extern "C" fn _start() -> ! { println!("{}", MemoryMap(parsed_memory_map!())); pages_init(MemoryMap(parsed_memory_map!())); - unsafe { BUDDY_ALLOCATOR.init(MemoryMap(parsed_memory_map!())) }; + unsafe { BUDDY_ALLOCATOR.init(MemoryMap(parsed_memory_map!()), 0) }; let last = MemoryMap(parsed_memory_map!()).last().unwrap(); PageTable::current_table_mut() .map_physical_memory((last.base_address + last.length) as usize); - okprintln!("Allocator Initialized"); - println!("Address: {:x?}", unsafe { alloc_pages!(1).translate() }); + okprintln!("Initialized buddy allocator"); unsafe { InterruptDescriptorTable::init( &mut IDT, @@ -103,17 +103,7 @@ pub unsafe extern "C" fn _start() -> ! 
{ } unsafe { SLAB_ALLOCATOR.init() } - - println!("{:?}", unsafe { - SLAB_ALLOCATOR.slab_of::().as_ref() - }); - - let generic512 = unsafe { SLAB_ALLOCATOR.kmalloc::() }; - println!("{:?}", generic512); - - println!("{:?}", unsafe { - SLAB_ALLOCATOR.slab_of::().as_ref() - }); + okprintln!("Initialized slab allocator"); // panic!("") // let mut pci_devices = pci::scan_pci();
diff --git a/kernel/src/memory/allocators/buddy.rs b/kernel/src/memory/allocators/buddy.rs index fbf3bbe..9b8c8d6 100644
--- a/kernel/src/memory/allocators/buddy.rs
+++ b/kernel/src/memory/allocators/buddy.rs
@@ -2,19 +2,14 @@ use core::ptr::{self, NonNull}; use common::{ address_types::PhysicalAddress, - constants::REGULAR_PAGE_SIZE, - enums::{BUDDY_MAX_ORDER, BuddyOrder, Color, MemoryRegionType}, + enums::{BUDDY_MAX_ORDER, BuddyOrder, MemoryRegionType}, }; use cpu_utils::structures::paging::PageTable; -use crate::{ - drivers::vga_display::color_code::ColorCode, - memory::{ - allocators::buddy::meta::BuddyBlockMeta, - memory_map::{MemoryRegion, ParsedMemoryMap}, - page_descriptor::{PAGES, Page, Unassigned, UnassignedPage}, - }, - print, println, +use crate::memory::{ + allocators::buddy::meta::BuddyBlockMeta, + memory_map::ParsedMemoryMap, + page_descriptor::{PAGES, Unassigned, UnassignedPage}, }; pub mod meta;
@@ -83,7 +78,9 @@ impl BuddyAllocator { Some(lhs) } - pub fn merge(&self, page: NonNull) {} + pub fn merge_until_max(&self, page: NonNull) { + todo!() + } pub fn alloc_table(&mut self) -> &'static mut PageTable { unsafe {
@@ -96,7 +93,11 @@ impl BuddyAllocator { } } - pub fn init(&'static mut self, map: ParsedMemoryMap) { + /// `code_end` should be the end address of the code. + /// + /// This function will not place pages that hold addresses in the + /// range `0..code_end` into the free list. + pub fn init(&'static mut self, map: ParsedMemoryMap, code_end: usize) { for area in map .iter() .filter(|a| a.region_type == MemoryRegionType::Usable)
@@ -117,10 +118,6 @@ impl BuddyAllocator { ) .unwrap(); - println!("Start: {}, end: {}", start, end); - - println!("{:?}", largest_order); - let curr = unsafe { &mut PAGES[start] }; let next = unsafe { &mut PAGES[start + ((1 << largest_order as usize) - 1)] };
@@ -137,34 +134,6 @@ impl BuddyAllocator { start += 1 << largest_order as usize; } } - - for (i, meta) in self.freelist.iter().enumerate() { - let mut next = meta.next; - - if next.is_some() { - println!("Order: {:?}", i ; color = ColorCode::new(Color::Blue, Color::Black)); - } else { - continue; - } - - while let Some(node) = next { - print!("{:?} -> ", node); - unsafe { next = node.as_ref().buddy_meta.next }; - } - - println!(""); - } - // Allocate initial MB - - // Allocate pages array - // let mem_map_size_pages = unsafe { - // (PAGES.len() * size_of::()) / - // REGULAR_PAGE_SIZE }; - // println!("Mem map pages total: {}", mem_map_size_pages); - // println!( - // "Mem Map allocation: {:x?}", - // self.alloc_pages(256 + mem_map_size_pages) - // ); } } #[macro_export]
diff --git a/kernel/src/memory/allocators/extensions.rs b/kernel/src/memory/allocators/extensions.rs index 3972fa4..d8d1a46 100644
--- a/kernel/src/memory/allocators/extensions.rs
+++ b/kernel/src/memory/allocators/extensions.rs
@@ -6,6 +6,7 @@ use common::{ }, enums::{PageSize, PageTableLevel}, error::EntryError, + late_init::LateInit, }; use cpu_utils::structures::paging::{ PageEntryFlags, PageTable, PageTableEntry,
@@ -16,7 +17,9 @@ use strum::VariantArray; use common::error::TableError; use cpu_utils::structures::paging::EntryIndex; -use
crate::memory::allocators::buddy::BUDDY_ALLOCATOR; +use crate::memory::{ + allocators::buddy::BUDDY_ALLOCATOR, page_descriptor::PageMap, +}; #[ext] pub impl PhysicalAddress {
@@ -253,3 +256,9 @@ pub impl PageSize { } } } + +#[ext] +pub impl PageMap { + /// Reallocates the page array on the buddy allocator. + fn reallocate(init: &'static mut LateInit) {} +}
diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs index c38a983..9f24e8e 100644
--- a/kernel/src/memory/allocators/slab/cache.rs
+++ b/kernel/src/memory/allocators/slab/cache.rs
@@ -2,6 +2,7 @@ use super::descriptor::SlabDescriptor; use super::traits::{SlabCacheConstructor, SlabPosition}; use crate::memory::allocators::slab::SLAB_ALLOCATOR; use crate::memory::page_descriptor::Unassigned; +use core::alloc::{Allocator, GlobalAlloc}; use core::ptr::NonNull; #[derive(Clone, Debug)]
@@ -95,3 +96,20 @@ impl SlabCacheConstructor for SlabCache> { } } } + +unsafe impl Allocator for SlabCache { + fn allocate( + &self, + layout: core::alloc::Layout, + ) -> Result, core::alloc::AllocError> { + todo!() + } + + unsafe fn deallocate( + &self, + ptr: core::ptr::NonNull, + layout: core::alloc::Layout, + ) { + todo!() + } +}
diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs index 5a43cba..6acc5ff 100644
--- a/kernel/src/memory/allocators/slab/descriptor.rs
+++ b/kernel/src/memory/allocators/slab/descriptor.rs
@@ -32,7 +32,7 @@ impl SlabDescriptor { order: usize, next: Option>>, ) -> SlabDescriptor { - let address = unsafe { alloc_pages!(1 << order) }; + let address = unsafe { alloc_pages!(1 << order).translate() }; let mut objects = unsafe { NonNull::slice_from_raw_parts( NonNull::new_unchecked(
diff --git a/kernel/src/memory/allocators/slab/macros.rs b/kernel/src/memory/allocators/slab/macros.rs index 39de3b6..0e5f04d 100644
--- a/kernel/src/memory/allocators/slab/macros.rs
+++ b/kernel/src/memory/allocators/slab/macros.rs
@@ -50,10 +50,14 @@ macro_rules! define_slab_system { let index = <$t>::POSITION; self.slabs[index].write(SlabCache::<$t>::new( - size_of::<$t>().next_multiple_of(REGULAR_PAGE_SIZE) / REGULAR_PAGE_SIZE + (size_of::<$t>().next_multiple_of(REGULAR_PAGE_SIZE) / REGULAR_PAGE_SIZE) - 1 ).as_unassigned().clone()); )* } } } } + +// TODO: implement reverse lookup with an enum that will be generated +// automatically, and check the generated code on Compiler Explorer; if it +// is interesting, write about it in the book
diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index 73486d0..58def62 100644
--- a/kernel/src/memory/page_descriptor.rs
+++ b/kernel/src/memory/page_descriptor.rs
@@ -4,7 +4,10 @@ use crate::{ memory::{ allocators::{ buddy::meta::BuddyBlockMeta, - slab::{cache::SlabCache, traits::SlabPosition}, + slab::{ + cache::SlabCache, descriptor::SlabDescriptor, + traits::SlabPosition, + }, }, memory_map::ParsedMemoryMap, },
@@ -20,6 +23,8 @@ use common::{ write_volatile, }; +use core::ops::{Deref, DerefMut}; + #[derive(Default, Clone, Copy, Debug)] pub struct Unassigned;
@@ -52,7 +57,7 @@ pub static mut PAGES: LateInit<&'static mut [UnassignedPage]> = #[derive(Debug)] pub struct Page { - pub owner: Option>>, + pub owner: Option>>, pub buddy_meta: BuddyBlockMeta, }
@@ -120,6 +125,13 @@ impl Page { Some((NonNull::from_mut(self), buddy)) } + /// Try to merge this page with its buddy.
+ /// + /// Note: This function should not be recursive + pub unsafe fn merge(&self) { + todo!("") + } + pub const fn index_of_page(address: PhysicalAddress) -> usize { address.as_usize() / REGULAR_PAGE_SIZE } @@ -154,3 +166,32 @@ pub fn pages_init(mmap: ParsedMemoryMap) -> usize { PAGES.as_ptr_range().end as usize } } + +pub struct PageMap { + map: &'static mut [UnassignedPage], + // lock: todo!(), +} + +impl Deref for PageMap { + type Target = [UnassignedPage]; + + fn deref(&self) -> &Self::Target { + self.map + } +} + +impl DerefMut for PageMap { + fn deref_mut(&mut self) -> &mut Self::Target { + self.map + } +} + +impl PageMap { + /// Initializes all pages on a constant address. + pub fn init( + uninit: &'static mut LateInit, + mmap: ParsedMemoryMap, + ) { + todo!() + } +} From 8088b9adb79fe963371cd3db511845661f8686f3 Mon Sep 17 00:00:00 2001 From: sagi Date: Thu, 22 Jan 2026 00:17:45 +0200 Subject: [PATCH 45/78] added a macro to generate generic struct and implement the generic trait on them --- learnix-macros/src/lib.rs | 46 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/learnix-macros/src/lib.rs b/learnix-macros/src/lib.rs index ed1cbb1..86cf32c 100644 --- a/learnix-macros/src/lib.rs +++ b/learnix-macros/src/lib.rs @@ -1,7 +1,9 @@ use flag::FlagInput; use proc_macro::TokenStream; use quote::{format_ident, quote}; -use syn::{DeriveInput, parse_macro_input}; +use syn::{ + DeriveInput, LitInt, Token, parse_macro_input, punctuated::Punctuated, +}; mod flag; @@ -237,3 +239,45 @@ pub fn rw1_flag(input: TokenStream) -> TokenStream { expanded.into() } // ANCHOR_END: rw1_flag + +#[proc_macro] +pub fn generate_generics(input: TokenStream) -> TokenStream { + // Parse the input as a comma-separated list of integers: 8, 16, 32... + let parser = Punctuated::::parse_terminated; + let input = parse_macro_input!(input with parser); + + let mut expanded = quote! {}; + + // initial range for the first item + let mut last_size: usize = 0; + + for lit in input { + let generic_size: usize = lit + .base10_parse() + .expect("Invalid integer format, expected base10"); + + let generic_name = format_ident!("Generic{}", generic_size); + + // minimum size of 8 bytes (usize on 64 bit). + let array_size = generic_size / 8; + + let start = last_size; + let end = generic_size; + + let struct_def = quote! 
{ + #[derive(Debug, Clone, Copy)] + pub struct #generic_name(pub [usize; #array_size]); + + impl Generic for #generic_name { + fn size(&self) -> usize { #generic_size } + const START: usize = #start; + const END: usize = #end; + } }; + last_size = generic_size + 1; + expanded.extend(struct_def); + } + + TokenStream::from(expanded) +}
From 4adc63a2d7438333bce087f9470e3f698a0bd154 Mon Sep 17 00:00:00 2001
From: sagi
Date: Thu, 22 Jan 2026 00:18:13 +0200
Subject: [PATCH 46/78] added macro implementation of the generic structs and traits
--- kernel/src/memory/allocators/slab.rs | 191 ++++++++++++++++++++++----- 1 file changed, 158 insertions(+), 33 deletions(-)
diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index 73f391b..bf73354 100644
--- a/kernel/src/memory/allocators/slab.rs
+++ b/kernel/src/memory/allocators/slab.rs
@@ -3,45 +3,38 @@ pub mod descriptor; pub mod macros; pub mod traits; +use common::address_types::VirtualAddress; +use learnix_macros::generate_generics; + use crate::{ define_slab_system, memory::{ - allocators::slab::{ - cache::SlabCache, - descriptor::SlabDescriptor, - traits::{SlabCacheConstructor, SlabPosition}, + allocators::{ + extensions::VirtualAddressExt, + slab::{ + cache::SlabCache, + descriptor::SlabDescriptor, + traits::{SlabCacheConstructor, SlabPosition}, + }, }, - page_descriptor::Unassigned, + page_descriptor::{PAGES, Unassigned, UnassignedPage}, }, }; -use core::ptr::NonNull; - -#[derive(Debug)] -pub struct Generic8(pub usize); -#[derive(Debug)] -pub struct Generic16(pub [usize; 2]); -#[derive(Debug)] -pub struct Generic32(pub [usize; 4]); -#[derive(Debug)] -pub struct Generic64(pub [usize; 8]); -#[derive(Debug)] -pub struct Generic96(pub [usize; 12]); -#[derive(Debug)] -pub struct Generic128(pub [usize; 16]); -#[derive(Debug)] -pub struct Generic192(pub [usize; 24]); -#[derive(Debug)] -pub struct Generic256(pub [usize; 32]); -#[derive(Debug)] -pub struct Generic512(pub [usize; 64]); -#[derive(Debug)] -pub struct Generic1024(pub [usize; 128]); -#[derive(Debug)] -pub struct Generic2048(pub [usize; 256]); -#[derive(Debug)] -pub struct Generic4096(pub [usize; 512]); -#[derive(Debug)] -pub struct Generic8192(pub [usize; 1024]); +use core::{ + alloc::{AllocError, Allocator}, + ptr::NonNull, +}; + +pub trait Generic { + const START: usize; + const END: usize; + + fn size(&self) -> usize; +} + +generate_generics!( + 8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096, 8192 ); define_slab_system!( SlabDescriptor,
@@ -71,6 +64,21 @@ impl SlabAllocator { let mut slab = self.slab_of::(); unsafe { slab.as_mut().alloc() } } + + pub fn kfree(&self, ptr: NonNull) { + let index = UnassignedPage::index_of_page(unsafe { + VirtualAddress::new_unchecked(ptr.as_ptr() as usize) + .translate() + }); + + let page = unsafe { PAGES[index].assign::().as_ref() }; + + if let Some(mut descriptor) = page.owner { + unsafe { descriptor.as_mut().dealloc(ptr) }; + } else { + panic!("Object is freed from a page that has no owner!") + } + } } #[extend::ext]
@@ -80,6 +88,123 @@ impl NonNull> { } } +#[extend::ext] +pub impl NonNull { + fn into_u8(&self) -> NonNull<[u8]> { + unsafe { + let data = NonNull::new_unchecked(self.as_ptr() as *mut u8); + let size = self.as_ref().size(); + NonNull::slice_from_raw_parts(data, size) + } + } + + fn from_u8(data: NonNull) -> NonNull { + unsafe { NonNull::new_unchecked(data.as_ptr() as *mut T) } + } +} + +unsafe impl Allocator for SlabAllocator { + fn allocate( + &self, + layout:
core::alloc::Layout, + ) -> Result, core::alloc::AllocError> { + if layout.size() < layout.align() { + return Err(AllocError); + } + + match layout.size() { + Generic8::START..=Generic8::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic16::START..=Generic16::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic32::START..=Generic32::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic64::START..=Generic64::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic96::START..=Generic96::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic128::START..=Generic128::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic192::START..=Generic192::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic256::START..=Generic256::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic512::START..=Generic512::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic1024::START..=Generic1024::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic2048::START..=Generic2048::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic4096::START..=Generic4096::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic8192::START..=Generic8192::END => { + Ok(self.kmalloc::().into_u8()) + } + _ => Err(AllocError), + } + } + + unsafe fn deallocate( + &self, + ptr: core::ptr::NonNull, + layout: core::alloc::Layout, + ) { + match layout.size() { + Generic8::START..=Generic8::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic16::START..=Generic16::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic32::START..=Generic32::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic64::START..=Generic64::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic96::START..=Generic96::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic128::START..=Generic128::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic192::START..=Generic192::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic256::START..=Generic256::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic512::START..=Generic512::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic1024::START..=Generic1024::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic2048::START..=Generic2048::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic4096::START..=Generic4096::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic8192::START..=Generic8192::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + _ => unreachable!(), + } + } +} unsafe impl Send for SlabDescriptor {} unsafe impl Sync for SlabDescriptor {} unsafe impl Send for SlabCache {} From 3224d6455acd90aa304f364d2f05e3c31a71a651 Mon Sep 17 00:00:00 2001 From: sagi Date: Thu, 22 Jan 2026 00:19:05 +0200 Subject: [PATCH 47/78] changed implementation of the allocator trait to the slab allocator struct --- kernel/src/memory/allocators/slab/cache.rs | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs index 9f24e8e..c38a983 100644 --- a/kernel/src/memory/allocators/slab/cache.rs +++ b/kernel/src/memory/allocators/slab/cache.rs @@ -2,7 +2,6 @@ use super::descriptor::SlabDescriptor; use super::traits::{SlabCacheConstructor, SlabPosition}; use crate::memory::allocators::slab::SLAB_ALLOCATOR; use crate::memory::page_descriptor::Unassigned; -use core::alloc::{Allocator, GlobalAlloc}; use core::ptr::NonNull; #[derive(Clone, Debug)] @@ -96,20 +95,3 @@ impl SlabCacheConstructor for SlabCache> { } } } - -unsafe impl Allocator for 
SlabCache { - fn allocate( - &self, - layout: core::alloc::Layout, - ) -> Result, core::alloc::AllocError> { - todo!() - } - - unsafe fn deallocate( - &self, - ptr: core::ptr::NonNull, - layout: core::alloc::Layout, - ) { - todo!() - } -} From 50336de1ee475274c57fe948b61f08938c426f95 Mon Sep 17 00:00:00 2001 From: sagi Date: Thu, 22 Jan 2026 00:19:18 +0200 Subject: [PATCH 48/78] added a note on dealloc function --- kernel/src/memory/allocators/slab/descriptor.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs index 6acc5ff..38c0fc3 100644 --- a/kernel/src/memory/allocators/slab/descriptor.rs +++ b/kernel/src/memory/allocators/slab/descriptor.rs @@ -78,8 +78,11 @@ impl SlabDescriptor { unsafe { NonNull::from_mut(&mut preallocated.allocated) } } - pub unsafe fn dealloc(&mut self, ptr: *const T) { - let freed_index = (ptr.addr() - self.objects.as_ptr().addr()) + pub unsafe fn dealloc(&mut self, ptr: NonNull) { + todo!("Remember to call drop on the item"); + + let freed_index = (ptr.as_ptr().addr() + - self.objects.as_ptr().addr()) / size_of::>(); unsafe { From 301171e58b0685a345ec306eeb73e3ccc4c47e22 Mon Sep 17 00:00:00 2001 From: sagi Date: Thu, 22 Jan 2026 18:28:01 +0200 Subject: [PATCH 49/78] changed constant address into the one saved in the static because it will be reallocated in the future --- .vscode/settings.json | 1 + kernel/src/memory/allocators/extensions.rs | 4 ++-- kernel/src/memory/page_descriptor.rs | 16 +++++++--------- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 52df39e..2a818f2 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -80,6 +80,7 @@ "ipms", "IRDA", "ISDN", + "kfree", "kmalloc", "lctrl", "Learnix", diff --git a/kernel/src/memory/allocators/extensions.rs b/kernel/src/memory/allocators/extensions.rs index d8d1a46..b79e292 100644 --- a/kernel/src/memory/allocators/extensions.rs +++ b/kernel/src/memory/allocators/extensions.rs @@ -1,8 +1,8 @@ use common::{ address_types::{PhysicalAddress, VirtualAddress}, constants::{ - BIG_PAGE_SIZE, HUGE_PAGE_SIZE, PAGE_ALLOCATOR_OFFSET, - PAGE_DIRECTORY_ENTRIES, PHYSICAL_MEMORY_OFFSET, + BIG_PAGE_SIZE, HUGE_PAGE_SIZE, PAGE_DIRECTORY_ENTRIES, + PHYSICAL_MEMORY_OFFSET, }, enums::{PageSize, PageTableLevel}, error::EntryError, diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page_descriptor.rs index 58def62..19f8cdd 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page_descriptor.rs @@ -4,10 +4,7 @@ use crate::{ memory::{ allocators::{ buddy::meta::BuddyBlockMeta, - slab::{ - cache::SlabCache, descriptor::SlabDescriptor, - traits::SlabPosition, - }, + slab::{descriptor::SlabDescriptor, traits::SlabPosition}, }, memory_map::ParsedMemoryMap, }, @@ -15,9 +12,7 @@ use crate::{ }; use common::{ address_types::PhysicalAddress, - constants::{ - PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, - }, + constants::{PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_SIZE}, enums::BuddyOrder, late_init::LateInit, write_volatile, @@ -61,6 +56,8 @@ pub struct Page { pub buddy_meta: BuddyBlockMeta, } +pub struct + impl Page { pub fn as_unassigned(&self) -> &UnassignedPage { let ptr = self as *const _ as usize; @@ -74,7 +71,7 @@ impl Page { pub fn physical_address(&self) -> PhysicalAddress { let index = (self.as_unassigned() as *const _ as usize - - PAGE_ALLOCATOR_OFFSET) + - unsafe { 
PAGES.as_ptr().addr() }) / size_of::(); unsafe { @@ -111,8 +108,9 @@ impl Page { .unwrap(); write_volatile!(self.buddy_meta.order, Some(prev_order)); + let index = ((self.as_unassigned() as *const _ as usize - - PAGE_ALLOCATOR_OFFSET) + - unsafe { PAGES.as_ptr().addr() }) / size_of::()) + (1 << prev_order as usize); From 7e95ddad33ccff048cc9639d531b0202d903316f Mon Sep 17 00:00:00 2001 From: sagi Date: Fri, 23 Jan 2026 03:37:59 +0200 Subject: [PATCH 50/78] moved page map and page meta to their own modules --- kernel/src/memory/allocators/buddy.rs | 20 ++-- kernel/src/memory/allocators/buddy/meta.rs | 53 --------- kernel/src/memory/allocators/extensions.rs | 2 +- kernel/src/memory/mod.rs | 3 +- .../memory/{page_descriptor.rs => page.rs} | 108 ++++++------------ kernel/src/memory/page/map.rs | 34 ++++++ kernel/src/memory/page/meta.rs | 65 +++++++++++ 7 files changed, 146 insertions(+), 139 deletions(-) delete mode 100644 kernel/src/memory/allocators/buddy/meta.rs rename kernel/src/memory/{page_descriptor.rs => page.rs} (61%) create mode 100644 kernel/src/memory/page/map.rs create mode 100644 kernel/src/memory/page/meta.rs diff --git a/kernel/src/memory/allocators/buddy.rs b/kernel/src/memory/allocators/buddy.rs index 9b8c8d6..cfae213 100644 --- a/kernel/src/memory/allocators/buddy.rs +++ b/kernel/src/memory/allocators/buddy.rs @@ -7,19 +7,17 @@ use common::{ use cpu_utils::structures::paging::PageTable; use crate::memory::{ - allocators::buddy::meta::BuddyBlockMeta, memory_map::ParsedMemoryMap, - page_descriptor::{PAGES, Unassigned, UnassignedPage}, + page::{PAGES, UnassignedPage, meta::BuddyPageMeta}, + unassigned::Unassigned, }; -pub mod meta; - pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { - freelist: [BuddyBlockMeta::default(); BUDDY_MAX_ORDER], + freelist: [const { BuddyPageMeta::default() }; BUDDY_MAX_ORDER], }; pub struct BuddyAllocator { - freelist: [BuddyBlockMeta; BUDDY_MAX_ORDER], + freelist: [BuddyPageMeta; BUDDY_MAX_ORDER], } impl BuddyAllocator { @@ -122,10 +120,12 @@ impl BuddyAllocator { let next = unsafe { &mut PAGES[start + ((1 << largest_order as usize) - 1)] }; - - curr.buddy_meta.next = Some(NonNull::from_mut(next)); - curr.buddy_meta.prev = prev; - curr.buddy_meta.order = Some(largest_order); + unsafe { + (*curr.meta.buddy).next = + Some(NonNull::from_mut(next)); + (*curr.meta.buddy).prev = prev; + (*curr.meta.buddy).order = Some(largest_order); + } prev = Some(NonNull::from_mut(curr)); self.freelist[largest_order as usize] diff --git a/kernel/src/memory/allocators/buddy/meta.rs b/kernel/src/memory/allocators/buddy/meta.rs deleted file mode 100644 index 31a3621..0000000 --- a/kernel/src/memory/allocators/buddy/meta.rs +++ /dev/null @@ -1,53 +0,0 @@ -use core::ptr::NonNull; - -use common::enums::BuddyOrder; - -use crate::memory::{ - allocators::slab::traits::SlabPosition, - page_descriptor::{ - NonNullPageTExt, NonNullPageUnassignedExt, Page, UnassignedPage, - }, -}; - -#[derive(Clone, Copy, Debug)] -pub struct BuddyBlockMeta { - pub next: Option>, - pub prev: Option>, - pub order: Option, -} - -impl const Default for BuddyBlockMeta { - fn default() -> Self { - Self { - next: None, - prev: None, - order: None, - } - } -} - -impl BuddyBlockMeta { - pub fn detach(&mut self) -> Option>> { - let detached = self.next?; // None if there is no page to detach - - self.next = unsafe { detached.as_ref().buddy_meta.next }; - - if let Some(mut next) = self.next { - unsafe { next.as_mut().buddy_meta.prev = None } - } - - Some(detached.assign::()) - } - - pub fn 
attach(&mut self, mut p: NonNull>) { - unsafe { p.as_mut().buddy_meta.next = self.next }; - - if let Some(mut next) = self.next { - unsafe { - next.as_mut().buddy_meta.prev = Some(p.as_unassigned()) - }; - } - - self.next = Some(p.as_unassigned()) - } -} diff --git a/kernel/src/memory/allocators/extensions.rs b/kernel/src/memory/allocators/extensions.rs index b79e292..f1f9000 100644 --- a/kernel/src/memory/allocators/extensions.rs +++ b/kernel/src/memory/allocators/extensions.rs @@ -18,7 +18,7 @@ use common::error::TableError; use cpu_utils::structures::paging::EntryIndex; use crate::memory::{ - allocators::buddy::BUDDY_ALLOCATOR, page_descriptor::PageMap, + allocators::buddy::BUDDY_ALLOCATOR, page::map::PageMap, }; #[ext] diff --git a/kernel/src/memory/mod.rs b/kernel/src/memory/mod.rs index 3f8cfb0..eba83e0 100644 --- a/kernel/src/memory/mod.rs +++ b/kernel/src/memory/mod.rs @@ -1,3 +1,4 @@ pub mod allocators; pub mod memory_map; -pub mod page_descriptor; +pub mod page; +pub mod unassigned; diff --git a/kernel/src/memory/page_descriptor.rs b/kernel/src/memory/page.rs similarity index 61% rename from kernel/src/memory/page_descriptor.rs rename to kernel/src/memory/page.rs index 19f8cdd..22f3cbf 100644 --- a/kernel/src/memory/page_descriptor.rs +++ b/kernel/src/memory/page.rs @@ -1,12 +1,13 @@ -use core::ptr::NonNull; +use core::{mem::ManuallyDrop, ptr::NonNull}; use crate::{ memory::{ - allocators::{ - buddy::meta::BuddyBlockMeta, - slab::{descriptor::SlabDescriptor, traits::SlabPosition}, + allocators::slab::{ + descriptor::SlabDescriptor, traits::SlabPosition, }, memory_map::ParsedMemoryMap, + page::meta::{BuddyPageMeta, PageMeta}, + unassigned::{AssignSlab, UnassignSlab, Unassigned}, }, println, }; @@ -18,22 +19,25 @@ use common::{ write_volatile, }; -use core::ops::{Deref, DerefMut}; - -#[derive(Default, Clone, Copy, Debug)] -pub struct Unassigned; +pub mod map; +pub mod meta; pub type UnassignedPage = Page; -#[extend::ext] -pub impl NonNull> { +pub static mut PAGES: LateInit<&'static mut [UnassignedPage]> = + LateInit::uninit(); + +impl AssignSlab for NonNull> { + type Target = NonNull>; + fn assign(&self) -> NonNull> { unsafe { NonNull::new_unchecked(self.as_ptr() as *mut Page) } } } -#[extend::ext] -pub impl NonNull> { +impl UnassignSlab for NonNull> { + type Target = NonNull>; + fn as_unassigned(&self) -> NonNull> { unsafe { NonNull::new_unchecked(self.as_ptr() as *mut Page) @@ -41,36 +45,14 @@ pub impl NonNull> { } } -impl UnassignedPage { - pub fn assign(&self) -> NonNull> { - unsafe { NonNull::new_unchecked(self as *const _ as *mut Page) } - } -} - -pub static mut PAGES: LateInit<&'static mut [UnassignedPage]> = - LateInit::uninit(); - -#[derive(Debug)] pub struct Page { pub owner: Option>>, - pub buddy_meta: BuddyBlockMeta, + pub meta: PageMeta, } -pub struct - impl Page { - pub fn as_unassigned(&self) -> &UnassignedPage { - let ptr = self as *const _ as usize; - unsafe { &*(ptr as *const UnassignedPage) } - } - - pub fn as_unassigned_mut(&mut self) -> &mut UnassignedPage { - let ptr = self as *const _ as usize; - unsafe { &mut *(ptr as *mut UnassignedPage) } - } - pub fn physical_address(&self) -> PhysicalAddress { - let index = (self.as_unassigned() as *const _ as usize + let index = (self as *const _ as usize - unsafe { PAGES.as_ptr().addr() }) / size_of::(); @@ -80,7 +62,7 @@ impl Page { } pub fn get_buddy(&self) -> Option<*mut Page> { - let order = self.buddy_meta.order?; + let order = unsafe { self.meta.buddy.order? 
}; if let BuddyOrder::MAX = order { None } else { @@ -103,22 +85,27 @@ impl Page { ) -> Option<(NonNull>, NonNull>)> { // Reduce it's order to find it's order. - let prev_order = - BuddyOrder::try_from(self.buddy_meta.order? as u8 - 1) - .unwrap(); + let prev_order = BuddyOrder::try_from( + unsafe { self.meta.buddy.order? } as u8 - 1, + ) + .unwrap(); - write_volatile!(self.buddy_meta.order, Some(prev_order)); + write_volatile!((*self.meta.buddy).order, Some(prev_order)); - let index = ((self.as_unassigned() as *const _ as usize + let index = ((self as *const _ as usize - unsafe { PAGES.as_ptr().addr() }) / size_of::()) + (1 << prev_order as usize); // Find it's half - let mut buddy = unsafe { PAGES[index].assign::() }; + let mut buddy = + unsafe { NonNull::from_mut(&mut PAGES[index]).assign::() }; // Set the order of the buddy. - write_volatile!(buddy.as_mut().buddy_meta.order, Some(prev_order)); + write_volatile!( + (*buddy.as_mut().meta.buddy).order, + Some(prev_order) + ); Some((NonNull::from_mut(self), buddy)) } @@ -156,7 +143,9 @@ pub fn pages_init(mmap: ParsedMemoryMap) -> usize { core::ptr::write_volatile( p as *mut UnassignedPage, UnassignedPage { - buddy_meta: BuddyBlockMeta::default(), + meta: PageMeta { + buddy: ManuallyDrop::new(BuddyPageMeta::default()), + }, owner: None, }, ); @@ -164,32 +153,3 @@ pub fn pages_init(mmap: ParsedMemoryMap) -> usize { PAGES.as_ptr_range().end as usize } } - -pub struct PageMap { - map: &'static mut [UnassignedPage], - // lock: todo!(), -} - -impl Deref for PageMap { - type Target = [UnassignedPage]; - - fn deref(&self) -> &Self::Target { - self.map - } -} - -impl DerefMut for PageMap { - fn deref_mut(&mut self) -> &mut Self::Target { - self.map - } -} - -impl PageMap { - /// Initializes all pages on a constant address. - pub fn init( - uninit: &'static mut LateInit, - mmap: ParsedMemoryMap, - ) { - todo!() - } -} diff --git a/kernel/src/memory/page/map.rs b/kernel/src/memory/page/map.rs new file mode 100644 index 0000000..4af3978 --- /dev/null +++ b/kernel/src/memory/page/map.rs @@ -0,0 +1,34 @@ +use core::ops::{Deref, DerefMut}; + +use common::late_init::LateInit; + +use crate::memory::{memory_map::ParsedMemoryMap, page::UnassignedPage}; + +pub struct PageMap { + map: &'static mut [UnassignedPage], + // lock: todo!(), +} + +impl Deref for PageMap { + type Target = [UnassignedPage]; + + fn deref(&self) -> &Self::Target { + self.map + } +} + +impl DerefMut for PageMap { + fn deref_mut(&mut self) -> &mut Self::Target { + self.map + } +} + +impl PageMap { + /// Initializes all pages on a constant address. 
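+ ///
+ /// Intended usage (editorial sketch; this mirrors the call that
+ /// `_start` makes once PATCH 52 wires the map up, with
+ /// `parsed_memory_map!` as defined there):
+ ///
+ /// ```rust,ignore
+ /// PageMap::init(unsafe { &mut PAGES }, MemoryMap(parsed_memory_map!()));
+ /// ```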
+ pub fn init( + uninit: &'static mut LateInit, + mmap: ParsedMemoryMap, + ) { + todo!() + } +} diff --git a/kernel/src/memory/page/meta.rs b/kernel/src/memory/page/meta.rs new file mode 100644 index 0000000..6757104 --- /dev/null +++ b/kernel/src/memory/page/meta.rs @@ -0,0 +1,65 @@ +use core::{mem::ManuallyDrop, ptr::NonNull}; + +use common::enums::BuddyOrder; + +use crate::memory::{ + allocators::slab::{ + cache::SlabCache, descriptor::SlabDescriptor, traits::SlabPosition, + }, + page::{Page, UnassignedPage}, + unassigned::{AssignSlab, UnassignSlab, Unassigned}, +}; + +pub union PageMeta { + pub buddy: ManuallyDrop, + pub slab: ManuallyDrop>, +} + +#[derive(Debug)] +pub struct BuddyPageMeta { + pub next: Option>, + pub prev: Option>, + pub order: Option, +} + +impl const Default for BuddyPageMeta { + fn default() -> Self { + Self { + next: None, + prev: None, + order: None, + } + } +} + +impl BuddyPageMeta { + pub fn detach(&mut self) -> Option>> { + let detached = self.next?; // None if there is no page to detach + + self.next = unsafe { detached.as_ref().meta.buddy.next }; + + if let Some(mut next) = self.next { + unsafe { (*next.as_mut().meta.buddy).prev = None } + } + + Some(detached.assign::()) + } + + pub fn attach(&mut self, mut p: NonNull>) { + unsafe { (*p.as_mut().meta.buddy).next = self.next }; + + if let Some(mut next) = self.next { + unsafe { + (*next.as_mut().meta.buddy).prev = Some(p.as_unassigned()) + }; + } + + self.next = Some(p.as_unassigned()) + } +} + +#[derive(Debug)] +pub struct SlabPageMeta { + owner: NonNull>, + freelist: NonNull>, +} From cf4dc4ff362c863bfd8b91602020d6406c738d6c Mon Sep 17 00:00:00 2001 From: sagi Date: Fri, 23 Jan 2026 03:38:14 +0200 Subject: [PATCH 51/78] added assign and unassign traits for slabs --- .vscode/settings.json | 1 + kernel/src/main.rs | 14 +--- kernel/src/memory/allocators/slab.rs | 19 ++--- kernel/src/memory/allocators/slab/cache.rs | 4 +- .../src/memory/allocators/slab/descriptor.rs | 75 ++++++++++++------- kernel/src/memory/allocators/slab/macros.rs | 3 +- kernel/src/memory/allocators/slab/traits.rs | 2 +- kernel/src/memory/unassigned.rs | 16 ++++ 8 files changed, 81 insertions(+), 53 deletions(-) create mode 100644 kernel/src/memory/unassigned.rs diff --git a/.vscode/settings.json b/.vscode/settings.json index 2a818f2..8d53b99 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -167,6 +167,7 @@ "udma", "ufis", "UHCI", + "Unassignment", "Uninit", "USBPI", "virt", diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 7da1b28..15cc1cd 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -26,24 +26,18 @@ use core::{num::NonZero, panic::PanicInfo}; use crate::{ drivers::{ - ata::ahci::{HBAMemoryRegisters, IdentityPacketData}, interrupt_handlers, keyboard::{KEYBOARD, ps2_keyboard::Keyboard}, - pci::{self}, pic8259::{CascadedPIC, PIC}, vga_display::color_code::ColorCode, }, memory::{ allocators::{ - buddy::BUDDY_ALLOCATOR, - extensions::PageTableExt, - slab::{ - Generic512, Generic8192, SLAB_ALLOCATOR, - descriptor::SlabDescriptor, - }, + buddy::BUDDY_ALLOCATOR, extensions::PageTableExt, + slab::SLAB_ALLOCATOR, }, - memory_map::{MemoryMap, MemoryRegion, parse_map}, - page_descriptor::{PAGES, Unassigned, UnassignedPage, pages_init}, + memory_map::{MemoryMap, parse_map}, + page::pages_init, }, }; diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index bf73354..d52c4f1 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -12,12 
+12,12 @@ use crate::{ allocators::{ extensions::VirtualAddressExt, slab::{ - cache::SlabCache, - descriptor::SlabDescriptor, - traits::{SlabCacheConstructor, SlabPosition}, + cache::SlabCache, descriptor::SlabDescriptor, + traits::SlabPosition, }, }, - page_descriptor::{PAGES, Unassigned, UnassignedPage}, + page::{PAGES, UnassignedPage}, + unassigned::{AssignSlab, Unassigned}, }, }; use core::{ @@ -71,7 +71,9 @@ impl SlabAllocator { .translate() }); - let page = unsafe { PAGES[index].assign::().as_ref() }; + let page = unsafe { + NonNull::from_mut(&mut PAGES[index]).assign::().as_ref() + }; if let Some(mut descriptor) = page.owner { unsafe { descriptor.as_mut().dealloc(ptr) }; @@ -81,13 +83,6 @@ impl SlabAllocator { } } -#[extend::ext] -impl NonNull> { - fn assign(self) -> NonNull> { - unsafe { self.as_ref().assign::() } - } -} - #[extend::ext] pub impl NonNull { fn into_u8(&self) -> NonNull<[u8]> { diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs index c38a983..972621d 100644 --- a/kernel/src/memory/allocators/slab/cache.rs +++ b/kernel/src/memory/allocators/slab/cache.rs @@ -1,7 +1,7 @@ use super::descriptor::SlabDescriptor; use super::traits::{SlabCacheConstructor, SlabPosition}; use crate::memory::allocators::slab::SLAB_ALLOCATOR; -use crate::memory::page_descriptor::Unassigned; +use crate::memory::unassigned::{AssignSlab, Unassigned}; use core::ptr::NonNull; #[derive(Clone, Debug)] @@ -77,7 +77,7 @@ impl SlabCacheConstructor for SlabCache { SlabCache { buddy_order, - free: Some(unsafe { free.as_ref().assign::() }), + free: Some(free.assign::()), partial: None, full: None, } diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs index 38c0fc3..c668111 100644 --- a/kernel/src/memory/allocators/slab/descriptor.rs +++ b/kernel/src/memory/allocators/slab/descriptor.rs @@ -1,9 +1,13 @@ use super::traits::SlabPosition; -use crate::{alloc_pages, memory::page_descriptor::Unassigned}; +use crate::{ + alloc_pages, + memory::unassigned::{AssignSlab, UnassignSlab, Unassigned}, +}; use common::constants::REGULAR_PAGE_SIZE; use core::{ fmt::Debug, mem::{ManuallyDrop, size_of}, + num::{NonZero, NonZeroU16}, ptr::NonNull, }; use nonmax::NonMaxU16; @@ -23,10 +27,46 @@ impl Debug for PreallocatedObject { #[derive(Debug, Clone)] pub struct SlabDescriptor { pub next_free_idx: Option, + pub total_allocated: u16, pub objects: NonNull<[PreallocatedObject]>, pub next: Option>>, } +impl UnassignSlab for SlabDescriptor { + type Target = SlabDescriptor; + + fn as_unassigned(&self) -> Self::Target { + unsafe { + (*(self as *const SlabDescriptor + as *mut SlabDescriptor)) + .clone() + } + } +} + +impl AssignSlab for NonNull> { + type Target = + NonNull>; + + fn assign(&self) -> NonNull> { + unsafe { + NonNull::new_unchecked(self.as_ptr() as *mut SlabDescriptor) + } + } +} + +impl UnassignSlab for NonNull> { + type Target = NonNull>; + + fn as_unassigned(&self) -> Self::Target { + unsafe { + NonNull::new_unchecked( + self.as_ptr() as *mut SlabDescriptor + ) + } + } +} + impl SlabDescriptor { pub fn new( order: usize, @@ -59,6 +99,7 @@ impl SlabDescriptor { SlabDescriptor { next_free_idx: Some(unsafe { NonMaxU16::new_unchecked(0) }), + total_allocated: 0, objects, next, } @@ -75,6 +116,8 @@ impl SlabDescriptor { self.next_free_idx = unsafe { preallocated.next_free_idx }; + self.total_allocated += 1; + unsafe { NonNull::from_mut(&mut preallocated.allocated) } } @@ -91,30 +134,8 @@ impl SlabDescriptor { }; 
self.next_free_idx = unsafe { Some(NonMaxU16::new_unchecked(freed_index as u16)) }; - } - - pub fn as_unassigned(&self) -> &SlabDescriptor { - unsafe { - &*(self as *const _ as *const SlabDescriptor) - } - } - pub fn as_unassigned_mut( - &mut self, - ) -> &mut SlabDescriptor { - unsafe { - &mut *(self as *mut _ as *mut SlabDescriptor) - } - } -} - -impl SlabDescriptor { - pub fn assign(&self) -> NonNull> { - unsafe { - NonNull::new_unchecked( - self as *const _ as *mut SlabDescriptor, - ) - } + self.total_allocated -= 1; } } @@ -125,10 +146,10 @@ impl SlabDescriptor> { let mut descriptor = SlabDescriptor::>::new(order, None); - let mut ptr = descriptor.alloc(); + let mut self_allocation = descriptor.alloc(); - unsafe { *ptr.as_mut() = descriptor.as_unassigned().clone() } + unsafe { *self_allocation.as_mut() = descriptor.as_unassigned() } - unsafe { ptr.as_ref().assign::>() } + self_allocation.assign::>() } } diff --git a/kernel/src/memory/allocators/slab/macros.rs b/kernel/src/memory/allocators/slab/macros.rs index 0e5f04d..aefaff5 100644 --- a/kernel/src/memory/allocators/slab/macros.rs +++ b/kernel/src/memory/allocators/slab/macros.rs @@ -24,13 +24,14 @@ macro_rules! register_slabs { macro_rules! define_slab_system { ($($t:ty),* $(,)?) => { use common::constants::REGULAR_PAGE_SIZE; + use $crate::memory::allocators::slab::traits::SlabCacheConstructor; $crate::register_slabs!($($t),*); const COUNT: usize = [$(stringify!($t)),*].len(); pub struct SlabAllocator { - slabs: [common::late_init::LateInit>; COUNT] + slabs: [common::late_init::LateInit>; COUNT] } impl SlabAllocator { diff --git a/kernel/src/memory/allocators/slab/traits.rs b/kernel/src/memory/allocators/slab/traits.rs index 17d91d5..55090a3 100644 --- a/kernel/src/memory/allocators/slab/traits.rs +++ b/kernel/src/memory/allocators/slab/traits.rs @@ -1,4 +1,4 @@ -use crate::memory::page_descriptor::Unassigned; +use crate::memory::unassigned::Unassigned; /// Get the position on the slab array, for a slab of the given type. 
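///
/// Editorial sketch (not in the original diff): `define_slab_system!`
/// uses this constant as the index into the allocator's cache array,
/// roughly:
///
/// ```rust,ignore
/// let index = <Generic512>::POSITION;
/// let cache = &SLAB_ALLOCATOR.slabs[index]; // the Generic512 cache
/// ```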
/// diff --git a/kernel/src/memory/unassigned.rs b/kernel/src/memory/unassigned.rs new file mode 100644 index 0000000..80565aa --- /dev/null +++ b/kernel/src/memory/unassigned.rs @@ -0,0 +1,16 @@ +use crate::memory::allocators::slab::traits::SlabPosition; + +#[derive(Default, Clone, Copy, Debug)] +pub struct Unassigned; + +pub trait UnassignSlab { + type Target; + + fn as_unassigned(&self) -> Self::Target; +} + +pub trait AssignSlab { + type Target; + + fn assign(&self) -> Self::Target; +} From 7781894f7a4334121dfb302f2e2ad781997425c6 Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 25 Jan 2026 00:57:31 +0200 Subject: [PATCH 52/78] changed to full specialization --- kernel/src/main.rs | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 15cc1cd..9d5aa4a 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -18,7 +18,8 @@ #![feature(ascii_char)] #![feature(const_convert)] #![feature(core_intrinsics)] -#![feature(min_specialization)] +#![feature(explicit_tail_calls)] +#![feature(specialization)] #![deny(clippy::all)] mod drivers; mod memory; @@ -37,17 +38,11 @@ use crate::{ slab::SLAB_ALLOCATOR, }, memory_map::{MemoryMap, parse_map}, - page::pages_init, + page::{PAGES, map::PageMap}, }, }; -use common::{ - address_types::PhysicalAddress, - constants::{ - PHYSICAL_MEMORY_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, - }, - enums::{CascadedPicInterruptLine, Color, PS2ScanCode}, -}; +use common::{constants::REGULAR_PAGE_SIZE, enums::Color}; use cpu_utils::{ instructions::interrupts::{self}, structures::{ @@ -67,7 +62,7 @@ pub unsafe extern "C" fn _start() -> ! { okprintln!("Obtained Memory Map"); println!("{}", MemoryMap(parsed_memory_map!())); - pages_init(MemoryMap(parsed_memory_map!())); + PageMap::init(unsafe { &mut PAGES }, MemoryMap(parsed_memory_map!())); unsafe { BUDDY_ALLOCATOR.init(MemoryMap(parsed_memory_map!()), 0) }; let last = MemoryMap(parsed_memory_map!()).last().unwrap(); From 637858ea9914ea58636d0108bed8b832a9165d62 Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 25 Jan 2026 00:57:51 +0200 Subject: [PATCH 53/78] moved functions from page into buddy --- kernel/src/memory/allocators/buddy.rs | 170 ++++++++++++++++++++++---- kernel/src/memory/page.rs | 129 +++---------------- 2 files changed, 163 insertions(+), 136 deletions(-) diff --git a/kernel/src/memory/allocators/buddy.rs b/kernel/src/memory/allocators/buddy.rs index cfae213..9bc0d82 100644 --- a/kernel/src/memory/allocators/buddy.rs +++ b/kernel/src/memory/allocators/buddy.rs @@ -3,6 +3,7 @@ use core::ptr::{self, NonNull}; use common::{ address_types::PhysicalAddress, enums::{BUDDY_MAX_ORDER, BuddyOrder, MemoryRegionType}, + write_volatile, }; use cpu_utils::structures::paging::PageTable; @@ -16,6 +17,15 @@ pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { freelist: [const { BuddyPageMeta::default() }; BUDDY_MAX_ORDER], }; +#[macro_export] +/// Allocate the amount of pages specified, and return the address +macro_rules! 
alloc_pages { + ($page_number: expr) => {{ + use $crate::memory::allocators::buddy::BUDDY_ALLOCATOR; + BUDDY_ALLOCATOR.alloc_pages($page_number) + }}; +} + pub struct BuddyAllocator { freelist: [BuddyPageMeta; BUDDY_MAX_ORDER], }
@@ -50,34 +60,49 @@ impl BuddyAllocator { &mut self, wanted_order: usize, ) -> Option> { - let mut closet_order = ((wanted_order + 1)..BUDDY_MAX_ORDER) + let closet_order = ((wanted_order + 1)..BUDDY_MAX_ORDER) .find(|i| self.freelist[*i].next.is_some())?; - let initial_page = unsafe { - self.freelist[closet_order] - .detach::() - .unwrap() - .as_mut() - }; - - let (mut lhs, mut rhs) = unsafe { initial_page.split() }.unwrap(); - closet_order -= 1; + let initial_page = + self.freelist[closet_order].detach::().unwrap(); - while closet_order != wanted_order { - self.freelist[closet_order].attach(rhs); + Some(self.split_recursive( + initial_page, + closet_order, + wanted_order, + )) + } - let split_ref = unsafe { lhs.as_mut() }; + fn split_recursive( + &mut self, + page: NonNull, + current_order: usize, + target_order: usize, + ) -> NonNull { + debug_assert!( + target_order < current_order, + "Target order cannot be greater than current order" + ); - (lhs, rhs) = unsafe { split_ref.split().unwrap() }; - closet_order -= 1; + if current_order == target_order { + return page; } - self.freelist[closet_order].attach(rhs); - Some(lhs) + let (lhs, rhs) = unsafe { BuddyAllocator::split(page).unwrap() }; + + let next_order = current_order - 1; + self.freelist[next_order].attach(rhs); + + become self.split_recursive(lhs, next_order, target_order) } - pub fn merge_until_max(&self, page: NonNull) { - todo!() + /// This function will try to merge a page on the buddy allocator until + /// no further merge is possible. + pub fn merge_recursive(&self, page: NonNull) { + if let Some(merged) = + unsafe { BuddyAllocator::merge_with_buddy(page) } + { + become BuddyAllocator::merge_recursive(self, merged); + } } pub fn alloc_table(&mut self) -> &'static mut PageTable { unsafe {
@@ -136,11 +161,104 @@ impl BuddyAllocator { } } } -#[macro_export] -/// Allocate the amount of pages specified, and return the address -macro_rules! alloc_pages { - ($page_number: expr) => {{ - use $crate::memory::allocators::buddy::BUDDY_ALLOCATOR; - BUDDY_ALLOCATOR.alloc_pages($page_number) - }}; +} + +impl BuddyAllocator { + /// TODO: Make an unsafe split if relevant + /// + /// # Safety + /// This function does not attach the new references! + #[allow(clippy::type_complexity)] + unsafe fn split( + mut page: NonNull, + ) -> Option<(NonNull, NonNull)> { + // Reduce the order by one to get the order of the two halves. + let prev_order = BuddyOrder::try_from( + unsafe { page.as_ref().meta.buddy.order? } as u8 - 1, + ) + .expect("Page order cannot be reduced"); + + write_volatile!( + (*page.as_mut().meta.buddy).order, + Some(prev_order) + ); + + let index = unsafe { + ((page.as_ref() as *const _ as usize - PAGES.as_ptr().addr()) + / size_of::()) + + (1 << prev_order as usize) + }; + + // Find its half + let mut buddy = unsafe { NonNull::from_mut(&mut PAGES[index]) }; + + // Set the order of the buddy. + write_volatile!( + (*buddy.as_mut().meta.buddy).order, + Some(prev_order) + ); + + Some((page, buddy)) + } + + /// This function will detach the given page and its buddy from their + /// freelist, increase their order, and attach the merged block to the + /// next order's list.
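+ ///
+ /// Editorial sketch of the index arithmetic this relies on (see
+ /// `buddy_of` below): for an order-`n` block starting at page index
+ /// `i`,
+ ///
+ /// ```rust,ignore
+ /// let buddy_index = i ^ (1 << n);   // XOR flips to the sibling block
+ /// let merged_index = i & !(1 << n); // the merged block keeps the lower index
+ /// ```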
+ unsafe fn merge_with_buddy( + page: NonNull, + ) -> Option> { + let buddy = BuddyAllocator::buddy_of(page)?; + + let next_order = BuddyOrder::try_from(unsafe { + page.as_ref().meta.buddy.order.unwrap() as u8 + 1 + }) + .unwrap(); + + BuddyAllocator::detach_from_mid(page); + BuddyAllocator::detach_from_mid(buddy); + + // Operate on the page that it's address is lower. + let (mut left, mut right) = if page < buddy { + (page, buddy) + } else { + (buddy, page) + }; + + unsafe { + (*left.as_mut().meta.buddy).order = Some(next_order); + (*right.as_mut().meta.buddy) = BuddyPageMeta::default(); + }; + + Some(left) + } + + fn detach_from_mid(page: NonNull) { + let (mut prev, next) = unsafe { + let p_ref = page.as_ref(); + ( + p_ref.meta.buddy.prev.expect("Page has no prev"), + p_ref.meta.buddy.next.expect("Page has no next"), + ) + }; + + unsafe { (*prev.as_mut().meta.buddy).next = Some(next) } + } + + fn buddy_of( + page: NonNull, + ) -> Option> { + let order = unsafe { page.as_ref().meta.buddy.order? }; + if let BuddyOrder::MAX = order { + None + } else { + unsafe { + let buddy_address = page.as_ref() as *const _ as usize + ^ ((1 << order as usize) + * size_of::()); + + Some(NonNull::new_unchecked( + buddy_address as *mut UnassignedPage, + )) + } + } + } } diff --git a/kernel/src/memory/page.rs b/kernel/src/memory/page.rs index 22f3cbf..59b9ec8 100644 --- a/kernel/src/memory/page.rs +++ b/kernel/src/memory/page.rs @@ -1,22 +1,13 @@ -use core::{mem::ManuallyDrop, ptr::NonNull}; +use core::{marker::PhantomData, ptr::NonNull}; -use crate::{ - memory::{ - allocators::slab::{ - descriptor::SlabDescriptor, traits::SlabPosition, - }, - memory_map::ParsedMemoryMap, - page::meta::{BuddyPageMeta, PageMeta}, - unassigned::{AssignSlab, UnassignSlab, Unassigned}, - }, - println, +use crate::memory::{ + allocators::slab::traits::SlabPosition, + page::{map::PageMap, meta::PageMeta}, + unassigned::{AssignSlab, UnassignSlab, Unassigned}, }; use common::{ - address_types::PhysicalAddress, - constants::{PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_SIZE}, - enums::BuddyOrder, + address_types::PhysicalAddress, constants::REGULAR_PAGE_SIZE, late_init::LateInit, - write_volatile, }; pub mod map; @@ -24,8 +15,12 @@ pub mod meta; pub type UnassignedPage = Page; -pub static mut PAGES: LateInit<&'static mut [UnassignedPage]> = - LateInit::uninit(); +pub static mut PAGES: LateInit = LateInit::uninit(); + +pub struct Page { + pub meta: PageMeta, + _phantom: PhantomData, +} impl AssignSlab for NonNull> { type Target = NonNull>; @@ -45,12 +40,14 @@ impl UnassignSlab for NonNull> { } } -pub struct Page { - pub owner: Option>>, - pub meta: PageMeta, -} - impl Page { + pub fn new(meta: PageMeta) -> Page { + Page { + meta, + _phantom: PhantomData::, + } + } + pub fn physical_address(&self) -> PhysicalAddress { let index = (self as *const _ as usize - unsafe { PAGES.as_ptr().addr() }) @@ -61,95 +58,7 @@ impl Page { } } - pub fn get_buddy(&self) -> Option<*mut Page> { - let order = unsafe { self.meta.buddy.order? }; - if let BuddyOrder::MAX = order { - None - } else { - Some( - (self as *const _ as usize - ^ ((1 << order as usize) - * size_of::())) - as *mut Page, - ) - } - } - - /// TODO: Make an unsafe split if relevant - /// - /// # Safety - /// This function does not attach the new references! - #[allow(clippy::type_complexity)] - pub unsafe fn split( - &mut self, - ) -> Option<(NonNull>, NonNull>)> { - // Reduce it's order to find it's order. - - let prev_order = BuddyOrder::try_from( - unsafe { self.meta.buddy.order? 
} as u8 - 1, - ) - .unwrap(); - - write_volatile!((*self.meta.buddy).order, Some(prev_order)); - - let index = ((self as *const _ as usize - - unsafe { PAGES.as_ptr().addr() }) - / size_of::()) - + (1 << prev_order as usize); - - // Find it's half - let mut buddy = - unsafe { NonNull::from_mut(&mut PAGES[index]).assign::() }; - - // Set the order of the buddy. - write_volatile!( - (*buddy.as_mut().meta.buddy).order, - Some(prev_order) - ); - - Some((NonNull::from_mut(self), buddy)) - } - - /// Try to merge this page with it's buddy. - /// - /// Note: This function should not be recursive - pub unsafe fn merge(&self) { - todo!("") - } - pub const fn index_of_page(address: PhysicalAddress) -> usize { address.as_usize() / REGULAR_PAGE_SIZE } } - -pub fn pages_init(mmap: ParsedMemoryMap) -> usize { - let last = mmap.last().unwrap(); - let last_address = (last.base_address + last.length) as usize; - let total_pages = last_address / REGULAR_PAGE_SIZE; - - println!( - "Last address: {}, Total Pages: {}, size_of_array: {:x?} Kib", - last_address, - total_pages, - total_pages * size_of::>() / 1024 - ); - unsafe { - PAGES.write(core::slice::from_raw_parts_mut( - PAGE_ALLOCATOR_OFFSET as *mut UnassignedPage, - total_pages, - )); - - for p in PAGES.iter_mut() { - core::ptr::write_volatile( - p as *mut UnassignedPage, - UnassignedPage { - meta: PageMeta { - buddy: ManuallyDrop::new(BuddyPageMeta::default()), - }, - owner: None, - }, - ); - } - PAGES.as_ptr_range().end as usize - } -} From 26fffca3804a7735e387ef08f07d6ba574d976f9 Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 25 Jan 2026 00:58:17 +0200 Subject: [PATCH 54/78] added traits --- kernel/src/memory/allocators/slab.rs | 20 ++++++-------------- kernel/src/memory/allocators/slab/traits.rs | 14 ++++++++++++++ 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index d52c4f1..729711a 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -12,8 +12,9 @@ use crate::{ allocators::{ extensions::VirtualAddressExt, slab::{ - cache::SlabCache, descriptor::SlabDescriptor, - traits::SlabPosition, + cache::SlabCache, + descriptor::SlabDescriptor, + traits::{Generic, SlabPosition}, }, }, page::{PAGES, UnassignedPage}, @@ -25,13 +26,6 @@ use core::{ ptr::NonNull, }; -pub trait Generic { - const START: usize; - const END: usize; - - fn size(&self) -> usize; -} - generate_generics!( 8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096, 8192 ); @@ -75,11 +69,9 @@ impl SlabAllocator { NonNull::from_mut(&mut PAGES[index]).assign::().as_ref() }; - if let Some(mut descriptor) = page.owner { - unsafe { descriptor.as_mut().dealloc(ptr) }; - } else { - panic!("Object is freed from a page that has not owner!") - } + let descriptor = unsafe { page.meta.slab.freelist }; + + unsafe { descriptor.assign::().as_mut().dealloc(ptr) }; } } diff --git a/kernel/src/memory/allocators/slab/traits.rs b/kernel/src/memory/allocators/slab/traits.rs index 55090a3..3969e98 100644 --- a/kernel/src/memory/allocators/slab/traits.rs +++ b/kernel/src/memory/allocators/slab/traits.rs @@ -15,3 +15,17 @@ impl SlabPosition for Unassigned { pub trait SlabCacheConstructor { fn new(buddy_order: usize) -> Self; } + +pub trait Generic { + const START: usize; + const END: usize; + + fn size(&self) -> usize; +} + +pub trait DmaGeneric { + const START: usize; + const END: usize; + + fn size(&self) -> usize; +} From 77589cef15a7005ac230bb6c6997d5dd66e00b5e Mon Sep 17 00:00:00 
2001 From: sagi Date: Sun, 25 Jan 2026 00:58:30 +0200 Subject: [PATCH 55/78] added page flags to slab --- kernel/src/memory/allocators/slab/cache.rs | 27 ++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs index 972621d..f411ffb 100644 --- a/kernel/src/memory/allocators/slab/cache.rs +++ b/kernel/src/memory/allocators/slab/cache.rs @@ -1,12 +1,16 @@ +use cpu_utils::structures::paging::PageEntryFlags; + use super::descriptor::SlabDescriptor; use super::traits::{SlabCacheConstructor, SlabPosition}; use crate::memory::allocators::slab::SLAB_ALLOCATOR; +use crate::memory::allocators::slab::traits::DmaGeneric; use crate::memory::unassigned::{AssignSlab, Unassigned}; use core::ptr::NonNull; #[derive(Clone, Debug)] pub struct SlabCache { pub buddy_order: usize, + pub pflags: PageEntryFlags, pub free: Option>>, pub partial: Option>>, pub full: Option>>, @@ -77,6 +81,28 @@ impl SlabCacheConstructor for SlabCache { SlabCache { buddy_order, + pflags: PageEntryFlags::regular_page_flags(), + free: Some(free.assign::()), + partial: None, + full: None, + } + } +} + +impl SlabCacheConstructor for SlabCache { + fn new(buddy_order: usize) -> Self { + let mut free = unsafe { + SLAB_ALLOCATOR + .slab_of::>() + .as_mut() + .alloc() + }; + + unsafe { *free.as_mut() = SlabDescriptor::new(buddy_order, None) } + + SlabCache { + buddy_order, + pflags: PageEntryFlags::regular_io_page_flags(), free: Some(free.assign::()), partial: None, full: None, @@ -89,6 +115,7 @@ impl SlabCacheConstructor for SlabCache> { let partial = SlabDescriptor::>::initial_descriptor(buddy_order); SlabCache { buddy_order, + pflags: PageEntryFlags::regular_page_flags(), free: None, partial: Some(partial), full: None, From 0929350af786da982192fcce4fa88577de12d0c7 Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 25 Jan 2026 00:58:53 +0200 Subject: [PATCH 56/78] changed to div ceil --- kernel/src/memory/allocators/slab/macros.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/src/memory/allocators/slab/macros.rs b/kernel/src/memory/allocators/slab/macros.rs index aefaff5..e98fee7 100644 --- a/kernel/src/memory/allocators/slab/macros.rs +++ b/kernel/src/memory/allocators/slab/macros.rs @@ -51,7 +51,7 @@ macro_rules! 
define_slab_system { let index = <$t>::POSITION; self.slabs[index].write(SlabCache::<$t>::new( - (size_of::<$t>().next_multiple_of(REGULAR_PAGE_SIZE) / REGULAR_PAGE_SIZE) - 1 + size_of::<$t>().div_ceil(REGULAR_PAGE_SIZE) ).as_unassigned().clone()); )* } From 9b7ad7d8d006841798bea966ef7514cf50d9399c Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 25 Jan 2026 00:59:05 +0200 Subject: [PATCH 57/78] added init function here --- kernel/src/memory/page/map.rs | 68 ++++++++++++++++++++++++++++------- 1 file changed, 56 insertions(+), 12 deletions(-) diff --git a/kernel/src/memory/page/map.rs b/kernel/src/memory/page/map.rs index 4af3978..4b1d205 100644 --- a/kernel/src/memory/page/map.rs +++ b/kernel/src/memory/page/map.rs @@ -1,34 +1,78 @@ -use core::ops::{Deref, DerefMut}; +use core::{ + mem::ManuallyDrop, + ops::{Deref, DerefMut}, + ptr::NonNull, +}; -use common::late_init::LateInit; +use common::{ + address_types::VirtualAddress, + constants::{PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_SIZE}, + late_init::LateInit, +}; -use crate::memory::{memory_map::ParsedMemoryMap, page::UnassignedPage}; +use crate::{ + memory::{ + memory_map::ParsedMemoryMap, + page::{ + PAGES, UnassignedPage, + meta::{BuddyPageMeta, PageMeta}, + }, + }, + println, +}; -pub struct PageMap { - map: &'static mut [UnassignedPage], - // lock: todo!(), -} +pub struct PageMap(NonNull<[UnassignedPage]>); impl Deref for PageMap { type Target = [UnassignedPage]; fn deref(&self) -> &Self::Target { - self.map + unsafe { self.0.as_ref() } } } impl DerefMut for PageMap { fn deref_mut(&mut self) -> &mut Self::Target { - self.map + unsafe { self.0.as_mut() } } } impl PageMap { - /// Initializes all pages on a constant address. + /// Initializes all pages on the constant address + /// ([`PAGE_ALLOCATOR_OFFSET`]) and returns the end address.
pub fn init( uninit: &'static mut LateInit, mmap: ParsedMemoryMap, - ) { - todo!() + ) -> VirtualAddress { + let last = mmap.last().unwrap(); + let last_address = (last.base_address + last.length) as usize; + let total_pages = last_address / REGULAR_PAGE_SIZE; + + println!( + "Last address: {}, Total Pages: {}, size_of_array: {:x?} KiB", + last_address, + total_pages, + total_pages * size_of::() / 1024 + ); + unsafe { + let page_map = NonNull::slice_from_raw_parts( + NonNull::new_unchecked( + PAGE_ALLOCATOR_OFFSET as *mut UnassignedPage, + ), + total_pages, + ); + + uninit.write(PageMap(page_map)); + + for p in uninit.as_mut().iter_mut() { + core::ptr::write_volatile( + p as *mut UnassignedPage, + UnassignedPage::new(PageMeta { + buddy: ManuallyDrop::new(BuddyPageMeta::default()), + }), + ) + } + (PAGES.as_ptr_range().end as usize).into() + } } } From c8c44a9a0d08028a12dc51ce4e299f654e598804 Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 25 Jan 2026 00:59:17 +0200 Subject: [PATCH 58/78] made meta fields public --- kernel/src/memory/page/meta.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/src/memory/page/meta.rs b/kernel/src/memory/page/meta.rs index 6757104..0b53409 100644 --- a/kernel/src/memory/page/meta.rs +++ b/kernel/src/memory/page/meta.rs @@ -60,6 +60,6 @@ impl BuddyPageMeta { #[derive(Debug)] pub struct SlabPageMeta { - owner: NonNull>, - freelist: NonNull>, + pub owner: NonNull>, + pub freelist: NonNull>, } From c33f194ddd22363b21ea03eeec9446d6424f581e Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 25 Jan 2026 00:59:30 +0200 Subject: [PATCH 59/78] made the page allocator offset a virtual address --- shared/common/src/constants/addresses.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/shared/common/src/constants/addresses.rs b/shared/common/src/constants/addresses.rs index a3e482a..d2abce2 100644 --- a/shared/common/src/constants/addresses.rs +++ b/shared/common/src/constants/addresses.rs @@ -16,6 +16,7 @@ pub const TOP_IDENTITY_PAGE_TABLE_L3_OFFSET: usize = 0xe000; pub const TOP_IDENTITY_PAGE_TABLE_L2_OFFSET: usize = 0xf000; pub const KERNEL_OFFSET: u64 = 0x10000; -pub const PAGE_ALLOCATOR_OFFSET: usize = 0x100000; #[cfg(target_arch = "x86_64")] pub const PHYSICAL_MEMORY_OFFSET: usize = 0xffff800000000000; +#[cfg(target_arch = "x86_64")] +pub const PAGE_ALLOCATOR_OFFSET: usize = PHYSICAL_MEMORY_OFFSET + 0x100000; From 1bb6bb469c1168d6601a7d061168a7250d62c4b9 Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 25 Jan 2026 00:59:48 +0200 Subject: [PATCH 60/78] removed legacy mapping --- shared/cpu_utils/src/structures/paging/init.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/shared/cpu_utils/src/structures/paging/init.rs b/shared/cpu_utils/src/structures/paging/init.rs index 290d304..4b8a6a3 100644 --- a/shared/cpu_utils/src/structures/paging/init.rs +++ b/shared/cpu_utils/src/structures/paging/init.rs @@ -56,10 +56,6 @@ pub fn enable() -> Option<()> { PhysicalAddress::new_unchecked(0), PageEntryFlags::huge_page_flags(), ); - identity_page_table_l2.entries[1].map_unchecked( - PhysicalAddress::new_unchecked(0x200000), - PageEntryFlags::huge_page_flags(), - ); } // ANCHOR_END: setup_page_tables // ANCHOR: setup_top_page_tables From d5a4ff841182807dd6d372a8bcd9dd06a2a5e29f Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 25 Jan 2026 22:52:46 +0200 Subject: [PATCH 61/78] changed the set flags function to accept page size and number of pages to set from this address --- kernel/src/memory/allocators/extensions.rs | 29 +++++++++++++++++----- 1 file
changed, 23 insertions(+), 6 deletions(-) diff --git a/kernel/src/memory/allocators/extensions.rs b/kernel/src/memory/allocators/extensions.rs index f1f9000..39abd8f 100644 --- a/kernel/src/memory/allocators/extensions.rs +++ b/kernel/src/memory/allocators/extensions.rs @@ -1,3 +1,5 @@ +use core::num::NonZero; + use common::{ address_types::{PhysicalAddress, VirtualAddress}, constants::{ @@ -118,9 +120,19 @@ pub impl VirtualAddress { } } - fn set_flags(&self, flags: PageEntryFlags) -> Result<(), EntryError> { - let page_size = PageSize::from_alignment(self.alignment()) - .expect("self address is not aligned to a page size"); + fn set_flags( + &self, + flags: PageEntryFlags, + page_size: PageSize, + num_pages: NonZero, + ) -> Result<(), EntryError> { + let address_index = self + .index_of(PageTableLevel::VARIANTS[page_size as usize + 1]); + + debug_assert!( + address_index + num_pages.get() <= PAGE_DIRECTORY_ENTRIES, + "There are only 512 entries inside a table" + ); let mut table = PageTable::current_table_mut(); @@ -130,9 +142,14 @@ pub impl VirtualAddress { let entry = &mut table.entries[index]; table = entry.mapped_table_mut()?; } - table.entries[self - .index_of(PageTableLevel::VARIANTS[page_size as usize + 1])] - .set_flags(flags); + + table + .entries + .iter_mut() + .skip(address_index) + .take(num_pages.get()) + .for_each(|entry| entry.set_flags(flags)); + Ok(()) } From ac83bd81c77049841e03ffe9845961cb361ebfce Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 25 Jan 2026 22:53:08 +0200 Subject: [PATCH 62/78] derived copy --- shared/cpu_utils/src/structures/paging/entry_flags.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared/cpu_utils/src/structures/paging/entry_flags.rs b/shared/cpu_utils/src/structures/paging/entry_flags.rs index d989265..bc10733 100644 --- a/shared/cpu_utils/src/structures/paging/entry_flags.rs +++ b/shared/cpu_utils/src/structures/paging/entry_flags.rs @@ -44,7 +44,7 @@ macro_rules! 
table_entry_flags { // ANCHOR: page_entry_flags /// A wrapper for `PageTableEntry` flags for easier use -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub struct PageEntryFlags(pub u64); // ANCHOR_END: page_entry_flags From 6fcbd7ea893c3920484998135a61b7c134f1391f Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 25 Jan 2026 22:53:41 +0200 Subject: [PATCH 63/78] used new set flags definition --- kernel/src/memory/allocators/slab/cache.rs | 18 ++++++++++---- .../src/memory/allocators/slab/descriptor.rs | 24 +++++++++++++++---- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs index f411ffb..42d015b 100644 --- a/kernel/src/memory/allocators/slab/cache.rs +++ b/kernel/src/memory/allocators/slab/cache.rs @@ -70,6 +70,9 @@ impl SlabCache { impl SlabCacheConstructor for SlabCache { default fn new(buddy_order: usize) -> SlabCache { + const PFLAGS: PageEntryFlags = + PageEntryFlags::regular_io_page_flags(); + let mut free = unsafe { SLAB_ALLOCATOR .slab_of::>() @@ -77,11 +80,13 @@ impl SlabCacheConstructor for SlabCache { .alloc() }; - unsafe { *free.as_mut() = SlabDescriptor::new(buddy_order, None) } + unsafe { + *free.as_mut() = SlabDescriptor::new(buddy_order, PFLAGS, None) + } SlabCache { buddy_order, - pflags: PageEntryFlags::regular_page_flags(), + pflags: PFLAGS, free: Some(free.assign::()), partial: None, full: None, @@ -91,6 +96,9 @@ impl SlabCacheConstructor for SlabCache { impl SlabCacheConstructor for SlabCache { fn new(buddy_order: usize) -> Self { + const PFLAGS: PageEntryFlags = + PageEntryFlags::regular_io_page_flags(); + let mut free = unsafe { SLAB_ALLOCATOR .slab_of::>() @@ -98,11 +106,13 @@ impl SlabCacheConstructor for SlabCache { .alloc() }; - unsafe { *free.as_mut() = SlabDescriptor::new(buddy_order, None) } + unsafe { + *free.as_mut() = SlabDescriptor::new(buddy_order, PFLAGS, None) + } SlabCache { buddy_order, - pflags: PageEntryFlags::regular_io_page_flags(), + pflags: PFLAGS, free: Some(free.assign::()), partial: None, full: None, diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs index c668111..1887990 100644 --- a/kernel/src/memory/allocators/slab/descriptor.rs +++ b/kernel/src/memory/allocators/slab/descriptor.rs @@ -1,15 +1,19 @@ use super::traits::SlabPosition; use crate::{ alloc_pages, - memory::unassigned::{AssignSlab, UnassignSlab, Unassigned}, + memory::{ + allocators::extensions::VirtualAddressExt, + unassigned::{AssignSlab, UnassignSlab, Unassigned}, + }, }; -use common::constants::REGULAR_PAGE_SIZE; +use common::{constants::REGULAR_PAGE_SIZE, enums::PageSize}; use core::{ fmt::Debug, mem::{ManuallyDrop, size_of}, - num::{NonZero, NonZeroU16}, + num::NonZero, ptr::NonNull, }; +use cpu_utils::structures::paging::PageEntryFlags; use nonmax::NonMaxU16; /// Preallocated object in the slab allocator.
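// [Editor's illustration] A minimal usage sketch of the reworked
// `set_flags` from patch 61, which the diff below threads through the
// slab code: one call now re-flags a run of consecutive entries instead
// of a single page. The names mirror the patches above
// (`PageEntryFlags`, `PageSize`, `EntryError`); the order-3 block is
// invented for the example, and all touched entries are assumed to sit
// inside a single page table.
fn example_set_flags(virt: VirtualAddress) -> Result<(), EntryError> {
    let order = 3usize; // an order-3 buddy block covers eight 4 KiB pages
    virt.set_flags(
        PageEntryFlags::regular_io_page_flags(),
        PageSize::Regular,
        NonZero::new(1usize << order).expect("1 << 3 is never zero"),
    )
}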
@@ -70,9 +74,17 @@ impl UnassignSlab for NonNull> { impl SlabDescriptor { pub fn new( order: usize, + pflags: PageEntryFlags, next: Option>>, ) -> SlabDescriptor { let address = unsafe { alloc_pages!(1 << order).translate() }; + + address + .set_flags(pflags, PageSize::Regular, unsafe { + NonZero::new_unchecked(1 << order) + }) + .unwrap(); + let mut objects = unsafe { NonNull::slice_from_raw_parts( NonNull::new_unchecked( @@ -144,7 +156,11 @@ impl SlabDescriptor> { order: usize, ) -> NonNull>> { let mut descriptor = - SlabDescriptor::>::new(order, None); + SlabDescriptor::>::new( + order, + PageEntryFlags::regular_page_flags(), + None, + ); let mut self_allocation = descriptor.alloc(); From 9050cfaaf215f98e3d7669114a77ddcbbf0ae188 Mon Sep 17 00:00:00 2001 From: sagi Date: Wed, 28 Jan 2026 22:24:29 +0200 Subject: [PATCH 64/78] added a new slab trait --- kernel/src/memory/allocators/slab.rs | 26 +++++----- .../src/memory/allocators/slab/descriptor.rs | 48 ++++++++++--------- kernel/src/memory/allocators/slab/macros.rs | 5 ++ kernel/src/memory/allocators/slab/traits.rs | 16 ++++++- kernel/src/memory/page.rs | 22 +++++---- kernel/src/memory/page/meta.rs | 8 ++-- kernel/src/memory/unassigned.rs | 6 +-- 7 files changed, 76 insertions(+), 55 deletions(-) diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index 729711a..bfdbc58 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -9,13 +9,10 @@ use learnix_macros::generate_generics; use crate::{ define_slab_system, memory::{ - allocators::{ - extensions::VirtualAddressExt, - slab::{ - cache::SlabCache, - descriptor::SlabDescriptor, - traits::{Generic, SlabPosition}, - }, + allocators::slab::{ + cache::SlabCache, + descriptor::SlabDescriptor, + traits::{Generic, Slab, SlabPosition}, }, page::{PAGES, UnassignedPage}, unassigned::{AssignSlab, Unassigned}, @@ -50,19 +47,18 @@ define_slab_system!( pub static mut SLAB_ALLOCATOR: SlabAllocator = SlabAllocator::new(); impl SlabAllocator { - pub fn slab_of(&self) -> NonNull> { + pub fn slab_of(&self) -> NonNull> { self.slabs[T::POSITION].assign::() } - pub fn kmalloc(&self) -> NonNull { + pub fn kmalloc(&self) -> NonNull { let mut slab = self.slab_of::(); unsafe { slab.as_mut().alloc() } } - pub fn kfree(&self, ptr: NonNull) { + pub fn kfree(&self, ptr: NonNull) { let index = UnassignedPage::index_of_page(unsafe { VirtualAddress::new_unchecked(ptr.as_ptr() as usize) - .translate() }); let page = unsafe { @@ -192,7 +188,7 @@ unsafe impl Allocator for SlabAllocator { } } } -unsafe impl Send for SlabDescriptor {} -unsafe impl Sync for SlabDescriptor {} -unsafe impl Send for SlabCache {} -unsafe impl Sync for SlabCache {} +unsafe impl Send for SlabDescriptor {} +unsafe impl Sync for SlabDescriptor {} +unsafe impl Send for SlabCache {} +unsafe impl Sync for SlabCache {} diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs index 1887990..614938d 100644 --- a/kernel/src/memory/allocators/slab/descriptor.rs +++ b/kernel/src/memory/allocators/slab/descriptor.rs @@ -2,15 +2,14 @@ use super::traits::SlabPosition; use crate::{ alloc_pages, memory::{ - allocators::extensions::VirtualAddressExt, + allocators::slab::traits::Slab, unassigned::{AssignSlab, UnassignSlab, Unassigned}, }, }; -use common::{constants::REGULAR_PAGE_SIZE, enums::PageSize}; +use common::constants::REGULAR_PAGE_SIZE; use core::{ fmt::Debug, mem::{ManuallyDrop, size_of}, - num::NonZero, ptr::NonNull, }; use 
cpu_utils::structures::paging::PageEntryFlags; @@ -36,7 +35,7 @@ pub struct SlabDescriptor { pub next: Option>>, } -impl UnassignSlab for SlabDescriptor { +impl UnassignSlab for SlabDescriptor { type Target = SlabDescriptor; fn as_unassigned(&self) -> Self::Target { @@ -49,17 +48,16 @@ impl UnassignSlab for SlabDescriptor { } impl AssignSlab for NonNull> { - type Target = - NonNull>; + type Target = NonNull>; - fn assign(&self) -> NonNull> { + fn assign(&self) -> NonNull> { unsafe { NonNull::new_unchecked(self.as_ptr() as *mut SlabDescriptor) } } } -impl UnassignSlab for NonNull> { +impl UnassignSlab for NonNull> { type Target = NonNull>; fn as_unassigned(&self) -> Self::Target { @@ -71,20 +69,22 @@ impl UnassignSlab for NonNull> { } } -impl SlabDescriptor { - pub fn new( +impl SlabDescriptor { + /// Create a new slab descriptor. + /// + /// # Safety + /// This function is marked as unsafe because it does not initialize + /// the page that the allocation is on. + /// + /// This function is meant to be called from the [`grow`] + /// function inside the slab cache (which is safe and does initialize + /// the page). + pub unsafe fn new( order: usize, - pflags: PageEntryFlags, next: Option>>, ) -> SlabDescriptor { let address = unsafe { alloc_pages!(1 << order).translate() }; - address - .set_flags(pflags, PageSize::Regular, unsafe { - NonZero::new_unchecked(1 << order) - }) - .unwrap(); - let mut objects = unsafe { NonNull::slice_from_raw_parts( NonNull::new_unchecked( @@ -155,12 +155,14 @@ impl SlabDescriptor> { pub fn initial_descriptor( order: usize, ) -> NonNull>> { - let mut descriptor = - SlabDescriptor::>::new( - order, - PageEntryFlags::regular_page_flags(), - None, - ); + todo!( + "PAGE IS NOT INITIALIZED HERE, UNDERSTAND IF GROW IS VIABLE \ OR OTHER SOLUTION IS NEEDED" + ); + + let mut descriptor = unsafe { + SlabDescriptor::>::new(order, None) + }; + let mut self_allocation = descriptor.alloc(); diff --git a/kernel/src/memory/allocators/slab/macros.rs b/kernel/src/memory/allocators/slab/macros.rs index e98fee7..30fb007 100644 --- a/kernel/src/memory/allocators/slab/macros.rs +++ b/kernel/src/memory/allocators/slab/macros.rs @@ -8,6 +8,9 @@ macro_rules! register_slabs { (@step $idx:expr; $head:ty, $($tail:ty),+) => { impl $crate::memory::allocators::slab::traits::SlabPosition for $head { const POSITION: usize = $idx; } + + impl $crate::memory::allocators::slab::traits::Slab for $head {} + $crate::register_slabs!(@step $idx + 1; $($tail),*); }; @@ -15,6 +18,8 @@ macro_rules! register_slabs { (@step $idx:expr; $head:ty) => { impl $crate::memory::allocators::slab::traits::SlabPosition for $head { const POSITION: usize = $idx; } + + impl $crate::memory::allocators::slab::traits::Slab for $head {} }; (@step $idx:expr; ) => {}; diff --git a/kernel/src/memory/allocators/slab/traits.rs b/kernel/src/memory/allocators/slab/traits.rs index 3969e98..aa69341 100644 --- a/kernel/src/memory/allocators/slab/traits.rs +++ b/kernel/src/memory/allocators/slab/traits.rs @@ -1,10 +1,16 @@ +use cpu_utils::structures::paging::PageEntryFlags; + use crate::memory::unassigned::Unassigned; /// Get the position on the slab array, for a slab of the given type. /// /// Shouldn't implement this trait manually; it is implemented /// via the `define_slab_system` macro.
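// [Editor's sketch] What the `register_slabs!` expansion above amounts
// to: each registered type receives a distinct compile-time slot, so
// `slab_of::<T>()` is a plain array index with no runtime lookup. The
// two types below are stand-ins, not the real registered list.
trait SlabPosition {
    const POSITION: usize;
}
struct Generic8;
struct Generic16;
impl SlabPosition for Generic8 {
    const POSITION: usize = 0;
}
impl SlabPosition for Generic16 {
    const POSITION: usize = 1;
}
const fn slot_of<T: SlabPosition>() -> usize {
    T::POSITION
}
// Checked at compile time, just like the macro-generated impls:
const _: () = assert!(slot_of::<Generic16>() == 1);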
-pub trait SlabPosition: 'static + Sized { +pub trait Slab: 'static + Sized + SlabPosition + SlabFlags {} + +impl Slab for Unassigned {} + +pub trait SlabPosition { const POSITION: usize; } @@ -12,6 +18,14 @@ impl SlabPosition for Unassigned { const POSITION: usize = usize::MAX; } +pub trait SlabFlags: SlabPosition { + const PFLAGS: PageEntryFlags; +} + +impl SlabFlags for T { + default const PFLAGS: PageEntryFlags = PageEntryFlags::default(); +} + pub trait SlabCacheConstructor { fn new(buddy_order: usize) -> Self; } diff --git a/kernel/src/memory/page.rs b/kernel/src/memory/page.rs index 59b9ec8..92014ee 100644 --- a/kernel/src/memory/page.rs +++ b/kernel/src/memory/page.rs @@ -1,12 +1,16 @@ use core::{marker::PhantomData, ptr::NonNull}; use crate::memory::{ - allocators::slab::traits::SlabPosition, + allocators::{ + extensions::VirtualAddressExt, + slab::traits::{Slab, SlabPosition}, + }, page::{map::PageMap, meta::PageMeta}, unassigned::{AssignSlab, UnassignSlab, Unassigned}, }; use common::{ - address_types::PhysicalAddress, constants::REGULAR_PAGE_SIZE, + address_types::{PhysicalAddress, VirtualAddress}, + constants::REGULAR_PAGE_SIZE, late_init::LateInit, }; @@ -17,20 +21,20 @@ pub type UnassignedPage = Page; pub static mut PAGES: LateInit = LateInit::uninit(); -pub struct Page { +pub struct Page { pub meta: PageMeta, _phantom: PhantomData, } impl AssignSlab for NonNull> { - type Target = NonNull>; + type Target = NonNull>; - fn assign(&self) -> NonNull> { + fn assign(&self) -> NonNull> { unsafe { NonNull::new_unchecked(self.as_ptr() as *mut Page) } } } -impl UnassignSlab for NonNull> { +impl UnassignSlab for NonNull> { type Target = NonNull>; fn as_unassigned(&self) -> NonNull> { @@ -40,7 +44,7 @@ impl UnassignSlab for NonNull> { } } -impl Page { +impl Page { pub fn new(meta: PageMeta) -> Page { Page { meta, @@ -58,7 +62,7 @@ impl Page { } } - pub const fn index_of_page(address: PhysicalAddress) -> usize { - address.as_usize() / REGULAR_PAGE_SIZE + pub fn index_of_page(address: VirtualAddress) -> usize { + address.translate().as_usize() / REGULAR_PAGE_SIZE } } diff --git a/kernel/src/memory/page/meta.rs b/kernel/src/memory/page/meta.rs index 0b53409..c07cb36 100644 --- a/kernel/src/memory/page/meta.rs +++ b/kernel/src/memory/page/meta.rs @@ -4,7 +4,7 @@ use common::enums::BuddyOrder; use crate::memory::{ allocators::slab::{ - cache::SlabCache, descriptor::SlabDescriptor, traits::SlabPosition, + cache::SlabCache, descriptor::SlabDescriptor, traits::Slab, }, page::{Page, UnassignedPage}, unassigned::{AssignSlab, UnassignSlab, Unassigned}, @@ -33,7 +33,7 @@ impl const Default for BuddyPageMeta { } impl BuddyPageMeta { - pub fn detach(&mut self) -> Option>> { + pub fn detach(&mut self) -> Option>> { let detached = self.next?; // None if there is no page to detach self.next = unsafe { detached.as_ref().meta.buddy.next }; @@ -45,7 +45,7 @@ impl BuddyPageMeta { Some(detached.assign::()) } - pub fn attach(&mut self, mut p: NonNull>) { + pub fn attach(&mut self, mut p: NonNull>) { unsafe { (*p.as_mut().meta.buddy).next = self.next }; if let Some(mut next) = self.next { @@ -59,7 +59,7 @@ impl BuddyPageMeta { } #[derive(Debug)] -pub struct SlabPageMeta { +pub struct SlabPageMeta { pub owner: NonNull>, pub freelist: NonNull>, } diff --git a/kernel/src/memory/unassigned.rs b/kernel/src/memory/unassigned.rs index 80565aa..5860455 100644 --- a/kernel/src/memory/unassigned.rs +++ b/kernel/src/memory/unassigned.rs @@ -1,4 +1,4 @@ -use crate::memory::allocators::slab::traits::SlabPosition; +use 
crate::memory::allocators::slab::traits::{Slab, SlabPosition}; #[derive(Default, Clone, Copy, Debug)] pub struct Unassigned; @@ -10,7 +10,7 @@ pub trait UnassignSlab { } pub trait AssignSlab { - type Target; + type Target; - fn assign(&self) -> Self::Target; + fn assign(&self) -> Self::Target; } From fe2358bc773912e74ffc4dd24ab22f4ebe4fd700 Mon Sep 17 00:00:00 2001 From: sagi Date: Wed, 28 Jan 2026 22:24:44 +0200 Subject: [PATCH 65/78] added new nightly feature --- kernel/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 9d5aa4a..dc64e34 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -17,6 +17,7 @@ #![feature(ascii_char_variants)] #![feature(ascii_char)] #![feature(const_convert)] +#![feature(slice_ptr_get)] #![feature(core_intrinsics)] #![feature(explicit_tail_calls)] #![feature(specialization)] From ef5fa1fee819ab03036d82ad957939dd5b8143c6 Mon Sep 17 00:00:00 2001 From: sagi Date: Wed, 28 Jan 2026 22:25:02 +0200 Subject: [PATCH 66/78] used new slab trait, and added a grow function --- kernel/src/memory/allocators/slab/cache.rs | 117 ++++++++++++--------- 1 file changed, 65 insertions(+), 52 deletions(-) diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs index 42d015b..05a4434 100644 --- a/kernel/src/memory/allocators/slab/cache.rs +++ b/kernel/src/memory/allocators/slab/cache.rs @@ -1,11 +1,24 @@ +use core::{num::NonZero, ptr::NonNull}; + +use common::{address_types::VirtualAddress, enums::PageSize}; use cpu_utils::structures::paging::PageEntryFlags; -use super::descriptor::SlabDescriptor; -use super::traits::{SlabCacheConstructor, SlabPosition}; -use crate::memory::allocators::slab::SLAB_ALLOCATOR; -use crate::memory::allocators::slab::traits::DmaGeneric; -use crate::memory::unassigned::{AssignSlab, Unassigned}; -use core::ptr::NonNull; +use crate::memory::{ + allocators::{ + extensions::VirtualAddressExt, + slab::{ + SLAB_ALLOCATOR, + traits::{Slab, SlabFlags}, + }, + }, + page::{PAGES, UnassignedPage}, + unassigned::{AssignSlab, UnassignSlab, Unassigned}, +}; + +use super::{ + descriptor::SlabDescriptor, + traits::{SlabCacheConstructor, SlabPosition}, +}; #[derive(Clone, Debug)] pub struct SlabCache { @@ -16,15 +29,55 @@ pub struct SlabCache { pub full: Option>>, } -impl SlabCache { +impl SlabCache { pub fn as_unassigned(&self) -> &SlabCache { + todo!("Change to trait implementation"); unsafe { &*(self as *const _ as *const SlabCache) } } pub fn as_unassigned_mut(&mut self) -> &mut SlabCache { + todo!("Change to trait implementation"); unsafe { &mut *(self as *mut _ as *mut SlabCache) } } + /// Allocate a new slab descriptor, attach it to the free slab list, + /// and initialize its page. + pub fn grow(&self) -> NonNull> { + // Allocate a new slab descriptor for this slab + let mut slab = unsafe { + SLAB_ALLOCATOR + .kmalloc::>() + .assign::() + }; + + unsafe { + *slab.as_mut() = + SlabDescriptor::::new(self.buddy_order, None) + } + + let slab_address: VirtualAddress = + unsafe { slab.as_ref().objects.as_ptr().addr().into() }; + + slab_address + .set_flags(self.pflags, PageSize::Regular, unsafe { + NonZero::::new_unchecked(1 << self.buddy_order) + }) + .unwrap(); + + let slab_page = unsafe { + &mut PAGES[UnassignedPage::index_of_page(slab_address)] + }; + + // Set owner and freelist.
+ unsafe { + (*slab_page.meta.slab).freelist = slab.as_unassigned(); + (*slab_page.meta.slab).owner = + NonNull::from_ref(self.as_unassigned()); + }; + + slab + } + pub fn alloc(&mut self) -> NonNull { if let Some(mut partial) = self.partial { let partial = unsafe { partial.as_mut() }; @@ -61,59 +114,19 @@ impl SlabCache { } impl SlabCache { - pub fn assign(&self) -> NonNull> { + pub fn assign(&self) -> NonNull> { unsafe { NonNull::new_unchecked(self as *const _ as *mut SlabCache) } } } -impl SlabCacheConstructor for SlabCache { +impl SlabCacheConstructor for SlabCache { default fn new(buddy_order: usize) -> SlabCache { - const PFLAGS: PageEntryFlags = - PageEntryFlags::regular_io_page_flags(); - - let mut free = unsafe { - SLAB_ALLOCATOR - .slab_of::>() - .as_mut() - .alloc() - }; - - unsafe { - *free.as_mut() = SlabDescriptor::new(buddy_order, PFLAGS, None) - } - SlabCache { buddy_order, - pflags: PFLAGS, - free: Some(free.assign::()), - partial: None, - full: None, - } - } -} - -impl SlabCacheConstructor for SlabCache { - fn new(buddy_order: usize) -> Self { - const PFLAGS: PageEntryFlags = - PageEntryFlags::regular_io_page_flags(); - - let mut free = unsafe { - SLAB_ALLOCATOR - .slab_of::>() - .as_mut() - .alloc() - }; - - unsafe { - *free.as_mut() = SlabDescriptor::new(buddy_order, PFLAGS, None) - } - - SlabCache { - buddy_order, - pflags: PFLAGS, - free: Some(free.assign::()), + pflags: T::PFLAGS, + free: None, partial: None, full: None, } @@ -125,7 +138,7 @@ impl SlabCacheConstructor for SlabCache> { let partial = SlabDescriptor::>::initial_descriptor(buddy_order); SlabCache { buddy_order, - pflags: PageEntryFlags::regular_page_flags(), + pflags: SlabDescriptor::::PFLAGS, free: None, partial: Some(partial), full: None, From 5afa82280967aaf3c09b51fee34223fa10559bba Mon Sep 17 00:00:00 2001 From: sagi Date: Fri, 30 Jan 2026 12:48:56 +0200 Subject: [PATCH 67/78] changed name of position into slab_position for clarity --- kernel/src/memory/allocators/slab.rs | 2 +- kernel/src/memory/allocators/slab/macros.rs | 6 +++--- kernel/src/memory/allocators/slab/traits.rs | 11 ++++++++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index bfdbc58..1705b23 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -48,7 +48,7 @@ pub static mut SLAB_ALLOCATOR: SlabAllocator = SlabAllocator::new(); impl SlabAllocator { pub fn slab_of(&self) -> NonNull> { - self.slabs[T::POSITION].assign::() + self.slabs[T::SLAB_POSITION].assign::() } pub fn kmalloc(&self) -> NonNull { diff --git a/kernel/src/memory/allocators/slab/macros.rs b/kernel/src/memory/allocators/slab/macros.rs index 30fb007..27e8b8b 100644 --- a/kernel/src/memory/allocators/slab/macros.rs +++ b/kernel/src/memory/allocators/slab/macros.rs @@ -6,7 +6,7 @@ macro_rules! register_slabs { (@step $idx:expr; $head:ty, $($tail:ty),+) => { impl $crate::memory::allocators::slab::traits::SlabPosition for $head { - const POSITION: usize = $idx; + const SLAB_POSITION: usize = $idx; } impl $crate::memory::allocators::slab::traits::Slab for $head {} @@ -16,7 +16,7 @@ macro_rules! register_slabs { (@step $idx:expr; $head:ty) => { impl $crate::memory::allocators::slab::traits::SlabPosition for $head { - const POSITION: usize = $idx; + const SLAB_POSITION: usize = $idx; } impl $crate::memory::allocators::slab::traits::Slab for $head {} @@ -53,7 +53,7 @@ macro_rules! 
define_slab_system { pub fn init(&'static mut self) { $( - let index = <$t>::POSITION; + let index = <$t>::SLAB_POSITION; self.slabs[index].write(SlabCache::<$t>::new( size_of::<$t>().div_ceil(REGULAR_PAGE_SIZE) diff --git a/kernel/src/memory/allocators/slab/traits.rs b/kernel/src/memory/allocators/slab/traits.rs index aa69341..3830323 100644 --- a/kernel/src/memory/allocators/slab/traits.rs +++ b/kernel/src/memory/allocators/slab/traits.rs @@ -11,11 +11,11 @@ pub trait Slab: 'static + Sized + SlabPosition + SlabFlags {} impl Slab for Unassigned {} pub trait SlabPosition { - const POSITION: usize; + const SLAB_POSITION: usize; } impl SlabPosition for Unassigned { - const POSITION: usize = usize::MAX; + const SLAB_POSITION: usize = usize::MAX; } pub trait SlabFlags: SlabPosition { @@ -23,7 +23,12 @@ pub trait SlabFlags: SlabPosition { } impl SlabFlags for T { - default const PFLAGS: PageEntryFlags = PageEntryFlags::default(); + default const PFLAGS: PageEntryFlags = + PageEntryFlags::regular_page_flags(); +} + +impl SlabFlags for Unassigned { + const PFLAGS: PageEntryFlags = PageEntryFlags::default(); } pub trait SlabCacheConstructor { From 7e4acbad9dbfaeff2a256ce6cb0fadb94000fd5e Mon Sep 17 00:00:00 2001 From: sagi Date: Fri, 30 Jan 2026 12:49:46 +0200 Subject: [PATCH 68/78] initialized the page of the initial descriptor in the constructor of the cache --- kernel/src/memory/allocators/slab/cache.rs | 41 ++++++++++++++++++++- .../src/memory/allocators/slab/descriptor.rs | 6 --- 2 files changed, 40 insertions(+), 7 deletions(-) diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs index 05a4434..44c180a 100644 --- a/kernel/src/memory/allocators/slab/cache.rs +++ b/kernel/src/memory/allocators/slab/cache.rs @@ -135,7 +135,46 @@ impl SlabCacheConstructor for SlabCache { impl SlabCacheConstructor for SlabCache> { fn new(buddy_order: usize) -> SlabCache> { - let partial = SlabDescriptor::>::initial_descriptor(buddy_order); + let mut partial = SlabDescriptor::>::initial_descriptor(buddy_order); + + unsafe { + *partial.as_mut() = + SlabDescriptor::>::new( + buddy_order, + None, + ) + } + + let slab_address: VirtualAddress = + unsafe { partial.as_ref().objects.as_ptr().addr().into() }; + + slab_address + .set_flags( + SlabDescriptor::::PFLAGS, + PageSize::Regular, + unsafe { + NonZero::::new_unchecked(1 << buddy_order) + }, + ) + .unwrap(); + + let slab_page = unsafe { + &mut PAGES[UnassignedPage::index_of_page(slab_address)] + }; + + // Set owner and freelist. + unsafe { + (*slab_page.meta.slab).freelist = partial.as_unassigned(); + + // This assumption can be made, because the created cache in + // this function will go to the constant position on the slab + // array defined by the `SlabPosition` trait + (*slab_page.meta.slab).owner = NonNull::from_ref( + &SLAB_ALLOCATOR.slabs + [SlabDescriptor::::SLAB_POSITION], + ); + }; + SlabCache { buddy_order, pflags: SlabDescriptor::::PFLAGS, free: None, partial: Some(partial), full: None, diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs index 614938d..199c93c 100644 --- a/kernel/src/memory/allocators/slab/descriptor.rs +++ b/kernel/src/memory/allocators/slab/descriptor.rs @@ -12,7 +12,6 @@ use core::{ mem::{ManuallyDrop, size_of}, ptr::NonNull, }; -use cpu_utils::structures::paging::PageEntryFlags; use nonmax::NonMaxU16; /// Preallocated object in the slab allocator.
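// [Editor's model] How the free list behind `PreallocatedObject` and
// `next_free_idx` works: free slots form an intrusive singly linked
// list of indices threaded through the slab's own storage, so alloc and
// dealloc are O(1) pops and pushes. Self-contained toy version (assumes
// N > 0) with u16::MAX standing in for the `NonMaxU16` "no next slot"
// niche; the real code keeps the link inside the object union itself.
struct MiniSlab<const N: usize> {
    next_free: u16,  // head of the free list, u16::MAX when exhausted
    links: [u16; N], // links[i] = index of the free slot after slot i
}
impl<const N: usize> MiniSlab<N> {
    fn new() -> Self {
        let mut links = [u16::MAX; N];
        let mut i = 0;
        while i + 1 < N {
            links[i] = (i + 1) as u16; // chain every slot to the next
            i += 1;
        }
        Self { next_free: 0, links }
    }
    fn alloc(&mut self) -> Option<usize> {
        if self.next_free == u16::MAX {
            return None; // slab is full; the cache would grow here
        }
        let slot = self.next_free as usize;
        self.next_free = self.links[slot]; // pop the head
        Some(slot)
    }
    fn dealloc(&mut self, slot: usize) {
        self.links[slot] = self.next_free; // push back onto the list
        self.next_free = slot as u16;
    }
}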
@@ -155,11 +154,6 @@ impl SlabDescriptor> { pub fn initial_descriptor( order: usize, ) -> NonNull>> { - todo!( - "PAGE IS NOT INITIALIZED HERE, UNDERSTAND IF GROW IS VIABLE \ OR OTHER SOLUTION IS NEEDED" - ); - let mut descriptor = unsafe { SlabDescriptor::>::new(order, None) }; From 305058c5940bdfbb43c5af0a77dffc6ee8f9b9d2 Mon Sep 17 00:00:00 2001 From: sagi Date: Sat, 31 Jan 2026 17:26:25 +0200 Subject: [PATCH 69/78] Added some useful functions to translate between a virtual address, a page and a NonNull pointer. --- kernel/src/memory/allocators/buddy.rs | 7 +++++-- kernel/src/memory/page.rs | 18 ++++++++++++++++-- shared/common/src/address_types.rs | 8 ++++++++ 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/kernel/src/memory/allocators/buddy.rs b/kernel/src/memory/allocators/buddy.rs index 9bc0d82..210ecf7 100644 --- a/kernel/src/memory/allocators/buddy.rs +++ b/kernel/src/memory/allocators/buddy.rs @@ -125,10 +125,10 @@ impl BuddyAllocator { .iter() .filter(|a| a.region_type == MemoryRegionType::Usable) { - let mut start = UnassignedPage::index_of_page( + let mut start = UnassignedPage::index_of( (area.base_address as usize).into(), ); - let end = UnassignedPage::index_of_page( + let end = UnassignedPage::index_of( ((area.base_address + area.length) as usize).into(), ); @@ -231,6 +231,9 @@ impl BuddyAllocator { Some(left) } + // This function will probably fail, should change that the head of the + // page list is static and the list starts from the second node, and + // then this would work fn detach_from_mid(page: NonNull) { let (mut prev, next) = unsafe { let p_ref = page.as_ref(); diff --git a/kernel/src/memory/page.rs b/kernel/src/memory/page.rs index 92014ee..e2792ac 100644 --- a/kernel/src/memory/page.rs +++ b/kernel/src/memory/page.rs @@ -62,7 +62,21 @@ impl Page { } } - pub fn index_of_page(address: VirtualAddress) -> usize { - address.translate().as_usize() / REGULAR_PAGE_SIZE + /// Return the index of the page structure inside the [`PAGES`] array + /// that is pointed to by this virtual address. + /// + /// **Note**: if you meant to get the page structure, consider using + /// [`Page::from_virt`] + pub fn index_of(addr: VirtualAddress) -> usize { + addr.translate().as_usize() / REGULAR_PAGE_SIZE + } + + /// Return the physical page structure that is pointed to by this virtual + /// address + pub fn from_virt(addr: VirtualAddress) -> NonNull> { + unsafe { + NonNull::from_ref(&PAGES[Page::::index_of(addr)]) + .assign::() + } } } diff --git a/shared/common/src/address_types.rs b/shared/common/src/address_types.rs index ca6534d..1b8a80f 100644 --- a/shared/common/src/address_types.rs +++ b/shared/common/src/address_types.rs @@ -1,3 +1,5 @@ +use core::ptr::NonNull; + #[cfg(target_arch = "x86_64")] use crate::constants::PHYSICAL_MEMORY_OFFSET; use crate::enums::PageTableLevel; @@ -61,6 +63,12 @@ impl const From for PhysicalAddress { #[repr(C)] pub struct VirtualAddress(usize); +impl From> for VirtualAddress { + fn from(value: NonNull) -> Self { + unsafe { VirtualAddress::new_unchecked(value.as_ptr().addr()) } + } +} + impl const From for VirtualAddress { // TODO!
Change into new in the future fn from(value: usize) -> Self { From 3d2167ce47348404c67ced2824c59d6dd6d6dd27 Mon Sep 17 00:00:00 2001 From: sagi Date: Sat, 31 Jan 2026 17:27:13 +0200 Subject: [PATCH 70/78] Changed the grow function, and used take_ownership on the initial descriptor --- kernel/src/memory/allocators/slab/cache.rs | 105 +++++++++------------ 1 file changed, 43 insertions(+), 62 deletions(-) diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs index 44c180a..6a51e99 100644 --- a/kernel/src/memory/allocators/slab/cache.rs +++ b/kernel/src/memory/allocators/slab/cache.rs @@ -1,7 +1,6 @@ use core::{num::NonZero, ptr::NonNull}; -use common::{address_types::VirtualAddress, enums::PageSize}; -use cpu_utils::structures::paging::PageEntryFlags; +use common::address_types::VirtualAddress; use crate::memory::{ allocators::{ @@ -11,7 +10,7 @@ use crate::memory::{ traits::{Slab, SlabFlags}, }, }, - page::{PAGES, UnassignedPage}, + page::UnassignedPage, unassigned::{AssignSlab, UnassignSlab, Unassigned}, }; @@ -21,28 +20,29 @@ use super::{ }; #[derive(Clone, Debug)] -pub struct SlabCache { +pub struct SlabCache { pub buddy_order: usize, - pub pflags: PageEntryFlags, pub free: Option>>, pub partial: Option>>, pub full: Option>>, } -impl SlabCache { - pub fn as_unassigned(&self) -> &SlabCache { - todo!("Change to trait implementation"); - unsafe { &*(self as *const _ as *const SlabCache) } - } +impl UnassignSlab for NonNull> { + type Target = NonNull>; - pub fn as_unassigned_mut(&mut self) -> &mut SlabCache { - todo!("Change to trait implementation"); - unsafe { &mut *(self as *mut _ as *mut SlabCache) } + fn as_unassigned(&self) -> Self::Target { + unsafe { + NonNull::new_unchecked( + self.as_ptr() as *mut SlabCache + ) + } } +} +impl SlabCache { /// Allocate a new slab descriptor, attach it to the free slab list, /// and initialize its page. - pub fn grow(&self) -> NonNull> { + pub fn grow(&mut self) { // Allocate a new slab descriptor for this slab let mut slab = unsafe { SLAB_ALLOCATOR @@ -52,30 +52,33 @@ impl SlabCache { unsafe { *slab.as_mut() = - SlabDescriptor::::new(self.buddy_order, None) + SlabDescriptor::::new(self.buddy_order, self.free) } + self.take_ownership(slab); + + self.free = Some(slab); + } + + pub fn take_ownership(&self, slab: NonNull>) { let slab_address: VirtualAddress = unsafe { slab.as_ref().objects.as_ptr().addr().into() }; slab_address - .set_flags(self.pflags, PageSize::Regular, unsafe { + .set_flags(T::PFLAGS, T::PSIZE, unsafe { NonZero::::new_unchecked(1 << self.buddy_order) }) .unwrap(); - let slab_page = unsafe { - &mut PAGES[UnassignedPage::index_of_page(slab_address)] - }; + let slab_page = + unsafe { UnassignedPage::from_virt(slab_address).as_mut() }; // Set owner and freelist.
+ unsafe { (*slab_page.meta.slab).freelist = slab.as_unassigned(); (*slab_page.meta.slab).owner = - NonNull::from_ref(self.as_unassigned()); + NonNull::from_ref(self).as_unassigned(); }; - - slab } pub fn alloc(&mut self) -> NonNull { @@ -125,7 +128,6 @@ impl SlabCacheConstructor for SlabCache { default fn new(buddy_order: usize) -> SlabCache { SlabCache { buddy_order, - pflags: T::PFLAGS, free: None, partial: None, full: None, @@ -135,52 +137,31 @@ impl SlabCacheConstructor for SlabCache { impl SlabCacheConstructor for SlabCache> { fn new(buddy_order: usize) -> SlabCache> { - let mut partial = SlabDescriptor::>::initial_descriptor(buddy_order); - - unsafe { - *partial.as_mut() = - SlabDescriptor::>::new( - buddy_order, - None, - ) - } + let partial = SlabDescriptor::>::initial_descriptor(buddy_order); - let slab_address: VirtualAddress = - unsafe { partial.as_ref().objects.as_ptr().addr().into() }; - - slab_address - .set_flags( - SlabDescriptor::::PFLAGS, - PageSize::Regular, - unsafe { - NonZero::::new_unchecked(1 << buddy_order) - }, - ) - .unwrap(); - - let slab_page = unsafe { - &mut PAGES[UnassignedPage::index_of_page(slab_address)] + // This assumption can be made, because the created cache in + // this function will go to the constant position on the slab + // array defined by the `SlabPosition` trait + let mut future_owner = unsafe { + SLAB_ALLOCATOR.slab_of::>() }; - // Set owner and freelist. - unsafe { - (*slab_page.meta.slab).freelist = partial.as_unassigned(); - - // This assumption can be made, because the created cache in - // this function will go to the constant position on the slab - // array defined by the `SlabPosition` trait - (*slab_page.meta.slab).owner = NonNull::from_ref( - &SLAB_ALLOCATOR.slabs - [SlabDescriptor::::SLAB_POSITION], - ); - }; - - SlabCache { + let cache = SlabCache { buddy_order, - pflags: SlabDescriptor::::PFLAGS, free: None, partial: Some(partial), full: None, + }; + + // Only in this function, we initialize the global array in the + // new function.
+ // + // Because then we can use the `take_ownership` function. unsafe { *future_owner.as_mut() = cache.clone(); future_owner.as_mut().take_ownership(partial); } + + cache } } From e40432f5ae0fbee006ea4b58830c541c914cb4dd Mon Sep 17 00:00:00 2001 From: sagi Date: Sat, 31 Jan 2026 17:27:56 +0200 Subject: [PATCH 71/78] improved trait implementation --- .../src/memory/allocators/slab/descriptor.rs | 28 +++++++++---------- kernel/src/memory/allocators/slab/macros.rs | 8 ++++-- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs index 199c93c..159d09c 100644 --- a/kernel/src/memory/allocators/slab/descriptor.rs +++ b/kernel/src/memory/allocators/slab/descriptor.rs @@ -27,25 +27,13 @@ impl Debug for PreallocatedObject { } #[derive(Debug, Clone)] -pub struct SlabDescriptor { +pub struct SlabDescriptor { pub next_free_idx: Option, pub total_allocated: u16, pub objects: NonNull<[PreallocatedObject]>, pub next: Option>>, } -impl UnassignSlab for SlabDescriptor { - type Target = SlabDescriptor; - - fn as_unassigned(&self) -> Self::Target { - unsafe { - (*(self as *const SlabDescriptor - as *mut SlabDescriptor)) - .clone() - } - } -} - impl AssignSlab for NonNull> { type Target = NonNull>; @@ -132,6 +120,8 @@ impl SlabDescriptor { unsafe { NonNull::from_mut(&mut preallocated.allocated) } } + // TODO: In tests, remember to implement something on T that implements + // Drop and see that when freeing the memory it is called pub unsafe fn dealloc(&mut self, ptr: NonNull) { todo!("Remember to call drop on the item"); @@ -151,6 +141,11 @@ impl SlabDescriptor { } impl SlabDescriptor> { + /// Return a pointer to the initial descriptor after it has allocated + /// itself. + /// + /// The pointer that is returned by this function contains an already + /// initalized descriptor that allocates itself. pub fn initial_descriptor( order: usize, ) -> NonNull>> { @@ -160,7 +155,12 @@ impl SlabDescriptor> { let mut self_allocation = descriptor.alloc(); - unsafe { *self_allocation.as_mut() = descriptor.as_unassigned() } + unsafe { + *self_allocation.as_mut() = NonNull::from_ref(&descriptor) + .as_unassigned() + .as_ref() + .clone() + } self_allocation.assign::>() } diff --git a/kernel/src/memory/allocators/slab/macros.rs b/kernel/src/memory/allocators/slab/macros.rs index 27e8b8b..927f112 100644 --- a/kernel/src/memory/allocators/slab/macros.rs +++ b/kernel/src/memory/allocators/slab/macros.rs @@ -55,9 +55,11 @@
define_slab_system { $( let index = <$t>::SLAB_POSITION; - self.slabs[index].write(SlabCache::<$t>::new( - size_of::<$t>().div_ceil(REGULAR_PAGE_SIZE) - ).as_unassigned().clone()); + let initialized = SlabCache::<$t>::new(size_of::<$t>().div_ceil(REGULAR_PAGE_SIZE)); + + let unassigned = NonNull::from_ref(&initialized).as_unassigned(); + + self.slabs[index].write(unsafe { unassigned.as_ref().clone() }); )* } } From 5db72a6649faf95161466c916bd8e6ff4d63dc38 Mon Sep 17 00:00:00 2001 From: sagi Date: Sat, 31 Jan 2026 17:28:20 +0200 Subject: [PATCH 72/78] added page size field to SlabFlags trait --- kernel/src/memory/allocators/slab/traits.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/src/memory/allocators/slab/traits.rs b/kernel/src/memory/allocators/slab/traits.rs index 3830323..8220e10 100644 --- a/kernel/src/memory/allocators/slab/traits.rs +++ b/kernel/src/memory/allocators/slab/traits.rs @@ -1,3 +1,4 @@ +use common::enums::PageSize; use cpu_utils::structures::paging::PageEntryFlags; use crate::memory::unassigned::Unassigned; @@ -20,15 +21,19 @@ impl SlabPosition for Unassigned { pub trait SlabFlags: SlabPosition { const PFLAGS: PageEntryFlags; + const PSIZE: PageSize; } impl SlabFlags for T { default const PFLAGS: PageEntryFlags = PageEntryFlags::regular_page_flags(); + + default const PSIZE: PageSize = PageSize::Regular; } impl SlabFlags for Unassigned { const PFLAGS: PageEntryFlags = PageEntryFlags::default(); + const PSIZE: PageSize = PageSize::Regular; } pub trait SlabCacheConstructor { From b9453930b77610ab7c68438ba0f71dfbce9d51d1 Mon Sep 17 00:00:00 2001 From: sagi Date: Sat, 31 Jan 2026 17:28:44 +0200 Subject: [PATCH 73/78] used new functionality --- kernel/src/memory/allocators/slab.rs | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index 1705b23..8057716 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -3,7 +3,6 @@ pub mod descriptor; pub mod macros; pub mod traits; -use common::address_types::VirtualAddress; use learnix_macros::generate_generics; use crate::{ @@ -14,8 +13,8 @@ use crate::{ descriptor::SlabDescriptor, traits::{Generic, Slab, SlabPosition}, }, - page::{PAGES, UnassignedPage}, - unassigned::{AssignSlab, Unassigned}, + page::Page, + unassigned::{AssignSlab, UnassignSlab, Unassigned}, }, }; use core::{ @@ -57,13 +56,7 @@ impl SlabAllocator { } pub fn kfree(&self, ptr: NonNull) { - let index = UnassignedPage::index_of_page(unsafe { - VirtualAddress::new_unchecked(ptr.as_ptr() as usize) - }); - - let page = unsafe { - NonNull::from_mut(&mut PAGES[index]).assign::().as_ref() - }; + let page = unsafe { Page::::from_virt(ptr.into()).as_ref() }; let descriptor = unsafe { page.meta.slab.freelist }; From 755d0c098ee8f91088b8a8885aba0f1944f70622 Mon Sep 17 00:00:00 2001 From: sagi Date: Sun, 1 Feb 2026 22:32:33 +0200 Subject: [PATCH 74/78] replaced unassigned with () --- kernel/src/memory/allocators/buddy.rs | 9 ++--- kernel/src/memory/allocators/slab.rs | 4 +- kernel/src/memory/allocators/slab/cache.rs | 38 ++++++++----------- .../src/memory/allocators/slab/descriptor.rs | 19 ++++------ kernel/src/memory/allocators/slab/macros.rs | 2 +- kernel/src/memory/allocators/slab/traits.rs | 8 ++-- kernel/src/memory/page.rs | 19 ++++------ kernel/src/memory/page/meta.rs | 4 +- kernel/src/memory/unassigned.rs | 5 +-- 9 files changed, 43 insertions(+), 65 deletions(-) diff --git 
a/kernel/src/memory/allocators/buddy.rs b/kernel/src/memory/allocators/buddy.rs index 210ecf7..07059bf 100644 --- a/kernel/src/memory/allocators/buddy.rs +++ b/kernel/src/memory/allocators/buddy.rs @@ -10,7 +10,6 @@ use cpu_utils::structures::paging::PageTable; use crate::memory::{ memory_map::ParsedMemoryMap, page::{PAGES, UnassignedPage, meta::BuddyPageMeta}, - unassigned::Unassigned, }; pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { @@ -64,7 +63,7 @@ impl BuddyAllocator { .find(|i| self.freelist[*i].next.is_some())?; let initial_page = - self.freelist[closet_order].detach::().unwrap(); + self.freelist[closet_order].detach::<()>().unwrap(); Some(self.split_recursive( initial_page, @@ -231,9 +230,9 @@ impl BuddyAllocator { Some(left) } - // This function will probably fail, should change that the head of the - // page list is static and the list starts from the second node, and - // then this would work + // TODO: This function will probably fail, should change that the head + // of the page list is static and the list starts from the second + // node, and then this would work fn detach_from_mid(page: NonNull) { let (mut prev, next) = unsafe { let p_ref = page.as_ref(); diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs index 8057716..886df9d 100644 --- a/kernel/src/memory/allocators/slab.rs +++ b/kernel/src/memory/allocators/slab.rs @@ -14,7 +14,7 @@ use crate::{ traits::{Generic, Slab, SlabPosition}, }, page::Page, - unassigned::{AssignSlab, UnassignSlab, Unassigned}, + unassigned::{AssignSlab, UnassignSlab}, }, }; use core::{ @@ -27,7 +27,7 @@ generate_generics!( ); define_slab_system!( - SlabDescriptor, + SlabDescriptor<()>, Generic8, Generic16, Generic32, diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs index 6a51e99..eebc815 100644 --- a/kernel/src/memory/allocators/slab/cache.rs +++ b/kernel/src/memory/allocators/slab/cache.rs @@ -5,19 +5,13 @@ use common::address_types::VirtualAddress; use crate::memory::{ allocators::{ extensions::VirtualAddressExt, - slab::{ - SLAB_ALLOCATOR, - traits::{Slab, SlabFlags}, - }, + slab::{SLAB_ALLOCATOR, traits::Slab}, }, page::UnassignedPage, - unassigned::{AssignSlab, UnassignSlab, Unassigned}, + unassigned::{AssignSlab, UnassignSlab}, }; -use super::{ - descriptor::SlabDescriptor, - traits::{SlabCacheConstructor, SlabPosition}, -}; +use super::{descriptor::SlabDescriptor, traits::SlabCacheConstructor}; #[derive(Clone, Debug)] pub struct SlabCache { @@ -28,13 +22,11 @@ pub struct SlabCache { } impl UnassignSlab for NonNull> { - type Target = NonNull>; + type Target = NonNull>; fn as_unassigned(&self) -> Self::Target { unsafe { - NonNull::new_unchecked( - self.as_ptr() as *mut SlabCache - ) + NonNull::new_unchecked(self.as_ptr() as *mut SlabCache<()>) } } } @@ -45,9 +37,7 @@ impl SlabCache { pub fn grow(&mut self) { // Allocate a new slab descriptor for this slab let mut slab = unsafe { - SLAB_ALLOCATOR - .kmalloc::>() - .assign::() + SLAB_ALLOCATOR.kmalloc::>().assign::() }; unsafe { @@ -116,7 +106,7 @@ impl SlabCache { } } -impl SlabCache { +impl SlabCache<()> { pub fn assign(&self) -> NonNull> { unsafe { NonNull::new_unchecked(self as *const _ as *mut SlabCache) @@ -135,16 +125,18 @@ impl SlabCacheConstructor for SlabCache { } } -impl SlabCacheConstructor for SlabCache> { - fn new(buddy_order: usize) -> SlabCache> { - let partial = SlabDescriptor::>::initial_descriptor(buddy_order); +impl SlabCacheConstructor for SlabCache> { + fn 
new(buddy_order: usize) -> SlabCache> { let partial = SlabDescriptor::>::initial_descriptor( buddy_order, ); // This assumption can be made, because the created cache in // this function will go to the constant position on the slab // array defined by the `SlabPosition` trait let mut future_owner = - SLAB_ALLOCATOR.slab_of::>() + ...? hmm let mut future_owner = unsafe { SLAB_ALLOCATOR.slab_of::>() }; let cache = SlabCache { buddy_order, diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs index 159d09c..6c5c538 100644 --- a/kernel/src/memory/allocators/slab/descriptor.rs +++ b/kernel/src/memory/allocators/slab/descriptor.rs @@ -1,9 +1,8 @@ -use super::traits::SlabPosition; use crate::{ alloc_pages, memory::{ allocators::slab::traits::Slab, - unassigned::{AssignSlab, UnassignSlab, Unassigned}, + unassigned::{AssignSlab, UnassignSlab}, }, }; use common::constants::REGULAR_PAGE_SIZE; @@ -34,7 +33,7 @@ pub struct SlabDescriptor { pub next: Option>>, } -impl AssignSlab for NonNull> { +impl AssignSlab for NonNull> { type Target = NonNull>; fn assign(&self) -> NonNull> { @@ -45,13 +44,11 @@ impl AssignSlab for NonNull> { } impl UnassignSlab for NonNull> { - type Target = NonNull>; + type Target = NonNull>; fn as_unassigned(&self) -> Self::Target { unsafe { - NonNull::new_unchecked( - self.as_ptr() as *mut SlabDescriptor - ) + NonNull::new_unchecked(self.as_ptr() as *mut SlabDescriptor<()>) } } } @@ -140,7 +137,7 @@ impl SlabDescriptor { } } -impl SlabDescriptor> { +impl SlabDescriptor> { /// Return a pointer to the initial descriptor after it has allocated /// itself. /// @@ -148,9 +145,9 @@ impl SlabDescriptor> { /// initalized descriptor that allocates itself. pub fn initial_descriptor( order: usize, - ) -> NonNull>> { + ) -> NonNull>> { let mut descriptor = unsafe { - SlabDescriptor::>::new(order, None) + SlabDescriptor::>::new(order, None) }; let mut self_allocation = descriptor.alloc(); @@ -162,6 +159,6 @@ impl SlabDescriptor> { .clone() } - self_allocation.assign::>() + self_allocation.assign::>() } } diff --git a/kernel/src/memory/allocators/slab/macros.rs b/kernel/src/memory/allocators/slab/macros.rs index 927f112..994f2d2 100644 --- a/kernel/src/memory/allocators/slab/macros.rs +++ b/kernel/src/memory/allocators/slab/macros.rs @@ -36,7 +36,7 @@ macro_rules! define_slab_system { const COUNT: usize = [$(stringify!($t)),*].len(); pub struct SlabAllocator { - slabs: [common::late_init::LateInit>; COUNT] + slabs: [common::late_init::LateInit>; COUNT] } impl SlabAllocator { diff --git a/kernel/src/memory/allocators/slab/traits.rs b/kernel/src/memory/allocators/slab/traits.rs index 8220e10..16dfc9a 100644 --- a/kernel/src/memory/allocators/slab/traits.rs +++ b/kernel/src/memory/allocators/slab/traits.rs @@ -1,21 +1,19 @@ use common::enums::PageSize; use cpu_utils::structures::paging::PageEntryFlags; -use crate::memory::unassigned::Unassigned; - /// Get the position on the slab array, for a slab of the given type. /// /// Shouldn't implement this trait manually; it is implemented /// via the `define_slab_system` macro.
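// [Editor's sketch] The idea behind assign/unassign once `Unassigned`
// becomes `()`: the type parameter only lives in `PhantomData`, so
// `Page<T>` and `Page<()>` share one layout and the conversions reduce
// to pointer casts. Reduced stand-alone model; names are illustrative.
use core::{marker::PhantomData, ptr::NonNull};
struct Page<T: 'static> {
    frame: usize,
    _owner: PhantomData<T>,
}
fn unassign<T: 'static>(p: NonNull<Page<T>>) -> NonNull<Page<()>> {
    // Sound only because `T` is phantom: the struct layout does not
    // depend on it, which is exactly the invariant the kernel relies on.
    p.cast::<Page<()>>()
}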
pub trait Slab: 'static + Sized + SlabPosition + SlabFlags {} -impl Slab for Unassigned {} +impl Slab for () {} pub trait SlabPosition { const SLAB_POSITION: usize; } -impl SlabPosition for Unassigned { +impl SlabPosition for () { const SLAB_POSITION: usize = usize::MAX; } @@ -31,7 +29,7 @@ impl SlabFlags for T { default const PSIZE: PageSize = PageSize::Regular; } -impl SlabFlags for Unassigned { +impl SlabFlags for () { const PFLAGS: PageEntryFlags = PageEntryFlags::default(); const PSIZE: PageSize = PageSize::Regular; } diff --git a/kernel/src/memory/page.rs b/kernel/src/memory/page.rs index e2792ac..2b4ba20 100644 --- a/kernel/src/memory/page.rs +++ b/kernel/src/memory/page.rs @@ -1,12 +1,9 @@ use core::{marker::PhantomData, ptr::NonNull}; use crate::memory::{ - allocators::{ - extensions::VirtualAddressExt, - slab::traits::{Slab, SlabPosition}, - }, + allocators::{extensions::VirtualAddressExt, slab::traits::Slab}, page::{map::PageMap, meta::PageMeta}, - unassigned::{AssignSlab, UnassignSlab, Unassigned}, + unassigned::{AssignSlab, UnassignSlab}, }; use common::{ address_types::{PhysicalAddress, VirtualAddress}, @@ -17,7 +14,7 @@ use common::{ pub mod map; pub mod meta; -pub type UnassignedPage = Page; +pub type UnassignedPage = Page<()>; pub static mut PAGES: LateInit = LateInit::uninit(); @@ -26,7 +23,7 @@ pub struct Page { _phantom: PhantomData, } -impl AssignSlab for NonNull> { +impl AssignSlab for NonNull> { type Target = NonNull>; fn assign(&self) -> NonNull> { @@ -35,12 +32,10 @@ impl AssignSlab for NonNull> { } impl UnassignSlab for NonNull> { - type Target = NonNull>; + type Target = NonNull>; - fn as_unassigned(&self) -> NonNull> { - unsafe { - NonNull::new_unchecked(self.as_ptr() as *mut Page) - } + fn as_unassigned(&self) -> NonNull> { + unsafe { NonNull::new_unchecked(self.as_ptr() as *mut Page<()>) } } } diff --git a/kernel/src/memory/page/meta.rs b/kernel/src/memory/page/meta.rs index c07cb36..c5ca152 100644 --- a/kernel/src/memory/page/meta.rs +++ b/kernel/src/memory/page/meta.rs @@ -7,12 +7,12 @@ use crate::memory::{ cache::SlabCache, descriptor::SlabDescriptor, traits::Slab, }, page::{Page, UnassignedPage}, - unassigned::{AssignSlab, UnassignSlab, Unassigned}, + unassigned::{AssignSlab, UnassignSlab}, }; pub union PageMeta { pub buddy: ManuallyDrop, - pub slab: ManuallyDrop>, + pub slab: ManuallyDrop>, } #[derive(Debug)] diff --git a/kernel/src/memory/unassigned.rs b/kernel/src/memory/unassigned.rs index 5860455..8ad4e38 100644 --- a/kernel/src/memory/unassigned.rs +++ b/kernel/src/memory/unassigned.rs @@ -1,7 +1,4 @@ -use crate::memory::allocators::slab::traits::{Slab, SlabPosition}; - -#[derive(Default, Clone, Copy, Debug)] -pub struct Unassigned; +use crate::memory::allocators::slab::traits::Slab; pub trait UnassignSlab { type Target; From c55ccee088c29b13f4a3f88cc3262878abff4af4 Mon Sep 17 00:00:00 2001 From: sagi Date: Mon, 2 Feb 2026 20:37:31 +0200 Subject: [PATCH 75/78] started implementing a walk function for virtual address --- kernel/src/memory/allocators/extensions.rs | 14 +++++++++++++- kernel/src/memory/allocators/slab/descriptor.rs | 2 +- shared/common/src/address_types.rs | 2 +- shared/common/src/enums/paging.rs | 8 ++++---- 4 files changed, 19 insertions(+), 7 deletions(-) diff --git a/kernel/src/memory/allocators/extensions.rs b/kernel/src/memory/allocators/extensions.rs index 39abd8f..a5fa948 100644 --- a/kernel/src/memory/allocators/extensions.rs +++ b/kernel/src/memory/allocators/extensions.rs @@ -1,4 +1,4 @@ -use core::num::NonZero; +use 
From 85ef13c30653a053730d1662c4caab607ec5a854 Mon Sep 17 00:00:00 2001
From: sagi
Date: Mon, 2 Feb 2026 23:46:44 +0200
Subject: [PATCH 76/78] added translate and walk function
---
 kernel/src/memory/allocators/extensions.rs | 50 +++++++++++++---------
 kernel/src/memory/page.rs                  |  5 ++-
 shared/common/src/address_types.rs         | 18 --------
 shared/common/src/enums/paging.rs          |  9 ++++
 4 files changed, 43 insertions(+), 39 deletions(-)

diff --git a/kernel/src/memory/allocators/extensions.rs b/kernel/src/memory/allocators/extensions.rs
index a5fa948..382b761 100644
--- a/kernel/src/memory/allocators/extensions.rs
+++ b/kernel/src/memory/allocators/extensions.rs
@@ -126,47 +126,57 @@ pub impl VirtualAddress {
         page_size: PageSize,
         num_pages: NonZero,
     ) -> Result<(), EntryError> {
-        let address_index = self
-            .index_of(PageTableLevel::VARIANTS[page_size as usize + 1]);
+        let address_index = self.index_of(page_size.min_level());

         debug_assert!(
             address_index + num_pages.get() <= PAGE_DIRECTORY_ENTRIES,
             "There are only 512 entries inside a table"
         );

-        let mut table = PageTable::current_table_mut();
+        let mut table = self.walk(page_size.min_level())?;

-        for level in PageTableLevel::VARIANTS[0..page_size as usize].iter()
-        {
-            let index = self.index_of(*level);
-            let entry = &mut table.entries[index];
-            table = entry.mapped_table_mut()?;
+        unsafe {
+            table
+                .as_mut()
+                .entries
+                .iter_mut()
+                .skip(address_index)
+                .take(num_pages.get())
+                .for_each(|entry| entry.set_flags(flags));
         }

-        table
-            .entries
-            .iter_mut()
-            .skip(address_index)
-            .take(num_pages.get())
-            .for_each(|entry| entry.set_flags(flags));
-
         Ok(())
     }

+    /// Return the page table at the wanted level
     fn walk(
         &self,
         wanted: PageTableLevel,
-    ) -> Result, EntryError> {
+    ) -> Result, EntryError> {
         let mut table = PageTable::current_table_mut();

-        for level in PageTableLevel::VARIANTS[0..=wanted as usize] {
-            let entry = &table.entries[self.index_of(level)];
+        for level in PageTableLevel::VARIANTS[0..wanted as usize].iter() {
+            let entry = &table.entries[self.index_of(*level)];
             table = entry.mapped_table_mut()?;
         }
+
+        Ok(NonNull::from_mut(table))
     }

-    fn translate(&self) -> PhysicalAddress {
-        todo!()
+    fn translate(&self) -> Option {
+        let mut table = PageTable::current_table_mut();
+
+        for level in PageTableLevel::VARIANTS.iter() {
+            let entry = &table.entries[self.index_of(*level)];
+            match entry.mapped_table_mut() {
+                Ok(t) => table = t,
+                Err(EntryError::NotATable) => {
+                    return unsafe { Some(entry.mapped_unchecked()) };
+                }
+                Err(EntryError::NoMapping) => return None,
+            }
+        }
+        unreachable!()
     }
 }
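With this shape, `set_flags`, `walk`, and `translate` share one traversal: `walk` descends from the current PML4 and hands back the table at the requested depth, and `translate` is the same loop run to the leaf. A hedged usage sketch (assuming the extension traits are in scope; not code from this patch):

    // let pt = addr.walk(PageTableLevel::PT)?; // table holding addr's PT entry
    // let pa = addr.translate();               // Some(PhysicalAddress) if mapped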
diff --git a/kernel/src/memory/page.rs b/kernel/src/memory/page.rs
index 2b4ba20..13990f3 100644
--- a/kernel/src/memory/page.rs
+++ b/kernel/src/memory/page.rs
@@ -63,7 +63,10 @@ impl Page {
     /// **Note**: if you meant to get the page structure, consider using
     /// [`Page::from_virt`]
     pub fn index_of(addr: VirtualAddress) -> usize {
-        addr.translate().as_usize() / REGULAR_PAGE_SIZE
+        addr.translate()
+            .expect("Address could not be translated")
+            .as_usize()
+            / REGULAR_PAGE_SIZE
     }

     /// Return the physical page structure that is pointed by this physical
diff --git a/shared/common/src/address_types.rs b/shared/common/src/address_types.rs
index 76fe45a..df176b1 100644
--- a/shared/common/src/address_types.rs
+++ b/shared/common/src/address_types.rs
@@ -110,24 +110,6 @@ impl VirtualAddress {
     pub const fn index_of(&self, level: PageTableLevel) -> usize {
         (self.0 >> (39 - 9 * (level as usize))) & 0o777
     }
-
-    // pub fn translate(&self) -> Option {
-    //     let mut current_table =
-    //         PageTable::current_table(); for i in 0..4 {
-    //         let index = self.rev_nth_index_unchecked(i);
-    //         match
-    //         current_table.entries[index].mapped_table_mut() {
-    //             Ok(table) => current_table = table,
-    //             Err(EntryError::NotATable) => {
-    //                 return unsafe {
-    //                     Some(current_table.entries[index].mapped_unchecked())
-    //                 }; }
-    //             Err(EntryError::NoMapping) => return
-    //             None, Err(EntryError::Full) =>
-    //             unreachable!(), }
-    //     }
-    //     None
-    // }
 }

 impl PhysicalAddress {
diff --git a/shared/common/src/enums/paging.rs b/shared/common/src/enums/paging.rs
index 7f4f1f0..9ef30b2 100644
--- a/shared/common/src/enums/paging.rs
+++ b/shared/common/src/enums/paging.rs
@@ -77,6 +77,15 @@ impl PageSize {
         (*self as usize + 1) >= table_level as usize
     }

+    /// The minimal page level that this page size can exist on.
+    pub fn min_level(&self) -> PageTableLevel {
+        match self {
+            PageSize::Regular => PageTableLevel::PT,
+            PageSize::Big => PageTableLevel::PD,
+            PageSize::Huge => PageTableLevel::PDPT,
+        }
+    }
+
     /// Determines the appropriate `PageSizeAlignment` for a
     /// given memory layout.
     ///
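Spot-checking `allocatable_at` against `min_level` under the renumbered levels (a worked example; the `PageSize` discriminants Huge = 0, Big = 1, Regular = 2 are inferred from `min_level`, not stated in the patch):

    // allocatable_at(size, level) == (size + 1 >= level)
    // Huge:    PML4 (1 >= 0) true, PDPT (1 >= 1) true, PD and PT false
    // Big:     true down to PD (2 >= 2), false at PT (2 < 3)
    // Regular: true at every level, down to PT (3 >= 3)

So a size stays allocatable at every level down to and including its `min_level`, and stops being allocatable below it.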
From 7027ade07385d33fac4198a3d107c6faf3ebf7a9 Mon Sep 17 00:00:00 2001
From: sagi
Date: Tue, 3 Feb 2026 00:37:42 +0200
Subject: [PATCH 77/78] started changing &'static mut into NonNull
---
 kernel/src/drivers/ata/ahci/hba.rs                         | 13 ++++++++-----
 kernel/src/drivers/vga_display/writer.rs                   |  3 ++-
 learnix-macros/src/lib.rs                                  |  7 ++-----
 .../src/structures/paging/page_table_entry.rs              |  6 +++++-
 4 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/kernel/src/drivers/ata/ahci/hba.rs b/kernel/src/drivers/ata/ahci/hba.rs
index 7eccf45..41bc0b8 100644
--- a/kernel/src/drivers/ata/ahci/hba.rs
+++ b/kernel/src/drivers/ata/ahci/hba.rs
@@ -3,7 +3,7 @@
 /// Implemented directly from https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/serial-ata-ahci-spec-rev1-3-1.pdf
 extern crate alloc;

-use core::{fmt::Debug, num::NonZero, panic};
+use core::{fmt::Debug, num::NonZero, panic, ptr::NonNull};

 use common::{
     address_types::PhysicalAddress,
@@ -1198,7 +1198,9 @@ pub struct HBAMemoryRegisters {
 }

 impl HBAMemoryRegisters {
-    pub fn new(a: PhysicalAddress) -> Result<&'static mut Self, HbaError> {
+    pub fn new(
+        a: PhysicalAddress,
+    ) -> Result, HbaError> {
         if !a.is_aligned(REGULAR_PAGE_ALIGNMENT) {
             return Err(HbaError::AddressNotAligned);
         }
@@ -1209,8 +1211,9 @@ impl HBAMemoryRegisters {
             PageSize::Regular,
         );

-        let hba: &'static mut HBAMemoryRegisters =
-            unsafe { &mut *a.translate().as_mut_ptr() };
+        let mut hba_ptr = a.translate().as_ptr::();
+
+        let hba = unsafe { hba_ptr.as_mut() };

         hba.ghc.ghc.set_ae();
         hba.ghc.ghc.set_ie();
@@ -1225,7 +1228,7 @@ impl HBAMemoryRegisters {
             unimplemented!("Didn't implement bios os handoff")
         }

-        Ok(hba)
+        Ok(hba_ptr)
     }

     /// Returns the amount of active devices found and set them into idle
diff --git a/kernel/src/drivers/vga_display/writer.rs b/kernel/src/drivers/vga_display/writer.rs
index be2fe4f..fda256d 100644
--- a/kernel/src/drivers/vga_display/writer.rs
+++ b/kernel/src/drivers/vga_display/writer.rs
@@ -1,4 +1,5 @@
 use core::ascii::Char;
+use core::ptr::NonNull;

 use super::color_code::ColorCode;
 use super::screen_char::ScreenChar;
@@ -11,7 +12,7 @@ use cpu_utils::instructions::port::PortExt;
 pub struct Writer {
     pub cursor_position: usize,
     pub color: ColorCode,
-    pub screen: &'static mut [ScreenChar],
+    pub screen: NonNull<[ScreenChar]>,
 }
 // ANCHOR_END: writer
diff --git a/learnix-macros/src/lib.rs b/learnix-macros/src/lib.rs
index 86cf32c..0acebad 100644
--- a/learnix-macros/src/lib.rs
+++ b/learnix-macros/src/lib.rs
@@ -20,11 +20,8 @@ pub fn common_address_functions(input: TokenStream) -> TokenStream {
         pub const fn as_usize(&self) -> usize {
             self.0
         }
-        pub const unsafe fn as_mut_ptr(&self) -> *mut T {
-            core::ptr::with_exposed_provenance_mut::(self.0)
-        }
-        pub const fn as_ptr(&self) -> *const T {
-            core::ptr::with_exposed_provenance::(self.0)
+        pub const fn as_ptr(&self) -> core::ptr::NonNull {
+            core::ptr::NonNull::new_unchecked(core::ptr::with_exposed_provenance_mut::(self.0))
         }
         pub const fn is_aligned(
             &self,
diff --git a/shared/cpu_utils/src/structures/paging/page_table_entry.rs b/shared/cpu_utils/src/structures/paging/page_table_entry.rs
index 4769bec..6a395de 100644
--- a/shared/cpu_utils/src/structures/paging/page_table_entry.rs
+++ b/shared/cpu_utils/src/structures/paging/page_table_entry.rs
@@ -136,6 +136,8 @@ impl PageTableEntry {
     }
     // ANCHOR_END: page_table_entry_mapped

+    // TODO: CHANGE STATIC REF HERE AND EVERY OTHER LOW LEVEL PLACE LIKE
+    // THIS INTO NonNull
     /// Return the physical address mapped by this table as
     /// a reference into a page table.
     ///
@@ -144,7 +146,9 @@ impl PageTableEntry {
     /// This method assumes all page tables are identity
     /// mapped.
     // ANCHOR: page_table_entry_mapped_table_mut
     #[cfg(target_arch = "x86_64")]
     #[allow(clippy::mut_from_ref)]
-    pub fn mapped_table_mut(&self) -> Result<&mut PageTable, EntryError> {
+    pub fn mapped_table_mut(
+        &self,
+    ) -> Result<&'static mut PageTable, EntryError> {
         // first check if the entry is mapped.
         let pt = unsafe {
             &mut *self.mapped()?.translate().as_mut_ptr::()
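The `&'static mut` returns being phased out here can mint two aliasing unique references to the same hardware structure across calls, which is immediate undefined behavior; `NonNull` carries the address without a borrow and defers the reference to each use site. A minimal sketch of the target shape (hypothetical register block, not the HBA type above):

    use core::ptr::NonNull;

    #[repr(C)]
    struct Regs {
        status: u32,
    }

    // The caller decides when, and for how short a scope, a reference exists.
    fn status(regs: NonNull<Regs>) -> u32 {
        unsafe { regs.as_ref().status }
    }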
From f8031ab97fd7026fe07224a40daf11d981b7ea37 Mon Sep 17 00:00:00 2001
From: sagi
Date: Fri, 6 Feb 2026 14:06:04 +0200
Subject: [PATCH 78/78] Changed the necessary fields to NonNull; the fields
 that are actually static stayed static references.
---
 kernel/src/drivers/ata/ahci/hba.rs              |  3 +-
 kernel/src/drivers/vga_display/writer.rs        |  3 +-
 kernel/src/main.rs                              |  7 +-
 kernel/src/memory/allocators/buddy.rs           |  6 +-
 kernel/src/memory/allocators/extensions.rs      | 75 ++++++++++---------
 kernel/src/memory/allocators/slab/descriptor.rs | 14 ++--
 kernel/src/memory/memory_map.rs                 | 12 ++-
 learnix-macros/src/lib.rs                       |  6 +-
 shared/common/src/bitmap.rs                     |  2 +-
 shared/common/src/ring_buffer.rs                |  2 +-
 .../structures/interrupt_descriptor_table.rs    | 12 ++-
 .../src/structures/paging/page_table.rs         | 24 ++----
 .../src/structures/paging/page_table_entry.rs   | 30 ++------
 13 files changed, 92 insertions(+), 104 deletions(-)

diff --git a/kernel/src/drivers/ata/ahci/hba.rs b/kernel/src/drivers/ata/ahci/hba.rs
index 41bc0b8..4fcda72 100644
--- a/kernel/src/drivers/ata/ahci/hba.rs
+++ b/kernel/src/drivers/ata/ahci/hba.rs
@@ -1211,7 +1211,8 @@ impl HBAMemoryRegisters {
             PageSize::Regular,
         );

-        let mut hba_ptr = a.translate().as_ptr::();
+        let mut hba_ptr =
+            a.translate().as_non_null::();

         let hba = unsafe { hba_ptr.as_mut() };

diff --git a/kernel/src/drivers/vga_display/writer.rs b/kernel/src/drivers/vga_display/writer.rs
index fda256d..be2fe4f 100644
--- a/kernel/src/drivers/vga_display/writer.rs
+++ b/kernel/src/drivers/vga_display/writer.rs
@@ -1,5 +1,4 @@
 use core::ascii::Char;
-use core::ptr::NonNull;

 use super::color_code::ColorCode;
 use super::screen_char::ScreenChar;
@@ -12,7 +11,7 @@ use cpu_utils::instructions::port::PortExt;
 pub struct Writer {
     pub cursor_position: usize,
     pub color: ColorCode,
-    pub screen: NonNull<[ScreenChar]>,
+    pub screen: &'static mut [ScreenChar],
 }
 // ANCHOR_END: writer
diff --git a/kernel/src/main.rs b/kernel/src/main.rs
index dc64e34..df95021 100644
--- a/kernel/src/main.rs
+++ b/kernel/src/main.rs
@@ -68,8 +68,11 @@ pub unsafe extern "C" fn _start() -> ! {
     let last = MemoryMap(parsed_memory_map!()).last().unwrap();

-    PageTable::current_table_mut()
-        .map_physical_memory((last.base_address + last.length) as usize);
+    unsafe {
+        PageTable::current_table().as_mut().map_physical_memory(
+            (last.base_address + last.length) as usize,
+        );
+    }

     okprintln!("Initialized buddy allocator");
     unsafe {
         InterruptDescriptorTable::init(
diff --git a/kernel/src/memory/allocators/buddy.rs b/kernel/src/memory/allocators/buddy.rs
index 07059bf..4a15b29 100644
--- a/kernel/src/memory/allocators/buddy.rs
+++ b/kernel/src/memory/allocators/buddy.rs
@@ -104,14 +104,14 @@ impl BuddyAllocator {
         }
     }

-    pub fn alloc_table(&mut self) -> &'static mut PageTable {
+    pub fn alloc_table(&mut self) -> NonNull {
         unsafe {
             let address = self.alloc_pages(1).translate();
             ptr::write_volatile(
-                address.as_mut_ptr::(),
+                address.as_non_null::().as_ptr(),
                 PageTable::empty(),
             );
-            &mut *address.as_mut_ptr::()
+            address.as_non_null::()
         }
     }

diff --git a/kernel/src/memory/allocators/extensions.rs b/kernel/src/memory/allocators/extensions.rs
index 382b761..ed66725 100644
--- a/kernel/src/memory/allocators/extensions.rs
+++ b/kernel/src/memory/allocators/extensions.rs
@@ -51,23 +51,19 @@ pub impl PageTableEntry {
     /// Else, it will override what is inside the entry and
     /// map a new table to it so valid table is guaranteed
     /// to be returned.
-    fn force_resolve_table_mut(&mut self) -> Option<&mut PageTable> {
-        match self.mapped_table_mut() {
+    fn force_resolve_table_mut(&mut self) -> Option> {
+        match self.mapped_table() {
             Ok(table) => Some(table),
             Err(EntryError::NotATable) => None,
             Err(EntryError::NoMapping) => unsafe {
                 let resolved_table = BUDDY_ALLOCATOR.alloc_table();
                 self.map_unchecked(
                     PhysicalAddress::new_unchecked(
-                        resolved_table.address().as_usize(),
+                        resolved_table.addr().get(),
                     ),
                     PageEntryFlags::table_flags(),
                 );
-                Some(
-                    &mut *self
-                        .mapped_unchecked()
-                        .as_mut_ptr::(),
-                )
+                Some(self.mapped_unchecked().as_non_null::())
             },
         }
     }
@@ -95,19 +91,19 @@ pub impl VirtualAddress {
         if address.is_aligned(page_size.alignment())
             && self.is_aligned(page_size.alignment())
         {
-            let mut table = PageTable::current_table_mut();
+            let mut table = PageTable::current_table();

             for level in
                 PageTableLevel::VARIANTS[0..=page_size as usize].iter()
             {
                 let index = self.index_of(*level);
-                let entry = &mut table.entries[index];
+                let entry = unsafe { &mut table.as_mut().entries[index] };
                 let resolved_table = entry
                     .force_resolve_table_mut()
                     .expect("Tried to create table on a mapped entry");
                 table = resolved_table;
             }
             unsafe {
-                table.entries[self.index_of(
+                table.as_mut().entries[self.index_of(
                     PageTableLevel::VARIANTS[page_size as usize + 1],
                 )]
                 .map(address, flags);

     /// Return the page table at the wanted level
     fn walk(
         &self,
         wanted: PageTableLevel,
     ) -> Result, EntryError> {
-        let mut table = PageTable::current_table_mut();
+        let mut table = PageTable::current_table();

         for level in PageTableLevel::VARIANTS[0..wanted as usize].iter() {
-            let entry = &table.entries[self.index_of(*level)];
-            table = entry.mapped_table_mut()?;
+            let entry =
+                unsafe { &table.as_ref().entries[self.index_of(*level)] };
+            table = entry.mapped_table()?;
         }

-        Ok(NonNull::from_mut(table))
+        Ok(table)
     }

     fn translate(&self) -> Option {
-        let mut table = PageTable::current_table_mut();
+        let mut table = PageTable::current_table();

         for level in PageTableLevel::VARIANTS.iter() {
-            let entry = &table.entries[self.index_of(*level)];
+            let entry =
+                unsafe { &table.as_mut().entries[self.index_of(*level)] };
             match entry.mapped_table() {
-                Ok(t) => table = t,
+                Ok(mapped) => table = mapped,
                 Err(EntryError::NotATable) => {
                     return unsafe { Some(entry.mapped_unchecked()) };
                 }
                 Err(EntryError::NoMapping) => return None,
             }
         }
         unreachable!()
     }
 }

 #[ext]
 pub impl PageTable {
@@ -182,6 +180,7 @@ pub impl PageTable {
+    // TODO: turn into a tail called function with become
     /// Find an avavilable page in the given size.
     // ANCHOR: page_table_find_available_page
     #[cfg(target_arch = "x86_64")]
     fn find_available_page(
         page_size: PageSize,
     ) -> Result {
         const TOTAL_LEVELS: usize = PageTableLevel::VARIANTS.len();
         let mut level_indices = [0usize; TOTAL_LEVELS];
         let mut page_tables = [Self::current_table(); TOTAL_LEVELS];
         let mut current_level = PageTableLevel::PML4;
         loop {
-            let current_table =
+            let mut current_table =
                 page_tables[TOTAL_LEVELS - current_level as usize];

-            let ti = current_table.try_fetch_table(
-                level_indices[TOTAL_LEVELS - current_level as usize],
-                current_level,
-                page_size,
-            );
+            let ti = unsafe {
+                current_table.as_mut().try_fetch_table(
+                    level_indices[TOTAL_LEVELS - current_level as usize],
+                    current_level,
+                    page_size,
+                )
+            };

             let next_table = match ti {
                 EntryIndex::OutOfEntries | EntryIndex::PageDoesNotFit => {
                     current_level = current_level.prev()?;
                     level_indices
                         [TOTAL_LEVELS - current_level as usize] += 1;
                     continue;
                 }
                 EntryIndex::Entry(entry) => {
                     level_indices[TOTAL_LEVELS - current_level as usize] =
                         entry.table_index();
                     unsafe {
-                        &*entry.mapped_unchecked().as_ptr::()
+                        entry.mapped_unchecked().as_non_null::()
                     }
                 }
                 EntryIndex::Index(i) => {
                     level_indices[TOTAL_LEVELS - current_level as usize] =
                         i;
                     return Ok(VirtualAddress::from_indices(
                         level_indices,
                     ));
                 }
             };
             let next_level = current_level
                 .next()
                 .expect("Can't go next on a first level table");
             page_tables[TOTAL_LEVELS - next_level as usize] = next_table;
             current_level = next_level;
         }
     }
     // ANCHOR_END: page_table_find_available_page

+    // TODO: turn into a tail called function with become
     /// Map the region of memory from 0 to `mem_size_bytes`
     /// at the top of the page table so that
     ///
@@ -254,20 +256,23 @@ pub impl PageTable {
         for forth_entry in &mut self.entries[(PAGE_DIRECTORY_ENTRIES / 2)
             ..(forth_level_entries_count + (PAGE_DIRECTORY_ENTRIES / 2))]
         {
-            let third_table =
+            let mut third_table =
                 forth_entry.force_resolve_table_mut().unwrap();

-            for third_entry in &mut third_table.entries
-                [0..third_level_entries_count.min(PAGE_DIRECTORY_ENTRIES)]
-            {
+            for third_entry in unsafe {
+                &mut third_table.as_mut().entries[0
+                    ..third_level_entries_count
+                        .min(PAGE_DIRECTORY_ENTRIES)]
+            } {
-                let second_table =
+                let mut second_table =
                     third_entry.force_resolve_table_mut().unwrap();
                 third_level_entries_count -= 1;

-                for second_entry in &mut second_table.entries[0
-                    ..second_level_entries_count
-                        .min(PAGE_DIRECTORY_ENTRIES)]
-                {
+                for second_entry in unsafe {
+                    &mut second_table.as_mut().entries[0
+                        ..second_level_entries_count
+                            .min(PAGE_DIRECTORY_ENTRIES)]
+                } {
                     if !second_entry.is_present() {
                         unsafe {
                             second_entry.map(
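The `NonNull`-based `translate` keeps the leaf case explicit: the walk ends either at a non-table entry (a mapped page, possibly huge) or at a hole. A hedged usage sketch, not code from the patch:

    // match addr.translate() {
    //     Some(pa) => { /* pa is the PhysicalAddress backing addr */ }
    //     None => { /* addr is unmapped at some level */ }
    // }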
diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs
index da1349b..2844f74 100644
--- a/kernel/src/memory/allocators/slab/descriptor.rs
+++ b/kernel/src/memory/allocators/slab/descriptor.rs
@@ -69,15 +69,11 @@ impl SlabDescriptor {
     ) -> SlabDescriptor {
         let address = unsafe { alloc_pages!(1 << order).translate() };

-        let mut objects = unsafe {
-            NonNull::slice_from_raw_parts(
-                NonNull::new_unchecked(
-                    address.as_mut_ptr::>(),
-                ),
-                ((1 << order) * REGULAR_PAGE_SIZE)
-                    / size_of::>(),
-            )
-        };
+        let mut objects = NonNull::slice_from_raw_parts(
+            address.as_non_null::>(),
+            ((1 << order) * REGULAR_PAGE_SIZE)
+                / size_of::>(),
+        );

         for (i, object) in
             unsafe { objects.as_mut() }.iter_mut().enumerate()
diff --git a/kernel/src/memory/memory_map.rs b/kernel/src/memory/memory_map.rs
index 18419a4..7ea62f5 100644
--- a/kernel/src/memory/memory_map.rs
+++ b/kernel/src/memory/memory_map.rs
@@ -16,13 +16,15 @@ macro_rules! parsed_memory_map {
                 as usize,
             )
             .translate()
-            .as_mut_ptr::<$crate::memory::memory_map::MemoryRegion>(),
+            .as_non_null::<$crate::memory::memory_map::MemoryRegion>()
+            .as_ptr(),
             *(common::address_types::PhysicalAddress::new_unchecked(
                 common::constants::addresses::PARSED_MEMORY_MAP_LENGTH
                     as usize,
             )
             .translate()
-            .as_mut_ptr::()) as usize,
+            .as_non_null::()
+            .as_ptr()) as usize,
         )
     }
 };
@@ -38,12 +40,14 @@ macro_rules! raw_memory_map {
             common::constants::addresses::MEMORY_MAP_OFFSET as usize,
         )
         .translate()
-        .as_mut_ptr::<$crate::memory::memory_map::MemoryRegionExtended>(),
+        .as_non_null::<$crate::memory::memory_map::MemoryRegionExtended>()
+        .as_ptr(),
         *(common::address_types::PhysicalAddress::new_unchecked(
             common::constants::addresses::MEMORY_MAP_LENGTH as usize,
         )
         .translate()
-        .as_mut_ptr::()) as usize,
+        .as_non_null::()
+        .as_ptr()) as usize,
     )
 }
};
parsed_memory_map { as usize, ) .translate() - .as_mut_ptr::<$crate::memory::memory_map::MemoryRegion>(), + .as_non_null::<$crate::memory::memory_map::MemoryRegion>() + .as_ptr(), *(common::address_types::PhysicalAddress::new_unchecked( common::constants::addresses::PARSED_MEMORY_MAP_LENGTH as usize, ) .translate() - .as_mut_ptr::()) as usize, + .as_non_null::() + .as_ptr()) as usize, ) } }; @@ -38,12 +40,14 @@ macro_rules! raw_memory_map { common::constants::addresses::MEMORY_MAP_OFFSET as usize, ) .translate() - .as_mut_ptr::<$crate::memory::memory_map::MemoryRegionExtended>(), + .as_non_null::<$crate::memory::memory_map::MemoryRegionExtended>() + .as_ptr(), *(common::address_types::PhysicalAddress::new_unchecked( common::constants::addresses::MEMORY_MAP_LENGTH as usize, ) .translate() - .as_mut_ptr::()) as usize, + .as_non_null::() + .as_ptr()) as usize, ) } }; diff --git a/learnix-macros/src/lib.rs b/learnix-macros/src/lib.rs index 0acebad..70f8ae0 100644 --- a/learnix-macros/src/lib.rs +++ b/learnix-macros/src/lib.rs @@ -20,8 +20,10 @@ pub fn common_address_functions(input: TokenStream) -> TokenStream { pub const fn as_usize(&self) -> usize { self.0 } - pub const fn as_ptr(&self) -> core::ptr::NonNull { - core::ptr::NonNull::new_unchecked(core::ptr::with_exposed_provenance_mut::(self.0)) + pub const fn as_non_null(&self) -> core::ptr::NonNull { + core::ptr::NonNull::new( + core::ptr::with_exposed_provenance_mut::(self.0) + ).expect("Tried to create NonNull from address, found null") } pub const fn is_aligned( &self, diff --git a/shared/common/src/bitmap.rs b/shared/common/src/bitmap.rs index 06d8031..1adde6a 100644 --- a/shared/common/src/bitmap.rs +++ b/shared/common/src/bitmap.rs @@ -182,7 +182,7 @@ impl BitMap { BitMap { map: unsafe { slice::from_raw_parts_mut( - map_address.as_mut_ptr::(), + map_address.as_non_null::().as_mut(), map_size, ) }, diff --git a/shared/common/src/ring_buffer.rs b/shared/common/src/ring_buffer.rs index 6535b6e..d52b3f2 100644 --- a/shared/common/src/ring_buffer.rs +++ b/shared/common/src/ring_buffer.rs @@ -17,7 +17,7 @@ impl RingBuffer { write_idx: 0, buffer: unsafe { slice::from_raw_parts_mut( - buffer_address.as_mut_ptr::(), + buffer_address.as_non_null::().as_mut(), length.get(), ) }, diff --git a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs index 5e22d7e..7043601 100644 --- a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs +++ b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs @@ -1,5 +1,5 @@ use common::{ - address_types::{PhysicalAddress, VirtualAddress}, + address_types::VirtualAddress, enums::{ ProtectionLevel, SystemSegmentType, interrupts::{Interrupt, InterruptStackTable, InterruptType}, @@ -106,14 +106,20 @@ impl InterruptDescriptorTable { gdt.load_tss(tss); unsafe { ptr::write_volatile( - base_address.as_mut_ptr::(), + base_address + .as_non_null::() + .as_ptr(), InterruptDescriptorTable { interrupts: [const { InterruptDescriptorTableEntry::missing() }; 256], }, ); - uninit.write(&mut *base_address.as_mut_ptr::()); + uninit.write( + base_address + .as_non_null::() + .as_mut(), + ); uninit.assume_init_ref().load(); } } diff --git a/shared/cpu_utils/src/structures/paging/page_table.rs b/shared/cpu_utils/src/structures/paging/page_table.rs index fee1d80..9b7739b 100644 --- a/shared/cpu_utils/src/structures/paging/page_table.rs +++ b/shared/cpu_utils/src/structures/paging/page_table.rs @@ -1,4 +1,4 @@ -use core::ptr; +use core::ptr::{self, 
diff --git a/shared/cpu_utils/src/structures/paging/page_table_entry.rs b/shared/cpu_utils/src/structures/paging/page_table_entry.rs
index 6a395de..6e6c990 100644
--- a/shared/cpu_utils/src/structures/paging/page_table_entry.rs
+++ b/shared/cpu_utils/src/structures/paging/page_table_entry.rs
@@ -1,3 +1,6 @@
+#[cfg(target_arch = "x86_64")]
+use core::ptr::NonNull;
+
 use common::{
     address_types::PhysicalAddress,
     constants::{ENTRY_ADDRESS_MASK, REGULAR_PAGE_ALIGNMENT},
@@ -136,38 +139,17 @@ impl PageTableEntry {
     }
     // ANCHOR_END: page_table_entry_mapped

-    // TODO: CHANGE STATIC REF HERE AND EVERY OTHER LOW LEVEL PLACE LIKE
-    // THIS INTO NonNull
     /// Return the physical address mapped by this table as
     /// a reference into a page table.
     ///
     /// This method assumes all page tables are identity
     /// mapped.
-    // ANCHOR: page_table_entry_mapped_table_mut
-    #[cfg(target_arch = "x86_64")]
-    #[allow(clippy::mut_from_ref)]
-    pub fn mapped_table_mut(
-        &self,
-    ) -> Result<&'static mut PageTable, EntryError> {
-        // first check if the entry is mapped.
-        let pt = unsafe {
-            &mut *self.mapped()?.translate().as_mut_ptr::()
-        };
-        // then check if it is a table.
-        if !self.is_huge_page() && self.is_table() {
-            Ok(pt)
-        } else {
-            Err(EntryError::NotATable)
-        }
-    }
-    // ANCHOR_END: page_table_entry_mapped_table_mut
-
     // ANCHOR: page_table_entry_mapped_table
     #[cfg(target_arch = "x86_64")]
-    pub fn mapped_table(&self) -> Result<&PageTable, EntryError> {
+    #[allow(clippy::mut_from_ref)]
+    pub fn mapped_table(&self) -> Result, EntryError> {
         // first check if the entry is mapped.
-        let pt =
-            unsafe { &*self.mapped()?.translate().as_ptr::() };
+        let pt = self.mapped()?.translate().as_non_null::();
         // then check if it is a table.
         if !self.is_huge_page() && self.is_table() {
             Ok(pt)