diff --git a/.vscode/settings.json b/.vscode/settings.json
index 58834f2..8d53b99 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -59,6 +59,7 @@
         "fbss",
         "FDDI",
         "firewire",
+        "freelist",
         "FXSR",
         "Gameport",
         "GPIB",
@@ -69,6 +70,7 @@
         "Hotplug",
         "hpcp",
         "HPET",
+        "ilog",
         "infe",
         "infs",
         "inval",
@@ -78,6 +80,8 @@
         "ipms",
         "IRDA",
         "ISDN",
+        "kfree",
+        "kmalloc",
         "lctrl",
         "Learnix",
         "lgdt",
@@ -85,6 +89,7 @@
         "lshift",
         "mdat",
         "metavar",
+        "mmap",
         "MOVBE",
         "mpsp",
         "mpss",
@@ -92,6 +97,7 @@
         "MTRR",
         "Multiport",
         "nomem",
+        "nonmax",
         "nostack",
         "notif",
         "NVME",
@@ -106,6 +112,7 @@
         "PCLMUL",
         "PDCM",
         "PDPT",
+        "peekable",
         "PICMG",
         "PICPI",
         "PIIX",
@@ -157,8 +164,10 @@
         "tfee",
         "tfes",
         "thiserror",
+        "udma",
         "ufis",
         "UHCI",
+        "Unassignment",
         "Uninit",
         "USBPI",
         "virt",
@@ -171,7 +180,7 @@
     ],
     "rust-analyzer.inlayHints.chainingHints.enable": true,
     "rust-analyzer.check.command": "clippy",
-    "rust-analyzer.cargo.extraArgs": [
+    "rust-analyzer.check.extraArgs": [
         "--release"
     ],
     "rust-analyzer.cargo.extraEnv": {
diff --git a/build/build.rs b/build/build.rs
index 1124c9d..3c7866b 100644
--- a/build/build.rs
+++ b/build/build.rs
@@ -56,7 +56,7 @@ fn main() -> io::Result<()> {
         "targets/32bit_target.json",
         "release",
     );
-    build_stage("../kernel", "targets/64bit_target.json", &profile);
+    build_stage("../kernel", "targets/64bit_target.json", "release");

     // Combine binaries into one image
     let input_dir = PathBuf::from("bin");
diff --git a/build/runner/runner.bat b/build/runner/runner.bat
index 3845e2e..fba6a2b 100644
--- a/build/runner/runner.bat
+++ b/build/runner/runner.bat
@@ -4,5 +4,5 @@
 qemu-system-x86_64 ^
     -M q35 ^
     -drive id=disk0,file=build/image.bin,if=none,format=raw ^
-    -device ide-hd,drive=disk0,bus=ide.0 ^
+    -device ide-hd,drive=disk0,bus=ide.0,rotation_rate=1 ^
    -monitor stdio
diff --git a/build/targets/64bit_target.json b/build/targets/64bit_target.json
index 1ae7434..7d8c39d 100644
--- a/build/targets/64bit_target.json
+++ b/build/targets/64bit_target.json
@@ -13,7 +13,12 @@
     "position-independent-executables": true,
     "relro-level": "off",
     "stack-probes": {
-        "kind": "call"
+        "kind": "inline-or-call",
+        "min-llvm-version-for-inline": [
+            16,
+            0,
+            0
+        ]
     },
     "static-position-independent-executables": true,
     "target-pointer-width": 64,
diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml
index c9584e8..36e6781 100644
--- a/kernel/Cargo.toml
+++ b/kernel/Cargo.toml
@@ -16,3 +16,4 @@ extend = "1.2.0"
 learnix-macros = { path = "../learnix-macros" }
 strum_macros = { version = "0.27", default-features = false }
 strum = { version = "0.27", default-features = false }
+nonmax = { version = "0.5.5", default-features = false }
diff --git a/kernel/src/drivers/ata/ahci/fis.rs b/kernel/src/drivers/ata/ahci/fis.rs
index 26dbd98..3542819 100644
--- a/kernel/src/drivers/ata/ahci/fis.rs
+++ b/kernel/src/drivers/ata/ahci/fis.rs
@@ -1,24 +1,30 @@
-use common::enums::{AtaCommand, FisType};
+use core::{ascii::Char, fmt::Debug, num::NonZero};
+
+use common::{
+    enums::{AtaCommand, FisType},
+    volatile::Volatile,
+};
+use learnix_macros::{flag, ro_flag};

 #[repr(C, align(4))]
 #[derive(Clone, Copy, Debug)]
 pub struct RegisterH2D {
-    fis_type: FisType,
-    pm_flags: u8,
-    command: AtaCommand,
-    features: u8,
-    lba1: u8,
-    lba2: u8,
-    lba3: u8,
-    device: u8,
-    lba4: u8,
-    lba5: u8,
-    lba6: u8,
-    features_ext: u8,
-    sector_count: u8,
-    sector_count_ext: u8,
+    fis_type: Volatile<FisType>,
+    pm_flags: Volatile<u8>,
+    command: Volatile<AtaCommand>,
+    features: Volatile<u8>,
+    lba1: Volatile<u8>,
+    lba2: Volatile<u8>,
+    lba3: Volatile<u8>,
+    device: Volatile<u8>,
+    lba4: Volatile<u8>,
+    lba5: Volatile<u8>,
+    lba6: Volatile<u8>,
+    features_ext: Volatile<u8>,
+    sector_count: Volatile<u8>,
+    sector_count_ext: Volatile<u8>,
     _reserved0: u8,
-    control: u8,
+    control: Volatile<u8>,
     _reserved1: [u8; 4],
 }

@@ -32,25 +38,25 @@ impl RegisterH2D {
         sector_count: u16,
         control: u8,
     ) -> RegisterH2D {
-        let features_low = features as u8;
-        let features_ext = (features >> 8) as u8;
-        let lba1 = lba as u8;
-        let lba2 = (lba >> 8) as u8;
-        let lba3 = (lba >> 16) as u8;
-        let lba4 = (lba >> 24) as u8;
-        let lba5 = (lba >> 32) as u8;
-        let lba6 = (lba >> 40) as u8;
-        let sector_count_low = sector_count as u8;
-        let sector_count_ext = (sector_count >> 8) as u8;
+        let features_low = Volatile::new(features as u8);
+        let features_ext = Volatile::new((features >> 8) as u8);
+        let lba1 = Volatile::new(lba as u8);
+        let lba2 = Volatile::new((lba >> 8) as u8);
+        let lba3 = Volatile::new((lba >> 16) as u8);
+        let lba4 = Volatile::new((lba >> 24) as u8);
+        let lba5 = Volatile::new((lba >> 32) as u8);
+        let lba6 = Volatile::new((lba >> 40) as u8);
+        let sector_count_low = Volatile::new(sector_count as u8);
+        let sector_count_ext = Volatile::new((sector_count >> 8) as u8);
         RegisterH2D {
-            fis_type: FisType::RegisterFisHost2Device,
-            pm_flags,
-            command,
+            fis_type: Volatile::new(FisType::RegisterFisHost2Device),
+            pm_flags: Volatile::new(pm_flags),
+            command: Volatile::new(command),
             features: features_low,
             lba1,
             lba2,
             lba3,
-            device,
+            device: Volatile::new(device),
             lba4,
             lba5,
             lba6,
@@ -58,7 +64,7 @@ impl RegisterH2D {
             sector_count: sector_count_low,
             sector_count_ext,
             _reserved0: 0,
-            control,
+            control: Volatile::new(control),
             _reserved1: [0; 4],
         }
     }
@@ -67,20 +73,20 @@ impl RegisterH2D {
 #[repr(C, align(4))]
 #[derive(Clone, Copy, Debug)]
 pub struct RegisterD2H {
-    fis_type: FisType,
-    pm_flags: u8,
-    status: u8,
-    error: u8,
-    lba1: u8,
-    lba2: u8,
-    lba3: u8,
-    device: u8,
-    lba4: u8,
-    lba5: u8,
-    lba6: u8,
+    fis_type: Volatile<FisType>,
+    pm_flags: Volatile<u8>,
+    status: Volatile<u8>,
+    error: Volatile<u8>,
+    lba1: Volatile<u8>,
+    lba2: Volatile<u8>,
+    lba3: Volatile<u8>,
+    device: Volatile<u8>,
+    lba4: Volatile<u8>,
+    lba5: Volatile<u8>,
+    lba6: Volatile<u8>,
     _reserved0: u8,
-    sector_count: u8,
-    sector_count_ext: u8,
+    sector_count: Volatile<u8>,
+    sector_count_ext: Volatile<u8>,
     _reserved1: [u8; 6],
 }

@@ -89,8 +95,8 @@ impl RegisterD2H {}
 #[repr(C, align(4))]
 #[derive(Clone, Copy, Debug)]
 pub struct DmaActivateD2H {
-    fis_type: FisType,
-    pm_flags: u8,
+    fis_type: Volatile<FisType>,
+    pm_flags: Volatile<u8>,
     _reserved: [u8; 2],
 }

@@ -98,14 +104,14 @@ pub struct DmaActivateD2H {
 #[repr(C, align(4))]
 #[derive(Clone, Copy, Debug)]
 pub struct DmaSetup {
-    fis_type: FisType,
-    pm_flags: u8,
+    fis_type: Volatile<FisType>,
+    pm_flags: Volatile<u8>,
     _reserved0: [u8; 2],
-    dma_buffer_id_lower: u32,
-    dma_buffer_id_upper: u32,
+    dma_buffer_id_lower: Volatile<u32>,
+    dma_buffer_id_upper: Volatile<u32>,
     _reserved1: u32,
-    dma_buffer_offset: u32,
-    dma_transfer_count: u32,
+    dma_buffer_offset: Volatile<u32>,
+    dma_transfer_count: Volatile<u32>,
     _reserved: u32,
 }

@@ -113,65 +119,65 @@ pub struct DmaSetup {
 #[repr(C)]
 #[derive(Clone, Copy, Debug)]
 pub struct BistActivate {
-    fis_type: FisType,
-    pm_flags: u8,
-    pattern_def: u8,
+    fis_type: Volatile<FisType>,
+    pm_flags: Volatile<u8>,
+    pattern_def: Volatile<u8>,
     _reserved: u8,
-    data1: u8,
-    data2: u8,
-    data3: u8,
-    data4: u8,
+    data1: Volatile<u8>,
+    data2: Volatile<u8>,
+    data3: Volatile<u8>,
+    data4: Volatile<u8>,
 }

 #[repr(C)]
 #[derive(Clone, Copy, Debug)]
 pub struct PioSetupD2H {
-    fis_type: FisType,
-    pm_flags: u8,
-    status: u8,
-    error: u8,
-    lba1: u8,
-    lba2: u8,
-    lba3: u8,
-    device: u8,
-    lba4: u8,
-    lba5: u8,
-    lba6: u8,
+    fis_type: Volatile<FisType>,
+    pm_flags: Volatile<u8>,
+    status: Volatile<u8>,
+    error: Volatile<u8>,
+    lba1: Volatile<u8>,
+    lba2: Volatile<u8>,
+    lba3: Volatile<u8>,
+    device: Volatile<u8>,
+    lba4: Volatile<u8>,
+    lba5: Volatile<u8>,
+    lba6: Volatile<u8>,
     _reserved0: u8,
-    sector_count: u8,
-    sector_count_exp: u8,
+    sector_count: Volatile<u8>,
+    sector_count_exp: Volatile<u8>,
     _reserved1: u8,
-    estatus: u8,
-    transfer_count: u16,
+    estatus: Volatile<u8>,
+    transfer_count: Volatile<u16>,
     _reserved2: u16,
 }

 #[repr(C)]
 #[derive(Clone, Copy, Debug)]
 pub struct Data<const SIZE: usize> {
-    fis_type: u8,
-    pm_port: u8,
+    fis_type: Volatile<u8>,
+    pm_port: Volatile<u8>,
     _reserved0: [u8; 2],
-    data: [u32; SIZE],
+    data: Volatile<[u32; SIZE]>,
 }

 #[repr(C)]
 #[derive(Clone, Copy, Debug)]
 pub struct SetDeviceBits {
-    fis_type: FisType,
-    pm_port: u8,
-    status: u8,
-    error: u8,
+    fis_type: Volatile<FisType>,
+    pm_port: Volatile<u8>,
+    status: Volatile<u8>,
+    error: Volatile<u8>,
     _reserved: u32,
 }

 impl SetDeviceBits {
     pub fn status_low(&self) -> u8 {
-        self.status & !0x7
+        self.status.read() & !0x7
     }

     pub fn status_high(&self) -> u8 {
-        (self.status >> 4) & !0x7
+        (self.status.read() >> 4) & !0x7
     }
 }

@@ -193,7 +199,87 @@ impl Default for Fis {
     }
 }

+pub struct GeneralInfo(u16);
+
+impl GeneralInfo {
+    ro_flag!(non_magnetic, 15);
+    ro_flag!(removable_media, 7);
+    ro_flag!(not_removable_media, 6);
+}
+
+impl Debug for GeneralInfo {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        writeln!(f, "Non Magnetic: {:?}", self.is_non_magnetic())?;
+        writeln!(f, "Removable Media: {:?}", self.is_removable_media())?;
+        writeln!(
+            f,
+            "Not Removable Media: {:?}",
+            self.is_not_removable_media()
+        )
+    }
+}
+
+pub struct DeviceCapabilities(u16);
+
+impl DeviceCapabilities {
+    ro_flag!(lba_dma_support, 10);
+}
+
+impl Debug for DeviceCapabilities {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        writeln!(f, "LBA & DMA Support: {:?},", self.is_lba_dma_support())
+    }
+}
+
+pub struct ValidFields(u16);
+
+impl ValidFields {
+    ro_flag!(valid_54_58, 0);
+    ro_flag!(valid_64_70, 1);
+}
+
+#[derive(Debug)]
 #[repr(C, align(512))]
 pub struct IdentityPacketData {
-    pub data: [u16; 0x100],
+    pub info: GeneralInfo,
+    pub cylinders: u16,
+    _reserved0: u16,
+    pub heads: u16,
+    _vendor0: [u16; 2],
+    pub sectors: u16,
+    _vendor1: [u16; 3],
+    pub serial_number: [Char; 20],
+    _vendor2: [u16; 3],
+    /// Firmware revision in ASCII Characters
+    pub firmware_rev: [Char; 8],
+    /// Model number in ASCII Characters
+    pub model_num: [Char; 40],
+    pub max_sectors_rw_multiple: u8,
+    pub _vendor3: u8,
+    _reserved1: u16,
+    pub capabilities: u16,
+    _reserved9: u16,
+    pub pio_data_transfer_time: u16,
+    pub dma_data_transfer_time: u16,
+    pub valid_fields: u16,
+    pub cur_cylinders: u16,
+    pub cur_heads: u16,
+    pub cur_sectors: u16,
+    pub capacity_sectors: [u16; 2],
+    pub _reserved10: u16,
+    pub lba_total_sectors_28: [u16; 2],
+    // _reserved2: [u16; 19],
+    // pub major_version: u16,
+    // pub minor_version: u16,

+    // pub command_sets_supported: [u16; 3],
+    // pub command_sets_enabled: [u16; 3],
+    // pub udma_modes: u16,
+    // pub lba_total_sectors_48: u64,
+    // _reserved4: [u16; 113], // Words 169-206
+    // pub physical_logical_sector_size: u16, // Word 209
+    // _reserved5: [u16; 7], // Words 210-216
+    // pub nominal_media_rotation_rate: u16, // Word 217 (the SSD vs
+    //                                       // HDD key)
+    // _reserved6: [u16; 40],
 }
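A note on the `Volatile` wrapper that fis.rs now uses everywhere: these FIS structures sit in DMA-visible memory that the HBA reads and writes behind the compiler's back, so plain field accesses could legally be cached, reordered, or elided. The `common::volatile::Volatile` type itself is not part of this diff; a minimal sketch of the shape the patch relies on (only `new`, `read`, and `write` are used here, and the `#[repr(transparent)]` is assumed so that the `#[repr(C)]` hardware layouts are preserved):

    // Hypothetical sketch of `common::volatile::Volatile`; the real type
    // lives in the `common` crate and is not shown in this patch.
    #[repr(transparent)]
    #[derive(Clone, Copy, Debug)]
    pub struct Volatile<T>(T);

    impl<T: Copy> Volatile<T> {
        pub const fn new(value: T) -> Self {
            Volatile(value)
        }

        /// Read the wrapped value without letting the compiler cache it.
        pub fn read(&self) -> T {
            unsafe { core::ptr::read_volatile(&self.0) }
        }

        /// Write the wrapped value without letting the compiler elide it.
        pub fn write(&mut self, value: T) {
            unsafe { core::ptr::write_volatile(&mut self.0, value) }
        }
    }

    fn demo() {
        let mut v = Volatile::new(0u8);
        v.write(7);
        assert_eq!(v.read(), 7);
    }
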
diff --git a/kernel/src/drivers/ata/ahci/hba.rs b/kernel/src/drivers/ata/ahci/hba.rs
index 5173b0f..4fcda72 100644
--- a/kernel/src/drivers/ata/ahci/hba.rs
+++ b/kernel/src/drivers/ata/ahci/hba.rs
@@ -3,10 +3,10 @@
 /// Implemented directly from https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/serial-ata-ahci-spec-rev1-3-1.pdf
 extern crate alloc;

-use core::{mem::MaybeUninit, num::NonZero};
+use core::{fmt::Debug, num::NonZero, panic, ptr::NonNull};

 use common::{
-    address_types::{PhysicalAddress, VirtualAddress},
+    address_types::PhysicalAddress,
     constants::{
         PHYSICAL_MEMORY_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE,
     },
@@ -14,11 +14,14 @@ use common::{
         AtaCommand, Color, DeviceDetection, DeviceType,
         InterfaceCommunicationControl, InterfaceInitialization,
         InterfacePowerManagement, InterfaceSpeed,
-        InterfaceSpeedRestriction, PageSize, PicInterruptVectorOffset,
+        InterfaceSpeedRestriction, PageSize,
     },
     error::{AhciError, ConversionError, DiagnosticError, HbaError},
+    read_volatile,
+    volatile::Volatile,
+    write_volatile,
 };
-use cpu_utils::{instructions::port, structures::paging::PageEntryFlags};
+use cpu_utils::structures::paging::PageEntryFlags;
 use learnix_macros::{flag, ro_flag, rw1_flag, rwc_flag};
 use num_enum::UnsafeFromPrimitive;
 use strum::IntoEnumIterator;
@@ -32,18 +35,17 @@ use crate::{
         },
         vga_display::color_code::ColorCode,
     },
-    memory::allocators::page_allocator::{
-        allocator::PhysicalPageAllocator, extensions::PhysicalAddressExt,
-    },
+    eprintln,
+    memory::allocators::extensions::PhysicalAddressExt,
     print, println,
 };

-use alloc::vec::Vec;
-
+#[repr(transparent)]
 #[derive(Copy, Clone)]
 pub struct AHCIBaseAddress(pub u32);

 /// CAP
+#[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct HBACapabilities(pub u32);

@@ -70,7 +72,11 @@ impl HBACapabilities {
     ro_flag!(sal, 25);

     pub fn interface_speed(&self) -> InterfaceSpeed {
-        unsafe { core::mem::transmute(((self.0 >> 20) & 0xf) as u8) }
+        unsafe {
+            core::mem::transmute(
+                (((read_volatile!(self.0)) >> 20) & 0xf) as u8,
+            )
+        }
     }

     // Support AHCI mode only
@@ -93,7 +99,7 @@ impl HBACapabilities {

     // This value is between 1 and 32
     pub fn number_of_commands(&self) -> u8 {
-        ((self.0 >> 8) & 0x1f) as u8
+        (((read_volatile!(self.0)) >> 8) & 0x1f) as u8
     }

     // Command completion coalescing supported
@@ -107,11 +113,12 @@ impl HBACapabilities {

     /// Returns the number of ports implemented
     pub fn number_of_ports(&self) -> u8 {
-        (self.0 & 0x1f) as u8
+        (read_volatile!(self.0) & 0x1f) as u8
     }
 }

 /// GHC
+#[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct GlobalHostControl(pub u32);

@@ -131,6 +138,7 @@ impl GlobalHostControl {
 }

 /// IS
+#[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct InterruptStatus(pub u32);

@@ -138,11 +146,15 @@ impl InterruptStatus {
     // Port Interrupt Pending Status. Corresponds to bits of the PI
     // register. Cleared by writing a '1' to the corresponding bit.
     pub fn is_port_pending(&self, port_num: u8) -> bool {
-        (self.0 & (1 << port_num)) != 0
+        (read_volatile!(self.0) & (1 << port_num)) != 0
     }

     pub fn clear(&mut self, port_num: u8) {
-        self.0 |= 1 << port_num;
+        write_volatile!(self.0, read_volatile!(self.0) | (1 << port_num));
+    }
+
+    pub fn clear_all(&mut self) {
+        write_volatile!(self.0, 0);
     }

     // RWC flag for Port 0 Interrupt Pending Status
@@ -180,59 +192,61 @@ impl InterruptStatus {
 }

 // PI
+#[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct PortsImplemented(pub u32);

 impl PortsImplemented {
     // Port i is Implemented (P[i])
     pub fn is_port_implemented(&self, port_num: u8) -> bool {
-        (self.0 & (1 << port_num)) != 0
+        (read_volatile!(self.0) & (1 << port_num)) != 0
     }
 }

 // VS
+#[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct Version(pub u32);

 impl Version {
     // Major Version Number (Bits 31:16)
     pub fn major_version(&self) -> u16 {
-        (self.0 >> 16) as u16
+        (read_volatile!(self.0) >> 16) as u16
     }

     // Minor Version Number (Bits 15:0)
     pub fn minor_version(&self) -> u16 {
-        (self.0 & 0xFFFF) as u16
+        (read_volatile!(self.0) & 0xffff) as u16
     }
 }

 /// CCC_CTL
+#[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct CommandCompletionCoalescingControl(pub u32);

 impl CommandCompletionCoalescingControl {
     pub fn interrupt_time_ms(&self) -> u16 {
-        const MASK: u32 = 0xFFFF;
-        ((self.0 >> 16) & MASK) as u16
+        ((read_volatile!(self.0) >> 16) & 0xffff) as u16
     }

     // Command Completions (CC): Number of command completions necessary to
     // cause a CCC interrupt
     pub fn command_completions(&self) -> u8 {
-        const MASK: u32 = 0xFF;
-        ((self.0 >> 8) & MASK) as u8
+        ((read_volatile!(self.0) >> 8) & 0xff) as u8
     }

     flag!(enable, 0);
 }

 /// CCC_PORTS
+#[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct CommandCompletionCoalescingPorts(pub u32);

 impl CommandCompletionCoalescingPorts {
     pub fn set_port(&mut self, port_num: u8) {
-        self.0 |= 1 << port_num
+        write_volatile!(self.0, read_volatile!(self.0) | (1 << port_num))
     }

     pub fn unset(&mut self, port_num: u8) {
@@ -273,22 +287,24 @@ impl CommandCompletionCoalescingPorts {
 }

 /// EM_LOC
+#[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct EnclosureManagementLocation(pub u32);

 impl EnclosureManagementLocation {
     pub fn dword_offset_from_abar(&self) -> usize {
-        (self.0 >> 16) as usize
+        (read_volatile!(self.0) >> 16) as usize
     }

     /// ZERO is invalid
     /// TODO understand how to check if i have both receive and transmit
     pub fn buffet_size(&self) -> Option<NonZero<usize>> {
-        NonZero::new((self.0 & 0xffff) as usize)
+        NonZero::new((read_volatile!(self.0) & 0xffff) as usize)
     }
 }

 /// EM_CTL
+#[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct EnclosureManagementControl(pub u32);

@@ -328,6 +344,7 @@ impl EnclosureManagementControl {
 }

 /// CAP2
+#[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct HostCapabilitiesExtended(pub u32);

@@ -352,6 +369,7 @@ impl HostCapabilitiesExtended {
 }

 // BOHC
+#[repr(transparent)]
 #[derive(Debug, Clone, Copy)]
 pub struct BiosOsControlStatus(pub u32);

@@ -394,6 +412,7 @@ pub struct VendorSpecificRegisters {
 }

 /// Port X Interrupt status
+#[repr(transparent)]
 pub struct PortInterruptStatus(pub u32);

 impl PortInterruptStatus {
@@ -449,11 +468,12 @@ impl PortInterruptStatus {
     rwc_flag!(dhrs, 0);

     pub fn clear_pending_interrupts(&mut self) {
-        self.0 = 0;
+        write_volatile!(self.0, u32::MAX);
     }
 }

 /// Port X Interrupt Enable
+#[repr(transparent)]
 pub struct InterruptEnable(pub u32);

 impl InterruptEnable {
@@ -510,12 +530,16 @@ impl InterruptEnable {
 }

 /// Port X Command and status
+#[repr(transparent)]
 pub struct CmdStatus(pub u32);

 impl CmdStatus {
     pub fn set_icc(&mut self, icc: InterfaceCommunicationControl) {
-        self.0 &= !(0xf << 28);
-        self.0 |= (icc as u32) << 28;
+        write_volatile!(self.0, read_volatile!(self.0) & !(0xf << 28));
+        write_volatile!(
+            self.0,
+            read_volatile!(self.0) | (icc as u32) << 28
+        );
     }

     // Aggressive Slumber / Partial
@@ -563,14 +587,11 @@ impl CmdStatus {
     // Mechanical Presence Switch State
     ro_flag!(mpss, 13);

-    pub fn set_current_cmd(&mut self, ccs: u8) {
+    pub fn get_current_cmd(&mut self) -> u32 {
         if !self.is_st() {
-            return;
+            return 0;
         }
-        (0x0u8..=0x1fu8).contains(&ccs).then(|| {
-            self.0 &= !(0x1f << 8);
-            self.0 |= (ccs as u32) << 8;
-        });
+        (read_volatile!(self.0) >> 8) & 0x1f
     }

     // FIS Receive Enable
@@ -625,6 +646,7 @@ impl CmdStatus {
 }

 /// Port x Task File Data
+#[repr(transparent)]
 pub struct TaskFileData(pub u32);

 impl TaskFileData {
@@ -638,7 +660,7 @@ impl TaskFileData {
     ro_flag!(bsy, 7);

     pub fn error(&self) -> u8 {
-        (self.0 >> 8) as u8
+        (read_volatile!(self.0) >> 8) as u8
     }
 }

@@ -663,39 +685,41 @@ impl Signature {
 }

 /// Port X SATA Status
+#[repr(transparent)]
 pub struct SataStatus(pub u32);

 impl SataStatus {
     pub fn power(
         &self,
     ) -> Result<InterfacePowerManagement, ConversionError<u8>> {
-        let power = ((self.0 >> 8) & 0xf) as u8;
+        let power = ((read_volatile!(self.0) >> 8) & 0xf) as u8;
         InterfacePowerManagement::try_from(power)
     }

     pub fn speed(&self) -> InterfaceSpeed {
-        let speed = ((self.0 >> 4) & 0xf) as u8;
+        let speed = ((read_volatile!(self.0) >> 4) & 0xf) as u8;
         unsafe { InterfaceSpeed::unchecked_transmute_from(speed) }
     }

     pub fn detection(
         &self,
     ) -> Result<DeviceDetection, ConversionError<u8>> {
-        let detection = (self.0 & 0xf) as u8;
+        let detection = (read_volatile!(self.0) & 0xf) as u8;
         DeviceDetection::try_from(detection)
     }
 }

 /// Port X SATA control
+#[repr(transparent)]
 pub struct SataControl(pub u32);

 impl SataControl {
     pub fn port_multiplier(&self) -> u8 {
-        ((self.0 >> 16) & 0xf) as u8
+        ((read_volatile!(self.0) >> 16) & 0xf) as u8
     }

     pub fn select_power_management(&self) -> u8 {
-        ((self.0 >> 12) & 0xf) as u8
+        ((read_volatile!(self.0) >> 12) & 0xf) as u8
     }

     flag!(devslp_disabled, 10);
@@ -703,7 +727,7 @@ impl SataControl {
     flag!(partial_disabled, 8);

     pub fn max_speed(&self) -> InterfaceSpeedRestriction {
-        let speed = ((self.0 >> 4) & 0xf) as u8;
+        let speed = ((read_volatile!(self.0) >> 4) & 0xf) as u8;
         unsafe {
             InterfaceSpeedRestriction::unchecked_transmute_from(speed)
         }
@@ -711,15 +735,20 @@ impl SataControl {

     pub fn set_max_speed(&mut self, speed: InterfaceSpeed) {
         if speed != InterfaceSpeed::DevNotPresent {
-            self.0 &= !(0xf << 4);
-            self.0 |= (speed as u32) << 4;
+            write_volatile!(self.0, read_volatile!(self.0) & !(0xf << 4));
+            write_volatile!(
+                self.0,
+                read_volatile!(self.0) | (speed as u32) << 4
+            );
         }
     }

     pub fn device_initialization(
         &self,
     ) -> Result<InterfaceInitialization, ConversionError<u8>> {
-        InterfaceInitialization::try_from((self.0 & 0xf) as u8)
+        InterfaceInitialization::try_from(
+            (read_volatile!(self.0) & 0xf) as u8,
+        )
     }

     // TODO THIS COMMAND ANY MAYBE OTHER SHOULD PROBABLY MOVE TO THE PORT
@@ -729,48 +758,66 @@ impl SataControl {
         &mut self,
         init: InterfaceInitialization,
     ) {
-        self.0 &= !0xf;
-        self.0 |= init as u32;
+        write_volatile!(self.0, read_volatile!(self.0) & !0xf);
+        write_volatile!(self.0, read_volatile!(self.0) | init as u32);
     }
 }

 /// Port X SATA error
+#[repr(transparent)]
 pub struct SataError(pub u32);

 impl SataError {
     pub fn diagnostic(&self) -> impl Iterator<Item = DiagnosticError> {
-        let diagnostic_errors = ((self.0 >> 16) & 0xffff) as u16;
+        let diagnostic_errors =
+            ((read_volatile!(self.0) >> 16) & 0xffff) as u16;
         DiagnosticError::iter()
             .filter(move |n| *n as u16 & diagnostic_errors != 0)
     }

     pub fn error(&self) -> impl Iterator<Item = AhciError> {
-        let ahci_error = (self.0 & 0xffff) as u16;
+        let ahci_error = (read_volatile!(self.0) & 0xffff) as u16;
         AhciError::iter().filter(move |n| *n as u16 & ahci_error != 0)
     }
+
+    pub fn zero_error(&mut self) {
+        write_volatile!(self.0, read_volatile!(self.0) & !0xffff)
+    }
 }

 /// Port X Sata Active
+#[repr(transparent)]
 pub struct SataActive(pub u32);

 /// Port X Command issue
-pub struct CmdIssue(pub u32);
+#[repr(transparent)]
+pub struct CmdIssue(pub Volatile<u32>);
+
+impl CmdIssue {
+    pub fn issue_cmd(&mut self, cmd: u8) {
+        self.0.write(self.0.read() | 1 << cmd);
+    }
+}

 /// Port X SATA Notification
+#[repr(transparent)]
 pub struct SataNotification(pub u32);

 impl SataNotification {
     /// Set port multiplier notification
     pub fn set_pm_notif(&mut self, pm_port: u8) {
-        (0x0..0xf)
-            .contains(&pm_port)
-            .then(|| self.0 |= pm_port as u32);
+        (0x0..0xf).contains(&pm_port).then(|| {
+            write_volatile!(
+                self.0,
+                read_volatile!(self.0) | pm_port as u32
+            )
+        });
     }

     /// Get port multiplier notification
     pub fn get_pm_notif(&self, pm_port: u8) -> bool {
         if (0x0..0xf).contains(&pm_port) {
-            (self.0 & !0xffff) & (1 << pm_port) != 0
+            (read_volatile!(self.0) & !0xffff) & (1 << pm_port) != 0
         } else {
             false
         }
@@ -778,25 +825,29 @@ impl SataNotification {
 }

 /// Port X Frame Information Structure based switching control
+#[repr(transparent)]
 pub struct FisSwitchControl(pub u32);

 impl FisSwitchControl {
     /// Port multiplier device that experienced fatal error
     pub fn device_with_error(&self) -> u8 {
-        ((self.0 >> 16) & 0xf) as u8
+        ((read_volatile!(self.0) >> 16) & 0xf) as u8
     }

     /// The number of devices that FIS-Based switching has been optimized
     /// for. The minimum value for this field should be 0x2.
     pub fn active_device_optimization(&self) -> u8 {
-        ((self.0 >> 12) & 0xf) as u8
+        ((read_volatile!(self.0) >> 12) & 0xf) as u8
     }

     /// Set the port multiplier port number, that should receive the next
     /// command
     pub fn device_to_issue(&mut self, dev_num: u8) {
-        self.0 &= !(0xf << 8);
-        self.0 |= (dev_num as u32) << 8;
+        write_volatile!(self.0, read_volatile!(self.0) & !(0xf << 8));
+        write_volatile!(
+            self.0,
+            read_volatile!(self.0) | (dev_num as u32) << 8
+        );
     }

     // Single device error
@@ -810,19 +861,20 @@ impl FisSwitchControl {
 }

 /// Port x Device sleep
+#[repr(transparent)]
 pub struct DeviceSleep(pub u32);

 impl DeviceSleep {
     /// Device Sleep Idle Timeout Multiplier
     pub fn dito_multiplier(&self) -> u8 {
-        ((self.0 >> 25) & 0xf) as u8
+        ((read_volatile!(self.0) >> 25) & 0xf) as u8
     }

     /// Raw dito value
     ///
     /// **Use [`dito_actual`] for the actual wait time**
     pub fn dito_ms(&self) -> u16 {
-        ((self.0 >> 15) & 0x3ff) as u16
+        ((read_volatile!(self.0) >> 15) & 0x3ff) as u16
     }

     /// The actual timeout, which is dito * (dito_multiplier + 1)
@@ -835,7 +887,7 @@ impl DeviceSleep {
     /// TODO: currently only read only, if write needed, check
     /// documentation about extended cap and writing to this offset
     pub fn mdat(&self) -> u8 {
-        ((self.0 >> 10) & 0x1f) as u8
+        ((read_volatile!(self.0) >> 10) & 0x1f) as u8
     }

     /// Device sleep exit timeout
@@ -843,7 +895,7 @@ impl DeviceSleep {
     /// TODO: currently only read only, if write needed, check
     /// documentation about extended cap and writing to this offset
     pub fn deto_ms(&self) -> u8 {
-        ((self.0 >> 2) & 0xff) as u8
+        ((read_volatile!(self.0) >> 2) & 0xff) as u8
     }

     // Device sleep present
@@ -854,18 +906,19 @@ impl DeviceSleep {
 }

 /// Port X Vendor specific
+#[repr(transparent)]
 pub struct VendorSpecific(pub u32);

 #[repr(C)]
 pub struct PortControlRegisters {
     /// Port X Command list base address low
-    pub clb: u32,
+    pub clb: Volatile<u32>,
     /// Port X Command list base address high
-    pub clbu: u32,
+    pub clbu: Volatile<u32>,
     /// Port X frame information structure base address low
-    pub fb: u32,
+    pub fb: Volatile<u32>,
     /// Port X frame information structure base address high
-    pub fbu: u32,
+    pub fbu: Volatile<u32>,
     pub is: PortInterruptStatus,
     pub ie: InterruptEnable,
     pub cmd: CmdStatus,
@@ -887,30 +940,30 @@ pub struct PortControlRegisters {
 impl PortControlRegisters {
     /// Return the full command list address by combining the low and high
     /// 32bit parts
-    pub fn cmd_list(&mut self) -> &mut CommandList {
-        let cmd_list_addr = ((self.clbu as usize) << 32)
-            | (self.clb as usize & !((1 << 10) - 1));
-        unsafe { &mut *(cmd_list_addr as *mut CommandList) }
+    pub fn cmd_list(&mut self) -> &mut CmdList {
+        let cmd_list_addr = ((self.clbu.read() as usize) << 32)
+            | (self.clb.read() as usize & !((1 << 10) - 1));
+        unsafe { &mut *(cmd_list_addr as *mut CmdList) }
     }

-    pub fn set_cmd_list(&mut self, clb: &CommandList) {
-        let ptr = clb as *const _ as usize;
-        self.clb = (ptr & 0xffffffff) as u32;
-        self.clbu = (ptr >> 32) as u32;
+    pub fn set_cmd_list_address(&mut self, ptr: usize) {
+        println!("CLB: {:x?}", ptr);
+        self.clb.write((ptr & 0xffffffff) as u32);
+        self.clbu.write((ptr >> 32) as u32);
     }

     /// Return the full frame information structure address by combining
     /// the low and high 32bit parts
     pub fn received_fis(&self) -> &ReceivedFis {
-        let rfis_addr = ((self.fbu as usize) << 32)
-            | (self.fb as usize & !((1 << 8) - 1));
+        let rfis_addr = ((self.fbu.read() as usize) << 32)
+            | (self.fb.read() as usize & !((1 << 8) - 1));
         unsafe { &*(rfis_addr as *const ReceivedFis) }
     }

-    pub fn set_received_fis(&mut self, fis: &ReceivedFis) {
-        let ptr = fis as *const _ as usize;
-        self.fb = (ptr & 0xffffffff) as u32;
-        self.fbu = (ptr >> 32) as u32;
+    pub fn set_received_fis_address(&mut self, ptr: usize) {
+        println!("FB: {:x?}", ptr);
+        self.fb.write((ptr & 0xffffffff) as u32);
+        self.fbu.write((ptr >> 32) as u32);
     }

     pub fn set_status(&mut self, port: u8) {
@@ -923,7 +976,7 @@ impl PortControlRegisters {

     /// Return the index of an available command slot if one exists
     pub fn find_cmd_slot(&self) -> Option<usize> {
-        let mut slots = self.ci.0 | self.sact.0;
+        let mut slots = self.ci.0.read() | self.sact.0;
         for i in 0usize..32 {
             if slots & 1 == 0 {
                 return Some(i);
@@ -933,19 +986,79 @@ impl PortControlRegisters {
         }
         None
     }
+
+    pub fn identity_packet(&mut self, buf: *mut IdentityPacketData) {
+        let fis = RegisterH2D::new(
+            1 << 7,
+            AtaCommand::IdentifyDevice,
+            0,
+            0,
+            0,
+            0,
+            0,
+        );
+        let cmd = &mut self.cmd_list().entries[0];
+        let cmd_table = &mut cmd.cmd_table::<8>();
+        let prdt_ent = &mut cmd_table.table[0];
+        write_volatile!(cmd_table.cfis, Fis { h2d: fis });
+        prdt_ent.set_buffer(buf);
+        prdt_ent.dbc.set_dbc(511);
+        cmd.info.set_command_fis_len(size_of::<RegisterH2D>());
+        cmd.info.set_prdtl(1);
+        println!("Sending command!");
+        self.ci.issue_cmd(0);
+
+        let mut timeout = 0xfffff;
+        loop {
+            if self.is.0 != 0 {
+                if self.is.is_tfes() {
+                    eprintln!("ERROR READING FROM DISK");
+                    for error in self.serr.error() {
+                        println!("{:?}", error);
+                    }
+                    if self.tfd.is_err() {
+                        println!(
+                            "TASK FILE DATA ERROR STATE\nERROR: {:08b}",
+                            self.tfd.error()
+                        );
+                    }
+                }
+                println!("Finished!");
+                println!("{:032b}", self.is.0);
+                break;
+            } else {
+                timeout -= 1
+            }
+
+            if timeout == 0 {
+                panic!("Timeout on identity packet read")
+            }
+        }
+        unsafe {
+            for w in (&mut *buf).serial_number.chunks_exact_mut(2) {
+                w.swap(0, 1);
+            }
+            for w in (&mut *buf).model_num.chunks_exact_mut(2) {
+                w.swap(0, 1);
+            }
+            for w in (&mut *buf).firmware_rev.chunks_exact_mut(2) {
+                w.swap(0, 1);
+            }
+        }
+    }
 }

 /// TODO, DECIDE IF ITS OK THAT THIS IS ONE BYTE GREATER IN SIZE
 #[repr(C, align(256))]
 pub struct ReceivedFis {
-    pub dsfis: DmaSetup,
+    pub dsfis: Volatile<DmaSetup>,
     _reserved0: u32,
-    pub psfis: PioSetupD2H,
+    pub psfis: Volatile<PioSetupD2H>,
     _reserved1: [u32; 3],
-    pub rfis: RegisterD2H,
+    pub rfis: Volatile<RegisterD2H>,
     _reserved2: u32,
-    pub sdbfis: SetDeviceBits,
-    pub ufis: [u8; 64],
+    pub sdbfis: Volatile<SetDeviceBits>,
+    pub ufis: Volatile<[u8; 64]>,
     _reserved3: [u32; 24],
 }

@@ -955,12 +1068,18 @@ pub struct CmdListDescriptionInfo(pub u32);

 impl CmdListDescriptionInfo {
     /// Set the Physical region descriptor table length
     pub fn set_prdtl(&mut self, size: u16) {
-        self.0 |= (size as u32) << 16;
+        write_volatile!(
+            self.0,
+            read_volatile!(self.0) | (size as u32) << 16
+        );
     }

     /// Set the port multiplier port
     pub fn set_pm_port(&mut self, pm_port: u8) {
-        self.0 |= ((pm_port & 0xf) as u32) << 12
+        write_volatile!(
+            self.0,
+            read_volatile!(self.0) | ((pm_port & 0xf) as u32) << 12
+        );
     }

     // Clear busy upon R_OK
@@ -981,47 +1100,47 @@ impl CmdListDescriptionInfo {
     // ATAPI
     flag!(a, 5);

-    /// Length of command FIS in dwords
-    pub fn set_command_fis_len_dw(&mut self, len: u8) {
-        assert!(len < 2, "Len must be smaller then 2");
-        assert!(len > 16, "Len must be greater then 16 ");
-        self.0 |= len as u32;
+    /// Set the command FIS length in bytes (internally converted to
+    /// dwords)
+    pub fn set_command_fis_len(&mut self, len: usize) {
+        assert!(len <= 64, "Len must be 64 bytes or less");
+        assert!(len >= 8, "Len must be at least 8 bytes");
+        write_volatile!(
+            self.0,
+            read_volatile!(self.0) | (len / size_of::<u32>()) as u32
+        );
     }
 }

 #[repr(C)]
-pub struct CommandHeader {
+pub struct CmdHeader {
     info: CmdListDescriptionInfo,
-    prdb_byte_count: u32,
+    prdb_byte_count: Volatile<u32>,
     /// Command table descriptor base address
-    ctba: u32,
+    ctba: Volatile<u32>,
     /// Command table descriptor base address upper
-    ctbau: u32,
+    ctbau: Volatile<u32>,
     _reserved: [u32; 4],
 }

-impl CommandHeader {
+impl CmdHeader {
     pub fn cmd_table<const ENTRIES: usize>(
         &mut self,
-    ) -> &mut CommandTable<ENTRIES> {
-        let cmd_table_addr =
-            ((self.ctbau as usize) << 32) | (self.ctba as usize);
-        unsafe { &mut *(cmd_table_addr as *mut CommandTable<ENTRIES>) }
+    ) -> &mut CmdTable<ENTRIES> {
+        let cmd_table_addr = ((self.ctbau.read() as usize) << 32)
+            | (self.ctba.read() as usize);
+        unsafe { &mut *(cmd_table_addr as *mut CmdTable<ENTRIES>) }
     }

-    pub fn set_cmd_table<const ENTRIES: usize>(
-        &mut self,
-        table: &CommandTable<ENTRIES>,
-    ) {
-        let ptr = table as *const _ as usize;
-        self.ctba = (ptr & 0xffffffff) as u32;
-        self.ctbau = (ptr >> 32) as u32;
+    pub fn set_cmd_table(&mut self, ptr: usize) {
+        println!("CMD TBL: {:x?}", ptr);
+        self.ctba.write((ptr & 0xffffffff) as u32);
+        self.ctbau.write((ptr >> 32) as u32);
     }
 }

 #[repr(C, align(1024))]
-pub struct CommandList {
-    pub entries: [CommandHeader; 32],
+pub struct CmdList {
+    pub entries: [CmdHeader; 32],
 }

 pub struct PrdtDescriptionInfo(pub u32);
@@ -1034,67 +1153,36 @@ impl PrdtDescriptionInfo {
     pub fn set_dbc(&mut self, dbc: u32) {
         const MB: u32 = 1 << 20;
         assert!(dbc < 4 * MB, "DBC should be smaller than 4MiB");
+        write_volatile!(self.0, read_volatile!(self.0) | dbc | 1);
     }
 }

-#[repr(C, align(128))]
-pub struct CommandTableEntry {
+#[repr(C)]
+pub struct CmdTableEntry {
     /// Data base address buffer
-    dba: u32,
+    dba: Volatile<u32>,
     /// Data base address buffer upper
-    dbau: u32,
+    dbau: Volatile<u32>,
     _reserved: u32,
     /// Data byte count (A maximum of 4mb is available)
     dbc: PrdtDescriptionInfo,
 }

-impl CommandTableEntry {
+impl CmdTableEntry {
     pub fn set_buffer<T>(&mut self, buf: *mut T) {
         let ptr = buf as usize;
-        self.dba = (ptr & 0xffffffff) as u32;
-        self.dbau = (ptr >> 32) as u32;
+        self.dba.write((ptr & 0xffffffff) as u32);
+        self.dbau.write((ptr >> 32) as u32);
     }
 }

-#[repr(C)]
-pub struct CommandTable<const ENTRIES: usize> {
+#[repr(C, align(256))]
+pub struct CmdTable<const ENTRIES: usize> {
     cfis: Fis,
     /// TODO
     acmd: [u8; 0x10],
     _reserved: [u8; 0x30],
-    table: [CommandTableEntry; ENTRIES],
-}
-
-#[repr(C, align(4096))]
-pub struct PortCommands<const ENTRIES: usize> {
-    pub fis: ReceivedFis,
-    pub cmd_list: CommandList,
-    pub cmd_table: [CommandTable<ENTRIES>; 32],
-    _reserved: [u8; 0x100],
-}
-
-impl<const ENTRIES: usize> PortCommands<ENTRIES> {
-    pub fn empty() -> &'static mut PortCommands<ENTRIES> {
-        // TODO CREATE EXTERNAL UTIL FUNCTION FOR THIS AND USE ALSO ON PAGE
-        // TABLE CREATION
-        let zeroed = unsafe {
-            core::slice::from_raw_parts_mut(
-                alloc_pages!(
-                    size_of::<PortCommands<ENTRIES>>()
-                        / REGULAR_PAGE_SIZE
-                ) as *mut usize,
-                size_of::<PortCommands<ENTRIES>>() / size_of::<usize>(),
-            )
-        };
-        zeroed.fill(0);
-
-        // TODO MAKE LESS SKEYTCHY
-        let port_cmd_ptr = (zeroed.as_mut_ptr() as usize
-            - PHYSICAL_MEMORY_OFFSET)
-            as *mut PortCommands<ENTRIES>;
-
-        unsafe { &mut *port_cmd_ptr }
-    }
+    table: [CmdTableEntry; ENTRIES],
 }

 #[repr(C)]
@@ -1110,7 +1198,9 @@ pub struct HBAMemoryRegisters {
 }

 impl HBAMemoryRegisters {
-    pub fn new(a: PhysicalAddress) -> Result<&'static mut Self, HbaError> {
+    pub fn new(
+        a: PhysicalAddress,
+    ) -> Result<NonNull<Self>, HbaError> {
         if !a.is_aligned(REGULAR_PAGE_ALIGNMENT) {
             return Err(HbaError::AddressNotAligned);
         }
@@ -1121,149 +1211,120 @@ impl HBAMemoryRegisters {
             PageSize::Regular,
         );

-        let hba: &'static mut HBAMemoryRegisters =
-            unsafe { &mut *a.translate().as_mut_ptr() };
+        let mut hba_ptr =
+            a.translate().as_non_null::<Self>();

-        if hba.ghc.pi.0 >= (1 << 31) {
-            panic!("There is no support for HBA's with more than 30 ports")
-        }
+        let hba = unsafe { hba_ptr.as_mut() };

         hba.ghc.ghc.set_ae();
         hba.ghc.ghc.set_ie();

-        println!("BIOS / OS Handoff: {}", hba.ghc.cap_ext.is_boh());
-        println!("Interrupts: {}", hba.ghc.ghc.is_ie());
-
-        Ok(hba)
-    }
-
-    /// Returns the amount of active devices found
-    pub fn probe(&self) -> usize {
-        println!(
-            "Detected {} implemented ports",
-            self.ghc.cap.number_of_ports()
-        );
-
-        let mut count = 0;
-        for (i, port) in self.ports.iter().enumerate() {
-            if self.ghc.pi.is_port_implemented(i as u8)
-                && let Ok(power) = port.ssts.power()
-                && let InterfacePowerManagement::Active = power
-            {
-                count += 1;
-                println!("\nDetected device at port number: {}", i);
-                print!("    Device Power: ");
-                println!("{:?}", power ; color = ColorCode::new(Color::Green, Color::Black));
-                print!("    Device Speed: ");
-                println!("{}", port.ssts.speed() ; color = ColorCode::new(Color::Green, Color::Black));
-                print!("    Device type: ");
-                match port.sig.device_type() {
-                    Ok(t) => {
-                        println!("{:?}", t ; color = ColorCode::new(Color::Green, Color::Black) )
-                    }
-                    Err(e) => {
-                        println!("{:?}", e ; color = ColorCode::new(Color::Red, Color::Black) )
-                    }
-                }
-            }
+        if hba.ghc.pi.0 >= (1 << 31) {
+            panic!("There is no support for HBA's with more than 30 ports")
         }

-        count
-    }
-
-    pub fn map_device<const ENTRIES: usize>(
-        &'static mut self,
-        port_number: usize,
-    ) -> AhciDeviceController<ENTRIES> {
-        AhciDeviceController::<ENTRIES>::new(
-            &mut self.ports[port_number],
-            PortCommands::empty(),
-        )
-    }
-}
-
-pub struct AhciDeviceController<const ENTRIES: usize> {
-    pub port: &'static mut PortControlRegisters,
-    pub port_cmds: &'static mut PortCommands<ENTRIES>,
-}
+        println!("BIOS / OS Handoff: {}", hba.ghc.cap_ext.is_boh());

-impl<const ENTRIES: usize> AhciDeviceController<ENTRIES> {
-    pub fn new(
-        port: &'static mut PortControlRegisters,
-        port_cmds: &'static mut PortCommands<ENTRIES>,
-    ) -> AhciDeviceController<ENTRIES> {
-        println!("port address: {:x?}", port as *const _ as usize);
-        println!(
-            "Port commands address: {:x?}",
-            port_cmds as *const _ as usize
-        );
+        if hba.ghc.cap_ext.is_boh() {
+            unimplemented!("Didn't implement bios os handoff")
+        }

-        port.cmd.stop();
-        port.set_cmd_list(&port_cmds.cmd_list);
-        port.set_received_fis(&port_cmds.fis);
-        for (header, table) in port_cmds
-            .cmd_list
-            .entries
-            .iter_mut()
-            .zip(port_cmds.cmd_table.iter())
-        {
-            header.info.set_prdtl(ENTRIES as u16);
-            header.set_cmd_table(table);
-        }
-        port.cmd.start();
-        AhciDeviceController { port, port_cmds }
+        Ok(hba_ptr)
     }

-    pub fn identity_packet(
-        &mut self,
-        buf: *mut IdentityPacketData,
-    ) -> Option<()> {
-        self.port.is.clear_pending_interrupts();
-        let slot = self.port.find_cmd_slot()?;
-        let header = &mut self.port.cmd_list().entries[slot];
-        header.info.unset_w();
-        header.info.set_p();
-        header.info.set_prdtl(ENTRIES as u16);
-
-        let table = header.cmd_table::<ENTRIES>();
-        table.table[0].dbc.set_i();
-        table.table[0].dbc.set_dbc(511); // 256 words - 1
-        table.table[0].set_buffer(buf);
-
-        let fis = RegisterH2D::new(
-            0x80,
-            AtaCommand::IdentifyDevice,
-            0,
-            0,
-            0,
-            0,
-            0,
-        );
-        table.cfis = Fis { h2d: fis };
-        unsafe {
-            let v = core::ptr::read_volatile(&self.port.ci.0);
-            core::ptr::write_volatile(
-                &mut self.port.ci.0,
-                v | (1 << slot),
-            );
-
-            let mut timeout = 0xffffffu32;
-            loop {
-                let v = core::ptr::read_volatile(&self.port.ci.0);
-                if v & (1 << slot) == 0 {
-                    break;
-                }
-                timeout -= 1;
-                if timeout == 0 {
-                    panic!("TIME EXCEEDED ON IDENTITY READ")
-                }
-
-                if self.port.is.0 != 0 {
-                    panic!("ERROR ON IDENTITY READ, {}", self.port.is.0);
-                }
-            }
-        }
-
-        Some(())
+    /// Returns the amount of active devices found and set them into idle
+    /// state.
+    pub fn probe_init(&mut self) -> usize {
+        // println!(
+        //     "Detected {} implemented ports",
+        //     self.ghc.cap.number_of_ports()
+        // );
+
+        // println!(
+        //     "Supported command slots: {}, Supported 64bit addresses: {}",
+        //     self.ghc.cap.number_of_commands(),
+        //     self.ghc.cap.is_s64a()
+        // );
+
+        // let mut count = 0;
+        // for (i, port) in self.ports.iter_mut().enumerate() {
+        //     if self.ghc.pi.is_port_implemented(i as u8)
+        //         && let Ok(power) = port.ssts.power()
+        //         && let InterfacePowerManagement::Active = power
+        //     {
+        //         count += 1;
+        //         println!("\nDetected device at port number: {}", i);
+        //         print!("    Device Power: ");
+        //         println!("{:?}", power ; color = ColorCode::new(Color::Green, Color::Black));
+        //         print!("    Device Speed: ");
+        //         println!("{}", port.ssts.speed() ; color = ColorCode::new(Color::Green, Color::Black));
+        //         print!("    Device type: ");
+        //         match port.sig.device_type() {
+        //             Ok(t) => {
+        //                 println!("{:?}", t ; color = ColorCode::new(Color::Green, Color::Black) )
+        //             }
+        //             Err(e) => {
+        //                 println!("{:?}", e ; color = ColorCode::new(Color::Red, Color::Black) )
+        //             }
+        //         }
+        //         port.cmd.stop();
+
+        //         let clb_fbu_table = unsafe { alloc_pages!(1) };
+        //         for i in (0..4096).step_by(size_of::<usize>()) {
+        //             unsafe {
+        //                 core::ptr::write_volatile(
+        //                     ((clb_fbu_table + i)
+        //                         + PHYSICAL_MEMORY_OFFSET)
+        //                         as *mut usize,
+        //                     0,
+        //                 );
+        //             }
+        //         }
+
+        //         port.set_cmd_list_address(clb_fbu_table);
+        //         port.set_received_fis_address(
+        //             clb_fbu_table + size_of::<CmdList>(),
+        //         );
+
+        //         // MAPPING the first header with 8 entries (0x100 in
+        //         // total table size)
+        //         let cmd_list = port.cmd_list();
+        //         cmd_list.entries[0].set_cmd_table(
+        //             clb_fbu_table
+        //                 + size_of::<CmdList>()
+        //                 + size_of::<ReceivedFis>(),
+        //         );
+
+        //         port.cmd.set_fre();
+        //         port.serr.zero_error();
+        //         // port.ie.set_dhre();
+        //         // port.ie.set_pse();
+        //         // port.ie.set_dse();
+        //         // port.ie.set_tfee();
+        //         port.is.clear_pending_interrupts();
+        //         self.ghc.is.clear_all();
+
+        //         port.cmd.set_sud();
+        //         port.cmd.set_pod();
+        //         port.cmd.set_icc(InterfaceCommunicationControl::Active);
+
+        //         loop {
+        //             if !port.tfd.is_bsy()
+        //                 && !port.tfd.is_drq()
+        //                 && matches!(
+        //                     port.ssts.power().unwrap(),
+        //                     InterfacePowerManagement::Active
+        //                 )
+        //             {
+        //                 break;
+        //             }
+        //         }
+        //         port.cmd.start();
+        //         println!("Started port number: {}", i)
+        //     }
+        // }
+        todo!()
+        // count
     }
 }
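A quick sanity check on the command-FIS length math introduced in `set_command_fis_len` above: the AHCI command header stores the FIS length as a dword count (the CFL field, valid range 2..=16 dwords per the spec), and a host-to-device register FIS is 20 bytes, i.e. a CFL of 5. A standalone sketch of that conversion, using only core Rust (names here are illustrative, not part of the patch):

    /// Convert a command FIS length in bytes to the dword count that the
    /// AHCI command header's CFL field expects (valid range 2..=16 dwords).
    fn cfl_dwords(len_bytes: usize) -> u32 {
        assert!((8..=64).contains(&len_bytes), "CFL must be 2..=16 dwords");
        (len_bytes / core::mem::size_of::<u32>()) as u32
    }

    fn main() {
        // A host-to-device register FIS is 5 dwords (20 bytes).
        assert_eq!(cfl_dwords(20), 5);
    }

This is also why the patch tightens the old `set_command_fis_len_dw` asserts, whose bounds were inverted.
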
diff --git a/kernel/src/drivers/ata/ahci/mod.rs b/kernel/src/drivers/ata/ahci/mod.rs
index 9d86c20..397d27c 100644
--- a/kernel/src/drivers/ata/ahci/mod.rs
+++ b/kernel/src/drivers/ata/ahci/mod.rs
@@ -1,12 +1,16 @@
 pub mod fis;
 pub mod hba;

+use common::enums::CascadedPicInterruptLine;
 use cpu_utils::structures::interrupt_descriptor_table::InterruptStackFrame;
 pub use fis::*;
 pub use hba::*;

+use crate::{drivers::pic8259::PIC, println};
+
 pub extern "x86-interrupt" fn ahci_interrupt(
     _stack_frame: InterruptStackFrame,
 ) {
-    panic!("AHCI Interrupts!");
+    println!("AHCI Interrupts!");
+    unsafe { PIC.end_of_interrupt(CascadedPicInterruptLine::Ahci) };
 }
diff --git a/kernel/src/drivers/interrupt_handlers.rs b/kernel/src/drivers/interrupt_handlers.rs
index 8178ff1..110b561 100644
--- a/kernel/src/drivers/interrupt_handlers.rs
+++ b/kernel/src/drivers/interrupt_handlers.rs
@@ -168,8 +168,8 @@ pub extern "x86-interrupt" fn page_fault_handler(
     error_code: u64,
 ) {
     println!("Interrupt: PageFault");
-    println!("Stack frame: {:#?}", stack_frame);
-    println!("Error code: {:#x}", error_code);
+    // println!("Stack frame: {:#?}", stack_frame);
+    // println!("Error code: {:#x}", error_code);
     println!("Faulting address: {:x}", cr2::read());
 }
diff --git a/kernel/src/drivers/pci.rs b/kernel/src/drivers/pci.rs
index 3f20657..8bf8a18 100644
--- a/kernel/src/drivers/pci.rs
+++ b/kernel/src/drivers/pci.rs
@@ -1,12 +1,6 @@
 extern crate alloc;

-use crate::{
-    drivers::ata::ahci::AHCIBaseAddress,
-    memory::allocators::page_allocator::{
-        ALLOCATOR, allocator::PhysicalPageAllocator,
-    },
-    println,
-};
+use crate::drivers::ata::ahci::AHCIBaseAddress;
 use alloc::vec::Vec;
 use common::enums::{
     CascadedPicInterruptLine, ClassCode, DeviceID, HeaderType,
@@ -406,47 +400,46 @@ impl PciDevice {
     pub fn enable_interrupts(&self, irq: CascadedPicInterruptLine) {}
 }

-pub fn scan_pci() -> Vec<PciDevice, PhysicalPageAllocator> {
-    let mut v: Vec<PciDevice, PhysicalPageAllocator> =
-        Vec::with_capacity_in(64, unsafe {
-            ALLOCATOR.assume_init_ref().clone()
-        });
-    for bus in 0..=255 {
-        for device in 0..32 {
-            let common =
-                PciConfigurationCycle::read_common_header(bus, device, 0);
-            if common.vendor_device.vendor == VendorID::NonExistent {
-                continue;
-            }
-            v.push_within_capacity(
-                PciConfigurationCycle::read_pci_device(
-                    bus, device, 0, common,
-                ),
-            )
-            .unwrap_or_else(|_| {
-                panic!("PCI Vec cannot push any more items")
-            });
-            if !common.header_type.is_multifunction() {
-                continue;
-            }
-            for function in 1..8 {
-                println!("{}", function);
-                let common = PciConfigurationCycle::read_common_header(
-                    bus, device, function,
-                );
-                if common.vendor_device.vendor == VendorID::NonExistent {
-                    continue;
-                }
-                v.push_within_capacity(
-                    PciConfigurationCycle::read_pci_device(
-                        bus, device, function, common,
-                    ),
-                )
-                .unwrap_or_else(|_| {
-                    panic!("PCI Vec cannot push any more items")
-                });
-            }
-        }
-    }
-    v
-}
+// pub fn scan_pci() -> Vec<PciDevice, PhysicalPageAllocator> {
+//     let mut v: Vec<PciDevice, PhysicalPageAllocator> =
+//         Vec::with_capacity_in(64, unsafe {
+//             ALLOCATOR.assume_init_ref().clone()
+//         });
+//     for bus in 0..=255 {
+//         for device in 0..32 {
+//             let common =
+//                 PciConfigurationCycle::read_common_header(bus, device, 0);
+//             if common.vendor_device.vendor == VendorID::NonExistent {
+//                 continue;
+//             }
+//             v.push_within_capacity(
+//                 PciConfigurationCycle::read_pci_device(
+//                     bus, device, 0, common,
+//                 ),
+//             )
+//             .unwrap_or_else(|_| {
+//                 panic!("PCI Vec cannot push any more items")
+//             });
+//             if !common.header_type.is_multifunction() {
+//                 continue;
+//             }
+//             for function in 1..8 {
+//                 let common = PciConfigurationCycle::read_common_header(
+//                     bus, device, function,
+//                 );
+//                 if common.vendor_device.vendor == VendorID::NonExistent {
+//                     continue;
+//                 }
+//                 v.push_within_capacity(
+//                     PciConfigurationCycle::read_pci_device(
+//                         bus, device, function, common,
+//                     ),
+//                 )
+//                 .unwrap_or_else(|_| {
+//                     panic!("PCI Vec cannot push any more items")
+//                 });
+//             }
+//         }
+//     }
+//     v
+// }
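The `ahci_interrupt` change above stops panicking and instead acknowledges the IRQ at the cascaded 8259 PIC; without the end-of-interrupt, the PIC would never deliver another interrupt on that line. For an IRQ that arrives through the slave controller, an EOI byte (0x20) must be written to both the slave and the master command ports. A minimal sketch of that logic, assuming an `outb`-style port-write helper (the real implementation here is `CascadedPIC::end_of_interrupt`, which this diff does not show):

    const MASTER_CMD: u16 = 0x20; // command port of the master 8259
    const SLAVE_CMD: u16 = 0xA0; // command port of the slave 8259
    const EOI: u8 = 0x20; // non-specific end-of-interrupt command

    /// `outb` is assumed to be a thin wrapper over the `out` instruction.
    unsafe fn end_of_interrupt(outb: unsafe fn(u16, u8), irq: u8) {
        if irq >= 8 {
            // IRQs 8..=15 come through the slave, which must be acked too.
            unsafe { outb(SLAVE_CMD, EOI) };
        }
        unsafe { outb(MASTER_CMD, EOI) };
    }
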
diff --git a/kernel/src/main.rs b/kernel/src/main.rs
index 81d75a3..df95021 100644
--- a/kernel/src/main.rs
+++ b/kernel/src/main.rs
@@ -17,58 +17,41 @@
 #![feature(ascii_char_variants)]
 #![feature(ascii_char)]
 #![feature(const_convert)]
+#![feature(slice_ptr_get)]
+#![feature(core_intrinsics)]
+#![feature(explicit_tail_calls)]
+#![feature(specialization)]
 #![deny(clippy::all)]

 mod drivers;
 mod memory;

-use core::{
-    alloc::{Allocator, Layout},
-    mem::MaybeUninit,
-    num::NonZero,
-    panic::PanicInfo,
-};
+use core::{num::NonZero, panic::PanicInfo};

 use crate::{
     drivers::{
-        ata::ahci::{
-            AhciDeviceController, GenericHostControl, HBAMemoryRegisters,
-            IdentityPacketData,
-        },
         interrupt_handlers,
         keyboard::{KEYBOARD, ps2_keyboard::Keyboard},
-        pci::{self},
         pic8259::{CascadedPIC, PIC},
         vga_display::color_code::ColorCode,
     },
     memory::{
-        allocators::page_allocator::{
-            allocator::PhysicalPageAllocator,
-            extensions::{PhysicalAddressExt, VirtualAddressExt},
+        allocators::{
+            buddy::BUDDY_ALLOCATOR, extensions::PageTableExt,
+            slab::SLAB_ALLOCATOR,
         },
-        memory_map::{ParsedMapDisplay, parse_map},
+        memory_map::{MemoryMap, parse_map},
+        page::{PAGES, map::PageMap},
     },
 };
-use common::{
-    address_types::{PhysicalAddress, VirtualAddress},
-    constants::{
-        BIG_PAGE_ALIGNMENT, PHYSICAL_MEMORY_OFFSET,
-        REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE,
-    },
-    enums::{
-        Color, DeviceDetection, DeviceType, InterfacePowerManagement,
-        PS2ScanCode, PageSize, PciDeviceType,
-    },
-};
+use common::{constants::REGULAR_PAGE_SIZE, enums::Color};
 use cpu_utils::{
     instructions::interrupts::{self},
     structures::{
         interrupt_descriptor_table::{IDT, InterruptDescriptorTable},
-        paging::PageEntryFlags,
+        paging::PageTable,
     },
 };

-use memory::allocators::page_allocator::ALLOCATOR;
-
 #[unsafe(no_mangle)]
 #[unsafe(link_section = ".start")]
 #[allow(clippy::missing_safety_doc)]
@@ -78,18 +61,31 @@ pub unsafe extern "C" fn _start() -> ! {
     okprintln!("Entered Long Mode");
     parse_map();
     okprintln!("Obtained Memory Map");
-    println!("{}", ParsedMapDisplay(parsed_memory_map!()));
-    PhysicalPageAllocator::init(unsafe { &mut ALLOCATOR });
-    okprintln!("Allocator Initialized");
+    println!("{}", MemoryMap(parsed_memory_map!()));
+
+    PageMap::init(unsafe { &mut PAGES }, MemoryMap(parsed_memory_map!()));
+    unsafe { BUDDY_ALLOCATOR.init(MemoryMap(parsed_memory_map!()), 0) };
+
+    let last = MemoryMap(parsed_memory_map!()).last().unwrap();
+
     unsafe {
-        let idt_address = alloc_pages!(1).into();
-        InterruptDescriptorTable::init(&mut IDT, idt_address);
+        PageTable::current_table().as_mut().map_physical_memory(
+            (last.base_address + last.length) as usize,
+        );
+    }
+    okprintln!("Initialized buddy allocator");
+    unsafe {
+        InterruptDescriptorTable::init(
+            &mut IDT,
+            alloc_pages!(1).translate(),
+        );
         okprintln!("Initialized interrupt descriptor table");
         interrupt_handlers::init(IDT.assume_init_mut());
         okprintln!("Initialized interrupts handlers");
         CascadedPIC::init(&mut PIC);
+        okprintln!("Initialized Programmable Interrupt Controller");

-        let keyboard_buffer_address = alloc_pages!(1).into();
+        let keyboard_buffer_address: common::address_types::VirtualAddress =
+            alloc_pages!(1).translate();
         Keyboard::init(
             &mut KEYBOARD,
             keyboard_buffer_address,
@@ -98,75 +94,86 @@ pub unsafe extern "C" fn _start() -> ! {
         okprintln!("Initialized Keyboard");
         interrupts::enable();
     }
-    let mut pci_devices = pci::scan_pci();
-    println!("Press ENTER to enumerate PCI devices!");
-    let a = pci_devices.as_ptr() as usize;
-    println!("pci_devices address: {:x}", a);

-    loop {
-        let c = unsafe { KEYBOARD.assume_init_mut().read_raw_scancode() };
-        if let Some(e) = c
-            && PS2ScanCode::from_scancode(e) == PS2ScanCode::Enter
-        {
-            break;
-        }
-    }
+    unsafe { SLAB_ALLOCATOR.init() }
+    okprintln!("Initialized slab allocator");
+
+    // panic!("")
+    // let mut pci_devices = pci::scan_pci();
+    // println!("Press ENTER to enumerate PCI devices!");
+    // let a = pci_devices.as_ptr() as usize;
+    // println!("pci_devices address: {:x}", a);
+
+    // loop {
+    //     let c = unsafe { KEYBOARD.assume_init_mut().read_raw_scancode() };
+    //     if let Some(e) = c
+    //         && PS2ScanCode::from_scancode(e) == PS2ScanCode::Enter
+    //     {
+    //         break;
+    //     }
+    // }
+
+    // unsafe { PIC.enable_irq(CascadedPicInterruptLine::Ahci) };
+    // for device in pci_devices.iter_mut() {
+    //     // println!("{:#?}", unsafe { device.common.vendor_device });
+    //     // println!("{:#?}", unsafe { device.common.header_type });
+    //     // println!("{:#?}\n", unsafe { device.common.device_type });
+
+    //     if device.header.common().device_type.is_ahci() {
+    //         let a = unsafe {
+    //             PhysicalAddress::new_unchecked(
+    //                 device.header.general_device.bar5.address(),
+    //             )
+    //         };
+
+    //         println!(
+    //             "Bus Master: {}, Interrupts Disable {}, I/O Space: {}, \
+    //              Memory Space: {}",
+    //             device.header.common().command.is_bus_master(),
+    //             device.header.common().command.is_interrupt_disable(),
+    //             device.header.common().command.is_io_space(),
+    //             device.header.common().command.is_memory_space()
+    //         );
+
+    //         println!(
+    //             "Interrupt Line: {}, Interrupt Pin: {}",
+    //             unsafe { device.header.general_device.interrupt_line },
+    //             unsafe { device.header.general_device.interrupt_pin }
+    //         );
+
+    //         let aligned = a.align_down(REGULAR_PAGE_ALIGNMENT);
+    //         let hba = HBAMemoryRegisters::new(aligned).unwrap();
+    //         let _ = hba.probe_init();
+    //         let p = &mut hba.ports[0];
+
+    //         let buf =
+    //             unsafe { alloc_pages!(1) as *mut IdentityPacketData };
+
+    //         p.identity_packet(buf);
+
+    //         let id = unsafe {
+    //             core::ptr::read_volatile(
+    //                 (buf as usize + PHYSICAL_MEMORY_OFFSET)
+    //                     as *mut IdentityPacketData,
+    //             )
+    //         };
+
+    //         println!("{:?}", id);
+
+    //         println!("Cylinders: {}", id.cylinders);
+    //         println!("Heads: {}", id.heads);
+    //         println!("Sectors: {}", id.sectors);
+
+    //         println!("Serial: {:?}", &id.serial_number);
+    //         println!("Model: {:?}", &id.model_num);
+    //         println!("Firmware: {:?}", &id.firmware_rev);
+    //     }
+    // }

-    for device in pci_devices.iter_mut() {
-        // println!("{:#?}", unsafe { device.common.vendor_device });
-        // println!("{:#?}", unsafe { device.common.header_type });
-        // println!("{:#?}\n", unsafe { device.common.device_type });
-
-        if device.header.common().device_type.is_ahci() {
-            let a = unsafe {
-                PhysicalAddress::new_unchecked(
-                    device.header.general_device.bar5.address(),
-                )
-            };
-
-            println!(
-                "Bus Master: {}, Interrupts Disable {}, I/O Space: {}, \
-                 Memory Space: {}",
-                device.header.common().command.is_bus_master(),
-                device.header.common().command.is_interrupt_disable(),
-                device.header.common().command.is_io_space(),
-                device.header.common().command.is_memory_space()
-            );
-
-            println!(
-                "Interrupt Line: {}, Interrupt Pin: {}",
-                unsafe { device.header.general_device.interrupt_line },
-                unsafe { device.header.general_device.interrupt_pin }
-            );
-
-            let aligned = a.align_down(REGULAR_PAGE_ALIGNMENT);
-            let hba = HBAMemoryRegisters::new(aligned).unwrap();
-            let _ = hba.probe();
-            let mut controller = hba.map_device::<13>(0);
-            let b = unsafe { alloc_pages!(1) - PHYSICAL_MEMORY_OFFSET };
-
-            println!("b: {:x?}", &b as *const _ as usize);
-            let rfis = controller.port_cmds.fis.rfis;
-            println!("rfis: {:?}", rfis);
-            controller.identity_packet(b as *mut IdentityPacketData);
-
-            let rfis = controller.port_cmds.fis.rfis;
-            println!("rfis: {:?}", rfis);
-
-            let d = unsafe {
-                core::ptr::read_volatile(b as *const IdentityPacketData)
-            };
-
-            println!("Data Address: {:x?}", b);
-
-            println!("Data: {:?}", d.data);
-
-            // println!("{:x?}", controller.port_cmds as *const _ as usize)
-        }
-    }

     loop {
         unsafe {
-            print!("{}", KEYBOARD.assume_init_mut().read_char() ; color = ColorCode::new(Color::Green, Color::Black));
+            print!("{}", KEYBOARD.assume_init_mut().read_char() ; color
+            = ColorCode::new(Color::Green, Color::Black));
         }
     }
 }
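The buddy allocator added next derives the freelist order from the request size in `alloc_pages`: it rounds `num_pages` up to a power of two and takes the exponent, so a request for 3 pages is served from order 2 (a 4-page block). The bit trick `usize::BITS - 1 - n.next_power_of_two().leading_zeros()` is just ceil(log2(n)); a standalone check (the helper name here is illustrative):

    /// ceil(log2(num_pages)), written the same way as alloc_pages below.
    fn order_for(num_pages: usize) -> usize {
        (usize::BITS - 1 - num_pages.next_power_of_two().leading_zeros())
            as usize
    }

    fn main() {
        assert_eq!(order_for(1), 0); // 1 page  -> order 0
        assert_eq!(order_for(2), 1); // 2 pages -> order 1
        assert_eq!(order_for(3), 2); // rounds up to 4 pages -> order 2
        assert_eq!(order_for(8), 3); // 8 pages -> order 3
    }
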
diff --git a/kernel/src/memory/allocators/buddy.rs b/kernel/src/memory/allocators/buddy.rs
new file mode 100644
index 0000000..4a15b29
--- /dev/null
+++ b/kernel/src/memory/allocators/buddy.rs
@@ -0,0 +1,266 @@
+use core::ptr::{self, NonNull};
+
+use common::{
+    address_types::PhysicalAddress,
+    enums::{BUDDY_MAX_ORDER, BuddyOrder, MemoryRegionType},
+    write_volatile,
+};
+use cpu_utils::structures::paging::PageTable;
+
+use crate::memory::{
+    memory_map::ParsedMemoryMap,
+    page::{PAGES, UnassignedPage, meta::BuddyPageMeta},
+};
+
+pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator {
+    freelist: [const { BuddyPageMeta::default() }; BUDDY_MAX_ORDER],
+};
+
+#[macro_export]
+/// Allocate the amount of pages specified, and return the address
+macro_rules! alloc_pages {
+    ($page_number: expr) => {{
+        use $crate::memory::allocators::buddy::BUDDY_ALLOCATOR;
+        BUDDY_ALLOCATOR.alloc_pages($page_number)
+    }};
+}
+
+pub struct BuddyAllocator {
+    freelist: [BuddyPageMeta; BUDDY_MAX_ORDER],
+}
+
+impl BuddyAllocator {
+    pub fn alloc_pages(&mut self, num_pages: usize) -> PhysicalAddress {
+        assert!(
+            num_pages <= (1 << BuddyOrder::MAX as usize),
+            "Size cannot be greater than: {}",
+            1 << BuddyOrder::MAX as usize
+        );
+        let order = (usize::BITS
+            - 1
+            - num_pages.next_power_of_two().leading_zeros())
+            as usize;
+
+        let page = self.freelist[order].detach().unwrap_or_else(|| {
+            self.split_until(order)
+                .expect("Out of memory, swap is not implemented")
+        });
+
+        unsafe { page.as_ref().physical_address() }
+    }
+
+    // pub fn free_pages(&self, address: usize) {
+    //     let page_index = address / REGULAR_PAGE_SIZE;
+    // }
+
+    /// This function assumes that `wanted_order` is empty, and won't check
+    /// it.
+    pub fn split_until(
+        &mut self,
+        wanted_order: usize,
+    ) -> Option<NonNull<UnassignedPage>> {
+        let closest_order = ((wanted_order + 1)..BUDDY_MAX_ORDER)
+            .find(|i| self.freelist[*i].next.is_some())?;
+
+        let initial_page =
+            self.freelist[closest_order].detach::<()>().unwrap();
+
+        Some(self.split_recursive(
+            initial_page,
+            closest_order,
+            wanted_order,
+        ))
+    }
+
+    fn split_recursive(
+        &mut self,
+        page: NonNull<UnassignedPage>,
+        current_order: usize,
+        target_order: usize,
+    ) -> NonNull<UnassignedPage> {
+        debug_assert!(
+            target_order < current_order,
+            "Target order cannot be greater than current order"
+        );
+
+        if current_order == target_order {
+            return page;
+        }
+
+        let (lhs, rhs) = unsafe { BuddyAllocator::split(page).unwrap() };
+
+        let next_order = current_order - 1;
+        self.freelist[next_order].attach(rhs);
+
+        become self.split_recursive(lhs, next_order, target_order)
+    }
+
+    /// This function will try to merge a page on the buddy allocator until
+    /// no further merge is possible.
+    pub fn merge_recursive(&self, page: NonNull<UnassignedPage>) {
+        if let Some(merged) =
+            unsafe { BuddyAllocator::merge_with_buddy(page) }
+        {
+            become BuddyAllocator::merge_recursive(self, merged);
+        }
+    }
+
+    pub fn alloc_table(&mut self) -> NonNull<PageTable> {
+        unsafe {
+            let address = self.alloc_pages(1).translate();
+            ptr::write_volatile(
+                address.as_non_null::<PageTable>().as_ptr(),
+                PageTable::empty(),
+            );
+            address.as_non_null::<PageTable>()
+        }
+    }
+
+    /// The code_end number should be the end address of the code.
+    ///
+    /// This function will not put in the free list pages that hold
+    /// addresses from 0->code_end
+    pub fn init(&'static mut self, map: ParsedMemoryMap, code_end: usize) {
+        for area in map
+            .iter()
+            .filter(|a| a.region_type == MemoryRegionType::Usable)
+        {
+            let mut start = UnassignedPage::index_of(
+                (area.base_address as usize).into(),
+            );
+            let end = UnassignedPage::index_of(
+                ((area.base_address + area.length) as usize).into(),
+            );
+
+            let mut prev = None;
+
+            while start < end {
+                let largest_order = BuddyOrder::try_from(
+                    ((end - start).ilog2().min(BuddyOrder::MAX as u32))
+                        as u8,
+                )
+                .unwrap();
+
+                let curr = unsafe { &mut PAGES[start] };
+                let next = unsafe {
+                    &mut PAGES[start + ((1 << largest_order as usize) - 1)]
+                };
+                unsafe {
+                    (*curr.meta.buddy).next =
+                        Some(NonNull::from_mut(next));
+                    (*curr.meta.buddy).prev = prev;
+                    (*curr.meta.buddy).order = Some(largest_order);
+                }
+                prev = Some(NonNull::from_mut(curr));
+
+                self.freelist[largest_order as usize]
+                    .attach(NonNull::from_mut(curr));
+
+                start += 1 << largest_order as usize;
+            }
+        }
+    }
+}
+
+impl BuddyAllocator {
+    /// TODO: Make an unsafe split if relevant
+    ///
+    /// # Safety
+    /// This function does not attach the new references!
+    #[allow(clippy::type_complexity)]
+    unsafe fn split(
+        mut page: NonNull<UnassignedPage>,
+    ) -> Option<(NonNull<UnassignedPage>, NonNull<UnassignedPage>)> {
+        // Reduce its order, then use the reduced order to find its buddy.
+        let prev_order = BuddyOrder::try_from(
+            unsafe { page.as_ref().meta.buddy.order? } as u8 - 1,
+        )
+        .expect("Page order cannot be reduced");
+
+        write_volatile!(
+            (*page.as_mut().meta.buddy).order,
+            Some(prev_order)
+        );
+
+        let index = unsafe {
+            ((page.as_ref() as *const _ as usize - PAGES.as_ptr().addr())
+                / size_of::<UnassignedPage>())
+                + (1 << prev_order as usize)
+        };
+
+        // Find its half
+        let mut buddy = unsafe { NonNull::from_mut(&mut PAGES[index]) };
+
+        // Set the order of the buddy.
+        write_volatile!(
+            (*buddy.as_mut().meta.buddy).order,
+            Some(prev_order)
+        );
+
+        Some((page, buddy))
+    }
+
+    /// This function will detach the given page and its buddy from their
+    /// freelist, increase their order and attach to the increased order
+    /// list.
+    unsafe fn merge_with_buddy(
+        page: NonNull<UnassignedPage>,
+    ) -> Option<NonNull<UnassignedPage>> {
+        let buddy = BuddyAllocator::buddy_of(page)?;
+
+        let next_order = BuddyOrder::try_from(unsafe {
+            page.as_ref().meta.buddy.order.unwrap() as u8 + 1
+        })
+        .unwrap();
+
+        BuddyAllocator::detach_from_mid(page);
+        BuddyAllocator::detach_from_mid(buddy);
+
+        // Operate on the page whose address is lower.
+        let (mut left, mut right) = if page < buddy {
+            (page, buddy)
+        } else {
+            (buddy, page)
+        };
+
+        unsafe {
+            (*left.as_mut().meta.buddy).order = Some(next_order);
+            (*right.as_mut().meta.buddy) = BuddyPageMeta::default();
+        };
+
+        Some(left)
+    }
+
+    // TODO: This function will probably fail, should change that the head
+    // of the page list is static and the list starts from the second
+    // node, and then this would work
+    fn detach_from_mid(page: NonNull<UnassignedPage>) {
+        let (mut prev, next) = unsafe {
+            let p_ref = page.as_ref();
+            (
+                p_ref.meta.buddy.prev.expect("Page has no prev"),
+                p_ref.meta.buddy.next.expect("Page has no next"),
+            )
+        };
+
+        unsafe { (*prev.as_mut().meta.buddy).next = Some(next) }
+    }
+
+    fn buddy_of(
+        page: NonNull<UnassignedPage>,
+    ) -> Option<NonNull<UnassignedPage>> {
+        let order = unsafe { page.as_ref().meta.buddy.order? };
+        if let BuddyOrder::MAX = order {
+            None
+        } else {
+            unsafe {
+                let buddy_address = page.as_ref() as *const _ as usize
+                    ^ ((1 << order as usize)
+                        * size_of::<UnassignedPage>());
+
+                Some(NonNull::new_unchecked(
+                    buddy_address as *mut UnassignedPage,
+                ))
+            }
+        }
+    }
+}
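`buddy_of` above finds a block's partner by XOR-ing its address with the block size: at order k, the two halves of a buddy pair differ only in bit k of the page index (the XOR is scaled by `size_of::<UnassignedPage>()` because it operates on metadata-array addresses rather than raw indices). A standalone illustration in terms of page indices (helper name is illustrative):

    /// At order k the buddy of page-index i is i with bit k flipped.
    fn buddy_index(index: usize, order: usize) -> usize {
        index ^ (1 << order)
    }

    fn main() {
        assert_eq!(buddy_index(0, 0), 1); // pages 0 and 1 pair at order 0
        assert_eq!(buddy_index(4, 2), 0); // blocks [0..4) and [4..8) pair
        assert_eq!(buddy_index(8, 2), 12); // blocks [8..12) and [12..16) pair
    }
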
+#[ext]
+pub impl VirtualAddress {
+    /// Map this `virtual address` to the given
+    /// `physical_address` with the current page table,
+    /// obtained from `cr3`. If a page table for the
+    /// given virtual address doesn't exist, a new table
+    /// **will** be created for it.
+    ///
+    /// # Parameters
+    ///
+    /// - `address`: The physical address to map this virtual address to
+    /// - `page_size`: The size of the page from the [`PageSize`] enum
+    fn map(
+        &self,
+        address: PhysicalAddress,
+        flags: PageEntryFlags,
+        page_size: PageSize,
+    ) {
+        if address.is_aligned(page_size.alignment())
+            && self.is_aligned(page_size.alignment())
+        {
+            let mut table = PageTable::current_table();
+            for level in
+                PageTableLevel::VARIANTS[0..=page_size as usize].iter()
+            {
+                let index = self.index_of(*level);
+                let entry = unsafe { &mut table.as_mut().entries[index] };
+                let resolved_table = entry
+                    .force_resolve_table_mut()
+                    .expect("Tried to create table on a mapped entry");
+                table = resolved_table;
+            }
+            unsafe {
+                table.as_mut().entries[self.index_of(
+                    PageTableLevel::VARIANTS[page_size as usize + 1],
+                )]
+                .map(address, flags);
+            }
+        } else {
+            panic!(
+                "address alignment doesn't match page type alignment, \
+                 todo! raise a page fault"
+            )
+        }
+    }
+
+    fn set_flags(
+        &self,
+        flags: PageEntryFlags,
+        page_size: PageSize,
+        num_pages: NonZero<usize>,
+    ) -> Result<(), EntryError> {
+        let address_index = self.index_of(page_size.min_level());
+
+        debug_assert!(
+            address_index + num_pages.get() <= PAGE_DIRECTORY_ENTRIES,
+            "There are only 512 entries inside a table"
+        );
+
+        let mut table = self.walk(page_size.min_level())?;
+
+        unsafe {
+            table
+                .as_mut()
+                .entries
+                .iter_mut()
+                .skip(address_index)
+                .take(num_pages.get())
+                .for_each(|entry| entry.set_flags(flags));
+        }
+
+        Ok(())
+    }
+
+    /// Return the table that is pointed to by the wanted level
+    fn walk(
+        &self,
+        wanted: PageTableLevel,
+    ) -> Result<NonNull<PageTable>, EntryError> {
+        let mut table = PageTable::current_table();
+
+        for level in PageTableLevel::VARIANTS[0..wanted as usize].iter() {
+            let entry =
+                unsafe { &table.as_ref().entries[self.index_of(*level)] };
+            table = entry.mapped_table()?;
+        }
+
+        Ok(table)
+    }
+
+    fn translate(&self) -> Option<PhysicalAddress> {
+        let mut table = PageTable::current_table();
+
+        for level in PageTableLevel::VARIANTS.iter() {
+            let entry =
+                unsafe { &table.as_mut().entries[self.index_of(*level)] };
+            match entry.mapped_table() {
+                Ok(mapped) => table = mapped,
+                Err(EntryError::NotATable) => {
+                    return unsafe { Some(entry.mapped_unchecked()) };
+                }
+                Err(EntryError::NoMapping) => return None,
+            }
+        }
+        unreachable!()
+    }
+}
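+
+// Comment-only sketch: mapping one regular 4 KiB page and translating
+// it back. The two addresses are made-up example values.
+//
+//     let virt = VirtualAddress::from(0xffff_9000_0000_0000usize);
+//     let phys = unsafe { PhysicalAddress::new_unchecked(0x20_0000) };
+//     virt.map(phys, PageSize::Regular.default_flags(), PageSize::Regular);
+//     assert_eq!(virt.translate(), Some(phys));
+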
+#[ext]
+pub impl PageTable {
+    // TODO: turn into a tail called function with become
+    /// Find an available page in the given size.
+    // ANCHOR: page_table_find_available_page
+    #[cfg(target_arch = "x86_64")]
+    fn find_available_page(
+        page_size: PageSize,
+    ) -> Result<VirtualAddress, TableError> {
+        const TOTAL_LEVELS: usize = PageTableLevel::VARIANTS.len();
+        let mut level_indices = [0usize; TOTAL_LEVELS];
+        let mut page_tables = [Self::current_table(); TOTAL_LEVELS];
+        let mut current_level = PageTableLevel::PML4;
+        loop {
+            // `PageTableLevel` now starts at `PML4 = 0`, so the level
+            // value is also the slot for that level in the arrays above.
+            let mut current_table =
+                page_tables[current_level as usize];
+
+            let ti = unsafe {
+                current_table.as_mut().try_fetch_table(
+                    level_indices[current_level as usize],
+                    current_level,
+                    page_size,
+                )
+            };
+
+            let next_table = match ti {
+                EntryIndex::OutOfEntries | EntryIndex::PageDoesNotFit => {
+                    current_level = current_level.prev()?;
+                    level_indices[current_level as usize] += 1;
+                    continue;
+                }
+                EntryIndex::Entry(entry) => {
+                    level_indices[current_level as usize] =
+                        entry.table_index();
+                    unsafe {
+                        entry.mapped_unchecked().as_non_null::<PageTable>()
+                    }
+                }
+                EntryIndex::Index(i) => {
+                    level_indices[current_level as usize] = i;
+                    return Ok(VirtualAddress::from_indices(
+                        level_indices,
+                    ));
+                }
+            };
+            let next_level = current_level
+                .next()
+                .expect("Can't go next on a first level table");
+            page_tables[next_level as usize] = next_table;
+            current_level = next_level;
+        }
+    }
+    // ANCHOR_END: page_table_find_available_page
+
+    // TODO: turn into a tail called function with become
+    /// Map the region of memory from 0 to `mem_size_bytes`
+    /// at the top of the page table so that
+    ///
+    /// ```rust
+    /// VirtualAddress(0xffff800000000000) -> PhysicalAddress(0)
+    /// ```
+    ///
+    /// TODO: ADD SUPPORT FOR FULL FLAG
+    #[allow(unsafe_op_in_unsafe_fn)]
+    fn map_physical_memory(&mut self, mem_size_bytes: usize) {
+        let mut second_level_entries_count =
+            (mem_size_bytes / BIG_PAGE_SIZE) + 1;
+        let mut third_level_entries_count =
+            second_level_entries_count.div_ceil(HUGE_PAGE_SIZE) + 1;
+        let fourth_level_entries_count = third_level_entries_count
+            .div_ceil(PAGE_DIRECTORY_ENTRIES)
+            .clamp(1, 256);
+        let mut next_mapped = unsafe { PhysicalAddress::new_unchecked(0) };
+        for fourth_entry in &mut self.entries[(PAGE_DIRECTORY_ENTRIES / 2)
+            ..(fourth_level_entries_count + (PAGE_DIRECTORY_ENTRIES / 2))]
+        {
+            let mut third_table =
+                fourth_entry.force_resolve_table_mut().unwrap();
+
+            for third_entry in unsafe {
+                &mut third_table.as_mut().entries[0
+                    ..third_level_entries_count
+                        .min(PAGE_DIRECTORY_ENTRIES)]
+            } {
+                let mut second_table =
+                    third_entry.force_resolve_table_mut().unwrap();
+
+                third_level_entries_count -= 1;
+                for second_entry in unsafe {
+                    &mut second_table.as_mut().entries[0
+                        ..second_level_entries_count
+                            .min(PAGE_DIRECTORY_ENTRIES)]
+                } {
+                    if !second_entry.is_present() {
+                        unsafe {
+                            second_entry.map(
+                                next_mapped,
+                                PageEntryFlags::huge_page_flags(),
+                            );
+                        }
+                    }
+                    next_mapped += BIG_PAGE_SIZE.into();
+                    second_level_entries_count -= 1;
+                }
+            }
+        }
+    }
+}
+
+#[ext]
+pub impl PageSize {
+    fn default_flags(&self) -> PageEntryFlags {
+        match self {
+            PageSize::Regular => PageEntryFlags::regular_page_flags(),
+            PageSize::Big | PageSize::Huge => {
+                PageEntryFlags::huge_page_flags()
+            }
+        }
+    }
+}
+
+#[ext]
+pub impl PageMap {
+    /// Reallocates the page array on the buddy allocator.
+ fn reallocate(init: &'static mut LateInit) {} +} diff --git a/kernel/src/memory/allocators/mod.rs b/kernel/src/memory/allocators/mod.rs index e6b41e5..6b2929b 100644 --- a/kernel/src/memory/allocators/mod.rs +++ b/kernel/src/memory/allocators/mod.rs @@ -1 +1,3 @@ -pub mod page_allocator; +pub mod buddy; +pub mod extensions; +pub mod slab; diff --git a/kernel/src/memory/allocators/page_allocator/allocator.rs b/kernel/src/memory/allocators/page_allocator/allocator.rs deleted file mode 100644 index 826cef7..0000000 --- a/kernel/src/memory/allocators/page_allocator/allocator.rs +++ /dev/null @@ -1,236 +0,0 @@ -use core::{ - alloc::{AllocError, Allocator, Layout}, - cell::UnsafeCell, - mem::MaybeUninit, - ptr::{self, NonNull}, -}; - -use common::{ - address_types::{PhysicalAddress, VirtualAddress}, - bitmap::{BitMap, ContiguousBlockLayout, Position}, - constants::{ - FIRST_STAGE_OFFSET, PAGE_ALLOCATOR_OFFSET, PHYSICAL_MEMORY_OFFSET, - REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, - }, - enums::MemoryRegionType, -}; -use cpu_utils::structures::paging::PageTable; - -use crate::parsed_memory_map; - -#[derive(Debug)] -// TODO: This is not thread safe, probably should use Mutex -// in the future -/// Physical page allocator implemented with a bitmap, every -/// bit corresponds to a physical page -pub struct PhysicalPageAllocator(UnsafeCell); - -impl Clone for PhysicalPageAllocator { - fn clone(&self) -> Self { - unsafe { - let bitmap = self.map_mut(); - Self(UnsafeCell::new(bitmap.clone())) - } - } -} - -impl PhysicalPageAllocator { - /// Creates a new allocator from the `bitmap_address` - /// and the `memory_size`. - /// - /// # Parameters - /// - /// - `bitmap_address`: Virtual address that is identity mapped and - /// will use to store the map - /// - `memory_size`: Memory size in bytes - #[allow(unsafe_op_in_unsafe_fn)] - pub const unsafe fn new( - bitmap_address: VirtualAddress, - memory_size: usize, - ) -> PhysicalPageAllocator { - let size_in_pages = memory_size / REGULAR_PAGE_SIZE; - let map_size = size_in_pages / u64::BITS as usize; - PhysicalPageAllocator(UnsafeCell::new(BitMap::new( - bitmap_address, - map_size, - ))) - } - - pub const fn address_position( - address: PhysicalAddress, - ) -> Option { - if address.is_aligned(REGULAR_PAGE_ALIGNMENT) { - let bit_index = address.as_usize() / REGULAR_PAGE_SIZE; - return Some(Position::from_abs_bit_index(bit_index)); - } - None - } - - unsafe fn map(&self) -> &BitMap { - unsafe { self.0.as_ref_unchecked() } - } - - #[allow(clippy::mut_from_ref)] - unsafe fn map_mut(&self) -> &mut BitMap { - unsafe { self.0.as_mut_unchecked() } - } - - pub fn init(uninit: &'static mut MaybeUninit) { - unsafe { - let memory_size = parsed_memory_map!() - .iter() - .map(|x| x.length as usize) - .sum::(); - uninit.write(Self::new( - PhysicalAddress::new_unchecked(PAGE_ALLOCATOR_OFFSET) - .translate(), - memory_size, - )); - let initialized = uninit.assume_init_mut(); - - // Set the null page - initialized - .map_mut() - .set_bit(&Position::new_unchecked(0, 0)); - - let start_address = const { - PhysicalAddress::new_unchecked(FIRST_STAGE_OFFSET as usize) - .align_down(REGULAR_PAGE_ALIGNMENT) - }; - let start_position = - Self::address_position(start_address).unwrap(); - // Allocate the addresses that are used for the - // code, and for other variables. 
- let end_address = PhysicalAddress::new_unchecked( - PAGE_ALLOCATOR_OFFSET - + core::mem::size_of_val(initialized.map().map), - ) - .align_up(REGULAR_PAGE_ALIGNMENT); - let size_bits = ((end_address - start_address) - / REGULAR_PAGE_SIZE) - .as_usize(); - let block = ContiguousBlockLayout::from_start_size( - &start_position, - size_bits, - ); - initialized - .map_mut() - .set_contiguous_block(&start_position, &block); - for region in parsed_memory_map!() { - if region.region_type != MemoryRegionType::Usable { - let start_address_aligned = - PhysicalAddress::new_unchecked( - region.base_address as usize - & (u64::MAX - ^ (REGULAR_PAGE_SIZE as u64 - 1)) - as usize, - ); - let start_position = - Self::address_position(start_address_aligned) - .unwrap(); - let size_bits = - region.length as usize / REGULAR_PAGE_SIZE; - let block = ContiguousBlockLayout::from_start_size( - &start_position, - size_bits, - ); - initialized - .map_mut() - .set_contiguous_block(&start_position, &block); - } - } - }; - } - - /// Resolves `map_index` and `bit_index` into actual - /// physical address - pub fn resolve_position(p: &Position) -> PhysicalAddress { - unsafe { - PhysicalAddress::new_unchecked( - ((p.map_index * (u64::BITS as usize)) + p.bit_index) - * REGULAR_PAGE_SIZE, - ) - } - } - - pub fn resolve_address(address: PhysicalAddress) -> Position { - let starting_bit_idx = address.as_usize() / REGULAR_PAGE_SIZE; - Position::from_abs_bit_index(starting_bit_idx) - } - - pub fn available_memory(&self) -> usize { - unsafe { self.map().count_zeros() * REGULAR_PAGE_SIZE } - } - - /// Return the physical address of this table - pub(super) fn alloc_table(&self) -> &'static mut PageTable { - let free_block = unsafe { self.map().find_free_block(1) }; - - match free_block { - Some((p, _)) => unsafe { - let physical_address = Self::resolve_position(&p); - - ptr::write( - physical_address.translate().as_mut_ptr::(), - PageTable::empty(), - ); - - self.map_mut().set_bit(&p); - - &mut *physical_address.as_mut_ptr::() - }, - - None => panic!( - "No physical memory is available to allocate this table" - ), - } - } -} - -#[allow(unsafe_op_in_unsafe_fn)] -unsafe impl Allocator for PhysicalPageAllocator { - fn allocate( - &self, - layout: Layout, - ) -> Result, AllocError> { - unsafe { - if let Ok(layout) = - layout.align_to(REGULAR_PAGE_ALIGNMENT.as_usize()) - && let Some((p, block)) = self - .map() - .find_free_block(layout.size() / REGULAR_PAGE_SIZE) - { - self.map_mut().set_contiguous_block(&p, &block); - return Ok(NonNull::slice_from_raw_parts( - NonNull::new_unchecked( - Self::resolve_position(&p) - .translate() - .as_mut_ptr::(), - ), - layout.size(), - )); - } - Err(AllocError) - } - } - - /// TODO USE INVAL PAGE HERE ON THE ADDRESS - unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { - if let Ok(layout) = - layout.align_to(REGULAR_PAGE_ALIGNMENT.as_usize()) - { - let start_position = - Self::resolve_address(PhysicalAddress::new_unchecked( - ptr.as_ptr() as usize - PHYSICAL_MEMORY_OFFSET, - )); - let block = ContiguousBlockLayout::from_start_size( - &start_position, - layout.size() / REGULAR_PAGE_SIZE, - ); - self.map_mut() - .unset_contiguous_block(&start_position, &block); - } - } -} - -unsafe impl Sync for PhysicalPageAllocator {} diff --git a/kernel/src/memory/allocators/page_allocator/extensions.rs b/kernel/src/memory/allocators/page_allocator/extensions.rs deleted file mode 100644 index a9603d6..0000000 --- a/kernel/src/memory/allocators/page_allocator/extensions.rs +++ /dev/null @@ -1,202 +0,0 @@ -use 
super::ALLOCATOR; -use common::{ - address_types::{PhysicalAddress, VirtualAddress}, - constants::{ - BIG_PAGE_SIZE, PAGE_ALLOCATOR_OFFSET, PAGE_DIRECTORY_ENTRIES, - PHYSICAL_MEMORY_OFFSET, - }, - enums::{PageSize, PageTableLevel}, - error::EntryError, -}; -use cpu_utils::structures::paging::{ - PageEntryFlags, PageTable, PageTableEntry, -}; -use extend::ext; -use strum::VariantArray; -#[ext] -pub impl PhysicalAddress { - fn map( - &self, - address: VirtualAddress, - flags: PageEntryFlags, - page_size: PageSize, - ) { - address.map(*self, flags, page_size) - } - - fn translate(&self) -> VirtualAddress { - unsafe { - VirtualAddress::new_unchecked( - PHYSICAL_MEMORY_OFFSET + self.as_usize(), - ) - } - } -} - -#[ext] -pub impl PageTableEntry { - /// This function will return a table mapped in this - /// entry if there is one. - /// - /// Else, it will override what is inside the entry and - /// map a new table to it so valid table is guaranteed - /// to be returned. - fn force_resolve_table_mut(&mut self) -> Option<&mut PageTable> { - match self.mapped_table_mut() { - Ok(table) => Some(table), - Err(EntryError::NotATable) => None, - Err(EntryError::NoMapping) => { - let resolved_table = - unsafe { ALLOCATOR.assume_init_ref().alloc_table() }; - unsafe { - self.map_unchecked( - PhysicalAddress::new_unchecked( - resolved_table.address().as_usize(), - ), - PageEntryFlags::table_flags(), - ); - } - unsafe { - Some( - &mut *self - .mapped_unchecked() - .as_mut_ptr::(), - ) - } - } - } - } -} - -#[ext] -pub impl VirtualAddress { - /// Map this `virtual address` into the given - /// `physical_address` with the current page table, - /// obtained from `cr3` if a page table for the - /// given virtual address doesn't exist, a new table - /// **will** be created for it - /// - /// # Parameters - /// - /// - `address`: The physical address to map this to, this address is - /// needed - /// - `page_size`: The size of the page from the [`PageSize`] enum - fn map( - &self, - address: PhysicalAddress, - flags: PageEntryFlags, - page_size: PageSize, - ) { - if address.is_aligned(page_size.alignment()) - && self.is_aligned(page_size.alignment()) - { - let mut table = PageTable::current_table_mut(); - for level in - PageTableLevel::VARIANTS[0..=page_size as usize].iter() - { - let index = self.index_of(*level); - let entry = &mut table.entries[index]; - let resolved_table = - entry.force_resolve_table_mut().unwrap(); - table = resolved_table; - } - unsafe { - table.entries[self.index_of( - PageTableLevel::VARIANTS[page_size as usize + 1], - )] - .map(address, flags); - } - } else { - panic!( - "address alignment doesn't match page type alignment, \ - todo! 
raise a page fault" - ) - } - } - - fn set_flags(&self, flags: PageEntryFlags) -> Result<(), EntryError> { - let page_size = PageSize::from_alignment(self.alignment()) - .expect("self address is not aligned to a page size"); - - let mut table = PageTable::current_table_mut(); - - for level in PageTableLevel::VARIANTS[0..page_size as usize].iter() - { - let index = self.index_of(*level); - let entry = &mut table.entries[index]; - table = entry.mapped_table_mut()?; - } - table.entries[self - .index_of(PageTableLevel::VARIANTS[page_size as usize + 1])] - .set_flags(flags); - Ok(()) - } - - fn translate(&self) -> PhysicalAddress { - todo!() - } -} - -#[ext] -pub impl PageTable { - /// Map the region of memory from 0 to `mem_size_bytes` - /// at the top of the page table so that ```rust - /// VirtualAddress(0xffff800000000000) -> - /// PhysicalAddress(0) ``` - /// - /// TODO: ADD SUPPORT FOR FULL FLAG - #[allow(unsafe_op_in_unsafe_fn)] - fn map_physical_memory(&mut self, mem_size_bytes: usize) { - let mut second_level_entries_count = - (mem_size_bytes / BIG_PAGE_SIZE).max(1); - let mut third_level_entries_count = second_level_entries_count - .div_ceil(PAGE_ALLOCATOR_OFFSET) - .max(1); - let forth_level_entries_count = third_level_entries_count - .div_ceil(PAGE_DIRECTORY_ENTRIES) - .clamp(1, 256); - let mut next_mapped = unsafe { PhysicalAddress::new_unchecked(0) }; - for forth_entry in &mut self.entries[(PAGE_DIRECTORY_ENTRIES / 2) - ..(forth_level_entries_count + (PAGE_DIRECTORY_ENTRIES / 2))] - { - let third_table = - forth_entry.force_resolve_table_mut().unwrap(); - - for third_entry in &mut third_table.entries - [0..third_level_entries_count.min(PAGE_DIRECTORY_ENTRIES)] - { - let second_table = - third_entry.force_resolve_table_mut().unwrap(); - - third_level_entries_count -= 1; - for second_entry in &mut second_table.entries[0 - ..second_level_entries_count - .min(PAGE_DIRECTORY_ENTRIES)] - { - if !second_entry.is_present() { - unsafe { - second_entry.map( - next_mapped, - PageEntryFlags::huge_page_flags(), - ); - } - } - next_mapped += BIG_PAGE_SIZE.into(); - second_level_entries_count -= 1; - } - } - } - } -} - -#[ext] -pub impl PageSize { - fn default_flags(&self) -> PageEntryFlags { - match self { - PageSize::Regular => PageEntryFlags::regular_page_flags(), - PageSize::Big | PageSize::Huge => { - PageEntryFlags::huge_page_flags() - } - } - } -} diff --git a/kernel/src/memory/allocators/page_allocator/mod.rs b/kernel/src/memory/allocators/page_allocator/mod.rs deleted file mode 100644 index 1d7d882..0000000 --- a/kernel/src/memory/allocators/page_allocator/mod.rs +++ /dev/null @@ -1,26 +0,0 @@ -pub mod allocator; -pub mod extensions; - -use allocator::PhysicalPageAllocator; -use core::mem::MaybeUninit; - -pub static mut ALLOCATOR: MaybeUninit = - MaybeUninit::uninit(); - -#[macro_export] -/// Allocate the amout of pages specified, and return the address -macro_rules! 
alloc_pages { - ($page_number: expr) => {{ - use core::alloc::{Allocator, Layout}; - use $crate::memory::allocators::page_allocator::ALLOCATOR; - ALLOCATOR - .assume_init_ref() - .allocate(Layout::from_size_align_unchecked( - REGULAR_PAGE_SIZE * $page_number, - REGULAR_PAGE_ALIGNMENT.as_usize(), - )) - .unwrap() - .addr() - .get() - }}; -} diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs new file mode 100644 index 0000000..886df9d --- /dev/null +++ b/kernel/src/memory/allocators/slab.rs @@ -0,0 +1,187 @@ +pub mod cache; +pub mod descriptor; +pub mod macros; +pub mod traits; + +use learnix_macros::generate_generics; + +use crate::{ + define_slab_system, + memory::{ + allocators::slab::{ + cache::SlabCache, + descriptor::SlabDescriptor, + traits::{Generic, Slab, SlabPosition}, + }, + page::Page, + unassigned::{AssignSlab, UnassignSlab}, + }, +}; +use core::{ + alloc::{AllocError, Allocator}, + ptr::NonNull, +}; + +generate_generics!( + 8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096, 8192 +); + +define_slab_system!( + SlabDescriptor<()>, + Generic8, + Generic16, + Generic32, + Generic64, + Generic96, + Generic128, + Generic192, + Generic256, + Generic512, + Generic1024, + Generic2048, + Generic4096, + Generic8192, +); + +pub static mut SLAB_ALLOCATOR: SlabAllocator = SlabAllocator::new(); + +impl SlabAllocator { + pub fn slab_of(&self) -> NonNull> { + self.slabs[T::SLAB_POSITION].assign::() + } + + pub fn kmalloc(&self) -> NonNull { + let mut slab = self.slab_of::(); + unsafe { slab.as_mut().alloc() } + } + + pub fn kfree(&self, ptr: NonNull) { + let page = unsafe { Page::::from_virt(ptr.into()).as_ref() }; + + let descriptor = unsafe { page.meta.slab.freelist }; + + unsafe { descriptor.assign::().as_mut().dealloc(ptr) }; + } +} + +#[extend::ext] +pub impl NonNull { + fn into_u8(&self) -> NonNull<[u8]> { + unsafe { + let data = NonNull::new_unchecked(self.as_ptr() as *mut u8); + let size = self.as_ref().size(); + NonNull::slice_from_raw_parts(data, size) + } + } + + fn from_u8(data: NonNull) -> NonNull { + unsafe { NonNull::new_unchecked(data.as_ptr() as *mut T) } + } +} + +unsafe impl Allocator for SlabAllocator { + fn allocate( + &self, + layout: core::alloc::Layout, + ) -> Result, core::alloc::AllocError> { + if layout.size() < layout.align() { + return Err(AllocError); + } + + match layout.size() { + Generic8::START..=Generic8::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic16::START..=Generic16::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic32::START..=Generic32::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic64::START..=Generic64::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic96::START..=Generic96::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic128::START..=Generic128::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic192::START..=Generic192::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic256::START..=Generic256::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic512::START..=Generic512::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic1024::START..=Generic1024::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic2048::START..=Generic2048::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic4096::START..=Generic4096::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic8192::START..=Generic8192::END => { + Ok(self.kmalloc::().into_u8()) + } + _ => Err(AllocError), + } + } + + unsafe fn deallocate( + &self, + ptr: core::ptr::NonNull, + layout: core::alloc::Layout, + ) 
{
+        match layout.size() {
+            Generic8::START..=Generic8::END => {
+                self.kfree::<Generic8>(NonNull::from_u8(ptr))
+            }
+            Generic16::START..=Generic16::END => {
+                self.kfree::<Generic16>(NonNull::from_u8(ptr))
+            }
+            Generic32::START..=Generic32::END => {
+                self.kfree::<Generic32>(NonNull::from_u8(ptr))
+            }
+            Generic64::START..=Generic64::END => {
+                self.kfree::<Generic64>(NonNull::from_u8(ptr))
+            }
+            Generic96::START..=Generic96::END => {
+                self.kfree::<Generic96>(NonNull::from_u8(ptr))
+            }
+            Generic128::START..=Generic128::END => {
+                self.kfree::<Generic128>(NonNull::from_u8(ptr))
+            }
+            Generic192::START..=Generic192::END => {
+                self.kfree::<Generic192>(NonNull::from_u8(ptr))
+            }
+            Generic256::START..=Generic256::END => {
+                self.kfree::<Generic256>(NonNull::from_u8(ptr))
+            }
+            Generic512::START..=Generic512::END => {
+                self.kfree::<Generic512>(NonNull::from_u8(ptr))
+            }
+            Generic1024::START..=Generic1024::END => {
+                self.kfree::<Generic1024>(NonNull::from_u8(ptr))
+            }
+            Generic2048::START..=Generic2048::END => {
+                self.kfree::<Generic2048>(NonNull::from_u8(ptr))
+            }
+            Generic4096::START..=Generic4096::END => {
+                self.kfree::<Generic4096>(NonNull::from_u8(ptr))
+            }
+            Generic8192::START..=Generic8192::END => {
+                self.kfree::<Generic8192>(NonNull::from_u8(ptr))
+            }
+            _ => unreachable!(),
+        }
+    }
+}
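+
+// Comment-only sketch of how the `Allocator` impl routes a request: a
+// `Layout` of size 24 falls into `Generic32::START..=Generic32::END`,
+// so `allocate` returns a 32-byte slot from the `Generic32` cache and
+// `deallocate` hands it back to the same cache.
+//
+//     use core::alloc::{Allocator, Layout};
+//
+//     let layout = Layout::from_size_align(24, 8).unwrap();
+//     let slot = unsafe { SLAB_ALLOCATOR.allocate(layout).unwrap() };
+//     unsafe { SLAB_ALLOCATOR.deallocate(slot.cast(), layout) };
+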
+unsafe impl<T> Send for SlabDescriptor<T> {}
+unsafe impl<T> Sync for SlabDescriptor<T> {}
+unsafe impl<T> Send for SlabCache<T> {}
+unsafe impl<T> Sync for SlabCache<T> {}
diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs
new file mode 100644
index 0000000..eebc815
--- /dev/null
+++ b/kernel/src/memory/allocators/slab/cache.rs
@@ -0,0 +1,159 @@
+use core::{num::NonZero, ptr::NonNull};
+
+use common::address_types::VirtualAddress;
+
+use crate::memory::{
+    allocators::{
+        extensions::VirtualAddressExt,
+        slab::{SLAB_ALLOCATOR, traits::Slab},
+    },
+    page::UnassignedPage,
+    unassigned::{AssignSlab, UnassignSlab},
+};
+
+use super::{descriptor::SlabDescriptor, traits::SlabCacheConstructor};
+
+#[derive(Clone, Debug)]
+pub struct SlabCache<T> {
+    pub buddy_order: usize,
+    pub free: Option<NonNull<SlabDescriptor<T>>>,
+    pub partial: Option<NonNull<SlabDescriptor<T>>>,
+    pub full: Option<NonNull<SlabDescriptor<T>>>,
+}
+
+impl<T> UnassignSlab for NonNull<SlabCache<T>> {
+    type Target = NonNull<SlabCache<()>>;
+
+    fn as_unassigned(&self) -> Self::Target {
+        unsafe {
+            NonNull::new_unchecked(self.as_ptr() as *mut SlabCache<()>)
+        }
+    }
+}
+
+impl<T: Slab> SlabCache<T> {
+    /// Allocate a new slab descriptor, attach it to the free slab list,
+    /// and initialize its page.
+    pub fn grow(&mut self) {
+        // Allocate a new slab descriptor for this slab
+        let mut slab = unsafe {
+            SLAB_ALLOCATOR.kmalloc::<SlabDescriptor<()>>().assign::<T>()
+        };
+
+        unsafe {
+            *slab.as_mut() =
+                SlabDescriptor::<T>::new(self.buddy_order, self.free)
+        }
+
+        self.take_ownership(slab);
+
+        self.free = Some(slab);
+    }
+
+    pub fn take_ownership(&self, slab: NonNull<SlabDescriptor<T>>) {
+        let slab_address: VirtualAddress =
+            unsafe { slab.as_ref().objects.as_ptr().addr().into() };
+
+        slab_address
+            .set_flags(T::PFLAGS, T::PSIZE, unsafe {
+                NonZero::<usize>::new_unchecked(1 << self.buddy_order)
+            })
+            .unwrap();
+
+        let slab_page =
+            unsafe { UnassignedPage::from_virt(slab_address).as_mut() };
+
+        // Set owner and freelist.
+        unsafe {
+            (*slab_page.meta.slab).freelist = slab.as_unassigned();
+            (*slab_page.meta.slab).owner =
+                NonNull::from_ref(self).as_unassigned();
+        };
+    }
+
+    pub fn alloc(&mut self) -> NonNull<T> {
+        if let Some(mut partial) = self.partial {
+            let partial = unsafe { partial.as_mut() };
+
+            let allocation = partial.alloc();
+
+            // The slab ran out of free slots; move it to the full list.
+            if partial.next_free_idx.is_none() {
+                self.partial = partial.next;
+                partial.next = self.full;
+                self.full = Some(NonNull::from_mut(partial));
+            }
+            return allocation;
+        }
+        if let Some(mut free) = self.free {
+            let free = unsafe { free.as_mut() };
+
+            let allocation = free.alloc();
+
+            // The slab is no longer untouched; move it to the partial
+            // list.
+            self.free = free.next;
+            free.next = self.partial;
+            self.partial = Some(NonNull::from_mut(free));
+
+            return allocation;
+        }
+
+        todo!(
+            "Handle the case where no partial or free slab is left and \
+             an allocation from the page allocator is needed."
+        )
+    }
+
+    pub fn dealloc(&self, _ptr: NonNull<T>) {
+        todo!()
+    }
+}
+
+impl SlabCache<()> {
+    pub fn assign<T: Slab>(&self) -> NonNull<SlabCache<T>> {
+        unsafe {
+            NonNull::new_unchecked(self as *const _ as *mut SlabCache<T>)
+        }
+    }
+}
+
+impl<T> SlabCacheConstructor for SlabCache<T> {
+    default fn new(buddy_order: usize) -> SlabCache<T> {
+        SlabCache {
+            buddy_order,
+            free: None,
+            partial: None,
+            full: None,
+        }
+    }
+}
+
+impl SlabCacheConstructor for SlabCache<SlabDescriptor<()>> {
+    fn new(buddy_order: usize) -> SlabCache<SlabDescriptor<()>> {
+        let partial =
+            SlabDescriptor::<SlabDescriptor<()>>::initial_descriptor(
+                buddy_order,
+            );
+
+        // This assumption can be made because the created cache will go
+        // to the constant position on the slab array defined by the
+        // `SlabPosition` trait.
+        let mut future_owner =
+            unsafe { SLAB_ALLOCATOR.slab_of::<SlabDescriptor<()>>() };
+
+        let cache = SlabCache {
+            buddy_order,
+            free: None,
+            partial: Some(partial),
+            full: None,
+        };
+
+        // Only in this function do we initialize the global array inside
+        // `new`, because then we can use the `take_ownership` function.
+        unsafe {
+            *future_owner.as_mut() = cache.clone();
+            future_owner.as_mut().take_ownership(partial);
+        }
+
+        cache
+    }
+}
diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs
new file mode 100644
index 0000000..2844f74
--- /dev/null
+++ b/kernel/src/memory/allocators/slab/descriptor.rs
@@ -0,0 +1,160 @@
+use crate::{
+    alloc_pages,
+    memory::{
+        allocators::slab::traits::Slab,
+        unassigned::{AssignSlab, UnassignSlab},
+    },
+};
+use common::constants::REGULAR_PAGE_SIZE;
+use core::{
+    fmt::Debug,
+    mem::{ManuallyDrop, size_of},
+    ptr::NonNull,
+};
+use nonmax::NonMaxU16;
+
+/// Preallocated object in the slab allocator.
+pub union PreallocatedObject<T> {
+    pub allocated: ManuallyDrop<T>,
+    pub next_free_idx: Option<NonMaxU16>,
+}
+
+impl<T> Debug for PreallocatedObject<T> {
+    fn fmt(&self, _f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        Ok(())
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct SlabDescriptor<T> {
+    pub next_free_idx: Option<NonMaxU16>,
+    pub total_allocated: u16,
+    pub objects: NonNull<[PreallocatedObject<T>]>,
+    pub next: Option<NonNull<SlabDescriptor<T>>>,
+}
+
+impl AssignSlab for NonNull<SlabDescriptor<()>> {
+    type Target<T: Slab> = NonNull<SlabDescriptor<T>>;
+
+    fn assign<T: Slab>(&self) -> NonNull<SlabDescriptor<T>> {
+        unsafe {
+            NonNull::new_unchecked(self.as_ptr() as *mut SlabDescriptor<T>)
+        }
+    }
+}
+
+impl<T: Slab> UnassignSlab for NonNull<SlabDescriptor<T>> {
+    type Target = NonNull<SlabDescriptor<()>>;
+
+    fn as_unassigned(&self) -> Self::Target {
+        unsafe {
+            NonNull::new_unchecked(self.as_ptr() as *mut SlabDescriptor<()>)
+        }
+    }
+}
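+
+// Comment-only sketch of the intended slab lifecycle inside a cache;
+// `cache` stands for any initialized `SlabCache<T>`:
+//
+//     cache.grow();            // fresh descriptor goes on `free`
+//     let obj = cache.alloc(); // first allocation moves it to `partial`
+//     // ... once its freelist runs out it moves to `full` ...
+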
+impl<T> SlabDescriptor<T> {
+    /// Create a new slab descriptor.
+    ///
+    /// # Safety
+    /// This function is marked as unsafe because it does not initialize
+    /// the page that the allocation is on.
+    ///
+    /// This function is meant to be called from the [`grow`]
+    /// function inside a slab cache (which is safe and does initialize
+    /// the page).
+    pub unsafe fn new(
+        order: usize,
+        next: Option<NonNull<SlabDescriptor<T>>>,
+    ) -> SlabDescriptor<T> {
+        let address = unsafe { alloc_pages!(1 << order).translate() };
+
+        let mut objects = NonNull::slice_from_raw_parts(
+            address.as_non_null::<PreallocatedObject<T>>(),
+            ((1 << order) * REGULAR_PAGE_SIZE)
+                / size_of::<PreallocatedObject<T>>(),
+        );
+
+        // Each free slot stores the index of the next free slot.
+        for (i, object) in
+            unsafe { objects.as_mut() }.iter_mut().enumerate()
+        {
+            *object = PreallocatedObject {
+                next_free_idx: Some(unsafe {
+                    NonMaxU16::new_unchecked(i as u16 + 1)
+                }),
+            }
+        }
+
+        unsafe {
+            objects.as_mut().last_mut().unwrap().next_free_idx = None
+        };
+
+        SlabDescriptor {
+            next_free_idx: Some(unsafe { NonMaxU16::new_unchecked(0) }),
+            total_allocated: 0,
+            objects,
+            next,
+        }
+    }
+
+    pub fn alloc(&mut self) -> NonNull<T> {
+        debug_assert!(
+            self.next_free_idx.is_some(),
+            "Called allocate on a full slab"
+        );
+
+        let idx = self.next_free_idx.unwrap().get() as usize;
+        let preallocated = unsafe { &mut self.objects.as_mut()[idx] };
+
+        self.next_free_idx = unsafe { preallocated.next_free_idx };
+
+        self.total_allocated += 1;
+
+        unsafe { NonNull::from_mut(&mut preallocated.allocated) }
+    }
+
+    // TODO: In tests, remember to implement something on T that
+    // implements Drop and verify that it is called when the memory is
+    // freed
+    pub unsafe fn dealloc(&mut self, ptr: NonNull<T>) {
+        todo!("Remember to call drop on the item");
+
+        let freed_index = (ptr.as_ptr().addr()
+            - self.objects.as_ptr().addr())
+            / size_of::<PreallocatedObject<T>>();
+
+        unsafe {
+            self.objects.as_mut()[freed_index].next_free_idx =
+                self.next_free_idx;
+        };
+        self.next_free_idx =
+            unsafe { Some(NonMaxU16::new_unchecked(freed_index as u16)) };
+
+        self.total_allocated -= 1;
+    }
+}
+
+impl SlabDescriptor<SlabDescriptor<()>> {
+    /// Return a pointer to the initial descriptor after it has
+    /// allocated itself.
+    ///
+    /// The pointer returned by this function points to an already
+    /// initialized descriptor whose first allocation is itself.
+    pub fn initial_descriptor(
+        order: usize,
+    ) -> NonNull<SlabDescriptor<SlabDescriptor<()>>> {
+        let mut descriptor = unsafe {
+            SlabDescriptor::<SlabDescriptor<()>>::new(order, None)
+        };
+
+        let mut self_allocation = descriptor.alloc();
+
+        unsafe {
+            *self_allocation.as_mut() = NonNull::from_ref(&descriptor)
+                .as_unassigned()
+                .as_ref()
+                .clone()
+        }
+
+        self_allocation.assign::<SlabDescriptor<()>>()
+    }
+}
diff --git a/kernel/src/memory/allocators/slab/macros.rs b/kernel/src/memory/allocators/slab/macros.rs
new file mode 100644
index 0000000..994f2d2
--- /dev/null
+++ b/kernel/src/memory/allocators/slab/macros.rs
@@ -0,0 +1,71 @@
+#[macro_export]
+macro_rules! register_slabs {
+    ($($t:ty),* $(,)?) => {
+        $crate::register_slabs!(@step 0; $($t),*);
+    };
+
+    (@step $idx:expr; $head:ty, $($tail:ty),+) => {
+        impl $crate::memory::allocators::slab::traits::SlabPosition for $head {
+            const SLAB_POSITION: usize = $idx;
+        }
+
+        impl $crate::memory::allocators::slab::traits::Slab for $head {}
+
+        $crate::register_slabs!(@step $idx + 1; $($tail),*);
+    };
+
+    (@step $idx:expr; $head:ty) => {
+        impl $crate::memory::allocators::slab::traits::SlabPosition for $head {
+            const SLAB_POSITION: usize = $idx;
+        }
+
+        impl $crate::memory::allocators::slab::traits::Slab for $head {}
+    };
+
+    (@step $idx:expr; ) => {};
+}
+
+#[macro_export]
+macro_rules! define_slab_system {
+    ($($t:ty),* $(,)?)
=> { + use common::constants::REGULAR_PAGE_SIZE; + use $crate::memory::allocators::slab::traits::SlabCacheConstructor; + + $crate::register_slabs!($($t),*); + + const COUNT: usize = [$(stringify!($t)),*].len(); + + pub struct SlabAllocator { + slabs: [common::late_init::LateInit>; COUNT] + } + + impl SlabAllocator { + pub const fn new() -> Self { + Self { + slabs: [ + $({ + let _ = stringify!($t); + common::late_init::LateInit::uninit() + }),* + ] + } + } + + pub fn init(&'static mut self) { + $( + let index = <$t>::SLAB_POSITION; + + let initialized = SlabCache::<$t>::new(size_of::<$t>().div_ceil(REGULAR_PAGE_SIZE)); + + let unassigned = NonNull::from_ref(&initialized).as_unassigned(); + + self.slabs[index].write(unsafe { unassigned.as_ref().clone() }); + )* + } + } + } +} + +// TODO implement reverse lookup with an enum that will automatically be +// generated and check the code generated on compiler explorer. if +// interesting, write on it on the book diff --git a/kernel/src/memory/allocators/slab/preallocated.rs b/kernel/src/memory/allocators/slab/preallocated.rs new file mode 100644 index 0000000..e69de29 diff --git a/kernel/src/memory/allocators/slab/traits.rs b/kernel/src/memory/allocators/slab/traits.rs new file mode 100644 index 0000000..16dfc9a --- /dev/null +++ b/kernel/src/memory/allocators/slab/traits.rs @@ -0,0 +1,53 @@ +use common::enums::PageSize; +use cpu_utils::structures::paging::PageEntryFlags; + +/// Get the position on the slab array, for a slab of the given type. +/// +/// Shouldn't implement this trait manually; it is implemented +/// via the `define_slab_system` macro. +pub trait Slab: 'static + Sized + SlabPosition + SlabFlags {} + +impl Slab for () {} + +pub trait SlabPosition { + const SLAB_POSITION: usize; +} + +impl SlabPosition for () { + const SLAB_POSITION: usize = usize::MAX; +} + +pub trait SlabFlags: SlabPosition { + const PFLAGS: PageEntryFlags; + const PSIZE: PageSize; +} + +impl SlabFlags for T { + default const PFLAGS: PageEntryFlags = + PageEntryFlags::regular_page_flags(); + + default const PSIZE: PageSize = PageSize::Regular; +} + +impl SlabFlags for () { + const PFLAGS: PageEntryFlags = PageEntryFlags::default(); + const PSIZE: PageSize = PageSize::Regular; +} + +pub trait SlabCacheConstructor { + fn new(buddy_order: usize) -> Self; +} + +pub trait Generic { + const START: usize; + const END: usize; + + fn size(&self) -> usize; +} + +pub trait DmaGeneric { + const START: usize; + const END: usize; + + fn size(&self) -> usize; +} diff --git a/kernel/src/memory/memory_map.rs b/kernel/src/memory/memory_map.rs index f84672b..7ea62f5 100644 --- a/kernel/src/memory/memory_map.rs +++ b/kernel/src/memory/memory_map.rs @@ -3,6 +3,7 @@ use common::{ enums::MemoryRegionType, }; use core::fmt::{self, Display, Formatter}; +use derive_more::{Deref, DerefMut}; #[macro_export] macro_rules! parsed_memory_map { @@ -15,13 +16,15 @@ macro_rules! parsed_memory_map { as usize, ) .translate() - .as_mut_ptr::<$crate::memory::memory_map::MemoryRegion>(), + .as_non_null::<$crate::memory::memory_map::MemoryRegion>() + .as_ptr(), *(common::address_types::PhysicalAddress::new_unchecked( common::constants::addresses::PARSED_MEMORY_MAP_LENGTH as usize, ) .translate() - .as_mut_ptr::()) as usize, + .as_non_null::() + .as_ptr()) as usize, ) } }; @@ -37,12 +40,14 @@ macro_rules! 
raw_memory_map { common::constants::addresses::MEMORY_MAP_OFFSET as usize, ) .translate() - .as_mut_ptr::<$crate::memory::memory_map::MemoryRegionExtended>(), + .as_non_null::<$crate::memory::memory_map::MemoryRegionExtended>() + .as_ptr(), *(common::address_types::PhysicalAddress::new_unchecked( common::constants::addresses::MEMORY_MAP_LENGTH as usize, ) .translate() - .as_mut_ptr::()) as usize, + .as_non_null::() + .as_ptr()) as usize, ) } }; @@ -104,11 +109,13 @@ impl MemoryRegionTrait for MemoryRegionExtended { } } -pub struct ParsedMapDisplay( - pub &'static [T], -); +#[derive(Deref, DerefMut)] +pub struct MemoryMap(pub &'static [T]); -impl Display for ParsedMapDisplay { +pub type RawMemoryMap = MemoryMap; +pub type ParsedMemoryMap = MemoryMap; + +impl Display for MemoryMap { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let mut usable = 0u64; let mut reserved = 0u64; @@ -120,10 +127,10 @@ impl Display for ParsedMapDisplay { write!( f, - "[0x{:0>9x} - 0x{:0>9x}]: type: {}", + "[0x{:0>9x} - 0x{:0>9x}]: type: {:?}", entry.base_address(), entry.base_address() + entry.length(), - entry.region_type() as u32 + entry.region_type() )?; match entry.region_type() { diff --git a/kernel/src/memory/mod.rs b/kernel/src/memory/mod.rs index 17bb0dc..eba83e0 100644 --- a/kernel/src/memory/mod.rs +++ b/kernel/src/memory/mod.rs @@ -1,2 +1,4 @@ pub mod allocators; pub mod memory_map; +pub mod page; +pub mod unassigned; diff --git a/kernel/src/memory/page.rs b/kernel/src/memory/page.rs new file mode 100644 index 0000000..13990f3 --- /dev/null +++ b/kernel/src/memory/page.rs @@ -0,0 +1,80 @@ +use core::{marker::PhantomData, ptr::NonNull}; + +use crate::memory::{ + allocators::{extensions::VirtualAddressExt, slab::traits::Slab}, + page::{map::PageMap, meta::PageMeta}, + unassigned::{AssignSlab, UnassignSlab}, +}; +use common::{ + address_types::{PhysicalAddress, VirtualAddress}, + constants::REGULAR_PAGE_SIZE, + late_init::LateInit, +}; + +pub mod map; +pub mod meta; + +pub type UnassignedPage = Page<()>; + +pub static mut PAGES: LateInit = LateInit::uninit(); + +pub struct Page { + pub meta: PageMeta, + _phantom: PhantomData, +} + +impl AssignSlab for NonNull> { + type Target = NonNull>; + + fn assign(&self) -> NonNull> { + unsafe { NonNull::new_unchecked(self.as_ptr() as *mut Page) } + } +} + +impl UnassignSlab for NonNull> { + type Target = NonNull>; + + fn as_unassigned(&self) -> NonNull> { + unsafe { NonNull::new_unchecked(self.as_ptr() as *mut Page<()>) } + } +} + +impl Page { + pub fn new(meta: PageMeta) -> Page { + Page { + meta, + _phantom: PhantomData::, + } + } + + pub fn physical_address(&self) -> PhysicalAddress { + let index = (self as *const _ as usize + - unsafe { PAGES.as_ptr().addr() }) + / size_of::(); + + unsafe { + PhysicalAddress::new_unchecked(index * REGULAR_PAGE_SIZE) + } + } + + /// Return the index of the page structure inside the [`PAGES`] array + /// pointed by this virtual address. 
+ /// + /// **Note**: if you meant to get the page structure, consider using + /// [`Page::from_virt`] + pub fn index_of(addr: VirtualAddress) -> usize { + addr.translate() + .expect("Address could not be translated") + .as_usize() + / REGULAR_PAGE_SIZE + } + + /// Return the physical page structure that is pointed by this physical + /// address + pub fn from_virt(addr: VirtualAddress) -> NonNull> { + unsafe { + NonNull::from_ref(&PAGES[Page::::index_of(addr)]) + .assign::() + } + } +} diff --git a/kernel/src/memory/page/map.rs b/kernel/src/memory/page/map.rs new file mode 100644 index 0000000..4b1d205 --- /dev/null +++ b/kernel/src/memory/page/map.rs @@ -0,0 +1,78 @@ +use core::{ + mem::ManuallyDrop, + ops::{Deref, DerefMut}, + ptr::NonNull, +}; + +use common::{ + address_types::VirtualAddress, + constants::{PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_SIZE}, + late_init::LateInit, +}; + +use crate::{ + memory::{ + memory_map::ParsedMemoryMap, + page::{ + PAGES, UnassignedPage, + meta::{BuddyPageMeta, PageMeta}, + }, + }, + println, +}; + +pub struct PageMap(NonNull<[UnassignedPage]>); + +impl Deref for PageMap { + type Target = [UnassignedPage]; + + fn deref(&self) -> &Self::Target { + unsafe { self.0.as_ref() } + } +} + +impl DerefMut for PageMap { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { self.0.as_mut() } + } +} + +impl PageMap { + /// Initializes all pages on the constant address + /// ([`PAGE_ALLOCATOR_OFFSET`]) and returns the end address. + pub fn init( + uninit: &'static mut LateInit, + mmap: ParsedMemoryMap, + ) -> VirtualAddress { + let last = mmap.last().unwrap(); + let last_address = (last.base_address + last.length) as usize; + let total_pages = last_address / REGULAR_PAGE_SIZE; + + println!( + "Last address: {}, Total Pages: {}, size_of_array: {:x?} Kib", + last_address, + total_pages, + total_pages * size_of::() / 1024 + ); + unsafe { + let page_map = NonNull::slice_from_raw_parts( + NonNull::new_unchecked( + PAGE_ALLOCATOR_OFFSET as *mut UnassignedPage, + ), + total_pages, + ); + + uninit.write(PageMap(page_map)); + + for p in uninit.as_mut().iter_mut() { + core::ptr::write_volatile( + p as *mut UnassignedPage, + UnassignedPage::new(PageMeta { + buddy: ManuallyDrop::new(BuddyPageMeta::default()), + }), + ) + } + (PAGES.as_ptr_range().end as usize).into() + } + } +} diff --git a/kernel/src/memory/page/meta.rs b/kernel/src/memory/page/meta.rs new file mode 100644 index 0000000..c5ca152 --- /dev/null +++ b/kernel/src/memory/page/meta.rs @@ -0,0 +1,65 @@ +use core::{mem::ManuallyDrop, ptr::NonNull}; + +use common::enums::BuddyOrder; + +use crate::memory::{ + allocators::slab::{ + cache::SlabCache, descriptor::SlabDescriptor, traits::Slab, + }, + page::{Page, UnassignedPage}, + unassigned::{AssignSlab, UnassignSlab}, +}; + +pub union PageMeta { + pub buddy: ManuallyDrop, + pub slab: ManuallyDrop>, +} + +#[derive(Debug)] +pub struct BuddyPageMeta { + pub next: Option>, + pub prev: Option>, + pub order: Option, +} + +impl const Default for BuddyPageMeta { + fn default() -> Self { + Self { + next: None, + prev: None, + order: None, + } + } +} + +impl BuddyPageMeta { + pub fn detach(&mut self) -> Option>> { + let detached = self.next?; // None if there is no page to detach + + self.next = unsafe { detached.as_ref().meta.buddy.next }; + + if let Some(mut next) = self.next { + unsafe { (*next.as_mut().meta.buddy).prev = None } + } + + Some(detached.assign::()) + } + + pub fn attach(&mut self, mut p: NonNull>) { + unsafe { (*p.as_mut().meta.buddy).next = self.next }; + + if 
let Some(mut next) = self.next { + unsafe { + (*next.as_mut().meta.buddy).prev = Some(p.as_unassigned()) + }; + } + + self.next = Some(p.as_unassigned()) + } +} + +#[derive(Debug)] +pub struct SlabPageMeta { + pub owner: NonNull>, + pub freelist: NonNull>, +} diff --git a/kernel/src/memory/unassigned.rs b/kernel/src/memory/unassigned.rs new file mode 100644 index 0000000..8ad4e38 --- /dev/null +++ b/kernel/src/memory/unassigned.rs @@ -0,0 +1,13 @@ +use crate::memory::allocators::slab::traits::Slab; + +pub trait UnassignSlab { + type Target; + + fn as_unassigned(&self) -> Self::Target; +} + +pub trait AssignSlab { + type Target; + + fn assign(&self) -> Self::Target; +} diff --git a/learnix-macros/src/lib.rs b/learnix-macros/src/lib.rs index 696256a..70f8ae0 100644 --- a/learnix-macros/src/lib.rs +++ b/learnix-macros/src/lib.rs @@ -1,7 +1,9 @@ use flag::FlagInput; use proc_macro::TokenStream; use quote::{format_ident, quote}; -use syn::{DeriveInput, parse_macro_input}; +use syn::{ + DeriveInput, LitInt, Token, parse_macro_input, punctuated::Punctuated, +}; mod flag; @@ -18,11 +20,10 @@ pub fn common_address_functions(input: TokenStream) -> TokenStream { pub const fn as_usize(&self) -> usize { self.0 } - pub const unsafe fn as_mut_ptr(&self) -> *mut T { - core::ptr::with_exposed_provenance_mut::(self.0) - } - pub const fn as_ptr(&self) -> *const T { - core::ptr::with_exposed_provenance::(self.0) + pub const fn as_non_null(&self) -> core::ptr::NonNull { + core::ptr::NonNull::new( + core::ptr::with_exposed_provenance_mut::(self.0) + ).expect("Tried to create NonNull from address, found null") } pub const fn is_aligned( &self, @@ -185,6 +186,7 @@ pub fn rwc_flag(input: TokenStream) -> TokenStream { // build identifiers let name_str = name.to_string(); let clear_ident = format_ident!("clear_{}", name_str); + let support_ident = format_ident!("is_{}", name_str); let expanded = quote! { #[inline] @@ -194,6 +196,19 @@ pub fn rwc_flag(input: TokenStream) -> TokenStream { pub const fn #clear_ident(&mut self) { self.0 |= 1 << #bit; } + + + #[inline] + #[allow(dead_code)] + #[allow(unused_attributes)] + /// Checks if the corresponding flag is set + pub fn #support_ident(&self) -> bool { + unsafe { + core::ptr::read_volatile( + self as *const _ as *mut usize + ) & ((1<< #bit) as usize) != 0 + } + } }; expanded.into() @@ -223,3 +238,45 @@ pub fn rw1_flag(input: TokenStream) -> TokenStream { expanded.into() } // ANCHOR_END: rw1_flag + +#[proc_macro] +pub fn generate_generics(input: TokenStream) -> TokenStream { + // Parse the input as a comma-separated list of integers: 8, 16, 32... + let parser = Punctuated::::parse_terminated; + let input = parse_macro_input!(input with parser); + + let mut expanded = quote! {}; + + // initial range for the first item + let mut last_size: usize = 0; + + for lit in input { + let generic_size: usize = lit + .base10_parse() + .expect("Invalid integer format, expected base10"); + + let generic_name = format_ident!("Generic{}", generic_size); + + // minimum size of 8 bytes (usize on 64 bit). + let array_size = generic_size / 8; + + let start = last_size; + let end = generic_size; + + let struct_def = quote! 
{ + #[derive(Debug, Clone, Copy)] + pub struct #generic_name(pub [usize; #array_size]); + + impl Generic for #generic_name { + fn size(&self) -> usize { #generic_size } + const START: usize = #start; + const END: usize = #end; + } + }; + + last_size = generic_size + 1; + expanded.extend(struct_def); + } + + TokenStream::from(expanded) +} diff --git a/shared/common/src/address_types.rs b/shared/common/src/address_types.rs index ca6534d..df176b1 100644 --- a/shared/common/src/address_types.rs +++ b/shared/common/src/address_types.rs @@ -1,3 +1,5 @@ +use core::ptr::NonNull; + #[cfg(target_arch = "x86_64")] use crate::constants::PHYSICAL_MEMORY_OFFSET; use crate::enums::PageTableLevel; @@ -61,6 +63,12 @@ impl const From for PhysicalAddress { #[repr(C)] pub struct VirtualAddress(usize); +impl From> for VirtualAddress { + fn from(value: NonNull) -> Self { + unsafe { VirtualAddress::new_unchecked(value.as_ptr().addr()) } + } +} + impl const From for VirtualAddress { // TODO! Change into new in the future fn from(value: usize) -> Self { @@ -100,26 +108,8 @@ impl VirtualAddress { /// 1 -> index of 1st table // ANCHOR: virtual_nth_pt_index_unchecked pub const fn index_of(&self, level: PageTableLevel) -> usize { - (self.0 >> (39 - 9 * (4 - level as usize))) & 0o777 + (self.0 >> (39 - 9 * (level as usize))) & 0o777 } - - // pub fn translate(&self) -> Option { - // let mut current_table = - // PageTable::current_table(); for i in 0..4 { - // let index = self.rev_nth_index_unchecked(i); - // match - // current_table.entries[index].mapped_table_mut() { - // Ok(table) => current_table = table, - // Err(EntryError::NotATable) => { - // return unsafe { - // Some(current_table.entries[index].mapped_unchecked()) - // }; } - // Err(EntryError::NoMapping) => return - // None, Err(EntryError::Full) => - // unreachable!(), } - // } - // None - // } } impl PhysicalAddress { diff --git a/shared/common/src/bitmap.rs b/shared/common/src/bitmap.rs index 06d8031..1adde6a 100644 --- a/shared/common/src/bitmap.rs +++ b/shared/common/src/bitmap.rs @@ -182,7 +182,7 @@ impl BitMap { BitMap { map: unsafe { slice::from_raw_parts_mut( - map_address.as_mut_ptr::(), + map_address.as_non_null::().as_mut(), map_size, ) }, diff --git a/shared/common/src/constants/addresses.rs b/shared/common/src/constants/addresses.rs index a3e482a..d2abce2 100644 --- a/shared/common/src/constants/addresses.rs +++ b/shared/common/src/constants/addresses.rs @@ -16,6 +16,7 @@ pub const TOP_IDENTITY_PAGE_TABLE_L3_OFFSET: usize = 0xe000; pub const TOP_IDENTITY_PAGE_TABLE_L2_OFFSET: usize = 0xf000; pub const KERNEL_OFFSET: u64 = 0x10000; -pub const PAGE_ALLOCATOR_OFFSET: usize = 0x100000; #[cfg(target_arch = "x86_64")] pub const PHYSICAL_MEMORY_OFFSET: usize = 0xffff800000000000; +#[cfg(target_arch = "x86_64")] +pub const PAGE_ALLOCATOR_OFFSET: usize = PHYSICAL_MEMORY_OFFSET + 0x100000; diff --git a/shared/common/src/enums/ata.rs b/shared/common/src/enums/ata.rs index fe45d37..89fc0e2 100644 --- a/shared/common/src/enums/ata.rs +++ b/shared/common/src/enums/ata.rs @@ -8,5 +8,6 @@ use crate::error::ConversionError; pub enum AtaCommand { Nop = 0, ReadDmaExt = 0x25, + IdentifyPacketDevice = 0xa1, IdentifyDevice = 0xec, } diff --git a/shared/common/src/enums/buddy.rs b/shared/common/src/enums/buddy.rs new file mode 100644 index 0000000..5a50fda --- /dev/null +++ b/shared/common/src/enums/buddy.rs @@ -0,0 +1,37 @@ +use crate::error::ConversionError; +use num_enum::{TryFromPrimitive, UnsafeFromPrimitive}; +use strum::VariantArray; +use 
strum_macros::VariantArray;
+
+pub const BUDDY_MAX_ORDER: usize = BuddyOrder::VARIANTS.len();
+
+#[repr(u8)]
+#[derive(
+    VariantArray,
+    Clone,
+    Copy,
+    PartialEq,
+    Debug,
+    Eq,
+    TryFromPrimitive,
+    UnsafeFromPrimitive,
+)]
+#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))]
+pub enum BuddyOrder {
+    Order0 = 0,
+    Order1 = 1,
+    Order2 = 2,
+    Order3 = 3,
+    Order4 = 4,
+    Order5 = 5,
+    Order6 = 6,
+    Order7 = 7,
+    Order8 = 8,
+    Order9 = 9,
+    Order10 = 10,
+}
+
+impl BuddyOrder {
+    pub const MIN: BuddyOrder = *BuddyOrder::VARIANTS.first().unwrap();
+    pub const MAX: BuddyOrder = *BuddyOrder::VARIANTS.last().unwrap();
+}
diff --git a/shared/common/src/enums/mod.rs b/shared/common/src/enums/mod.rs
index 1466b58..2149e19 100644
--- a/shared/common/src/enums/mod.rs
+++ b/shared/common/src/enums/mod.rs
@@ -1,6 +1,7 @@
 pub mod ahci;
 pub mod ata;
 pub mod bios_interrupts;
+pub mod buddy;
 pub mod cpuid;
 pub mod general;
 pub mod global_descriptor_table;
@@ -16,6 +17,7 @@ pub mod vga;
 pub use ahci::*;
 pub use ata::*;
 pub use bios_interrupts::*;
+pub use buddy::*;
 pub use cpuid::*;
 pub use general::*;
 pub use global_descriptor_table::*;
diff --git a/shared/common/src/enums/paging.rs b/shared/common/src/enums/paging.rs
index 8e41257..9ef30b2 100644
--- a/shared/common/src/enums/paging.rs
+++ b/shared/common/src/enums/paging.rs
@@ -23,10 +23,10 @@ use crate::{
 )]
 #[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))]
 pub enum PageTableLevel {
-    PML4 = 4,
-    PDPT = 3,
+    PML4 = 0,
+    PDPT = 1,
     PD = 2,
-    PT = 1,
+    PT = 3,
 }
 
 impl PageTableLevel {
@@ -67,8 +67,23 @@ impl PageSize {
         }
     }
 
-    pub fn exceeds(&self, table_level: PageTableLevel) -> bool {
-        (3 - *self as usize) <= table_level as usize
+    /// Determine whether a page of this size can be allocated at the
+    /// given [`PageTableLevel`].
+    ///
+    /// # Example
+    /// A big (2 MiB) page can be allocated on PML4, PDPT and PD, so this
+    /// will return `true` for those; it cannot be allocated on `PT`, so
+    /// for `PT` it will return `false`.
+    pub fn allocatable_at(&self, table_level: PageTableLevel) -> bool {
+        (*self as usize + 1) >= table_level as usize
+    }
+
+    /// The minimal page table level that this page size can exist on.
+ pub fn min_level(&self) -> PageTableLevel { + match self { + PageSize::Regular => PageTableLevel::PT, + PageSize::Big => PageTableLevel::PD, + PageSize::Huge => PageTableLevel::PDPT, + } } /// Determines the appropriate `PageSizeAlignment` for a diff --git a/shared/common/src/enums/pic8259.rs b/shared/common/src/enums/pic8259.rs index bace8f9..d701e76 100644 --- a/shared/common/src/enums/pic8259.rs +++ b/shared/common/src/enums/pic8259.rs @@ -34,7 +34,7 @@ pub enum CascadedPicInterruptLine { Irq7 = 1 << 7, Irq8 = 1 << 8, Irq9 = 1 << 9, - Irq10 = 1 << 10, + Ahci = 1 << 10, Irq11 = 1 << 11, Irq12 = 1 << 12, Irq13 = 1 << 13, diff --git a/shared/common/src/late_init.rs b/shared/common/src/late_init.rs new file mode 100644 index 0000000..a2ba4a5 --- /dev/null +++ b/shared/common/src/late_init.rs @@ -0,0 +1,30 @@ +use core::{ + mem::MaybeUninit, + ops::{Deref, DerefMut}, +}; + +pub struct LateInit(MaybeUninit); + +impl LateInit { + pub const fn uninit() -> LateInit { + LateInit::(MaybeUninit::uninit()) + } + + pub const fn write(&mut self, val: T) { + self.0.write(val); + } +} + +impl Deref for LateInit { + type Target = T; + + fn deref(&self) -> &Self::Target { + unsafe { self.0.assume_init_ref() } + } +} + +impl DerefMut for LateInit { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { self.0.assume_init_mut() } + } +} diff --git a/shared/common/src/lib.rs b/shared/common/src/lib.rs index 7bbbbb8..4923b17 100644 --- a/shared/common/src/lib.rs +++ b/shared/common/src/lib.rs @@ -13,7 +13,10 @@ pub mod bitmap; pub mod constants; pub mod enums; pub mod error; +pub mod late_init; pub mod ring_buffer; +pub mod volatile; + struct FakeAllocator; unsafe impl core::alloc::GlobalAlloc for FakeAllocator { diff --git a/shared/common/src/ring_buffer.rs b/shared/common/src/ring_buffer.rs index 6535b6e..d52b3f2 100644 --- a/shared/common/src/ring_buffer.rs +++ b/shared/common/src/ring_buffer.rs @@ -17,7 +17,7 @@ impl RingBuffer { write_idx: 0, buffer: unsafe { slice::from_raw_parts_mut( - buffer_address.as_mut_ptr::(), + buffer_address.as_non_null::().as_mut(), length.get(), ) }, diff --git a/shared/common/src/volatile.rs b/shared/common/src/volatile.rs new file mode 100644 index 0000000..296a209 --- /dev/null +++ b/shared/common/src/volatile.rs @@ -0,0 +1,49 @@ +use core::fmt::Debug; + +#[derive(Copy)] +#[repr(transparent)] +pub struct Volatile(T); + +impl Volatile { + pub fn new(vol: T) -> Volatile { + Volatile(vol) + } + + /// Read from the hardware register + pub fn read(&self) -> T { + unsafe { core::ptr::read_volatile(&self.0) } + } + + /// Write to the hardware register + pub fn write(&mut self, value: T) { + unsafe { core::ptr::write_volatile(&mut self.0 as *mut T, value) } + } +} + +impl Clone for Volatile { + fn clone(&self) -> Self { + Volatile(self.read()) + } +} + +impl Debug for Volatile { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.write_fmt(format_args!("{:?}", &self.0 as *const T)) + } +} + +#[macro_export] +macro_rules! read_volatile { + ($arg: expr) => { + unsafe { core::ptr::read_volatile(core::ptr::addr_of!($arg)) } + }; +} + +#[macro_export] +macro_rules! 
write_volatile { + ($arg: expr, $val: expr) => { + unsafe { + core::ptr::write_volatile(core::ptr::addr_of_mut!($arg), $val) + } + }; +} diff --git a/shared/cpu_utils/Cargo.toml b/shared/cpu_utils/Cargo.toml index 97d0fca..d6e005f 100644 --- a/shared/cpu_utils/Cargo.toml +++ b/shared/cpu_utils/Cargo.toml @@ -11,3 +11,5 @@ derive_more = { version = "2.0.1", default-features = false, features = [ thiserror = { version = "2.0.12", default-features = false } extend = "1.2.0" learnix-macros = { path = "../../learnix-macros" } +strum_macros = { version = "0.27", default-features = false } +strum = { version = "0.27", default-features = false } diff --git a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs index 07c727f..7043601 100644 --- a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs +++ b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs @@ -106,14 +106,20 @@ impl InterruptDescriptorTable { gdt.load_tss(tss); unsafe { ptr::write_volatile( - base_address.as_mut_ptr::(), + base_address + .as_non_null::() + .as_ptr(), InterruptDescriptorTable { interrupts: [const { InterruptDescriptorTableEntry::missing() }; 256], }, ); - uninit.write(&mut *base_address.as_mut_ptr::()); + uninit.write( + base_address + .as_non_null::() + .as_mut(), + ); uninit.assume_init_ref().load(); } } diff --git a/shared/cpu_utils/src/structures/paging/entry_flags.rs b/shared/cpu_utils/src/structures/paging/entry_flags.rs index d989265..bc10733 100644 --- a/shared/cpu_utils/src/structures/paging/entry_flags.rs +++ b/shared/cpu_utils/src/structures/paging/entry_flags.rs @@ -44,7 +44,7 @@ macro_rules! table_entry_flags { // ANCHOR: page_entry_flags /// A wrapper for `PageTableEntry` flags for easier use -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub struct PageEntryFlags(pub u64); // ANCHOR_END: page_entry_flags diff --git a/shared/cpu_utils/src/structures/paging/init.rs b/shared/cpu_utils/src/structures/paging/init.rs index 9a0d3c8..4b8a6a3 100644 --- a/shared/cpu_utils/src/structures/paging/init.rs +++ b/shared/cpu_utils/src/structures/paging/init.rs @@ -54,8 +54,7 @@ pub fn enable() -> Option<()> { ); identity_page_table_l2.entries[0].map_unchecked( PhysicalAddress::new_unchecked(0), - /// TODO PATCH REMOVE LATER - PageEntryFlags::huge_io_page_flags(), + PageEntryFlags::huge_page_flags(), ); } // ANCHOR_END: setup_page_tables diff --git a/shared/cpu_utils/src/structures/paging/page_table.rs b/shared/cpu_utils/src/structures/paging/page_table.rs index 5d597f3..9b7739b 100644 --- a/shared/cpu_utils/src/structures/paging/page_table.rs +++ b/shared/cpu_utils/src/structures/paging/page_table.rs @@ -1,11 +1,11 @@ -use core::ptr; +use core::ptr::{self, NonNull}; use crate::{registers::cr3, structures::paging::PageTableEntry}; use common::{ address_types::VirtualAddress, constants::{PAGE_DIRECTORY_ENTRIES, REGULAR_PAGE_ALIGNMENT}, enums::{PageSize, PageTableLevel}, - error::{EntryError, TableError}, + error::EntryError, }; // ANCHOR: page_table @@ -17,6 +17,14 @@ pub struct PageTable { } // ANCHOR_END: page_table +#[derive(Debug)] +pub enum EntryIndex { + Entry(&'static PageTableEntry), + Index(usize), + PageDoesNotFit, + OutOfEntries, +} + // ANCHOR: page_table_impl impl PageTable { // ANCHOR: page_table_empty @@ -40,35 +48,25 @@ impl PageTable { #[inline] pub unsafe fn empty_from_ptr( page_table_ptr: VirtualAddress, - ) -> Option<&'static mut PageTable> { + ) -> Option> { if 
diff --git a/shared/cpu_utils/Cargo.toml b/shared/cpu_utils/Cargo.toml
index 97d0fca..d6e005f 100644
--- a/shared/cpu_utils/Cargo.toml
+++ b/shared/cpu_utils/Cargo.toml
@@ -11,3 +11,5 @@ derive_more = { version = "2.0.1", default-features = false, features = [
 thiserror = { version = "2.0.12", default-features = false }
 extend = "1.2.0"
 learnix-macros = { path = "../../learnix-macros" }
+strum_macros = { version = "0.27", default-features = false }
+strum = { version = "0.27", default-features = false }
diff --git a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs
index 07c727f..7043601 100644
--- a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs
+++ b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs
@@ -106,14 +106,20 @@ impl InterruptDescriptorTable {
         gdt.load_tss(tss);
         unsafe {
             ptr::write_volatile(
-                base_address.as_mut_ptr::<InterruptDescriptorTable>(),
+                base_address
+                    .as_non_null::<InterruptDescriptorTable>()
+                    .as_ptr(),
                 InterruptDescriptorTable {
                     interrupts: [const {
                         InterruptDescriptorTableEntry::missing()
                     }; 256],
                 },
            );
-            uninit.write(&mut *base_address.as_mut_ptr::<InterruptDescriptorTable>());
+            uninit.write(
+                base_address
+                    .as_non_null::<InterruptDescriptorTable>()
+                    .as_mut(),
+            );
             uninit.assume_init_ref().load();
         }
     }
diff --git a/shared/cpu_utils/src/structures/paging/entry_flags.rs b/shared/cpu_utils/src/structures/paging/entry_flags.rs
index d989265..bc10733 100644
--- a/shared/cpu_utils/src/structures/paging/entry_flags.rs
+++ b/shared/cpu_utils/src/structures/paging/entry_flags.rs
@@ -44,7 +44,7 @@ macro_rules! table_entry_flags {
 
 // ANCHOR: page_entry_flags
 /// A wrapper for `PageTableEntry` flags for easier use
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Copy)]
 pub struct PageEntryFlags(pub u64);
 // ANCHOR_END: page_entry_flags
 
diff --git a/shared/cpu_utils/src/structures/paging/init.rs b/shared/cpu_utils/src/structures/paging/init.rs
index 9a0d3c8..4b8a6a3 100644
--- a/shared/cpu_utils/src/structures/paging/init.rs
+++ b/shared/cpu_utils/src/structures/paging/init.rs
@@ -54,8 +54,7 @@ pub fn enable() -> Option<()> {
         );
         identity_page_table_l2.entries[0].map_unchecked(
             PhysicalAddress::new_unchecked(0),
-            /// TODO PATCH REMOVE LATER
-            PageEntryFlags::huge_io_page_flags(),
+            PageEntryFlags::huge_page_flags(),
         );
     }
     // ANCHOR_END: setup_page_tables
diff --git a/shared/cpu_utils/src/structures/paging/page_table.rs b/shared/cpu_utils/src/structures/paging/page_table.rs
index 5d597f3..9b7739b 100644
--- a/shared/cpu_utils/src/structures/paging/page_table.rs
+++ b/shared/cpu_utils/src/structures/paging/page_table.rs
@@ -1,11 +1,11 @@
-use core::ptr;
+use core::ptr::{self, NonNull};
 
 use crate::{registers::cr3, structures::paging::PageTableEntry};
 use common::{
     address_types::VirtualAddress,
     constants::{PAGE_DIRECTORY_ENTRIES, REGULAR_PAGE_ALIGNMENT},
     enums::{PageSize, PageTableLevel},
-    error::{EntryError, TableError},
+    error::EntryError,
 };
 
 // ANCHOR: page_table
@@ -17,6 +17,14 @@ pub struct PageTable {
 }
 // ANCHOR_END: page_table
 
+#[derive(Debug)]
+pub enum EntryIndex {
+    Entry(&'static PageTableEntry),
+    Index(usize),
+    PageDoesNotFit,
+    OutOfEntries,
+}
+
 // ANCHOR: page_table_impl
 impl PageTable {
     // ANCHOR: page_table_empty
@@ -40,35 +48,25 @@ impl PageTable {
     #[inline]
     pub unsafe fn empty_from_ptr(
         page_table_ptr: VirtualAddress,
-    ) -> Option<&'static mut PageTable> {
+    ) -> Option<NonNull<PageTable>> {
         if !page_table_ptr.is_aligned(REGULAR_PAGE_ALIGNMENT) {
             return None;
         }
         unsafe {
             ptr::write_volatile(
-                page_table_ptr.as_mut_ptr::<PageTable>(),
+                page_table_ptr.as_non_null::<PageTable>().as_ptr(),
                 PageTable::empty(),
             );
-            Some(&mut *page_table_ptr.as_mut_ptr::<PageTable>())
+            Some(page_table_ptr.as_non_null::<PageTable>())
         }
     }
     // ANCHOR_END: page_table_empty_from_ptr
 
     // ANCHOR: page_table_current_table
     #[inline]
-    pub fn current_table() -> &'static PageTable {
-        unsafe {
-            &*core::ptr::with_exposed_provenance(cr3::read() as usize)
-        }
-    }
-
-    #[inline]
-    pub fn current_table_mut() -> &'static mut PageTable {
-        unsafe {
-            &mut *core::ptr::with_exposed_provenance_mut(
-                cr3::read() as usize
-            )
-        }
+    pub fn current_table() -> NonNull<PageTable> {
+        NonNull::new(cr3::read() as usize as *mut PageTable)
+            .expect("Page table pointer is not present in cr3, found NULL")
     }
     // ANCHOR_END: page_table_current_table
 
@@ -88,70 +86,28 @@ impl PageTable {
     /// Returns the index of the found entry and the page table if found.
     // Anchor: page_table_try_fetch_table
     #[cfg(target_arch = "x86_64")]
-    fn try_fetch_table(
-        &self,
+    pub fn try_fetch_table(
+        &'static self,
         start_at: usize,
         table_level: PageTableLevel,
         page_size: PageSize,
-    ) -> (usize, Option<&PageTable>) {
+    ) -> EntryIndex {
+        if !page_size.allocatable_at(table_level) {
+            return EntryIndex::PageDoesNotFit;
+        }
+
         for (i, entry) in self.entries.iter().enumerate().skip(start_at) {
             match entry.mapped_table() {
-                Ok(v) => {
-                    if page_size.exceeds(table_level) {
-                        continue;
-                    }
-                    return (i, Some(v));
+                Ok(_) => {
+                    return EntryIndex::Entry(entry);
                 }
                 Err(EntryError::NoMapping) => {
-                    return (i, None);
+                    return EntryIndex::Index(i);
                 }
                 Err(EntryError::NotATable) => continue,
             }
         }
-        (PAGE_DIRECTORY_ENTRIES, None)
-    }
-
-    /// Find an avavilable page in the given size.
-    // ANCHOR: page_table_find_available_page
-    #[cfg(target_arch = "x86_64")]
-    pub fn find_available_page(
-        page_size: PageSize,
-    ) -> Result<VirtualAddress, TableError> {
-        const LEVELS: usize = 4;
-        let mut level_indices = [0usize; LEVELS];
-        let mut page_tables = [Self::current_table(); LEVELS];
-        let mut current_level = PageTableLevel::PML4;
-        loop {
-            let current_table = page_tables[current_level as usize];
-
-            let next_table = match current_table.try_fetch_table(
-                level_indices[current_level as usize],
-                current_level,
-                page_size,
-            ) {
-                (PAGE_DIRECTORY_ENTRIES, None) => {
-                    current_level = current_level.prev()?;
-                    level_indices[current_level as usize] += 1;
-                    continue;
-                }
-                (i, Some(table)) => {
-                    level_indices[current_level as usize] = i;
-                    table
-                }
-                (i, None) => {
-                    level_indices[current_level as usize] = i;
-                    return Ok(VirtualAddress::from_indices(
-                        level_indices,
-                    ));
-                }
-            };
-            let next_level = current_level
-                .next()
-                .expect("Can't go next on a first level table");
-            page_tables[next_level as usize] = next_table;
-            level_indices[next_level as usize] += 1;
-        }
+        EntryIndex::OutOfEntries
     }
-    // ANCHOR_END: page_table_find_available_page
 }
 // ANCHOR_END: page_table_impl
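Note on `page_table.rs`: `try_fetch_table` now reports one of four explicit outcomes instead of the old `(usize, Option<&PageTable>)` tuple, which also lets the hard-to-read tuple matching in `find_available_page` be dropped. A sketch of how a caller might drive one step of the walk — `PageTable` and `EntryIndex` are assumed in scope, the exact semantics of `allocatable_at` are inferred from this patch, and the match arms are placeholders:

    use common::enums::{PageSize, PageTableLevel};

    fn walk_step() {
        // Safety: page tables are identity mapped and live for the
        // kernel's lifetime, so promoting to &'static is sound here.
        let pml4: &'static PageTable =
            unsafe { PageTable::current_table().as_ref() };
        match pml4.try_fetch_table(0, PageTableLevel::PML4, PageSize::Regular) {
            // A mapped lower-level table: descend via entry.mapped_table().
            EntryIndex::Entry(_entry) => {}
            // A free slot: a new table or mapping can go at this index.
            EntryIndex::Index(_i) => {}
            // The requested size can never be placed via this level.
            EntryIndex::PageDoesNotFit => {}
            // Every slot from start_at onward is unusable.
            EntryIndex::OutOfEntries => {}
        }
    }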
diff --git a/shared/cpu_utils/src/structures/paging/page_table_entry.rs b/shared/cpu_utils/src/structures/paging/page_table_entry.rs
index dbbaac2..6e6c990 100644
--- a/shared/cpu_utils/src/structures/paging/page_table_entry.rs
+++ b/shared/cpu_utils/src/structures/paging/page_table_entry.rs
@@ -1,3 +1,6 @@
+#[cfg(target_arch = "x86_64")]
+use core::ptr::NonNull;
+
 use common::{
     address_types::PhysicalAddress,
     constants::{ENTRY_ADDRESS_MASK, REGULAR_PAGE_ALIGNMENT},
@@ -141,14 +144,12 @@ impl PageTableEntry {
     ///
     /// This method assumes all page tables are identity
     /// mapped.
-    // ANCHOR: page_table_entry_mapped_table_mut
+    // ANCHOR: page_table_entry_mapped_table
     #[cfg(target_arch = "x86_64")]
     #[allow(clippy::mut_from_ref)]
-    pub fn mapped_table_mut(&self) -> Result<&mut PageTable, EntryError> {
+    pub fn mapped_table(&self) -> Result<NonNull<PageTable>, EntryError> {
         // first check if the entry is mapped.
-        let pt = unsafe {
-            &mut *self.mapped()?.translate().as_mut_ptr::<PageTable>()
-        };
+        let pt = self.mapped()?.translate().as_non_null::<PageTable>();
         // then check if it is a table.
         if !self.is_huge_page() && self.is_table() {
             Ok(pt)
@@ -156,20 +157,10 @@ impl PageTableEntry {
             Err(EntryError::NotATable)
         }
     }
-    // ANCHOR_END: page_table_entry_mapped_table_mut
+    // ANCHOR_END: page_table_entry_mapped_table
 
-    // ANCHOR: page_table_entry_mapped_table
-    #[cfg(target_arch = "x86_64")]
-    pub fn mapped_table(&self) -> Result<&PageTable, EntryError> {
-        // first check if the entry is mapped.
-        let pt =
-            unsafe { &*self.mapped()?.translate().as_ptr::<PageTable>() };
-        // then check if it is a table.
-        if !self.is_huge_page() && self.is_table() {
-            Ok(pt)
-        } else {
-            Err(EntryError::NotATable)
-        }
+    pub fn table_index(&self) -> usize {
+        let table_offset = self as *const _ as usize & ((1 << 12) - 1);
+        table_offset / size_of::<PageTableEntry>()
     }
-    // ANCHOR_END: page_table_entry_mapped_table
 }
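Note on `table_index`: it relies on page tables being 4 KiB aligned, so masking the entry's own address with `(1 << 12) - 1` yields its byte offset inside the containing table, and dividing by the 8-byte entry size yields its slot in `0..512`. A worked example of the arithmetic (the address is made up):

    // An entry at address 0x7ffe_3038 inside its 4 KiB-aligned table:
    let offset = 0x7ffe_3038_usize & ((1 << 12) - 1); // = 0x38 = 56
    assert_eq!(offset / 8, 7); // size_of::<PageTableEntry>() == 8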