diff --git a/.cargo/config.toml b/.cargo/config.toml index 52ed4a0..57eb0d8 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -4,3 +4,12 @@ build-std-features = ["compiler-builtins-mem"] unstable-options = true [build] rustflags = ["-Zpolonius"] + +[target.'cfg(target_os = "windows")'] +runner = ".\\build\\runner\\runner.bat" + +[target.'cfg(target_os = "macos")'] +runner = ["sh", "./build/runner/runner.sh"] + +[target.'cfg(target_os = "linux")'] +runner = ["sh", "./build/runner/runner.sh"] diff --git a/.vscode/settings.json b/.vscode/settings.json index 932c3f4..8d53b99 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -9,51 +9,180 @@ ], "rust-analyzer.cargo.target": "x86_64-unknown-linux-gnu", "cSpell.words": [ + "abar", + "acmd", + "ACPI", "ACPINVS", + "adse", + "AHCI", + "alhd", + "alpe", "APIC", + "apst", + "apste", "avavilable", "BIST", + "BOHC", "bootable", "capslock", "cardbus", + "cccs", + "cfis", + "clbu", + "CLFSH", + "CMOV", + "CNXT", + "colorcode", + "cpde", + "cpds", "Cpuid", + "ctba", + "ctbau", + "dbau", + "desciprtor", + "deto", "devsel", + "devslp", + "dhre", + "dhrs", + "dlae", + "dmpe", + "dmps", + "DPIO", + "dsfis", + "DTES", + "dwords", "EFER", + "EHCI", + "EISA", + "fbscp", + "fbss", + "FDDI", + "firewire", + "freelist", + "FXSR", + "Gameport", + "GPIB", + "hbde", + "hbds", + "hbfe", + "hbfs", + "Hotplug", + "hpcp", + "HPET", + "ilog", + "infe", + "infs", "inval", "iopl", + "ipme", + "IPMIPI", + "ipms", + "IRDA", + "ISDN", + "kfree", + "kmalloc", "lctrl", "Learnix", "lgdt", "ljmp", "lshift", + "mdat", + "metavar", + "mmap", + "MOVBE", + "mpsp", + "mpss", + "mrsm", + "MTRR", + "Multiport", "nomem", + "nonmax", "nostack", + "notif", + "NVME", + "NVMHCI", + "nvmp", + "OHCI", "okprintln", + "OSXSAVE", "outb", "outl", + "PCID", + "PCLMUL", + "PDCM", "PDPT", + "peekable", + "PICMG", + "PICPI", + "PIIX", + "POPCNT", "popfq", + "prce", + "prcs", + "prdb", + "Prdt", + "prdtl", "prefetchable", + "psfis", + "purporse", 
"pushfq", "rdmsr", + "RDRAND", + "realtek", "repr", + "rfis", "rflags", "rodata", "rshift", + "RTCPI", "rustc", + "sact", + "sadm", + "safte", + "salp", "scancode", + "sctl", + "sdbe", + "sdbfis", + "SDBG", + "sdbs", + "SERCOS", "serr", + "sgpio", "SMAP", + "SMIC", + "smps", + "snqc", + "sntf", "sofware", + "sooe", + "ssntf", + "SSSE", + "ssts", "superkey", + "tfee", + "tfes", "thiserror", + "udma", + "ufis", + "UHCI", + "Unassignment", "Uninit", + "USBPI", "virt", + "virtio", "wrmsr", - "xchg" + "xchg", + "XHCI", + "XSAVE", + "XTPR" ], - "rust-analyzer.inlayHints.chainingHints.enable": false, + "rust-analyzer.inlayHints.chainingHints.enable": true, "rust-analyzer.check.command": "clippy", + "rust-analyzer.check.extraArgs": [ + "--release" + ], "rust-analyzer.cargo.extraEnv": { "RUSTFLAGS": "-Zpolonius" }, diff --git a/.zed/settings.json b/.zed/settings.json index 431559e..9f49148 100644 --- a/.zed/settings.json +++ b/.zed/settings.json @@ -3,13 +3,61 @@ // For a full list of overridable settings, and general information on folder-specific settings, // see the documentation: https://zed.dev/docs/configuring-zed#settings-files { + "minimap": { + "display_in": "active_editor", + "show": "always", + }, + "inlay_hints": { + "show_background": true, + "enabled": true, + }, + "relative_line_numbers": "enabled", + "show_edit_predictions": false, + "terminal": {}, + "lsp_document_colors": "inlay", + "indent_guides": { + "coloring": "fixed", + "background_coloring": "disabled", + }, + "colorize_brackets": true, + "features": { + "edit_prediction_provider": "zed", + }, + "telemetry": { + "diagnostics": true, + "metrics": true, + }, + "vim_mode": true, + "icon_theme": { + "mode": "dark", + "light": "Zed (Default)", + "dark": "Material Icon Theme", + }, + "base_keymap": "VSCode", + "ui_font_size": 16, + "buffer_font_size": 15, + "theme": { + "mode": "dark", + "light": "One Dark", + "dark": "One Dark Pro", + }, "lsp": { "rust-analyzer": { "initialization_options": { - "cargo": 
{ - "target": "x86_64-unknown-linux-none" - } - } - } - } + "inlayHints": { + "maxLength": null, + "lifetimeElisionHints": { + "useParameterNames": true, + }, + "closureReturnTypeHints": { + "enable": "always", + }, + }, + "check": { + "command": "clippy", + "extraArgs": ["--release"], + }, + }, + }, + }, } diff --git a/build/build.rs b/build/build.rs index 1124c9d..3c7866b 100644 --- a/build/build.rs +++ b/build/build.rs @@ -56,7 +56,7 @@ fn main() -> io::Result<()> { "targets/32bit_target.json", "release", ); - build_stage("../kernel", "targets/64bit_target.json", &profile); + build_stage("../kernel", "targets/64bit_target.json", "release"); // Combine binaries into one image let input_dir = PathBuf::from("bin"); diff --git a/build/runner/runner.bat b/build/runner/runner.bat new file mode 100644 index 0000000..fba6a2b --- /dev/null +++ b/build/runner/runner.bat @@ -0,0 +1,8 @@ +@echo off + + +qemu-system-x86_64 ^ + -M q35 ^ + -drive id=disk0,file=build/image.bin,if=none,format=raw ^ + -device ide-hd,drive=disk0,bus=ide.0,rotation_rate=1 ^ + -monitor stdio diff --git a/build/runner/runner.sh b/build/runner/runner.sh new file mode 100644 index 0000000..fbc251d --- /dev/null +++ b/build/runner/runner.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +qemu-system-x86_64 \ + -M q35 \ + -drive id=disk0,format=raw,file=build/image.bin,if=none \ + -drive id=disk1,format=raw,file=build/bin/first_stage,if=none \ + -device ide-hd,drive=disk0,bus=ide.0 \ + -device ide-hd,drive=disk1,bus=ide.1 \ + -monitor stdio diff --git a/build/targets/64bit_target.json b/build/targets/64bit_target.json index 1ae7434..7d8c39d 100644 --- a/build/targets/64bit_target.json +++ b/build/targets/64bit_target.json @@ -13,7 +13,12 @@ "position-independent-executables": true, "relro-level": "off", "stack-probes": { - "kind": "call" + "kind": "inline-or-call", + "min-llvm-version-for-inline": [ + 16, + 0, + 0 + ] }, "static-position-independent-executables": true, "target-pointer-width": 64, diff --git 
a/kernel/Cargo.toml b/kernel/Cargo.toml index 9671118..36e6781 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -9,5 +9,11 @@ cpu_utils = { path = "../shared/cpu_utils" } derive_more = { version = "2.0.1", default-features = false, features = [ "full", ] } +num_enum = { version = "0.7.5", default-features = false, features = [ + "complex-expressions", +] } extend = "1.2.0" learnix-macros = { path = "../learnix-macros" } +strum_macros = { version = "0.27", default-features = false } +strum = { version = "0.27", default-features = false } +nonmax = { version = "0.5.5", default-features = false } diff --git a/kernel/src/drivers/ata/ahci/fis.rs b/kernel/src/drivers/ata/ahci/fis.rs new file mode 100644 index 0000000..3542819 --- /dev/null +++ b/kernel/src/drivers/ata/ahci/fis.rs @@ -0,0 +1,285 @@ +use core::{ascii::Char, fmt::Debug, num::NonZero}; + +use common::{ + enums::{AtaCommand, FisType}, + volatile::Volatile, +}; +use learnix_macros::{flag, ro_flag}; + +#[repr(C, align(4))] +#[derive(Clone, Copy, Debug)] +pub struct RegisterH2D { + fis_type: Volatile, + pm_flags: Volatile, + command: Volatile, + features: Volatile, + lba1: Volatile, + lba2: Volatile, + lba3: Volatile, + device: Volatile, + lba4: Volatile, + lba5: Volatile, + lba6: Volatile, + features_ext: Volatile, + sector_count: Volatile, + sector_count_ext: Volatile, + _reserved0: u8, + control: Volatile, + _reserved1: [u8; 4], +} + +impl RegisterH2D { + pub fn new( + pm_flags: u8, + command: AtaCommand, + features: u16, + lba: u64, + device: u8, + sector_count: u16, + control: u8, + ) -> RegisterH2D { + let features_low = Volatile::new(features as u8); + let features_ext = Volatile::new((features >> 8) as u8); + let lba1 = Volatile::new(lba as u8); + let lba2 = Volatile::new((lba >> 8) as u8); + let lba3 = Volatile::new((lba >> 16) as u8); + let lba4 = Volatile::new((lba >> 24) as u8); + let lba5 = Volatile::new((lba >> 32) as u8); + let lba6 = Volatile::new((lba >> 40) as u8); + let 
sector_count_low = Volatile::new(sector_count as u8); + let sector_count_ext = Volatile::new((sector_count >> 8) as u8); + RegisterH2D { + fis_type: Volatile::new(FisType::RegisterFisHost2Device), + pm_flags: Volatile::new(pm_flags), + command: Volatile::new(command), + features: features_low, + lba1, + lba2, + lba3, + device: Volatile::new(device), + lba4, + lba5, + lba6, + features_ext, + sector_count: sector_count_low, + sector_count_ext, + _reserved0: 0, + control: Volatile::new(control), + _reserved1: [0; 4], + } + } +} + +#[repr(C, align(4))] +#[derive(Clone, Copy, Debug)] +pub struct RegisterD2H { + fis_type: Volatile, + pm_flags: Volatile, + status: Volatile, + error: Volatile, + lba1: Volatile, + lba2: Volatile, + lba3: Volatile, + device: Volatile, + lba4: Volatile, + lba5: Volatile, + lba6: Volatile, + _reserved0: u8, + sector_count: Volatile, + sector_count_ext: Volatile, + _reserved1: [u8; 6], +} + +impl RegisterD2H {} + +#[repr(C, align(4))] +#[derive(Clone, Copy, Debug)] +pub struct DmaActivateD2H { + fis_type: Volatile, + pm_flags: Volatile, + _reserved: [u8; 2], +} + +/// Bidirectional +#[repr(C, align(4))] +#[derive(Clone, Copy, Debug)] +pub struct DmaSetup { + fis_type: Volatile, + pm_flags: Volatile, + _reserved0: [u8; 2], + dma_buffer_id_lower: Volatile, + dma_buffer_id_upper: Volatile, + _reserved1: u32, + dma_buffer_offset: Volatile, + dma_transfer_count: Volatile, + _reserved: u32, +} + +/// Bidirectional +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct BistActivate { + fis_type: Volatile, + pm_flags: Volatile, + pattern_def: Volatile, + _reserved: u8, + data1: Volatile, + data2: Volatile, + data3: Volatile, + data4: Volatile, +} + +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct PioSetupD2H { + fis_type: Volatile, + pm_flags: Volatile, + status: Volatile, + error: Volatile, + lba1: Volatile, + lba2: Volatile, + lba3: Volatile, + device: Volatile, + lba4: Volatile, + lba5: Volatile, + lba6: Volatile, + _reserved0: u8, + 
sector_count: Volatile, + sector_count_exp: Volatile, + _reserved1: u8, + estatus: Volatile, + transfer_count: Volatile, + _reserved2: u16, +} + +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct Data { + fis_type: Volatile, + pm_port: Volatile, + _reserved0: [u8; 2], + data: Volatile<[u32; SIZE]>, +} + +#[repr(C)] +#[derive(Clone, Copy, Debug)] +pub struct SetDeviceBits { + fis_type: Volatile, + pm_port: Volatile, + status: Volatile, + error: Volatile, + _reserved: u32, +} + +impl SetDeviceBits { + pub fn status_low(&self) -> u8 { + self.status.read() & !0x7 + } + + pub fn status_high(&self) -> u8 { + (self.status.read() >> 4) & !0x7 + } +} + +#[repr(C)] +pub union Fis { + pub h2d: RegisterH2D, + pub d2h: RegisterD2H, + pub dma_activate: DmaActivateD2H, + pub dma_setup: DmaSetup, + pub bist: BistActivate, + pub pio_setup: PioSetupD2H, + pub set_device_bits: SetDeviceBits, + pub raw: [u8; 64], +} + +impl Default for Fis { + fn default() -> Self { + Fis { raw: [0; 64] } + } +} + +pub struct GeneralInfo(u16); + +impl GeneralInfo { + ro_flag!(non_magnetic, 15); + ro_flag!(removable_media, 7); + ro_flag!(not_removable_media, 6); +} + +impl Debug for GeneralInfo { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + writeln!(f, "Non Magnetic: {:?}", self.is_non_magnetic())?; + writeln!(f, "Removable Media: {:?}", self.is_removable_media())?; + writeln!( + f, + "Not Removable Media: {:?}", + self.is_not_removable_media() + ) + } +} + +pub struct DeviceCapabilities(u16); + +impl DeviceCapabilities { + ro_flag!(lba_dma_support, 10); +} + +impl Debug for DeviceCapabilities { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + writeln!(f, "LBA & DMA Support: {:?},", self.is_lba_dma_support()) + } +} + +pub struct ValidFields(u16); + +impl ValidFields { + ro_flag!(valid_54_58, 0); + ro_flag!(valid_64_70, 1); +} + +#[derive(Debug)] +#[repr(C, align(512))] +pub struct IdentityPacketData { + pub info: GeneralInfo, + pub cylinders: 
u16, + _reserved0: u16, + pub heads: u16, + _vendor0: [u16; 2], + pub sectors: u16, + _vendor1: [u16; 3], + pub serial_number: [Char; 20], + _vendor2: [u16; 3], + /// Firmware revision in ASCII Characters + pub firmware_rev: [Char; 8], + /// Model number in ASCII Characters + pub model_num: [Char; 40], + pub max_sectors_rw_multiple: u8, + pub _vendor3: u8, + _reserved1: u16, + pub capabilities: u16, + _reserved9: u16, + pub pio_data_transfer_time: u16, + pub dma_data_transfer_time: u16, + pub valid_fields: u16, + pub cur_cylinders: u16, + pub cur_heads: u16, + pub cur_sectors: u16, + pub capacity_sectors: [u16; 2], + pub _reserved10: u16, + pub lba_total_sectors_28: [u16; 2], + // _reserved2: [u16; 19], + // pub major_version: u16, + // pub minor_version: u16, + + // pub command_sets_supported: [u16; 3], + // pub command_sets_enabled: [u16; 3], + // pub udma_modes: u16, + // pub lba_total_sectors_48: u64, + // _reserved4: [u16; 113], // Words 169-206 + // pub physical_logical_sector_size: u16, // Word 209 + // _reserved5: [u16; 7], // Words 210-216 + // pub nominal_media_rotation_rate: u16, /* Word 217 (The SSD vs + // HDDkey) + // * _reserved6: [u16; 40], */ +} diff --git a/kernel/src/drivers/ata/ahci/hba.rs b/kernel/src/drivers/ata/ahci/hba.rs new file mode 100644 index 0000000..4fcda72 --- /dev/null +++ b/kernel/src/drivers/ata/ahci/hba.rs @@ -0,0 +1,1330 @@ +/// AHCI implementation for the learnix operating system +/// +/// Implemented directly from https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/serial-ata-ahci-spec-rev1-3-1.pdf +extern crate alloc; + +use core::{fmt::Debug, num::NonZero, panic, ptr::NonNull}; + +use common::{ + address_types::PhysicalAddress, + constants::{ + PHYSICAL_MEMORY_OFFSET, REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, + }, + enums::{ + AtaCommand, Color, DeviceDetection, DeviceType, + InterfaceCommunicationControl, InterfaceInitialization, + InterfacePowerManagement, InterfaceSpeed, + 
InterfaceSpeedRestriction, PageSize, + }, + error::{AhciError, ConversionError, DiagnosticError, HbaError}, + read_volatile, + volatile::Volatile, + write_volatile, +}; +use cpu_utils::structures::paging::PageEntryFlags; +use learnix_macros::{flag, ro_flag, rw1_flag, rwc_flag}; +use num_enum::UnsafeFromPrimitive; +use strum::IntoEnumIterator; + +use crate::{ + alloc_pages, + drivers::{ + ata::ahci::{ + DmaSetup, Fis, IdentityPacketData, PioSetupD2H, RegisterD2H, + RegisterH2D, SetDeviceBits, + }, + vga_display::color_code::ColorCode, + }, + eprintln, + memory::allocators::extensions::PhysicalAddressExt, + print, println, +}; + +#[repr(transparent)] +#[derive(Copy, Clone)] +pub struct AHCIBaseAddress(pub u32); + +/// CAP +#[repr(transparent)] +#[derive(Debug, Clone, Copy)] +pub struct HBACapabilities(pub u32); + +impl HBACapabilities { + // Support 64bit addressing + ro_flag!(s64a, 31); + + // Support native command queuing + ro_flag!(snqc, 30); + + // Support s-notification register + ro_flag!(ssntf, 29); + + // Support mechanical presence switch + ro_flag!(smps, 28); + + // Support staggered Spin-up + ro_flag!(sss, 27); + + // Support aggressive link power management + ro_flag!(salp, 26); + + // Support activity lead + ro_flag!(sal, 25); + + pub fn interface_speed(&self) -> InterfaceSpeed { + unsafe { + core::mem::transmute( + (((read_volatile!(self.0)) >> 20) & 0xf) as u8, + ) + } + } + + // Support AHCI mode only + ro_flag!(sam, 18); + + // Support port multiplier + ro_flag!(spm, 17); + + // Frame Information Structure based switching supported + ro_flag!(fbss, 16); + + // Programmed I/O multiple Data request block + ro_flag!(pmd, 15); + + // Slumber state capable + ro_flag!(ssc, 15); + + // Partial state capable + ro_flag!(psc, 14); + + // This value is between 1 and 32 + pub fn number_of_commands(&self) -> u8 { + (((read_volatile!(self.0)) >> 8) & 0x1f) as u8 + } + + // Command completion coalescing supported + ro_flag!(cccs, 7); + + // Enclosure management 
supported + ro_flag!(ems, 6); + + // Support external SATA + ro_flag!(sxs, 5); + + /// Returns the number of ports implemented + pub fn number_of_ports(&self) -> u8 { + (read_volatile!(self.0) & 0x1f) as u8 + } +} + +/// GHC +#[repr(transparent)] +#[derive(Debug, Clone, Copy)] +pub struct GlobalHostControl(pub u32); + +impl GlobalHostControl { + // AHCI Enable. Must be set for the HBA to operate in AHCI mode. + flag!(ae, 31); + + // MSI Revert to Single Message + // 1.3.1) + flag!(mrsm, 2); + + // Interrupt Enable + flag!(ie, 1); + + // HBA Reset + flag!(hr, 0); +} + +/// IS +#[repr(transparent)] +#[derive(Debug, Clone, Copy)] +pub struct InterruptStatus(pub u32); + +impl InterruptStatus { + // Port Interrupt Pending Status. Corresponds to bits of the PI + // register. Cleared by writing a '1' to the corresponding bit. + pub fn is_port_pending(&self, port_num: u8) -> bool { + (read_volatile!(self.0) & (1 << port_num)) != 0 + } + + pub fn clear(&mut self, port_num: u8) { + write_volatile!(self.0, read_volatile!(self.0) | (1 << port_num)); + } + + pub fn clear_all(&mut self) { + write_volatile!(self.0, 0); + } + + // RWC flag for Port 0 Interrupt Pending Status + rwc_flag!(ip01, 1); + rwc_flag!(ip02, 2); + rwc_flag!(ip03, 3); + rwc_flag!(ip04, 4); + rwc_flag!(ip05, 5); + rwc_flag!(ip06, 6); + rwc_flag!(ip07, 7); + rwc_flag!(ip08, 8); + rwc_flag!(ip09, 9); + rwc_flag!(ip10, 10); + rwc_flag!(ip11, 11); + rwc_flag!(ip12, 12); + rwc_flag!(ip13, 13); + rwc_flag!(ip14, 14); + rwc_flag!(ip15, 15); + rwc_flag!(ip16, 16); + rwc_flag!(ip17, 17); + rwc_flag!(ip18, 18); + rwc_flag!(ip19, 19); + rwc_flag!(ip20, 20); + rwc_flag!(ip21, 21); + rwc_flag!(ip22, 22); + rwc_flag!(ip23, 23); + rwc_flag!(ip24, 24); + rwc_flag!(ip25, 25); + rwc_flag!(ip26, 26); + rwc_flag!(ip27, 27); + rwc_flag!(ip28, 28); + rwc_flag!(ip29, 29); + rwc_flag!(ip30, 30); + rwc_flag!(ip31, 31); +} + +// PI +#[repr(transparent)] +#[derive(Debug, Clone, Copy)] +pub struct PortsImplemented(pub u32); + +impl 
PortsImplemented { + // Port i is Implemented (P[i]) + pub fn is_port_implemented(&self, port_num: u8) -> bool { + (read_volatile!(self.0) & (1 << port_num)) != 0 + } +} + +// VS +#[repr(transparent)] +#[derive(Debug, Clone, Copy)] +pub struct Version(pub u32); + +impl Version { + // Major Version Number (Bits 31:16) + pub fn major_version(&self) -> u16 { + (read_volatile!(self.0) >> 16) as u16 + } + + // Minor Version Number (Bits 15:0) + pub fn minor_version(&self) -> u16 { + (read_volatile!(self.0) & 0xffff) as u16 + } +} + +/// CCC_CTL +#[repr(transparent)] +#[derive(Debug, Clone, Copy)] +pub struct CommandCompletionCoalescingControl(pub u32); + +impl CommandCompletionCoalescingControl { + pub fn interrupt_time_ms(&self) -> u16 { + ((read_volatile!(self.0) >> 16) & 0xffff) as u16 + } + + // Command Completions (CC): Number of command completions necessary to + // cause a CCC interrupt + pub fn command_completions(&self) -> u8 { + ((read_volatile!(self.0) >> 8) & 0xff) as u8 + } + + flag!(enable, 0); +} + +/// CCC_PORTS +#[repr(transparent)] +#[derive(Debug, Clone, Copy)] +pub struct CommandCompletionCoalescingPorts(pub u32); + +impl CommandCompletionCoalescingPorts { + pub fn set_port(&mut self, port_num: u8) { + write_volatile!(self.0, read_volatile!(self.0) | (1 << port_num)) + } + + pub fn unset(&mut self, port_num: u8) { + self.0 &= !(1 << port_num) + } + + flag!(prt01, 1); + flag!(prt02, 2); + flag!(prt03, 3); + flag!(prt04, 4); + flag!(prt05, 5); + flag!(prt06, 6); + flag!(prt07, 7); + flag!(prt08, 8); + flag!(prt09, 9); + flag!(prt10, 10); + flag!(prt11, 11); + flag!(prt12, 12); + flag!(prt13, 13); + flag!(prt14, 14); + flag!(prt15, 15); + flag!(prt16, 16); + flag!(prt17, 17); + flag!(prt18, 18); + flag!(prt19, 19); + flag!(prt20, 20); + flag!(prt21, 21); + flag!(prt22, 22); + flag!(prt23, 23); + flag!(prt24, 24); + flag!(prt25, 25); + flag!(prt26, 26); + flag!(prt27, 27); + flag!(prt28, 28); + flag!(prt29, 29); + flag!(prt30, 30); + flag!(prt31, 31); +} 
+ +/// EM_LOC +#[repr(transparent)] +#[derive(Debug, Clone, Copy)] +pub struct EnclosureManagementLocation(pub u32); + +impl EnclosureManagementLocation { + pub fn dword_offset_from_abar(&self) -> usize { + (read_volatile!(self.0) >> 16) as usize + } + + /// ZERO is invalid + /// TODO understand how to check if i have both receive and transmit + pub fn buffet_size(&self) -> Option> { + NonZero::new((read_volatile!(self.0) & 0xffff) as usize) + } +} + +/// EM_CTL +#[repr(transparent)] +#[derive(Debug, Clone, Copy)] +pub struct EnclosureManagementControl(pub u32); + +impl EnclosureManagementControl { + // Port multiplier support + ro_flag!(pm, 27); + + // Activity LED hardware driven + ro_flag!(alhd, 26); + + // Transmit only + ro_flag!(xmt, 25); + + // Single message buffer + ro_flag!(smb, 24); + + // SGPIO Enclosure management messages + ro_flag!(sgpio, 19); + + // SES2 Enclosure management massages + ro_flag!(ses2, 18); + + // SAF-TE Enclosure management massages + ro_flag!(safte, 17); + + // Led message type + ro_flag!(led, 16); + + // Reset + rw1_flag!(reset, 9); + + // Transmit massage + rw1_flag!(tm, 8); + + // Message received + rwc_flag!(mr, 0); +} + +/// CAP2 +#[repr(transparent)] +#[derive(Debug, Clone, Copy)] +pub struct HostCapabilitiesExtended(pub u32); + +impl HostCapabilitiesExtended { + // DevSleep entrance from slumber only + ro_flag!(deso, 5); + + // Aggressive device sleep management + ro_flag!(sadm, 4); + + // Support device sleep + ro_flag!(sds, 3); + + // Automatic partial to slumber transitions + ro_flag!(apst, 2); + + // NVMHCI present + ro_flag!(nvmp, 1); + + // Bios/OS handoff + ro_flag!(boh, 0); +} + +// BOHC +#[repr(transparent)] +#[derive(Debug, Clone, Copy)] +pub struct BiosOsControlStatus(pub u32); + +impl BiosOsControlStatus { + // Bios Busy + flag!(bb, 4); + + // OS ownership change + rwc_flag!(ooc, 3); + + // SMI on OS ownership change enable + flag!(sooe, 2); + + // OS Owned semaphore + flag!(oos, 1); + + // BIOS owned semaphore + 
flag!(bos, 0); +} + +#[repr(C)] +#[derive(Debug, Clone, Copy)] +pub struct GenericHostControl { + pub cap: HBACapabilities, + pub ghc: GlobalHostControl, + pub is: InterruptStatus, + pub pi: PortsImplemented, + pub vs: Version, + pub ccc_ctl: CommandCompletionCoalescingControl, + pub ccc_ports: CommandCompletionCoalescingPorts, + pub em_loc: EnclosureManagementLocation, + pub em_ctl: EnclosureManagementControl, + pub cap_ext: HostCapabilitiesExtended, + pub bohc: BiosOsControlStatus, +} + +#[repr(C)] +pub struct VendorSpecificRegisters { + _reserved: [u8; 0x74], +} + +/// Port X Interrupt status +#[repr(transparent)] +pub struct PortInterruptStatus(pub u32); + +impl PortInterruptStatus { + // Cold port detect status + rwc_flag!(cpds, 31); + + // Task file error status + rwc_flag!(tfes, 30); + + // Host bust fatal error status + rwc_flag!(hbfs, 29); + + // Host Bus Data Error Status + rwc_flag!(hbds, 28); + + // Interface Fatal Error Status + rwc_flag!(ifs, 27); + + // Interface Non-fatal Error Status + rwc_flag!(infs, 26); + + // Overflow Status + rwc_flag!(ofs, 24); + + // Incorrect Port Multiplier Status + rwc_flag!(ipms, 23); + + // PhyRdy Change Status + ro_flag!(prcs, 22); + + // Device Mechanical Presence Status + rwc_flag!(dmps, 7); + + // Port Connect Change Status + ro_flag!(pcs, 6); + + // Descriptor Processed + rwc_flag!(dps, 5); + + // Unknown FIS Interrupt + ro_flag!(ufs, 4); + + // Set Device Bits Interrupt + rwc_flag!(sdbs, 3); + + // DMA Setup FIS Interrupt + rwc_flag!(dss, 2); + + // PIO Setup FIS Interrupt + rwc_flag!(pss, 1); + + // Device to Host Register FIS Interrupt + rwc_flag!(dhrs, 0); + + pub fn clear_pending_interrupts(&mut self) { + write_volatile!(self.0, u32::MAX); + } +} + +/// Port X Interrupt Enable +#[repr(transparent)] +pub struct InterruptEnable(pub u32); + +impl InterruptEnable { + // Cold Presence Detect Enable + flag!(cpde, 31); + + // Task File Error Enable + flag!(tfee, 30); + + // Host Bus Fatal Error Enable + flag!(hbfe, 
29); + + // Host Bus Data Error Enable + flag!(hbde, 28); + + // Interface Fatal Error Enable + flag!(ife, 27); + + // Interface Non-fatal Error Enable + flag!(infe, 26); + + // Overflow Enable + flag!(ofe, 24); + + // Incorrect Port Multiplier Enable + flag!(ipme, 23); + + // PhyRdy Change Interrupt Enable + flag!(prce, 22); + + // Device Mechanical Presence Enable + flag!(dmpe, 7); + + // Port Change Interrupt Enable + flag!(pce, 6); + + // Descriptor Processed Interrupt Enable + flag!(dpe, 5); + + // Unknown FIS Interrupt Enable + flag!(ufe, 4); + + // Set Device Bits FIS Interrupt Enable + flag!(sdbe, 3); + + // DMA Setup FIS Interrupt Enable + flag!(dse, 2); + + // PIO Setup FIS Interrupt Enable + flag!(pse, 1); + + // Device to Host Register FIS Interrupt Enable + flag!(dhre, 0); +} + +/// Port X Command and status +#[repr(transparent)] +pub struct CmdStatus(pub u32); + +impl CmdStatus { + pub fn set_icc(&mut self, icc: InterfaceCommunicationControl) { + write_volatile!(self.0, read_volatile!(self.0) & !(0xf << 28)); + write_volatile!( + self.0, + read_volatile!(self.0) | (icc as u32) << 28 + ); + } + + // Aggressive Slumber / Partial + flag!(asp, 27); + + // Aggressive Link Power Management Enable + flag!(alpe, 26); + + // Drive LED on ATAPI Enable + flag!(dlae, 25); + + // Device is ATAPI + flag!(atapi, 24); + + // Automatic Partial to Slumber Transitions Enabled + flag!(apste, 23); + + // FIS-based Switching Capable Port + ro_flag!(fbscp, 22); + + // External SATA Port + ro_flag!(esp, 21); + + // Cold Presence Detection + ro_flag!(cpd, 20); + + // Mechanical Presence Switch Attached to Port + ro_flag!(mpsp, 19); + + // Hot Plug Capable Port + ro_flag!(hpcp, 18); + + // Port Multiplier Attached + flag!(pma, 17); + + // Cold Presence State + ro_flag!(cps, 16); + + // Command List Running + ro_flag!(cr, 15); + + // FIS Receive Running + ro_flag!(fr, 14); + + // Mechanical Presence Switch State + ro_flag!(mpss, 13); + + pub fn get_current_cmd(&mut self) -> u32 
{ + if !self.is_st() { + return 0; + } + (read_volatile!(self.0) >> 8) & 0x1f + } + + // FIS Receive Enable + flag!(fre, 4); + + // Command List Override + flag!(clo, 3); + + // Power On Device + flag!(pod, 2); + + // Spin-Up Device + flag!(sud, 1); + + // Start + flag!(st, 0); + + pub fn start(&mut self) { + while self.is_cr() {} + self.set_fre(); + self.set_st(); + } + + pub fn stop(&mut self) { + self.unset_st(); + let mut timeout = 0xfffff; + loop { + timeout -= 1; + if timeout == 0 { + panic!("Timeout ended on port stop"); + } + if self.is_cr() { + continue; + } else { + break; + } + } + self.unset_fre(); + let mut timeout = 0xfffff; + loop { + timeout -= 1; + if timeout == 0 { + panic!("Timeout ended on port stop"); + } + if self.is_fr() { + continue; + } else { + break; + } + } + } +} + +/// Port x Task File Data +#[repr(transparent)] +pub struct TaskFileData(pub u32); + +impl TaskFileData { + // Indicates error during transfer + ro_flag!(err, 0); + + // Indicates a data transfer request + ro_flag!(drq, 3); + + // Indicates that the interface is busy + ro_flag!(bsy, 7); + + pub fn error(&self) -> u8 { + (read_volatile!(self.0) >> 8) as u8 + } +} + +/// Port X Signature +#[repr(C)] +pub struct Signature { + pub sector_count: u8, + pub lba_low: u8, + pub lba_mid: u8, + pub lba_high: u8, +} + +impl Signature { + pub fn device_type(&self) -> Result> { + DeviceType::try_from(u32::from_le_bytes([ + self.sector_count, + self.lba_low, + self.lba_mid, + self.lba_high, + ])) + } +} + +/// Port X SATA Status +#[repr(transparent)] +pub struct SataStatus(pub u32); + +impl SataStatus { + pub fn power( + &self, + ) -> Result> { + let power = ((read_volatile!(self.0) >> 8) & 0xf) as u8; + InterfacePowerManagement::try_from(power) + } + + pub fn speed(&self) -> InterfaceSpeed { + let speed = ((read_volatile!(self.0) >> 4) & 0xf) as u8; + unsafe { InterfaceSpeed::unchecked_transmute_from(speed) } + } + + pub fn detection( + &self, + ) -> Result> { + let detection = 
(read_volatile!(self.0) & 0xf) as u8; + DeviceDetection::try_from(detection) + } +} + +/// Port X SATA control +#[repr(transparent)] +pub struct SataControl(pub u32); + +impl SataControl { + pub fn port_multiplier(&self) -> u8 { + ((read_volatile!(self.0) >> 16) & 0xf) as u8 + } + + pub fn select_power_management(&self) -> u8 { + ((read_volatile!(self.0) >> 12) & 0xf) as u8 + } + + flag!(devslp_disabled, 10); + flag!(slumber_disabled, 9); + flag!(partial_disabled, 8); + + pub fn max_speed(&self) -> InterfaceSpeedRestriction { + let speed = ((read_volatile!(self.0) >> 4) & 0xf) as u8; + unsafe { + InterfaceSpeedRestriction::unchecked_transmute_from(speed) + } + } + + pub fn set_max_speed(&mut self, speed: InterfaceSpeed) { + if speed != InterfaceSpeed::DevNotPresent { + write_volatile!(self.0, read_volatile!(self.0) & !(0xf << 4)); + write_volatile!( + self.0, + read_volatile!(self.0) | (speed as u32) << 4 + ); + } + } + + pub fn device_initialization( + &self, + ) -> Result> { + InterfaceInitialization::try_from( + (read_volatile!(self.0) & 0xf) as u8, + ) + } + + // TODO THIS COMMAND ANY MAYBE OTHER SHOULD PROBABLY MOVE TO THE PORT + // SETTING BECAUSE THEY REQUIRE PxCMD.st BIT TO BE SET WHILE THEY ARE + // SET + pub fn set_device_initialization( + &mut self, + init: InterfaceInitialization, + ) { + write_volatile!(self.0, read_volatile!(self.0) & !0xf); + write_volatile!(self.0, read_volatile!(self.0) | init as u32); + } +} + +/// Port X SATA error +#[repr(transparent)] +pub struct SataError(pub u32); + +impl SataError { + pub fn diagnostic(&self) -> impl Iterator { + let diagnostic_errors = + ((read_volatile!(self.0) >> 16) & 0xffff) as u16; + DiagnosticError::iter() + .filter(move |n| *n as u16 & diagnostic_errors != 0) + } + + pub fn error(&self) -> impl Iterator { + let ahci_error = (read_volatile!(self.0) & 0xffff) as u16; + AhciError::iter().filter(move |n| *n as u16 & ahci_error != 0) + } + + pub fn zero_error(&mut self) { + write_volatile!(self.0, 
read_volatile!(self.0) & !0xffff) + } +} + +/// Port X Sata Active +#[repr(transparent)] +pub struct SataActive(pub u32); + +/// Port X Command issue +#[repr(transparent)] +pub struct CmdIssue(pub Volatile); + +impl CmdIssue { + pub fn issue_cmd(&mut self, cmd: u8) { + self.0.write(self.0.read() | 1 << cmd); + } +} + +/// Port X SATA Notification +#[repr(transparent)] +pub struct SataNotification(pub u32); + +impl SataNotification { + /// Get port multiplier notification + pub fn set_pm_notif(&mut self, pm_port: u8) { + (0x0..0xf).contains(&pm_port).then(|| { + write_volatile!( + self.0, + read_volatile!(self.0) | pm_port as u32 + ) + }); + } + + /// Get port multiplier notification + pub fn get_pm_notif(&self, pm_port: u8) -> bool { + if (0x0..0xf).contains(&pm_port) { + (read_volatile!(self.0) & !0xffff) & (1 << pm_port) != 0 + } else { + false + } + } +} + +/// Port X Frame Information Structure based switching control +#[repr(transparent)] +pub struct FisSwitchControl(pub u32); + +impl FisSwitchControl { + /// Port multiplier device that experienced fatal error + pub fn device_with_error(&self) -> u8 { + ((read_volatile!(self.0) >> 16) & 0xf) as u8 + } + + /// The number of devices that FIS-Based switching has been optimized + /// for. The minimum value for this field should be 0x2. 
+ pub fn active_device_optimization(&self) -> u8 { + ((read_volatile!(self.0) >> 12) & 0xf) as u8 + } + + /// Set the port multiplier port number, that should receive the next + /// command + pub fn device_to_issue(&mut self, dev_num: u8) { + write_volatile!(self.0, read_volatile!(self.0) & !(0xf << 8)); + write_volatile!( + self.0, + read_volatile!(self.0) | (dev_num as u32) << 8 + ); + } + + // Single device error + ro_flag!(sde, 2); + + // Device error clear + rw1_flag!(dec, 1); + + // Enable, should be set if there is a port multiplier + flag!(en, 0); +} + +/// Port x Device sleep +#[repr(transparent)] +pub struct DeviceSleep(pub u32); + +impl DeviceSleep { + /// Device Sleep Idle Timeout Multiplier + pub fn dito_multiplier(&self) -> u8 { + ((read_volatile!(self.0) >> 25) & 0xf) as u8 + } + + /// Raw dito value + /// + /// **Use [`dito_actual`] for the actual wait time** + pub fn dito_ms(&self) -> u16 { + ((read_volatile!(self.0) >> 15) & 0x3ff) as u16 + } + + /// The actual timeout, which is dito * (dito_multiplier + 1) + pub fn dito_actual_ms(&self) -> u16 { + self.dito_ms() * (self.dito_multiplier() + 1) as u16 + } + + /// Minimum device sleep assertion time + /// + /// TODO: currently only read only, if write needed, check + /// documentation about extended cap and writing to this offset + pub fn mdat(&self) -> u8 { + ((read_volatile!(self.0) >> 10) & 0x1f) as u8 + } + + /// Device sleep exit timeout + /// + /// TODO: currently only read only, if write needed, check + /// documentation about extended cap and writing to this offset + pub fn deto_ms(&self) -> u8 { + ((read_volatile!(self.0) >> 2) & 0xff) as u8 + } + + // Device sleep present + ro_flag!(dsp, 1); + + // Aggressive device sleep enable + ro_flag!(adse, 0); +} + +/// Port X Vendor specific +#[repr(transparent)] +pub struct VendorSpecific(pub u32); + +#[repr(C)] +pub struct PortControlRegisters { + /// Port X Command list base address low + pub clb: Volatile, + /// Port X Command list base address 
high + pub clbu: Volatile, + /// Port X frame information structure base address low + pub fb: Volatile, + /// Port X frame information structure base address high + pub fbu: Volatile, + pub is: PortInterruptStatus, + pub ie: InterruptEnable, + pub cmd: CmdStatus, + _reserved0: u32, + pub tfd: TaskFileData, + pub sig: Signature, + pub ssts: SataStatus, + pub sctl: SataControl, + pub serr: SataError, + pub sact: SataActive, + pub ci: CmdIssue, + pub sntf: SataNotification, + pub fbs: FisSwitchControl, + pub devslp: DeviceSleep, + _reserved1: [u32; 10], + pub vs: [VendorSpecific; 4], +} + +impl PortControlRegisters { + /// Return the full command list address by combining the low and high + /// 32bit parts + pub fn cmd_list(&mut self) -> &mut CmdList { + let cmd_list_addr = ((self.clbu.read() as usize) << 32) + | (self.clb.read() as usize & !((1 << 10) - 1)); + unsafe { &mut *(cmd_list_addr as *mut CmdList) } + } + + pub fn set_cmd_list_address(&mut self, ptr: usize) { + println!("CLB: {:x?}", ptr); + self.clb.write((ptr & 0xffffffff) as u32); + self.clbu.write((ptr >> 32) as u32); + } + + /// Return the full frame information structure address by combining + /// the low and high 32bit parts + pub fn received_fis(&self) -> &ReceivedFis { + let rfis_addr = ((self.fbu.read() as usize) << 32) + | (self.fb.read() as usize & !((1 << 8) - 1)); + unsafe { &*(rfis_addr as *const ReceivedFis) } + } + + pub fn set_received_fis_address(&mut self, ptr: usize) { + println!("FB: {:x?}", ptr); + self.fb.write((ptr & 0xffffffff) as u32); + self.fbu.write((ptr >> 32) as u32); + } + + pub fn set_status(&mut self, port: u8) { + self.cmd.set_st(); + (0x0u8..=0x1fu8).contains(&port).then(|| { + self.sact.0 &= !(0x1f << 8); + self.sact.0 |= (port as u32) << 8; + }); + } + + /// Return the index of an available command slot if one exists + pub fn find_cmd_slot(&self) -> Option { + let mut slots = self.ci.0.read() | self.sact.0; + for i in 0usize..32 { + if slots & 1 == 0 { + return 
Some(i); + } else { + slots >>= 1 + } + } + None + } + + pub fn identity_packet(&mut self, buf: *mut IdentityPacketData) { + let fis = RegisterH2D::new( + 1 << 7, + AtaCommand::IdentifyDevice, + 0, + 0, + 0, + 0, + 0, + ); + let cmd = &mut self.cmd_list().entries[0]; + let cmd_table = &mut cmd.cmd_table::<8>(); + let prdt_ent = &mut cmd_table.table[0]; + write_volatile!(cmd_table.cfis, Fis { h2d: fis }); + prdt_ent.set_buffer(buf); + prdt_ent.dbc.set_dbc(511); + cmd.info.set_command_fis_len(size_of::()); + cmd.info.set_prdtl(1); + println!("Sending command!"); + self.ci.issue_cmd(0); + + let mut timeout = 0xfffff; + loop { + if self.is.0 != 0 { + if self.is.is_tfes() { + eprintln!("ERROR READING FROM DISK"); + for error in self.serr.error() { + println!("{:?}", error); + } + if self.tfd.is_err() { + println!( + "TASK FILE DATA ERROR STATE\nERROR: {:08b}", + self.tfd.error() + ); + } + } + println!("Finished!"); + println!("{:032b}", self.is.0); + break; + } else { + timeout -= 1 + } + + if timeout == 0 { + panic!("Timeout on identity packet read") + } + } + unsafe { + for w in (&mut *buf).serial_number.chunks_exact_mut(2) { + w.swap(0, 1); + } + for w in (&mut *buf).model_num.chunks_exact_mut(2) { + w.swap(0, 1); + } + for w in (&mut *buf).firmware_rev.chunks_exact_mut(2) { + w.swap(0, 1); + } + } + } +} + +/// TODO, DECIDE IF ITS OK THAT THIS IS ONE BYTE GREATER IN SIZE +#[repr(C, align(256))] +pub struct ReceivedFis { + pub dsfis: Volatile, + _reserved0: u32, + pub psfis: Volatile, + _reserved1: [u32; 3], + pub rfis: Volatile, + _reserved2: u32, + pub sdbfis: Volatile, + pub ufis: Volatile<[u8; 64]>, + _reserved3: [u32; 24], +} + +#[derive(Default)] +pub struct CmdListDescriptionInfo(pub u32); + +impl CmdListDescriptionInfo { + /// Set the Physical region descriptor table length + pub fn set_prdtl(&mut self, size: u16) { + write_volatile!( + self.0, + read_volatile!(self.0) | (size as u32) << 16 + ); + } + + /// Set the port multiplier port + pub fn 
set_pm_port(&mut self, pm_port: u8) { + write_volatile!( + self.0, + read_volatile!(self.0) | ((pm_port & 0xf) as u32) << 12 + ); + } + + // Clear busy upon R_OK + flag!(c, 10); + + // BIST + flag!(b, 9); + + // Reset + flag!(r, 8); + + // Prefetchable + flag!(p, 7); + + // Write + flag!(w, 6); + + // ATAPI + flag!(a, 5); + + /// Length of command FIS len (internally converted to dw) + pub fn set_command_fis_len(&mut self, len: usize) { + assert!(len < 64, "Len must be smaller then 64"); + assert!(len > 8, "Len must be greater then 8 "); + write_volatile!( + self.0, + read_volatile!(self.0) | (len / size_of::()) as u32 + ); + } +} + +#[repr(C)] +pub struct CmdHeader { + info: CmdListDescriptionInfo, + prdb_byte_count: Volatile, + /// Command table descriptor base address + ctba: Volatile, + /// Command table descriptor base address upper + ctbau: Volatile, + _reserved: [u32; 4], +} + +impl CmdHeader { + pub fn cmd_table( + &mut self, + ) -> &mut CmdTable { + let cmd_table_addr = ((self.ctbau.read() as usize) << 32) + | (self.ctba.read() as usize); + unsafe { &mut *(cmd_table_addr as *mut CmdTable) } + } + + pub fn set_cmd_table(&mut self, ptr: usize) { + println!("CMD TBL: {:x?}", ptr); + self.ctba.write((ptr & 0xffffffff) as u32); + self.ctbau.write((ptr >> 32) as u32); + } +} + +#[repr(C, align(1024))] +pub struct CmdList { + pub entries: [CmdHeader; 32], +} + +pub struct PrdtDescriptionInfo(pub u32); + +impl PrdtDescriptionInfo { + // Interrupt on completion + flag!(i, 31); + + /// Set the data byte count of the buffer on the prdt + pub fn set_dbc(&mut self, dbc: u32) { + const MB: u32 = 1 << 20; + assert!(dbc < 4 * MB, "DBC should be smaller then 4Mib"); + write_volatile!(self.0, read_volatile!(self.0) | dbc | 1); + } +} + +#[repr(C)] +pub struct CmdTableEntry { + /// Data base address buffer + dba: Volatile, + /// Data base address buffer upper + dbau: Volatile, + _reserved: u32, + /// Data byte count (A maximum of 4mb is available) + dbc: PrdtDescriptionInfo, 
+} + +impl CmdTableEntry { + pub fn set_buffer(&mut self, buf: *mut T) { + let ptr = buf as usize; + self.dba.write((ptr & 0xffffffff) as u32); + self.dbau.write((ptr >> 32) as u32); + } +} + +#[repr(C, align(256))] +pub struct CmdTable { + cfis: Fis, + /// TODO + acmd: [u8; 0x10], + _reserved: [u8; 0x30], + table: [CmdTableEntry; ENTRIES], +} + +#[repr(C)] +/// Host Bus Adapter Memory Registers +pub struct HBAMemoryRegisters { + pub ghc: GenericHostControl, + pub _reserved: [u8; 0x60], + pub vsr: VendorSpecificRegisters, + + // Not doing 32 ports on purpose! + // Because it makes this structure larger than a page + pub ports: [PortControlRegisters; 30], +} + +impl HBAMemoryRegisters { + pub fn new( + a: PhysicalAddress, + ) -> Result, HbaError> { + if !a.is_aligned(REGULAR_PAGE_ALIGNMENT) { + return Err(HbaError::AddressNotAligned); + } + + a.map( + a.translate(), + PageEntryFlags::regular_io_page_flags(), + PageSize::Regular, + ); + + let mut hba_ptr = + a.translate().as_non_null::(); + + let hba = unsafe { hba_ptr.as_mut() }; + + hba.ghc.ghc.set_ae(); + hba.ghc.ghc.set_ie(); + + if hba.ghc.pi.0 >= (1 << 31) { + panic!("There is no support for HBA's with more then 30 ports") + } + + println!("BIOS / OS Handoff: {}", hba.ghc.cap_ext.is_boh()); + + if hba.ghc.cap_ext.is_boh() { + unimplemented!("Didn't implement bios os handoff") + } + + Ok(hba_ptr) + } + + /// Returns the number of active devices found and sets them into idle + /// state. 
+ pub fn probe_init(&mut self) -> usize { + // println!( + // "Detected {} implemented ports", + // self.ghc.cap.number_of_ports() + // ); + + // println!( + // "Supported command slots: {}, Supported 64bit addresses: + // {}", self.ghc.cap.number_of_commands(), + // self.ghc.cap.is_s64a() + // ); + + // let mut count = 0; + // for (i, port) in self.ports.iter_mut().enumerate() { + // if self.ghc.pi.is_port_implemented(i as u8) + // && let Ok(power) = port.ssts.power() + // && let InterfacePowerManagement::Active = power + // { + // count += 1; + // println!("\nDetected device at port number: {}", i); + // print!(" Device Power: "); + // println!("{:?}", power ; color = + // ColorCode::new(Color::Green, Color::Black)); + // print!(" Device Speed: "); println!("{}", + // port.ssts.speed() ; color = ColorCode::new(Color::Green, + // Color::Black)); print!(" Device type: "); + // match port.sig.device_type() { + // Ok(t) => { + // println!("{:?}", t ; color = + // ColorCode::new(Color::Green, Color::Black) ) + // } Err(e) => { + // println!("{:?}", e ; color = + // ColorCode::new(Color::Red, Color::Black) ) + // } } + // port.cmd.stop(); + + // let clb_fbu_table = unsafe { alloc_pages!(1) }; + // for i in (0..4096).step_by(size_of::()) { + // unsafe { + // core::ptr::write_volatile( + // ((clb_fbu_table + i) + + // PHYSICAL_MEMORY_OFFSET) as + // *mut usize, 0, + // ); + // } + // } + + // port.set_cmd_list_address(clb_fbu_table); + // port.set_received_fis_address( + // clb_fbu_table + size_of::(), + // ); + + // // MAPPING the first header with 8 entries (0x100 in + // total // table size) + // let cmd_list = port.cmd_list(); + // cmd_list.entries[0].set_cmd_table( + // clb_fbu_table + // + size_of::() + // + size_of::(), + // ); + + // port.cmd.set_fre(); + // port.serr.zero_error(); + // // port.ie.set_dhre(); + // // port.ie.set_pse(); + // // port.ie.set_dse(); + // // port.ie.set_tfee(); + // port.is.clear_pending_interrupts(); + // self.ghc.is.clear_all(); + + 
// port.cmd.set_sud(); + // port.cmd.set_pod(); + // + // port.cmd.set_icc(InterfaceCommunicationControl::Active); + + // loop { + // if !port.tfd.is_bsy() + // && !port.tfd.is_drq() + // && matches!( + // port.ssts.power().unwrap(), + // InterfacePowerManagement::Active + // ) + // { + // break; + // } + // } + // port.cmd.start(); + // println!("Started port number: {}", i) + // } + // } + todo!() + // count + } +} diff --git a/kernel/src/drivers/ata/ahci/mod.rs b/kernel/src/drivers/ata/ahci/mod.rs new file mode 100644 index 0000000..397d27c --- /dev/null +++ b/kernel/src/drivers/ata/ahci/mod.rs @@ -0,0 +1,16 @@ +pub mod fis; +pub mod hba; + +use common::enums::CascadedPicInterruptLine; +use cpu_utils::structures::interrupt_descriptor_table::InterruptStackFrame; +pub use fis::*; +pub use hba::*; + +use crate::{drivers::pic8259::PIC, println}; + +pub extern "x86-interrupt" fn ahci_interrupt( + _stack_frame: InterruptStackFrame, +) { + println!("AHCI Interrupts!"); + unsafe { PIC.end_of_interrupt(CascadedPicInterruptLine::Ahci) }; +} diff --git a/kernel/src/drivers/ata/mod.rs b/kernel/src/drivers/ata/mod.rs new file mode 100644 index 0000000..a790baa --- /dev/null +++ b/kernel/src/drivers/ata/mod.rs @@ -0,0 +1 @@ +pub mod ahci; diff --git a/kernel/src/drivers/disk/mod.rs b/kernel/src/drivers/disk/mod.rs deleted file mode 100644 index 8b13789..0000000 --- a/kernel/src/drivers/disk/mod.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/kernel/src/drivers/interrupt_handlers.rs b/kernel/src/drivers/interrupt_handlers.rs index 3c390da..110b561 100644 --- a/kernel/src/drivers/interrupt_handlers.rs +++ b/kernel/src/drivers/interrupt_handlers.rs @@ -1,5 +1,8 @@ use crate::{ - drivers::{keyboard::keyboard_handler, timer::timer_handler}, + drivers::{ + ata::ahci::ahci_interrupt, keyboard::keyboard_handler, + timer::timer_handler, + }, println, }; use common::{ @@ -63,6 +66,7 @@ pub extern "x86-interrupt" fn invalid_opcode_handler( ) { println!("Interrupt: InvalidOpcode"); 
println!("Stack frame: {:#?}", stack_frame); + panic!(""); } pub extern "x86-interrupt" fn device_not_found_handler( @@ -164,8 +168,8 @@ pub extern "x86-interrupt" fn page_fault_handler( error_code: u64, ) { println!("Interrupt: PageFault"); - println!("Stack frame: {:#?}", stack_frame); - println!("Error code: {:#x}", error_code); + // println!("Stack frame: {:#?}", stack_frame); + // println!("Error code: {:#x}", error_code); println!("Faulting address: {:x}", cr2::read()); } @@ -394,5 +398,13 @@ pub fn init(idt: &'static mut InterruptDescriptorTable) { ProtectionLevel::Ring0, InterruptType::Trap, ); + idt.set_interrupt_handler( + Interrupt::Ahci, + VirtualAddress::new_unchecked( + ahci_interrupt as *const () as usize, + ), + ProtectionLevel::Ring0, + InterruptType::Trap, + ); } } diff --git a/kernel/src/drivers/keyboard/mod.rs b/kernel/src/drivers/keyboard/mod.rs index e54587a..e2f0823 100644 --- a/kernel/src/drivers/keyboard/mod.rs +++ b/kernel/src/drivers/keyboard/mod.rs @@ -45,7 +45,6 @@ pub extern "x86-interrupt" fn keyboard_handler( } _ => {} } - PIC.assume_init_mut() - .end_of_interrupt(CascadedPicInterruptLine::Keyboard); + PIC.end_of_interrupt(CascadedPicInterruptLine::Keyboard); } } diff --git a/kernel/src/drivers/mod.rs b/kernel/src/drivers/mod.rs index be82b7d..27c499c 100644 --- a/kernel/src/drivers/mod.rs +++ b/kernel/src/drivers/mod.rs @@ -1,4 +1,4 @@ -pub mod disk; +pub mod ata; pub mod interrupt_handlers; pub mod keyboard; pub mod pci; diff --git a/kernel/src/drivers/pci.rs b/kernel/src/drivers/pci.rs index 2601224..8bf8a18 100644 --- a/kernel/src/drivers/pci.rs +++ b/kernel/src/drivers/pci.rs @@ -1,11 +1,11 @@ extern crate alloc; -use crate::memory::allocators::page_allocator::{ - ALLOCATOR, allocator::PhysicalPageAllocator, -}; + +use crate::drivers::ata::ahci::AHCIBaseAddress; use alloc::vec::Vec; use common::enums::{ - ClassCode, DeviceID, HeaderType, PciDeviceType, Port, - ProgrammingInterface, SubClass, VendorDevice, VendorID, + 
CascadedPicInterruptLine, ClassCode, DeviceID, HeaderType, + PciDeviceType, Port, ProgrammingInterface, SubClass, VendorDevice, + VendorID, }; use cpu_utils::instructions::port::PortExt; use learnix_macros::flag; @@ -67,20 +67,23 @@ impl PciConfigurationCycle { uninit_ptr.byte_add(offset).write_volatile(header_data); } } + // uninit.bus = bus; + // uninit.device = device; + // uninit.function = function; uninit } - pub fn read_pci_device_header( + pub fn read_pci_device( bus: u8, device: u8, function: u8, common: PciCommonHeader, ) -> PciDevice { - let mut uninit = PciDevice { common }; + let mut uninit = PciDeviceHeader { common }; let uninit_ptr = - &mut uninit as *mut PciDevice as usize as *mut u32; - for offset in (size_of::() - ..size_of::()) + &mut uninit as *mut PciDeviceHeader as usize as *mut u32; + for offset in ((size_of::()) + ..size_of::()) .step_by(size_of::()) { unsafe { @@ -94,7 +97,12 @@ impl PciConfigurationCycle { uninit_ptr.byte_add(offset).write_volatile(header_data); } } - uninit + PciDevice { + header: uninit, + bus, + device, + function, + } } } @@ -175,15 +183,15 @@ impl BISTRegister { #[repr(C)] #[derive(Debug, Clone, Copy)] pub struct PciCommonHeader { - vendor_device: VendorDevice, - command: CommandRegister, - status: StatusRegister, - revision: u8, - device_type: PciDeviceType, - cache_size: u8, - latency_timer: u8, - header_type: HeaderType, - bist: BISTRegister, + pub vendor_device: VendorDevice, + pub command: CommandRegister, + pub status: StatusRegister, + pub revision: u8, + pub device_type: PciDeviceType, + pub cache_size: u8, + pub latency_timer: u8, + pub header_type: HeaderType, + pub bist: BISTRegister, } impl PciCommonHeader { @@ -205,29 +213,38 @@ impl PciCommonHeader { latency_timer: 0, header_type: HeaderType::GeneralDevice, bist: BISTRegister(0), + // bus: 0xff, + // device: 0xff, + // function: 0xff, } } } -#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct MemoryBaseAddressRegister(u32); 
-#[repr(transparent)] #[derive(Debug, Clone, Copy)] pub struct IOBaseAddressRegister(u32); #[derive(Clone, Copy)] pub union BaseAddressRegister { - memory: MemoryBaseAddressRegister, - io: IOBaseAddressRegister, + pub memory: MemoryBaseAddressRegister, + pub io: IOBaseAddressRegister, + pub abar: AHCIBaseAddress, } +#[derive(PartialEq, Eq)] pub enum BaseAddressRegisterType { Memory, IO, } +pub enum BaseAddressRegisterSize { + Bit32 = 0, + Reserved = 1, + Bit64 = 2, +} + impl BaseAddressRegister { pub fn identify(&self) -> BaseAddressRegisterType { // Doesn't matter which variant we take, they are @@ -240,37 +257,52 @@ impl BaseAddressRegister { } } } + + pub fn is_64bit(&self) -> bool { + self.identify() == BaseAddressRegisterType::Memory + && unsafe { + self.memory.0 & BaseAddressRegisterSize::Bit64 as u32 != 0 + } + } + + pub fn address(&self) -> usize { + if !self.is_64bit() { + (unsafe { self.io.0 } & 0xfffffff0) as usize + } else { + unimplemented!("Still didn't implemented 64bit addresses") + } + } } impl core::fmt::Debug for BaseAddressRegister { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - writeln!(f, "Memory: {:?}", unsafe { self.memory })?; - writeln!(f, "I/O: {:?}", unsafe { self.io }) + writeln!(f, "Memory: {:x?}", unsafe { self.memory })?; + writeln!(f, "I/O: {:x?}", unsafe { self.io }) } } #[repr(C)] #[derive(Debug, Clone, Copy)] pub struct GeneralDeviceHeader { - common: PciCommonHeader, - bar0: BaseAddressRegister, - bar1: BaseAddressRegister, - bar2: BaseAddressRegister, - bar3: BaseAddressRegister, - bar4: BaseAddressRegister, - bar5: BaseAddressRegister, - cardbus_cis_ptr: u32, - subsystem_vendor_id: u16, - subsystem_id: u16, - expansion_rom_base: u32, - capabilities_ptr: u8, - _reserved0: u8, - _reserved1: u16, - _reserved2: u32, - interrupt_line: u8, - interrupt_pin: u8, - min_grant: u8, - max_latency: u8, + pub common: PciCommonHeader, + pub bar0: BaseAddressRegister, + pub bar1: BaseAddressRegister, + pub bar2: 
BaseAddressRegister, + pub bar3: BaseAddressRegister, + pub bar4: BaseAddressRegister, + pub bar5: BaseAddressRegister, + pub cardbus_cis_ptr: u32, + pub subsystem_vendor_id: u16, + pub subsystem_id: u16, + pub expansion_rom_base: u32, + pub capabilities_ptr: u8, + pub _reserved0: u8, + pub _reserved1: u16, + pub _reserved2: u32, + pub interrupt_line: u8, + pub interrupt_pin: u8, + pub min_grant: u8, + pub max_latency: u8, } impl GeneralDeviceHeader { @@ -339,13 +371,13 @@ pub struct Pci2PciBridge { bridge_control: u16, } -pub union PciDevice { +pub union PciDeviceHeader { pub common: PciCommonHeader, pub general_device: GeneralDeviceHeader, pub pci2pci_bridge: Pci2PciBridge, } -impl PciDevice { +impl PciDeviceHeader { pub fn identify(&self) -> HeaderType { // Doesn't matter which one we choose, common is the // same for all of them in the same offset. @@ -357,46 +389,57 @@ impl PciDevice { } } -pub fn scan_pci() -> Vec { - let mut v: Vec = - Vec::with_capacity_in(64, unsafe { - ALLOCATOR.assume_init_ref().clone() - }); - for bus in 0..=255 { - for device in 0..32 { - let common = - PciConfigurationCycle::read_common_header(bus, device, 0); - if common.vendor_device.vendor == VendorID::NonExistent { - return v; - } - v.push_within_capacity( - PciConfigurationCycle::read_pci_device_header( - bus, device, 0, common, - ), - ) - .unwrap_or_else(|_| { - panic!("PCI Vec cannot push any more items") - }); - if !common.header_type.is_multifunction() { - continue; - } - for function in 1..8 { - let common = PciConfigurationCycle::read_common_header( - bus, device, function, - ); - if common.vendor_device.vendor == VendorID::NonExistent { - break; - } - v.push_within_capacity( - PciConfigurationCycle::read_pci_device_header( - bus, device, function, common, - ), - ) - .unwrap_or_else(|_| { - panic!("PCI Vec cannot push any more items") - }); - } - } - } - v +pub struct PciDevice { + pub header: PciDeviceHeader, + pub bus: u8, + pub device: u8, + pub function: u8, } + +impl 
PciDevice { + pub fn enable_interrupts(&self, irq: CascadedPicInterruptLine) {} +} + +// pub fn scan_pci() -> Vec { +// let mut v: Vec = +// Vec::with_capacity_in(64, unsafe { +// ALLOCATOR.assume_init_ref().clone() +// }); +// for bus in 0..=255 { +// for device in 0..32 { +// let common = +// PciConfigurationCycle::read_common_header(bus, device, +// 0); if common.vendor_device.vendor == VendorID::NonExistent +// { continue; +// } +// v.push_within_capacity( +// PciConfigurationCycle::read_pci_device( +// bus, device, 0, common, +// ), +// ) +// .unwrap_or_else(|_| { +// panic!("PCI Vec cannot push any more items") +// }); +// if !common.header_type.is_multifunction() { +// continue; +// } +// for function in 1..8 { +// let common = PciConfigurationCycle::read_common_header( +// bus, device, function, +// ); +// if common.vendor_device.vendor == VendorID::NonExistent +// { continue; +// } +// v.push_within_capacity( +// PciConfigurationCycle::read_pci_device( +// bus, device, function, common, +// ), +// ) +// .unwrap_or_else(|_| { +// panic!("PCI Vec cannot push any more items") +// }); +// } +// } +// } +// v +// } diff --git a/kernel/src/drivers/pic8259.rs b/kernel/src/drivers/pic8259.rs index a19321d..eedab3f 100644 --- a/kernel/src/drivers/pic8259.rs +++ b/kernel/src/drivers/pic8259.rs @@ -1,5 +1,3 @@ -use core::mem::MaybeUninit; - /// The code in this module is inspired from osdev /// 8259_PIC guide. 
use common::enums::{ @@ -8,8 +6,7 @@ use common::enums::{ }; use cpu_utils::instructions::port::PortExt; -pub static mut PIC: MaybeUninit = - MaybeUninit::new(CascadedPIC::default()); +pub static mut PIC: CascadedPIC = CascadedPIC::default(); struct ProgrammableInterruptController { command: Port, @@ -81,47 +78,43 @@ impl CascadedPIC { } } - pub fn init(uninit: &'static mut MaybeUninit) { + pub fn init(uninit: &'static mut Self) { unsafe { - let uninitialized = uninit.assume_init_mut(); // Send initialize command to master - uninitialized.master.command.outb( + uninit.master.command.outb( PicCommandCode::Initialize as u8 | PicCommandCode::CascadeMode as u8, ); Port::iowait(); // Send initialize command to slave - uninitialized.slave.command.outb( + uninit.slave.command.outb( PicCommandCode::Initialize as u8 | PicCommandCode::CascadeMode as u8, ); Port::iowait(); // Send IVT offset to master - uninitialized + uninit .master .data - .outb(uninitialized.master.interrupt_offset as u8); + .outb(uninit.master.interrupt_offset as u8); Port::iowait(); // Send IVT offset to slave - uninitialized - .slave - .data - .outb(uninitialized.slave.interrupt_offset as u8); + uninit.slave.data.outb(uninit.slave.interrupt_offset as u8); Port::iowait(); // Tell master how it is connected to slave - uninitialized.master.data.outb(PicInterruptLine::Irq2 as u8); + uninit.master.data.outb(PicInterruptLine::Irq2 as u8); Port::iowait(); // Tell slave how it is connected to master - uninitialized.slave.data.outb(PicInterruptLine::Irq1 as u8); + uninit.slave.data.outb(PicInterruptLine::Irq1 as u8); Port::iowait(); // Set PIC mode of master - uninitialized.master.data.outb(PicMode::Mode8086 as u8); + uninit.master.data.outb(PicMode::Mode8086 as u8); Port::iowait(); // Set PIC mode of slave - uninitialized.slave.data.outb(PicMode::Mode8086 as u8); + uninit.slave.data.outb(PicMode::Mode8086 as u8); Port::iowait(); - uninitialized.master.enable(); - uninitialized.slave.enable(); + 
uninit.master.enable(); + uninit.slave.enable(); } } diff --git a/kernel/src/drivers/timer.rs b/kernel/src/drivers/timer.rs index 3cadae2..e234cd0 100644 --- a/kernel/src/drivers/timer.rs +++ b/kernel/src/drivers/timer.rs @@ -8,7 +8,6 @@ pub extern "x86-interrupt" fn timer_handler( ) { // print!("."); unsafe { - PIC.assume_init_mut() - .end_of_interrupt(CascadedPicInterruptLine::Timer); + PIC.end_of_interrupt(CascadedPicInterruptLine::Timer); } } diff --git a/kernel/src/drivers/vga_display/mod.rs b/kernel/src/drivers/vga_display/mod.rs index c45602f..9f014f3 100644 --- a/kernel/src/drivers/vga_display/mod.rs +++ b/kernel/src/drivers/vga_display/mod.rs @@ -74,6 +74,7 @@ macro_rules! eprintln { // Case 1: Print "FAIL" with formatted message. ($fmt:expr $(, $arg:tt)*) => {{ use $crate::drivers::vga_display::color_code::ColorCode; + use common::enums::Color; $crate::print!("["); $crate::print!("FAIL" ; color = ColorCode::new(Color::Red, Color::Black)); $crate::print!("]: "); @@ -83,6 +84,7 @@ macro_rules! eprintln { // Case 2: Print "FAIL" with custom message color. ($fmt:expr $(, $arg:tt)* ; color = $color:expr) => {{ use $crate::drivers::vga_display::color_code::ColorCode; + use common::enums::Color; $crate::print!("["); $crate::print!("FAIL" ; color = ColorCode::new(Color::Red, Color::Black)); $crate::print!("]: "); @@ -97,6 +99,7 @@ macro_rules! okprintln { // Case 1: Print "OK" with formatted message. ($fmt:expr $(, $arg:tt)*) => {{ use $crate::drivers::vga_display::color_code::ColorCode; + use common::enums::Color; $crate::print!("["); $crate::print!(" OK " ; color = ColorCode::new(Color::Green, Color::Black)); $crate::print!("]: "); @@ -106,6 +109,7 @@ macro_rules! okprintln { // Case 2: Print "OK" with custom message color. 
($fmt:expr $(, $arg:tt)* ; color = $color:expr) => {{ use $crate::drivers::vga_display::color_code::ColorCode; + use common::enums::Color; $crate::print!("["); $crate::print!(" OK " ; color = ColorCode::new(Color::Green, Color::Black)); $crate::print!("]: "); diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 34c7462..df95021 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -17,42 +17,41 @@ #![feature(ascii_char_variants)] #![feature(ascii_char)] #![feature(const_convert)] +#![feature(slice_ptr_get)] +#![feature(core_intrinsics)] +#![feature(explicit_tail_calls)] +#![feature(specialization)] #![deny(clippy::all)] mod drivers; mod memory; -use core::{ - alloc::{Allocator, Layout}, - num::NonZero, - panic::PanicInfo, -}; +use core::{num::NonZero, panic::PanicInfo}; use crate::{ drivers::{ interrupt_handlers, keyboard::{KEYBOARD, ps2_keyboard::Keyboard}, - pci::{self}, pic8259::{CascadedPIC, PIC}, vga_display::color_code::ColorCode, }, memory::{ - allocators::page_allocator::allocator::PhysicalPageAllocator, - memory_map::{ParsedMapDisplay, parse_map}, + allocators::{ + buddy::BUDDY_ALLOCATOR, extensions::PageTableExt, + slab::SLAB_ALLOCATOR, + }, + memory_map::{MemoryMap, parse_map}, + page::{PAGES, map::PageMap}, }, }; -use common::{ - constants::{REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE}, - enums::{Color, HeaderType}, -}; +use common::{constants::REGULAR_PAGE_SIZE, enums::Color}; use cpu_utils::{ instructions::interrupts::{self}, - structures::interrupt_descriptor_table::{ - IDT, InterruptDescriptorTable, + structures::{ + interrupt_descriptor_table::{IDT, InterruptDescriptorTable}, + paging::PageTable, }, }; -use memory::allocators::page_allocator::ALLOCATOR; - #[unsafe(no_mangle)] #[unsafe(link_section = ".start")] #[allow(clippy::missing_safety_doc)] @@ -62,18 +61,31 @@ pub unsafe extern "C" fn _start() -> ! 
{ okprintln!("Entered Long Mode"); parse_map(); okprintln!("Obtained Memory Map"); - println!("{}", ParsedMapDisplay(parsed_memory_map!())); - PhysicalPageAllocator::init(unsafe { &mut ALLOCATOR }); - okprintln!("Allocator Initialized"); + println!("{}", MemoryMap(parsed_memory_map!())); + + PageMap::init(unsafe { &mut PAGES }, MemoryMap(parsed_memory_map!())); + unsafe { BUDDY_ALLOCATOR.init(MemoryMap(parsed_memory_map!()), 0) }; + + let last = MemoryMap(parsed_memory_map!()).last().unwrap(); + unsafe { - let idt_address = alloc_pages!(1).into(); - InterruptDescriptorTable::init(&mut IDT, idt_address); + PageTable::current_table().as_mut().map_physical_memory( + (last.base_address + last.length) as usize, + ); + } + okprintln!("Initialized buddy allocator"); + unsafe { + InterruptDescriptorTable::init( + &mut IDT, + alloc_pages!(1).translate(), + ); okprintln!("Initialized interrupt descriptor table"); interrupt_handlers::init(IDT.assume_init_mut()); okprintln!("Initialized interrupts handlers"); CascadedPIC::init(&mut PIC); + okprintln!("Initialized Programmable Interrupt Controller"); - let keyboard_buffer_address = alloc_pages!(1).into(); + let keyboard_buffer_address: common::address_types::VirtualAddress = alloc_pages!(1).translate(); Keyboard::init( &mut KEYBOARD, keyboard_buffer_address, @@ -82,31 +94,86 @@ pub unsafe extern "C" fn _start() -> ! 
{ okprintln!("Initialized Keyboard"); interrupts::enable(); } - let pci_devices = pci::scan_pci(); - println!("Press ENTER to enumerate PCI devices!"); - let a = pci_devices.as_ptr() as usize; - println!("pci_devices address: {:x}", a); - for device in pci_devices.iter() { - loop { - unsafe { - let c = KEYBOARD.assume_init_mut().read_char(); - if c == "\n" { - break; - } - } - } - match device.identify() { - HeaderType::GeneralDevice => { - println!("{:#?}", unsafe { device.common }) - } - _ => { - println!("{:#?}", unsafe { device.common }) - } - } - } + + unsafe { SLAB_ALLOCATOR.init() } + okprintln!("Initialized slab allocator"); + + // panic!("") + // let mut pci_devices = pci::scan_pci(); + // println!("Press ENTER to enumerate PCI devices!"); + // let a = pci_devices.as_ptr() as usize; + // println!("pci_devices address: {:x}", a); + + // loop { + // let c = unsafe { KEYBOARD.assume_init_mut().read_raw_scancode() + // }; if let Some(e) = c + // && PS2ScanCode::from_scancode(e) == PS2ScanCode::Enter + // { + // break; + // } + // } + + // unsafe { PIC.enable_irq(CascadedPicInterruptLine::Ahci) }; + // for device in pci_devices.iter_mut() { + // // println!("{:#?}", unsafe { device.common.vendor_device }); + // // println!("{:#?}", unsafe { device.common.header_type }); + // // println!("{:#?}\n", unsafe { device.common.device_type }); + + // if device.header.common().device_type.is_ahci() { + // let a = unsafe { + // PhysicalAddress::new_unchecked( + // device.header.general_device.bar5.address(), + // ) + // }; + + // println!( + // "Bus Master: {}, Interrupts Disable {}, I/O Space: {}, \ + // Memory Space: {}", + // device.header.common().command.is_bus_master(), + // device.header.common().command.is_interrupt_disable(), + // device.header.common().command.is_io_space(), + // device.header.common().command.is_memory_space() + // ); + + // println!( + // "Interrupt Line: {}, Interrupt Pin: {}", + // unsafe { device.header.general_device.interrupt_line }, + 
// unsafe { device.header.general_device.interrupt_pin } + // ); + + // let aligned = a.align_down(REGULAR_PAGE_ALIGNMENT); + // let hba = HBAMemoryRegisters::new(aligned).unwrap(); + // let _ = hba.probe_init(); + // let p = &mut hba.ports[0]; + + // let buf = + // unsafe { alloc_pages!(1) as *mut IdentityPacketData }; + + // p.identity_packet(buf); + + // let id = unsafe { + // core::ptr::read_volatile( + // (buf as usize + PHYSICAL_MEMORY_OFFSET) + // as *mut IdentityPacketData, + // ) + // }; + + // println!("{:?}", id); + + // println!("Cylinders: {}", id.cylinders); + // println!("Heads: {}", id.heads); + // println!("Sectors: {}", id.sectors); + + // println!("Serial: {:?}", &id.serial_number); + // println!("Model: {:?}", &id.model_num); + // println!("Firmware: {:?}", &id.firmware_rev); + // } + // } + loop { unsafe { - print!("{}", KEYBOARD.assume_init_mut().read_char() ; color = ColorCode::new(Color::Green, Color::Black)); + print!("{}", KEYBOARD.assume_init_mut().read_char() ; color + = ColorCode::new(Color::Green, Color::Black)); } } } diff --git a/kernel/src/memory/allocators/buddy.rs b/kernel/src/memory/allocators/buddy.rs new file mode 100644 index 0000000..4a15b29 --- /dev/null +++ b/kernel/src/memory/allocators/buddy.rs @@ -0,0 +1,266 @@ +use core::ptr::{self, NonNull}; + +use common::{ + address_types::PhysicalAddress, + enums::{BUDDY_MAX_ORDER, BuddyOrder, MemoryRegionType}, + write_volatile, +}; +use cpu_utils::structures::paging::PageTable; + +use crate::memory::{ + memory_map::ParsedMemoryMap, + page::{PAGES, UnassignedPage, meta::BuddyPageMeta}, +}; + +pub static mut BUDDY_ALLOCATOR: BuddyAllocator = BuddyAllocator { + freelist: [const { BuddyPageMeta::default() }; BUDDY_MAX_ORDER], +}; + +#[macro_export] +/// Allocate the amount of pages specified, and return the address +macro_rules! 
alloc_pages { + ($page_number: expr) => {{ + use $crate::memory::allocators::buddy::BUDDY_ALLOCATOR; + BUDDY_ALLOCATOR.alloc_pages($page_number) + }}; +} + +pub struct BuddyAllocator { + freelist: [BuddyPageMeta; BUDDY_MAX_ORDER], +} + +impl BuddyAllocator { + pub fn alloc_pages(&mut self, num_pages: usize) -> PhysicalAddress { + assert!( + num_pages <= (1 << BuddyOrder::MAX as usize), + "Size cannot be greater then: {}", + 1 << BuddyOrder::MAX as usize + ); + let order = (usize::BITS + - 1 + - num_pages.next_power_of_two().leading_zeros()) + as usize; + + let page = self.freelist[order].detach().unwrap_or_else(|| { + self.split_until(order) + .expect("Out of memory, swap is not implemented") + }); + + unsafe { page.as_ref().physical_address() } + } + + // pub fn free_pages(&self, address: usize) { + // let page_index = address / REGULAR_PAGE_SIZE; + // } + + /// This function assumes that the freelist at `wanted_order` is empty, + /// and won't check it. + pub fn split_until( + &mut self, + wanted_order: usize, + ) -> Option> { + let closet_order = ((wanted_order + 1)..BUDDY_MAX_ORDER) + .find(|i| self.freelist[*i].next.is_some())?; + + let initial_page = + self.freelist[closet_order].detach::<()>().unwrap(); + + Some(self.split_recursive( + initial_page, + closet_order, + wanted_order, + )) + } + + fn split_recursive( + &mut self, + page: NonNull, + current_order: usize, + target_order: usize, + ) -> NonNull { + debug_assert!( + target_order < current_order, + "Target order cannot be greater then current order" + ); + + if current_order == target_order { + return page; + } + + let (lhs, rhs) = unsafe { BuddyAllocator::split(page).unwrap() }; + + let next_order = current_order - 1; + self.freelist[next_order].attach(rhs); + + become self.split_recursive(lhs, next_order, target_order) + } + + /// This function will repeatedly merge a page with its buddy until no + /// further merge is possible. + pub fn merge_recursive(&self, page: NonNull) { + if let Some(merged) = + unsafe { 
BuddyAllocator::merge_with_buddy(page) } + { + become BuddyAllocator::merge_recursive(self, merged); + } + } + + pub fn alloc_table(&mut self) -> NonNull { + unsafe { + let address = self.alloc_pages(1).translate(); + ptr::write_volatile( + address.as_non_null::().as_ptr(), + PageTable::empty(), + ); + address.as_non_null::() + } + } + + /// The code_end number should be the end address of the code. + /// + /// This function will not put in the free list pages that hold + /// addresses from 0->code_end + pub fn init(&'static mut self, map: ParsedMemoryMap, code_end: usize) { + for area in map + .iter() + .filter(|a| a.region_type == MemoryRegionType::Usable) + { + let mut start = UnassignedPage::index_of( + (area.base_address as usize).into(), + ); + let end = UnassignedPage::index_of( + ((area.base_address + area.length) as usize).into(), + ); + + let mut prev = None; + + while start < end { + let largest_order = BuddyOrder::try_from( + ((end - start).ilog2().min(BuddyOrder::MAX as u32)) + as u8, + ) + .unwrap(); + + let curr = unsafe { &mut PAGES[start] }; + let next = unsafe { + &mut PAGES[start + ((1 << largest_order as usize) - 1)] + }; + unsafe { + (*curr.meta.buddy).next = + Some(NonNull::from_mut(next)); + (*curr.meta.buddy).prev = prev; + (*curr.meta.buddy).order = Some(largest_order); + } + prev = Some(NonNull::from_mut(curr)); + + self.freelist[largest_order as usize] + .attach(NonNull::from_mut(curr)); + + start += 1 << largest_order as usize; + } + } + } +} + +impl BuddyAllocator { + /// TODO: Make an unsafe split if relevant + /// + /// # Safety + /// This function does not attach the new references! + #[allow(clippy::type_complexity)] + unsafe fn split( + mut page: NonNull, + ) -> Option<(NonNull, NonNull)> { + // Reduce its order by one; both halves get the reduced order. + let prev_order = BuddyOrder::try_from( + unsafe { page.as_ref().meta.buddy.order? 
} as u8 - 1, + ) + .expect("Page order cannot be reduced"); + + write_volatile!( + (*page.as_mut().meta.buddy).order, + Some(prev_order) + ); + + let index = unsafe { + ((page.as_ref() as *const _ as usize - PAGES.as_ptr().addr()) + / size_of::()) + + (1 << prev_order as usize) + }; + + // Find its other half (the buddy) + let mut buddy = unsafe { NonNull::from_mut(&mut PAGES[index]) }; + + // Set the order of the buddy. + write_volatile!( + (*buddy.as_mut().meta.buddy).order, + Some(prev_order) + ); + + Some((page, buddy)) + } + + /// This function will detach the given page and its buddy from their + /// freelist, increase their order, and attach the result to the + /// increased order list. + unsafe fn merge_with_buddy( + page: NonNull, + ) -> Option> { + let buddy = BuddyAllocator::buddy_of(page)?; + + let next_order = BuddyOrder::try_from(unsafe { + page.as_ref().meta.buddy.order.unwrap() as u8 + 1 + }) + .unwrap(); + + BuddyAllocator::detach_from_mid(page); + BuddyAllocator::detach_from_mid(buddy); + + // Operate on the page whose address is lower. + let (mut left, mut right) = if page < buddy { + (page, buddy) + } else { + (buddy, page) + }; + + unsafe { + (*left.as_mut().meta.buddy).order = Some(next_order); + (*right.as_mut().meta.buddy) = BuddyPageMeta::default(); + }; + + Some(left) + } + + // TODO: This function will probably fail; the head of the page list + // should be made static, with the list starting from the second + // node — then this would work + fn detach_from_mid(page: NonNull) { + let (mut prev, next) = unsafe { + let p_ref = page.as_ref(); + ( + p_ref.meta.buddy.prev.expect("Page has no prev"), + p_ref.meta.buddy.next.expect("Page has no next"), + ) + }; + + unsafe { (*prev.as_mut().meta.buddy).next = Some(next) } + } + + fn buddy_of( + page: NonNull, + ) -> Option> { + let order = unsafe { page.as_ref().meta.buddy.order? 
}; + if let BuddyOrder::MAX = order { + None + } else { + unsafe { + let buddy_address = page.as_ref() as *const _ as usize + ^ ((1 << order as usize) + * size_of::()); + + Some(NonNull::new_unchecked( + buddy_address as *mut UnassignedPage, + )) + } + } + } +} diff --git a/kernel/src/memory/allocators/extensions.rs b/kernel/src/memory/allocators/extensions.rs new file mode 100644 index 0000000..ed66725 --- /dev/null +++ b/kernel/src/memory/allocators/extensions.rs @@ -0,0 +1,308 @@ +use core::{num::NonZero, ptr::NonNull}; + +use common::{ + address_types::{PhysicalAddress, VirtualAddress}, + constants::{ + BIG_PAGE_SIZE, HUGE_PAGE_SIZE, PAGE_DIRECTORY_ENTRIES, + PHYSICAL_MEMORY_OFFSET, + }, + enums::{PageSize, PageTableLevel}, + error::EntryError, + late_init::LateInit, +}; +use cpu_utils::structures::paging::{ + PageEntryFlags, PageTable, PageTableEntry, +}; +use extend::ext; +use strum::VariantArray; + +use common::error::TableError; +use cpu_utils::structures::paging::EntryIndex; + +use crate::memory::{ + allocators::buddy::BUDDY_ALLOCATOR, page::map::PageMap, +}; + +#[ext] +pub impl PhysicalAddress { + fn map( + &self, + address: VirtualAddress, + flags: PageEntryFlags, + page_size: PageSize, + ) { + address.map(*self, flags, page_size) + } + + fn translate(&self) -> VirtualAddress { + unsafe { + VirtualAddress::new_unchecked( + PHYSICAL_MEMORY_OFFSET + self.as_usize(), + ) + } + } +} + +#[ext] +pub impl PageTableEntry { + /// This function will return a table mapped in this + /// entry if there is one. + /// + /// Else, it will override what is inside the entry and + /// map a new table to it so valid table is guaranteed + /// to be returned. 
+ fn force_resolve_table_mut(&mut self) -> Option> { + match self.mapped_table() { + Ok(table) => Some(table), + Err(EntryError::NotATable) => None, + Err(EntryError::NoMapping) => unsafe { + let resolved_table = BUDDY_ALLOCATOR.alloc_table(); + self.map_unchecked( + PhysicalAddress::new_unchecked( + resolved_table.addr().get(), + ), + PageEntryFlags::table_flags(), + ); + Some(self.mapped_unchecked().as_non_null::()) + }, + } + } +} + +#[ext] +pub impl VirtualAddress { + /// Map this `virtual address` into the given + /// `physical_address` with the current page table, + /// obtained from `cr3` if a page table for the + /// given virtual address doesn't exist, a new table + /// **will** be created for it + /// + /// # Parameters + /// + /// - `address`: The physical address to map this to, this address is + /// needed + /// - `page_size`: The size of the page from the [`PageSize`] enum + fn map( + &self, + address: PhysicalAddress, + flags: PageEntryFlags, + page_size: PageSize, + ) { + if address.is_aligned(page_size.alignment()) + && self.is_aligned(page_size.alignment()) + { + let mut table = PageTable::current_table(); + for level in + PageTableLevel::VARIANTS[0..=page_size as usize].iter() + { + let index = self.index_of(*level); + let entry = unsafe { &mut table.as_mut().entries[index] }; + let resolved_table = entry + .force_resolve_table_mut() + .expect("Tried to create table on a mapped entry"); + table = resolved_table; + } + unsafe { + table.as_mut().entries[self.index_of( + PageTableLevel::VARIANTS[page_size as usize + 1], + )] + .map(address, flags); + } + } else { + panic!( + "address alignment doesn't match page type alignment, \ + todo! 
raise a page fault" + ) + } + } + + fn set_flags( + &self, + flags: PageEntryFlags, + page_size: PageSize, + num_pages: NonZero, + ) -> Result<(), EntryError> { + let address_index = self.index_of(page_size.min_level()); + + debug_assert!( + address_index + num_pages.get() <= PAGE_DIRECTORY_ENTRIES, + "There are only 512 entries inside a table" + ); + + let mut table = self.walk(page_size.min_level())?; + + unsafe { + table + .as_mut() + .entries + .iter_mut() + .skip(address_index) + .take(num_pages.get()) + .for_each(|entry| entry.set_flags(flags)); + } + + Ok(()) + } + + /// Return the table that is reached by walking down to the wanted level + fn walk( + &self, + wanted: PageTableLevel, + ) -> Result, EntryError> { + let mut table = PageTable::current_table(); + + for level in PageTableLevel::VARIANTS[0..wanted as usize].iter() { + let entry = + unsafe { &table.as_ref().entries[self.index_of(*level)] }; + table = entry.mapped_table()?; + } + + Ok(table) + } + + fn translate(&self) -> Option { + let mut table = PageTable::current_table(); + + for level in PageTableLevel::VARIANTS.iter() { + let entry = + unsafe { &table.as_mut().entries[self.index_of(*level)] }; + match entry.mapped_table() { + Ok(mapped) => table = mapped, + Err(EntryError::NotATable) => { + return unsafe { Some(entry.mapped_unchecked()) }; + } + Err(EntryError::NoMapping) => return None, + } + } + unreachable!() + } +} + +#[ext] +pub impl PageTable { + // TODO: turn into a tail called function with become + /// Find an available page of the given size. 
+ // ANCHOR: page_table_find_available_page + #[cfg(target_arch = "x86_64")] + fn find_available_page( + page_size: PageSize, + ) -> Result { + const TOTAL_LEVELS: usize = PageTableLevel::VARIANTS.len(); + let mut level_indices = [0usize; TOTAL_LEVELS]; + let mut page_tables = [Self::current_table(); TOTAL_LEVELS]; + let mut current_level = PageTableLevel::PML4; + loop { + let mut current_table = + page_tables[TOTAL_LEVELS - current_level as usize]; + + let ti = unsafe { + current_table.as_mut().try_fetch_table( + level_indices[TOTAL_LEVELS - current_level as usize], + current_level, + page_size, + ) + }; + + let next_table = match ti { + EntryIndex::OutOfEntries | EntryIndex::PageDoesNotFit => { + current_level = current_level.prev()?; + level_indices + [TOTAL_LEVELS - current_level as usize] += 1; + continue; + } + EntryIndex::Entry(entry) => { + level_indices[TOTAL_LEVELS - current_level as usize] = + entry.table_index(); + unsafe { + entry.mapped_unchecked().as_non_null::() + } + } + EntryIndex::Index(i) => { + level_indices[TOTAL_LEVELS - current_level as usize] = + i; + return Ok(VirtualAddress::from_indices( + level_indices, + )); + } + }; + let next_level = current_level + .next() + .expect("Can't go next on a first level table"); + page_tables[TOTAL_LEVELS - next_level as usize] = next_table; + current_level = next_level; + } + } + // ANCHOR_END: page_table_find_available_page + + // TODO: turn into a tail called function with become + /// Map the region of memory from 0 to `mem_size_bytes` + /// at the top of the page table so that + /// + /// ```rust + /// VirtualAddress(0xffff800000000000) -> PhysicalAddress(0) + /// ``` + /// + /// TODO: ADD SUPPORT FOR FULL FLAG + #[allow(unsafe_op_in_unsafe_fn)] + fn map_physical_memory(&mut self, mem_size_bytes: usize) { + let mut second_level_entries_count = + (mem_size_bytes / BIG_PAGE_SIZE) + 1; + let mut third_level_entries_count = + second_level_entries_count.div_ceil(HUGE_PAGE_SIZE) + 1; + let 
forth_level_entries_count = third_level_entries_count + .div_ceil(PAGE_DIRECTORY_ENTRIES) + .clamp(1, 256); + let mut next_mapped = unsafe { PhysicalAddress::new_unchecked(0) }; + for forth_entry in &mut self.entries[(PAGE_DIRECTORY_ENTRIES / 2) + ..(forth_level_entries_count + (PAGE_DIRECTORY_ENTRIES / 2))] + { + let mut third_table = + forth_entry.force_resolve_table_mut().unwrap(); + + for third_entry in unsafe { + &mut third_table.as_mut().entries[0 + ..third_level_entries_count + .min(PAGE_DIRECTORY_ENTRIES)] + } { + let mut second_table = + third_entry.force_resolve_table_mut().unwrap(); + + third_level_entries_count -= 1; + for second_entry in unsafe { + &mut second_table.as_mut().entries[0 + ..second_level_entries_count + .min(PAGE_DIRECTORY_ENTRIES)] + } { + if !second_entry.is_present() { + unsafe { + second_entry.map( + next_mapped, + PageEntryFlags::huge_page_flags(), + ); + } + } + next_mapped += BIG_PAGE_SIZE.into(); + second_level_entries_count -= 1; + } + } + } + } +} + +#[ext] +pub impl PageSize { + fn default_flags(&self) -> PageEntryFlags { + match self { + PageSize::Regular => PageEntryFlags::regular_page_flags(), + PageSize::Big | PageSize::Huge => { + PageEntryFlags::huge_page_flags() + } + } + } +} + +#[ext] +pub impl PageMap { + /// Reallocates the page array on the buddy allocator. 
+ fn reallocate(init: &'static mut LateInit) {} +} diff --git a/kernel/src/memory/allocators/mod.rs b/kernel/src/memory/allocators/mod.rs index e6b41e5..6b2929b 100644 --- a/kernel/src/memory/allocators/mod.rs +++ b/kernel/src/memory/allocators/mod.rs @@ -1 +1,3 @@ -pub mod page_allocator; +pub mod buddy; +pub mod extensions; +pub mod slab; diff --git a/kernel/src/memory/allocators/page_allocator/allocator.rs b/kernel/src/memory/allocators/page_allocator/allocator.rs deleted file mode 100644 index 4bde1db..0000000 --- a/kernel/src/memory/allocators/page_allocator/allocator.rs +++ /dev/null @@ -1,241 +0,0 @@ -use core::{ - alloc::{AllocError, Allocator, Layout}, - cell::UnsafeCell, - mem::MaybeUninit, - ptr::{self, NonNull}, -}; - -use common::{ - address_types::{PhysicalAddress, VirtualAddress}, - bitmap::{BitMap, ContiguousBlockLayout, Position}, - constants::{ - FIRST_STAGE_OFFSET, PAGE_ALLOCATOR_OFFSET, PHYSICAL_MEMORY_OFFSET, - REGULAR_PAGE_ALIGNMENT, REGULAR_PAGE_SIZE, - }, - enums::MemoryRegionType, -}; -use cpu_utils::structures::paging::PageTable; - -use crate::parsed_memory_map; - -#[derive(Debug)] -// TODO: This is not thread safe, probably should use Mutex -// in the future -/// Physical page allocator implemented with a bitmap, every -/// bit corresponds to a physical page -pub struct PhysicalPageAllocator(UnsafeCell); - -impl Clone for PhysicalPageAllocator { - fn clone(&self) -> Self { - unsafe { - let bitmap = self.map_mut(); - Self(UnsafeCell::new(bitmap.clone())) - } - } -} - -impl PhysicalPageAllocator { - /// Creates a new allocator from the `bitmap_address` - /// and the `memory_size`. 
- /// - /// # Parameters - /// - /// - `bitmap_address`: Virtual address that is identity mapped and - /// will use to store the map - /// - `memory_size`: Memory size in bytes - #[allow(unsafe_op_in_unsafe_fn)] - pub const unsafe fn new( - bitmap_address: VirtualAddress, - memory_size: usize, - ) -> PhysicalPageAllocator { - let size_in_pages = memory_size / REGULAR_PAGE_SIZE; - let map_size = size_in_pages / u64::BITS as usize; - PhysicalPageAllocator(UnsafeCell::new(BitMap::new( - bitmap_address, - map_size, - ))) - } - - pub const fn address_position( - address: PhysicalAddress, - ) -> Option { - if address.is_aligned(REGULAR_PAGE_ALIGNMENT) { - let bit_index = address.as_usize() / REGULAR_PAGE_SIZE; - return Some(Position::from_abs_bit_index(bit_index)); - } - None - } - - unsafe fn map(&self) -> &BitMap { - unsafe { self.0.as_ref_unchecked() } - } - - #[allow(clippy::mut_from_ref)] - unsafe fn map_mut(&self) -> &mut BitMap { - unsafe { self.0.as_mut_unchecked() } - } - - pub fn init(uninit: &'static mut MaybeUninit) { - unsafe { - let memory_size = parsed_memory_map!() - .iter() - .map(|x| x.length as usize) - .sum::(); - uninit.write(Self::new( - PhysicalAddress::new_unchecked(PAGE_ALLOCATOR_OFFSET) - .translate(), - memory_size, - )); - let initialized = uninit.assume_init_mut(); - - // Set the null page - initialized - .map_mut() - .set_bit(&Position::new_unchecked(0, 0)); - - let start_address = const { - PhysicalAddress::new_unchecked(FIRST_STAGE_OFFSET as usize) - .align_down(REGULAR_PAGE_ALIGNMENT) - }; - let start_position = - Self::address_position(start_address).unwrap(); - // Allocate the addresses that are used for the - // code, and for other variables. 
- let end_address = PhysicalAddress::new_unchecked( - PAGE_ALLOCATOR_OFFSET - + core::mem::size_of_val(initialized.map().map), - ) - .align_up(REGULAR_PAGE_ALIGNMENT); - let size_bits = ((end_address - start_address) - / REGULAR_PAGE_SIZE) - .as_usize(); - let block = ContiguousBlockLayout::from_start_size( - &start_position, - size_bits, - ); - initialized - .map_mut() - .set_contiguous_block(&start_position, &block); - for region in parsed_memory_map!() { - if region.region_type != MemoryRegionType::Usable { - let start_address_aligned = - PhysicalAddress::new_unchecked( - region.base_address as usize - & (u64::MAX - ^ (REGULAR_PAGE_SIZE as u64 - 1)) - as usize, - ); - let start_position = - Self::address_position(start_address_aligned) - .unwrap(); - let size_bits = - region.length as usize / REGULAR_PAGE_SIZE; - let block = ContiguousBlockLayout::from_start_size( - &start_position, - size_bits, - ); - initialized - .map_mut() - .set_contiguous_block(&start_position, &block); - } - } - }; - } - - /// Resolves `map_index` and `bit_index` into actual - /// physical address - pub fn resolve_position(p: &Position) -> PhysicalAddress { - unsafe { - PhysicalAddress::new_unchecked( - ((p.map_index * (u64::BITS as usize)) + p.bit_index) - * REGULAR_PAGE_SIZE, - ) - } - } - - pub fn resolve_address(address: PhysicalAddress) -> Position { - let starting_bit_idx = address.as_usize() / REGULAR_PAGE_SIZE; - Position::from_abs_bit_index(starting_bit_idx) - } - - pub fn available_memory(&self) -> usize { - unsafe { self.map().count_zeros() * REGULAR_PAGE_SIZE } - } - - /// Return the physical address of this table - pub(super) fn alloc_table(&self) -> &'static mut PageTable { - let free_block = unsafe { self.map().find_free_block(1) }; - - match free_block { - Some((p, _)) => unsafe { - let physical_address = Self::resolve_position(&p); - - ptr::write( - physical_address.translate().as_mut_ptr::(), - PageTable::empty(), - ); - - self.map_mut().set_bit(&p); - - &mut 
*physical_address.as_mut_ptr::() - }, - - None => panic!( - "No physical memory is available to allocate this table" - ), - } - } -} - -#[allow(unsafe_op_in_unsafe_fn)] -unsafe impl Allocator for PhysicalPageAllocator { - fn allocate( - &self, - layout: Layout, - ) -> Result, AllocError> { - unsafe { - match layout.align_to(REGULAR_PAGE_ALIGNMENT.as_usize()) { - Ok(layout) => { - match self - .map() - .find_free_block(layout.size() / REGULAR_PAGE_SIZE) - { - Some((p, block)) => { - self.map_mut() - .set_contiguous_block(&p, &block); - Ok(NonNull::slice_from_raw_parts( - NonNull::new_unchecked( - Self::resolve_position(&p) - .translate() - .as_mut_ptr::(), - ), - layout.size(), - )) - } - None => Err(AllocError), - } - } - Err(_) => Err(AllocError), - } - } - } - - unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { - if let Ok(layout) = - layout.align_to(REGULAR_PAGE_ALIGNMENT.as_usize()) - { - let start_position = - Self::resolve_address(PhysicalAddress::new_unchecked( - ptr.as_ptr() as usize - PHYSICAL_MEMORY_OFFSET, - )); - let block = ContiguousBlockLayout::from_start_size( - &start_position, - layout.size() / REGULAR_PAGE_SIZE, - ); - self.map_mut() - .unset_contiguous_block(&start_position, &block); - } - } -} - -unsafe impl Sync for PhysicalPageAllocator {} diff --git a/kernel/src/memory/allocators/page_allocator/extensions.rs b/kernel/src/memory/allocators/page_allocator/extensions.rs deleted file mode 100644 index b6ad6a5..0000000 --- a/kernel/src/memory/allocators/page_allocator/extensions.rs +++ /dev/null @@ -1,171 +0,0 @@ -use super::ALLOCATOR; -use common::{ - address_types::{PhysicalAddress, VirtualAddress}, - constants::{ - BIG_PAGE_SIZE, PAGE_ALLOCATOR_OFFSET, PAGE_DIRECTORY_ENTRIES, - PHYSICAL_MEMORY_OFFSET, - }, - enums::{PageSize, PageTableLevel}, -}; -use cpu_utils::structures::paging::{ - PageEntryFlags, PageTable, PageTableEntry, -}; -use extend::ext; -#[ext] -pub impl PhysicalAddress { - fn map( - &self, - address: VirtualAddress, - 
flags: PageEntryFlags, - page_size: PageSize, - ) { - address.map(*self, flags, page_size) - } - - fn translate(&self) -> VirtualAddress { - unsafe { - VirtualAddress::new_unchecked( - PHYSICAL_MEMORY_OFFSET + self.as_usize(), - ) - } - } -} - -#[ext] -pub impl PageTableEntry { - /// This function will return a table mapped in this - /// entry if there is one. - /// - /// Else, it will override what is inside the entry and - /// map a new table to it so valid table is guaranteed - /// to be returned. - fn force_resolve_table_mut(&mut self) -> &mut PageTable { - if let Ok(table) = self.mapped_table_mut() { - table - } else { - let resolved_table = - unsafe { ALLOCATOR.assume_init_ref().alloc_table() }; - unsafe { - self.map_unchecked( - PhysicalAddress::new_unchecked( - resolved_table.address().as_usize(), - ), - PageEntryFlags::table_flags(), - ); - } - unsafe { - &mut *self.mapped_unchecked().as_mut_ptr::() - } - } - } -} - -#[ext] -pub impl VirtualAddress { - /// Map this `virtual address` into the given - /// `physical_address` with the current page table, - /// obtained from `cr3` if a page table for the - /// given virtual address doesn't exist, a new table - /// **will** be created for it - /// - /// # Parameters - /// - /// - `address`: The physical address to map this to, this address is - /// needed - /// - `page_size`: The size of the page from the [`PageSize`] enum - #[allow(static_mut_refs)] - fn map( - &self, - address: PhysicalAddress, - flags: PageEntryFlags, - page_size: PageSize, - ) { - if address.is_aligned(page_size.alignment()) - && self.is_aligned(page_size.alignment()) - { - let mut table = PageTable::current_table_mut(); - for level in PageTableLevel::iterator() { - let index = self.index_of(*level); - let entry = &mut table.entries[index]; - let resolved_table = entry.force_resolve_table_mut(); - table = resolved_table; - } - unsafe { - table.entries[self - .index_of((3 - page_size as u8).try_into().unwrap())] - .map_unchecked(address, 
flags); - } - } else { - panic!( - "address alignment doesn't match page type alignment, \ - todo! raise a page fault" - ) - } - } - - fn translate(&self) -> PhysicalAddress { - todo!() - } -} - -#[ext] -pub impl PageTable { - /// Map the region of memory from 0 to `mem_size_bytes` - /// at the top of the page table so that ```rust - /// VirtualAddress(0xffff800000000000) -> - /// PhysicalAddress(0) ``` - /// - /// TODO: ADD SUPPORT FOR FULL FLAG - #[allow(unsafe_op_in_unsafe_fn)] - fn map_physical_memory(&mut self, mem_size_bytes: usize) { - let mut second_level_entries_count = - (mem_size_bytes / BIG_PAGE_SIZE).max(1); - let mut third_level_entries_count = second_level_entries_count - .div_ceil(PAGE_ALLOCATOR_OFFSET) - .max(1); - let forth_level_entries_count = third_level_entries_count - .div_ceil(PAGE_DIRECTORY_ENTRIES) - .clamp(1, 256); - let mut next_mapped = unsafe { PhysicalAddress::new_unchecked(0) }; - for forth_entry in &mut self.entries[(PAGE_DIRECTORY_ENTRIES / 2) - ..(forth_level_entries_count + (PAGE_DIRECTORY_ENTRIES / 2))] - { - let third_table = forth_entry.force_resolve_table_mut(); - - for third_entry in &mut third_table.entries - [0..third_level_entries_count.min(PAGE_DIRECTORY_ENTRIES)] - { - let second_table = third_entry.force_resolve_table_mut(); - - third_level_entries_count -= 1; - for second_entry in &mut second_table.entries[0 - ..second_level_entries_count - .min(PAGE_DIRECTORY_ENTRIES)] - { - if !second_entry.is_present() { - unsafe { - second_entry.map( - next_mapped, - PageEntryFlags::huge_page_flags(), - ); - } - } - next_mapped += BIG_PAGE_SIZE.into(); - second_level_entries_count -= 1; - } - } - } - } -} - -#[ext] -pub impl PageSize { - fn default_flags(&self) -> PageEntryFlags { - match self { - PageSize::Regular => PageEntryFlags::regular_page_flags(), - PageSize::Big | PageSize::Huge => { - PageEntryFlags::huge_page_flags() - } - } - } -} diff --git a/kernel/src/memory/allocators/page_allocator/mod.rs 
b/kernel/src/memory/allocators/page_allocator/mod.rs deleted file mode 100644 index 13789fb..0000000 --- a/kernel/src/memory/allocators/page_allocator/mod.rs +++ /dev/null @@ -1,24 +0,0 @@ -pub mod allocator; -pub mod extensions; - -use allocator::PhysicalPageAllocator; -use core::mem::MaybeUninit; - -pub static mut ALLOCATOR: MaybeUninit = - MaybeUninit::uninit(); - -#[macro_export] -macro_rules! alloc_pages { - ($page_number: literal) => {{ - use $crate::memory::allocators::page_allocator::ALLOCATOR; - ALLOCATOR - .assume_init_ref() - .allocate(Layout::from_size_align_unchecked( - REGULAR_PAGE_SIZE * $page_number, - REGULAR_PAGE_ALIGNMENT.as_usize(), - )) - .unwrap() - .addr() - .get() - }}; -} diff --git a/kernel/src/memory/allocators/slab.rs b/kernel/src/memory/allocators/slab.rs new file mode 100644 index 0000000..886df9d --- /dev/null +++ b/kernel/src/memory/allocators/slab.rs @@ -0,0 +1,187 @@ +pub mod cache; +pub mod descriptor; +pub mod macros; +pub mod traits; + +use learnix_macros::generate_generics; + +use crate::{ + define_slab_system, + memory::{ + allocators::slab::{ + cache::SlabCache, + descriptor::SlabDescriptor, + traits::{Generic, Slab, SlabPosition}, + }, + page::Page, + unassigned::{AssignSlab, UnassignSlab}, + }, +}; +use core::{ + alloc::{AllocError, Allocator}, + ptr::NonNull, +}; + +generate_generics!( + 8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096, 8192 +); + +define_slab_system!( + SlabDescriptor<()>, + Generic8, + Generic16, + Generic32, + Generic64, + Generic96, + Generic128, + Generic192, + Generic256, + Generic512, + Generic1024, + Generic2048, + Generic4096, + Generic8192, +); + +pub static mut SLAB_ALLOCATOR: SlabAllocator = SlabAllocator::new(); + +impl SlabAllocator { + pub fn slab_of(&self) -> NonNull> { + self.slabs[T::SLAB_POSITION].assign::() + } + + pub fn kmalloc(&self) -> NonNull { + let mut slab = self.slab_of::(); + unsafe { slab.as_mut().alloc() } + } + + pub fn kfree(&self, ptr: NonNull) { + let page = 
unsafe { Page::::from_virt(ptr.into()).as_ref() }; + + let descriptor = unsafe { page.meta.slab.freelist }; + + unsafe { descriptor.assign::().as_mut().dealloc(ptr) }; + } +} + +#[extend::ext] +pub impl NonNull { + fn into_u8(&self) -> NonNull<[u8]> { + unsafe { + let data = NonNull::new_unchecked(self.as_ptr() as *mut u8); + let size = self.as_ref().size(); + NonNull::slice_from_raw_parts(data, size) + } + } + + fn from_u8(data: NonNull) -> NonNull { + unsafe { NonNull::new_unchecked(data.as_ptr() as *mut T) } + } +} + +unsafe impl Allocator for SlabAllocator { + fn allocate( + &self, + layout: core::alloc::Layout, + ) -> Result, core::alloc::AllocError> { + if layout.size() < layout.align() { + return Err(AllocError); + } + + match layout.size() { + Generic8::START..=Generic8::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic16::START..=Generic16::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic32::START..=Generic32::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic64::START..=Generic64::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic96::START..=Generic96::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic128::START..=Generic128::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic192::START..=Generic192::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic256::START..=Generic256::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic512::START..=Generic512::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic1024::START..=Generic1024::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic2048::START..=Generic2048::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic4096::START..=Generic4096::END => { + Ok(self.kmalloc::().into_u8()) + } + Generic8192::START..=Generic8192::END => { + Ok(self.kmalloc::().into_u8()) + } + _ => Err(AllocError), + } + } + + unsafe fn deallocate( + &self, + ptr: core::ptr::NonNull, + layout: core::alloc::Layout, + ) { + match layout.size() { + Generic8::START..=Generic8::END => { + 
self.kfree::(NonNull::from_u8(ptr)) + } + Generic16::START..=Generic16::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic32::START..=Generic32::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic64::START..=Generic64::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic96::START..=Generic96::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic128::START..=Generic128::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic192::START..=Generic192::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic256::START..=Generic256::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic512::START..=Generic512::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic1024::START..=Generic1024::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic2048::START..=Generic2048::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic4096::START..=Generic4096::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + Generic8192::START..=Generic8192::END => { + self.kfree::(NonNull::from_u8(ptr)) + } + _ => unreachable!(), + } + } +} +unsafe impl Send for SlabDescriptor {} +unsafe impl Sync for SlabDescriptor {} +unsafe impl Send for SlabCache {} +unsafe impl Sync for SlabCache {} diff --git a/kernel/src/memory/allocators/slab/cache.rs b/kernel/src/memory/allocators/slab/cache.rs new file mode 100644 index 0000000..eebc815 --- /dev/null +++ b/kernel/src/memory/allocators/slab/cache.rs @@ -0,0 +1,159 @@ +use core::{num::NonZero, ptr::NonNull}; + +use common::address_types::VirtualAddress; + +use crate::memory::{ + allocators::{ + extensions::VirtualAddressExt, + slab::{SLAB_ALLOCATOR, traits::Slab}, + }, + page::UnassignedPage, + unassigned::{AssignSlab, UnassignSlab}, +}; + +use super::{descriptor::SlabDescriptor, traits::SlabCacheConstructor}; + +#[derive(Clone, Debug)] +pub struct SlabCache { + pub buddy_order: usize, + pub free: Option>>, + pub partial: Option>>, + pub full: Option>>, +} + +impl UnassignSlab for 
NonNull> { + type Target = NonNull>; + + fn as_unassigned(&self) -> Self::Target { + unsafe { + NonNull::new_unchecked(self.as_ptr() as *mut SlabCache<()>) + } + } +} + +impl SlabCache { + /// Allocate a new slab descriptor, attaches it to the free slab list, + /// and initializes its page. + pub fn grow(&mut self) { + // Allocate a new slab descriptor for this slab + let mut slab = unsafe { + SLAB_ALLOCATOR.kmalloc::>().assign::() + }; + + unsafe { + *slab.as_mut() = + SlabDescriptor::::new(self.buddy_order, self.free) + } + + self.take_ownership(slab); + + self.free = Some(slab); + } + + pub fn take_ownership(&self, slab: NonNull>) { + let slab_address: VirtualAddress = + unsafe { slab.as_ref().objects.as_ptr().addr().into() }; + + slab_address + .set_flags(T::PFLAGS, T::PSIZE, unsafe { + NonZero::::new_unchecked(1 << self.buddy_order) + }) + .unwrap(); + + let slab_page = + unsafe { UnassignedPage::from_virt(slab_address).as_mut() }; + + // Set owner and freelist. + unsafe { + (*slab_page.meta.slab).freelist = slab.as_unassigned(); + (*slab_page.meta.slab).owner = + NonNull::from_ref(self).as_unassigned(); + }; + } + + pub fn alloc(&mut self) -> NonNull { + if let Some(mut partial) = self.partial { + let partial = unsafe { partial.as_mut() }; + + let allocation = partial.alloc(); + + if partial.next_free_idx.is_none() { + self.partial = partial.next; + partial.next = self.full; + self.full = Some(NonNull::from_mut(partial)); + } + return allocation; + } + if let Some(mut free) = self.free { + let free = unsafe { free.as_mut() }; + + let allocation = free.alloc(); + + self.free = free.next; + free.next = self.partial; + self.partial = Some(NonNull::from_mut(free)); + + return allocation; + } + + todo!( + "Handle cases where partial and free are full, and \ + allocation from the page allocator is needed."
+ ) + pub fn dealloc(&self, _ptr: NonNull) { + todo!() + } +} + +impl SlabCache<()> { + pub fn assign(&self) -> NonNull> { + unsafe { + NonNull::new_unchecked(self as *const _ as *mut SlabCache) + } + } +} + +impl SlabCacheConstructor for SlabCache { + default fn new(buddy_order: usize) -> SlabCache { + SlabCache { + buddy_order, + free: None, + partial: None, + full: None, + } + } +} + +impl SlabCacheConstructor for SlabCache> { + fn new(buddy_order: usize) -> SlabCache> { + let partial = + SlabDescriptor::>::initial_descriptor( + buddy_order, + ); + + // This assumption can be made, because the created cache in + // this function will go to the constant position on the slab + // array defined with the `SlabPosition` array + let mut future_owner = + unsafe { SLAB_ALLOCATOR.slab_of::>() }; + + let cache = SlabCache { + buddy_order, + free: None, + partial: Some(partial), + full: None, + }; + + // Only in this function, we initialize the global array in the + // new function. + // + // Because then we can use the `take_ownership` function + unsafe { + *future_owner.as_mut() = cache.clone(); + future_owner.as_mut().take_ownership(partial); + } + + cache + } +} diff --git a/kernel/src/memory/allocators/slab/descriptor.rs b/kernel/src/memory/allocators/slab/descriptor.rs new file mode 100644 index 0000000..2844f74 --- /dev/null +++ b/kernel/src/memory/allocators/slab/descriptor.rs @@ -0,0 +1,160 @@ +use crate::{ + alloc_pages, + memory::{ + allocators::slab::traits::Slab, + unassigned::{AssignSlab, UnassignSlab}, + }, +}; +use common::constants::REGULAR_PAGE_SIZE; +use core::{ + fmt::Debug, + mem::{ManuallyDrop, size_of}, + ptr::NonNull, +}; +use nonmax::NonMaxU16; + +/// Preallocated object in the slab allocator.
+pub union PreallocatedObject { + pub allocated: ManuallyDrop, + pub next_free_idx: Option, +} + +impl Debug for PreallocatedObject { + fn fmt(&self, _f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + Ok(()) + } +} + +#[derive(Debug, Clone)] +pub struct SlabDescriptor { + pub next_free_idx: Option, + pub total_allocated: u16, + pub objects: NonNull<[PreallocatedObject]>, + pub next: Option>>, +} + +impl AssignSlab for NonNull> { + type Target = NonNull>; + + fn assign(&self) -> NonNull> { + unsafe { + NonNull::new_unchecked(self.as_ptr() as *mut SlabDescriptor) + } + } +} + +impl UnassignSlab for NonNull> { + type Target = NonNull>; + + fn as_unassigned(&self) -> Self::Target { + unsafe { + NonNull::new_unchecked(self.as_ptr() as *mut SlabDescriptor<()>) + } + } +} + +impl SlabDescriptor { + /// Create a new slab descriptor. + /// + /// # Safety + /// This function is marked as unsafe because it does not initialize + /// the page that the allocation is on. + /// + /// This function is meant to be called from the [`grow`] + /// function inside slab cache. 
(Which is safe and does initialize + /// the page) + pub unsafe fn new( + order: usize, + next: Option>>, + ) -> SlabDescriptor { + let address = unsafe { alloc_pages!(1 << order).translate() }; + + let mut objects = NonNull::slice_from_raw_parts( + address.as_non_null::>(), + ((1 << order) * REGULAR_PAGE_SIZE) + / size_of::>(), + ); + + for (i, object) in + unsafe { objects.as_mut() }.iter_mut().enumerate() + { + *object = PreallocatedObject { + next_free_idx: Some(unsafe { + NonMaxU16::new_unchecked(i as u16 + 1) + }), + } + } + + unsafe { + objects.as_mut().last_mut().unwrap().next_free_idx = None + }; + + SlabDescriptor { + next_free_idx: Some(unsafe { NonMaxU16::new_unchecked(0) }), + total_allocated: 0, + objects, + next, + } + } + + pub fn alloc(&mut self) -> NonNull { + debug_assert!( + self.next_free_idx.is_some(), + "Called allocate on a full slab" + ); + + let idx = self.next_free_idx.unwrap().get() as usize; + let preallocated = unsafe { &mut self.objects.as_mut()[idx] }; + + self.next_free_idx = unsafe { preallocated.next_free_idx }; + + self.total_allocated += 1; + + unsafe { NonNull::from_mut(&mut preallocated.allocated) } + } + + // TODO: In tests remember to implement something on T that implements + // drop and see that when freeing the memory it is called + pub unsafe fn dealloc(&mut self, ptr: NonNull) { + todo!("Remember to call drop on the item"); + + let freed_index = (ptr.as_ptr().addr() + - self.objects.as_ptr().addr()) + / size_of::>(); + + unsafe { + self.objects.as_mut()[freed_index].next_free_idx = + self.next_free_idx; + }; + self.next_free_idx = + unsafe { Some(NonMaxU16::new_unchecked(freed_index as u16)) }; + + self.total_allocated -= 1; + } +} + +impl SlabDescriptor> { + /// Return a pointer to the initial descriptor after it allocated + /// itself. + /// + /// The pointer that is returned by this function contains an already + /// initialized descriptor that allocates itself.
+ pub fn initial_descriptor( + order: usize, + ) -> NonNull>> { + let mut descriptor = unsafe { + SlabDescriptor::>::new(order, None) + }; + + let mut self_allocation = descriptor.alloc(); + + unsafe { + *self_allocation.as_mut() = NonNull::from_ref(&descriptor) + .as_unassigned() + .as_ref() + .clone() + } + + self_allocation.assign::>() + } +} diff --git a/kernel/src/memory/allocators/slab/macros.rs b/kernel/src/memory/allocators/slab/macros.rs new file mode 100644 index 0000000..994f2d2 --- /dev/null +++ b/kernel/src/memory/allocators/slab/macros.rs @@ -0,0 +1,71 @@ +#[macro_export] +macro_rules! register_slabs { + ($($t:ty),* $(,)?) => { + $crate::register_slabs!(@step 0; $($t),*); + }; + + (@step $idx:expr; $head:ty, $($tail:ty),+) => { + impl $crate::memory::allocators::slab::traits::SlabPosition for $head { + const SLAB_POSITION: usize = $idx; + } + + impl $crate::memory::allocators::slab::traits::Slab for $head {} + + $crate::register_slabs!(@step $idx + 1; $($tail),*); + }; + + (@step $idx:expr; $head:ty) => { + impl $crate::memory::allocators::slab::traits::SlabPosition for $head { + const SLAB_POSITION: usize = $idx; + } + + impl $crate::memory::allocators::slab::traits::Slab for $head {} + }; + + (@step $idx:expr; ) => {}; +} + +#[macro_export] +macro_rules! define_slab_system { + ($($t:ty),* $(,)?) 
=> { + use common::constants::REGULAR_PAGE_SIZE; + use $crate::memory::allocators::slab::traits::SlabCacheConstructor; + + $crate::register_slabs!($($t),*); + + const COUNT: usize = [$(stringify!($t)),*].len(); + + pub struct SlabAllocator { + slabs: [common::late_init::LateInit>; COUNT] + } + + impl SlabAllocator { + pub const fn new() -> Self { + Self { + slabs: [ + $({ + let _ = stringify!($t); + common::late_init::LateInit::uninit() + }),* + ] + } + } + + pub fn init(&'static mut self) { + $( + let index = <$t>::SLAB_POSITION; + + let initialized = SlabCache::<$t>::new(size_of::<$t>().div_ceil(REGULAR_PAGE_SIZE)); + + let unassigned = NonNull::from_ref(&initialized).as_unassigned(); + + self.slabs[index].write(unsafe { unassigned.as_ref().clone() }); + )* + } + } + } +} + +// TODO implement reverse lookup with an enum that will automatically be +// generated and check the code generated on compiler explorer. if +// interesting, write on it on the book diff --git a/kernel/src/memory/allocators/slab/preallocated.rs b/kernel/src/memory/allocators/slab/preallocated.rs new file mode 100644 index 0000000..e69de29 diff --git a/kernel/src/memory/allocators/slab/traits.rs b/kernel/src/memory/allocators/slab/traits.rs new file mode 100644 index 0000000..16dfc9a --- /dev/null +++ b/kernel/src/memory/allocators/slab/traits.rs @@ -0,0 +1,53 @@ +use common::enums::PageSize; +use cpu_utils::structures::paging::PageEntryFlags; + +/// Get the position on the slab array, for a slab of the given type. +/// +/// Shouldn't implement this trait manually; it is implemented +/// via the `define_slab_system` macro. 
+pub trait Slab: 'static + Sized + SlabPosition + SlabFlags {} + +impl Slab for () {} + +pub trait SlabPosition { + const SLAB_POSITION: usize; +} + +impl SlabPosition for () { + const SLAB_POSITION: usize = usize::MAX; +} + +pub trait SlabFlags: SlabPosition { + const PFLAGS: PageEntryFlags; + const PSIZE: PageSize; +} + +impl SlabFlags for T { + default const PFLAGS: PageEntryFlags = + PageEntryFlags::regular_page_flags(); + + default const PSIZE: PageSize = PageSize::Regular; +} + +impl SlabFlags for () { + const PFLAGS: PageEntryFlags = PageEntryFlags::default(); + const PSIZE: PageSize = PageSize::Regular; +} + +pub trait SlabCacheConstructor { + fn new(buddy_order: usize) -> Self; +} + +pub trait Generic { + const START: usize; + const END: usize; + + fn size(&self) -> usize; +} + +pub trait DmaGeneric { + const START: usize; + const END: usize; + + fn size(&self) -> usize; +} diff --git a/kernel/src/memory/memory_map.rs b/kernel/src/memory/memory_map.rs index f84672b..7ea62f5 100644 --- a/kernel/src/memory/memory_map.rs +++ b/kernel/src/memory/memory_map.rs @@ -3,6 +3,7 @@ use common::{ enums::MemoryRegionType, }; use core::fmt::{self, Display, Formatter}; +use derive_more::{Deref, DerefMut}; #[macro_export] macro_rules! parsed_memory_map { @@ -15,13 +16,15 @@ macro_rules! parsed_memory_map { as usize, ) .translate() - .as_mut_ptr::<$crate::memory::memory_map::MemoryRegion>(), + .as_non_null::<$crate::memory::memory_map::MemoryRegion>() + .as_ptr(), *(common::address_types::PhysicalAddress::new_unchecked( common::constants::addresses::PARSED_MEMORY_MAP_LENGTH as usize, ) .translate() - .as_mut_ptr::()) as usize, + .as_non_null::() + .as_ptr()) as usize, ) } }; @@ -37,12 +40,14 @@ macro_rules! 
raw_memory_map { common::constants::addresses::MEMORY_MAP_OFFSET as usize, ) .translate() - .as_mut_ptr::<$crate::memory::memory_map::MemoryRegionExtended>(), + .as_non_null::<$crate::memory::memory_map::MemoryRegionExtended>() + .as_ptr(), *(common::address_types::PhysicalAddress::new_unchecked( common::constants::addresses::MEMORY_MAP_LENGTH as usize, ) .translate() - .as_mut_ptr::()) as usize, + .as_non_null::() + .as_ptr()) as usize, ) } }; @@ -104,11 +109,13 @@ impl MemoryRegionTrait for MemoryRegionExtended { } } -pub struct ParsedMapDisplay( - pub &'static [T], -); +#[derive(Deref, DerefMut)] +pub struct MemoryMap(pub &'static [T]); -impl Display for ParsedMapDisplay { +pub type RawMemoryMap = MemoryMap; +pub type ParsedMemoryMap = MemoryMap; + +impl Display for MemoryMap { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let mut usable = 0u64; let mut reserved = 0u64; @@ -120,10 +127,10 @@ impl Display for ParsedMapDisplay { write!( f, - "[0x{:0>9x} - 0x{:0>9x}]: type: {}", + "[0x{:0>9x} - 0x{:0>9x}]: type: {:?}", entry.base_address(), entry.base_address() + entry.length(), - entry.region_type() as u32 + entry.region_type() )?; match entry.region_type() { diff --git a/kernel/src/memory/mod.rs b/kernel/src/memory/mod.rs index 17bb0dc..eba83e0 100644 --- a/kernel/src/memory/mod.rs +++ b/kernel/src/memory/mod.rs @@ -1,2 +1,4 @@ pub mod allocators; pub mod memory_map; +pub mod page; +pub mod unassigned; diff --git a/kernel/src/memory/page.rs b/kernel/src/memory/page.rs new file mode 100644 index 0000000..13990f3 --- /dev/null +++ b/kernel/src/memory/page.rs @@ -0,0 +1,80 @@ +use core::{marker::PhantomData, ptr::NonNull}; + +use crate::memory::{ + allocators::{extensions::VirtualAddressExt, slab::traits::Slab}, + page::{map::PageMap, meta::PageMeta}, + unassigned::{AssignSlab, UnassignSlab}, +}; +use common::{ + address_types::{PhysicalAddress, VirtualAddress}, + constants::REGULAR_PAGE_SIZE, + late_init::LateInit, +}; + +pub mod map; +pub mod meta; + +pub 
type UnassignedPage = Page<()>; + +pub static mut PAGES: LateInit = LateInit::uninit(); + +pub struct Page { + pub meta: PageMeta, + _phantom: PhantomData, +} + +impl AssignSlab for NonNull> { + type Target = NonNull>; + + fn assign(&self) -> NonNull> { + unsafe { NonNull::new_unchecked(self.as_ptr() as *mut Page) } + } +} + +impl UnassignSlab for NonNull> { + type Target = NonNull>; + + fn as_unassigned(&self) -> NonNull> { + unsafe { NonNull::new_unchecked(self.as_ptr() as *mut Page<()>) } + } +} + +impl Page { + pub fn new(meta: PageMeta) -> Page { + Page { + meta, + _phantom: PhantomData::, + } + } + + pub fn physical_address(&self) -> PhysicalAddress { + let index = (self as *const _ as usize + - unsafe { PAGES.as_ptr().addr() }) + / size_of::(); + + unsafe { + PhysicalAddress::new_unchecked(index * REGULAR_PAGE_SIZE) + } + } + + /// Return the index of the page structure inside the [`PAGES`] array + /// pointed by this virtual address. + /// + /// **Note**: if you meant to get the page structure, consider using + /// [`Page::from_virt`] + pub fn index_of(addr: VirtualAddress) -> usize { + addr.translate() + .expect("Address could not be translated") + .as_usize() + / REGULAR_PAGE_SIZE + } + + /// Return the physical page structure that is pointed by this physical + /// address + pub fn from_virt(addr: VirtualAddress) -> NonNull> { + unsafe { + NonNull::from_ref(&PAGES[Page::::index_of(addr)]) + .assign::() + } + } +} diff --git a/kernel/src/memory/page/map.rs b/kernel/src/memory/page/map.rs new file mode 100644 index 0000000..4b1d205 --- /dev/null +++ b/kernel/src/memory/page/map.rs @@ -0,0 +1,78 @@ +use core::{ + mem::ManuallyDrop, + ops::{Deref, DerefMut}, + ptr::NonNull, +}; + +use common::{ + address_types::VirtualAddress, + constants::{PAGE_ALLOCATOR_OFFSET, REGULAR_PAGE_SIZE}, + late_init::LateInit, +}; + +use crate::{ + memory::{ + memory_map::ParsedMemoryMap, + page::{ + PAGES, UnassignedPage, + meta::{BuddyPageMeta, PageMeta}, + }, + }, + println, 
+}; + +pub struct PageMap(NonNull<[UnassignedPage]>); + +impl Deref for PageMap { + type Target = [UnassignedPage]; + + fn deref(&self) -> &Self::Target { + unsafe { self.0.as_ref() } + } +} + +impl DerefMut for PageMap { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { self.0.as_mut() } + } +} + +impl PageMap { + /// Initializes all pages on the constant address + /// ([`PAGE_ALLOCATOR_OFFSET`]) and returns the end address. + pub fn init( + uninit: &'static mut LateInit, + mmap: ParsedMemoryMap, + ) -> VirtualAddress { + let last = mmap.last().unwrap(); + let last_address = (last.base_address + last.length) as usize; + let total_pages = last_address / REGULAR_PAGE_SIZE; + + println!( + "Last address: {}, Total Pages: {}, size_of_array: {:x?} Kib", + last_address, + total_pages, + total_pages * size_of::() / 1024 + ); + unsafe { + let page_map = NonNull::slice_from_raw_parts( + NonNull::new_unchecked( + PAGE_ALLOCATOR_OFFSET as *mut UnassignedPage, + ), + total_pages, + ); + + uninit.write(PageMap(page_map)); + + for p in uninit.as_mut().iter_mut() { + core::ptr::write_volatile( + p as *mut UnassignedPage, + UnassignedPage::new(PageMeta { + buddy: ManuallyDrop::new(BuddyPageMeta::default()), + }), + ) + } + (PAGES.as_ptr_range().end as usize).into() + } + } +} diff --git a/kernel/src/memory/page/meta.rs b/kernel/src/memory/page/meta.rs new file mode 100644 index 0000000..c5ca152 --- /dev/null +++ b/kernel/src/memory/page/meta.rs @@ -0,0 +1,65 @@ +use core::{mem::ManuallyDrop, ptr::NonNull}; + +use common::enums::BuddyOrder; + +use crate::memory::{ + allocators::slab::{ + cache::SlabCache, descriptor::SlabDescriptor, traits::Slab, + }, + page::{Page, UnassignedPage}, + unassigned::{AssignSlab, UnassignSlab}, +}; + +pub union PageMeta { + pub buddy: ManuallyDrop, + pub slab: ManuallyDrop>, +} + +#[derive(Debug)] +pub struct BuddyPageMeta { + pub next: Option>, + pub prev: Option>, + pub order: Option, +} + +impl const Default for BuddyPageMeta { + fn 
default() -> Self { + Self { + next: None, + prev: None, + order: None, + } + } +} + +impl BuddyPageMeta { + pub fn detach(&mut self) -> Option>> { + let detached = self.next?; // None if there is no page to detach + + self.next = unsafe { detached.as_ref().meta.buddy.next }; + + if let Some(mut next) = self.next { + unsafe { (*next.as_mut().meta.buddy).prev = None } + } + + Some(detached.assign::()) + } + + pub fn attach(&mut self, mut p: NonNull>) { + unsafe { (*p.as_mut().meta.buddy).next = self.next }; + + if let Some(mut next) = self.next { + unsafe { + (*next.as_mut().meta.buddy).prev = Some(p.as_unassigned()) + }; + } + + self.next = Some(p.as_unassigned()) + } +} + +#[derive(Debug)] +pub struct SlabPageMeta { + pub owner: NonNull>, + pub freelist: NonNull>, +} diff --git a/kernel/src/memory/unassigned.rs b/kernel/src/memory/unassigned.rs new file mode 100644 index 0000000..8ad4e38 --- /dev/null +++ b/kernel/src/memory/unassigned.rs @@ -0,0 +1,13 @@ +use crate::memory::allocators::slab::traits::Slab; + +pub trait UnassignSlab { + type Target; + + fn as_unassigned(&self) -> Self::Target; +} + +pub trait AssignSlab { + type Target; + + fn assign(&self) -> Self::Target; +} diff --git a/learnix-macros/src/lib.rs b/learnix-macros/src/lib.rs index e307964..70f8ae0 100644 --- a/learnix-macros/src/lib.rs +++ b/learnix-macros/src/lib.rs @@ -1,7 +1,9 @@ use flag::FlagInput; use proc_macro::TokenStream; use quote::{format_ident, quote}; -use syn::{DeriveInput, parse_macro_input}; +use syn::{ + DeriveInput, LitInt, Token, parse_macro_input, punctuated::Punctuated, +}; mod flag; @@ -18,11 +20,10 @@ pub fn common_address_functions(input: TokenStream) -> TokenStream { pub const fn as_usize(&self) -> usize { self.0 } - pub const unsafe fn as_mut_ptr(&self) -> *mut T { - core::ptr::with_exposed_provenance_mut::(self.0) - } - pub const fn as_ptr(&self) -> *const T { - core::ptr::with_exposed_provenance::(self.0) + pub const fn as_non_null(&self) -> core::ptr::NonNull { + 
core::ptr::NonNull::new( + core::ptr::with_exposed_provenance_mut::(self.0) + ).expect("Tried to create NonNull from address, found null") } pub const fn is_aligned( &self, @@ -86,8 +87,17 @@ pub fn flag(input: TokenStream) -> TokenStream { #[allow(dead_code)] #[allow(unused_attributes)] /// Sets the corresponding flag - pub const fn #set_ident(&mut self) { - self.0 |= 1 << #bit; + pub fn #set_ident(&mut self) { + unsafe { + let val = core::ptr::read_volatile( + self as *const _ as *mut usize + ); + + core::ptr::write_volatile( + self as *const _ as *mut usize, + val | (1 << #bit) as usize + ) + } } #[inline] @@ -102,19 +112,171 @@ pub fn flag(input: TokenStream) -> TokenStream { #[allow(dead_code)] #[allow(unused_attributes)] /// Unset the corresponding flag - pub const fn #unset_ident(&mut self) { - self.0 &= !(1 << #bit); + pub fn #unset_ident(&mut self) { + unsafe { + let val = core::ptr::read_volatile( + self as *const _ as *mut usize + ); + + core::ptr::write_volatile( + self as *const _ as *mut usize, + val & !(1 << #bit) as usize + ) + } } #[inline] #[allow(dead_code)] #[allow(unused_attributes)] /// Checks if the corresponding flag is set - pub const fn #is_ident(&self) -> bool { - (self.0 & (1 << #bit)) != 0 + pub fn #is_ident(&self) -> bool { + unsafe { + core::ptr::read_volatile( + self as *const _ as *mut usize + ) & ((1<< #bit) as usize) != 0 + } } }; expanded.into() } // ANCHOR_END: flag + +// ANCHOR: ro_flag +/// This macro will obtain `flag_name` and the corresponding +/// `bit_number` and create read-only flag functionality +/// +/// With this information it will automatically generate +/// three methods +/// +/// 1. `is_$flag_name`: return true if the flag is set or false if not +#[proc_macro] +pub fn ro_flag(input: TokenStream) -> TokenStream { + let FlagInput { name, bit, .. 
} = + syn::parse_macro_input!(input as FlagInput); + + // build identifiers + let name_str = name.to_string(); + let support_ident = format_ident!("is_{}", name_str); + + let expanded = quote! { + #[inline] + #[allow(dead_code)] + #[allow(unused_attributes)] + /// Checks if the corresponding flag is set + pub fn #support_ident(&self) -> bool { + unsafe { + core::ptr::read_volatile( + self as *const _ as *mut usize + ) & ((1<< #bit) as usize) != 0 + } + } + }; + + expanded.into() +} +// ANCHOR_END: ro_flag + +// ANCHOR: rwc_flag +#[proc_macro] +pub fn rwc_flag(input: TokenStream) -> TokenStream { + let FlagInput { name, bit, .. } = + syn::parse_macro_input!(input as FlagInput); + + // build identifiers + let name_str = name.to_string(); + let clear_ident = format_ident!("clear_{}", name_str); + let support_ident = format_ident!("is_{}", name_str); + + let expanded = quote! { + #[inline] + #[allow(dead_code)] + #[allow(unused_attributes)] + /// Sets the corresponding flag + pub const fn #clear_ident(&mut self) { + self.0 |= 1 << #bit; + } + + + #[inline] + #[allow(dead_code)] + #[allow(unused_attributes)] + /// Checks if the corresponding flag is set + pub fn #support_ident(&self) -> bool { + unsafe { + core::ptr::read_volatile( + self as *const _ as *mut usize + ) & ((1<< #bit) as usize) != 0 + } + } + }; + + expanded.into() +} +// ANCHOR_END: rwc_flag + +// ANCHOR: rw1_flag +#[proc_macro] +pub fn rw1_flag(input: TokenStream) -> TokenStream { + let FlagInput { name, bit, .. } = + syn::parse_macro_input!(input as FlagInput); + + // build identifiers + let name_str = name.to_string(); + let set_ident = format_ident!("set_{}", name_str); + + let expanded = quote! 
{ + #[inline] + #[allow(dead_code)] + #[allow(unused_attributes)] + /// Sets the corresponding flag + pub const fn #set_ident(&mut self) { + self.0 |= 1 << #bit; + } + }; + + expanded.into() +} +// ANCHOR_END: rw1_flag + +#[proc_macro] +pub fn generate_generics(input: TokenStream) -> TokenStream { + // Parse the input as a comma-separated list of integers: 8, 16, 32... + let parser = Punctuated::::parse_terminated; + let input = parse_macro_input!(input with parser); + + let mut expanded = quote! {}; + + // initial range for the first item + let mut last_size: usize = 0; + + for lit in input { + let generic_size: usize = lit + .base10_parse() + .expect("Invalid integer format, expected base10"); + + let generic_name = format_ident!("Generic{}", generic_size); + + // minimum size of 8 bytes (usize on 64 bit). + let array_size = generic_size / 8; + + let start = last_size; + let end = generic_size; + + let struct_def = quote! { + #[derive(Debug, Clone, Copy)] + pub struct #generic_name(pub [usize; #array_size]); + + impl Generic for #generic_name { + fn size(&self) -> usize { #generic_size } + const START: usize = #start; + const END: usize = #end; + } + }; + + last_size = generic_size + 1; + expanded.extend(struct_def); + } + + TokenStream::from(expanded) +} diff --git a/shared/common/Cargo.toml b/shared/common/Cargo.toml index e766210..9a893df 100644 --- a/shared/common/Cargo.toml +++ b/shared/common/Cargo.toml @@ -4,8 +4,13 @@ version = "0.1.0" edition = "2024" [dependencies] +num_enum = { version = "0.7.5", default-features = false, features = [ + "complex-expressions", +] } derive_more = { version = "2.0.1", default-features = false, features = [ "full", ] } +strum_macros = { version = "0.27", default-features = false } +strum = { version = "0.27", default-features = false } thiserror = { version = "2.0.12", default-features = false } learnix-macros = { path = "../../learnix-macros" } diff --git a/shared/common/src/address_types.rs 
b/shared/common/src/address_types.rs index ca6534d..df176b1 100644 --- a/shared/common/src/address_types.rs +++ b/shared/common/src/address_types.rs @@ -1,3 +1,5 @@ +use core::ptr::NonNull; + #[cfg(target_arch = "x86_64")] use crate::constants::PHYSICAL_MEMORY_OFFSET; use crate::enums::PageTableLevel; @@ -61,6 +63,12 @@ impl const From for PhysicalAddress { #[repr(C)] pub struct VirtualAddress(usize); +impl From> for VirtualAddress { + fn from(value: NonNull) -> Self { + unsafe { VirtualAddress::new_unchecked(value.as_ptr().addr()) } + } +} + impl const From for VirtualAddress { // TODO! Change into new in the future fn from(value: usize) -> Self { @@ -100,26 +108,8 @@ impl VirtualAddress { /// 1 -> index of 1st table // ANCHOR: virtual_nth_pt_index_unchecked pub const fn index_of(&self, level: PageTableLevel) -> usize { - (self.0 >> (39 - 9 * (4 - level as usize))) & 0o777 + (self.0 >> (39 - 9 * (level as usize))) & 0o777 } - - // pub fn translate(&self) -> Option { - // let mut current_table = - // PageTable::current_table(); for i in 0..4 { - // let index = self.rev_nth_index_unchecked(i); - // match - // current_table.entries[index].mapped_table_mut() { - // Ok(table) => current_table = table, - // Err(EntryError::NotATable) => { - // return unsafe { - // Some(current_table.entries[index].mapped_unchecked()) - // }; } - // Err(EntryError::NoMapping) => return - // None, Err(EntryError::Full) => - // unreachable!(), } - // } - // None - // } } impl PhysicalAddress { diff --git a/shared/common/src/bitmap.rs b/shared/common/src/bitmap.rs index 06d8031..1adde6a 100644 --- a/shared/common/src/bitmap.rs +++ b/shared/common/src/bitmap.rs @@ -182,7 +182,7 @@ impl BitMap { BitMap { map: unsafe { slice::from_raw_parts_mut( - map_address.as_mut_ptr::(), + map_address.as_non_null::().as_mut(), map_size, ) }, diff --git a/shared/common/src/constants/addresses.rs b/shared/common/src/constants/addresses.rs index a3e482a..d2abce2 100644 --- 
a/shared/common/src/constants/addresses.rs +++ b/shared/common/src/constants/addresses.rs @@ -16,6 +16,7 @@ pub const TOP_IDENTITY_PAGE_TABLE_L3_OFFSET: usize = 0xe000; pub const TOP_IDENTITY_PAGE_TABLE_L2_OFFSET: usize = 0xf000; pub const KERNEL_OFFSET: u64 = 0x10000; -pub const PAGE_ALLOCATOR_OFFSET: usize = 0x100000; #[cfg(target_arch = "x86_64")] pub const PHYSICAL_MEMORY_OFFSET: usize = 0xffff800000000000; +#[cfg(target_arch = "x86_64")] +pub const PAGE_ALLOCATOR_OFFSET: usize = PHYSICAL_MEMORY_OFFSET + 0x100000; diff --git a/shared/common/src/enums/ahci.rs b/shared/common/src/enums/ahci.rs new file mode 100644 index 0000000..9e9f7bc --- /dev/null +++ b/shared/common/src/enums/ahci.rs @@ -0,0 +1,130 @@ +use crate::error::ConversionError; +use derive_more::Display; +use num_enum::{FromPrimitive, TryFromPrimitive, UnsafeFromPrimitive}; + +// ANCHOR: AHCIInterfaceSpeed +#[repr(u8)] +#[derive( + PartialEq, + Eq, + Display, + Clone, + Copy, + TryFromPrimitive, + UnsafeFromPrimitive, +)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum InterfaceSpeed { + #[display("Device not present or communication not established")] + DevNotPresent = 0, + #[display("Gen1: 1.5Gb/s")] + Gen1 = 1, + #[display("Gen1: 3.0Gb/s")] + Gen2 = 2, + #[display("Gen1: 6.0Gb/s")] + Gen3 = 3, +} +// ANCHOR_END: AHCIInterfaceSpeed + +// ANCHOR: InterfaceCommunicationControl +#[repr(u8)] +#[derive(Debug, Clone, Copy, TryFromPrimitive)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum InterfaceCommunicationControl { + Idle = 0x0, + Active = 0x1, + Partial = 0x2, + Slumber = 0x6, + DevSleep = 0x8, + #[num_enum(alternatives = [3..=5, 7, 9..=14])] + Reserved = 0xf, +} +// ANCHOR_END: InterfaceCommunicationControl + +// ANCHOR: DeviceType +#[repr(u32)] +#[derive(Debug, Clone, Copy, TryFromPrimitive)] +#[num_enum(error_type(name = ConversionError, constructor = 
ConversionError::CantConvertFrom))] +pub enum DeviceType { + SataDevice = 0x00000101, + AtapiDevice = 0xeb140101, + EnclosureManagementBridge = 0xc33c0101, + PortMultiplier = 0x96690191, +} +// ANCHOR_END: DeviceType + +// ANCHOR: InterfacePowerManagement +#[repr(u8)] +#[derive(Debug, Clone, Copy, TryFromPrimitive)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum InterfacePowerManagement { + DevNotPresent = 0, + Active = 1, + Partial = 2, + Slumber = 6, + DevSleep = 8, +} +// ANCHOR_END: InterfacePowerManagement + +// ANCHOR: DeviceDetection +#[repr(u8)] +#[derive(Debug, Clone, Copy, TryFromPrimitive)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum DeviceDetection { + NotDetected = 0, + DetectedNoCommunication = 1, + Detected = 3, + Device = 4, +} +// ANCHOR_END: device Detection + +// ANCHOR: SpeedAllowed +#[repr(u8)] +#[derive(Display, Clone, Copy, TryFromPrimitive, UnsafeFromPrimitive)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum InterfaceSpeedRestriction { + #[display("Device not present or communication not established")] + NoRestriction = 0, + #[display("Gen1: 1.5Gb/s")] + Gen1 = 1, + #[display("Gen1: 3.0Gb/s")] + Gen2 = 2, + #[display("Gen1: 6.0Gb/s")] + Gen3 = 3, +} +// ANCHOR_END: SpeedAllowed + +// ANCHOR: DeviceDetectionInitialization +#[repr(u8)] +#[derive(Debug, Clone, Copy, TryFromPrimitive)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum InterfaceInitialization { + NoInitializationRequested = 0, + CommunicationInitialization = 1, + DisableInterface = 4, +} +// ANCHOR_END: DeviceDetectionInitialization + +// ANCHOR: FisTypes +#[repr(u8)] +#[derive(Debug, Clone, Copy, FromPrimitive)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum FisType { + 
RegisterFisHost2Device = 0x27, + RegisterFisDevice2Host = 0x34, + DmaActivateFisDevice2Host = 0x39, + DmaSetupFisBiDirectional = 0x41, + DataFisBiDirectional = 0x46, + BistActivateFisBiDirectional = 0x58, + PioSetupFisDevice2Host = 0x5f, + SetDevicesBits = 0xa1, + _Reserved0 = 0xa6, + _Reserved1 = 0xb8, + _Reserved2 = 0xbf, + _VendorSpecific0 = 0xc7, + _VendorSpecific1 = 0xd4, + _Reserved3 = 0xd9, + + #[num_enum(default)] + Unknown = 0xff, +} diff --git a/shared/common/src/enums/ata.rs b/shared/common/src/enums/ata.rs new file mode 100644 index 0000000..89fc0e2 --- /dev/null +++ b/shared/common/src/enums/ata.rs @@ -0,0 +1,13 @@ +use num_enum::TryFromPrimitive; + +use crate::error::ConversionError; + +#[repr(u8)] +#[derive(PartialEq, Eq, Clone, Copy, Debug, TryFromPrimitive)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum AtaCommand { + Nop = 0, + ReadDmaExt = 0x25, + IdentifyPacketDevice = 0xa1, + IdentifyDevice = 0xec, +} diff --git a/shared/common/src/enums/buddy.rs b/shared/common/src/enums/buddy.rs new file mode 100644 index 0000000..5a50fda --- /dev/null +++ b/shared/common/src/enums/buddy.rs @@ -0,0 +1,37 @@ +use crate::error::ConversionError; +use num_enum::{TryFromPrimitive, UnsafeFromPrimitive}; +use strum::VariantArray; +use strum_macros::VariantArray; + +pub const BUDDY_MAX_ORDER: usize = BuddyOrder::VARIANTS.len(); + +#[repr(u8)] +#[derive( + VariantArray, + Clone, + Copy, + PartialEq, + Debug, + Eq, + TryFromPrimitive, + UnsafeFromPrimitive, +)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum BuddyOrder { + Order0 = 0, + Order1 = 1, + Order2 = 2, + Order3 = 3, + Order4 = 4, + Order5 = 5, + Order6 = 6, + Order7 = 7, + Order8 = 8, + Order9 = 9, + Order10 = 10, +} + +impl BuddyOrder { + pub const MIN: BuddyOrder = *BuddyOrder::VARIANTS.first().unwrap(); + pub const MAX: BuddyOrder = *BuddyOrder::VARIANTS.last().unwrap(); +} diff --git 
a/shared/common/src/enums/interrupts.rs b/shared/common/src/enums/interrupts.rs index e76c998..ac6be71 100644 --- a/shared/common/src/enums/interrupts.rs +++ b/shared/common/src/enums/interrupts.rs @@ -27,6 +27,7 @@ pub enum Interrupt { // Interrupts until 0x1f are reserved by Intel. Timer = 0x20, Keyboard = 0x21, + Ahci = 0x2a, } #[repr(u8)] #[derive(Clone, Debug, Copy)] diff --git a/shared/common/src/enums/mod.rs b/shared/common/src/enums/mod.rs index 3d78a85..2149e19 100644 --- a/shared/common/src/enums/mod.rs +++ b/shared/common/src/enums/mod.rs @@ -1,4 +1,7 @@ +pub mod ahci; +pub mod ata; pub mod bios_interrupts; +pub mod buddy; pub mod cpuid; pub mod general; pub mod global_descriptor_table; @@ -11,7 +14,10 @@ pub mod pic8259; pub mod ports; pub mod vga; +pub use ahci::*; +pub use ata::*; pub use bios_interrupts::*; +pub use buddy::*; pub use cpuid::*; pub use general::*; pub use global_descriptor_table::*; diff --git a/shared/common/src/enums/paging.rs b/shared/common/src/enums/paging.rs index 986d5e1..9ef30b2 100644 --- a/shared/common/src/enums/paging.rs +++ b/shared/common/src/enums/paging.rs @@ -1,4 +1,6 @@ use core::{alloc::Layout, ptr::Alignment}; +use num_enum::TryFromPrimitive; +use strum_macros::{EnumIter, VariantArray}; use crate::{ constants::{ @@ -7,12 +9,24 @@ use crate::{ }, error::{ConversionError, TableError}, }; -#[derive(Clone, Copy, Debug, PartialEq, Eq)] + +#[repr(u8)] +#[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + EnumIter, + TryFromPrimitive, + VariantArray, +)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] pub enum PageTableLevel { - PML4 = 4, - PDPT = 3, + PML4 = 0, + PDPT = 1, PD = 2, - PT = 1, + PT = 3, } impl PageTableLevel { @@ -27,38 +41,12 @@ impl PageTableLevel { .then(|| unsafe { core::mem::transmute(n) }) .ok_or(TableError::Full) } - - pub const fn iterator<'a>() -> impl Iterator - { - const VARIANTS: [PageTableLevel; 4] = [ - PageTableLevel::PML4, - 
PageTableLevel::PDPT, - PageTableLevel::PD, - PageTableLevel::PT, - ]; - - // Convert the array slice into an iterator. - VARIANTS.iter() - } -} - -impl TryFrom for PageTableLevel { - type Error = ConversionError; - - fn try_from(value: u8) -> Result { - if (1..=4).contains(&value) { - Ok(unsafe { - core::mem::transmute::(value) - }) - } else { - Err(ConversionError::CantConvertFrom(value)) - } - } } - -// impl const From for PageTableLevel {} - -#[derive(Clone, Debug, PartialEq, Eq, Copy)] +#[repr(u8)] +#[derive( + Clone, Copy, Debug, PartialEq, Eq, EnumIter, TryFromPrimitive, +)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] pub enum PageSize { /// 4Kib pages Regular = 2, @@ -79,8 +67,23 @@ impl PageSize { } } - pub fn exceeds(&self, table_level: PageTableLevel) -> bool { - (3 - *self as usize) <= table_level as usize + /// Conclude if a page can be allocated in the given PageTableLevel + /// + /// # Example + /// A big (2MiB) page can be allocated on PML4, PDPT and PD so it will + /// return `true` for those, and it cannot be allocated on `PT` so for + /// it, this will return `false` + pub fn allocatable_at(&self, table_level: PageTableLevel) -> bool { + (*self as usize + 1) >= table_level as usize + } + + /// The minimal page level that this page size can exist on. 
+ pub fn min_level(&self) -> PageTableLevel { + match self { + PageSize::Regular => PageTableLevel::PT, + PageSize::Big => PageTableLevel::PD, + PageSize::Huge => PageTableLevel::PDPT, + } } /// Determines the appropriate `PageSizeAlignment` for a diff --git a/shared/common/src/enums/pci.rs b/shared/common/src/enums/pci.rs index 442586a..f5a1938 100644 --- a/shared/common/src/enums/pci.rs +++ b/shared/common/src/enums/pci.rs @@ -1,4 +1,5 @@ #[repr(u16)] +#[non_exhaustive] #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum VendorID { Intel = 0x8086, @@ -13,12 +14,13 @@ pub enum VendorID { #[repr(u16)] pub enum IntelDeviceID { HostBridge = 0x1237, - PIIX3ISA = 0x700, - PIIX3IDE = 0x701, - PIIX3USB = 0x702, - PIIX3ACPI = 0x703, + PIIX3ISA = 0x7000, + PIIX3IDE = 0x7010, + PIIX3USB = 0x7020, + PIIX3ACPI = 0x7113, ExpressDramController = 0x29C0, - NetworkController = 0x100E, // e1000 again + ICH9SataController = 0x2922, + NetworkController = 0x100E, LPCInterface82801IB = 0x2410, SataControllerAHCI = 0x2822, NonExistent = 0xFFFF, @@ -68,6 +70,7 @@ pub union DeviceID { } #[derive(Clone, Copy)] +#[repr(C)] pub struct VendorDevice { pub vendor: VendorID, pub device: DeviceID, @@ -76,16 +79,29 @@ pub struct VendorDevice { impl core::fmt::Debug for VendorDevice { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "Vendor: {:?} ", self.vendor)?; - write!(f, "Device: 0x{:x?}", unsafe { self.device.num }) - } -} - -impl core::fmt::Debug for DeviceID { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "Device: 0x{:x?}", unsafe { self.num }) + write!(f, "Device: ")?; + match self.vendor { + VendorID::Intel => { + write!(f, "{:?}", unsafe { self.device.intel }) + } + VendorID::Nvidia => { + write!(f, "{:?}", unsafe { self.device.nvidia }) + } + VendorID::QEMU => { + write!(f, "{:?}", unsafe { self.device.qemu }) + } + VendorID::Realtek => { + write!(f, "{:?}", unsafe { self.device.realtek }) + } + VendorID::VirtIO 
=> { + write!(f, "{:?}", unsafe { self.device.virtio }) + } + VendorID::NonExistent => { + write!(f, "NonExistent") + } + } } } - #[repr(u8)] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ClassCode { @@ -533,6 +549,7 @@ impl core::fmt::Debug for ProgrammingInterface { } } +#[repr(C, packed)] #[derive(Clone, Copy)] pub struct PciDeviceType { pub prog_if: ProgrammingInterface, @@ -715,6 +732,16 @@ impl core::fmt::Debug for PciDeviceType { } } +impl PciDeviceType { + pub fn is_ahci(&self) -> bool { + unsafe { + self.class == ClassCode::MassStorageController + && self.subclass.storage == MassStorageSubClass::SATA + && self.prog_if.sata == SATAControllerPI::AHCI1 + } + } +} + #[repr(u8)] #[derive(Debug, Clone, Copy)] pub enum HeaderType { diff --git a/shared/common/src/enums/pic8259.rs b/shared/common/src/enums/pic8259.rs index bace8f9..d701e76 100644 --- a/shared/common/src/enums/pic8259.rs +++ b/shared/common/src/enums/pic8259.rs @@ -34,7 +34,7 @@ pub enum CascadedPicInterruptLine { Irq7 = 1 << 7, Irq8 = 1 << 8, Irq9 = 1 << 9, - Irq10 = 1 << 10, + Ahci = 1 << 10, Irq11 = 1 << 11, Irq12 = 1 << 12, Irq13 = 1 << 13, diff --git a/shared/common/src/error/ahci.rs b/shared/common/src/error/ahci.rs new file mode 100644 index 0000000..42506e5 --- /dev/null +++ b/shared/common/src/error/ahci.rs @@ -0,0 +1,58 @@ +use crate::error::ConversionError; +use num_enum::TryFromPrimitive; +use strum_macros::EnumIter; +use thiserror::Error; + +// ANCHOR: DiagnosticError + +#[repr(u16)] +#[derive(Debug, Clone, Copy, TryFromPrimitive, Error, EnumIter)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum DiagnosticError { + #[error("Physical ready signal changed state")] + PhyRdyChange = 1 << 0, + #[error("Internal error in the physical layer")] + PhyInternal = 1 << 1, + #[error("Communication wake signal detected")] + CommWake = 1 << 2, + #[error("10B to 8B decoding errors occurred")] + DecodingError = 1 << 3, + 
#[error("Disparity Error (Not used by AHCI)")] + DisparityError = 1 << 4, + #[error("One or more CRC errors occurred on link layer")] + CrcError = 1 << 5, + #[error("Handshake error, one or more R_ERR responses were received")] + HandshakeError = 1 << 6, + #[error("One or more link state machine errors were encountered")] + LinkSequenceError = 1 << 7, + #[error("Error on transport layer transition change")] + TransportStateError = 1 << 8, + #[error("One or more FISs were received with unknown type")] + UnknownFisType = 1 << 9, + #[error("A change in device presence has been detected")] + Exchanged = 1 << 10, +} + +#[repr(u16)] +#[derive(Debug, Clone, Copy, TryFromPrimitive, Error, EnumIter)] +#[num_enum(error_type(name = ConversionError, constructor = ConversionError::CantConvertFrom))] +pub enum AhciError { + #[error("Data integrity error that occurred was recovered")] + RecoveredDataIntegrityError = 1 << 0, + #[error("Comm between device and host was lost and re-established")] + RecoveredCommunicationError = 1 << 1, + #[error("Data integrity error occurred and was NOT recovered")] + DataIntegrityError = 1 << 8, + #[error("A communication error that was not recovered occurred")] + PersistentCommORDataIntegrityError = 1 << 9, + #[error("A violation of the SATA protocol was detected")] + ProtocolError = 1 << 10, + #[error("The host bus adapter experienced an internal error")] + InternalError = 1 << 11, +} + +#[derive(Debug, Clone, Copy, Error)] +pub enum HbaError { + #[error("Address is not aligned properly")] + AddressNotAligned, +} diff --git a/shared/common/src/error/general.rs b/shared/common/src/error/general.rs index dd7006f..db279b0 100644 --- a/shared/common/src/error/general.rs +++ b/shared/common/src/error/general.rs @@ -3,7 +3,7 @@ use core::fmt::Debug; use thiserror::Error; #[derive(Error, Debug)] -pub enum ConversionError { +pub enum ConversionError { #[error("Cannot convert from {:?}", _0)] CantConvertFrom(T), } diff --git 
a/shared/common/src/error/mod.rs b/shared/common/src/error/mod.rs index 9aaff43..0e195a8 100644 --- a/shared/common/src/error/mod.rs +++ b/shared/common/src/error/mod.rs @@ -1,5 +1,7 @@ +pub mod ahci; pub mod general; pub mod paging; +pub use ahci::*; pub use general::*; pub use paging::*; diff --git a/shared/common/src/late_init.rs b/shared/common/src/late_init.rs new file mode 100644 index 0000000..a2ba4a5 --- /dev/null +++ b/shared/common/src/late_init.rs @@ -0,0 +1,30 @@ +use core::{ + mem::MaybeUninit, + ops::{Deref, DerefMut}, +}; + +pub struct LateInit(MaybeUninit); + +impl LateInit { + pub const fn uninit() -> LateInit { + LateInit::(MaybeUninit::uninit()) + } + + pub const fn write(&mut self, val: T) { + self.0.write(val); + } +} + +impl Deref for LateInit { + type Target = T; + + fn deref(&self) -> &Self::Target { + unsafe { self.0.assume_init_ref() } + } +} + +impl DerefMut for LateInit { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { self.0.assume_init_mut() } + } +} diff --git a/shared/common/src/lib.rs b/shared/common/src/lib.rs index 7bbbbb8..4923b17 100644 --- a/shared/common/src/lib.rs +++ b/shared/common/src/lib.rs @@ -13,7 +13,10 @@ pub mod bitmap; pub mod constants; pub mod enums; pub mod error; +pub mod late_init; pub mod ring_buffer; +pub mod volatile; + struct FakeAllocator; unsafe impl core::alloc::GlobalAlloc for FakeAllocator { diff --git a/shared/common/src/ring_buffer.rs b/shared/common/src/ring_buffer.rs index 6535b6e..d52b3f2 100644 --- a/shared/common/src/ring_buffer.rs +++ b/shared/common/src/ring_buffer.rs @@ -17,7 +17,7 @@ impl RingBuffer { write_idx: 0, buffer: unsafe { slice::from_raw_parts_mut( - buffer_address.as_mut_ptr::(), + buffer_address.as_non_null::().as_mut(), length.get(), ) }, diff --git a/shared/common/src/volatile.rs b/shared/common/src/volatile.rs new file mode 100644 index 0000000..296a209 --- /dev/null +++ b/shared/common/src/volatile.rs @@ -0,0 +1,49 @@ +use core::fmt::Debug; + +#[derive(Copy)] 
+#[repr(transparent)] +pub struct Volatile(T); + +impl Volatile { + pub fn new(vol: T) -> Volatile { + Volatile(vol) + } + + /// Read from the hardware register + pub fn read(&self) -> T { + unsafe { core::ptr::read_volatile(&self.0) } + } + + /// Write to the hardware register + pub fn write(&mut self, value: T) { + unsafe { core::ptr::write_volatile(&mut self.0 as *mut T, value) } + } +} + +impl Clone for Volatile { + fn clone(&self) -> Self { + Volatile(self.read()) + } +} + +impl Debug for Volatile { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.write_fmt(format_args!("{:?}", &self.0 as *const T)) + } +} + +#[macro_export] +macro_rules! read_volatile { + ($arg: expr) => { + unsafe { core::ptr::read_volatile(core::ptr::addr_of!($arg)) } + }; +} + +#[macro_export] +macro_rules! write_volatile { + ($arg: expr, $val: expr) => { + unsafe { + core::ptr::write_volatile(core::ptr::addr_of_mut!($arg), $val) + } + }; +} diff --git a/shared/cpu_utils/Cargo.toml b/shared/cpu_utils/Cargo.toml index 97d0fca..d6e005f 100644 --- a/shared/cpu_utils/Cargo.toml +++ b/shared/cpu_utils/Cargo.toml @@ -11,3 +11,5 @@ derive_more = { version = "2.0.1", default-features = false, features = [ thiserror = { version = "2.0.12", default-features = false } extend = "1.2.0" learnix-macros = { path = "../../learnix-macros" } +strum_macros = { version = "0.27", default-features = false } +strum = { version = "0.27", default-features = false } diff --git a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs index 07c727f..7043601 100644 --- a/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs +++ b/shared/cpu_utils/src/structures/interrupt_descriptor_table.rs @@ -106,14 +106,20 @@ impl InterruptDescriptorTable { gdt.load_tss(tss); unsafe { ptr::write_volatile( - base_address.as_mut_ptr::(), + base_address + .as_non_null::() + .as_ptr(), InterruptDescriptorTable { interrupts: [const 
{ InterruptDescriptorTableEntry::missing() }; 256], }, ); - uninit.write(&mut *base_address.as_mut_ptr::()); + uninit.write( + base_address + .as_non_null::() + .as_mut(), + ); uninit.assume_init_ref().load(); } } diff --git a/shared/cpu_utils/src/structures/paging/entry_flags.rs b/shared/cpu_utils/src/structures/paging/entry_flags.rs index 7165f9f..bc10733 100644 --- a/shared/cpu_utils/src/structures/paging/entry_flags.rs +++ b/shared/cpu_utils/src/structures/paging/entry_flags.rs @@ -44,7 +44,7 @@ macro_rules! table_entry_flags { // ANCHOR: page_entry_flags /// A wrapper for `PageTableEntry` flags for easier use -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub struct PageEntryFlags(pub u64); // ANCHOR_END: page_entry_flags @@ -73,5 +73,22 @@ impl PageEntryFlags { pub const fn regular_page_flags() -> Self { PageEntryFlags::default().present().writable() } + + pub const fn regular_io_page_flags() -> Self { + PageEntryFlags::default() + .present() + .writable() + .disable_cache() + .global() + } + + pub const fn huge_io_page_flags() -> Self { + PageEntryFlags::default() + .present() + .writable() + .huge_page() + .disable_cache() + .global() + } } // ANCHOR_END: impl_page_entry_flags diff --git a/shared/cpu_utils/src/structures/paging/init.rs b/shared/cpu_utils/src/structures/paging/init.rs index e762460..4b8a6a3 100644 --- a/shared/cpu_utils/src/structures/paging/init.rs +++ b/shared/cpu_utils/src/structures/paging/init.rs @@ -81,7 +81,7 @@ pub fn enable() -> Option<()> { ); top_identity_page_table_l2.entries[0].map_unchecked( PhysicalAddress::new_unchecked(0), - PageEntryFlags::huge_page_flags(), + PageEntryFlags::huge_io_page_flags(), ); } // ANCHOR_END: setup_top_page_tables diff --git a/shared/cpu_utils/src/structures/paging/page_table.rs b/shared/cpu_utils/src/structures/paging/page_table.rs index 5d597f3..9b7739b 100644 --- a/shared/cpu_utils/src/structures/paging/page_table.rs +++ b/shared/cpu_utils/src/structures/paging/page_table.rs @@ -1,11 
+1,11 @@ -use core::ptr; +use core::ptr::{self, NonNull}; use crate::{registers::cr3, structures::paging::PageTableEntry}; use common::{ address_types::VirtualAddress, constants::{PAGE_DIRECTORY_ENTRIES, REGULAR_PAGE_ALIGNMENT}, enums::{PageSize, PageTableLevel}, - error::{EntryError, TableError}, + error::EntryError, }; // ANCHOR: page_table @@ -17,6 +17,14 @@ pub struct PageTable { } // ANCHOR_END: page_table +#[derive(Debug)] +pub enum EntryIndex { + Entry(&'static PageTableEntry), + Index(usize), + PageDoesNotFit, + OutOfEntries, +} + // ANCHOR: page_table_impl impl PageTable { // ANCHOR: page_table_empty @@ -40,35 +48,25 @@ impl PageTable { #[inline] pub unsafe fn empty_from_ptr( page_table_ptr: VirtualAddress, - ) -> Option<&'static mut PageTable> { + ) -> Option> { if !page_table_ptr.is_aligned(REGULAR_PAGE_ALIGNMENT) { return None; } unsafe { ptr::write_volatile( - page_table_ptr.as_mut_ptr::(), + page_table_ptr.as_non_null::().as_ptr(), PageTable::empty(), ); - Some(&mut *page_table_ptr.as_mut_ptr::()) + Some(page_table_ptr.as_non_null::()) } } // ANCHOR_END: page_table_empty_from_ptr // ANCHOR: page_table_current_table #[inline] - pub fn current_table() -> &'static PageTable { - unsafe { - &*core::ptr::with_exposed_provenance(cr3::read() as usize) - } - } - - #[inline] - pub fn current_table_mut() -> &'static mut PageTable { - unsafe { - &mut *core::ptr::with_exposed_provenance_mut( - cr3::read() as usize - ) - } + pub fn current_table() -> NonNull { + NonNull::new(cr3::read() as usize as *mut PageTable) + .expect("Page table pointer is not present in cr3, found NULL") } // ANCHOR_END: page_table_current_table @@ -88,70 +86,28 @@ impl PageTable { /// Returns the index of the found entry and the page table if found. 
// Anchor: page_table_try_fetch_table #[cfg(target_arch = "x86_64")] - fn try_fetch_table( - &self, + pub fn try_fetch_table( + &'static self, start_at: usize, table_level: PageTableLevel, page_size: PageSize, - ) -> (usize, Option<&PageTable>) { + ) -> EntryIndex { + if !page_size.allocatable_at(table_level) { + return EntryIndex::PageDoesNotFit; + } + for (i, entry) in self.entries.iter().enumerate().skip(start_at) { match entry.mapped_table() { - Ok(v) => { - if page_size.exceeds(table_level) { - continue; - } - return (i, Some(v)); + Ok(_) => { + return EntryIndex::Entry(entry); } Err(EntryError::NoMapping) => { - return (i, None); + return EntryIndex::Index(i); } Err(EntryError::NotATable) => continue, } } - (PAGE_DIRECTORY_ENTRIES, None) - } - - /// Find an avavilable page in the given size. - // ANCHOR: page_table_find_available_page - #[cfg(target_arch = "x86_64")] - pub fn find_available_page( - page_size: PageSize, - ) -> Result { - const LEVELS: usize = 4; - let mut level_indices = [0usize; LEVELS]; - let mut page_tables = [Self::current_table(); LEVELS]; - let mut current_level = PageTableLevel::PML4; - loop { - let current_table = page_tables[current_level as usize]; - - let next_table = match current_table.try_fetch_table( - level_indices[current_level as usize], - current_level, - page_size, - ) { - (PAGE_DIRECTORY_ENTRIES, None) => { - current_level = current_level.prev()?; - level_indices[current_level as usize] += 1; - continue; - } - (i, Some(table)) => { - level_indices[current_level as usize] = i; - table - } - (i, None) => { - level_indices[current_level as usize] = i; - return Ok(VirtualAddress::from_indices( - level_indices, - )); - } - }; - let next_level = current_level - .next() - .expect("Can't go next on a first level table"); - page_tables[next_level as usize] = next_table; - level_indices[next_level as usize] += 1; - } + EntryIndex::OutOfEntries } - // ANCHOR_END: page_table_find_available_page } // ANCHOR_END: page_table_impl diff 
--git a/shared/cpu_utils/src/structures/paging/page_table_entry.rs b/shared/cpu_utils/src/structures/paging/page_table_entry.rs index db567cb..6e6c990 100644 --- a/shared/cpu_utils/src/structures/paging/page_table_entry.rs +++ b/shared/cpu_utils/src/structures/paging/page_table_entry.rs @@ -1,3 +1,6 @@ +#[cfg(target_arch = "x86_64")] +use core::ptr::NonNull; + use common::{ address_types::PhysicalAddress, constants::{ENTRY_ADDRESS_MASK, REGULAR_PAGE_ALIGNMENT}, @@ -63,7 +66,7 @@ impl PageTableEntry { /// the entry was already mapped. // ANCHOR: page_table_entry_map_unchecked #[inline] - pub const unsafe fn map_unchecked( + pub unsafe fn map_unchecked( &mut self, frame: PhysicalAddress, flags: PageEntryFlags, @@ -97,7 +100,7 @@ impl PageTableEntry { /// allocator // ANCHOR: page_table_entry_map #[inline] - pub const unsafe fn map( + pub unsafe fn map( &mut self, frame: PhysicalAddress, flags: PageEntryFlags, @@ -141,14 +144,12 @@ impl PageTableEntry { /// /// This method assumes all page tables are identity /// mapped. - // ANCHOR: page_table_entry_mapped_table_mut + // ANCHOR: page_table_entry_mapped_table #[cfg(target_arch = "x86_64")] #[allow(clippy::mut_from_ref)] - pub fn mapped_table_mut(&self) -> Result<&mut PageTable, EntryError> { + pub fn mapped_table(&self) -> Result, EntryError> { // first check if the entry is mapped. - let pt = unsafe { - &mut *self.mapped()?.translate().as_mut_ptr::() - }; + let pt = self.mapped()?.translate().as_non_null::(); // then check if it is a table. if !self.is_huge_page() && self.is_table() { Ok(pt) @@ -156,20 +157,10 @@ impl PageTableEntry { Err(EntryError::NotATable) } } - // ANCHOR_END: page_table_entry_mapped_table_mut + // ANCHOR_END: page_table_entry_mapped_table - // ANCHOR: page_table_entry_mapped_table - #[cfg(target_arch = "x86_64")] - pub fn mapped_table(&self) -> Result<&PageTable, EntryError> { - // first check if the entry is mapped. 
- let pt = - unsafe { &*self.mapped()?.translate().as_ptr::() }; - // then check if it is a table. - if !self.is_huge_page() && self.is_table() { - Ok(pt) - } else { - Err(EntryError::NotATable) - } + pub fn table_index(&self) -> usize { + let table_offset = self as *const _ as usize & ((1 << 12) - 1); + table_offset / size_of::() } - // ANCHOR_END: page_table_entry_mapped_table }