From 27cff81358e84df57a979394ec259c493485dc6b Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Fri, 4 Nov 2022 15:50:19 -0700 Subject: [PATCH 01/13] [no ci] WIP temp saving cpu_local crate --- Cargo.lock | 9 +++++++++ kernel/cpu_local/Cargo.toml | 13 +++++++++++++ kernel/cpu_local/src/lib.rs | 15 +++++++++++++++ 3 files changed, 37 insertions(+) create mode 100644 kernel/cpu_local/Cargo.toml create mode 100644 kernel/cpu_local/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index ab2b7e698a..233dd7cfe9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -540,6 +540,15 @@ dependencies = [ "terminal_print", ] +[[package]] +name = "cpu_local" +version = "0.1.0" +dependencies = [ + "log", + "memory", + "spin 0.9.0", +] + [[package]] name = "cranelift-entity" version = "0.77.0" diff --git a/kernel/cpu_local/Cargo.toml b/kernel/cpu_local/Cargo.toml new file mode 100644 index 0000000000..189378c76c --- /dev/null +++ b/kernel/cpu_local/Cargo.toml @@ -0,0 +1,13 @@ +[package] +authors = ["Kevin Boos "] +name = "cpu_local" +description = "Support for CPU-local storage; per-CPU variables" +version = "0.1.0" +edition = "2021" + +[dependencies] +log = "0.4.8" +spin = "0.9.0" + +[dependencies.memory] +path = "../memory" diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs new file mode 100644 index 0000000000..8873151bf3 --- /dev/null +++ b/kernel/cpu_local/src/lib.rs @@ -0,0 +1,15 @@ +//! Offers types and macros to declare and access CPU-local storage (per-CPU variables). +//! +//! CPU-local variables cannot be used until after a given CPU has bee initialized, +//! i.e., its Local APIC (on x86_64) has been discovered and properly configured. +//! +//! Note that Rust offers the `#[thread_local]` attribute for thread-local storage (TLS), +//! but there is no equivalent for CPU-local storage. +//! On x86_64, TLS areas use the `fs` segment register for the TLS base, +//! and this crates uses the `gs` segment register for the CPU-local base. + +#![no_std] + +extern crate alloc; + +// TODO From fe0fd143406334c63c176fd1ac57f85206d4e6ee Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Fri, 31 Mar 2023 19:22:01 -0700 Subject: [PATCH 02/13] [no ci] WIP, cpu locals mostly work. 
STill needs improvement --- Cargo.lock | 17 ++- kernel/ap_start/Cargo.toml | 1 + kernel/ap_start/src/lib.rs | 2 + kernel/captain/Cargo.toml | 1 + kernel/captain/src/lib.rs | 1 + kernel/cpu_local/Cargo.toml | 5 +- kernel/cpu_local/src/lib.rs | 246 ++++++++++++++++++++++++++++++++++- kernel/per_cpu/Cargo.toml | 14 ++ kernel/per_cpu/src/lib.rs | 76 +++++++++++ kernel/preemption/src/lib.rs | 6 + 10 files changed, 364 insertions(+), 5 deletions(-) create mode 100644 kernel/per_cpu/Cargo.toml create mode 100644 kernel/per_cpu/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 62ea6445e4..b6d4c61694 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -100,6 +100,7 @@ dependencies = [ "memory", "no_drop", "page_attribute_table", + "per_cpu", "scheduler", "spawn", "stack", @@ -377,6 +378,7 @@ dependencies = [ "no_drop", "ota_update_client", "page_attribute_table", + "per_cpu", "scheduler", "simd_personality", "spawn", @@ -617,9 +619,11 @@ dependencies = [ name = "cpu_local" version = "0.1.0" dependencies = [ + "crossbeam-utils", "log", "memory", - "spin 0.9.0", + "spin 0.9.4", + "x86_64", ] [[package]] @@ -2589,6 +2593,17 @@ dependencies = [ "spin 0.9.4", ] +[[package]] +name = "per_cpu" +version = "0.1.0" +dependencies = [ + "cpu", + "cpu_local", + "log", + "preemption", + "task", +] + [[package]] name = "percent-encoding" version = "1.0.2" diff --git a/kernel/ap_start/Cargo.toml b/kernel/ap_start/Cargo.toml index c4364e6429..b8e4a9ab42 100644 --- a/kernel/ap_start/Cargo.toml +++ b/kernel/ap_start/Cargo.toml @@ -16,6 +16,7 @@ scheduler = { path = "../scheduler" } spawn = { path = "../spawn" } kernel_config = { path = "../kernel_config" } cpu = { path = "../cpu" } +per_cpu = { path = "../per_cpu" } no_drop = { path = "../no_drop" } early_tls = { path = "../early_tls" } diff --git a/kernel/ap_start/src/lib.rs b/kernel/ap_start/src/lib.rs index d218a2200b..b6675be1c5 100644 --- a/kernel/ap_start/src/lib.rs +++ b/kernel/ap_start/src/lib.rs @@ -61,6 +61,8 @@ pub fn kstart_ap( // so all we need to do here is to reload it on this CPU. early_tls::reload(); + per_cpu::init(cpu_id).unwrap(); + // get the stack that was allocated for us (this AP) by the BSP. let this_ap_stack = take_ap_stack(cpu_id.value()).unwrap_or_else( || panic!("BUG: kstart_ap(): couldn't get stack created for CPU {}", cpu_id) diff --git a/kernel/captain/Cargo.toml b/kernel/captain/Cargo.toml index 269f5823bb..17cffb52f7 100644 --- a/kernel/captain/Cargo.toml +++ b/kernel/captain/Cargo.toml @@ -23,6 +23,7 @@ spawn = { path = "../spawn" } stack = { path = "../stack" } task = { path = "../task" } cpu = { path = "../cpu" } +per_cpu = { path = "../per_cpu" } [target.'cfg(target_arch = "x86_64")'.dependencies] logger_x86_64 = { path = "../logger_x86_64" } diff --git a/kernel/captain/src/lib.rs b/kernel/captain/src/lib.rs index b274f636b9..33a4f88b60 100644 --- a/kernel/captain/src/lib.rs +++ b/kernel/captain/src/lib.rs @@ -117,6 +117,7 @@ pub fn init( // get BSP's CPU ID let bsp_id = cpu::bootstrap_cpu().ok_or("captain::init(): couldn't get ID of bootstrap CPU!")?; + per_cpu::init(bsp_id)?; // Initialize the scheduler and create the initial `Task`, // which is bootstrapped from this current execution context. 
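The cpu_local changes below store per-CPU data in a memory region whose first word is a pointer to the region itself, with every per-CPU field placed at a fixed offset from that base. The following stand-alone Rust sketch models only that pointer arithmetic; it is an illustration, not part of the patch, and it assumes a 64-bit target where the real code obtains the base from the GS segment register (gs:[0]) rather than from a heap allocation.

// Simplified, hypothetical model of the self-pointer layout described above.
// The real kernel code reads the base from gs:[0] with inline assembly; here
// a heap allocation stands in for the per-CPU data region.
#[repr(C)]
struct PerCpuSketch {
    self_ptr: usize, // offset 0: points back to the start of this region
    cpu_id: u32,     // offset 8 on a 64-bit target (mirrors FixedCpuLocal::CPU_ID)
}

fn main() {
    let mut region = Box::new(PerCpuSketch { self_ptr: 0, cpu_id: 3 });
    region.self_ptr = &*region as *const PerCpuSketch as usize;

    // Accessing a "CPU-local" field: load the self pointer (gs:[0] in the
    // real code), add the field's fixed offset, and read the value there.
    let base = region.self_ptr;
    let cpu_id = unsafe { *((base + 8) as *const u32) };
    assert_eq!(cpu_id, 3);
}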
diff --git a/kernel/cpu_local/Cargo.toml b/kernel/cpu_local/Cargo.toml index 189378c76c..1e239075a1 100644 --- a/kernel/cpu_local/Cargo.toml +++ b/kernel/cpu_local/Cargo.toml @@ -6,8 +6,9 @@ version = "0.1.0" edition = "2021" [dependencies] +crossbeam-utils = { version = "0.8.12", default-features = false } log = "0.4.8" spin = "0.9.0" +x86_64 = "0.14.8" -[dependencies.memory] -path = "../memory" +memory = { path = "../memory" } diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index 8873151bf3..28e0d180c2 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -9,7 +9,249 @@ //! and this crates uses the `gs` segment register for the CPU-local base. #![no_std] - +#![feature(thread_local)] extern crate alloc; -// TODO +use core::{ + arch::asm, + marker::PhantomData, + mem::{size_of, align_of}, +}; +use alloc::collections::{BTreeMap, btree_map::Entry}; +use memory::{MappedPages, PteFlags}; +use spin::Mutex; + +#[cfg(target_arch = "x86_64")] +use x86_64::{registers::model_specific::{GsBase, KernelGsBase}, VirtAddr}; + +pub struct FixedCpuLocal { + offset: usize, + size: usize, + align: usize, +} +// NOTE: These fields must be kept in sync with `cpu_local::FixedCpuLocal`. +impl FixedCpuLocal { + const SELF_PTR_OFFSET: usize = 0; + pub const CPU_ID: Self = Self { offset: 8, size: 4, align: 4 }; + pub const PREEMPTION_COUNT: Self = Self { offset: 12, size: 1, align: 1 }; + pub const TASK_SWITCH_PREEMPTION_GUARD: Self = Self { offset: 16, size: 8, align: 4 }; + pub const DROP_AFTER_TASK_SWITCH: Self = Self { offset: 24, size: 8, align: 8 }; + pub const TEST_VALUE: Self = Self { offset: 32, size: 8, align: 8 }; +} + + + +/// A reference to a CPU-local variable. +/// +/// Note that this struct doesn't contain an instance of the type `T`, +/// and dropping it has no effect. +pub struct CpuLocal(PhantomData<*mut T>); +impl CpuLocal { + /// Creates a + /// + /// ## Safety + /// The caller must guarantee that the type `T` is correct for the + /// given `FixedCpuLocal`. + /// This is unsafe because we currently do not have a way to guarantee + /// that the + pub const unsafe fn new_fixed( + fixed: FixedCpuLocal, + ) -> Self { + // Compile-time sanity checks. + assert!(OFFSET == fixed.offset); + assert!(size_of::() == fixed.size); + assert!(align_of::() == fixed.align); + Self(PhantomData) + } + + /// Invokes the given `func` with a mutable reference to this `CpuLocal` variable. + /// + /// This will initialize this `CpuLocal` if it has not already been initialized. + /// + /// TODO: disable preemption + /// Preemption will be temporarily disabled for the duration of this function + /// in order to ensure that + /// + /// If the caller has already disabled preemption, they can pass in an optional + /// preemption guard to prove that preemption is currently disabled. + pub fn with(&self, func: F) -> R + where + F: FnOnce(&mut T) -> R, + { + let local_ref = unsafe { + &mut *((self.self_ptr() + OFFSET) as *mut T) + }; + func(local_ref) + } + + /// Returns the value of the self pointer, which points to this CPU's `PerCpuData`. 
+ fn self_ptr(&self) -> usize { + let self_ptr: usize; + unsafe { + #[cfg(target_arch = "x86_64")] + asm!( + concat!("mov {}, gs:[0]"), // the SELF_PTR_OFFSET is 0 + lateout(reg) self_ptr, + options(nostack, preserves_flags, readonly, pure) + ); + + #[cfg(not(target_arch = "x86_64"))] + todo!("CPU Locals are not yet supported on non-x86_64 platforms"); + }; + self_ptr + } +} + +impl CpuLocal { + /// Returns a copy of this `CpuLocal`'s inner value of type `T`. + /// + /// This is only available for types where `T: Copy`. + pub fn get(&self) -> T { + self.with(|v| *v) + } +} + + +#[derive(Debug)] +struct CpuLocalDataImage(MappedPages); +impl CpuLocalDataImage { + /// This function does 3 main things: + /// 1. Allocates a new CPU-local data image for this CPU. + /// 2. Sets the self pointer value such that it can be properly accessed. + /// 3. Sets this CPU's base register (e.g., GsBase on x86_64) to the address + /// of this new data image, making it "currently active" and accessible. + fn new() -> Result { + // 1. Allocate a single page to store each CPU's local data. + let mut mp = memory::create_mapping(1, PteFlags::new().writable(true).valid(true))?; + + // 2. Set up the self pointer for the new data image. + let self_ptr_value = mp.start_address().value(); + let self_ptr_dest = mp.as_type_mut::(0)?; + *self_ptr_dest = self_ptr_value; + + // 3. Set the base register used for CPU-local data. + { + #[cfg(target_arch = "x86_64")] { + let gsbase_val = VirtAddr::new_truncate(self_ptr_value as u64); + log::warn!("Writing value {:#X} to GSBASE", gsbase_val); + GsBase::write(gsbase_val); + KernelGsBase::write(gsbase_val); + } + + #[cfg(not(target_arch = "x86_64"))] + todo!("Per-cpu storage is not yet implemented on this architecture") + } + + Ok(CpuLocalDataImage(mp)) + } +} + + +#[inline(never)] +pub fn init
<P: Sized>
( + cpu_id: u32, + per_cpu_data_initializer: impl FnOnce(usize) -> P +) -> Result<(), &'static str> { + static CPU_LOCAL_DATA_REGIONS: Mutex> = Mutex::new(BTreeMap::new()); + + let mut regions = CPU_LOCAL_DATA_REGIONS.lock(); + log::warn!("cpu_local::init(CPU {}): {:?}", cpu_id, regions); + let entry = regions.entry(cpu_id); + let data_image = match entry { + Entry::Vacant(v) => v.insert(CpuLocalDataImage::new()?), + Entry::Occupied(_) => return Err("BUG: cannot init CPU-local data more than once"), + }; + + let self_ptr = data_image.0.start_address().value(); + // Run the given initializer function on the per-CPU data. + let new_data_image = CpuLocal::<{FixedCpuLocal::SELF_PTR_OFFSET}, P>(PhantomData); + new_data_image.with(|per_cpu_data_mut| { + *per_cpu_data_mut = per_cpu_data_initializer(self_ptr); + }); + + // TODO Remove, temp tests + if true { + let test_value = CpuLocal::<8, u64>(PhantomData); + test_value.with(|tv| log::warn!("Got test_value: {:#X}", *tv)); + log::warn!("Setting test_value to 0x12345678..."); + test_value.with(|tv| { *tv = 0x12345678; }); + test_value.with(|tv| log::warn!("Got test_value: {:#X}", *tv)); + core::mem::forget(test_value); + + let test_string = CpuLocal::<16, alloc::string::String>(PhantomData); + test_string.with(|s| log::warn!("Got test_string: {:?}", s)); + let new_str = ", world!"; + log::warn!("Appending {:?} to test_string...", new_str); + test_string.with(|s| s.push_str(new_str)); + test_string.with(|s| log::warn!("Got test_string: {:?}", s)); + core::mem::forget(test_string); + } + Ok(()) +} + + +// NOTE: +// we don't currently use this because we always load a pointer to the CpuLocal +// instead of loading or storing the value directly. +// If/when we wish to support these direct loads/stores of values from/to a +// GS-based offset, then we can uncomment this module. +/* +mod load_store_direct { + + mod sealed { + pub(crate) trait SingleMovGs { + unsafe fn load(offset: usize) -> Self; + unsafe fn store(offset: usize, val: Self); + } + } + pub(crate) use sealed::SingleMovGs; + + macro_rules! impl_single_mov_gs { + ($type:ty, $reg:ident, $reg_str:literal) => { + impl SingleMovGs for [u8; size_of::<$type>()] { + #[inline] + unsafe fn load(offset: usize) -> Self { + let val: $type; + asm!( + concat!("mov ", $reg_str, ", gs:[{}]"), + lateout($reg) val, in(reg) offset, + options(nostack, preserves_flags, readonly, pure) + ); + val.to_ne_bytes() + } + #[inline] + unsafe fn store(offset: usize, val: Self) { + asm!( + concat!("mov gs:[{}], ", $reg_str), + in(reg) offset, in($reg) <$type>::from_ne_bytes(val), + options(nostack, preserves_flags) + ); + } + } + }; + } + + impl_single_mov_gs!(u64, reg, "{}"); + impl_single_mov_gs!(u32, reg, "{:e}"); + impl_single_mov_gs!(u16, reg, "{:x}"); + impl_single_mov_gs!(u8, reg_byte, "{}"); + + /// Load `SIZE` bytes from the offset relative to the GsBase segment register. + #[inline] + unsafe fn load(offset: usize) -> [u8; SIZE] + where + [u8; SIZE]: SingleMovGs, + { + SingleMovGs::load(offset) + } + + /// Store `val` at the offset relative to the GsBase segment register. 
+ #[inline] + unsafe fn store(offset: usize, val: [u8; SIZE]) + where + [u8; SIZE]: SingleMovGs, + { + SingleMovGs::store(offset, val) + } +} +*/ diff --git a/kernel/per_cpu/Cargo.toml b/kernel/per_cpu/Cargo.toml new file mode 100644 index 0000000000..a3d0a5c93c --- /dev/null +++ b/kernel/per_cpu/Cargo.toml @@ -0,0 +1,14 @@ +[package] +authors = ["Kevin Boos "] +name = "per_cpu" +description = "Definition of the struct containing data maintained for each CPU" +version = "0.1.0" +edition = "2021" + +[dependencies] +log = "0.4.8" + +cpu = { path = "../cpu" } +cpu_local = { path = "../cpu_local" } +preemption = { path = "../preemption" } +task = { path = "../task" } diff --git a/kernel/per_cpu/src/lib.rs b/kernel/per_cpu/src/lib.rs new file mode 100644 index 0000000000..32d5461cf5 --- /dev/null +++ b/kernel/per_cpu/src/lib.rs @@ -0,0 +1,76 @@ +//! Contains [`PerCpuData`], the data stored on a per-CPU basis in Theseus. +//! +//! Each CPU has its own instance of `PerCpuData`, and each CPU's instance +//! can only be accessed by itself. +//! +//! ## Relationship to `cpu_local` +//! * This crate `per_cpu` directly depends on many other kernel crates, +//! specifically the ones that define the types needed for each field of [`PerCpuData`]. +//! * The `cpu_local` crate is the "top-level" crate that is depended upon +//! by each of the crates that needs to access per-CPU data. +//! * This crate also depends on `cpu_local` in order to initialize itself +//! for each CPU right after that CPU has booted. +//! + +#![no_std] + +extern crate alloc; // TODO temp remove this + +use cpu::CpuId; +use preemption::{PreemptionCount, PreemptionGuard}; +use task::TaskRef; + + +/// The data stored on a per-CPU basis in Theseus. +/// +/// Currently, we do not support additional arbitrary per-CPU states, +/// e.g., dynamically adding or removing them. +/// +/// This is not directly accessible, it must be accessed by other crates +/// via the functions in the [`cpu_local`] crate. +#[allow(dead_code)] // These fields are accessed via `cpu_local` functions. +#[repr(C)] +pub struct PerCpuData { + /// A pointer to the start of this struct in memory, similar to a TLS self pointer. + /// This has a different initial value for each CPU's data image, of course. + /// + /// We use this to allow writes to this entire structure (for initialization), + /// and also to allow faster access to large fields herein () accelerate accesses to large items + self_ptr: usize, + // NOTE: These fields must be kept in sync with `cpu_local::FixedCpuLocal`. + /* + cpu_id: CpuId, + preemption_count: PreemptionCount, + task_switch_preemption_guard: Option, + drop_after_task_switch: Option, + */ + test_value: u64, + test_string: alloc::string::String, +} +impl PerCpuData { + pub fn new(self_ptr: usize, cpu_id: CpuId) -> Self { + Self { + self_ptr, + /* + cpu_id, + preemption_count: PreemptionCount::new(), + task_switch_preemption_guard: None, + drop_after_task_switch: None, + */ + test_value: 0xDEADBEEF, + test_string: alloc::string::String::from("test_string hello"), + } + } +} + + +/// Initializes the current CPU's `PerCpuData`. +/// +/// This must be invoked from (run on) the actual CPU with the given `cpu_id`; +/// the main bootstrap CPU cannot run this for all CPUs itself. 
+pub fn init(cpu_id: CpuId) -> Result<(), &'static str> { + cpu_local::init( + cpu_id.value(), + |self_ptr| PerCpuData::new(self_ptr, cpu_id), + ) +} diff --git a/kernel/preemption/src/lib.rs b/kernel/preemption/src/lib.rs index e5f86f551f..9fb7350dcb 100644 --- a/kernel/preemption/src/lib.rs +++ b/kernel/preemption/src/lib.rs @@ -27,6 +27,12 @@ const ATOMIC_U8_ZERO: AtomicU8 = AtomicU8::new(0); /// If a CPU's preemption count is greater than `0`, preemption is disabled. static PREEMPTION_COUNT: [AtomicU8; MAX_CPU_CORES] = [ATOMIC_U8_ZERO; MAX_CPU_CORES]; +pub struct PreemptionCount(AtomicU8); +impl PreemptionCount { + pub const fn new() -> Self { + PreemptionCount(ATOMIC_U8_ZERO) + } +} /// Prevents preemption (preemptive task switching) from occurring /// until the returned guard object is dropped. From 7999e496c3c8c39a7ce94929de4f1670109cf12d Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Sat, 1 Apr 2023 20:43:19 -0700 Subject: [PATCH 03/13] Finish CPU-local variables / per-CPU storage --- Cargo.lock | 3 +- kernel/ap_start/src/lib.rs | 11 ++-- kernel/cpu_local/Cargo.toml | 4 +- kernel/cpu_local/src/lib.rs | 113 +++++++++++++++++++++++------------- kernel/per_cpu/Cargo.toml | 4 +- kernel/per_cpu/src/lib.rs | 70 ++++++++++++++++------ 6 files changed, 136 insertions(+), 69 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b6d4c61694..8d89c49d26 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -620,8 +620,10 @@ name = "cpu_local" version = "0.1.0" dependencies = [ "crossbeam-utils", + "irq_safety", "log", "memory", + "preemption", "spin 0.9.4", "x86_64", ] @@ -2599,7 +2601,6 @@ version = "0.1.0" dependencies = [ "cpu", "cpu_local", - "log", "preemption", "task", ] diff --git a/kernel/ap_start/src/lib.rs b/kernel/ap_start/src/lib.rs index b6675be1c5..829c447003 100644 --- a/kernel/ap_start/src/lib.rs +++ b/kernel/ap_start/src/lib.rs @@ -61,8 +61,6 @@ pub fn kstart_ap( // so all we need to do here is to reload it on this CPU. early_tls::reload(); - per_cpu::init(cpu_id).unwrap(); - // get the stack that was allocated for us (this AP) by the BSP. let this_ap_stack = take_ap_stack(cpu_id.value()).unwrap_or_else( || panic!("BUG: kstart_ap(): couldn't get stack created for CPU {}", cpu_id) @@ -93,8 +91,8 @@ pub fn kstart_ap( interrupts::init_ap(); // Initialize this CPU's Local APIC such that we can use everything that depends on APIC IDs. - // This must be done before initializing task spawning, because that relies on the ability to - // enable/disable preemption, which is partially implemented by the Local APIC. + // This must be done before initializing per-CPU storage, task spawning, and more, + // as those rely on enable/disable preemption, which itself depends on the Local APIC. #[cfg(target_arch = "x86_64")] LocalApic::init( &mut kernel_mmi_ref.lock().page_table, @@ -104,9 +102,10 @@ pub fn kstart_ap( nmi_lint, nmi_flags, ).unwrap(); - + // Now that the Local APIC has been initialized for this CPU, we can initialize the - // task management subsystem and create the idle task for this CPU. + // per-CPU storage, tasking, and create the idle task for this CPU. 
+ per_cpu::init(cpu_id).unwrap(); let bootstrap_task = spawn::init(kernel_mmi_ref.clone(), cpu_id, this_ap_stack).unwrap(); spawn::create_idle_task().unwrap(); diff --git a/kernel/cpu_local/Cargo.toml b/kernel/cpu_local/Cargo.toml index 1e239075a1..6afba24665 100644 --- a/kernel/cpu_local/Cargo.toml +++ b/kernel/cpu_local/Cargo.toml @@ -1,7 +1,7 @@ [package] authors = ["Kevin Boos "] name = "cpu_local" -description = "Support for CPU-local storage; per-CPU variables" +description = "Support for accessing CPU-local storage via per-CPU variables" version = "0.1.0" edition = "2021" @@ -11,4 +11,6 @@ log = "0.4.8" spin = "0.9.0" x86_64 = "0.14.8" +irq_safety = { git = "https://github.com/theseus-os/irq_safety" } memory = { path = "../memory" } +preemption = { path = "../preemption" } diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index 28e0d180c2..7ad6ecbf0a 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -1,7 +1,9 @@ //! Offers types and macros to declare and access CPU-local storage (per-CPU variables). //! -//! CPU-local variables cannot be used until after a given CPU has bee initialized, +//! CPU-local variables cannot be used until after a given CPU has been initialized, //! i.e., its Local APIC (on x86_64) has been discovered and properly configured. +//! Currently, the [`init()`] routine in this crate should be invoked by +//! another init routine from the `per_cpu` crate. //! //! Note that Rust offers the `#[thread_local]` attribute for thread-local storage (TLS), //! but there is no equivalent for CPU-local storage. @@ -19,15 +21,17 @@ use core::{ }; use alloc::collections::{BTreeMap, btree_map::Entry}; use memory::{MappedPages, PteFlags}; +use preemption::{hold_preemption, PreemptionGuard}; use spin::Mutex; #[cfg(target_arch = "x86_64")] -use x86_64::{registers::model_specific::{GsBase, KernelGsBase}, VirtAddr}; +use x86_64::{registers::model_specific::GsBase, VirtAddr}; +/// Info use to obtain a reference to a CPU-local variable; see [`CpuLocal::new()`]. pub struct FixedCpuLocal { offset: usize, - size: usize, - align: usize, + size: usize, + align: usize, } // NOTE: These fields must be kept in sync with `cpu_local::FixedCpuLocal`. impl FixedCpuLocal { @@ -36,54 +40,80 @@ impl FixedCpuLocal { pub const PREEMPTION_COUNT: Self = Self { offset: 12, size: 1, align: 1 }; pub const TASK_SWITCH_PREEMPTION_GUARD: Self = Self { offset: 16, size: 8, align: 4 }; pub const DROP_AFTER_TASK_SWITCH: Self = Self { offset: 24, size: 8, align: 8 }; - pub const TEST_VALUE: Self = Self { offset: 32, size: 8, align: 8 }; } - - /// A reference to a CPU-local variable. /// /// Note that this struct doesn't contain an instance of the type `T`, /// and dropping it has no effect. pub struct CpuLocal(PhantomData<*mut T>); impl CpuLocal { - /// Creates a + /// Creates a new reference to a predefined CPU-local variable. /// - /// ## Safety - /// The caller must guarantee that the type `T` is correct for the - /// given `FixedCpuLocal`. + /// # Arguments + /// * `fixed`: information about this CPU-local variable. + /// Must be one of the public constants defined in [`FixedCpuLocal`]. + /// + /// # Safety /// This is unsafe because we currently do not have a way to guarantee - /// that the + /// that the caller-provided type `T` is the same as the type intended for use + /// by the given `FixedCpuLocal`. + /// + /// Thus, the caller must guarantee that the type `T` is correct for the + /// given `FixedCpuLocal`. 
pub const unsafe fn new_fixed( fixed: FixedCpuLocal, ) -> Self { - // Compile-time sanity checks. assert!(OFFSET == fixed.offset); assert!(size_of::() == fixed.size); assert!(align_of::() == fixed.align); Self(PhantomData) } - /// Invokes the given `func` with a mutable reference to this `CpuLocal` variable. + /// Invokes the given `func` with an immutable reference to this `CpuLocal` variable. /// - /// This will initialize this `CpuLocal` if it has not already been initialized. + /// Preemption will be disabled for the duration of this function + /// in order to ensure that this task cannot be switched away from + /// or migrated to another CPU. /// - /// TODO: disable preemption - /// Preemption will be temporarily disabled for the duration of this function - /// in order to ensure that - /// - /// If the caller has already disabled preemption, they can pass in an optional - /// preemption guard to prove that preemption is currently disabled. + /// If the caller has already disabled preemption, it is more efficient to + /// use the [`with_preempt()`] function, which allows the caller to pass in + /// an existing preemption guard to prove that preemption is already disabled. pub fn with(&self, func: F) -> R where - F: FnOnce(&mut T) -> R, + F: FnOnce(&T) -> R, { - let local_ref = unsafe { - &mut *((self.self_ptr() + OFFSET) as *mut T) - }; + let guard = hold_preemption(); + self.with_preempt(&guard, func) + } + + /// Invokes the given `func` with an immutable reference to this `CpuLocal` variable. + /// + /// This function accepts an existing preemption guard, which efficiently proves + /// that preemption has already been disabled. + pub fn with_preempt(&self, _guard: &PreemptionGuard, func: F) -> R + where + F: FnOnce(&T) -> R, + { + let ptr_to_cpu_local = self.self_ptr() + OFFSET; + let local_ref = unsafe { &*(ptr_to_cpu_local as *const T) }; func(local_ref) } + /// Invokes the given `func` with a mutable reference to this `CpuLocal` variable. + /// + /// Interrupts will be disabled for the duration of this function + /// in order to ensure atomicity while this per-CPU state is being modified. + pub fn with_mut(&self, func: F) -> R + where + F: FnOnce(&mut T) -> R, + { + let _held_interrupts = irq_safety::hold_interrupts(); + let ptr_to_cpu_local = self.self_ptr() + OFFSET; + let local_ref_mut = unsafe { &mut *(ptr_to_cpu_local as *mut T) }; + func(local_ref_mut) + } + /// Returns the value of the self pointer, which points to this CPU's `PerCpuData`. fn self_ptr(&self) -> usize { let self_ptr: usize; @@ -105,13 +135,14 @@ impl CpuLocal { impl CpuLocal { /// Returns a copy of this `CpuLocal`'s inner value of type `T`. /// - /// This is only available for types where `T: Copy`. + /// This is a convenience functiononly available for types where `T: Copy`. pub fn get(&self) -> T { self.with(|v| *v) } } +/// The underlying memory region for each CPU's per-CPU data. 
#[derive(Debug)] struct CpuLocalDataImage(MappedPages); impl CpuLocalDataImage { @@ -135,11 +166,10 @@ impl CpuLocalDataImage { let gsbase_val = VirtAddr::new_truncate(self_ptr_value as u64); log::warn!("Writing value {:#X} to GSBASE", gsbase_val); GsBase::write(gsbase_val); - KernelGsBase::write(gsbase_val); } #[cfg(not(target_arch = "x86_64"))] - todo!("Per-cpu storage is not yet implemented on this architecture") + todo!("CPU-local variable access is not yet implemented on this architecture") } Ok(CpuLocalDataImage(mp)) @@ -147,44 +177,49 @@ impl CpuLocalDataImage { } -#[inline(never)] +/// Initializes the CPU-local data image for this CPU. +/// +/// Note: this is invoked by the `per_cpu` crate; +/// other crates do not need to invoke this. pub fn init
<P: Sized>
( cpu_id: u32, per_cpu_data_initializer: impl FnOnce(usize) -> P ) -> Result<(), &'static str> { + /// The global set of all per-CPU data regions. static CPU_LOCAL_DATA_REGIONS: Mutex> = Mutex::new(BTreeMap::new()); let mut regions = CPU_LOCAL_DATA_REGIONS.lock(); - log::warn!("cpu_local::init(CPU {}): {:?}", cpu_id, regions); let entry = regions.entry(cpu_id); let data_image = match entry { Entry::Vacant(v) => v.insert(CpuLocalDataImage::new()?), Entry::Occupied(_) => return Err("BUG: cannot init CPU-local data more than once"), }; - let self_ptr = data_image.0.start_address().value(); + // Run the given initializer function on the per-CPU data. let new_data_image = CpuLocal::<{FixedCpuLocal::SELF_PTR_OFFSET}, P>(PhantomData); - new_data_image.with(|per_cpu_data_mut| { - *per_cpu_data_mut = per_cpu_data_initializer(self_ptr); + new_data_image.with_mut(|per_cpu_data_mut| { + let initial_value = per_cpu_data_initializer(self_ptr); + let existing_junk = core::mem::replace(per_cpu_data_mut, initial_value); + // The memory contents we just replaced was random junk, so it'd be invalid + // to run any destructors on it. Thus, we must forget it here. + core::mem::forget(existing_junk); }); // TODO Remove, temp tests if true { - let test_value = CpuLocal::<8, u64>(PhantomData); + let test_value = CpuLocal::<32, u64>(PhantomData); test_value.with(|tv| log::warn!("Got test_value: {:#X}", *tv)); log::warn!("Setting test_value to 0x12345678..."); - test_value.with(|tv| { *tv = 0x12345678; }); + test_value.with_mut(|tv| { *tv = 0x12345678; }); test_value.with(|tv| log::warn!("Got test_value: {:#X}", *tv)); - core::mem::forget(test_value); - let test_string = CpuLocal::<16, alloc::string::String>(PhantomData); + let test_string = CpuLocal::<40, alloc::string::String>(PhantomData); test_string.with(|s| log::warn!("Got test_string: {:?}", s)); let new_str = ", world!"; log::warn!("Appending {:?} to test_string...", new_str); - test_string.with(|s| s.push_str(new_str)); + test_string.with_mut(|s| s.push_str(new_str)); test_string.with(|s| log::warn!("Got test_string: {:?}", s)); - core::mem::forget(test_string); } Ok(()) } diff --git a/kernel/per_cpu/Cargo.toml b/kernel/per_cpu/Cargo.toml index a3d0a5c93c..2e99caabd6 100644 --- a/kernel/per_cpu/Cargo.toml +++ b/kernel/per_cpu/Cargo.toml @@ -1,13 +1,11 @@ [package] authors = ["Kevin Boos "] name = "per_cpu" -description = "Definition of the struct containing data maintained for each CPU" +description = "Support for defining per-CPU data types" version = "0.1.0" edition = "2021" [dependencies] -log = "0.4.8" - cpu = { path = "../cpu" } cpu_local = { path = "../cpu_local" } preemption = { path = "../preemption" } diff --git a/kernel/per_cpu/src/lib.rs b/kernel/per_cpu/src/lib.rs index 32d5461cf5..093c5fdddb 100644 --- a/kernel/per_cpu/src/lib.rs +++ b/kernel/per_cpu/src/lib.rs @@ -3,31 +3,57 @@ //! Each CPU has its own instance of `PerCpuData`, and each CPU's instance //! can only be accessed by itself. //! -//! ## Relationship to `cpu_local` -//! * This crate `per_cpu` directly depends on many other kernel crates, -//! specifically the ones that define the types needed for each field of [`PerCpuData`]. -//! * The `cpu_local` crate is the "top-level" crate that is depended upon -//! by each of the crates that needs to access per-CPU data. -//! * This crate also depends on `cpu_local` in order to initialize itself -//! for each CPU right after that CPU has booted. -//! +//! ## This `per_cpu` crate vs. the `cpu_local` crate +//! 
These two crates exist to solve a circular dependency problem: +//! the crate that defines the per-CPU data structure (this `per_cpu` crate) +//! must depend on all the foreign crates that define the types used for +//! each field in the per-CPU data structure. +//! However, those foreign crates also want to *access* these per-CPU states, +//! which would require depending on this `per_cpu` crate. +//! This would create a cyclic dependency, so we break it into two crates. +//! +//! 1. This crate `per_cpu` directly depends on many other kernel crates, +//! specifically the ones that define the types needed for each field of [`PerCpuData`]. +//! * If you want to add another piece of per-CPU data, you can do that here +//! by modifying the [`PerCpuData`] struct, and then updating the const definitions +//! of offsets and other metadata in `cpu_local::FixedCpuLocal`. +//! * To actually access per-CPU data, do not use this crate, +//! use `cpu_local::CpuLocal` instead. +//! +//! 2. The `cpu_local` crate is the "top-level" crate that is depended upon +//! by each of the crates that needs to access per-CPU data. +//! * `cpu_local` is a mostly standalone crate that does not depend +//! on any of the specific types from other Theseus crates, +//! which allows other Theseus crates to depend upon it. +//! * `cpu_local` effectively decouples the definitions +//! * This `per_cpu` crate also depends on `cpu_local` in order to initialize itself +//! for each CPU right after that CPU has booted. +//! #![no_std] -extern crate alloc; // TODO temp remove this +extern crate alloc; // TODO temp remove this use cpu::CpuId; use preemption::{PreemptionCount, PreemptionGuard}; use task::TaskRef; +struct TestU32(u32); +impl Drop for TestU32 { + fn drop(&mut self) { + panic!("Dropping TestU32({})", self.0); + } +} + /// The data stored on a per-CPU basis in Theseus. /// -/// Currently, we do not support additional arbitrary per-CPU states, -/// e.g., dynamically adding or removing them. +/// Currently, we do not support additional arbitrary per-CPU states, e.g., +/// dynamically adding or removing states, or defining per-CPU states +/// outside this struct. /// -/// This is not directly accessible, it must be accessed by other crates -/// via the functions in the [`cpu_local`] crate. +/// This struct is not directly accessible; per-CPU states are accessible +/// by other crates using the functions in the [`cpu_local`] crate. #[allow(dead_code)] // These fields are accessed via `cpu_local` functions. #[repr(C)] pub struct PerCpuData { @@ -38,32 +64,38 @@ pub struct PerCpuData { /// and also to allow faster access to large fields herein () accelerate accesses to large items self_ptr: usize, // NOTE: These fields must be kept in sync with `cpu_local::FixedCpuLocal`. - /* + /// The unique ID of this CPU. cpu_id: CpuId, + /// The current preemption count of this CPU, which is used to determine + /// whether task switching can occur or not. preemption_count: PreemptionCount, + /// A preemption guard used during task switching to ensure that one task switch + /// cannot interrupt (preempt) another task switch already in progress. + // task_switch_preemption_guard: Option, // TODO temp remove this task_switch_preemption_guard: Option, + /// Data that should be dropped after switching away from a task that has exited. + /// Currently, this contains the previous task's `TaskRef` that was removed + /// from its TLS area during the last task switch away from it. 
drop_after_task_switch: Option, - */ test_value: u64, test_string: alloc::string::String, } impl PerCpuData { - pub fn new(self_ptr: usize, cpu_id: CpuId) -> Self { + /// Defines the initial values of each per-CPU state. + fn new(self_ptr: usize, cpu_id: CpuId) -> Self { Self { self_ptr, - /* cpu_id, preemption_count: PreemptionCount::new(), task_switch_preemption_guard: None, drop_after_task_switch: None, - */ test_value: 0xDEADBEEF, test_string: alloc::string::String::from("test_string hello"), + } } } - /// Initializes the current CPU's `PerCpuData`. /// /// This must be invoked from (run on) the actual CPU with the given `cpu_id`; From c60bb2e2a189d6faab2fc100a69dd0090a7c4ecd Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Mon, 3 Apr 2023 10:32:39 -0700 Subject: [PATCH 04/13] address all concerns except trying new enum/trait-based design --- kernel/ap_start/src/lib.rs | 2 +- kernel/cpu_local/src/lib.rs | 80 ++++++++++++++++++------------------- kernel/per_cpu/src/lib.rs | 5 ++- 3 files changed, 44 insertions(+), 43 deletions(-) diff --git a/kernel/ap_start/src/lib.rs b/kernel/ap_start/src/lib.rs index 829c447003..884fc5733b 100644 --- a/kernel/ap_start/src/lib.rs +++ b/kernel/ap_start/src/lib.rs @@ -102,7 +102,7 @@ pub fn kstart_ap( nmi_lint, nmi_flags, ).unwrap(); - + // Now that the Local APIC has been initialized for this CPU, we can initialize the // per-CPU storage, tasking, and create the idle task for this CPU. per_cpu::init(cpu_id).unwrap(); diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index 7ad6ecbf0a..fdab4b0b36 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -35,7 +35,6 @@ pub struct FixedCpuLocal { } // NOTE: These fields must be kept in sync with `cpu_local::FixedCpuLocal`. impl FixedCpuLocal { - const SELF_PTR_OFFSET: usize = 0; pub const CPU_ID: Self = Self { offset: 8, size: 4, align: 4 }; pub const PREEMPTION_COUNT: Self = Self { offset: 12, size: 1, align: 1 }; pub const TASK_SWITCH_PREEMPTION_GUARD: Self = Self { offset: 16, size: 8, align: 4 }; @@ -120,7 +119,7 @@ impl CpuLocal { unsafe { #[cfg(target_arch = "x86_64")] asm!( - concat!("mov {}, gs:[0]"), // the SELF_PTR_OFFSET is 0 + "mov {}, gs:[0]", // the SELF_PTR_OFFSET is 0 lateout(reg) self_ptr, options(nostack, preserves_flags, readonly, pure) ); @@ -135,7 +134,7 @@ impl CpuLocal { impl CpuLocal { /// Returns a copy of this `CpuLocal`'s inner value of type `T`. /// - /// This is a convenience functiononly available for types where `T: Copy`. + /// This is a convenience function only available for types where `T: Copy`. pub fn get(&self) -> T { self.with(|v| *v) } @@ -144,67 +143,66 @@ impl CpuLocal { /// The underlying memory region for each CPU's per-CPU data. #[derive(Debug)] -struct CpuLocalDataImage(MappedPages); -impl CpuLocalDataImage { - /// This function does 3 main things: - /// 1. Allocates a new CPU-local data image for this CPU. - /// 2. Sets the self pointer value such that it can be properly accessed. - /// 3. Sets this CPU's base register (e.g., GsBase on x86_64) to the address - /// of this new data image, making it "currently active" and accessible. - fn new() -> Result { - // 1. Allocate a single page to store each CPU's local data. - let mut mp = memory::create_mapping(1, PteFlags::new().writable(true).valid(true))?; - - // 2. Set up the self pointer for the new data image. 
- let self_ptr_value = mp.start_address().value(); - let self_ptr_dest = mp.as_type_mut::(0)?; - *self_ptr_dest = self_ptr_value; +struct CpuLocalDataRegion(MappedPages); +impl CpuLocalDataRegion { + /// Allocates a new CPU-local data image. + fn new(size_of_per_cpu_data: usize) -> Result { + let mp = memory::create_mapping( + size_of_per_cpu_data, + PteFlags::new().writable(true).valid(true), + )?; + Ok(CpuLocalDataRegion(mp)) + } - // 3. Set the base register used for CPU-local data. - { - #[cfg(target_arch = "x86_64")] { - let gsbase_val = VirtAddr::new_truncate(self_ptr_value as u64); - log::warn!("Writing value {:#X} to GSBASE", gsbase_val); - GsBase::write(gsbase_val); - } + /// Sets this CPU's base register (e.g., GsBase on x86_64) to the address + /// of this CPU-local data image, making it "currently active" and accessible. + fn set_as_current_cpu_local_base(&self) { + let self_ptr_value = self.0.start_address().value(); - #[cfg(not(target_arch = "x86_64"))] - todo!("CPU-local variable access is not yet implemented on this architecture") + #[cfg(target_arch = "x86_64")] { + let gsbase_val = VirtAddr::new_truncate(self_ptr_value as u64); + log::warn!("Writing value {:#X} to GSBASE", gsbase_val); + GsBase::write(gsbase_val); } - Ok(CpuLocalDataImage(mp)) + #[cfg(not(target_arch = "x86_64"))] + todo!("CPU-local variable access is not yet implemented on this architecture") } } -/// Initializes the CPU-local data image for this CPU. +/// Initializes the CPU-local data region for this CPU. /// /// Note: this is invoked by the `per_cpu` crate; /// other crates do not need to invoke this. pub fn init
<P: Sized>
( cpu_id: u32, + size_of_per_cpu_data: usize, per_cpu_data_initializer: impl FnOnce(usize) -> P ) -> Result<(), &'static str> { /// The global set of all per-CPU data regions. - static CPU_LOCAL_DATA_REGIONS: Mutex> = Mutex::new(BTreeMap::new()); + static CPU_LOCAL_DATA_REGIONS: Mutex> = Mutex::new(BTreeMap::new()); let mut regions = CPU_LOCAL_DATA_REGIONS.lock(); let entry = regions.entry(cpu_id); - let data_image = match entry { - Entry::Vacant(v) => v.insert(CpuLocalDataImage::new()?), + let data_region = match entry { + Entry::Vacant(v) => v.insert(CpuLocalDataRegion::new(size_of_per_cpu_data)?), Entry::Occupied(_) => return Err("BUG: cannot init CPU-local data more than once"), }; - let self_ptr = data_image.0.start_address().value(); - // Run the given initializer function on the per-CPU data. - let new_data_image = CpuLocal::<{FixedCpuLocal::SELF_PTR_OFFSET}, P>(PhantomData); - new_data_image.with_mut(|per_cpu_data_mut| { + // Run the given initializer function to initialize the per-CPU data region. + { + let self_ptr = data_region.0.start_address().value(); let initial_value = per_cpu_data_initializer(self_ptr); - let existing_junk = core::mem::replace(per_cpu_data_mut, initial_value); - // The memory contents we just replaced was random junk, so it'd be invalid - // to run any destructors on it. Thus, we must forget it here. - core::mem::forget(existing_junk); - }); + // SAFETY: + // ✅ We just allocated memory for the self ptr above, it is only accessible by us. + // ✅ That memory is mutable (writable) and is aligned to a page boundary. + // ✅ The memory is at least as large as `size_of::
<P>
()`. + unsafe { core::ptr::write(self_ptr as *mut P, initial_value) }; + } + + // Set the new CPU-local data region as active and ready to be used on this CPU. + data_region.set_as_current_cpu_local_base(); // TODO Remove, temp tests if true { diff --git a/kernel/per_cpu/src/lib.rs b/kernel/per_cpu/src/lib.rs index 093c5fdddb..e4bf7631e0 100644 --- a/kernel/per_cpu/src/lib.rs +++ b/kernel/per_cpu/src/lib.rs @@ -61,7 +61,9 @@ pub struct PerCpuData { /// This has a different initial value for each CPU's data image, of course. /// /// We use this to allow writes to this entire structure (for initialization), - /// and also to allow faster access to large fields herein () accelerate accesses to large items + /// and also to allow faster access to large fields herein, as they don't need to be + /// loaded in full before accessing a single sub-field. See this for more: + /// . self_ptr: usize, // NOTE: These fields must be kept in sync with `cpu_local::FixedCpuLocal`. /// The unique ID of this CPU. @@ -103,6 +105,7 @@ impl PerCpuData { pub fn init(cpu_id: CpuId) -> Result<(), &'static str> { cpu_local::init( cpu_id.value(), + core::mem::size_of::(), |self_ptr| PerCpuData::new(self_ptr, cpu_id), ) } From 0bbb4f877c361f9e10fb416f46088ba62275c8fb Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Mon, 3 Apr 2023 12:09:22 -0700 Subject: [PATCH 05/13] Add assertions in `per_cpu` for offset definitions in `cpu_local` --- Cargo.lock | 10 +++++++++ kernel/cpu_local/src/lib.rs | 11 ++++++---- kernel/per_cpu/Cargo.toml | 2 ++ kernel/per_cpu/src/lib.rs | 42 +++++++++++++++++++++++++++++-------- 4 files changed, 52 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d89c49d26..b00f23612f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1948,6 +1948,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +dependencies = [ + "autocfg", +] + [[package]] name = "memory" version = "0.1.0" @@ -2601,6 +2610,7 @@ version = "0.1.0" dependencies = [ "cpu", "cpu_local", + "memoffset 0.8.0", "preemption", "task", ] diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index fdab4b0b36..be8fd091a9 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -28,10 +28,13 @@ use spin::Mutex; use x86_64::{registers::model_specific::GsBase, VirtAddr}; /// Info use to obtain a reference to a CPU-local variable; see [`CpuLocal::new()`]. +/// +/// This struct is marked `#[non_exhaustive]`, so it cannot be instantiated elsewhere. +#[non_exhaustive] pub struct FixedCpuLocal { - offset: usize, - size: usize, - align: usize, + pub offset: usize, + pub size: usize, + pub align: usize, } // NOTE: These fields must be kept in sync with `cpu_local::FixedCpuLocal`. 
impl FixedCpuLocal { @@ -119,7 +122,7 @@ impl CpuLocal { unsafe { #[cfg(target_arch = "x86_64")] asm!( - "mov {}, gs:[0]", // the SELF_PTR_OFFSET is 0 + "mov {}, gs:[0]", // the self ptr offset is 0 lateout(reg) self_ptr, options(nostack, preserves_flags, readonly, pure) ); diff --git a/kernel/per_cpu/Cargo.toml b/kernel/per_cpu/Cargo.toml index 2e99caabd6..9730312827 100644 --- a/kernel/per_cpu/Cargo.toml +++ b/kernel/per_cpu/Cargo.toml @@ -6,6 +6,8 @@ version = "0.1.0" edition = "2021" [dependencies] +memoffset = "0.8.0" + cpu = { path = "../cpu" } cpu_local = { path = "../cpu_local" } preemption = { path = "../preemption" } diff --git a/kernel/per_cpu/src/lib.rs b/kernel/per_cpu/src/lib.rs index e4bf7631e0..91df926cfd 100644 --- a/kernel/per_cpu/src/lib.rs +++ b/kernel/per_cpu/src/lib.rs @@ -31,6 +31,7 @@ //! #![no_std] +#![feature(const_refs_to_cell)] extern crate alloc; // TODO temp remove this @@ -38,14 +39,6 @@ use cpu::CpuId; use preemption::{PreemptionCount, PreemptionGuard}; use task::TaskRef; - -struct TestU32(u32); -impl Drop for TestU32 { - fn drop(&mut self) { - panic!("Dropping TestU32({})", self.0); - } -} - /// The data stored on a per-CPU basis in Theseus. /// /// Currently, we do not support additional arbitrary per-CPU states, e.g., @@ -56,6 +49,11 @@ impl Drop for TestU32 { /// by other crates using the functions in the [`cpu_local`] crate. #[allow(dead_code)] // These fields are accessed via `cpu_local` functions. #[repr(C)] +// +// IMPORTANT NOTE: +// * These fields must be kept in sync with `cpu_local::FixedCpuLocal`. +// * The same applies for the `const_assertions` module at the end of this file. +// pub struct PerCpuData { /// A pointer to the start of this struct in memory, similar to a TLS self pointer. /// This has a different initial value for each CPU's data image, of course. @@ -65,7 +63,6 @@ pub struct PerCpuData { /// loaded in full before accessing a single sub-field. See this for more: /// . self_ptr: usize, - // NOTE: These fields must be kept in sync with `cpu_local::FixedCpuLocal`. /// The unique ID of this CPU. 
cpu_id: CpuId, /// The current preemption count of this CPU, which is used to determine @@ -109,3 +106,30 @@ pub fn init(cpu_id: CpuId) -> Result<(), &'static str> { |self_ptr| PerCpuData::new(self_ptr, cpu_id), ) } + +mod const_assertions { + use core::mem::{align_of, size_of}; + use cpu_local::FixedCpuLocal; + use memoffset::offset_of; + use super::*; + + const _: () = assert!(0 == offset_of!(PerCpuData, self_ptr)); + const _: () = assert!(8 == size_of::()); + const _: () = assert!(8 == align_of::()); + + const _: () = assert!(FixedCpuLocal::CPU_ID.offset == offset_of!(PerCpuData, cpu_id)); + const _: () = assert!(FixedCpuLocal::CPU_ID.size == size_of::()); + const _: () = assert!(FixedCpuLocal::CPU_ID.align == align_of::()); + + const _: () = assert!(FixedCpuLocal::PREEMPTION_COUNT.offset == offset_of!(PerCpuData, preemption_count)); + const _: () = assert!(FixedCpuLocal::PREEMPTION_COUNT.size == size_of::()); + const _: () = assert!(FixedCpuLocal::PREEMPTION_COUNT.align == align_of::()); + + const _: () = assert!(FixedCpuLocal::TASK_SWITCH_PREEMPTION_GUARD.offset == offset_of!(PerCpuData, task_switch_preemption_guard)); + const _: () = assert!(FixedCpuLocal::TASK_SWITCH_PREEMPTION_GUARD.size == size_of::>()); + const _: () = assert!(FixedCpuLocal::TASK_SWITCH_PREEMPTION_GUARD.align == align_of::>()); + + const _: () = assert!(FixedCpuLocal::DROP_AFTER_TASK_SWITCH.offset == offset_of!(PerCpuData, drop_after_task_switch)); + const _: () = assert!(FixedCpuLocal::DROP_AFTER_TASK_SWITCH.size == size_of::>()); + const _: () = assert!(FixedCpuLocal::DROP_AFTER_TASK_SWITCH.align == align_of::>()); +} From b0ce230b862b6eb22a96cc2fc716214e58b2dc99 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Wed, 5 Apr 2023 00:35:08 +1000 Subject: [PATCH 06/13] Enum API Signed-off-by: Klim Tsoutsman --- kernel/cpu_local/src/lib.rs | 104 ++++++++++++++++++++++++++---------- kernel/per_cpu/src/lib.rs | 72 +++++++++++++++---------- 2 files changed, 121 insertions(+), 55 deletions(-) diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index be8fd091a9..ea2fd56655 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -36,20 +36,64 @@ pub struct FixedCpuLocal { pub size: usize, pub align: usize, } + // NOTE: These fields must be kept in sync with `cpu_local::FixedCpuLocal`. -impl FixedCpuLocal { - pub const CPU_ID: Self = Self { offset: 8, size: 4, align: 4 }; - pub const PREEMPTION_COUNT: Self = Self { offset: 12, size: 1, align: 1 }; - pub const TASK_SWITCH_PREEMPTION_GUARD: Self = Self { offset: 16, size: 8, align: 4 }; - pub const DROP_AFTER_TASK_SWITCH: Self = Self { offset: 24, size: 8, align: 8 }; +pub enum CpuLocalField { + CpuId, + PreemptionCount, + TaskSwitchPreemptionGuard, + DropAfterTaskSwitch, +} + +impl CpuLocalField { + pub const fn offset(&self) -> usize { + match self { + Self::CpuId => 8, + Self::PreemptionCount => 12, + Self::TaskSwitchPreemptionGuard => 16, + Self::DropAfterTaskSwitch => 24, + } + } + + // TODO: Do we even need to know the size and alignment? + + // pub const fn size(&self) -> usize { + // match self { + // Self::CpuId => 4, + // Self::PreemptionCount => 1, + // Self::TaskSwitchPreemptionGuard => 8, + // Self::DropAfterTaskSwitch => 8, + // } + // } + + // pub const fn align(&self) -> usize { + // match self { + // Self::CpuId => 4, + // Self::PreemptionCount => 1, + // Self::TaskSwitchPreemptionGuard => 4, + // Self::DropAfterTaskSwitch => 8, + // } + // } +} + +// TODO: Could come up with a more imaginative name. 
+pub unsafe trait Field: Sized { + const FIELD: CpuLocalField; + + // const SIZE_CHECK: () = assert!(Self::FIELD.size() == size_of::()); + // const ALIGN_CHECK: () = assert!(Self::FIELD.align() == align_of::()); } /// A reference to a CPU-local variable. /// /// Note that this struct doesn't contain an instance of the type `T`, /// and dropping it has no effect. -pub struct CpuLocal(PhantomData<*mut T>); -impl CpuLocal { +pub struct CpuLocal(PhantomData<*mut T>); + +impl CpuLocal +where + T: Field, +{ /// Creates a new reference to a predefined CPU-local variable. /// /// # Arguments @@ -63,12 +107,8 @@ impl CpuLocal { /// /// Thus, the caller must guarantee that the type `T` is correct for the /// given `FixedCpuLocal`. - pub const unsafe fn new_fixed( - fixed: FixedCpuLocal, - ) -> Self { - assert!(OFFSET == fixed.offset); - assert!(size_of::() == fixed.size); - assert!(align_of::() == fixed.align); + pub const unsafe fn new_fixed() -> Self { + // FIXME: Should this still be unsafe? Self(PhantomData) } @@ -97,7 +137,7 @@ impl CpuLocal { where F: FnOnce(&T) -> R, { - let ptr_to_cpu_local = self.self_ptr() + OFFSET; + let ptr_to_cpu_local = self.self_ptr() + T::FIELD.offset(); let local_ref = unsafe { &*(ptr_to_cpu_local as *const T) }; func(local_ref) } @@ -111,7 +151,7 @@ impl CpuLocal { F: FnOnce(&mut T) -> R, { let _held_interrupts = irq_safety::hold_interrupts(); - let ptr_to_cpu_local = self.self_ptr() + OFFSET; + let ptr_to_cpu_local = self.self_ptr() + T::FIELD.offset(); let local_ref_mut = unsafe { &mut *(ptr_to_cpu_local as *mut T) }; func(local_ref_mut) } @@ -134,7 +174,10 @@ impl CpuLocal { } } -impl CpuLocal { +impl CpuLocal +where + T: Copy + Field, +{ /// Returns a copy of this `CpuLocal`'s inner value of type `T`. /// /// This is a convenience function only available for types where `T: Copy`. @@ -209,18 +252,23 @@ pub fn init
<P: Sized>
( // TODO Remove, temp tests if true { - let test_value = CpuLocal::<32, u64>(PhantomData); - test_value.with(|tv| log::warn!("Got test_value: {:#X}", *tv)); - log::warn!("Setting test_value to 0x12345678..."); - test_value.with_mut(|tv| { *tv = 0x12345678; }); - test_value.with(|tv| log::warn!("Got test_value: {:#X}", *tv)); - - let test_string = CpuLocal::<40, alloc::string::String>(PhantomData); - test_string.with(|s| log::warn!("Got test_string: {:?}", s)); - let new_str = ", world!"; - log::warn!("Appending {:?} to test_string...", new_str); - test_string.with_mut(|s| s.push_str(new_str)); - test_string.with(|s| log::warn!("Got test_string: {:?}", s)); + // NOTE: The fact that this previously compiled in `cpu_local` is + // indicative of an unsafe API, as the offsets (and types) are + // completely arbitrary. There's nothing stopping us from trying to + // access CpuLocal::<16, String> which would be undefined behaviour. + + // let test_value = CpuLocal::<32, u64>(PhantomData); + // test_value.with(|tv| log::warn!("Got test_value: {:#X}", *tv)); + // log::warn!("Setting test_value to 0x12345678..."); + // test_value.with_mut(|tv| { *tv = 0x12345678; }); + // test_value.with(|tv| log::warn!("Got test_value: {:#X}", *tv)); + + // let test_string = CpuLocal::<40, alloc::string::String>(PhantomData); + // test_string.with(|s| log::warn!("Got test_string: {:?}", s)); + // let new_str = ", world!"; + // log::warn!("Appending {:?} to test_string...", new_str); + // test_string.with_mut(|s| s.push_str(new_str)); + // test_string.with(|s| log::warn!("Got test_string: {:?}", s)); } Ok(()) } diff --git a/kernel/per_cpu/src/lib.rs b/kernel/per_cpu/src/lib.rs index 91df926cfd..5d51ca6ff9 100644 --- a/kernel/per_cpu/src/lib.rs +++ b/kernel/per_cpu/src/lib.rs @@ -35,8 +35,8 @@ extern crate alloc; // TODO temp remove this -use cpu::CpuId; -use preemption::{PreemptionCount, PreemptionGuard}; +use cpu_local::{CpuLocalField, Field}; +use preemption::PreemptionGuard; use task::TaskRef; /// The data stored on a per-CPU basis in Theseus. @@ -71,23 +71,24 @@ pub struct PerCpuData { /// A preemption guard used during task switching to ensure that one task switch /// cannot interrupt (preempt) another task switch already in progress. // task_switch_preemption_guard: Option, // TODO temp remove this - task_switch_preemption_guard: Option, + task_switch_preemption_guard: TaskSwitchPreemptionGuard, /// Data that should be dropped after switching away from a task that has exited. /// Currently, this contains the previous task's `TaskRef` that was removed /// from its TLS area during the last task switch away from it. - drop_after_task_switch: Option, + drop_after_task_switch: DropAfterTaskSwitch, test_value: u64, test_string: alloc::string::String, } + impl PerCpuData { /// Defines the initial values of each per-CPU state. 
- fn new(self_ptr: usize, cpu_id: CpuId) -> Self { + fn new(self_ptr: usize, cpu_id: cpu::CpuId) -> Self { Self { self_ptr, - cpu_id, - preemption_count: PreemptionCount::new(), - task_switch_preemption_guard: None, - drop_after_task_switch: None, + cpu_id: CpuId(cpu_id), + preemption_count: PreemptionCount(preemption::PreemptionCount::new()), + task_switch_preemption_guard: TaskSwitchPreemptionGuard(None), + drop_after_task_switch: DropAfterTaskSwitch(None), test_value: 0xDEADBEEF, test_string: alloc::string::String::from("test_string hello"), @@ -95,11 +96,39 @@ impl PerCpuData { } } +#[repr(transparent)] +pub struct CpuId(pub cpu::CpuId); + +unsafe impl Field for CpuId { + const FIELD: CpuLocalField = CpuLocalField::CpuId; +} + +#[repr(transparent)] +pub struct PreemptionCount(pub preemption::PreemptionCount); + +unsafe impl Field for PreemptionCount { + const FIELD: CpuLocalField = CpuLocalField::PreemptionCount; +} + +#[repr(transparent)] +pub struct TaskSwitchPreemptionGuard(pub Option); + +unsafe impl Field for TaskSwitchPreemptionGuard { + const FIELD: CpuLocalField = CpuLocalField::TaskSwitchPreemptionGuard; +} + +#[repr(transparent)] +pub struct DropAfterTaskSwitch(pub Option); + +unsafe impl Field for DropAfterTaskSwitch { + const FIELD: CpuLocalField = CpuLocalField::DropAfterTaskSwitch; +} + /// Initializes the current CPU's `PerCpuData`. /// /// This must be invoked from (run on) the actual CPU with the given `cpu_id`; /// the main bootstrap CPU cannot run this for all CPUs itself. -pub fn init(cpu_id: CpuId) -> Result<(), &'static str> { +pub fn init(cpu_id: cpu::CpuId) -> Result<(), &'static str> { cpu_local::init( cpu_id.value(), core::mem::size_of::(), @@ -109,27 +138,16 @@ pub fn init(cpu_id: CpuId) -> Result<(), &'static str> { mod const_assertions { use core::mem::{align_of, size_of}; - use cpu_local::FixedCpuLocal; + use cpu_local::CpuLocalField; use memoffset::offset_of; use super::*; - const _: () = assert!(0 == offset_of!(PerCpuData, self_ptr)); const _: () = assert!(8 == size_of::()); const _: () = assert!(8 == align_of::()); - const _: () = assert!(FixedCpuLocal::CPU_ID.offset == offset_of!(PerCpuData, cpu_id)); - const _: () = assert!(FixedCpuLocal::CPU_ID.size == size_of::()); - const _: () = assert!(FixedCpuLocal::CPU_ID.align == align_of::()); - - const _: () = assert!(FixedCpuLocal::PREEMPTION_COUNT.offset == offset_of!(PerCpuData, preemption_count)); - const _: () = assert!(FixedCpuLocal::PREEMPTION_COUNT.size == size_of::()); - const _: () = assert!(FixedCpuLocal::PREEMPTION_COUNT.align == align_of::()); - - const _: () = assert!(FixedCpuLocal::TASK_SWITCH_PREEMPTION_GUARD.offset == offset_of!(PerCpuData, task_switch_preemption_guard)); - const _: () = assert!(FixedCpuLocal::TASK_SWITCH_PREEMPTION_GUARD.size == size_of::>()); - const _: () = assert!(FixedCpuLocal::TASK_SWITCH_PREEMPTION_GUARD.align == align_of::>()); - - const _: () = assert!(FixedCpuLocal::DROP_AFTER_TASK_SWITCH.offset == offset_of!(PerCpuData, drop_after_task_switch)); - const _: () = assert!(FixedCpuLocal::DROP_AFTER_TASK_SWITCH.size == size_of::>()); - const _: () = assert!(FixedCpuLocal::DROP_AFTER_TASK_SWITCH.align == align_of::>()); + const _: () = assert!(0 == offset_of!(PerCpuData, self_ptr)); + const _: () = assert!(CpuLocalField::CpuId.offset() == offset_of!(PerCpuData, cpu_id)); + const _: () = assert!(CpuLocalField::PreemptionCount.offset() == offset_of!(PerCpuData, preemption_count)); + const _: () = assert!(CpuLocalField::TaskSwitchPreemptionGuard.offset() == 
offset_of!(PerCpuData, task_switch_preemption_guard)); + const _: () = assert!(CpuLocalField::DropAfterTaskSwitch.offset() == offset_of!(PerCpuData, drop_after_task_switch)); } From 469d4e38be90f2445b914cd174f5c7015b8d87ab Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Wed, 5 Apr 2023 16:25:00 -0700 Subject: [PATCH 07/13] Use trait-based design for CpuLocal --- Cargo.lock | 1 + kernel/cpu_local/src/lib.rs | 154 +++++++++++++++++------------------- kernel/per_cpu/Cargo.toml | 1 + kernel/per_cpu/src/lib.rs | 84 ++++++++++---------- 4 files changed, 117 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b00f23612f..04ebe53f0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2610,6 +2610,7 @@ version = "0.1.0" dependencies = [ "cpu", "cpu_local", + "log", "memoffset 0.8.0", "preemption", "task", diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index ea2fd56655..1748a62a04 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -17,7 +17,6 @@ extern crate alloc; use core::{ arch::asm, marker::PhantomData, - mem::{size_of, align_of}, }; use alloc::collections::{BTreeMap, btree_map::Entry}; use memory::{MappedPages, PteFlags}; @@ -27,88 +26,102 @@ use spin::Mutex; #[cfg(target_arch = "x86_64")] use x86_64::{registers::model_specific::GsBase, VirtAddr}; -/// Info use to obtain a reference to a CPU-local variable; see [`CpuLocal::new()`]. -/// -/// This struct is marked `#[non_exhaustive]`, so it cannot be instantiated elsewhere. -#[non_exhaustive] -pub struct FixedCpuLocal { - pub offset: usize, - pub size: usize, - pub align: usize, -} -// NOTE: These fields must be kept in sync with `cpu_local::FixedCpuLocal`. -pub enum CpuLocalField { +/// The available CPU-local variables, i.e., fields in `per_cpu::PerCpuData` struct. +// +// NOTE: These fields and their offsets must be kept in sync with `per_cpu::PerCpuData`. +// +pub enum PerCpuField { CpuId, PreemptionCount, TaskSwitchPreemptionGuard, DropAfterTaskSwitch, + TestValue, + TestString, } - -impl CpuLocalField { +impl PerCpuField { + /// Returns the offset of this field in the `per_cpu::PerCpuData` struct. pub const fn offset(&self) -> usize { match self { Self::CpuId => 8, Self::PreemptionCount => 12, Self::TaskSwitchPreemptionGuard => 16, Self::DropAfterTaskSwitch => 24, + Self::TestValue => 32, + Self::TestString => 40, } } - - // TODO: Do we even need to know the size and alignment? - - // pub const fn size(&self) -> usize { - // match self { - // Self::CpuId => 4, - // Self::PreemptionCount => 1, - // Self::TaskSwitchPreemptionGuard => 8, - // Self::DropAfterTaskSwitch => 8, - // } - // } - - // pub const fn align(&self) -> usize { - // match self { - // Self::CpuId => 4, - // Self::PreemptionCount => 1, - // Self::TaskSwitchPreemptionGuard => 4, - // Self::DropAfterTaskSwitch => 8, - // } - // } } -// TODO: Could come up with a more imaginative name. -pub unsafe trait Field: Sized { - const FIELD: CpuLocalField; - // const SIZE_CHECK: () = assert!(Self::FIELD.size() == size_of::()); +/// This trait must be implemented for each field in `per_cpu::PerCpuData`. +/// +/// Use the [`define_per_cpu_field!()`] macro to implement this trait. +/// +/// ## Safety +/// This is marked `unsafe` because the implementor must guarantee +/// that the associated `FIELD` constant is correctly specified. 
+/// * For example, the implementation of this trait for `CpuId` must specify +/// the `FIELD` const as [`PerCpuField::CpuId`], +/// but we cannot verify that here due to cyclic dependency issues. +pub unsafe trait CpuLocalField: Sized { + const FIELD: PerCpuField; + + // In the future, we will add a `DeadlockPrevention` parameter here + // to allow each field to dictate what needs to be temporarily disabled + // while accessing this field immutably or mutably. + // For example, disabling preemption, interrupts, or nothing. + + // const SIZE_CHECK: () = assert!(Self::FIELD.size() == size_of::()); // const ALIGN_CHECK: () = assert!(Self::FIELD.align() == align_of::()); } -/// A reference to a CPU-local variable. +/// A macro used to define a type wrapper for inclusion in the `PerCpuData` struct. /// -/// Note that this struct doesn't contain an instance of the type `T`, -/// and dropping it has no effect. -pub struct CpuLocal(PhantomData<*mut T>); +/// This macro implements the [`CpuLocalField`] +#[macro_export] +macro_rules! define_per_cpu_field { + ($TypeWrapper:ident, $InnerType:ty, $PerCpuField:expr) => { + #[repr(transparent)] + struct $TypeWrapper($InnerType); + unsafe impl $crate::CpuLocalField for $TypeWrapper { + const FIELD: $crate::PerCpuField = $PerCpuField; + } -impl CpuLocal -where - T: Field, -{ + impl core::ops::Deref for $TypeWrapper { + type Target = $InnerType; + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl core::ops::DerefMut for $TypeWrapper { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + }; +} + +/// A reference to a CPU-local variable. +/// +/// ## Usage Notes +/// * This does not currently permit or handle usage of `CpuLocal::with_mut()` +/// from within an interrupt handler context. +/// * Interrupt handler contexts should only access a `CpuLocal` *immutably*. +/// * If you need to mutate/modify a CPU-local variable from within an +/// interrupt handler, please file an issue to alert the Theseus developers. +/// * This struct does not contain an instance of the type `T`. +/// Thus, dropping it has no effect. +pub struct CpuLocal(PhantomData<*const T>); +impl CpuLocal { /// Creates a new reference to a predefined CPU-local variable. /// - /// # Arguments - /// * `fixed`: information about this CPU-local variable. - /// Must be one of the public constants defined in [`FixedCpuLocal`]. - /// - /// # Safety - /// This is unsafe because we currently do not have a way to guarantee - /// that the caller-provided type `T` is the same as the type intended for use - /// by the given `FixedCpuLocal`. - /// - /// Thus, the caller must guarantee that the type `T` is correct for the - /// given `FixedCpuLocal`. - pub const unsafe fn new_fixed() -> Self { - // FIXME: Should this still be unsafe? + /// The type `T: CpuLocalField` must be specified with the turbofish operator: + /// ```rust,no_run + /// static CPU_ID: CpuLocal = CpuId::new(); + /// ``` + pub const fn new() -> Self { Self(PhantomData) } @@ -176,7 +189,7 @@ where impl CpuLocal where - T: Copy + Field, + T: Copy + CpuLocalField, { /// Returns a copy of this `CpuLocal`'s inner value of type `T`. /// @@ -249,27 +262,6 @@ pub fn init

( // Set the new CPU-local data region as active and ready to be used on this CPU. data_region.set_as_current_cpu_local_base(); - - // TODO Remove, temp tests - if true { - // NOTE: The fact that this previously compiled in `cpu_local` is - // indicative of an unsafe API, as the offsets (and types) are - // completely arbitrary. There's nothing stopping us from trying to - // access CpuLocal::<16, String> which would be undefined behaviour. - - // let test_value = CpuLocal::<32, u64>(PhantomData); - // test_value.with(|tv| log::warn!("Got test_value: {:#X}", *tv)); - // log::warn!("Setting test_value to 0x12345678..."); - // test_value.with_mut(|tv| { *tv = 0x12345678; }); - // test_value.with(|tv| log::warn!("Got test_value: {:#X}", *tv)); - - // let test_string = CpuLocal::<40, alloc::string::String>(PhantomData); - // test_string.with(|s| log::warn!("Got test_string: {:?}", s)); - // let new_str = ", world!"; - // log::warn!("Appending {:?} to test_string...", new_str); - // test_string.with_mut(|s| s.push_str(new_str)); - // test_string.with(|s| log::warn!("Got test_string: {:?}", s)); - } Ok(()) } diff --git a/kernel/per_cpu/Cargo.toml b/kernel/per_cpu/Cargo.toml index 9730312827..7afa28de1f 100644 --- a/kernel/per_cpu/Cargo.toml +++ b/kernel/per_cpu/Cargo.toml @@ -6,6 +6,7 @@ version = "0.1.0" edition = "2021" [dependencies] +log = "0.4.8" memoffset = "0.8.0" cpu = { path = "../cpu" } diff --git a/kernel/per_cpu/src/lib.rs b/kernel/per_cpu/src/lib.rs index 5d51ca6ff9..6fdbfcb98e 100644 --- a/kernel/per_cpu/src/lib.rs +++ b/kernel/per_cpu/src/lib.rs @@ -35,8 +35,7 @@ extern crate alloc; // TODO temp remove this -use cpu_local::{CpuLocalField, Field}; -use preemption::PreemptionGuard; +use cpu_local::{define_per_cpu_field, PerCpuField}; use task::TaskRef; /// The data stored on a per-CPU basis in Theseus. @@ -47,11 +46,17 @@ use task::TaskRef; /// /// This struct is not directly accessible; per-CPU states are accessible /// by other crates using the functions in the [`cpu_local`] crate. +/// +/// ## Required traits +/// Each field in this struct must implement [`cpu_local::PerCpuField`], +/// which in turn mandates that each field have a unique type distinct from the type +/// of every other field. +/// Currently we achieve this with newtype wrappers #[allow(dead_code)] // These fields are accessed via `cpu_local` functions. #[repr(C)] // // IMPORTANT NOTE: -// * These fields must be kept in sync with `cpu_local::FixedCpuLocal`. +// * These fields must be kept in sync with `cpu_local::PerCpuField`. // * The same applies for the `const_assertions` module at the end of this file. // pub struct PerCpuData { @@ -76,8 +81,8 @@ pub struct PerCpuData { /// Currently, this contains the previous task's `TaskRef` that was removed /// from its TLS area during the last task switch away from it. 
drop_after_task_switch: DropAfterTaskSwitch, - test_value: u64, - test_string: alloc::string::String, + test_value: TestValue, + test_string: TestString, } impl PerCpuData { @@ -89,40 +94,22 @@ impl PerCpuData { preemption_count: PreemptionCount(preemption::PreemptionCount::new()), task_switch_preemption_guard: TaskSwitchPreemptionGuard(None), drop_after_task_switch: DropAfterTaskSwitch(None), - test_value: 0xDEADBEEF, - test_string: alloc::string::String::from("test_string hello"), + test_value: TestValue(0xDEADBEEF), + test_string: TestString(alloc::string::String::from("test_string hello")), } } } -#[repr(transparent)] -pub struct CpuId(pub cpu::CpuId); - -unsafe impl Field for CpuId { - const FIELD: CpuLocalField = CpuLocalField::CpuId; -} - -#[repr(transparent)] -pub struct PreemptionCount(pub preemption::PreemptionCount); - -unsafe impl Field for PreemptionCount { - const FIELD: CpuLocalField = CpuLocalField::PreemptionCount; -} - -#[repr(transparent)] -pub struct TaskSwitchPreemptionGuard(pub Option); - -unsafe impl Field for TaskSwitchPreemptionGuard { - const FIELD: CpuLocalField = CpuLocalField::TaskSwitchPreemptionGuard; -} +define_per_cpu_field!(CpuId, cpu::CpuId, PerCpuField::CpuId); +define_per_cpu_field!(PreemptionCount, preemption::PreemptionCount, PerCpuField::PreemptionCount); +define_per_cpu_field!(TaskSwitchPreemptionGuard, Option, PerCpuField::TaskSwitchPreemptionGuard); +define_per_cpu_field!(DropAfterTaskSwitch, Option, PerCpuField::DropAfterTaskSwitch); -#[repr(transparent)] -pub struct DropAfterTaskSwitch(pub Option); +// TODO: temp testing, remove these +define_per_cpu_field!(TestValue, u64, PerCpuField::TestValue); +define_per_cpu_field!(TestString, alloc::string::String, PerCpuField::TestString); -unsafe impl Field for DropAfterTaskSwitch { - const FIELD: CpuLocalField = CpuLocalField::DropAfterTaskSwitch; -} /// Initializes the current CPU's `PerCpuData`. 
/// @@ -133,21 +120,34 @@ pub fn init(cpu_id: cpu::CpuId) -> Result<(), &'static str> { cpu_id.value(), core::mem::size_of::(), |self_ptr| PerCpuData::new(self_ptr, cpu_id), - ) + )?; + + // TODO Remove, temp tests + if true { + let test_value = cpu_local::CpuLocal::::new(); + test_value.with(|tv| log::warn!("Got test_value: {:#X}", &**tv)); + log::warn!("Setting test_value to 0x12345678..."); + test_value.with_mut(|tv| { **tv = 0x12345678; }); + test_value.with(|tv| log::warn!("Got test_value: {:#X}", &**tv)); + + let test_string = cpu_local::CpuLocal::::new(); + test_string.with(|s| log::warn!("Got test_string: {:?}", &**s)); + let new_str = ", world!"; + log::warn!("Appending {:?} to test_string...", new_str); + test_string.with_mut(|s| s.push_str(new_str)); + test_string.with(|s| log::warn!("Got test_string: {:?}", &**s)); + } + + Ok(()) } mod const_assertions { - use core::mem::{align_of, size_of}; - use cpu_local::CpuLocalField; use memoffset::offset_of; use super::*; - const _: () = assert!(8 == size_of::()); - const _: () = assert!(8 == align_of::()); - const _: () = assert!(0 == offset_of!(PerCpuData, self_ptr)); - const _: () = assert!(CpuLocalField::CpuId.offset() == offset_of!(PerCpuData, cpu_id)); - const _: () = assert!(CpuLocalField::PreemptionCount.offset() == offset_of!(PerCpuData, preemption_count)); - const _: () = assert!(CpuLocalField::TaskSwitchPreemptionGuard.offset() == offset_of!(PerCpuData, task_switch_preemption_guard)); - const _: () = assert!(CpuLocalField::DropAfterTaskSwitch.offset() == offset_of!(PerCpuData, drop_after_task_switch)); + const _: () = assert!(PerCpuField::CpuId.offset() == offset_of!(PerCpuData, cpu_id)); + const _: () = assert!(PerCpuField::PreemptionCount.offset() == offset_of!(PerCpuData, preemption_count)); + const _: () = assert!(PerCpuField::TaskSwitchPreemptionGuard.offset() == offset_of!(PerCpuData, task_switch_preemption_guard)); + const _: () = assert!(PerCpuField::DropAfterTaskSwitch.offset() == offset_of!(PerCpuData, drop_after_task_switch)); } From 5d64794e646fdef4f8bb5f75b8e997ea0b19f321 Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Wed, 5 Apr 2023 20:26:34 -0700 Subject: [PATCH 08/13] * impl newtype wrappers for Cpu-local compatibility in the crates that define the inner type being wrapped. * CpuId is a special case. * We can't yet impl CpuLocalField for PreemptionCount, as that requires merging the `cpu_local` and `preemption` crates. --- Cargo.lock | 1 + kernel/cpu_local/src/lib.rs | 48 ++++++------------------ kernel/per_cpu/src/lib.rs | 74 +++++++++++++++++++++++++------------ kernel/task/Cargo.toml | 1 + kernel/task/src/lib.rs | 26 +++++++++++++ 5 files changed, 91 insertions(+), 59 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 44ab808b77..78bb46b817 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3723,6 +3723,7 @@ version = "0.1.0" dependencies = [ "context_switch", "cpu", + "cpu_local", "crossbeam-utils", "environment", "irq_safety", diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index 1748a62a04..ac5d772ef1 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -8,15 +8,16 @@ //! Note that Rust offers the `#[thread_local]` attribute for thread-local storage (TLS), //! but there is no equivalent for CPU-local storage. //! On x86_64, TLS areas use the `fs` segment register for the TLS base, -//! and this crates uses the `gs` segment register for the CPU-local base. +//! and this crate uses the `gs` segment register for the CPU-local base. 
#![no_std] #![feature(thread_local)] + extern crate alloc; use core::{ arch::asm, - marker::PhantomData, + marker::PhantomData, ops::Deref, }; use alloc::collections::{BTreeMap, btree_map::Entry}; use memory::{MappedPages, PteFlags}; @@ -31,6 +32,7 @@ use x86_64::{registers::model_specific::GsBase, VirtAddr}; // // NOTE: These fields and their offsets must be kept in sync with `per_cpu::PerCpuData`. // +#[derive(PartialEq, Eq)] pub enum PerCpuField { CpuId, PreemptionCount, @@ -56,8 +58,6 @@ impl PerCpuField { /// This trait must be implemented for each field in `per_cpu::PerCpuData`. /// -/// Use the [`define_per_cpu_field!()`] macro to implement this trait. -/// /// ## Safety /// This is marked `unsafe` because the implementor must guarantee /// that the associated `FIELD` constant is correctly specified. @@ -71,37 +71,8 @@ pub unsafe trait CpuLocalField: Sized { // to allow each field to dictate what needs to be temporarily disabled // while accessing this field immutably or mutably. // For example, disabling preemption, interrupts, or nothing. - - // const SIZE_CHECK: () = assert!(Self::FIELD.size() == size_of::()); - // const ALIGN_CHECK: () = assert!(Self::FIELD.align() == align_of::()); } -/// A macro used to define a type wrapper for inclusion in the `PerCpuData` struct. -/// -/// This macro implements the [`CpuLocalField`] -#[macro_export] -macro_rules! define_per_cpu_field { - ($TypeWrapper:ident, $InnerType:ty, $PerCpuField:expr) => { - #[repr(transparent)] - struct $TypeWrapper($InnerType); - unsafe impl $crate::CpuLocalField for $TypeWrapper { - const FIELD: $crate::PerCpuField = $PerCpuField; - } - - impl core::ops::Deref for $TypeWrapper { - type Target = $InnerType; - fn deref(&self) -> &Self::Target { - &self.0 - } - } - - impl core::ops::DerefMut for $TypeWrapper { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } - } - }; -} /// A reference to a CPU-local variable. /// @@ -113,15 +84,20 @@ macro_rules! define_per_cpu_field { /// interrupt handler, please file an issue to alert the Theseus developers. /// * This struct does not contain an instance of the type `T`. /// Thus, dropping it has no effect. -pub struct CpuLocal(PhantomData<*const T>); +pub struct CpuLocal(PhantomData); impl CpuLocal { /// Creates a new reference to a predefined CPU-local variable. /// + /// ## Arguments + /// * `field`: the field in the `per_cpu::PerCpuData` struct that + /// you wish to access via the returned `CpuLocal` object. + /// /// The type `T: CpuLocalField` must be specified with the turbofish operator: /// ```rust,no_run - /// static CPU_ID: CpuLocal = CpuId::new(); + /// static CPU_ID: CpuLocal = CpuId::new(PerCpuField::CpuId); /// ``` - pub const fn new() -> Self { + pub const fn new(field: PerCpuField) -> Self { + assert!(field.offset() == T::FIELD.offset()); Self(PhantomData) } diff --git a/kernel/per_cpu/src/lib.rs b/kernel/per_cpu/src/lib.rs index 6fdbfcb98e..2dd01d7507 100644 --- a/kernel/per_cpu/src/lib.rs +++ b/kernel/per_cpu/src/lib.rs @@ -35,8 +35,12 @@ extern crate alloc; // TODO temp remove this -use cpu_local::{define_per_cpu_field, PerCpuField}; -use task::TaskRef; +use core::ops::Deref; + +use cpu::CpuId; +use cpu_local::PerCpuField; +use task::{DropAfterTaskSwitch, TaskSwitchPreemptionGuard}; +use preemption::PreemptionCount; /// The data stored on a per-CPU basis in Theseus. /// @@ -68,14 +72,16 @@ pub struct PerCpuData { /// loaded in full before accessing a single sub-field. See this for more: /// . self_ptr: usize, - /// The unique ID of this CPU. 
- cpu_id: CpuId, + /// The unique ID of this CPU. This is immutable. + cpu_id: CpuLocalCpuId, /// The current preemption count of this CPU, which is used to determine /// whether task switching can occur or not. + /// + /// TODO: we must merge `cpu_local` and `preemption` in order to use something like + /// `CpuLocal` in the `preemption` crate. preemption_count: PreemptionCount, /// A preemption guard used during task switching to ensure that one task switch /// cannot interrupt (preempt) another task switch already in progress. - // task_switch_preemption_guard: Option, // TODO temp remove this task_switch_preemption_guard: TaskSwitchPreemptionGuard, /// Data that should be dropped after switching away from a task that has exited. /// Currently, this contains the previous task's `TaskRef` that was removed @@ -90,10 +96,10 @@ impl PerCpuData { fn new(self_ptr: usize, cpu_id: cpu::CpuId) -> Self { Self { self_ptr, - cpu_id: CpuId(cpu_id), - preemption_count: PreemptionCount(preemption::PreemptionCount::new()), - task_switch_preemption_guard: TaskSwitchPreemptionGuard(None), - drop_after_task_switch: DropAfterTaskSwitch(None), + cpu_id: CpuLocalCpuId(cpu_id), + preemption_count: PreemptionCount::new(), + task_switch_preemption_guard: TaskSwitchPreemptionGuard::new(), + drop_after_task_switch: DropAfterTaskSwitch::new(), test_value: TestValue(0xDEADBEEF), test_string: TestString(alloc::string::String::from("test_string hello")), @@ -101,14 +107,33 @@ impl PerCpuData { } } -define_per_cpu_field!(CpuId, cpu::CpuId, PerCpuField::CpuId); -define_per_cpu_field!(PreemptionCount, preemption::PreemptionCount, PerCpuField::PreemptionCount); -define_per_cpu_field!(TaskSwitchPreemptionGuard, Option, PerCpuField::TaskSwitchPreemptionGuard); -define_per_cpu_field!(DropAfterTaskSwitch, Option, PerCpuField::DropAfterTaskSwitch); + +/// An immutable type wrapper for this CPU's unique ID, kept in CPU-local storage. +/// +/// Derefs into a [`CpuId`]. +pub struct CpuLocalCpuId(CpuId); +impl Deref for CpuLocalCpuId { + type Target = CpuId; + fn deref(&self) -> &Self::Target { + &self.0 + } +} +// SAFETY: The `CpuLocalCpuId` type corresponds to a field in `PerCpuData` +// with the offset specified by `PerCpuField::CpuId`. +unsafe impl cpu_local::CpuLocalField for CpuLocalCpuId { + const FIELD: PerCpuField = PerCpuField::CpuId; +} + // TODO: temp testing, remove these -define_per_cpu_field!(TestValue, u64, PerCpuField::TestValue); -define_per_cpu_field!(TestString, alloc::string::String, PerCpuField::TestString); +pub struct TestValue(u64); +unsafe impl cpu_local::CpuLocalField for TestValue { + const FIELD: PerCpuField = PerCpuField::TestValue; +} +pub struct TestString(alloc::string::String); +unsafe impl cpu_local::CpuLocalField for TestString { + const FIELD: PerCpuField = PerCpuField::TestString; +} /// Initializes the current CPU's `PerCpuData`. 
@@ -124,18 +149,21 @@ pub fn init(cpu_id: cpu::CpuId) -> Result<(), &'static str> { // TODO Remove, temp tests if true { - let test_value = cpu_local::CpuLocal::::new(); - test_value.with(|tv| log::warn!("Got test_value: {:#X}", &**tv)); + let test_value = cpu_local::CpuLocal::::new(PerCpuField::TestValue); + test_value.with(|tv| log::warn!("Got test_value: {:#X}", tv.0)); log::warn!("Setting test_value to 0x12345678..."); - test_value.with_mut(|tv| { **tv = 0x12345678; }); - test_value.with(|tv| log::warn!("Got test_value: {:#X}", &**tv)); + test_value.with_mut(|tv| { tv.0 = 0x12345678; }); + test_value.with(|tv| log::warn!("Got test_value: {:#X}", tv.0)); - let test_string = cpu_local::CpuLocal::::new(); - test_string.with(|s| log::warn!("Got test_string: {:?}", &**s)); + let test_string = cpu_local::CpuLocal::::new(PerCpuField::TestString); + test_string.with(|s| log::warn!("Got test_string: {:?}", s.0)); let new_str = ", world!"; log::warn!("Appending {:?} to test_string...", new_str); - test_string.with_mut(|s| s.push_str(new_str)); - test_string.with(|s| log::warn!("Got test_string: {:?}", &**s)); + test_string.with_mut(|s| s.0.push_str(new_str)); + test_string.with(|s| log::warn!("Got test_string: {:?}", s.0)); + + let test_cpu_id = cpu_local::CpuLocal::::new(PerCpuField::CpuId); + test_cpu_id.with(|id| log::warn!("Got test_cpu_id: {:?}", &**id)); } Ok(()) diff --git a/kernel/task/Cargo.toml b/kernel/task/Cargo.toml index 5eb0936c10..7c570d88f8 100644 --- a/kernel/task/Cargo.toml +++ b/kernel/task/Cargo.toml @@ -15,6 +15,7 @@ irq_safety = { git = "https://github.com/theseus-os/irq_safety" } context_switch = { path = "../context_switch" } cpu = { path = "../cpu" } +cpu_local = { path = "../cpu_local" } environment = { path = "../environment" } memory = { path = "../memory" } mod_mgmt = { path = "../mod_mgmt" } diff --git a/kernel/task/src/lib.rs b/kernel/task/src/lib.rs index 7cd36cabde..10c65c4f9e 100755 --- a/kernel/task/src/lib.rs +++ b/kernel/task/src/lib.rs @@ -44,6 +44,7 @@ use core::{ task::Waker, }; use cpu::CpuId; +use cpu_local::{CpuLocalField, PerCpuField}; use crossbeam_utils::atomic::AtomicCell; use irq_safety::{hold_interrupts, MutexIrqSafe}; use log::error; @@ -91,6 +92,31 @@ pub fn all_tasks() -> Vec<(usize, WeakTaskRef)> { pub type FailureCleanupFunction = fn(ExitableTaskRef, KillReason) -> !; +/// A type wrapper used to hold a CPU-local `PreemptionGuard` +/// on the current CPU during a task switch operation. +pub struct TaskSwitchPreemptionGuard(Option); +impl TaskSwitchPreemptionGuard { + pub fn new() -> Self { Self(None) } +} +// SAFETY: The `TaskSwitchPreemptionGuard` type corresponds to a field in `PerCpuData` +// with the offset specified by `PerCpuField::TaskSwitchPreemptionGuard`. +unsafe impl CpuLocalField for TaskSwitchPreemptionGuard { + const FIELD: PerCpuField = PerCpuField::TaskSwitchPreemptionGuard; +} + +/// A type wrapper used to hold CPU-local data that should be dropped +/// after switching away from a task that has exited. +pub struct DropAfterTaskSwitch(Option); +impl DropAfterTaskSwitch { + pub fn new() -> Self { Self(None) } +} +// SAFETY: The `DropAfterTaskSwitch` type corresponds to a field in `PerCpuData` +// with the offset specified by `PerCpuField::DropAfterTaskSwitch`. 
+unsafe impl CpuLocalField for DropAfterTaskSwitch { + const FIELD: PerCpuField = PerCpuField::DropAfterTaskSwitch; +} + + /// A shareable, cloneable reference to a `Task` that exposes more methods /// for task management and auto-derefs into an immutable `&Task` reference. /// From a9262875cc2cd881a6c6e90f1b02f5592a80577c Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Wed, 5 Apr 2023 20:34:39 -0700 Subject: [PATCH 09/13] fuck off clippy --- kernel/cpu_local/src/lib.rs | 2 +- kernel/task/src/lib.rs | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index ac5d772ef1..2172fda39a 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -17,7 +17,7 @@ extern crate alloc; use core::{ arch::asm, - marker::PhantomData, ops::Deref, + marker::PhantomData, }; use alloc::collections::{BTreeMap, btree_map::Entry}; use memory::{MappedPages, PteFlags}; diff --git a/kernel/task/src/lib.rs b/kernel/task/src/lib.rs index 10c65c4f9e..fe13527a39 100755 --- a/kernel/task/src/lib.rs +++ b/kernel/task/src/lib.rs @@ -94,9 +94,10 @@ pub type FailureCleanupFunction = fn(ExitableTaskRef, KillReason) -> !; /// A type wrapper used to hold a CPU-local `PreemptionGuard` /// on the current CPU during a task switch operation. +#[derive(Default)] pub struct TaskSwitchPreemptionGuard(Option); impl TaskSwitchPreemptionGuard { - pub fn new() -> Self { Self(None) } + pub const fn new() -> Self { Self(None) } } // SAFETY: The `TaskSwitchPreemptionGuard` type corresponds to a field in `PerCpuData` // with the offset specified by `PerCpuField::TaskSwitchPreemptionGuard`. @@ -104,11 +105,13 @@ unsafe impl CpuLocalField for TaskSwitchPreemptionGuard { const FIELD: PerCpuField = PerCpuField::TaskSwitchPreemptionGuard; } + /// A type wrapper used to hold CPU-local data that should be dropped /// after switching away from a task that has exited. +#[derive(Default)] pub struct DropAfterTaskSwitch(Option); impl DropAfterTaskSwitch { - pub fn new() -> Self { Self(None) } + pub const fn new() -> Self { Self(None) } } // SAFETY: The `DropAfterTaskSwitch` type corresponds to a field in `PerCpuData` // with the offset specified by `PerCpuField::DropAfterTaskSwitch`. 
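
The trait-based design that lands in patches 07-09 is easiest to see end to end in a short usage sketch. The sketch below is illustrative only and is not part of this series: it assumes a hypothetical `PerCpuField::TimerTicks` variant with a matching `timer_ticks` field in `per_cpu::PerCpuData` (neither exists here), and it uses only the API surface visible in the diffs above (`PerCpuField`, `CpuLocalField`, `CpuLocal::new()`, `with()`, `with_mut()`).

use cpu_local::{CpuLocal, CpuLocalField, PerCpuField};

/// Newtype wrapper: every per-CPU field needs a unique type so that the
/// type-to-offset mapping declared below is unambiguous.
#[repr(transparent)]
pub struct TimerTicks(u64);

// SAFETY: this promises that `TimerTicks` is the `PerCpuData` field located at
// the offset reported by the (hypothetical) `PerCpuField::TimerTicks` variant;
// the `offset_of!`-based `const_assertions` module in `per_cpu` is what would
// cross-check that promise against the real struct layout.
unsafe impl CpuLocalField for TimerTicks {
    const FIELD: PerCpuField = PerCpuField::TimerTicks;
}

// `CpuLocal::new()` is const and asserts that the requested field's offset
// matches the offset promised by `T::FIELD`.
static TIMER_TICKS: CpuLocal<TimerTicks> = CpuLocal::new(PerCpuField::TimerTicks);

/// Increments this CPU's tick count in place.
pub fn tick() {
    TIMER_TICKS.with_mut(|t| t.0 += 1);
}

/// Reads this CPU's tick count; written so it only relies on the
/// closure-taking form of `with()` shown in this series.
pub fn ticks() -> u64 {
    let mut current = 0;
    TIMER_TICKS.with(|t| current = t.0);
    current
}

The newtype-per-field requirement is what keeps this manageable in practice: each `unsafe impl` is the single place where a type is tied to an offset, and the crate's use of `hold_preemption` suggests that preemption is held for the duration of each `with`/`with_mut` closure.
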
From c2d108ebd44a289d4a43b426a4a980a144577ba0 Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Wed, 5 Apr 2023 21:29:16 -0700 Subject: [PATCH 10/13] doc fix Co-authored-by: Klim Tsoutsman --- kernel/cpu_local/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index 2172fda39a..947608773c 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -94,7 +94,7 @@ impl CpuLocal { /// /// The type `T: CpuLocalField` must be specified with the turbofish operator: /// ```rust,no_run - /// static CPU_ID: CpuLocal = CpuId::new(PerCpuField::CpuId); + /// static CPU_ID: CpuLocal = CpuId::new(PerCpuField::CpuId); /// ``` pub const fn new(field: PerCpuField) -> Self { assert!(field.offset() == T::FIELD.offset()); From 9c8cfbe0dacae33debd8dfa4263a84f81165b71b Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Wed, 5 Apr 2023 21:47:36 -0700 Subject: [PATCH 11/13] remove code for testing cpu locals --- kernel/cpu_local/src/lib.rs | 4 ---- kernel/per_cpu/src/lib.rs | 42 +------------------------------------ 2 files changed, 1 insertion(+), 45 deletions(-) diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index 947608773c..c8e072f145 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -38,8 +38,6 @@ pub enum PerCpuField { PreemptionCount, TaskSwitchPreemptionGuard, DropAfterTaskSwitch, - TestValue, - TestString, } impl PerCpuField { /// Returns the offset of this field in the `per_cpu::PerCpuData` struct. @@ -49,8 +47,6 @@ impl PerCpuField { Self::PreemptionCount => 12, Self::TaskSwitchPreemptionGuard => 16, Self::DropAfterTaskSwitch => 24, - Self::TestValue => 32, - Self::TestString => 40, } } } diff --git a/kernel/per_cpu/src/lib.rs b/kernel/per_cpu/src/lib.rs index 2dd01d7507..174048c574 100644 --- a/kernel/per_cpu/src/lib.rs +++ b/kernel/per_cpu/src/lib.rs @@ -33,10 +33,7 @@ #![no_std] #![feature(const_refs_to_cell)] -extern crate alloc; // TODO temp remove this - use core::ops::Deref; - use cpu::CpuId; use cpu_local::PerCpuField; use task::{DropAfterTaskSwitch, TaskSwitchPreemptionGuard}; @@ -87,8 +84,6 @@ pub struct PerCpuData { /// Currently, this contains the previous task's `TaskRef` that was removed /// from its TLS area during the last task switch away from it. drop_after_task_switch: DropAfterTaskSwitch, - test_value: TestValue, - test_string: TestString, } impl PerCpuData { @@ -100,9 +95,6 @@ impl PerCpuData { preemption_count: PreemptionCount::new(), task_switch_preemption_guard: TaskSwitchPreemptionGuard::new(), drop_after_task_switch: DropAfterTaskSwitch::new(), - test_value: TestValue(0xDEADBEEF), - test_string: TestString(alloc::string::String::from("test_string hello")), - } } } @@ -125,17 +117,6 @@ unsafe impl cpu_local::CpuLocalField for CpuLocalCpuId { } -// TODO: temp testing, remove these -pub struct TestValue(u64); -unsafe impl cpu_local::CpuLocalField for TestValue { - const FIELD: PerCpuField = PerCpuField::TestValue; -} -pub struct TestString(alloc::string::String); -unsafe impl cpu_local::CpuLocalField for TestString { - const FIELD: PerCpuField = PerCpuField::TestString; -} - - /// Initializes the current CPU's `PerCpuData`. 
/// /// This must be invoked from (run on) the actual CPU with the given `cpu_id`; @@ -145,28 +126,7 @@ pub fn init(cpu_id: cpu::CpuId) -> Result<(), &'static str> { cpu_id.value(), core::mem::size_of::(), |self_ptr| PerCpuData::new(self_ptr, cpu_id), - )?; - - // TODO Remove, temp tests - if true { - let test_value = cpu_local::CpuLocal::::new(PerCpuField::TestValue); - test_value.with(|tv| log::warn!("Got test_value: {:#X}", tv.0)); - log::warn!("Setting test_value to 0x12345678..."); - test_value.with_mut(|tv| { tv.0 = 0x12345678; }); - test_value.with(|tv| log::warn!("Got test_value: {:#X}", tv.0)); - - let test_string = cpu_local::CpuLocal::::new(PerCpuField::TestString); - test_string.with(|s| log::warn!("Got test_string: {:?}", s.0)); - let new_str = ", world!"; - log::warn!("Appending {:?} to test_string...", new_str); - test_string.with_mut(|s| s.0.push_str(new_str)); - test_string.with(|s| log::warn!("Got test_string: {:?}", s.0)); - - let test_cpu_id = cpu_local::CpuLocal::::new(PerCpuField::CpuId); - test_cpu_id.with(|id| log::warn!("Got test_cpu_id: {:?}", &**id)); - } - - Ok(()) + ) } mod const_assertions { From b20b88382c096fa0a55f591174ee26eb7cc10486 Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Wed, 5 Apr 2023 21:50:04 -0700 Subject: [PATCH 12/13] remove log msg --- kernel/cpu_local/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index c8e072f145..44d7ccde28 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -192,7 +192,6 @@ impl CpuLocalDataRegion { #[cfg(target_arch = "x86_64")] { let gsbase_val = VirtAddr::new_truncate(self_ptr_value as u64); - log::warn!("Writing value {:#X} to GSBASE", gsbase_val); GsBase::write(gsbase_val); } From ad65acadc58d89139eb750919f42e2dff3e65a08 Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Wed, 5 Apr 2023 22:06:45 -0700 Subject: [PATCH 13/13] fix aarch64 build --- kernel/cpu_local/Cargo.toml | 4 +++- kernel/cpu_local/src/lib.rs | 23 ++++++++++++----------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/kernel/cpu_local/Cargo.toml b/kernel/cpu_local/Cargo.toml index 6afba24665..3734b87c50 100644 --- a/kernel/cpu_local/Cargo.toml +++ b/kernel/cpu_local/Cargo.toml @@ -9,8 +9,10 @@ edition = "2021" crossbeam-utils = { version = "0.8.12", default-features = false } log = "0.4.8" spin = "0.9.0" -x86_64 = "0.14.8" irq_safety = { git = "https://github.com/theseus-os/irq_safety" } memory = { path = "../memory" } preemption = { path = "../preemption" } + +[target.'cfg(target_arch = "x86_64")'.dependencies] +x86_64 = "0.14.8" diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index 44d7ccde28..a60de4bde3 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -15,10 +15,7 @@ extern crate alloc; -use core::{ - arch::asm, - marker::PhantomData, -}; +use core::marker::PhantomData; use alloc::collections::{BTreeMap, btree_map::Entry}; use memory::{MappedPages, PteFlags}; use preemption::{hold_preemption, PreemptionGuard}; @@ -142,19 +139,21 @@ impl CpuLocal { } /// Returns the value of the self pointer, which points to this CPU's `PerCpuData`. 
+ #[cfg_attr(not(target_arch = "x86_64"), allow(unreachable_code, unused))] fn self_ptr(&self) -> usize { let self_ptr: usize; + #[cfg(target_arch = "x86_64")] unsafe { - #[cfg(target_arch = "x86_64")] - asm!( + core::arch::asm!( "mov {}, gs:[0]", // the self ptr offset is 0 lateout(reg) self_ptr, options(nostack, preserves_flags, readonly, pure) ); + } + + #[cfg(not(target_arch = "x86_64"))] + todo!("CPU Locals are not yet supported on non-x86_64 platforms"); - #[cfg(not(target_arch = "x86_64"))] - todo!("CPU Locals are not yet supported on non-x86_64 platforms"); - }; self_ptr } } @@ -195,8 +194,10 @@ impl CpuLocalDataRegion { GsBase::write(gsbase_val); } - #[cfg(not(target_arch = "x86_64"))] - todo!("CPU-local variable access is not yet implemented on this architecture") + #[cfg(not(target_arch = "x86_64"))] { + let _ = self_ptr_value; // TODO + todo!("CPU-local variable access is not yet implemented on this architecture") + } } }
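
Taken together, the x86_64 access path that remains after patch 13 is small: `GsBase` is pointed at this CPU's `PerCpuData`, whose first field (`self_ptr`, at offset 0) is a copy of that same address, so one gs-relative load recovers the region's base and each field access then adds its `PerCpuField::offset()`. The following is a condensed, illustrative restatement of that mechanism, not a drop-in replacement for the crate's code; the free-function names are invented for the sketch, and the non-x86_64 path in the real crate still ends in `todo!()`.

#[cfg(target_arch = "x86_64")]
mod gs_base_sketch {
    use x86_64::{registers::model_specific::GsBase, VirtAddr};

    /// Installs the address of this CPU's `PerCpuData` region as the gs base.
    /// The region's first 8 bytes must already hold a copy of this address.
    pub fn set_cpu_local_base(per_cpu_data_addr: usize) {
        GsBase::write(VirtAddr::new_truncate(per_cpu_data_addr as u64));
    }

    /// Reads the self-pointer back; callers add `PerCpuField::offset()` to it
    /// to reach an individual CPU-local field.
    pub fn cpu_local_base() -> usize {
        let self_ptr: usize;
        unsafe {
            core::arch::asm!(
                "mov {}, gs:[0]", // offset 0 is the `self_ptr` field
                lateout(reg) self_ptr,
                options(nostack, preserves_flags, readonly, pure),
            );
        }
        self_ptr
    }
}

Keeping the self-pointer at offset 0 is what makes the read cheap: the gs base never has to be fetched back via `rdmsr` or `rdgsbase`; a single gs-relative `mov` suffices.
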