From 6ab8786ee97205cfb1b6048792591e08da7db51e Mon Sep 17 00:00:00 2001 From: Jakob Fuchs Date: Mon, 11 May 2026 22:58:14 +0200 Subject: [PATCH 01/11] initial metrics feature --- Cargo.toml | 3 +- build.rs | 4 + machine/api/Cargo.toml | 6 + machine/api/build.rs | 6 + machine/api/src/stack.rs | 27 +++ machine/cortex-m/Cargo.toml | 3 +- machine/cortex-m/build.rs | 5 + machine/cortex-m/src/native/sched.rs | 103 ++++++++++++ options.toml | 6 + presets/stm32l4r5zi_def.toml | 1 + src/lib.rs | 3 + src/mem.rs | 6 + src/mem/alloc/bestfit.rs | 240 ++++++++++++++++++++++++++- src/mem/vmm/nommu.rs | 7 + src/metrics.rs | 42 +++++ src/sched.rs | 10 ++ src/sched/task.rs | 5 + src/sched/thread.rs | 5 + 18 files changed, 479 insertions(+), 3 deletions(-) create mode 100644 machine/api/build.rs create mode 100644 src/metrics.rs diff --git a/Cargo.toml b/Cargo.toml index 4d9004a..bceef6d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,7 @@ no-atomic-cas = [] multi-core = [] error-msg = [] defmt = ["dep:defmt", "dep:defmt-rtt"] +metrics = ["hal_api/metrics"] [build-dependencies] cbindgen = "0.28.0" @@ -53,7 +54,7 @@ rand = "0.8.5" cfg_aliases = "0.2.1" [lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)'] } +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)', 'cfg(osiris_metrics)'] } [profile.dev] panic = "abort" diff --git a/build.rs b/build.rs index 9807319..38e99e7 100644 --- a/build.rs +++ b/build.rs @@ -15,6 +15,10 @@ extern crate cbindgen; fn main() { println!("cargo::rerun-if-changed=src"); println!("cargo::rerun-if-changed=build.rs"); + println!("cargo::rerun-if-env-changed=OSIRIS_DEBUG_METRICS"); + if std::env::var("OSIRIS_DEBUG_METRICS").map_or(false, |v| v == "true" || v == "1") { + println!("cargo::rustc-cfg=osiris_metrics"); + } let out_dir = std::env::var("OUT_DIR").unwrap(); if gen_syscall_match(Path::new("src/syscalls"), Path::new(&out_dir)).is_err() { diff --git a/machine/api/Cargo.toml b/machine/api/Cargo.toml index 
e4eab78..d5a560d 100644 --- a/machine/api/Cargo.toml +++ b/machine/api/Cargo.toml @@ -3,5 +3,11 @@ name = "hal-api" version = "0.1.0" edition = "2024" +[features] +metrics = [] + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(osiris_metrics)'] } + [dependencies] seq-macro = "0.3.6" \ No newline at end of file diff --git a/machine/api/build.rs b/machine/api/build.rs new file mode 100644 index 0000000..825ae33 --- /dev/null +++ b/machine/api/build.rs @@ -0,0 +1,6 @@ +fn main() { + println!("cargo::rerun-if-env-changed=OSIRIS_DEBUG_METRICS"); + if std::env::var("OSIRIS_DEBUG_METRICS").map_or(false, |v| v == "true" || v == "1") { + println!("cargo::rustc-cfg=osiris_metrics"); + } +} diff --git a/machine/api/src/stack.rs b/machine/api/src/stack.rs index 4e7e46e..3d14832 100644 --- a/machine/api/src/stack.rs +++ b/machine/api/src/stack.rs @@ -12,6 +12,21 @@ pub struct Descriptor { pub fin: Option, } +/// Per-stack resource snapshot. Available when the `metrics` feature is enabled. +/// Backends that do not override `Stacklike::metrics` return all-zero values. +#[cfg(any(feature = "metrics", osiris_metrics))] +#[derive(Debug, Clone, Copy)] +pub struct StackMetrics { + /// Total bytes allocated for this stack. + pub total_bytes: usize, + /// Bytes currently consumed (from stack top down to current SP). + pub used_bytes: usize, + /// Bytes still available for use. + pub free_bytes: usize, + /// Peak bytes ever used since the stack was created (high-water mark). + pub peak_used_bytes: usize, +} + pub trait Stacklike { type ElemSize: Copy; type StackPtr; @@ -25,6 +40,18 @@ pub trait Stacklike { fn sp(&self) -> *mut c_void; + /// Returns a metrics snapshot for this stack. + /// Backends that do not implement full metrics tracking return all-zero values. 
+ #[cfg(any(feature = "metrics", osiris_metrics))] + fn metrics(&self) -> StackMetrics { + StackMetrics { + total_bytes: 0, + used_bytes: 0, + free_bytes: 0, + peak_used_bytes: 0, + } + } + //fn push_tinit(&mut self, init: &ThreadInitializer) -> Result; // Pushes a function context onto the stack, which will be executed when the IRQ returns. diff --git a/machine/cortex-m/Cargo.toml b/machine/cortex-m/Cargo.toml index 5871c24..a6ac0bf 100644 --- a/machine/cortex-m/Cargo.toml +++ b/machine/cortex-m/Cargo.toml @@ -25,6 +25,7 @@ syn = { version = "2.0.36", features = ["full"] } [features] panic-exit = [] panic-uart = [] +metrics = ["hal-api/metrics"] [lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)', 'cfg(cortex_m)', 'cfg(disabled)'] } +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)', 'cfg(cortex_m)', 'cfg(disabled)', 'cfg(osiris_metrics)'] } diff --git a/machine/cortex-m/build.rs b/machine/cortex-m/build.rs index 573a0c3..19b0018 100644 --- a/machine/cortex-m/build.rs +++ b/machine/cortex-m/build.rs @@ -329,6 +329,11 @@ mod vector_table { /// /// Exits with error code 1 if any critical build step fails fn main() { + println!("cargo::rerun-if-env-changed=OSIRIS_DEBUG_METRICS"); + if env::var("OSIRIS_DEBUG_METRICS").map_or(false, |v| v == "true" || v == "1") { + println!("cargo::rustc-cfg=osiris_metrics"); + } + if !hal_builder::check_enabled("cortex-m") || !check_cortex_m() { return; } diff --git a/machine/cortex-m/src/native/sched.rs b/machine/cortex-m/src/native/sched.rs index 82b7ffa..ec7b114 100644 --- a/machine/cortex-m/src/native/sched.rs +++ b/machine/cortex-m/src/native/sched.rs @@ -62,6 +62,9 @@ pub struct ArmStack { sp: StackPtr, /// The size of the stack size: NonZero, + /// High-water mark: largest sp offset ever recorded via set_sp. 
+ #[cfg(any(feature = "metrics", osiris_metrics))] + peak_offset: usize, } impl ArmStack { @@ -178,6 +181,87 @@ impl ArmStack { } } +#[cfg(all(test, any(feature = "metrics", osiris_metrics)))] +mod metrics_tests { + use super::*; + use core::num::NonZero; + use hal_api::stack::{Descriptor, Stacklike}; + use hal_api::mem::PhysAddr; + + const STACK_WORDS: usize = 256; + + // Static scratch buffers for building test stacks. NOTE(review): BUF_A is reused by several of the tests below, and Rust runs tests in parallel by default, so concurrent `static mut` access here is a data race (UB) — give each test its own buffer or run these with `--test-threads=1`. + static mut BUF_A: [u32; STACK_WORDS] = [0u32; STACK_WORDS]; + static mut BUF_B: [u32; STACK_WORDS] = [0u32; STACK_WORDS]; + + fn make_stack(buf: &mut [u32; STACK_WORDS]) -> ArmStack { + let top = unsafe { buf.as_mut_ptr().add(STACK_WORDS) }; + extern "C" fn entry() {} + unsafe { + ArmStack::new(Descriptor { + top: PhysAddr::new(top as usize), + size: NonZero::new(STACK_WORDS).unwrap(), + entry, + fin: None, + }) + .unwrap() + } + } + + #[test] + fn metrics_total_bytes_matches_size() { + let stack = make_stack(unsafe { &mut BUF_A }); + let m = stack.metrics(); + let expected_total = STACK_WORDS * core::mem::size_of::(); + assert_eq!(m.total_bytes, expected_total); + assert_eq!(m.total_bytes, m.used_bytes + m.free_bytes); + } + + #[test] + fn metrics_used_bytes_after_init() { + // After new(), push_irq_ret_fn has consumed FRAME_WORDS (18) words. + let stack = make_stack(unsafe { &mut BUF_A }); + let m = stack.metrics(); + let word = core::mem::size_of::(); + // Frame is 18 words; we allow for an optional alignment word. + assert!(m.used_bytes >= 18 * word); + assert!(m.used_bytes <= 20 * word); + assert!(m.free_bytes < m.total_bytes); + } + + #[test] + fn metrics_peak_starts_at_zero() { + // peak_offset is only updated through set_sp; new() increments sp directly. 
+ let stack = make_stack(unsafe { &mut BUF_A }); + assert_eq!(stack.metrics().peak_used_bytes, 0); + } + + #[test] + fn metrics_peak_tracks_high_water_mark() { + let mut stack = make_stack(unsafe { &mut BUF_A }); + let word = core::mem::size_of::(); + + // Simulate two context saves at increasing depths. + let sp_deep = StackPtr { offset: 50 }; + stack.set_sp(sp_deep); + assert_eq!(stack.metrics().peak_used_bytes, 50 * word); + + let sp_shallow = StackPtr { offset: 20 }; + stack.set_sp(sp_shallow); + // Peak must not decrease. + assert_eq!(stack.metrics().peak_used_bytes, 50 * word); + assert_eq!(stack.metrics().used_bytes, 20 * word); + } + + #[test] + fn metrics_free_plus_used_equals_total() { + let mut stack = make_stack(unsafe { &mut BUF_B }); + stack.set_sp(StackPtr { offset: 100 }); + let m = stack.metrics(); + assert_eq!(m.used_bytes + m.free_bytes, m.total_bytes); + } +} + impl hal_api::stack::Stacklike for ArmStack { type ElemSize = u32; type StackPtr = StackPtr; @@ -202,6 +286,8 @@ impl hal_api::stack::Stacklike for ArmStack { top, sp: StackPtr { offset: 0 }, size, + #[cfg(any(feature = "metrics", osiris_metrics))] + peak_offset: 0, }; stack.push_irq_ret_fn(entry, ctx, fin)?; @@ -217,9 +303,26 @@ impl hal_api::stack::Stacklike for ArmStack { } fn set_sp(&mut self, sp: StackPtr) { + #[cfg(any(feature = "metrics", osiris_metrics))] + if sp.offset > self.peak_offset { + self.peak_offset = sp.offset; + } self.sp = sp; } + #[cfg(any(feature = "metrics", osiris_metrics))] + fn metrics(&self) -> hal_api::stack::StackMetrics { + let word = core::mem::size_of::(); + let total_bytes = self.size.get() * word; + let used_bytes = self.sp.offset * word; + hal_api::stack::StackMetrics { + total_bytes, + used_bytes, + free_bytes: total_bytes - used_bytes, + peak_used_bytes: self.peak_offset * word, + } + } + fn sp(&self) -> *mut c_void { self.sp.as_ptr(self.top).as_ptr() as *mut c_void } diff --git a/options.toml b/options.toml index 088cdb3..7180dd5 100644 --- 
a/options.toml +++ b/options.toml @@ -10,6 +10,12 @@ name = "Runtime Symbols" description = "Enables runtime symbols for debugging. This will increase the binary size by potentially a lot. When enabled e.g. backtraces can display function names." type = "Boolean" +[debug.metrics] +name = "Metrics" +description = "Enables runtime metrics collection for heap allocator and stack usage. Increases binary size and adds small per-alloc/free overhead." +type = "Boolean" +default = false + [debug.uart] name = "Debug UART" description = "Select the UART peripheral to use for debug output." diff --git a/presets/stm32l4r5zi_def.toml b/presets/stm32l4r5zi_def.toml index ee3bf89..e6cb304 100644 --- a/presets/stm32l4r5zi_def.toml +++ b/presets/stm32l4r5zi_def.toml @@ -8,6 +8,7 @@ OSIRIS_MACHINE = "cortex-m" # Debugging configuration OSIRIS_DEBUG_UART = "LPUART1" OSIRIS_DEBUG_RUNTIMESYMBOLS = "false" +OSIRIS_DEBUG_METRICS = "false" # Tuning parameters OSIRIS_TUNING_ENABLEFPU = "false" diff --git a/src/lib.rs b/src/lib.rs index 15c260d..2804721 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,6 +20,9 @@ mod sync; mod syscalls; mod time; +#[cfg(any(feature = "metrics", osiris_metrics))] +pub mod metrics; + // Public, for now. pub mod drivers; pub mod uapi; diff --git a/src/mem.rs b/src/mem.rs index 36d610e..263c3c8 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -91,6 +91,12 @@ pub unsafe fn free(ptr: NonNull, size: usize) { unsafe { allocator.free(ptr, size) }; } +/// Returns a metrics snapshot of the global kernel heap. +#[cfg(any(feature = "metrics", osiris_metrics))] +pub fn global_metrics() -> alloc::bestfit::AllocatorMetrics { + GLOBAL_ALLOCATOR.lock().metrics() +} + /// Aligns a size to be a multiple of the u128 alignment. /// /// `size` - The size to align. 
diff --git a/src/mem/alloc/bestfit.rs b/src/mem/alloc/bestfit.rs index 32d3d64..16cee6a 100644 --- a/src/mem/alloc/bestfit.rs +++ b/src/mem/alloc/bestfit.rs @@ -12,6 +12,26 @@ struct BestFitMeta { next: Option>, } +/// Snapshot of allocator resource usage. Available when the `metrics` feature is enabled. +#[cfg(any(feature = "metrics", osiris_metrics))] +#[derive(Debug, Clone, Copy)] +pub struct AllocatorMetrics { + /// Total bytes under allocator management (sum of all `add_range` sizes). + pub total_bytes: usize, + /// User-accessible bytes currently in the free list. + pub free_bytes: usize, + /// `total_bytes` minus `free_bytes`; includes metadata overhead and allocated user bytes. + pub allocated_bytes: usize, + /// Number of distinct free blocks (fragmentation indicator). + pub free_blocks: usize, + /// Largest single allocation currently possible. + pub largest_free_block: usize, + /// Lifetime count of successful `malloc` calls. + pub alloc_count: u64, + /// Lifetime count of `free` calls. + pub free_count: u64, +} + /// This is an allocator implementation that uses the best fit strategy. /// That does mean, when we allocate a block, we try to find the smallest block that fits the requested size. /// Blocks are stored in a singly linked list. The important part is that the linked list is stored in-line with the memory blocks. @@ -20,6 +40,12 @@ struct BestFitMeta { pub struct BestFitAllocator { /// Head of the free block list. head: Option>, + #[cfg(any(feature = "metrics", osiris_metrics))] + total_bytes: usize, + #[cfg(any(feature = "metrics", osiris_metrics))] + alloc_count: u64, + #[cfg(any(feature = "metrics", osiris_metrics))] + free_count: u64, } // Safety: BestFitAllocator is not Copy or Clone. @@ -37,7 +63,15 @@ impl BestFitAllocator { /// /// Returns the new BestFitAllocator. 
pub const fn new() -> Self { - Self { head: None } + Self { + head: None, + #[cfg(any(feature = "metrics", osiris_metrics))] + total_bytes: 0, + #[cfg(any(feature = "metrics", osiris_metrics))] + alloc_count: 0, + #[cfg(any(feature = "metrics", osiris_metrics))] + free_count: 0, + } } /// Adds a range of memory to the allocator. @@ -81,6 +115,12 @@ impl BestFitAllocator { // Set the head to the new block. self.head = Some(unsafe { NonNull::new_unchecked(ptr.as_mut_ptr::()) }); + + #[cfg(any(feature = "metrics", osiris_metrics))] + { + self.total_bytes = self.total_bytes.saturating_add(range.end.diff(range.start)); + } + Ok(()) } @@ -334,6 +374,11 @@ impl super::Allocator for BestFitAllocator { }); } + #[cfg(any(feature = "metrics", osiris_metrics))] + { + self.alloc_count += 1; + } + // Return the user pointer. Ok(unsafe { Self::user_ptr(block).cast() }) } @@ -359,11 +404,204 @@ impl super::Allocator for BestFitAllocator { // Set the block as the new head. self.head = Some(block); + + #[cfg(any(feature = "metrics", osiris_metrics))] + { + self.free_count += 1; + } + } +} + +#[cfg(any(feature = "metrics", osiris_metrics))] +impl BestFitAllocator { + pub fn metrics(&self) -> AllocatorMetrics { + let mut free_bytes = 0usize; + let mut free_blocks = 0usize; + let mut largest_free_block = 0usize; + + let mut current = self.head; + while let Some(ptr) = current { + let meta = unsafe { ptr.cast::().as_ref() }; + free_bytes = free_bytes.saturating_add(meta.size); + free_blocks += 1; + if meta.size > largest_free_block { + largest_free_block = meta.size; + } + current = meta.next; + } + + AllocatorMetrics { + total_bytes: self.total_bytes, + free_bytes, + allocated_bytes: self.total_bytes.saturating_sub(free_bytes), + free_blocks, + largest_free_block, + alloc_count: self.alloc_count, + free_count: self.free_count, + } } } // TESTING ------------------------------------------------------------------------------------------------------------ +#[cfg(all(test, any(feature = 
"metrics", osiris_metrics)))] +mod metrics_tests { + use super::*; + use super::super::*; + use core::mem::size_of; + + fn alloc_range(length: usize) -> std::ops::Range { + use crate::hal::mem::PhysAddr; + let layout = + std::alloc::Layout::from_size_align(length, align_of::()).unwrap(); + let ptr = unsafe { std::alloc::alloc(layout) }; + PhysAddr::new(ptr as usize)..PhysAddr::new(ptr as usize + length) + } + + #[test] + fn metrics_fresh_allocator_is_zero() { + let allocator = BestFitAllocator::new(); + let m = allocator.metrics(); + assert_eq!(m.total_bytes, 0); + assert_eq!(m.free_bytes, 0); + assert_eq!(m.allocated_bytes, 0); + assert_eq!(m.free_blocks, 0); + assert_eq!(m.largest_free_block, 0); + assert_eq!(m.alloc_count, 0); + assert_eq!(m.free_count, 0); + } + + #[test] + fn metrics_after_add_range() { + let mut allocator = BestFitAllocator::new(); + let range_len = 4096usize; + let range = alloc_range(range_len); + unsafe { allocator.add_range(&range).unwrap() }; + + let m = allocator.metrics(); + assert_eq!(m.total_bytes, range_len); + assert_eq!(m.free_blocks, 1); + assert!(m.free_bytes > 0); + assert!(m.free_bytes < range_len, "metadata must consume some bytes"); + assert_eq!(m.largest_free_block, m.free_bytes); + assert_eq!(m.allocated_bytes, range_len - m.free_bytes); + assert_eq!(m.alloc_count, 0); + assert_eq!(m.free_count, 0); + } + + #[test] + fn metrics_alloc_increments_count_and_reduces_free() { + let mut allocator = BestFitAllocator::new(); + let range = alloc_range(4096); + unsafe { allocator.add_range(&range).unwrap() }; + let before = allocator.metrics(); + + let _ptr = unsafe { allocator.malloc::(128, 1, None).unwrap() }; + let after = allocator.metrics(); + + assert_eq!(after.alloc_count, 1); + assert_eq!(after.free_count, 0); + assert!(after.free_bytes < before.free_bytes); + } + + #[test] + fn metrics_free_increments_count_and_restores_free_bytes() { + let mut allocator = BestFitAllocator::new(); + let range = alloc_range(4096); + unsafe { 
allocator.add_range(&range).unwrap() }; + + let ptr = unsafe { allocator.malloc::(128, 1, None).unwrap() }; + let after_alloc = allocator.metrics(); + + unsafe { allocator.free(ptr, 128) }; + let after_free = allocator.metrics(); + + assert_eq!(after_free.alloc_count, 1); + assert_eq!(after_free.free_count, 1); + // Freeing must return bytes to the free pool. + assert!(after_free.free_bytes > after_alloc.free_bytes); + } + + #[test] + fn metrics_free_blocks_count() { + let mut allocator = BestFitAllocator::new(); + let range = alloc_range(4096); + unsafe { allocator.add_range(&range).unwrap() }; + + let p1 = unsafe { allocator.malloc::(128, 1, None).unwrap() }; + let p2 = unsafe { allocator.malloc::(128, 1, None).unwrap() }; + let after_two_allocs = allocator.metrics(); + + unsafe { allocator.free(p1, 128) }; + let after_free1 = allocator.metrics(); + + unsafe { allocator.free(p2, 128) }; + let after_free2 = allocator.metrics(); + + // Each free prepends one block to the free list. + assert_eq!(after_free1.free_blocks, after_two_allocs.free_blocks + 1); + assert_eq!(after_free2.free_blocks, after_two_allocs.free_blocks + 2); + assert_eq!(after_free2.alloc_count, 2); + assert_eq!(after_free2.free_count, 2); + } + + #[test] + fn metrics_largest_free_block_single_range() { + let mut allocator = BestFitAllocator::new(); + let range = alloc_range(4096); + unsafe { allocator.add_range(&range).unwrap() }; + + let m = allocator.metrics(); + // Single block: largest == only free block. + assert_eq!(m.largest_free_block, m.free_bytes); + + let _p = unsafe { allocator.malloc::(128, 1, None).unwrap() }; + let m2 = allocator.metrics(); + // Largest free block shrinks after allocation. 
+ assert!(m2.largest_free_block <= m.largest_free_block); + } + + #[test] + fn metrics_multiple_ranges_total_bytes() { + let mut allocator = BestFitAllocator::new(); + const RANGE_LEN: usize = 1024; + const RANGES: usize = 3; + + for _ in 0..RANGES { + let range = alloc_range(RANGE_LEN); + unsafe { allocator.add_range(&range).unwrap() }; + } + + let m = allocator.metrics(); + assert_eq!(m.total_bytes, RANGE_LEN * RANGES); + assert_eq!(m.free_blocks, RANGES); + } + + #[test] + fn metrics_exact_fit_no_split() { + // Allocate the entire usable space of a single-block range so no split occurs. + let mut allocator = BestFitAllocator::new(); + let overhead = size_of::() + BestFitAllocator::align_up(); + let user_size = 128usize; + let range = alloc_range(user_size + overhead); + unsafe { allocator.add_range(&range).unwrap() }; + + let before = allocator.metrics(); + assert_eq!(before.free_blocks, 1); + + let ptr = unsafe { allocator.malloc::(user_size, 1, None).unwrap() }; + let after_alloc = allocator.metrics(); + // Exact fit: no remainder block left. 
+ assert_eq!(after_alloc.free_blocks, 0); + assert_eq!(after_alloc.free_bytes, 0); + + unsafe { allocator.free(ptr, user_size) }; + let after_free = allocator.metrics(); + assert_eq!(after_free.free_blocks, 1); + assert_eq!(after_free.free_bytes, before.free_bytes); + } +} + #[cfg(test)] mod tests { use crate::mem::align_up; diff --git a/src/mem/vmm/nommu.rs b/src/mem/vmm/nommu.rs index e47533a..512e691 100644 --- a/src/mem/vmm/nommu.rs +++ b/src/mem/vmm/nommu.rs @@ -17,6 +17,13 @@ pub struct AddressSpace { allocator: bestfit::BestFitAllocator, } +#[cfg(any(feature = "metrics", osiris_metrics))] +impl AddressSpace { + pub(crate) fn metrics(&self) -> crate::mem::alloc::bestfit::AllocatorMetrics { + self.allocator.metrics() + } +} + impl vmm::AddressSpacelike for AddressSpace { fn new(pgs: usize) -> Result { let begin = pfa::alloc_page(pgs).ok_or(kerr!(ENOMEM))?; diff --git a/src/metrics.rs b/src/metrics.rs new file mode 100644 index 0000000..6c0f80c --- /dev/null +++ b/src/metrics.rs @@ -0,0 +1,42 @@ +//! Unified kernel metrics API. +//! +//! Enabled by the `metrics` Cargo feature. Provides: +//! - Global heap metrics via [`kernel_metrics`] / [`global_heap_metrics`] +//! - Per-task heap metrics via [`task_heap_metrics`] +//! - Per-thread stack metrics via [`thread_stack_metrics`] +//! +//! For full stack metrics, the backend crate's `metrics` feature must also be +//! enabled (e.g. `hal_cortex_m/metrics`). Without it, stack metrics return zeros. + +use crate::mem::alloc::bestfit::AllocatorMetrics; +use crate::sched::{self, task, thread}; +use crate::mem; + +/// Aggregated snapshot of global kernel resources. +pub struct KernelMetrics { + pub heap: AllocatorMetrics, +} + +/// Returns a snapshot of global kernel metrics (heap only). +pub fn kernel_metrics() -> KernelMetrics { + KernelMetrics { + heap: mem::global_metrics(), + } +} + +/// Returns heap metrics for the global kernel allocator. 
+pub fn global_heap_metrics() -> AllocatorMetrics { + mem::global_metrics() +} + +/// Returns stack metrics for the thread identified by `tid`, or `None` if the +/// thread does not exist. +pub fn thread_stack_metrics(tid: thread::UId) -> Option { + sched::with(|sched| sched.thread_stack_metrics(tid)) +} + +/// Returns heap metrics for the address space owned by task `task_id`, or +/// `None` if the task does not exist. +pub fn task_heap_metrics(task_id: task::UId) -> Option { + sched::with(|sched| sched.task_heap_metrics(task_id)) +} diff --git a/src/sched.rs b/src/sched.rs index c5f01d6..147f776 100644 --- a/src/sched.rs +++ b/src/sched.rs @@ -377,6 +377,16 @@ impl Scheduler { Ok(()) } + #[cfg(any(feature = "metrics", osiris_metrics))] + pub fn thread_stack_metrics(&self, tid: thread::UId) -> Option { + self.threads.get(tid).map(|t| t.stack_metrics()) + } + + #[cfg(any(feature = "metrics", osiris_metrics))] + pub fn task_heap_metrics(&self, task_id: task::UId) -> Option { + self.tasks.get(task_id).map(|t| t.heap_metrics()) + } + pub fn create_thread( &mut self, task: Option, diff --git a/src/sched/task.rs b/src/sched/task.rs index 09e7296..86be6b5 100644 --- a/src/sched/task.rs +++ b/src/sched/task.rs @@ -137,4 +137,9 @@ impl Task { pub fn threads(&self) -> &list::List { &self.threads } + + #[cfg(any(feature = "metrics", osiris_metrics))] + pub(crate) fn heap_metrics(&self) -> crate::mem::alloc::bestfit::AllocatorMetrics { + self.address_space.metrics() + } } diff --git a/src/sched/thread.rs b/src/sched/thread.rs index 77d1321..7a6e19b 100644 --- a/src/sched/thread.rs +++ b/src/sched/thread.rs @@ -342,6 +342,11 @@ impl Thread { self.state.stack.sp() } + #[cfg(any(feature = "metrics", osiris_metrics))] + pub fn stack_metrics(&self) -> crate::hal::stack::StackMetrics { + self.state.stack.metrics() + } + pub fn uid(&self) -> UId { self.uid } From 51686dd013499de8a6351562b41f11a1c61036c8 Mon Sep 17 00:00:00 2001 From: Jakob Fuchs Date: Mon, 11 May 2026 23:08:43 +0200 
Subject: [PATCH 02/11] formatting --- src/mem/alloc/bestfit.rs | 5 ++--- src/metrics.rs | 2 +- src/sched.rs | 10 ++++++++-- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/mem/alloc/bestfit.rs b/src/mem/alloc/bestfit.rs index 16cee6a..3fda0b3 100644 --- a/src/mem/alloc/bestfit.rs +++ b/src/mem/alloc/bestfit.rs @@ -446,14 +446,13 @@ impl BestFitAllocator { #[cfg(all(test, any(feature = "metrics", osiris_metrics)))] mod metrics_tests { - use super::*; use super::super::*; + use super::*; use core::mem::size_of; fn alloc_range(length: usize) -> std::ops::Range { use crate::hal::mem::PhysAddr; - let layout = - std::alloc::Layout::from_size_align(length, align_of::()).unwrap(); + let layout = std::alloc::Layout::from_size_align(length, align_of::()).unwrap(); let ptr = unsafe { std::alloc::alloc(layout) }; PhysAddr::new(ptr as usize)..PhysAddr::new(ptr as usize + length) } diff --git a/src/metrics.rs b/src/metrics.rs index 6c0f80c..c188ab1 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -8,9 +8,9 @@ //! For full stack metrics, the backend crate's `metrics` feature must also be //! enabled (e.g. `hal_cortex_m/metrics`). Without it, stack metrics return zeros. +use crate::mem; use crate::mem::alloc::bestfit::AllocatorMetrics; use crate::sched::{self, task, thread}; -use crate::mem; /// Aggregated snapshot of global kernel resources. 
pub struct KernelMetrics { diff --git a/src/sched.rs b/src/sched.rs index 147f776..7ad70cb 100644 --- a/src/sched.rs +++ b/src/sched.rs @@ -378,12 +378,18 @@ impl Scheduler { } #[cfg(any(feature = "metrics", osiris_metrics))] - pub fn thread_stack_metrics(&self, tid: thread::UId) -> Option { + pub fn thread_stack_metrics( + &self, + tid: thread::UId, + ) -> Option { self.threads.get(tid).map(|t| t.stack_metrics()) } #[cfg(any(feature = "metrics", osiris_metrics))] - pub fn task_heap_metrics(&self, task_id: task::UId) -> Option { + pub fn task_heap_metrics( + &self, + task_id: task::UId, + ) -> Option { self.tasks.get(task_id).map(|t| t.heap_metrics()) } From 00165c0e77faa95046134a66c553a6d5270b22e0 Mon Sep 17 00:00:00 2001 From: Jakob Fuchs Date: Wed, 13 May 2026 00:31:12 +0200 Subject: [PATCH 03/11] no sched lock taking in uspace --- src/mem.rs | 2 +- src/mem/alloc/bestfit.rs | 55 +++++++++++++++++---- src/metrics.rs | 42 ---------------- src/metrics/mod.rs | 22 +++++++++ src/metrics/store.rs | 66 ++++++++++++++++++++++++++ src/sched.rs | 100 +++++++++++++++++++++++++++++++-------- src/sched/task.rs | 6 ++- src/sync.rs | 2 + src/sync/seqlock.rs | 48 +++++++++++++++++++ src/types/array.rs | 11 +++++ 10 files changed, 281 insertions(+), 73 deletions(-) delete mode 100644 src/metrics.rs create mode 100644 src/metrics/mod.rs create mode 100644 src/metrics/store.rs create mode 100644 src/sync/seqlock.rs diff --git a/src/mem.rs b/src/mem.rs index 263c3c8..ea60844 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -93,7 +93,7 @@ pub unsafe fn free(ptr: NonNull, size: usize) { /// Returns a metrics snapshot of the global kernel heap. 
#[cfg(any(feature = "metrics", osiris_metrics))] -pub fn global_metrics() -> alloc::bestfit::AllocatorMetrics { +pub(crate) fn global_metrics() -> alloc::bestfit::AllocatorMetrics { GLOBAL_ALLOCATOR.lock().metrics() } diff --git a/src/mem/alloc/bestfit.rs b/src/mem/alloc/bestfit.rs index 3fda0b3..e6f0192 100644 --- a/src/mem/alloc/bestfit.rs +++ b/src/mem/alloc/bestfit.rs @@ -42,6 +42,12 @@ pub struct BestFitAllocator { head: Option>, #[cfg(any(feature = "metrics", osiris_metrics))] total_bytes: usize, + /// Sum of `meta.size` across all free blocks. Updated O(1) on every alloc/free. + #[cfg(any(feature = "metrics", osiris_metrics))] + free_bytes: usize, + /// Count of free blocks. Updated O(1) on every alloc/free. + #[cfg(any(feature = "metrics", osiris_metrics))] + free_blocks: usize, #[cfg(any(feature = "metrics", osiris_metrics))] alloc_count: u64, #[cfg(any(feature = "metrics", osiris_metrics))] @@ -68,6 +74,10 @@ impl BestFitAllocator { #[cfg(any(feature = "metrics", osiris_metrics))] total_bytes: 0, #[cfg(any(feature = "metrics", osiris_metrics))] + free_bytes: 0, + #[cfg(any(feature = "metrics", osiris_metrics))] + free_blocks: 0, + #[cfg(any(feature = "metrics", osiris_metrics))] alloc_count: 0, #[cfg(any(feature = "metrics", osiris_metrics))] free_count: 0, @@ -104,9 +114,11 @@ impl BestFitAllocator { // The user pointer is the pointer to the user memory. So we need to add the size of the meta data and possibly add padding. let user_pointer = ptr + size_of::() + Self::align_up(); + let usable = range.end.diff(user_pointer); + // Set the current head as the next block, so we can add the new block to the head. 
let meta = BestFitMeta { - size: range.end.diff(user_pointer), + size: usable, next: self.head, }; @@ -119,6 +131,8 @@ impl BestFitAllocator { #[cfg(any(feature = "metrics", osiris_metrics))] { self.total_bytes = self.total_bytes.saturating_add(range.end.diff(range.start)); + self.free_bytes = self.free_bytes.saturating_add(usable); + self.free_blocks += 1; } Ok(()) @@ -282,6 +296,12 @@ impl super::Allocator for BestFitAllocator { debug_assert!(aligned_size >= size); debug_assert!(aligned_size <= isize::MAX as usize); + // Tracking variables for O(1) metrics update after the allocation. + #[cfg(any(feature = "metrics", osiris_metrics))] + let mut free_sub: usize = 0; + #[cfg(any(feature = "metrics", osiris_metrics))] + let mut blocks_sub: usize = 0; + // Find the best fit block. let (split, block, prev) = match self.select_block(aligned_size, request) { Ok((block, prev)) => { @@ -312,6 +332,11 @@ impl super::Allocator for BestFitAllocator { // If the block is big enough to split. Then it also needs to be big enough to store the metadata + align of the next block. if meta.size > min { + // Split: old free block (meta.size) leaves, remainder (meta.size - min) stays. + // Net free_bytes change: -min. free_blocks unchanged (one out, one in). + #[cfg(any(feature = "metrics", osiris_metrics))] + { free_sub = min; } + // Calculate the remaining size of the block and thus the next metadata. let remaining_meta = BestFitMeta { size: meta.size - min, @@ -342,11 +367,22 @@ impl super::Allocator for BestFitAllocator { (true, block, prev) } else { + // No split: entire free block (meta.size) is consumed. + #[cfg(any(feature = "metrics", osiris_metrics))] + { free_sub = meta.size; blocks_sub = 1; } + (false, block, prev) } } Err(_) => { let (block, prev) = self.select_block(size, request)?; + // Retry succeeded with original size; always no-split. 
+ #[cfg(any(feature = "metrics", osiris_metrics))] + { + let meta = unsafe { block.cast::().as_ref() }; + free_sub = meta.size; + blocks_sub = 1; + } (false, block, prev) } }; @@ -376,6 +412,8 @@ impl super::Allocator for BestFitAllocator { #[cfg(any(feature = "metrics", osiris_metrics))] { + self.free_bytes = self.free_bytes.saturating_sub(free_sub); + self.free_blocks = self.free_blocks.saturating_sub(blocks_sub); self.alloc_count += 1; } @@ -407,6 +445,8 @@ impl super::Allocator for BestFitAllocator { #[cfg(any(feature = "metrics", osiris_metrics))] { + self.free_bytes = self.free_bytes.saturating_add(meta.size); + self.free_blocks += 1; self.free_count += 1; } } @@ -414,16 +454,13 @@ impl super::Allocator for BestFitAllocator { #[cfg(any(feature = "metrics", osiris_metrics))] impl BestFitAllocator { + /// Returns an O(1) snapshot using eagerly maintained counters. + /// `largest_free_block` still requires a free-list walk. pub fn metrics(&self) -> AllocatorMetrics { - let mut free_bytes = 0usize; - let mut free_blocks = 0usize; let mut largest_free_block = 0usize; - let mut current = self.head; while let Some(ptr) = current { let meta = unsafe { ptr.cast::().as_ref() }; - free_bytes = free_bytes.saturating_add(meta.size); - free_blocks += 1; if meta.size > largest_free_block { largest_free_block = meta.size; } @@ -432,9 +469,9 @@ impl BestFitAllocator { AllocatorMetrics { total_bytes: self.total_bytes, - free_bytes, - allocated_bytes: self.total_bytes.saturating_sub(free_bytes), - free_blocks, + free_bytes: self.free_bytes, + allocated_bytes: self.total_bytes.saturating_sub(self.free_bytes), + free_blocks: self.free_blocks, largest_free_block, alloc_count: self.alloc_count, free_count: self.free_count, diff --git a/src/metrics.rs b/src/metrics.rs deleted file mode 100644 index c188ab1..0000000 --- a/src/metrics.rs +++ /dev/null @@ -1,42 +0,0 @@ -//! Unified kernel metrics API. -//! -//! Enabled by the `metrics` Cargo feature. Provides: -//! 
- Global heap metrics via [`kernel_metrics`] / [`global_heap_metrics`] -//! - Per-task heap metrics via [`task_heap_metrics`] -//! - Per-thread stack metrics via [`thread_stack_metrics`] -//! -//! For full stack metrics, the backend crate's `metrics` feature must also be -//! enabled (e.g. `hal_cortex_m/metrics`). Without it, stack metrics return zeros. - -use crate::mem; -use crate::mem::alloc::bestfit::AllocatorMetrics; -use crate::sched::{self, task, thread}; - -/// Aggregated snapshot of global kernel resources. -pub struct KernelMetrics { - pub heap: AllocatorMetrics, -} - -/// Returns a snapshot of global kernel metrics (heap only). -pub fn kernel_metrics() -> KernelMetrics { - KernelMetrics { - heap: mem::global_metrics(), - } -} - -/// Returns heap metrics for the global kernel allocator. -pub fn global_heap_metrics() -> AllocatorMetrics { - mem::global_metrics() -} - -/// Returns stack metrics for the thread identified by `tid`, or `None` if the -/// thread does not exist. -pub fn thread_stack_metrics(tid: thread::UId) -> Option { - sched::with(|sched| sched.thread_stack_metrics(tid)) -} - -/// Returns heap metrics for the address space owned by task `task_id`, or -/// `None` if the task does not exist. -pub fn task_heap_metrics(task_id: task::UId) -> Option { - sched::with(|sched| sched.task_heap_metrics(task_id)) -} diff --git a/src/metrics/mod.rs b/src/metrics/mod.rs new file mode 100644 index 0000000..7536fae --- /dev/null +++ b/src/metrics/mod.rs @@ -0,0 +1,22 @@ +pub(crate) mod store; +pub use store::{HeapSnapshot, StackSnapshot}; + +use crate::sched::{task, thread}; + +/// Returns the latest global kernel heap snapshot, or `None` if the scheduler +/// has not yet run a single reschedule. +pub fn global_heap() -> Option { + store::read_global_heap() +} + +/// Returns the latest heap snapshot for the task identified by `task_id`, or +/// `None` if no snapshot exists for that slot. 
+pub fn task_heap(task_id: task::UId) -> Option { + store::read_task_heap(task_id.as_usize()) +} + +/// Returns the latest stack snapshot for the thread identified by `uid`, or +/// `None` if no snapshot exists for that slot. +pub fn thread_stack(uid: thread::UId) -> Option { + store::read_thread_stack(uid.as_usize()) +} diff --git a/src/metrics/store.rs b/src/metrics/store.rs new file mode 100644 index 0000000..83ba415 --- /dev/null +++ b/src/metrics/store.rs @@ -0,0 +1,66 @@ +use crate::sync::seqlock::Seqlock; + +pub(crate) const SLOTS: usize = crate::sched::THREAD_COUNT; + +#[derive(Debug, Clone, Copy)] +pub struct HeapSnapshot { + pub total_bytes: usize, + pub free_bytes: usize, + pub used_bytes: usize, + pub alloc_count: u64, + pub free_count: u64, +} + +#[derive(Debug, Clone, Copy)] +pub struct StackSnapshot { + pub total_bytes: usize, + pub used_bytes: usize, + pub free_bytes: usize, + pub peak_used_bytes: usize, +} + +static GLOBAL_HEAP: Seqlock> = Seqlock::new(None); +static TASK_HEAPS: [Seqlock>; SLOTS] = + [const { Seqlock::new(None) }; SLOTS]; +static THREAD_STACKS: [Seqlock>; SLOTS] = + [const { Seqlock::new(None) }; SLOTS]; + +pub(crate) fn write_global_heap(s: HeapSnapshot) { + GLOBAL_HEAP.write(Some(s)); +} + +pub(crate) fn write_task_heap(slot: usize, s: HeapSnapshot) { + if slot < SLOTS { + TASK_HEAPS[slot].write(Some(s)); + } +} + +pub(crate) fn clear_task_heap(slot: usize) { + if slot < SLOTS { + TASK_HEAPS[slot].write(None); + } +} + +pub(crate) fn write_thread_stack(slot: usize, s: StackSnapshot) { + if slot < SLOTS { + THREAD_STACKS[slot].write(Some(s)); + } +} + +pub(crate) fn clear_thread_stack(slot: usize) { + if slot < SLOTS { + THREAD_STACKS[slot].write(None); + } +} + +pub fn read_global_heap() -> Option { + GLOBAL_HEAP.read() +} + +pub fn read_task_heap(slot: usize) -> Option { + if slot < SLOTS { TASK_HEAPS[slot].read() } else { None } +} + +pub fn read_thread_stack(slot: usize) -> Option { + if slot < SLOTS { 
THREAD_STACKS[slot].read() } else { None } +} diff --git a/src/sched.rs b/src/sched.rs index 7ad70cb..0a46312 100644 --- a/src/sched.rs +++ b/src/sched.rs @@ -29,7 +29,7 @@ use crate::{ type ThreadMap = BitReclaimMap; type TaskMap = BitReclaimMap; -const THREAD_COUNT: usize = 32; +pub(crate) const THREAD_COUNT: usize = 32; type GlobalScheduler = Scheduler; static SCHED: SpinLocked = SpinLocked::new(GlobalScheduler::new()); @@ -342,10 +342,24 @@ impl Scheduler { } pub fn create_task(&mut self, attrs: task::Attributes) -> Result { - self.tasks.insert_with(|idx| { + let task_id = self.tasks.insert_with(|idx| { let task = task::Task::new(task::UId::new(idx), attrs); task.map(|t| (task::UId::new(idx), t)) - }) + })?; + + #[cfg(any(feature = "metrics", osiris_metrics))] + if let Some(task) = self.tasks.get(task_id) { + let m = task.allocator_metrics(); + crate::metrics::store::write_task_heap(task_id.as_usize(), crate::metrics::store::HeapSnapshot { + total_bytes: m.total_bytes, + free_bytes: m.free_bytes, + used_bytes: m.allocated_bytes, + alloc_count: m.alloc_count, + free_count: m.free_count, + }); + } + + Ok(task_id) } /// Dequeues all threads of the task and removes the task. If the current thread belongs to the task, reschedule will be triggered. 
@@ -367,6 +381,9 @@ impl Scheduler { bug!("failed to remove thread {} from thread list.", id); } + #[cfg(any(feature = "metrics", osiris_metrics))] + crate::metrics::store::clear_thread_stack(id.as_usize()); + if Some(id) == self.current { self.current = None; reschedule(); @@ -374,23 +391,11 @@ impl Scheduler { } self.tasks.remove(&uid).ok_or(kerr!(EINVAL))?; - Ok(()) - } - #[cfg(any(feature = "metrics", osiris_metrics))] - pub fn thread_stack_metrics( - &self, - tid: thread::UId, - ) -> Option { - self.threads.get(tid).map(|t| t.stack_metrics()) - } + #[cfg(any(feature = "metrics", osiris_metrics))] + crate::metrics::store::clear_task_heap(uid.as_usize()); - #[cfg(any(feature = "metrics", osiris_metrics))] - pub fn task_heap_metrics( - &self, - task_id: task::UId, - ) -> Option { - self.tasks.get(task_id).map(|t| t.heap_metrics()) + Ok(()) } pub fn create_thread( @@ -404,7 +409,7 @@ impl Scheduler { }; let task = self.tasks.get_mut(task).ok_or(kerr!(EINVAL))?; - self.threads + let uid = self.threads .insert_with(|idx| { let uid = task.allocate_tid().get_uid(idx); let stack = task.allocate_stack(attrs)?; @@ -414,7 +419,20 @@ impl Scheduler { .and_then(|k| { task.register_thread(k, &mut self.threads)?; Ok(k) - }) + })?; + + #[cfg(any(feature = "metrics", osiris_metrics))] + if let Some(thread) = self.threads.get(uid) { + let m = thread.stack_metrics(); + crate::metrics::store::write_thread_stack(uid.as_usize(), crate::metrics::store::StackSnapshot { + total_bytes: m.total_bytes, + used_bytes: m.used_bytes, + free_bytes: m.free_bytes, + peak_used_bytes: m.peak_used_bytes, + }); + } + + Ok(uid) } /// Dequeues a thread and removes it from its corresponding task. If the thread is currently running, reschedule will be triggered. 
@@ -437,12 +455,51 @@ impl Scheduler { self.threads.remove(&uid).ok_or(kerr!(EINVAL))?; + #[cfg(any(feature = "metrics", osiris_metrics))] + crate::metrics::store::clear_thread_stack(uid.as_usize()); + if Some(uid) == self.current { self.current = None; reschedule(); } Ok(()) } + + /// Copies live stats from all threads and tasks into the lock-free mirror. + /// Called on every reschedule so external readers can access metrics without + /// acquiring the scheduler lock. + #[cfg(any(feature = "metrics", osiris_metrics))] + fn mirror_stats(&self) { + let global = crate::mem::global_metrics(); + crate::metrics::store::write_global_heap(crate::metrics::store::HeapSnapshot { + total_bytes: global.total_bytes, + free_bytes: global.free_bytes, + used_bytes: global.allocated_bytes, + alloc_count: global.alloc_count, + free_count: global.free_count, + }); + + self.tasks.for_each(|slot, task| { + let m = task.allocator_metrics(); + crate::metrics::store::write_task_heap(slot, crate::metrics::store::HeapSnapshot { + total_bytes: m.total_bytes, + free_bytes: m.free_bytes, + used_bytes: m.allocated_bytes, + alloc_count: m.alloc_count, + free_count: m.free_count, + }); + }); + + self.threads.for_each(|slot, thread| { + let m = thread.stack_metrics(); + crate::metrics::store::write_thread_stack(slot, crate::metrics::store::StackSnapshot { + total_bytes: m.total_bytes, + used_bytes: m.used_bytes, + free_bytes: m.free_bytes, + peak_used_bytes: m.peak_used_bytes, + }); + }); + } } /// This function provides safe access to the global scheduler. 
@@ -533,6 +590,9 @@ pub extern "C" fn sched_enter(mut ctx: *mut c_void) -> *mut c_void { ctx = new; } + #[cfg(any(feature = "metrics", osiris_metrics))] + sched.mirror_stats(); + ctx }) } diff --git a/src/sched/task.rs b/src/sched/task.rs index 86be6b5..fe89f74 100644 --- a/src/sched/task.rs +++ b/src/sched/task.rs @@ -37,6 +37,10 @@ impl UId { Self { uid } } + pub fn as_usize(&self) -> usize { + self.uid + } + pub fn is_kernel(&self) -> bool { self.uid == 0 } @@ -139,7 +143,7 @@ impl Task { } #[cfg(any(feature = "metrics", osiris_metrics))] - pub(crate) fn heap_metrics(&self) -> crate::mem::alloc::bestfit::AllocatorMetrics { + pub(crate) fn allocator_metrics(&self) -> crate::mem::alloc::bestfit::AllocatorMetrics { self.address_space.metrics() } } diff --git a/src/sync.rs b/src/sync.rs index 85005f8..8c20d4e 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -1,4 +1,6 @@ pub mod atomic; pub mod once; +#[cfg(any(feature = "metrics", osiris_metrics))] +pub mod seqlock; pub mod spinlock; pub mod waiter; diff --git a/src/sync/seqlock.rs b/src/sync/seqlock.rs new file mode 100644 index 0000000..07f446b --- /dev/null +++ b/src/sync/seqlock.rs @@ -0,0 +1,48 @@ +use core::cell::UnsafeCell; +use core::hint::spin_loop; +use core::sync::atomic::{AtomicUsize, Ordering}; + +/// Single-writer, multi-reader seqlock. +/// +/// Odd `seq` means a write is in progress; even means data is stable. +/// Readers spin on odd seq and retry if seq changes while reading. +pub struct Seqlock { + seq: AtomicUsize, + data: UnsafeCell, +} + +unsafe impl Send for Seqlock {} +unsafe impl Sync for Seqlock {} + +impl Seqlock { + pub const fn new(val: T) -> Self { + Self { + seq: AtomicUsize::new(0), + data: UnsafeCell::new(val), + } + } + + /// Overwrite the value. Only one writer at a time is supported. 
+ pub fn write(&self, val: T) { + self.seq.fetch_add(1, Ordering::SeqCst); // even → odd: write in progress + unsafe { core::ptr::write_volatile(self.data.get(), val) }; + self.seq.fetch_add(1, Ordering::SeqCst); // odd → even: write complete + } + + /// Read the current value. Retries if a write is in progress or races a write. + pub fn read(&self) -> T { + loop { + let seq1 = self.seq.load(Ordering::SeqCst); + if seq1 & 1 != 0 { + spin_loop(); + continue; + } + // Safety: seq is even so no write is in progress on this single-core target. + let val = unsafe { core::ptr::read_volatile(self.data.get()) }; + let seq2 = self.seq.load(Ordering::SeqCst); + if seq1 == seq2 { + return val; + } + } + } +} diff --git a/src/types/array.rs b/src/types/array.rs index b7956c5..01a0aef 100644 --- a/src/types/array.rs +++ b/src/types/array.rs @@ -760,6 +760,17 @@ impl BitReclaimMap { } } +impl BitReclaimMap { + /// Call `f(slot, value)` for every occupied slot in the map. + pub fn for_each(&self, mut f: F) { + for slot in 0..N { + if let Some(v) = self.map.raw_at(slot) { + f(slot, v); + } + } + } +} + impl BitReclaimMap { pub fn insert_with(&mut self, f: impl FnOnce(usize) -> Result<(K, V)>) -> Result { let idx = self.free.alloc(1).ok_or(kerr!(ENOMEM))?; From 69e5efe7bb099cee879e0e43d87d71fcc258d112 Mon Sep 17 00:00:00 2001 From: F42J <57958582+F42J@users.noreply.github.com> Date: Wed, 13 May 2026 21:40:06 +0200 Subject: [PATCH 04/11] Err handling for tests from copilot comment Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com> --- src/mem/alloc/bestfit.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/mem/alloc/bestfit.rs b/src/mem/alloc/bestfit.rs index e6f0192..bbaaf83 100644 --- a/src/mem/alloc/bestfit.rs +++ b/src/mem/alloc/bestfit.rs @@ -491,6 +491,9 @@ mod metrics_tests { use crate::hal::mem::PhysAddr; let layout = std::alloc::Layout::from_size_align(length, align_of::()).unwrap(); let ptr = unsafe { 
std::alloc::alloc(layout) }; + if ptr.is_null() { + std::alloc::handle_alloc_error(layout); + } PhysAddr::new(ptr as usize)..PhysAddr::new(ptr as usize + length) } From ea42b8c785a88495d558cdb45a8d9b93da0e4604 Mon Sep 17 00:00:00 2001 From: Jakob Fuchs Date: Wed, 13 May 2026 22:12:11 +0200 Subject: [PATCH 05/11] addressed review comments, sc heduler now only updates active task metrics --- Cargo.toml | 2 +- build.rs | 6 +- machine/api/Cargo.toml | 2 +- machine/api/build.rs | 6 +- machine/api/src/stack.rs | 4 +- machine/cortex-m/Cargo.toml | 2 +- machine/cortex-m/build.rs | 6 +- machine/cortex-m/src/native/sched.rs | 12 +-- presets/stm32l4r5zi_def.toml | 2 +- src/lib.rs | 2 +- src/mem.rs | 4 +- src/mem/alloc.rs | 46 ++++++++++ src/mem/alloc/bestfit.rs | 122 ++++++--------------------- src/mem/vmm/nommu.rs | 4 +- src/metrics.rs | 2 + src/metrics/mod.rs | 22 ----- src/metrics/store.rs | 6 +- src/sched.rs | 73 ++++++++-------- src/sched/task.rs | 4 +- src/sched/thread.rs | 2 +- src/sync.rs | 2 +- 21 files changed, 145 insertions(+), 186 deletions(-) create mode 100644 src/metrics.rs delete mode 100644 src/metrics/mod.rs diff --git a/Cargo.toml b/Cargo.toml index bceef6d..32b83c2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,7 +54,7 @@ rand = "0.8.5" cfg_aliases = "0.2.1" [lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)', 'cfg(osiris_metrics)'] } +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)', 'cfg(metrics)'] } [profile.dev] panic = "abort" diff --git a/build.rs b/build.rs index 38e99e7..a20ee0d 100644 --- a/build.rs +++ b/build.rs @@ -15,9 +15,9 @@ extern crate cbindgen; fn main() { println!("cargo::rerun-if-changed=src"); println!("cargo::rerun-if-changed=build.rs"); - println!("cargo::rerun-if-env-changed=OSIRIS_DEBUG_METRICS"); - if std::env::var("OSIRIS_DEBUG_METRICS").map_or(false, |v| v == "true" || v == "1") { - println!("cargo::rustc-cfg=osiris_metrics"); + println!("cargo::rerun-if-env-changed=METRICS"); + if 
std::env::var("METRICS").map_or(false, |v| v == "true" || v == "1") { + println!("cargo::rustc-cfg=metrics"); } let out_dir = std::env::var("OUT_DIR").unwrap(); diff --git a/machine/api/Cargo.toml b/machine/api/Cargo.toml index d5a560d..27d9322 100644 --- a/machine/api/Cargo.toml +++ b/machine/api/Cargo.toml @@ -7,7 +7,7 @@ edition = "2024" metrics = [] [lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(osiris_metrics)'] } +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(metrics)'] } [dependencies] seq-macro = "0.3.6" \ No newline at end of file diff --git a/machine/api/build.rs b/machine/api/build.rs index 825ae33..c37cd25 100644 --- a/machine/api/build.rs +++ b/machine/api/build.rs @@ -1,6 +1,6 @@ fn main() { - println!("cargo::rerun-if-env-changed=OSIRIS_DEBUG_METRICS"); - if std::env::var("OSIRIS_DEBUG_METRICS").map_or(false, |v| v == "true" || v == "1") { - println!("cargo::rustc-cfg=osiris_metrics"); + println!("cargo::rerun-if-env-changed=METRICS"); + if std::env::var("METRICS").map_or(false, |v| v == "true" || v == "1") { + println!("cargo::rustc-cfg=metrics"); } } diff --git a/machine/api/src/stack.rs b/machine/api/src/stack.rs index 3d14832..449072d 100644 --- a/machine/api/src/stack.rs +++ b/machine/api/src/stack.rs @@ -14,7 +14,7 @@ pub struct Descriptor { /// Per-stack resource snapshot. Available when the `metrics` feature is enabled. /// Backends that do not override `Stacklike::metrics` return all-zero values. -#[cfg(any(feature = "metrics", osiris_metrics))] +#[cfg(any(feature = "metrics", metrics))] #[derive(Debug, Clone, Copy)] pub struct StackMetrics { /// Total bytes allocated for this stack. @@ -42,7 +42,7 @@ pub trait Stacklike { /// Returns a metrics snapshot for this stack. /// Backends that do not implement full metrics tracking return all-zero values. 
- #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] fn metrics(&self) -> StackMetrics { StackMetrics { total_bytes: 0, diff --git a/machine/cortex-m/Cargo.toml b/machine/cortex-m/Cargo.toml index a6ac0bf..dadb37a 100644 --- a/machine/cortex-m/Cargo.toml +++ b/machine/cortex-m/Cargo.toml @@ -28,4 +28,4 @@ panic-uart = [] metrics = ["hal-api/metrics"] [lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)', 'cfg(cortex_m)', 'cfg(disabled)', 'cfg(osiris_metrics)'] } +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)', 'cfg(cortex_m)', 'cfg(disabled)', 'cfg(metrics)'] } diff --git a/machine/cortex-m/build.rs b/machine/cortex-m/build.rs index 19b0018..45faef0 100644 --- a/machine/cortex-m/build.rs +++ b/machine/cortex-m/build.rs @@ -329,9 +329,9 @@ mod vector_table { /// /// Exits with error code 1 if any critical build step fails fn main() { - println!("cargo::rerun-if-env-changed=OSIRIS_DEBUG_METRICS"); - if env::var("OSIRIS_DEBUG_METRICS").map_or(false, |v| v == "true" || v == "1") { - println!("cargo::rustc-cfg=osiris_metrics"); + println!("cargo::rerun-if-env-changed=METRICS"); + if env::var("METRICS").map_or(false, |v| v == "true" || v == "1") { + println!("cargo::rustc-cfg=metrics"); } if !hal_builder::check_enabled("cortex-m") || !check_cortex_m() { diff --git a/machine/cortex-m/src/native/sched.rs b/machine/cortex-m/src/native/sched.rs index ec7b114..c5dbe3d 100644 --- a/machine/cortex-m/src/native/sched.rs +++ b/machine/cortex-m/src/native/sched.rs @@ -63,7 +63,7 @@ pub struct ArmStack { /// The size of the stack size: NonZero, /// High-water mark: largest sp offset ever recorded via set_sp. 
- #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] peak_offset: usize, } @@ -181,7 +181,7 @@ impl ArmStack { } } -#[cfg(all(test, any(feature = "metrics", osiris_metrics)))] +#[cfg(all(test, any(feature = "metrics", metrics)))] mod metrics_tests { use super::*; use core::num::NonZero; @@ -286,7 +286,7 @@ impl hal_api::stack::Stacklike for ArmStack { top, sp: StackPtr { offset: 0 }, size, - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] peak_offset: 0, }; @@ -303,14 +303,14 @@ impl hal_api::stack::Stacklike for ArmStack { } fn set_sp(&mut self, sp: StackPtr) { - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] if sp.offset > self.peak_offset { self.peak_offset = sp.offset; } self.sp = sp; } - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] fn metrics(&self) -> hal_api::stack::StackMetrics { let word = core::mem::size_of::(); let total_bytes = self.size.get() * word; @@ -318,7 +318,7 @@ impl hal_api::stack::Stacklike for ArmStack { hal_api::stack::StackMetrics { total_bytes, used_bytes, - free_bytes: total_bytes - used_bytes, + free_bytes: total_bytes.saturating_sub(used_bytes), peak_used_bytes: self.peak_offset * word, } } diff --git a/presets/stm32l4r5zi_def.toml b/presets/stm32l4r5zi_def.toml index e6cb304..69c3ac3 100644 --- a/presets/stm32l4r5zi_def.toml +++ b/presets/stm32l4r5zi_def.toml @@ -8,7 +8,7 @@ OSIRIS_MACHINE = "cortex-m" # Debugging configuration OSIRIS_DEBUG_UART = "LPUART1" OSIRIS_DEBUG_RUNTIMESYMBOLS = "false" -OSIRIS_DEBUG_METRICS = "false" +METRICS = "false" # Tuning parameters OSIRIS_TUNING_ENABLEFPU = "false" diff --git a/src/lib.rs b/src/lib.rs index 2804721..2e195e5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,7 +20,7 @@ mod sync; mod syscalls; mod time; -#[cfg(any(feature = "metrics", osiris_metrics))] +#[cfg(any(feature = "metrics", metrics))] pub mod metrics; 
// Public, for now. diff --git a/src/mem.rs b/src/mem.rs index ea60844..9b42bc5 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -92,8 +92,8 @@ pub unsafe fn free(ptr: NonNull, size: usize) { } /// Returns a metrics snapshot of the global kernel heap. -#[cfg(any(feature = "metrics", osiris_metrics))] -pub(crate) fn global_metrics() -> alloc::bestfit::AllocatorMetrics { +#[cfg(any(feature = "metrics", metrics))] +pub(crate) fn global_metrics() -> alloc::Metrics { GLOBAL_ALLOCATOR.lock().metrics() } diff --git a/src/mem/alloc.rs b/src/mem/alloc.rs index bde5db2..b633ab7 100644 --- a/src/mem/alloc.rs +++ b/src/mem/alloc.rs @@ -9,6 +9,52 @@ use crate::error::Result; pub mod bestfit; +/// Snapshot of allocator resource usage. Available when the `metrics` feature is enabled. +#[cfg(any(feature = "metrics", metrics))] +#[derive(Debug, Clone, Copy, Default)] +pub struct Metrics { + pub total_bytes: usize, + pub free_bytes: usize, + pub free_blocks: usize, + pub alloc_count: u64, + pub free_count: u64, +} + +#[cfg(any(feature = "metrics", metrics))] +impl Metrics { + pub const fn new() -> Self { + Self { + total_bytes: 0, + free_bytes: 0, + free_blocks: 0, + alloc_count: 0, + free_count: 0, + } + } + + pub fn allocated_bytes(&self) -> usize { + self.total_bytes.saturating_sub(self.free_bytes) + } + + pub(crate) fn record_add_range(&mut self, total: usize, free: usize) { + self.total_bytes = self.total_bytes.saturating_add(total); + self.free_bytes = self.free_bytes.saturating_add(free); + self.free_blocks += 1; + } + + pub(crate) fn record_alloc(&mut self, consumed_bytes: usize, blocks_removed: usize) { + self.free_bytes = self.free_bytes.saturating_sub(consumed_bytes); + self.free_blocks = self.free_blocks.saturating_sub(blocks_removed); + self.alloc_count += 1; + } + + pub(crate) fn record_free(&mut self, added_bytes: usize) { + self.free_bytes = self.free_bytes.saturating_add(added_bytes); + self.free_blocks += 1; + self.free_count += 1; + } +} + #[cfg(target_pointer_width = 
"64")] pub const MAX_ADDR: usize = 2_usize.pow(48); diff --git a/src/mem/alloc/bestfit.rs b/src/mem/alloc/bestfit.rs index bbaaf83..f2dc557 100644 --- a/src/mem/alloc/bestfit.rs +++ b/src/mem/alloc/bestfit.rs @@ -12,26 +12,6 @@ struct BestFitMeta { next: Option>, } -/// Snapshot of allocator resource usage. Available when the `metrics` feature is enabled. -#[cfg(any(feature = "metrics", osiris_metrics))] -#[derive(Debug, Clone, Copy)] -pub struct AllocatorMetrics { - /// Total bytes under allocator management (sum of all `add_range` sizes). - pub total_bytes: usize, - /// User-accessible bytes currently in the free list. - pub free_bytes: usize, - /// `total_bytes` minus `free_bytes`; includes metadata overhead and allocated user bytes. - pub allocated_bytes: usize, - /// Number of distinct free blocks (fragmentation indicator). - pub free_blocks: usize, - /// Largest single allocation currently possible. - pub largest_free_block: usize, - /// Lifetime count of successful `malloc` calls. - pub alloc_count: u64, - /// Lifetime count of `free` calls. - pub free_count: u64, -} - /// This is an allocator implementation that uses the best fit strategy. /// That does mean, when we allocate a block, we try to find the smallest block that fits the requested size. /// Blocks are stored in a singly linked list. The important part is that the linked list is stored in-line with the memory blocks. @@ -40,18 +20,8 @@ pub struct AllocatorMetrics { pub struct BestFitAllocator { /// Head of the free block list. head: Option>, - #[cfg(any(feature = "metrics", osiris_metrics))] - total_bytes: usize, - /// Sum of `meta.size` across all free blocks. Updated O(1) on every alloc/free. - #[cfg(any(feature = "metrics", osiris_metrics))] - free_bytes: usize, - /// Count of free blocks. Updated O(1) on every alloc/free. 
- #[cfg(any(feature = "metrics", osiris_metrics))] - free_blocks: usize, - #[cfg(any(feature = "metrics", osiris_metrics))] - alloc_count: u64, - #[cfg(any(feature = "metrics", osiris_metrics))] - free_count: u64, + #[cfg(any(feature = "metrics", metrics))] + metrics: super::Metrics, } // Safety: BestFitAllocator is not Copy or Clone. @@ -71,16 +41,8 @@ impl BestFitAllocator { pub const fn new() -> Self { Self { head: None, - #[cfg(any(feature = "metrics", osiris_metrics))] - total_bytes: 0, - #[cfg(any(feature = "metrics", osiris_metrics))] - free_bytes: 0, - #[cfg(any(feature = "metrics", osiris_metrics))] - free_blocks: 0, - #[cfg(any(feature = "metrics", osiris_metrics))] - alloc_count: 0, - #[cfg(any(feature = "metrics", osiris_metrics))] - free_count: 0, + #[cfg(any(feature = "metrics", metrics))] + metrics: super::Metrics::new(), } } @@ -128,12 +90,8 @@ impl BestFitAllocator { // Set the head to the new block. self.head = Some(unsafe { NonNull::new_unchecked(ptr.as_mut_ptr::()) }); - #[cfg(any(feature = "metrics", osiris_metrics))] - { - self.total_bytes = self.total_bytes.saturating_add(range.end.diff(range.start)); - self.free_bytes = self.free_bytes.saturating_add(usable); - self.free_blocks += 1; - } + #[cfg(any(feature = "metrics", metrics))] + self.metrics.record_add_range(range.end.diff(range.start), usable); Ok(()) } @@ -297,9 +255,9 @@ impl super::Allocator for BestFitAllocator { debug_assert!(aligned_size <= isize::MAX as usize); // Tracking variables for O(1) metrics update after the allocation. - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] let mut free_sub: usize = 0; - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] let mut blocks_sub: usize = 0; // Find the best fit block. @@ -334,7 +292,7 @@ impl super::Allocator for BestFitAllocator { if meta.size > min { // Split: old free block (meta.size) leaves, remainder (meta.size - min) stays. 
// Net free_bytes change: -min. free_blocks unchanged (one out, one in). - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] { free_sub = min; } // Calculate the remaining size of the block and thus the next metadata. @@ -368,7 +326,7 @@ impl super::Allocator for BestFitAllocator { (true, block, prev) } else { // No split: entire free block (meta.size) is consumed. - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] { free_sub = meta.size; blocks_sub = 1; } (false, block, prev) @@ -377,7 +335,7 @@ impl super::Allocator for BestFitAllocator { Err(_) => { let (block, prev) = self.select_block(size, request)?; // Retry succeeded with original size; always no-split. - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] { let meta = unsafe { block.cast::().as_ref() }; free_sub = meta.size; @@ -410,12 +368,8 @@ impl super::Allocator for BestFitAllocator { }); } - #[cfg(any(feature = "metrics", osiris_metrics))] - { - self.free_bytes = self.free_bytes.saturating_sub(free_sub); - self.free_blocks = self.free_blocks.saturating_sub(blocks_sub); - self.alloc_count += 1; - } + #[cfg(any(feature = "metrics", metrics))] + self.metrics.record_alloc(free_sub, blocks_sub); // Return the user pointer. Ok(unsafe { Self::user_ptr(block).cast() }) @@ -443,45 +397,21 @@ impl super::Allocator for BestFitAllocator { // Set the block as the new head. self.head = Some(block); - #[cfg(any(feature = "metrics", osiris_metrics))] - { - self.free_bytes = self.free_bytes.saturating_add(meta.size); - self.free_blocks += 1; - self.free_count += 1; - } + #[cfg(any(feature = "metrics", metrics))] + self.metrics.record_free(meta.size); } } -#[cfg(any(feature = "metrics", osiris_metrics))] +#[cfg(any(feature = "metrics", metrics))] impl BestFitAllocator { - /// Returns an O(1) snapshot using eagerly maintained counters. 
- /// `largest_free_block` still requires a free-list walk. - pub fn metrics(&self) -> AllocatorMetrics { - let mut largest_free_block = 0usize; - let mut current = self.head; - while let Some(ptr) = current { - let meta = unsafe { ptr.cast::().as_ref() }; - if meta.size > largest_free_block { - largest_free_block = meta.size; - } - current = meta.next; - } - - AllocatorMetrics { - total_bytes: self.total_bytes, - free_bytes: self.free_bytes, - allocated_bytes: self.total_bytes.saturating_sub(self.free_bytes), - free_blocks: self.free_blocks, - largest_free_block, - alloc_count: self.alloc_count, - free_count: self.free_count, - } + pub fn metrics(&self) -> super::Metrics { + self.metrics } } // TESTING ------------------------------------------------------------------------------------------------------------ -#[cfg(all(test, any(feature = "metrics", osiris_metrics)))] +#[cfg(all(test, any(feature = "metrics", metrics)))] mod metrics_tests { use super::super::*; use super::*; @@ -503,9 +433,8 @@ mod metrics_tests { let m = allocator.metrics(); assert_eq!(m.total_bytes, 0); assert_eq!(m.free_bytes, 0); - assert_eq!(m.allocated_bytes, 0); + assert_eq!(m.allocated_bytes(), 0); assert_eq!(m.free_blocks, 0); - assert_eq!(m.largest_free_block, 0); assert_eq!(m.alloc_count, 0); assert_eq!(m.free_count, 0); } @@ -522,8 +451,7 @@ mod metrics_tests { assert_eq!(m.free_blocks, 1); assert!(m.free_bytes > 0); assert!(m.free_bytes < range_len, "metadata must consume some bytes"); - assert_eq!(m.largest_free_block, m.free_bytes); - assert_eq!(m.allocated_bytes, range_len - m.free_bytes); + assert_eq!(m.allocated_bytes(), range_len - m.free_bytes); assert_eq!(m.alloc_count, 0); assert_eq!(m.free_count, 0); } @@ -591,13 +519,13 @@ mod metrics_tests { unsafe { allocator.add_range(&range).unwrap() }; let m = allocator.metrics(); - // Single block: largest == only free block. - assert_eq!(m.largest_free_block, m.free_bytes); + // Single block: all free bytes in one block. 
+ assert_eq!(m.free_blocks, 1); let _p = unsafe { allocator.malloc::(128, 1, None).unwrap() }; let m2 = allocator.metrics(); - // Largest free block shrinks after allocation. - assert!(m2.largest_free_block <= m.largest_free_block); + // Free bytes shrink after allocation. + assert!(m2.free_bytes <= m.free_bytes); } #[test] diff --git a/src/mem/vmm/nommu.rs b/src/mem/vmm/nommu.rs index 512e691..027c591 100644 --- a/src/mem/vmm/nommu.rs +++ b/src/mem/vmm/nommu.rs @@ -17,9 +17,9 @@ pub struct AddressSpace { allocator: bestfit::BestFitAllocator, } -#[cfg(any(feature = "metrics", osiris_metrics))] +#[cfg(any(feature = "metrics", metrics))] impl AddressSpace { - pub(crate) fn metrics(&self) -> crate::mem::alloc::bestfit::AllocatorMetrics { + pub(crate) fn metrics(&self) -> crate::mem::alloc::Metrics { self.allocator.metrics() } } diff --git a/src/metrics.rs b/src/metrics.rs new file mode 100644 index 0000000..04ef82e --- /dev/null +++ b/src/metrics.rs @@ -0,0 +1,2 @@ +pub mod store; +pub use store::{HeapSnapshot, StackSnapshot}; diff --git a/src/metrics/mod.rs b/src/metrics/mod.rs deleted file mode 100644 index 7536fae..0000000 --- a/src/metrics/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -pub(crate) mod store; -pub use store::{HeapSnapshot, StackSnapshot}; - -use crate::sched::{task, thread}; - -/// Returns the latest global kernel heap snapshot, or `None` if the scheduler -/// has not yet run a single reschedule. -pub fn global_heap() -> Option { - store::read_global_heap() -} - -/// Returns the latest heap snapshot for the task identified by `task_id`, or -/// `None` if no snapshot exists for that slot. -pub fn task_heap(task_id: task::UId) -> Option { - store::read_task_heap(task_id.as_usize()) -} - -/// Returns the latest stack snapshot for the thread identified by `uid`, or -/// `None` if no snapshot exists for that slot. 
-pub fn thread_stack(uid: thread::UId) -> Option { - store::read_thread_stack(uid.as_usize()) -} diff --git a/src/metrics/store.rs b/src/metrics/store.rs index 83ba415..4012f1a 100644 --- a/src/metrics/store.rs +++ b/src/metrics/store.rs @@ -53,14 +53,14 @@ pub(crate) fn clear_thread_stack(slot: usize) { } } -pub fn read_global_heap() -> Option { +pub fn global_heap() -> Option { GLOBAL_HEAP.read() } -pub fn read_task_heap(slot: usize) -> Option { +pub fn task_heap(slot: usize) -> Option { if slot < SLOTS { TASK_HEAPS[slot].read() } else { None } } -pub fn read_thread_stack(slot: usize) -> Option { +pub fn thread_stack(slot: usize) -> Option { if slot < SLOTS { THREAD_STACKS[slot].read() } else { None } } diff --git a/src/sched.rs b/src/sched.rs index 0a46312..c0b4812 100644 --- a/src/sched.rs +++ b/src/sched.rs @@ -347,13 +347,13 @@ impl Scheduler { task.map(|t| (task::UId::new(idx), t)) })?; - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] if let Some(task) = self.tasks.get(task_id) { let m = task.allocator_metrics(); crate::metrics::store::write_task_heap(task_id.as_usize(), crate::metrics::store::HeapSnapshot { total_bytes: m.total_bytes, free_bytes: m.free_bytes, - used_bytes: m.allocated_bytes, + used_bytes: m.allocated_bytes(), alloc_count: m.alloc_count, free_count: m.free_count, }); @@ -381,7 +381,7 @@ impl Scheduler { bug!("failed to remove thread {} from thread list.", id); } - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] crate::metrics::store::clear_thread_stack(id.as_usize()); if Some(id) == self.current { @@ -392,7 +392,7 @@ impl Scheduler { self.tasks.remove(&uid).ok_or(kerr!(EINVAL))?; - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] crate::metrics::store::clear_task_heap(uid.as_usize()); Ok(()) @@ -421,7 +421,7 @@ impl Scheduler { Ok(k) })?; - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature 
= "metrics", metrics))] if let Some(thread) = self.threads.get(uid) { let m = thread.stack_metrics(); crate::metrics::store::write_thread_stack(uid.as_usize(), crate::metrics::store::StackSnapshot { @@ -455,7 +455,7 @@ impl Scheduler { self.threads.remove(&uid).ok_or(kerr!(EINVAL))?; - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] crate::metrics::store::clear_thread_stack(uid.as_usize()); if Some(uid) == self.current { @@ -465,40 +465,42 @@ impl Scheduler { Ok(()) } - /// Copies live stats from all threads and tasks into the lock-free mirror. - /// Called on every reschedule so external readers can access metrics without - /// acquiring the scheduler lock. - #[cfg(any(feature = "metrics", osiris_metrics))] + /// Updates the lock-free mirror for the currently scheduled thread and its task. + /// Called on every reschedule; only the thread that just ran needs updating. + #[cfg(any(feature = "metrics", metrics))] fn mirror_stats(&self) { let global = crate::mem::global_metrics(); crate::metrics::store::write_global_heap(crate::metrics::store::HeapSnapshot { total_bytes: global.total_bytes, free_bytes: global.free_bytes, - used_bytes: global.allocated_bytes, + used_bytes: global.allocated_bytes(), alloc_count: global.alloc_count, free_count: global.free_count, }); - self.tasks.for_each(|slot, task| { - let m = task.allocator_metrics(); - crate::metrics::store::write_task_heap(slot, crate::metrics::store::HeapSnapshot { - total_bytes: m.total_bytes, - free_bytes: m.free_bytes, - used_bytes: m.allocated_bytes, - alloc_count: m.alloc_count, - free_count: m.free_count, - }); - }); - - self.threads.for_each(|slot, thread| { - let m = thread.stack_metrics(); - crate::metrics::store::write_thread_stack(slot, crate::metrics::store::StackSnapshot { - total_bytes: m.total_bytes, - used_bytes: m.used_bytes, - free_bytes: m.free_bytes, - peak_used_bytes: m.peak_used_bytes, - }); - }); + if let Some(uid) = self.current { + if let 
Some(thread) = self.threads.get(uid) { + let m = thread.stack_metrics(); + crate::metrics::store::write_thread_stack(uid.as_usize(), crate::metrics::store::StackSnapshot { + total_bytes: m.total_bytes, + used_bytes: m.used_bytes, + free_bytes: m.free_bytes, + peak_used_bytes: m.peak_used_bytes, + }); + + let task_id = thread.task_id(); + if let Some(task) = self.tasks.get(task_id) { + let m = task.allocator_metrics(); + crate::metrics::store::write_task_heap(task_id.as_usize(), crate::metrics::store::HeapSnapshot { + total_bytes: m.total_bytes, + free_bytes: m.free_bytes, + used_bytes: m.allocated_bytes(), + alloc_count: m.alloc_count, + free_count: m.free_count, + }); + } + } + } } } @@ -583,6 +585,12 @@ pub extern "C" fn sched_enter(mut ctx: *mut c_void) -> *mut c_void { let old = sched.current.map(|c| c.owner()); sched.land(ctx); + // Mirror stats while self.current still points to the outgoing thread — + // its stack context was just saved by land() and its task reflects any + // allocations made since the last reschedule. 
+ #[cfg(any(feature = "metrics", metrics))] + sched.mirror_stats(); + if let Some((new, task)) = sched.do_sched(time::tick()) { if old != Some(task.id) { dispch::prepare(task); @@ -590,9 +598,6 @@ pub extern "C" fn sched_enter(mut ctx: *mut c_void) -> *mut c_void { ctx = new; } - #[cfg(any(feature = "metrics", osiris_metrics))] - sched.mirror_stats(); - ctx }) } diff --git a/src/sched/task.rs b/src/sched/task.rs index fe89f74..eec12da 100644 --- a/src/sched/task.rs +++ b/src/sched/task.rs @@ -142,8 +142,8 @@ impl Task { &self.threads } - #[cfg(any(feature = "metrics", osiris_metrics))] - pub(crate) fn allocator_metrics(&self) -> crate::mem::alloc::bestfit::AllocatorMetrics { + #[cfg(any(feature = "metrics", metrics))] + pub(crate) fn allocator_metrics(&self) -> crate::mem::alloc::Metrics { self.address_space.metrics() } } diff --git a/src/sched/thread.rs b/src/sched/thread.rs index 7a6e19b..7f6d52c 100644 --- a/src/sched/thread.rs +++ b/src/sched/thread.rs @@ -342,7 +342,7 @@ impl Thread { self.state.stack.sp() } - #[cfg(any(feature = "metrics", osiris_metrics))] + #[cfg(any(feature = "metrics", metrics))] pub fn stack_metrics(&self) -> crate::hal::stack::StackMetrics { self.state.stack.metrics() } diff --git a/src/sync.rs b/src/sync.rs index 8c20d4e..2c6ae5f 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -1,6 +1,6 @@ pub mod atomic; pub mod once; -#[cfg(any(feature = "metrics", osiris_metrics))] +#[cfg(any(feature = "metrics", metrics))] pub mod seqlock; pub mod spinlock; pub mod waiter; From 4e5f16861af979cd88516f1fb18f8d047bff54bf Mon Sep 17 00:00:00 2001 From: Jakob Fuchs Date: Wed, 13 May 2026 22:29:12 +0200 Subject: [PATCH 06/11] formatting --- src/mem/alloc/bestfit.rs | 12 +++++-- src/metrics/store.rs | 15 ++++++--- src/sched.rs | 67 ++++++++++++++++++++++++---------------- 3 files changed, 60 insertions(+), 34 deletions(-) diff --git a/src/mem/alloc/bestfit.rs b/src/mem/alloc/bestfit.rs index f2dc557..aadbea6 100644 --- a/src/mem/alloc/bestfit.rs +++ 
b/src/mem/alloc/bestfit.rs @@ -91,7 +91,8 @@ impl BestFitAllocator { self.head = Some(unsafe { NonNull::new_unchecked(ptr.as_mut_ptr::()) }); #[cfg(any(feature = "metrics", metrics))] - self.metrics.record_add_range(range.end.diff(range.start), usable); + self.metrics + .record_add_range(range.end.diff(range.start), usable); Ok(()) } @@ -293,7 +294,9 @@ impl super::Allocator for BestFitAllocator { // Split: old free block (meta.size) leaves, remainder (meta.size - min) stays. // Net free_bytes change: -min. free_blocks unchanged (one out, one in). #[cfg(any(feature = "metrics", metrics))] - { free_sub = min; } + { + free_sub = min; + } // Calculate the remaining size of the block and thus the next metadata. let remaining_meta = BestFitMeta { @@ -327,7 +330,10 @@ impl super::Allocator for BestFitAllocator { } else { // No split: entire free block (meta.size) is consumed. #[cfg(any(feature = "metrics", metrics))] - { free_sub = meta.size; blocks_sub = 1; } + { + free_sub = meta.size; + blocks_sub = 1; + } (false, block, prev) } diff --git a/src/metrics/store.rs b/src/metrics/store.rs index 4012f1a..3caae1c 100644 --- a/src/metrics/store.rs +++ b/src/metrics/store.rs @@ -20,8 +20,7 @@ pub struct StackSnapshot { } static GLOBAL_HEAP: Seqlock> = Seqlock::new(None); -static TASK_HEAPS: [Seqlock>; SLOTS] = - [const { Seqlock::new(None) }; SLOTS]; +static TASK_HEAPS: [Seqlock>; SLOTS] = [const { Seqlock::new(None) }; SLOTS]; static THREAD_STACKS: [Seqlock>; SLOTS] = [const { Seqlock::new(None) }; SLOTS]; @@ -58,9 +57,17 @@ pub fn global_heap() -> Option { } pub fn task_heap(slot: usize) -> Option { - if slot < SLOTS { TASK_HEAPS[slot].read() } else { None } + if slot < SLOTS { + TASK_HEAPS[slot].read() + } else { + None + } } pub fn thread_stack(slot: usize) -> Option { - if slot < SLOTS { THREAD_STACKS[slot].read() } else { None } + if slot < SLOTS { + THREAD_STACKS[slot].read() + } else { + None + } } diff --git a/src/sched.rs b/src/sched.rs index c0b4812..a26ff4c 100644 
--- a/src/sched.rs +++ b/src/sched.rs @@ -350,13 +350,16 @@ impl Scheduler { #[cfg(any(feature = "metrics", metrics))] if let Some(task) = self.tasks.get(task_id) { let m = task.allocator_metrics(); - crate::metrics::store::write_task_heap(task_id.as_usize(), crate::metrics::store::HeapSnapshot { - total_bytes: m.total_bytes, - free_bytes: m.free_bytes, - used_bytes: m.allocated_bytes(), - alloc_count: m.alloc_count, - free_count: m.free_count, - }); + crate::metrics::store::write_task_heap( + task_id.as_usize(), + crate::metrics::store::HeapSnapshot { + total_bytes: m.total_bytes, + free_bytes: m.free_bytes, + used_bytes: m.allocated_bytes(), + alloc_count: m.alloc_count, + free_count: m.free_count, + }, + ); } Ok(task_id) @@ -409,7 +412,8 @@ impl Scheduler { }; let task = self.tasks.get_mut(task).ok_or(kerr!(EINVAL))?; - let uid = self.threads + let uid = self + .threads .insert_with(|idx| { let uid = task.allocate_tid().get_uid(idx); let stack = task.allocate_stack(attrs)?; @@ -424,12 +428,15 @@ impl Scheduler { #[cfg(any(feature = "metrics", metrics))] if let Some(thread) = self.threads.get(uid) { let m = thread.stack_metrics(); - crate::metrics::store::write_thread_stack(uid.as_usize(), crate::metrics::store::StackSnapshot { - total_bytes: m.total_bytes, - used_bytes: m.used_bytes, - free_bytes: m.free_bytes, - peak_used_bytes: m.peak_used_bytes, - }); + crate::metrics::store::write_thread_stack( + uid.as_usize(), + crate::metrics::store::StackSnapshot { + total_bytes: m.total_bytes, + used_bytes: m.used_bytes, + free_bytes: m.free_bytes, + peak_used_bytes: m.peak_used_bytes, + }, + ); } Ok(uid) @@ -481,23 +488,29 @@ impl Scheduler { if let Some(uid) = self.current { if let Some(thread) = self.threads.get(uid) { let m = thread.stack_metrics(); - crate::metrics::store::write_thread_stack(uid.as_usize(), crate::metrics::store::StackSnapshot { - total_bytes: m.total_bytes, - used_bytes: m.used_bytes, - free_bytes: m.free_bytes, - peak_used_bytes: 
m.peak_used_bytes, - }); + crate::metrics::store::write_thread_stack( + uid.as_usize(), + crate::metrics::store::StackSnapshot { + total_bytes: m.total_bytes, + used_bytes: m.used_bytes, + free_bytes: m.free_bytes, + peak_used_bytes: m.peak_used_bytes, + }, + ); let task_id = thread.task_id(); if let Some(task) = self.tasks.get(task_id) { let m = task.allocator_metrics(); - crate::metrics::store::write_task_heap(task_id.as_usize(), crate::metrics::store::HeapSnapshot { - total_bytes: m.total_bytes, - free_bytes: m.free_bytes, - used_bytes: m.allocated_bytes(), - alloc_count: m.alloc_count, - free_count: m.free_count, - }); + crate::metrics::store::write_task_heap( + task_id.as_usize(), + crate::metrics::store::HeapSnapshot { + total_bytes: m.total_bytes, + free_bytes: m.free_bytes, + used_bytes: m.allocated_bytes(), + alloc_count: m.alloc_count, + free_count: m.free_count, + }, + ); } } } From b5eae60ce8066cadd00414548b02d9240f4d1b7f Mon Sep 17 00:00:00 2001 From: Jakob Fuchs Date: Wed, 13 May 2026 22:40:20 +0200 Subject: [PATCH 07/11] fix env var: OSIRIS_METRICS prefix --- build.rs | 4 ++-- machine/api/build.rs | 4 ++-- machine/cortex-m/build.rs | 4 ++-- presets/stm32l4r5zi_def.toml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/build.rs b/build.rs index a20ee0d..7a49783 100644 --- a/build.rs +++ b/build.rs @@ -15,8 +15,8 @@ extern crate cbindgen; fn main() { println!("cargo::rerun-if-changed=src"); println!("cargo::rerun-if-changed=build.rs"); - println!("cargo::rerun-if-env-changed=METRICS"); - if std::env::var("METRICS").map_or(false, |v| v == "true" || v == "1") { + println!("cargo::rerun-if-env-changed=OSIRIS_METRICS"); + if std::env::var("OSIRIS_METRICS").map_or(false, |v| v == "true" || v == "1") { println!("cargo::rustc-cfg=metrics"); } let out_dir = std::env::var("OUT_DIR").unwrap(); diff --git a/machine/api/build.rs b/machine/api/build.rs index c37cd25..667df70 100644 --- a/machine/api/build.rs +++ b/machine/api/build.rs @@ -1,6 
+1,6 @@ fn main() { - println!("cargo::rerun-if-env-changed=METRICS"); - if std::env::var("METRICS").map_or(false, |v| v == "true" || v == "1") { + println!("cargo::rerun-if-env-changed=OSIRIS_METRICS"); + if std::env::var("OSIRIS_METRICS").map_or(false, |v| v == "true" || v == "1") { println!("cargo::rustc-cfg=metrics"); } } diff --git a/machine/cortex-m/build.rs b/machine/cortex-m/build.rs index 45faef0..da1c593 100644 --- a/machine/cortex-m/build.rs +++ b/machine/cortex-m/build.rs @@ -329,8 +329,8 @@ mod vector_table { /// /// Exits with error code 1 if any critical build step fails fn main() { - println!("cargo::rerun-if-env-changed=METRICS"); - if env::var("METRICS").map_or(false, |v| v == "true" || v == "1") { + println!("cargo::rerun-if-env-changed=OSIRIS_METRICS"); + if env::var("OSIRIS_METRICS").map_or(false, |v| v == "true" || v == "1") { println!("cargo::rustc-cfg=metrics"); } diff --git a/presets/stm32l4r5zi_def.toml b/presets/stm32l4r5zi_def.toml index 69c3ac3..04b8885 100644 --- a/presets/stm32l4r5zi_def.toml +++ b/presets/stm32l4r5zi_def.toml @@ -8,7 +8,7 @@ OSIRIS_MACHINE = "cortex-m" # Debugging configuration OSIRIS_DEBUG_UART = "LPUART1" OSIRIS_DEBUG_RUNTIMESYMBOLS = "false" -METRICS = "false" +OSIRIS_METRICS = "false" # Tuning parameters OSIRIS_TUNING_ENABLEFPU = "false" From 649e7c0a7cdd0ea8b5e570f8be428c752c6e2201 Mon Sep 17 00:00:00 2001 From: Jakob Fuchs Date: Wed, 13 May 2026 22:56:48 +0200 Subject: [PATCH 08/11] refactored mirror_stats --- src/metrics/store.rs | 25 +++++++++++++++++++++++++ src/sched.rs | 34 +++++----------------------------- 2 files changed, 30 insertions(+), 29 deletions(-) diff --git a/src/metrics/store.rs b/src/metrics/store.rs index 3caae1c..a503ec1 100644 --- a/src/metrics/store.rs +++ b/src/metrics/store.rs @@ -1,5 +1,30 @@ +use crate::hal::stack::StackMetrics; +use crate::mem::alloc::Metrics as AllocMetrics; use crate::sync::seqlock::Seqlock; +impl From for HeapSnapshot { + fn from(m: AllocMetrics) -> Self { + Self 
{ + total_bytes: m.total_bytes, + free_bytes: m.free_bytes, + used_bytes: m.allocated_bytes(), + alloc_count: m.alloc_count, + free_count: m.free_count, + } + } +} + +impl From for StackSnapshot { + fn from(m: StackMetrics) -> Self { + Self { + total_bytes: m.total_bytes, + used_bytes: m.used_bytes, + free_bytes: m.free_bytes, + peak_used_bytes: m.peak_used_bytes, + } + } +} + pub(crate) const SLOTS: usize = crate::sched::THREAD_COUNT; #[derive(Debug, Clone, Copy)] diff --git a/src/sched.rs b/src/sched.rs index a26ff4c..c264f5c 100644 --- a/src/sched.rs +++ b/src/sched.rs @@ -476,41 +476,17 @@ impl Scheduler { /// Called on every reschedule; only the thread that just ran needs updating. #[cfg(any(feature = "metrics", metrics))] fn mirror_stats(&self) { - let global = crate::mem::global_metrics(); - crate::metrics::store::write_global_heap(crate::metrics::store::HeapSnapshot { - total_bytes: global.total_bytes, - free_bytes: global.free_bytes, - used_bytes: global.allocated_bytes(), - alloc_count: global.alloc_count, - free_count: global.free_count, - }); + use crate::metrics::store; + + store::write_global_heap(crate::mem::global_metrics().into()); if let Some(uid) = self.current { if let Some(thread) = self.threads.get(uid) { - let m = thread.stack_metrics(); - crate::metrics::store::write_thread_stack( - uid.as_usize(), - crate::metrics::store::StackSnapshot { - total_bytes: m.total_bytes, - used_bytes: m.used_bytes, - free_bytes: m.free_bytes, - peak_used_bytes: m.peak_used_bytes, - }, - ); + store::write_thread_stack(uid.as_usize(), thread.stack_metrics().into()); let task_id = thread.task_id(); if let Some(task) = self.tasks.get(task_id) { - let m = task.allocator_metrics(); - crate::metrics::store::write_task_heap( - task_id.as_usize(), - crate::metrics::store::HeapSnapshot { - total_bytes: m.total_bytes, - free_bytes: m.free_bytes, - used_bytes: m.allocated_bytes(), - alloc_count: m.alloc_count, - free_count: m.free_count, - }, - ); + 
store::write_task_heap(task_id.as_usize(), task.allocator_metrics().into()); } } } From 21b41d255e436d0dec2a34303deeaf2937935e05 Mon Sep 17 00:00:00 2001 From: Jakob Fuchs Date: Wed, 13 May 2026 23:07:21 +0200 Subject: [PATCH 09/11] metrics conversions using into --- src/sched.rs | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/src/sched.rs b/src/sched.rs index c264f5c..c39c7d0 100644 --- a/src/sched.rs +++ b/src/sched.rs @@ -349,16 +349,9 @@ impl Scheduler { #[cfg(any(feature = "metrics", metrics))] if let Some(task) = self.tasks.get(task_id) { - let m = task.allocator_metrics(); crate::metrics::store::write_task_heap( task_id.as_usize(), - crate::metrics::store::HeapSnapshot { - total_bytes: m.total_bytes, - free_bytes: m.free_bytes, - used_bytes: m.allocated_bytes(), - alloc_count: m.alloc_count, - free_count: m.free_count, - }, + task.allocator_metrics().into(), ); } @@ -427,15 +420,9 @@ impl Scheduler { #[cfg(any(feature = "metrics", metrics))] if let Some(thread) = self.threads.get(uid) { - let m = thread.stack_metrics(); crate::metrics::store::write_thread_stack( uid.as_usize(), - crate::metrics::store::StackSnapshot { - total_bytes: m.total_bytes, - used_bytes: m.used_bytes, - free_bytes: m.free_bytes, - peak_used_bytes: m.peak_used_bytes, - }, + thread.stack_metrics().into(), ); } From bd3710a45fb3865b080b96ac42a7d4b3c3186831 Mon Sep 17 00:00:00 2001 From: Jakob Fuchs Date: Thu, 14 May 2026 00:04:30 +0200 Subject: [PATCH 10/11] claude review feedback --- machine/cortex-m/src/native/sched.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/machine/cortex-m/src/native/sched.rs b/machine/cortex-m/src/native/sched.rs index c5dbe3d..4864250 100644 --- a/machine/cortex-m/src/native/sched.rs +++ b/machine/cortex-m/src/native/sched.rs @@ -171,10 +171,10 @@ impl ArmStack { // We should have written exactly FRAME_WORDS words. 
debug_assert!(write_index == self.top.sub(self.sp.offset() + FRAME_WORDS)); - - self.sp += FRAME_WORDS; } + self.set_sp(self.sp + FRAME_WORDS); + // The returned stack pointer must be call-aligned. debug_assert!(Self::is_call_aligned(self.sp)); Ok(()) @@ -230,10 +230,12 @@ mod metrics_tests { } #[test] - fn metrics_peak_starts_at_zero() { - // peak_offset is only updated through set_sp; new() increments sp directly. + fn metrics_peak_includes_entry_frame() { let stack = make_stack(unsafe { &mut BUF_A }); - assert_eq!(stack.metrics().peak_used_bytes, 0); + let word = core::mem::size_of::(); + // The entry frame (18 words) is pushed during new(), so peak starts there. + assert_eq!(stack.metrics().peak_used_bytes, stack.metrics().used_bytes); + assert!(stack.metrics().peak_used_bytes >= 18 * word); } #[test] From 57b231aa3705ada0748950b32f1063a2c2e8b6aa Mon Sep 17 00:00:00 2001 From: Jakob Fuchs Date: Thu, 14 May 2026 00:12:41 +0200 Subject: [PATCH 11/11] fix: import Stacklike trait for set_sp call in push_irq_ret_fn --- machine/cortex-m/src/native/sched.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/machine/cortex-m/src/native/sched.rs b/machine/cortex-m/src/native/sched.rs index 4864250..1e6476a 100644 --- a/machine/cortex-m/src/native/sched.rs +++ b/machine/cortex-m/src/native/sched.rs @@ -7,7 +7,7 @@ use core::{ ptr::NonNull, }; -use hal_api::{Result, stack::Descriptor}; +use hal_api::{Result, stack::{Descriptor, Stacklike}}; // A default finalizer used if none is supplied: just spins forever. #[inline(never)]