6 changes: 6 additions & 0 deletions machine/cortex-m/src/native/sched.rs
@@ -198,6 +198,12 @@ impl hal_api::stack::Stacklike for ArmStack {
let top = NonNull::new(top.as_mut_ptr::<u32>())
.ok_or(hal_api::PosixError::EINVAL)?;

// `size` is in bytes (per the Descriptor contract); `does_fit` and
// `in_bounds` work in u32 words. Convert here so the stack's internal
// unit stays consistent with `StackPtr::offset`.
let size = NonZero::new(size.get() / core::mem::size_of::<u32>())
.ok_or(hal_api::PosixError::EINVAL)?;

let mut stack = Self {
top,
sp: StackPtr { offset: 0 },
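
The conversion above matters because the byte count need not be a multiple of the word size: integer division truncates, a sub-word stack collapses to zero, and `NonZero` then rejects it. A minimal standalone sketch of the same pattern (the `words_from_bytes` helper is hypothetical, not part of this diff):

use core::num::NonZero;

/// Hypothetical helper mirroring the conversion above: a byte count becomes
/// a count of u32 words, and anything smaller than one word becomes an error.
fn words_from_bytes(bytes: NonZero<usize>) -> Option<NonZero<usize>> {
    // Integer division truncates, so 1..=3 bytes yields 0 words,
    // which NonZero::new rejects, matching the EINVAL path above.
    NonZero::new(bytes.get() / core::mem::size_of::<u32>())
}

fn main() {
    assert_eq!(words_from_bytes(NonZero::new(4096).unwrap()).unwrap().get(), 1024);
    assert!(words_from_bytes(NonZero::new(3).unwrap()).is_none());
}
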
27 changes: 27 additions & 0 deletions src/mem/alloc/bestfit.rs
@@ -344,6 +344,15 @@ impl super::Allocator for BestFitAllocator {
/// `size` - The size of the block, used to verify that the block's size is correct.
unsafe fn free<T>(&mut self, ptr: NonNull<T>, size: usize) {
let block = unsafe { Self::control_ptr(ptr.cast()) };

// Walking the free list catches a double-free before it can self-loop the list
// and turn the next `malloc` into an infinite traversal.
let mut walk = self.head;
while let Some(p) = walk {
bug_on!(p == block, "double free");
walk = unsafe { p.cast::<BestFitMeta>().as_ref().next };
}

let meta = unsafe { block.cast::<BestFitMeta>().as_mut() };

// The next block of a free block is always the current head. We essentially insert the block at the beginning of the list.
@@ -680,6 +689,24 @@ mod tests {
}
}

#[test]
#[should_panic(expected = "double free")]
fn double_free_panics() {
let mut allocator = BestFitAllocator::new();
let range = alloc_range(4096);
unsafe {
allocator.add_range(&range).unwrap();
}

let ptr = unsafe { allocator.malloc::<u8>(128, 1, None).unwrap() };
unsafe {
allocator.free(ptr, 128);
// Without the defensive walk in free(), this re-insert builds a
// self-loop in the free list and the next malloc spins forever.
allocator.free(ptr, 128);
}
}

#[test]
fn multi_range_oom() {
// Allocates multiple ranges, then frees one of them at random; only after
// that free does allocation succeed without OOM.
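
The defensive walk added to `free` costs a pass over the free list on every free; the trade is O(free-list length) per call against an unbounded hang on the next `malloc`. A simplified sketch of the same pattern (hypothetical `FreeList` over arena indices, not the allocator's real `BestFitMeta` layout):

/// Hypothetical simplified free list: nodes are indices into a fixed arena,
/// and `next[i]` chains the free nodes, mirroring `BestFitMeta::next` in spirit.
struct FreeList {
    head: Option<usize>,
    next: Vec<Option<usize>>,
}

impl FreeList {
    fn free(&mut self, node: usize) {
        // Walk the whole list first: if `node` is already free, re-inserting
        // it would point next[node] at a chain that already contains node,
        // i.e. a cycle, and every later traversal would spin forever.
        let mut walk = self.head;
        while let Some(i) = walk {
            assert!(i != node, "double free");
            walk = self.next[i];
        }
        self.next[node] = self.head;
        self.head = Some(node);
    }
}

fn main() {
    let mut list = FreeList { head: None, next: vec![None; 4] };
    list.free(2);
    list.free(1);
    // list.free(2); // panics: "double free"
}
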
74 changes: 71 additions & 3 deletions src/mem/pfa/bitset.rs
@@ -66,10 +66,78 @@ impl<const WORDS: usize> super::Allocator<WORDS> for Allocator<WORDS> {
}

fn free(&mut self, addr: PhysAddr, page_count: usize) {
if !addr.is_multiple_of(super::PAGE_SIZE) {
panic!("Address must be page aligned");
}
bug_on!(
!addr.is_multiple_of(super::PAGE_SIZE),
"free address {} is not page-aligned",
addr
);
// diff() is absolute, so a sub-begin address would silently map to a
// bit elsewhere in the bitmap.
bug_on!(
addr < self.begin,
"free address {} below allocator begin {}",
addr,
self.begin
);
let idx = addr.diff(self.begin) / super::PAGE_SIZE;
self.bitalloc.free(idx, page_count);
}
}

#[cfg(test)]
mod tests {
use super::super::Allocator as _;
use super::*;

fn test_begin() -> PhysAddr {
let layout = std::alloc::Layout::from_size_align(
2 * 64 * super::super::PAGE_SIZE,
super::super::PAGE_SIZE,
)
.unwrap();
let ptr = unsafe { std::alloc::alloc(layout) };
PhysAddr::new(ptr as usize)
}

#[test]
fn alloc_free_roundtrip() {
let begin = test_begin();
let mut alloc = Allocator::<2>::new(begin).unwrap();

let a = alloc.alloc(1).unwrap();
let b = alloc.alloc(1).unwrap();
assert_ne!(a, b);

alloc.free(a, 1);
let c = alloc.alloc(1).unwrap();
assert_eq!(a, c, "freed page is returned by next alloc");
}

#[test]
fn alloc_returns_addresses_in_range() {
let begin = test_begin();
let mut alloc = Allocator::<1>::new(begin).unwrap();
let end = begin + 64 * super::super::PAGE_SIZE;

while let Some(addr) = alloc.alloc(1) {
assert!(
addr >= begin && addr < end,
"addr {addr} outside [{begin}, {end})"
);
assert!(
addr.is_multiple_of(super::super::PAGE_SIZE),
"addr {addr} not page-aligned"
);
}
}

#[test]
#[should_panic(expected = "below allocator begin")]
fn free_below_begin_panics() {
let begin = test_begin() + super::super::PAGE_SIZE;
let mut alloc = Allocator::<2>::new(begin).unwrap();
// diff() is absolute, so without the bound check a sub-begin address
// would silently clear a bit elsewhere in the bitmap.
alloc.free(begin - super::super::PAGE_SIZE, 1);
}
}
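
To see why the `addr < self.begin` guard is needed, assume `diff()` behaves like `usize::abs_diff` (an assumption about the hal's semantics, consistent with the comment above but not shown in this diff): an address one page below `begin` then produces the same bit index as the page one above it.

const PAGE_SIZE: usize = 4096;

fn main() {
    let begin = 0x10_0000usize;
    // One page above begin maps to bit 1, as intended.
    assert_eq!((begin + PAGE_SIZE).abs_diff(begin) / PAGE_SIZE, 1);
    // One page below begin *also* maps to bit 1: an absolute difference
    // cannot tell the two sides apart, so without the bound check the free
    // would silently clear a bit belonging to a live page.
    assert_eq!((begin - PAGE_SIZE).abs_diff(begin) / PAGE_SIZE, 1);
}
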
40 changes: 39 additions & 1 deletion src/mem/vmm.rs
@@ -59,7 +59,45 @@ impl Region {

#[allow(dead_code)]
pub fn contains(&self, addr: VirtAddr) -> bool {
self.start().saturating_add(self.len()) > addr && addr >= self.start()
let Some(start) = self.start else {
return false;
};
start.saturating_add(self.len()) > addr && addr >= start
}
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn unplaced_region_contains_nothing() {
let r = Region::new(None, 100, Backing::Uninit, Perms::Read);
assert!(!r.contains(VirtAddr::new(0)));
assert!(!r.contains(VirtAddr::new(50)));
assert!(!r.contains(VirtAddr::new(100)));
}

#[test]
fn placed_region_contains_within_bounds() {
let r = Region::new(Some(VirtAddr::new(100)), 50, Backing::Uninit, Perms::Read);
assert!(!r.contains(VirtAddr::new(99)));
assert!(r.contains(VirtAddr::new(100)));
assert!(r.contains(VirtAddr::new(149)));
assert!(!r.contains(VirtAddr::new(150)));
}

#[test]
fn placed_region_saturates_at_usize_max() {
let r = Region::new(
Some(VirtAddr::new(usize::MAX - 10)),
100,
Backing::Uninit,
Perms::Read,
);
assert!(r.contains(VirtAddr::new(usize::MAX - 10)));
assert!(r.contains(VirtAddr::new(usize::MAX - 1)));
assert!(!r.contains(VirtAddr::new(usize::MAX)));
}
}

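The `saturating_add` in `contains` is what makes regions near the top of the address space behave: a plain `+` would overflow there (a panic in debug builds, a wrap in release), and a wrapped end would make the upper-bound test reject the region's own addresses. A minimal sketch over plain `usize`s (hypothetical free function, mirroring the `placed_region_saturates_at_usize_max` test):

fn contains(start: usize, len: usize, addr: usize) -> bool {
    // saturating_add clamps to usize::MAX instead of wrapping, so a region
    // butting against the top of the address space excludes only usize::MAX
    // itself instead of collapsing to a tiny wrapped range.
    start.saturating_add(len) > addr && addr >= start
}

fn main() {
    let start = usize::MAX - 10;
    assert!(contains(start, 100, usize::MAX - 1));
    assert!(!contains(start, 100, usize::MAX));
    // With wrapping arithmetic, start + 100 would wrap to 89, and every
    // address actually inside the region would fail the upper-bound test.
    assert!(!contains(start, 100, 0));
}
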
99 changes: 94 additions & 5 deletions src/mem/vmm/nommu.rs
@@ -1,4 +1,4 @@
use core::ptr::copy_nonoverlapping;
use core::ptr::{NonNull, copy_nonoverlapping};

use crate::hal::mem::{PhysAddr, VirtAddr};

@@ -55,7 +55,11 @@ impl vmm::AddressSpacelike for AddressSpace {
Ok(start.into())
}

fn unmap(&mut self, _region: &vmm::Region) -> Result<()> {
fn unmap(&mut self, region: &vmm::Region) -> Result<()> {
let virt = region.start.ok_or(kerr!(EINVAL))?;
let phys = self.virt_to_phys(virt).ok_or(kerr!(EINVAL))?;
let ptr = NonNull::new(phys.as_mut_ptr::<u8>()).ok_or(kerr!(EINVAL))?;
unsafe { self.allocator.free(ptr, region.len()) };
Ok(())
}

@@ -64,20 +68,105 @@ impl vmm::AddressSpacelike for AddressSpace {
}

fn phys_to_virt(&self, addr: PhysAddr) -> Option<VirtAddr> {
if addr < self.begin || addr >= self.end {
return None;
}
addr.checked_sub(self.begin.as_usize())
.map(|phys| VirtAddr::new(phys.as_usize()))
}

fn virt_to_phys(&self, addr: VirtAddr) -> Option<PhysAddr> {
self.begin.checked_add(addr.as_usize())
let phys = self.begin.checked_add(addr.as_usize())?;
if phys >= self.end {
return None;
}
Some(phys)
}

fn end(&self) -> VirtAddr {
// This should always succeed.
self.phys_to_virt(self.end).unwrap()
VirtAddr::new(self.end.diff(self.begin))
}

fn activate(&self) -> Result<()> {
Ok(())
}
}

impl Drop for AddressSpace {
fn drop(&mut self) {
// Without this the per-task page reservation returns to the PFA only on
// process death, which means PFA exhaustion under task churn.
let pgs = self.end.diff(self.begin) / pfa::PAGE_SIZE;
if pgs > 0 {
pfa::free_page(self.begin, pgs);
}
}
}

#[cfg(test)]
mod tests {
use super::*;
use crate::mem::vmm::{AddressSpacelike, Backing, Perms, Region};

fn make_addr_space(size: usize) -> AddressSpace {
let layout =
std::alloc::Layout::from_size_align(size, core::mem::align_of::<u128>()).unwrap();
let ptr = unsafe { std::alloc::alloc(layout) };
let begin = PhysAddr::new(ptr as usize);
let end = begin + size;
let mut allocator = bestfit::BestFitAllocator::new();
unsafe { allocator.add_range(&(begin..end)).unwrap() };
AddressSpace {
begin,
end,
allocator,
}
}

#[test]
fn unmap_returns_space_to_allocator() {
let mut as_ = make_addr_space(4096);

let region = Region::new(None, 2048, Backing::Uninit, Perms::Read);
let phys = as_.map(region).unwrap();

let virt = as_.phys_to_virt(phys).unwrap();
let placed = Region::new(Some(virt), 2048, Backing::Uninit, Perms::Read);
as_.unmap(&placed).unwrap();

let region2 = Region::new(None, 2048, Backing::Uninit, Perms::Read);
as_.map(region2).expect("re-map after unmap should not OOM");
}

#[test]
fn unmap_unplaced_region_rejected() {
let mut as_ = make_addr_space(4096);
let region = Region::new(None, 128, Backing::Uninit, Perms::Read);
assert!(as_.unmap(&region).is_err());
}

#[test]
fn virt_to_phys_rejects_out_of_range() {
let as_ = make_addr_space(4096);
let size = as_.end.diff(as_.begin);
assert!(as_.virt_to_phys(VirtAddr::new(size)).is_none());
assert!(as_.virt_to_phys(VirtAddr::new(size + 1)).is_none());
assert!(as_.virt_to_phys(VirtAddr::new(usize::MAX)).is_none());
}

#[test]
fn phys_to_virt_rejects_out_of_range() {
let as_ = make_addr_space(4096);
assert!(as_.phys_to_virt(as_.end).is_none());
assert!(as_.phys_to_virt(as_.begin - 1).is_none());
assert!(as_.phys_to_virt(as_.end + 1).is_none());
}

#[test]
fn virt_phys_roundtrip() {
let as_ = make_addr_space(4096);
let v = VirtAddr::new(128);
let p = as_.virt_to_phys(v).unwrap();
assert_eq!(as_.phys_to_virt(p), Some(v));
}
}
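
The nommu address space is a single identity-offset window: `virt = phys - begin`, valid only while `begin <= phys < end`, which is exactly what the two bounds checks above enforce. A minimal sketch of the pair and their roundtrip property (hypothetical `Window` over plain `usize`s, not the kernel's `PhysAddr`/`VirtAddr` types):

/// Hypothetical stand-in for the nommu AddressSpace window: [begin, end) in
/// physical space maps to [0, end - begin) in virtual space.
struct Window { begin: usize, end: usize }

impl Window {
    fn virt_to_phys(&self, v: usize) -> Option<usize> {
        let p = self.begin.checked_add(v)?; // reject overflow, as above
        (p < self.end).then_some(p)         // reject addresses past the window
    }
    fn phys_to_virt(&self, p: usize) -> Option<usize> {
        (p >= self.begin && p < self.end).then(|| p - self.begin)
    }
}

fn main() {
    let w = Window { begin: 0x8000_0000, end: 0x8000_1000 };
    // The roundtrip holds exactly on the window's interior.
    for v in [0, 0x80, 0xfff] {
        let p = w.virt_to_phys(v).unwrap();
        assert_eq!(w.phys_to_virt(p), Some(v));
    }
    // Both directions reject the first address past the window.
    assert_eq!(w.virt_to_phys(0x1000), None);
    assert_eq!(w.phys_to_virt(0x8000_1000), None);
}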