From 977fc9f72c6bdf4cc037b5267a22fbb3ec82c793 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20L=C3=B3pez?=
Date: Mon, 30 Sep 2024 18:03:31 +0200
Subject: [PATCH 1/2] mm/virtualrange: introduce the VRangeAlloc type
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Introduce a new smart pointer type for virtual range allocations. The
new type automatically frees the virtual range from the correct
allocator on drop. This significantly simplifies the logic of the
PerCPUPageMappingGuard methods.

Signed-off-by: Carlos López
---
 kernel/src/mm/ptguards.rs     | 82 +++++++++++--------------------
 kernel/src/mm/virtualrange.rs | 92 ++++++++++++++++++++---------------
 2 files changed, 82 insertions(+), 92 deletions(-)

diff --git a/kernel/src/mm/ptguards.rs b/kernel/src/mm/ptguards.rs
index da02f82ff..0836a42b8 100644
--- a/kernel/src/mm/ptguards.rs
+++ b/kernel/src/mm/ptguards.rs
@@ -10,9 +10,7 @@ use crate::cpu::percpu::this_cpu;
 use crate::cpu::tlb::flush_address_percpu;
 use crate::error::SvsmError;
 use crate::insn_decode::{InsnError, InsnMachineMem};
-use crate::mm::virtualrange::{
-    virt_alloc_range_2m, virt_alloc_range_4k, virt_free_range_2m, virt_free_range_4k,
-};
+use crate::mm::virtualrange::VRangeAlloc;
 use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M};
 use crate::utils::MemoryRegion;
 use core::marker::PhantomData;
@@ -21,8 +19,7 @@
 #[derive(Debug)]
 #[must_use = "if unused the mapping will immediately be unmapped"]
 pub struct PerCPUPageMappingGuard {
-    mapping: MemoryRegion<VirtAddr>,
-    huge: bool,
+    mapping: VRangeAlloc,
 }
 
 impl PerCPUPageMappingGuard {
@@ -58,32 +55,22 @@ impl PerCPUPageMappingGuard {
         let flags = PTEntryFlags::data();
         let huge = ((paddr_start.bits() & (PAGE_SIZE_2M - 1)) == 0)
             && ((paddr_end.bits() & (PAGE_SIZE_2M - 1)) == 0);
-        let raw_mapping = if huge {
-            let region = virt_alloc_range_2m(size, 0)?;
-            if let Err(e) = this_cpu()
+
+        let mapping = if huge {
+            let range = VRangeAlloc::new_2m(size, 0)?;
+            this_cpu()
                 .get_pgtable()
-                .map_region_2m(region, paddr_start, flags)
-            {
-                virt_free_range_2m(region);
-                return Err(e);
-            }
-            region
+                .map_region_2m(range.region(), paddr_start, flags)?;
+            range
         } else {
-            let region = virt_alloc_range_4k(size, 0)?;
-            if let Err(e) = this_cpu()
+            let range = VRangeAlloc::new_4k(size, 0)?;
+            this_cpu()
                 .get_pgtable()
-                .map_region_4k(region, paddr_start, flags)
-            {
-                virt_free_range_4k(region);
-                return Err(e);
-            }
-            region
+                .map_region_4k(range.region(), paddr_start, flags)?;
+            range
         };
-        Ok(PerCPUPageMappingGuard {
-            mapping: raw_mapping,
-            huge,
-        })
+        Ok(Self { mapping })
     }
 
     /// Creates a new [`PerCPUPageMappingGuard`] for a 4KB page at the
@@ -94,7 +81,7 @@ impl PerCPUPageMappingGuard {
 
     /// Returns the virtual address associated with the guard.
     pub fn virt_addr(&self) -> VirtAddr {
-        self.mapping.start()
+        self.mapping.region().start()
     }
 
     /// Creates a virtual contiguous mapping for the given 4k physical pages which
@@ -112,49 +99,36 @@ impl PerCPUPageMappingGuard {
     /// mapping that was created. If an error occurs while creating the page
     /// mapping, it returns a `SvsmError`.
     pub fn create_4k_pages(pages: &[(PhysAddr, bool)]) -> Result<Self, SvsmError> {
-        let region = virt_alloc_range_4k(pages.len() * PAGE_SIZE, 0)?;
+        let mapping = VRangeAlloc::new_4k(pages.len() * PAGE_SIZE, 0)?;
         let flags = PTEntryFlags::data();
         let mut pgtable = this_cpu().get_pgtable();
 
-        for (i, addr) in region.iter_pages(PageSize::Regular).enumerate() {
+        for (i, addr) in mapping.region().iter_pages(PageSize::Regular).enumerate() {
             assert!(pages[i].0.is_aligned(PAGE_SIZE));
 
-            pgtable
-                .map_4k(addr, pages[i].0, flags)
-                .and_then(|_| {
-                    if pages[i].1 {
-                        pgtable.set_shared_4k(addr)
-                    } else {
-                        Ok(())
-                    }
-                })
-                .map_err(|e| {
-                    virt_free_range_4k(region);
-                    e
-                })?;
+            pgtable.map_4k(addr, pages[i].0, flags)?;
+            if pages[i].1 {
+                pgtable.set_shared_4k(addr)?;
+            }
         }
-        Ok(PerCPUPageMappingGuard {
-            mapping: region,
-            huge: false,
-        })
+        Ok(Self { mapping })
     }
 }
 
 impl Drop for PerCPUPageMappingGuard {
     fn drop(&mut self) {
-        let size = if self.huge {
-            this_cpu().get_pgtable().unmap_region_2m(self.mapping);
-            virt_free_range_2m(self.mapping);
+        let region = self.mapping.region();
+        let size = if self.mapping.huge() {
+            this_cpu().get_pgtable().unmap_region_2m(region);
             PageSize::Huge
         } else {
-            this_cpu().get_pgtable().unmap_region_4k(self.mapping);
-            virt_free_range_4k(self.mapping);
+            this_cpu().get_pgtable().unmap_region_4k(region);
             PageSize::Regular
         };
         // This iterative flush is acceptable for same-CPU mappings because no
         // broadcast is involved for each iteration.
-        for page in self.mapping.iter_pages(size) {
+        for page in region.iter_pages(size) {
             flush_address_percpu(page);
         }
     }
 }
@@ -185,7 +159,7 @@ impl MemMappingGuard {
     ///
     /// Self is returned.
     pub fn new(guard: PerCPUPageMappingGuard, start_off: usize) -> Result<Self, SvsmError> {
-        if start_off >= guard.mapping.len() {
+        if start_off >= guard.mapping.region().len() {
             Err(SvsmError::Mem)
         } else {
             Ok(Self {
@@ -257,7 +231,7 @@
                     .checked_add(self.start_off + offset)?,
                 len,
             )
-            .filter(|v| self.guard.mapping.contains_region(v))
+            .filter(|v| self.guard.mapping.region().contains_region(v))
         } else {
             None
         }
diff --git a/kernel/src/mm/virtualrange.rs b/kernel/src/mm/virtualrange.rs
index 7670f919b..3fb6ee297 100644
--- a/kernel/src/mm/virtualrange.rs
+++ b/kernel/src/mm/virtualrange.rs
@@ -80,50 +80,66 @@ pub fn virt_log_usage() {
     );
 }
 
-pub fn virt_alloc_range_4k(
-    size_bytes: usize,
-    alignment: usize,
-) -> Result<MemoryRegion<VirtAddr>, SvsmError> {
-    // Each bit in our bitmap represents a 4K page
-    if (size_bytes & (PAGE_SIZE - 1)) != 0 {
-        return Err(SvsmError::Mem);
-    }
-    let page_count = size_bytes >> PAGE_SHIFT;
-    let addr = this_cpu()
-        .vrange_4k
-        .borrow_mut()
-        .alloc(page_count, alignment)?;
-    Ok(MemoryRegion::new(addr, size_bytes))
+#[derive(Debug)]
+pub struct VRangeAlloc {
+    region: MemoryRegion<VirtAddr>,
+    huge: bool,
 }
 
-pub fn virt_free_range_4k(vregion: MemoryRegion<VirtAddr>) {
-    this_cpu()
-        .vrange_4k
-        .borrow_mut()
-        .free(vregion.start(), vregion.len() >> PAGE_SHIFT);
-}
+impl VRangeAlloc {
+    /// Returns a virtual memory region in the 4K virtual range.
+    pub fn new_4k(size: usize, align: usize) -> Result<Self, SvsmError> {
+        // Each bit in our bitmap represents a 4K page
+        if (size & (PAGE_SIZE - 1)) != 0 {
+            return Err(SvsmError::Mem);
+        }
+        let page_count = size >> PAGE_SHIFT;
+        let addr = this_cpu().vrange_4k.borrow_mut().alloc(page_count, align)?;
+        let region = MemoryRegion::new(addr, size);
+        Ok(Self {
+            region,
+            huge: false,
+        })
+    }
 
-pub fn virt_alloc_range_2m(
-    size_bytes: usize,
-    alignment: usize,
-) -> Result<MemoryRegion<VirtAddr>, SvsmError> {
-    // Each bit in our bitmap represents a 2M page
-    if (size_bytes & (PAGE_SIZE_2M - 1)) != 0 {
-        return Err(SvsmError::Mem);
+    /// Returns a virtual memory region in the 2M virtual range.
+    pub fn new_2m(size: usize, align: usize) -> Result<Self, SvsmError> {
+        // Each bit in our bitmap represents a 2M page
+        if (size & (PAGE_SIZE_2M - 1)) != 0 {
+            return Err(SvsmError::Mem);
+        }
+        let page_count = size >> PAGE_SHIFT_2M;
+        let addr = this_cpu().vrange_2m.borrow_mut().alloc(page_count, align)?;
+        let region = MemoryRegion::new(addr, size);
+        Ok(Self { region, huge: true })
+    }
+
+    /// Returns the virtual memory region that this allocation spans.
+    pub const fn region(&self) -> MemoryRegion<VirtAddr> {
+        self.region
+    }
+
+    /// Returns true if the allocation was made from the huge (2M) virtual range.
+    pub const fn huge(&self) -> bool {
+        self.huge
     }
-    let page_count = size_bytes >> PAGE_SHIFT_2M;
-    let addr = this_cpu()
-        .vrange_2m
-        .borrow_mut()
-        .alloc(page_count, alignment)?;
-    Ok(MemoryRegion::new(addr, size_bytes))
 }
 
-pub fn virt_free_range_2m(vregion: MemoryRegion<VirtAddr>) {
-    this_cpu()
-        .vrange_2m
-        .borrow_mut()
-        .free(vregion.start(), vregion.len() >> PAGE_SHIFT_2M);
+impl Drop for VRangeAlloc {
+    fn drop(&mut self) {
+        let region = self.region();
+        if self.huge {
+            this_cpu()
+                .vrange_2m
+                .borrow_mut()
+                .free(region.start(), region.len() >> PAGE_SHIFT_2M);
+        } else {
+            this_cpu()
+                .vrange_4k
+                .borrow_mut()
+                .free(region.start(), region.len() >> PAGE_SHIFT);
+        }
+    }
 }
 
 #[cfg(test)]

From c6146509a5c42cd7013bdaf702ee3dfa750792ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20L=C3=B3pez?=
Date: Mon, 30 Sep 2024 18:03:31 +0200
Subject: [PATCH 2/2] mm/ptguards: use iterator instead of slice indexing
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Zip two iterators instead of manually indexing into a slice in
PerCPUPageMappingGuard::create_4k_pages(), as this is more idiomatic
Rust.

Signed-off-by: Carlos López
---
 kernel/src/mm/ptguards.rs | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/kernel/src/mm/ptguards.rs b/kernel/src/mm/ptguards.rs
index 0836a42b8..abaa3cc4e 100644
--- a/kernel/src/mm/ptguards.rs
+++ b/kernel/src/mm/ptguards.rs
@@ -103,12 +103,15 @@ impl PerCPUPageMappingGuard {
         let flags = PTEntryFlags::data();
         let mut pgtable = this_cpu().get_pgtable();
 
-        for (i, addr) in mapping.region().iter_pages(PageSize::Regular).enumerate() {
-            assert!(pages[i].0.is_aligned(PAGE_SIZE));
-
-            pgtable.map_4k(addr, pages[i].0, flags)?;
-            if pages[i].1 {
-                pgtable.set_shared_4k(addr)?;
+        for (vaddr, (paddr, shared)) in mapping
+            .region()
+            .iter_pages(PageSize::Regular)
+            .zip(pages.iter().copied())
+        {
+            assert!(paddr.is_page_aligned());
+            pgtable.map_4k(vaddr, paddr, flags)?;
+            if shared {
+                pgtable.set_shared_4k(vaddr)?;
             }
         }
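
As a usage illustration of the RAII pattern patch 1 introduces, here is
a minimal sketch. The function and its body are hypothetical; the
VRangeAlloc API, SvsmError, and PAGE_SIZE are as defined in the series:

    fn with_scratch_range() -> Result<(), SvsmError> {
        // Reserve one 4K page of per-CPU virtual address space. If any
        // later step fails and we return early via `?`, the VRangeAlloc
        // destructor hands the range back to the per-CPU 4K allocator,
        // so no explicit virt_free_range_4k() call is needed.
        let range = VRangeAlloc::new_4k(PAGE_SIZE, 0)?;
        let _vaddr = range.region().start();
        // ... map something at `_vaddr` and use it ...
        Ok(())
        // `range` is dropped here, automatically freeing the virtual
        // range from the correct (4K) allocator.
    }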