diff --git a/.github/scripts/ci-style.sh b/.github/scripts/ci-style.sh index 51b6a095bc..eb9ccf49ec 100755 --- a/.github/scripts/ci-style.sh +++ b/.github/scripts/ci-style.sh @@ -1,6 +1,6 @@ . $(dirname "$0")/ci-common.sh -export RUSTFLAGS="-D warnings" +export RUSTFLAGS="-D warnings -A unknown-lints" # --- Check main crate --- diff --git a/src/memory_manager.rs b/src/memory_manager.rs index 6327207601..875981151c 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -88,7 +88,12 @@ pub fn mmtk_init(builder: &MMTKBuilder) -> Box> { #[cfg(feature = "vm_space")] pub fn lazy_init_vm_space(mmtk: &'static mut MMTK, start: Address, size: usize) { - mmtk.plan.base_mut().vm_space.lazy_initialize(start, size); + unsafe { + mmtk.get_plan_mut() + .base_mut() + .vm_space + .lazy_initialize(start, size); + } } /// Request MMTk to create a mutator for the given thread. The ownership @@ -345,7 +350,7 @@ pub fn get_allocator_mapping( mmtk: &MMTK, semantics: AllocationSemantics, ) -> AllocatorSelector { - mmtk.plan.get_allocator_mapping()[semantics] + mmtk.get_plan().get_allocator_mapping()[semantics] } /// The standard malloc. MMTk either uses its own allocator, or forward the call to a @@ -467,11 +472,14 @@ pub fn start_worker( /// Collection::spawn_gc_thread() so that the VM knows the context. pub fn initialize_collection(mmtk: &'static MMTK, tls: VMThread) { assert!( - !mmtk.plan.is_initialized(), + !mmtk.get_plan().is_initialized(), "MMTk collection has been initialized (was initialize_collection() already called before?)" ); mmtk.scheduler.spawn_gc_threads(mmtk, tls); - mmtk.plan.base().initialized.store(true, Ordering::SeqCst); + mmtk.get_plan() + .base() + .initialized + .store(true, Ordering::SeqCst); probe!(mmtk, collection_initialized); } @@ -483,10 +491,10 @@ pub fn initialize_collection(mmtk: &'static MMTK, tls: VMThre /// * `mmtk`: A reference to an MMTk instance. pub fn enable_collection(mmtk: &'static MMTK) { debug_assert!( - !mmtk.plan.should_trigger_gc_when_heap_is_full(), + !mmtk.get_plan().should_trigger_gc_when_heap_is_full(), "enable_collection() is called when GC is already enabled." ); - mmtk.plan + mmtk.get_plan() .base() .trigger_gc_when_heap_is_full .store(true, Ordering::SeqCst); @@ -504,10 +512,10 @@ pub fn enable_collection(mmtk: &'static MMTK) { /// * `mmtk`: A reference to an MMTk instance. pub fn disable_collection(mmtk: &'static MMTK) { debug_assert!( - mmtk.plan.should_trigger_gc_when_heap_is_full(), + mmtk.get_plan().should_trigger_gc_when_heap_is_full(), "disable_collection() is called when GC is not enabled." ); - mmtk.plan + mmtk.get_plan() .base() .trigger_gc_when_heap_is_full .store(false, Ordering::SeqCst); @@ -538,7 +546,7 @@ pub fn process_bulk(builder: &mut MMTKBuilder, options: &str) -> bool { /// Arguments: /// * `mmtk`: A reference to an MMTk instance. pub fn used_bytes(mmtk: &MMTK) -> usize { - mmtk.plan.get_used_pages() << LOG_BYTES_IN_PAGE + mmtk.get_plan().get_used_pages() << LOG_BYTES_IN_PAGE } /// Return free memory in bytes. MMTk accounts for memory in pages, thus this method always returns a value in @@ -547,7 +555,7 @@ pub fn used_bytes(mmtk: &MMTK) -> usize { /// Arguments: /// * `mmtk`: A reference to an MMTk instance. pub fn free_bytes(mmtk: &MMTK) -> usize { - mmtk.plan.get_free_pages() << LOG_BYTES_IN_PAGE + mmtk.get_plan().get_free_pages() << LOG_BYTES_IN_PAGE } /// Return the size of all the live objects in bytes in the last GC. MMTk usually accounts for memory in pages. 
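Nearly every hunk in `memory_manager.rs` is the same mechanical change: call sites stop reaching into the `plan` field and go through `get_plan()`, or through the new `unsafe` `get_plan_mut()` where mutation is genuinely needed (as in `lazy_init_vm_space`). Below is a minimal, self-contained sketch of the accessor pair those call sites rely on; the trait, struct, and method bodies are simplified placeholders, not the exact MMTk code (the real definitions appear in the `mmtk.rs` hunks further down).

```rust
use std::cell::UnsafeCell;

trait Plan {
    fn used_pages(&self) -> usize;
    fn grow(&mut self, pages: usize);
}

struct Mmtk {
    // Interior mutability: the plan can now only be mutated through the
    // explicitly-unsafe accessor below, instead of via ad-hoc pointer casts.
    plan: UnsafeCell<Box<dyn Plan>>,
}

impl Mmtk {
    fn get_plan(&self) -> &dyn Plan {
        unsafe { &**self.plan.get() }
    }

    /// # Safety
    /// The caller must ensure no other thread is using the plan, e.g. the
    /// world is stopped or this is the only work packet touching it.
    #[allow(clippy::mut_from_ref)]
    unsafe fn get_plan_mut(&self) -> &mut dyn Plan {
        &mut **self.plan.get()
    }
}

struct NoGc {
    pages: usize,
}
impl Plan for NoGc {
    fn used_pages(&self) -> usize {
        self.pages
    }
    fn grow(&mut self, pages: usize) {
        self.pages += pages;
    }
}

fn main() {
    let mmtk = Mmtk {
        plan: UnsafeCell::new(Box::new(NoGc { pages: 0 })),
    };
    // Reads stay safe, matching calls such as used_bytes() above.
    assert_eq!(mmtk.get_plan().used_pages(), 0);
    // Mutation becomes an explicit unsafe step, matching lazy_init_vm_space().
    unsafe { mmtk.get_plan_mut() }.grow(4);
    assert_eq!(mmtk.get_plan().used_pages(), 4);
}
```

The practical effect is that read-only call sites stay safe code, while every mutation of the plan becomes a grep-able `unsafe` block with a stated invariant.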
@@ -558,7 +566,7 @@ pub fn free_bytes(mmtk: &MMTK) -> usize { /// to call this method is at the end of a GC (e.g. when the runtime is about to resume threads). #[cfg(feature = "count_live_bytes_in_gc")] pub fn live_bytes_in_last_gc(mmtk: &MMTK) -> usize { - mmtk.plan + mmtk.get_plan() .base() .live_bytes_in_last_gc .load(Ordering::SeqCst) @@ -581,7 +589,7 @@ pub fn last_heap_address() -> Address { /// Arguments: /// * `mmtk`: A reference to an MMTk instance. pub fn total_bytes(mmtk: &MMTK) -> usize { - mmtk.plan.get_total_pages() << LOG_BYTES_IN_PAGE + mmtk.get_plan().get_total_pages() << LOG_BYTES_IN_PAGE } /// Trigger a garbage collection as requested by the user. @@ -590,7 +598,8 @@ pub fn total_bytes(mmtk: &MMTK) -> usize { /// * `mmtk`: A reference to an MMTk instance. /// * `tls`: The thread that triggers this collection request. pub fn handle_user_collection_request(mmtk: &MMTK, tls: VMMutatorThread) { - mmtk.plan.handle_user_collection_request(tls, false, false); + mmtk.get_plan() + .handle_user_collection_request(tls, false, false); } /// Is the object alive? @@ -709,7 +718,7 @@ pub fn is_mapped_address(address: Address) -> bool { /// * `mmtk`: A reference to an MMTk instance. /// * `object`: The object to check. pub fn modify_check(mmtk: &MMTK, object: ObjectReference) { - mmtk.plan.modify_check(object); + mmtk.get_plan().modify_check(object); } /// Add a reference to the list of weak references. A binding may diff --git a/src/mmtk.rs b/src/mmtk.rs index 687b6894fa..b7fef582d0 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -15,6 +15,7 @@ use crate::util::reference_processor::ReferenceProcessors; use crate::util::sanity::sanity_checker::SanityChecker; use crate::vm::ReferenceGlue; use crate::vm::VMBinding; +use std::cell::UnsafeCell; use std::default::Default; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; @@ -30,10 +31,10 @@ lazy_static! { // TODO: We should refactor this when we know more about how multiple MMTK instances work. /// A global VMMap that manages the mapping of spaces to virtual memory ranges. - pub static ref VM_MAP: Box = layout::create_vm_map(); + pub static ref VM_MAP: Box = layout::create_vm_map(); /// A global Mmapper for mmaping and protection of virtual memory. - pub static ref MMAPPER: Box = layout::create_mmapper(); + pub static ref MMAPPER: Box = layout::create_mmapper(); } use crate::util::rust_util::InitializeOnce; @@ -88,7 +89,7 @@ impl Default for MMTKBuilder { /// *Note that multi-instances is not fully supported yet* pub struct MMTK { pub(crate) options: Arc, - pub(crate) plan: Box>, + pub(crate) plan: UnsafeCell>>, pub(crate) reference_processors: ReferenceProcessors, pub(crate) finalizable_processor: Mutex>::FinalizableType>>, @@ -100,6 +101,9 @@ pub struct MMTK { inside_harness: AtomicBool, } +unsafe impl Sync for MMTK {} +unsafe impl Send for MMTK {} + impl MMTK { pub fn new(options: Arc) -> Self { // Initialize SFT first in case we need to use this in the constructor. 
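Wrapping the plan in `UnsafeCell` is what forces the two `unsafe impl`s just above: `UnsafeCell<T>` is `!Sync`, so `MMTK` stops being auto-`Sync` the moment the field changes type, and the impls reassert shareability on the promise that all unsynchronized mutation goes through the documented-unsafe accessors. A small illustration with placeholder types (not MMTk's), assuming the same invariant:

```rust
use std::cell::UnsafeCell;

struct Inner {
    counter: u64,
}

struct Shared {
    cell: UnsafeCell<Inner>,
}

// UnsafeCell is !Sync, so `Shared` loses auto-derived Sync as soon as the
// field is wrapped. These impls restore it; they are a code-review-checked
// promise that every mutation of `cell` happens while no other thread can
// observe it (the MMTK impls above make the same promise about `plan`).
unsafe impl Sync for Shared {}
unsafe impl Send for Shared {}

fn assert_shareable<T: Send + Sync>() {}

fn main() {
    assert_shareable::<Shared>(); // would not compile without the impls above
    let shared = Shared {
        cell: UnsafeCell::new(Inner { counter: 0 }),
    };
    // Reads through a shared reference remain fine:
    let counter = unsafe { (*shared.cell.get()).counter };
    assert_eq!(counter, 0);
}
```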
@@ -136,7 +140,7 @@ impl MMTK { MMTK { options, - plan, + plan: UnsafeCell::new(plan), reference_processors: ReferenceProcessors::new(), finalizable_processor: Mutex::new(FinalizableProcessor::< >::FinalizableType, @@ -152,20 +156,31 @@ impl MMTK { pub fn harness_begin(&self, tls: VMMutatorThread) { probe!(mmtk, harness_begin); - self.plan.handle_user_collection_request(tls, true, true); + self.get_plan() + .handle_user_collection_request(tls, true, true); self.inside_harness.store(true, Ordering::SeqCst); - self.plan.base().stats.start_all(); + self.get_plan().base().stats.start_all(); self.scheduler.enable_stat(); } pub fn harness_end(&'static self) { - self.plan.base().stats.stop_all(self); + self.get_plan().base().stats.stop_all(self); self.inside_harness.store(false, Ordering::SeqCst); probe!(mmtk, harness_end); } pub fn get_plan(&self) -> &dyn Plan { - self.plan.as_ref() + unsafe { &**(self.plan.get()) } + } + + /// Get the plan as mutable reference. + /// + /// # Safety + /// + /// This is unsafe because the caller must ensure that the plan is not used by other threads. + #[allow(clippy::mut_from_ref)] + pub unsafe fn get_plan_mut(&self) -> &mut dyn Plan { + &mut **(self.plan.get()) } pub fn get_options(&self) -> &Options { diff --git a/src/plan/generational/copying/mutator.rs b/src/plan/generational/copying/mutator.rs index a7e013f999..dc8ae7025b 100644 --- a/src/plan/generational/copying/mutator.rs +++ b/src/plan/generational/copying/mutator.rs @@ -32,16 +32,19 @@ pub fn create_gencopy_mutator( mutator_tls: VMMutatorThread, mmtk: &'static MMTK, ) -> Mutator { - let gencopy = mmtk.plan.downcast_ref::>().unwrap(); + let gencopy = mmtk.get_plan().downcast_ref::>().unwrap(); let config = MutatorConfig { allocator_mapping: &ALLOCATOR_MAPPING, - space_mapping: Box::new(create_gen_space_mapping(&*mmtk.plan, &gencopy.gen.nursery)), + space_mapping: Box::new(create_gen_space_mapping( + mmtk.get_plan(), + &gencopy.gen.nursery, + )), prepare_func: &gencopy_mutator_prepare, release_func: &gencopy_mutator_release, }; Mutator { - allocators: Allocators::::new(mutator_tls, &*mmtk.plan, &config.space_mapping), + allocators: Allocators::::new(mutator_tls, mmtk.get_plan(), &config.space_mapping), barrier: Box::new(ObjectBarrier::new(GenObjectBarrierSemantics::new( mmtk, gencopy, ))), diff --git a/src/plan/generational/gc_work.rs b/src/plan/generational/gc_work.rs index 0ebd617ff6..88abf09c48 100644 --- a/src/plan/generational/gc_work.rs +++ b/src/plan/generational/gc_work.rs @@ -102,7 +102,12 @@ impl GCWork for ProcessModBuf { ); } // scan modbuf only if the current GC is a nursery GC - if mmtk.plan.generational().unwrap().is_current_gc_nursery() { + if mmtk + .get_plan() + .generational() + .unwrap() + .is_current_gc_nursery() + { // Scan objects in the modbuf and forward pointers let modbuf = std::mem::take(&mut self.modbuf); GCWork::do_work( @@ -135,7 +140,12 @@ impl ProcessRegionModBuf { impl GCWork for ProcessRegionModBuf { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { // Scan modbuf only if the current GC is a nursery GC - if mmtk.plan.generational().unwrap().is_current_gc_nursery() { + if mmtk + .get_plan() + .generational() + .unwrap() + .is_current_gc_nursery() + { // Collect all the entries in all the slices let mut edges = vec![]; for slice in &self.modbuf { diff --git a/src/plan/generational/immix/mutator.rs b/src/plan/generational/immix/mutator.rs index bad646e5db..892d430d03 100644 --- a/src/plan/generational/immix/mutator.rs +++ 
b/src/plan/generational/immix/mutator.rs @@ -30,16 +30,19 @@ pub fn create_genimmix_mutator( mutator_tls: VMMutatorThread, mmtk: &'static MMTK, ) -> Mutator { - let genimmix = mmtk.plan.downcast_ref::>().unwrap(); + let genimmix = mmtk.get_plan().downcast_ref::>().unwrap(); let config = MutatorConfig { allocator_mapping: &ALLOCATOR_MAPPING, - space_mapping: Box::new(create_gen_space_mapping(&*mmtk.plan, &genimmix.gen.nursery)), + space_mapping: Box::new(create_gen_space_mapping( + mmtk.get_plan(), + &genimmix.gen.nursery, + )), prepare_func: &genimmix_mutator_prepare, release_func: &genimmix_mutator_release, }; Mutator { - allocators: Allocators::::new(mutator_tls, &*mmtk.plan, &config.space_mapping), + allocators: Allocators::::new(mutator_tls, mmtk.get_plan(), &config.space_mapping), barrier: Box::new(ObjectBarrier::new(GenObjectBarrierSemantics::new( mmtk, genimmix, ))), diff --git a/src/plan/global.rs b/src/plan/global.rs index 1ae518478c..c718c0c816 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -40,9 +40,9 @@ pub fn create_mutator( mmtk: &'static MMTK, ) -> Box> { Box::new(match *mmtk.options.plan { - PlanSelector::NoGC => crate::plan::nogc::mutator::create_nogc_mutator(tls, &*mmtk.plan), + PlanSelector::NoGC => crate::plan::nogc::mutator::create_nogc_mutator(tls, mmtk.get_plan()), PlanSelector::SemiSpace => { - crate::plan::semispace::mutator::create_ss_mutator(tls, &*mmtk.plan) + crate::plan::semispace::mutator::create_ss_mutator(tls, mmtk.get_plan()) } PlanSelector::GenCopy => { crate::plan::generational::copying::mutator::create_gencopy_mutator(tls, mmtk) @@ -51,14 +51,16 @@ pub fn create_mutator( crate::plan::generational::immix::mutator::create_genimmix_mutator(tls, mmtk) } PlanSelector::MarkSweep => { - crate::plan::marksweep::mutator::create_ms_mutator(tls, &*mmtk.plan) + crate::plan::marksweep::mutator::create_ms_mutator(tls, mmtk.get_plan()) + } + PlanSelector::Immix => { + crate::plan::immix::mutator::create_immix_mutator(tls, mmtk.get_plan()) } - PlanSelector::Immix => crate::plan::immix::mutator::create_immix_mutator(tls, &*mmtk.plan), PlanSelector::PageProtect => { - crate::plan::pageprotect::mutator::create_pp_mutator(tls, &*mmtk.plan) + crate::plan::pageprotect::mutator::create_pp_mutator(tls, mmtk.get_plan()) } PlanSelector::MarkCompact => { - crate::plan::markcompact::mutator::create_markcompact_mutator(tls, &*mmtk.plan) + crate::plan::markcompact::mutator::create_markcompact_mutator(tls, mmtk.get_plan()) } PlanSelector::StickyImmix => { crate::plan::sticky::immix::mutator::create_stickyimmix_mutator(tls, mmtk) @@ -137,7 +139,7 @@ pub fn create_gc_worker_context( tls: VMWorkerThread, mmtk: &'static MMTK, ) -> GCWorkerCopyContext { - GCWorkerCopyContext::::new(tls, &*mmtk.plan, mmtk.plan.create_copy_config()) + GCWorkerCopyContext::::new(tls, mmtk.get_plan(), mmtk.get_plan().create_copy_config()) } /// A plan describes the global core functionality for all memory management schemes. @@ -857,6 +859,7 @@ impl BasePlan { } #[allow(unused_variables)] // depending on the enabled features, base may not be used. + #[allow(clippy::needless_pass_by_ref_mut)] // depending on the enabled features, base may not be used. 
pub(crate) fn verify_side_metadata_sanity( &self, side_metadata_sanity_checker: &mut SideMetadataSanity, diff --git a/src/plan/markcompact/gc_work.rs b/src/plan/markcompact/gc_work.rs index eded813463..5d9c0e70c4 100644 --- a/src/plan/markcompact/gc_work.rs +++ b/src/plan/markcompact/gc_work.rs @@ -39,7 +39,7 @@ impl GCWork for UpdateReferences { fn do_work(&mut self, _worker: &mut GCWorker, mmtk: &'static MMTK) { // The following needs to be done right before the second round of root scanning VM::VMScanning::prepare_for_roots_re_scanning(); - mmtk.plan.base().prepare_for_stack_scanning(); + mmtk.get_plan().base().prepare_for_stack_scanning(); #[cfg(feature = "extreme_assertions")] mmtk.edge_logger.reset(); diff --git a/src/plan/sticky/immix/mutator.rs b/src/plan/sticky/immix/mutator.rs index 6a59794b64..c5b7b17721 100644 --- a/src/plan/sticky/immix/mutator.rs +++ b/src/plan/sticky/immix/mutator.rs @@ -24,12 +24,12 @@ pub fn create_stickyimmix_mutator( mutator_tls: VMMutatorThread, mmtk: &'static MMTK, ) -> Mutator { - let stickyimmix = mmtk.plan.downcast_ref::>().unwrap(); + let stickyimmix = mmtk.get_plan().downcast_ref::>().unwrap(); let config = MutatorConfig { allocator_mapping: &ALLOCATOR_MAPPING, space_mapping: Box::new({ let mut vec = - create_space_mapping(immix::mutator::RESERVED_ALLOCATORS, true, &*mmtk.plan); + create_space_mapping(immix::mutator::RESERVED_ALLOCATORS, true, mmtk.get_plan()); vec.push((AllocatorSelector::Immix(0), stickyimmix.get_immix_space())); vec }), @@ -38,13 +38,13 @@ pub fn create_stickyimmix_mutator( }; Mutator { - allocators: Allocators::::new(mutator_tls, &*mmtk.plan, &config.space_mapping), + allocators: Allocators::::new(mutator_tls, mmtk.get_plan(), &config.space_mapping), barrier: Box::new(ObjectBarrier::new(GenObjectBarrierSemantics::new( mmtk, stickyimmix, ))), mutator_tls, config, - plan: &*mmtk.plan, + plan: mmtk.get_plan(), } } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index c13e4ca76c..2cfc4256c9 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -195,7 +195,7 @@ impl crate::policy::gc_work::PolicyTraceObject for ImmixSpace if Block::containing::(object).is_defrag_source() { debug_assert!(self.in_defrag()); debug_assert!( - !crate::plan::is_nursery_gc(&*worker.mmtk.plan), + !crate::plan::is_nursery_gc(worker.mmtk.get_plan()), "Calling PolicyTraceObject on Immix in nursery GC" ); self.trace_object_with_opportunistic_copy( diff --git a/src/scheduler/controller.rs b/src/scheduler/controller.rs index 91ba487f34..93e655e312 100644 --- a/src/scheduler/controller.rs +++ b/src/scheduler/controller.rs @@ -130,7 +130,12 @@ impl GCController { self.scheduler.deactivate_all(); // Tell GC trigger that GC ended - this happens before EndOfGC where we resume mutators. - self.mmtk.plan.base().gc_trigger.policy.on_gc_end(self.mmtk); + self.mmtk + .get_plan() + .base() + .gc_trigger + .policy + .on_gc_end(self.mmtk); // Finalization: Resume mutators, reset gc states // Note: Resume-mutators must happen after all work buckets are closed. diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index bd69b0eae6..47d6d70761 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -14,14 +14,14 @@ pub struct ScheduleCollection; impl GCWork for ScheduleCollection { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { - mmtk.plan.schedule_collection(worker.scheduler()); + mmtk.get_plan().schedule_collection(worker.scheduler()); // Tell GC trigger that GC started. 
// We now know what kind of GC this is (e.g. nursery vs mature in gen copy, defrag vs fast in Immix) // TODO: Depending on the OS scheduling, other workers can run so fast that they can finish // everything in the `Unconstrained` and the `Prepare` buckets before we execute the next // statement. Consider if there is a better place to call `on_gc_start`. - mmtk.plan.base().gc_trigger.policy.on_gc_start(mmtk); + mmtk.get_plan().base().gc_trigger.policy.on_gc_start(mmtk); } } @@ -33,11 +33,13 @@ impl GCWork for ScheduleCollection { /// be no other concurrent work packet that accesses plan (read or write). Otherwise, there may /// be a race condition. pub struct Prepare { - pub plan: &'static C::PlanType, + pub plan: *const C::PlanType, } +unsafe impl Send for Prepare {} + impl Prepare { - pub fn new(plan: &'static C::PlanType) -> Self { + pub fn new(plan: *const C::PlanType) -> Self { Self { plan } } } @@ -46,7 +48,6 @@ impl GCWork for Prepare { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { trace!("Prepare Global"); // We assume this is the only running work packet that accesses plan at the point of execution - #[allow(clippy::cast_ref_to_mut)] let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) }; plan_mut.prepare(worker.tls); @@ -89,7 +90,7 @@ impl GCWork for PrepareCollector { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { trace!("Prepare Collector"); worker.get_copy_context_mut().prepare(); - mmtk.plan.prepare_worker(worker); + mmtk.get_plan().prepare_worker(worker); } } @@ -101,23 +102,26 @@ impl GCWork for PrepareCollector { /// be no other concurrent work packet that accesses plan (read or write). Otherwise, there may /// be a race condition. pub struct Release { - pub plan: &'static C::PlanType, + pub plan: *const C::PlanType, } impl Release { - pub fn new(plan: &'static C::PlanType) -> Self { + pub fn new(plan: *const C::PlanType) -> Self { Self { plan } } } +unsafe impl Send for Release {} + impl GCWork for Release { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { trace!("Release Global"); - self.plan.base().gc_trigger.policy.on_gc_release(mmtk); - + unsafe { + (*self.plan).base().gc_trigger.policy.on_gc_release(mmtk); + } // We assume this is the only running work packet that accesses plan at the point of execution - #[allow(clippy::cast_ref_to_mut)] + let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) }; plan_mut.release(worker.tls); @@ -136,7 +140,7 @@ impl GCWork for Release { .scheduler .worker_group .get_and_clear_worker_live_bytes(); - self.plan + mmtk.get_plan() .base() .live_bytes_in_last_gc .store(live_bytes, std::sync::atomic::Ordering::SeqCst); @@ -190,7 +194,7 @@ impl StopMutators { impl GCWork for StopMutators { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { trace!("stop_all_mutators start"); - mmtk.plan.base().prepare_for_stack_scanning(); + mmtk.get_plan().base().prepare_for_stack_scanning(); ::VMCollection::stop_all_mutators(worker.tls, |mutator| { // TODO: The stack scanning work won't start immediately, as the `Prepare` bucket is not opened yet (the bucket is opened in notify_mutators_paused). // Should we push to Unconstrained instead? 
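The `Prepare` and `Release` packets above switch from `&'static C::PlanType` to `*const C::PlanType` plus a manual `unsafe impl Send`, so the later `&mut` no longer has to be conjured by casting away a shared reference. A simplified sketch of that shape, with placeholder trait and type names and the same stated invariant (the scheduler runs no other packet that touches the plan concurrently):

```rust
trait Plan {
    fn prepare(&mut self);
}

// A work packet that needs mutable access to the plan but is created from a
// shared borrow and shipped to a worker thread.
struct Prepare<P: Plan> {
    // A raw pointer instead of &'static P: the packet no longer pretends the
    // borrow checker can prove anything about this access.
    plan: *const P,
}

// Raw pointers are !Send; this restores Send under the invariant the diff
// states: no other work packet accesses the plan while Prepare/Release run.
unsafe impl<P: Plan> Send for Prepare<P> {}

impl<P: Plan> Prepare<P> {
    fn new(plan: *const P) -> Self {
        Self { plan }
    }

    fn do_work(&mut self) {
        // The single point where mutability is produced, justified by the
        // scheduling invariant above (in MMTk the pointee ultimately lives
        // behind MMTK's UnsafeCell).
        let plan_mut: &mut P = unsafe { &mut *(self.plan as *mut P) };
        plan_mut.prepare();
    }
}

struct Dummy(u32);
impl Plan for Dummy {
    fn prepare(&mut self) {
        self.0 += 1;
    }
}

fn main() {
    let mut plan = Dummy(0);
    let mut packet = Prepare::new(&mut plan as *mut Dummy as *const Dummy);
    // Prepare<Dummy> is Send, so it can be handed to a worker thread.
    std::thread::spawn(move || packet.do_work()).join().unwrap();
    assert_eq!(plan.0, 1);
}
```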
@@ -212,20 +216,20 @@ impl GCWork for EndOfGC { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { info!( "End of GC ({}/{} pages, took {} ms)", - mmtk.plan.get_reserved_pages(), - mmtk.plan.get_total_pages(), + mmtk.get_plan().get_reserved_pages(), + mmtk.get_plan().get_total_pages(), self.elapsed.as_millis() ); #[cfg(feature = "count_live_bytes_in_gc")] { let live_bytes = mmtk - .plan + .get_plan() .base() .live_bytes_in_last_gc .load(std::sync::atomic::Ordering::SeqCst); let used_bytes = - mmtk.plan.get_used_pages() << crate::util::constants::LOG_BYTES_IN_PAGE; + mmtk.get_plan().get_used_pages() << crate::util::constants::LOG_BYTES_IN_PAGE; debug_assert!( live_bytes <= used_bytes, "Live bytes of all live objects ({} bytes) is larger than used pages ({} bytes), something is wrong.", @@ -235,25 +239,24 @@ impl GCWork for EndOfGC { "Live objects = {} bytes ({:04.1}% of {} used pages)", live_bytes, live_bytes as f64 * 100.0 / used_bytes as f64, - mmtk.plan.get_used_pages() + mmtk.get_plan().get_used_pages() ); } // We assume this is the only running work packet that accesses plan at the point of execution - #[allow(clippy::cast_ref_to_mut)] - let plan_mut: &mut dyn Plan = unsafe { &mut *(&*mmtk.plan as *const _ as *mut _) }; + let plan_mut: &mut dyn Plan = unsafe { mmtk.get_plan_mut() }; plan_mut.end_of_gc(worker.tls); #[cfg(feature = "extreme_assertions")] - if crate::util::edge_logger::should_check_duplicate_edges(&*mmtk.plan) { + if crate::util::edge_logger::should_check_duplicate_edges(mmtk.get_plan()) { // reset the logging info at the end of each GC mmtk.edge_logger.reset(); } - mmtk.plan.base().set_gc_status(GcStatus::NotInGC); + mmtk.get_plan().base().set_gc_status(GcStatus::NotInGC); // Reset the triggering information. - mmtk.plan.base().reset_collection_trigger(); + mmtk.get_plan().base().reset_collection_trigger(); ::VMCollection::resume_mutators(worker.tls); } @@ -448,7 +451,7 @@ pub struct ScanMutatorRoots(pub &'static mut Mutator GCWork for ScanMutatorRoots { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { trace!("ScanMutatorRoots for mutator {:?}", self.0.get_tls()); - let base = &mmtk.plan.base(); + let base = mmtk.get_plan().base(); let mutators = ::VMActivePlan::number_of_mutators(); let factory = ProcessEdgesWorkRootsWorkFactory::::new(mmtk); ::VMScanning::scan_roots_in_mutator_thread( @@ -458,7 +461,7 @@ impl GCWork for ScanMutatorRoots { ); self.0.flush(); - if mmtk.plan.base().inform_stack_scanned(mutators) { + if mmtk.get_plan().base().inform_stack_scanned(mutators) { ::VMScanning::notify_initial_thread_scan_complete( false, worker.tls, ); @@ -501,7 +504,7 @@ impl ProcessEdgesBase { // at creation. This avoids overhead for dynamic dispatch or downcasting plan for each object traced. pub fn new(edges: Vec, roots: bool, mmtk: &'static MMTK) -> Self { #[cfg(feature = "extreme_assertions")] - if crate::util::edge_logger::should_check_duplicate_edges(&*mmtk.plan) { + if crate::util::edge_logger::should_check_duplicate_edges(mmtk.get_plan()) { for edge in &edges { // log edge, panic if already logged mmtk.edge_logger.log_edge(*edge); @@ -528,7 +531,7 @@ impl ProcessEdgesBase { } pub fn plan(&self) -> &'static dyn Plan { - &*self.mmtk.plan + self.mmtk.get_plan() } /// Pop all nodes from nodes, and clear nodes to an empty vector. 
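`ProcessEdgesBase::plan()` above keeps returning `&'static dyn Plan` even though the field access is now routed through `get_plan()`: the accessor's elided lifetime ties the returned borrow to `&self`, so a `&'static MMTK` yields a `&'static dyn Plan` with no extra unsafe code at the call site. A compact sketch of that, again with placeholder types:

```rust
use std::cell::UnsafeCell;

trait Plan {
    fn name(&self) -> &'static str;
}

struct NoGc;
impl Plan for NoGc {
    fn name(&self) -> &'static str {
        "NoGC"
    }
}

struct Mmtk {
    plan: UnsafeCell<Box<dyn Plan>>,
}

impl Mmtk {
    // The borrow returned here inherits the lifetime of &self, so a
    // &'static Mmtk (which is what ProcessEdgesBase holds) still yields a
    // &'static dyn Plan.
    fn get_plan(&self) -> &dyn Plan {
        unsafe { &**self.plan.get() }
    }
}

fn plan_of(mmtk: &'static Mmtk) -> &'static dyn Plan {
    mmtk.get_plan()
}

fn main() {
    let mmtk: &'static Mmtk = Box::leak(Box::new(Mmtk {
        plan: UnsafeCell::new(Box::new(NoGc)),
    }));
    assert_eq!(plan_of(mmtk).name(), "NoGC");
}
```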
@@ -640,7 +643,7 @@ impl GCWork for E { self.flush(); } #[cfg(feature = "sanity")] - if self.roots && !_mmtk.plan.is_in_sanity() { + if self.roots && !_mmtk.get_plan().is_in_sanity() { self.cache_roots_for_sanity_gc(); } trace!("ProcessEdgesWork End"); @@ -760,7 +763,7 @@ pub trait ScanObjectsWork: GCWork + Sized { #[cfg(feature = "sanity")] { - if self.roots() && !mmtk.plan.is_in_sanity() { + if self.roots() && !mmtk.get_plan().is_in_sanity() { mmtk.sanity_checker .lock() .unwrap() diff --git a/src/scheduler/mod.rs b/src/scheduler/mod.rs index cedbbf16cd..7836417e4b 100644 --- a/src/scheduler/mod.rs +++ b/src/scheduler/mod.rs @@ -7,7 +7,7 @@ mod scheduler; pub(crate) use scheduler::GCWorkScheduler; mod stat; -pub(self) mod work_counter; +mod work_counter; mod work; pub use work::GCWork; diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index fb3928d357..02b6699fe0 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -93,7 +93,7 @@ impl GCWorkScheduler { ); let gc_controller = GCController::new( mmtk, - mmtk.plan.base().gc_requester.clone(), + mmtk.get_plan().base().gc_requester.clone(), self.clone(), coordinator_worker, ); @@ -399,7 +399,7 @@ impl GCWorkScheduler { } pub fn notify_mutators_paused(&self, mmtk: &'static MMTK) { - mmtk.plan.base().gc_requester.clear_request(); + mmtk.get_plan().base().gc_requester.clear_request(); let first_stw_bucket = &self.work_buckets[WorkBucketStage::first_stw_stage()]; debug_assert!(!first_stw_bucket.is_activated()); // Note: This is the only place where a non-coordinator thread opens a bucket. diff --git a/src/util/analysis/mod.rs b/src/util/analysis/mod.rs index 6df78658d1..8b25a17f59 100644 --- a/src/util/analysis/mod.rs +++ b/src/util/analysis/mod.rs @@ -33,7 +33,7 @@ pub struct GcHookWork; impl GCWork for GcHookWork { fn do_work(&mut self, _worker: &mut GCWorker, mmtk: &'static MMTK) { - let base = &mmtk.plan.base(); + let base = &mmtk.get_plan().base(); base.analysis_manager.gc_hook(mmtk); } } diff --git a/src/util/finalizable_processor.rs b/src/util/finalizable_processor.rs index 72545083a8..b15a961014 100644 --- a/src/util/finalizable_processor.rs +++ b/src/util/finalizable_processor.rs @@ -149,7 +149,7 @@ impl GCWork for Finalization { let mut w = E::new(vec![], false, mmtk); w.set_worker(worker); - finalizable_processor.scan(worker.tls, &mut w, is_nursery_gc(&*mmtk.plan)); + finalizable_processor.scan(worker.tls, &mut w, is_nursery_gc(mmtk.get_plan())); debug!( "Finished finalization, {} objects in candidates, {} objects ready to finalize", finalizable_processor.candidates.len(), @@ -172,9 +172,9 @@ impl GCWork for ForwardFinalization { let mut finalizable_processor = mmtk.finalizable_processor.lock().unwrap(); let mut w = E::new(vec![], false, mmtk); w.set_worker(worker); - finalizable_processor.forward_candidate(&mut w, is_nursery_gc(&*mmtk.plan)); + finalizable_processor.forward_candidate(&mut w, is_nursery_gc(mmtk.get_plan())); - finalizable_processor.forward_finalizable(&mut w, is_nursery_gc(&*mmtk.plan)); + finalizable_processor.forward_finalizable(&mut w, is_nursery_gc(mmtk.get_plan())); trace!("Finished forwarding finlizable"); } } diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index 12173e0add..5552c9a01c 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -1,3 +1,4 @@ +use std::cell::UnsafeCell; use std::ops::{Deref, DerefMut}; use std::sync::{Mutex, MutexGuard}; @@ -37,14 +38,21 @@ impl 
CommonFreeListPageResource { } pub struct FreeListPageResource { - common: CommonPageResource, - common_flpr: Box, + inner: UnsafeCell, sync: Mutex, _p: PhantomData, /// Protect memory on release, and unprotect on re-allocate. pub(crate) protect_memory_on_release: bool, } +unsafe impl Send for FreeListPageResource {} +unsafe impl Sync for FreeListPageResource {} + +struct FreeListPageResourceInner { + common: CommonPageResource, + common_flpr: Box, +} + struct FreeListPageResourceSync { pages_currently_on_freelist: usize, highwater_mark: i32, @@ -54,11 +62,25 @@ impl Deref for FreeListPageResource { type Target = CommonFreeListPageResource; fn deref(&self) -> &CommonFreeListPageResource { - &self.common_flpr + &self.inner().common_flpr } } impl DerefMut for FreeListPageResource { + fn deref_mut(&mut self) -> &mut CommonFreeListPageResource { + &mut self.inner.get_mut().common_flpr + } +} + +impl Deref for FreeListPageResourceInner { + type Target = CommonFreeListPageResource; + + fn deref(&self) -> &CommonFreeListPageResource { + &self.common_flpr + } +} + +impl DerefMut for FreeListPageResourceInner { fn deref_mut(&mut self) -> &mut CommonFreeListPageResource { &mut self.common_flpr } @@ -66,22 +88,23 @@ impl DerefMut for FreeListPageResource { impl PageResource for FreeListPageResource { fn common(&self) -> &CommonPageResource { - &self.common + &self.inner().common } fn common_mut(&mut self) -> &mut CommonPageResource { - &mut self.common + &mut self.inner.get_mut().common } fn get_available_physical_pages(&self) -> usize { let mut rtn = self.sync.lock().unwrap().pages_currently_on_freelist; - if !self.common.contiguous { + if !self.inner().common.contiguous { let chunks: usize = self + .inner() .common .vm_map .get_available_discontiguous_chunks() - .saturating_sub(self.common.vm_map.get_chunk_consumer_count()); + .saturating_sub(self.inner().common.vm_map.get_chunk_consumer_count()); rtn += chunks * PAGES_IN_CHUNK; - } else if self.common.growable && cfg!(target_pointer_width = "64") { + } else if self.inner().common.growable && cfg!(target_pointer_width = "64") { rtn = vm_layout().pages_in_space64() - self.reserved_pages(); } @@ -96,14 +119,14 @@ impl PageResource for FreeListPageResource { tls: VMThread, ) -> Result { // FIXME: We need a safe implementation - #[allow(clippy::cast_ref_to_mut)] - let self_mut: &mut Self = unsafe { &mut *(self as *const _ as *mut _) }; + let self_mut = unsafe { self.inner_mut() }; let mut sync = self.sync.lock().unwrap(); let mut new_chunk = false; let mut page_offset = self_mut.free_list.alloc(required_pages as _); - if page_offset == freelist::FAILURE && self.common.growable { - page_offset = - self_mut.allocate_contiguous_chunks(space_descriptor, required_pages, &mut sync); + if page_offset == freelist::FAILURE && self.inner().common.growable { + page_offset = unsafe { + self.allocate_contiguous_chunks(space_descriptor, required_pages, &mut sync) + }; new_chunk = true; } @@ -157,15 +180,17 @@ impl FreeListPageResource { // `CommonFreeListPageResource` lives as a member in space instances. 
// Since `Space` instances are always stored as global variables, so it is okay here // to turn `&CommonFreeListPageResource` into `&'static CommonFreeListPageResource` - vm_map.bind_freelist(unsafe { - &*(&common_flpr as &CommonFreeListPageResource as *const _) - }); + unsafe { + vm_map.bind_freelist(&*(&common_flpr as &CommonFreeListPageResource as *const _)); + } common_flpr }; let growable = cfg!(target_pointer_width = "64"); FreeListPageResource { - common: CommonPageResource::new(true, growable, vm_map), - common_flpr, + inner: UnsafeCell::new(FreeListPageResourceInner { + common: CommonPageResource::new(true, growable, vm_map), + common_flpr, + }), sync: Mutex::new(FreeListPageResourceSync { pages_currently_on_freelist: if growable { 0 } else { pages }, highwater_mark: UNINITIALIZED_WATER_MARK, @@ -185,14 +210,16 @@ impl FreeListPageResource { // `CommonFreeListPageResource` lives as a member in space instances. // Since `Space` instances are always stored as global variables, so it is okay here // to turn `&CommonFreeListPageResource` into `&'static CommonFreeListPageResource` - vm_map.bind_freelist(unsafe { - &*(&common_flpr as &CommonFreeListPageResource as *const _) - }); + unsafe { + vm_map.bind_freelist(&*(&common_flpr as &CommonFreeListPageResource as *const _)); + } common_flpr }; FreeListPageResource { - common: CommonPageResource::new(false, true, vm_map), - common_flpr, + inner: UnsafeCell::new(FreeListPageResourceInner { + common: CommonPageResource::new(false, true, vm_map), + common_flpr, + }), sync: Mutex::new(FreeListPageResourceSync { pages_currently_on_freelist: 0, highwater_mark: UNINITIALIZED_WATER_MARK, @@ -202,6 +229,14 @@ impl FreeListPageResource { } } + fn inner(&self) -> &FreeListPageResourceInner { + unsafe { &*self.inner.get() } + } + #[allow(clippy::mut_from_ref)] + unsafe fn inner_mut(&self) -> &mut FreeListPageResourceInner { + &mut *self.inner.get() + } + /// Protect the memory fn mprotect(&self, start: Address, pages: usize) { // We may fail here for ENOMEM, especially in PageProtect plan. 
@@ -236,13 +271,11 @@ impl FreeListPageResource { &self, space_descriptor: SpaceDescriptor, ) -> Result { - assert!(self.common.growable); + assert!(self.inner().common.growable); // FIXME: We need a safe implementation - #[allow(clippy::cast_ref_to_mut)] - let self_mut: &mut Self = unsafe { &mut *(self as *const _ as *mut _) }; let mut sync = self.sync.lock().unwrap(); let page_offset = - self_mut.allocate_contiguous_chunks(space_descriptor, PAGES_IN_CHUNK, &mut sync); + unsafe { self.allocate_contiguous_chunks(space_descriptor, PAGES_IN_CHUNK, &mut sync) }; if page_offset == freelist::FAILURE { return Result::Err(PRAllocFail); @@ -261,8 +294,8 @@ impl FreeListPageResource { }) } - fn allocate_contiguous_chunks( - &mut self, + unsafe fn allocate_contiguous_chunks( + &self, space_descriptor: SpaceDescriptor, pages: usize, sync: &mut MutexGuard, @@ -270,35 +303,44 @@ impl FreeListPageResource { let mut rtn = freelist::FAILURE; let required_chunks = crate::policy::space::required_chunks(pages); let region = self + .inner() .common .grow_discontiguous_space(space_descriptor, required_chunks); if !region.is_zero() { let region_start = conversions::bytes_to_pages(region - self.start); let region_end = region_start + (required_chunks * PAGES_IN_CHUNK) - 1; - self.free_list.set_uncoalescable(region_start as _); - self.free_list.set_uncoalescable(region_end as i32 + 1); + self.inner_mut() + .free_list + .set_uncoalescable(region_start as _); + self.inner_mut() + .free_list + .set_uncoalescable(region_end as i32 + 1); for p in (region_start..region_end).step_by(PAGES_IN_CHUNK) { if p != region_start { - self.free_list.clear_uncoalescable(p as _); + self.inner_mut().free_list.clear_uncoalescable(p as _); } - let liberated = self.free_list.free(p as _, true); // add chunk to our free list + let liberated = self.inner_mut().free_list.free(p as _, true); // add chunk to our free list debug_assert!(liberated as usize == PAGES_IN_CHUNK + (p - region_start)); sync.pages_currently_on_freelist += PAGES_IN_CHUNK; } - rtn = self.free_list.alloc(pages as _); // re-do the request which triggered this call + rtn = self.inner_mut().free_list.alloc(pages as _); // re-do the request which triggered this call } + rtn } - fn free_contiguous_chunk(&mut self, chunk: Address, sync: &mut FreeListPageResourceSync) { + unsafe fn free_contiguous_chunk(&self, chunk: Address, sync: &mut FreeListPageResourceSync) { let num_chunks = self.vm_map().get_contiguous_region_chunks(chunk); /* nail down all pages associated with the chunk, so it is no longer on our free list */ let mut chunk_start = conversions::bytes_to_pages(chunk - self.start); let chunk_end = chunk_start + (num_chunks * PAGES_IN_CHUNK); while chunk_start < chunk_end { - self.free_list.set_uncoalescable(chunk_start as _); + self.inner_mut() + .free_list + .set_uncoalescable(chunk_start as _); let tmp = self + .inner_mut() .free_list .alloc_from_unit(PAGES_IN_CHUNK as _, chunk_start as _) as usize; // then alloc the entire chunk @@ -307,7 +349,8 @@ impl FreeListPageResource { sync.pages_currently_on_freelist -= PAGES_IN_CHUNK; } /* now return the address space associated with the chunk for global reuse */ - self.common.release_discontiguous_chunks(chunk); + + self.inner_mut().common.release_discontiguous_chunks(chunk); } pub fn release_pages(&self, first: Address) { @@ -316,27 +359,26 @@ impl FreeListPageResource { let pages = self.free_list.size(page_offset as _); // if (VM.config.ZERO_PAGES_ON_RELEASE) // VM.memory.zero(false, first, 
Conversions.pagesToBytes(pages)); - debug_assert!(pages as usize <= self.common.accounting.get_committed_pages()); + debug_assert!(pages as usize <= self.inner().common.accounting.get_committed_pages()); if self.protect_memory_on_release { self.mprotect(first, pages as _); } let mut sync = self.sync.lock().unwrap(); - // FIXME - #[allow(clippy::cast_ref_to_mut)] - let me = unsafe { &mut *(self as *const _ as *mut Self) }; - self.common.accounting.release(pages as _); + // FIXME: We need a safe implementation + let me = unsafe { self.inner_mut() }; + self.inner().common.accounting.release(pages as _); let freed = me.free_list.free(page_offset as _, true); sync.pages_currently_on_freelist += pages as usize; - if !self.common.contiguous { + if !self.inner().common.contiguous { // only discontiguous spaces use chunks - me.release_free_chunks(first, freed as _, &mut sync); + self.release_free_chunks(first, freed as _, &mut sync); } } fn release_free_chunks( - &mut self, + &self, freed_page: Address, pages_freed: usize, sync: &mut FreeListPageResourceSync, @@ -362,7 +404,12 @@ impl FreeListPageResource { debug_assert!(next_region_start < freelist::MAX_UNITS as usize); if pages_freed == next_region_start - region_start { let start = self.start; - self.free_contiguous_chunk(start + conversions::pages_to_bytes(region_start), sync); + unsafe { + self.free_contiguous_chunk( + start + conversions::pages_to_bytes(region_start), + sync, + ); + } } } } diff --git a/src/util/heap/gc_trigger.rs b/src/util/heap/gc_trigger.rs index 9614d08ec5..c79254c756 100644 --- a/src/util/heap/gc_trigger.rs +++ b/src/util/heap/gc_trigger.rs @@ -286,22 +286,22 @@ impl MemBalancerStats { fn non_generational_mem_stats_on_gc_start(&mut self, mmtk: &'static MMTK) { self.allocation_pages = mmtk - .plan + .get_plan() .get_reserved_pages() .saturating_sub(self.gc_end_live_pages) as f64; trace!( "allocated pages = used {} - live in last gc {} = {}", - mmtk.plan.get_reserved_pages(), + mmtk.get_plan().get_reserved_pages(), self.gc_end_live_pages, self.allocation_pages ); } fn non_generational_mem_stats_on_gc_release(&mut self, mmtk: &'static MMTK) { - self.gc_release_live_pages = mmtk.plan.get_reserved_pages(); + self.gc_release_live_pages = mmtk.get_plan().get_reserved_pages(); trace!("live before release = {}", self.gc_release_live_pages); } fn non_generational_mem_stats_on_gc_end(&mut self, mmtk: &'static MMTK) { - self.gc_end_live_pages = mmtk.plan.get_reserved_pages(); + self.gc_end_live_pages = mmtk.get_plan().get_reserved_pages(); trace!("live pages = {}", self.gc_end_live_pages); self.collection_pages = self .gc_release_live_pages @@ -331,7 +331,7 @@ impl GCTriggerPolicy for MemBalancerTrigger { stats.allocation_time ); - if let Some(plan) = mmtk.plan.generational() { + if let Some(plan) = mmtk.get_plan().generational() { stats.generational_mem_stats_on_gc_start(plan); } else { stats.non_generational_mem_stats_on_gc_start(mmtk); @@ -342,7 +342,7 @@ impl GCTriggerPolicy for MemBalancerTrigger { fn on_gc_release(&self, mmtk: &'static MMTK) { trace!("=== on_gc_release ==="); self.access_stats(|stats| { - if let Some(plan) = mmtk.plan.generational() { + if let Some(plan) = mmtk.get_plan().generational() { stats.generational_mem_stats_on_gc_release(plan); } else { stats.non_generational_mem_stats_on_gc_release(mmtk); @@ -361,13 +361,13 @@ impl GCTriggerPolicy for MemBalancerTrigger { stats.collection_time ); - if let Some(plan) = mmtk.plan.generational() { + if let Some(plan) = mmtk.get_plan().generational() { if 
stats.generational_mem_stats_on_gc_end(plan) { self.compute_new_heap_limit( - mmtk.plan.get_reserved_pages(), + mmtk.get_plan().get_reserved_pages(), // We reserve an extra of min nursery. This ensures that we will not trigger // a full heap GC in the next GC (if available pages is smaller than min nursery, we will force a full heap GC) - mmtk.plan.get_collection_reserved_pages() + mmtk.get_plan().get_collection_reserved_pages() + mmtk.options.get_min_nursery_pages(), stats, ); @@ -375,8 +375,8 @@ impl GCTriggerPolicy for MemBalancerTrigger { } else { stats.non_generational_mem_stats_on_gc_end(mmtk); self.compute_new_heap_limit( - mmtk.plan.get_reserved_pages(), - mmtk.plan.get_collection_reserved_pages(), + mmtk.get_plan().get_reserved_pages(), + mmtk.get_plan().get_collection_reserved_pages(), stats, ); } diff --git a/src/util/heap/layout/fragmented_mapper.rs b/src/util/heap/layout/fragmented_mapper.rs index 3630f84cc2..1223eb74de 100644 --- a/src/util/heap/layout/fragmented_mapper.rs +++ b/src/util/heap/layout/fragmented_mapper.rs @@ -6,6 +6,7 @@ use crate::util::heap::layout::vm_layout::*; use crate::util::memory::MmapStrategy; use crate::util::Address; use atomic::{Atomic, Ordering}; +use std::cell::UnsafeCell; use std::fmt; use std::io::Result; use std::mem::transmute; @@ -46,6 +47,13 @@ type Slab = [Atomic; MMAP_NUM_CHUNKS]; pub struct FragmentedMapper { lock: Mutex<()>, + inner: UnsafeCell, +} + +unsafe impl Send for FragmentedMapper {} +unsafe impl Sync for FragmentedMapper {} + +struct InnerFragmentedMapper { free_slab_index: usize, free_slabs: Vec>>, slab_table: Vec>>, @@ -61,7 +69,7 @@ impl fmt::Debug for FragmentedMapper { impl Mmapper for FragmentedMapper { fn set_mmap_strategy(&self, strategy: MmapStrategy) { - self.strategy.store(strategy, Ordering::Relaxed); + self.inner().strategy.store(strategy, Ordering::Relaxed); } fn eagerly_mmap_all_spaces(&self, _space_map: &[Address]) {} @@ -132,7 +140,7 @@ impl Mmapper for FragmentedMapper { MapState::bulk_transition_to_quarantined( state_slices.as_slice(), mmap_start, - self.strategy.load(Ordering::Relaxed), + self.inner().strategy.load(Ordering::Relaxed), )?; } @@ -167,7 +175,7 @@ impl Mmapper for FragmentedMapper { MapState::transition_to_mapped( entry, mmap_start, - self.strategy.load(Ordering::Relaxed), + self.inner().strategy.load(Ordering::Relaxed), )?; } start = high; @@ -223,11 +231,13 @@ impl FragmentedMapper { pub fn new() -> Self { Self { lock: Mutex::new(()), - free_slab_index: 0, - free_slabs: (0..MAX_SLABS).map(|_| Some(Self::new_slab())).collect(), - slab_table: (0..SLAB_TABLE_SIZE).map(|_| None).collect(), - slab_map: vec![SENTINEL; SLAB_TABLE_SIZE], - strategy: Atomic::new(MmapStrategy::Normal), + inner: UnsafeCell::new(InnerFragmentedMapper { + free_slab_index: 0, + free_slabs: (0..MAX_SLABS).map(|_| Some(Self::new_slab())).collect(), + slab_table: (0..SLAB_TABLE_SIZE).map(|_| None).collect(), + slab_map: vec![SENTINEL; SLAB_TABLE_SIZE], + strategy: Atomic::new(MmapStrategy::Normal), + }), } } @@ -248,23 +258,24 @@ impl FragmentedMapper { } fn slab_table(&self, addr: Address) -> Option<&Slab> { - unsafe { self.mut_self() }.get_or_optionally_allocate_slab_table(addr, false) + self.get_or_optionally_allocate_slab_table(addr, false) } fn get_or_allocate_slab_table(&self, addr: Address) -> &Slab { - unsafe { self.mut_self() } - .get_or_optionally_allocate_slab_table(addr, true) + self.get_or_optionally_allocate_slab_table(addr, true) .unwrap() } - #[allow(clippy::cast_ref_to_mut)] + fn inner(&self) -> 
&InnerFragmentedMapper { + unsafe { &*self.inner.get() } + } #[allow(clippy::mut_from_ref)] - unsafe fn mut_self(&self) -> &mut Self { - &mut *(self as *const _ as *mut _) + fn inner_mut(&self) -> &mut InnerFragmentedMapper { + unsafe { &mut *self.inner.get() } } fn get_or_optionally_allocate_slab_table( - &mut self, + &self, addr: Address, allocate: bool, ) -> Option<&Slab> { @@ -274,25 +285,27 @@ impl FragmentedMapper { let mut index = hash; // Use 'index' to iterate over the hash table so that we remember where we started loop { /* Check for a hash-table hit. Should be the frequent case. */ - if base == self.slab_map[index] { + if base == self.inner().slab_map[index] { return self.slab_table_for(addr, index); } let _guard = self.lock.lock().unwrap(); /* Check whether another thread has allocated a slab while we were acquiring the lock */ - if base == self.slab_map[index] { + if base == self.inner().slab_map[index] { // drop(guard); return self.slab_table_for(addr, index); } /* Check for a free slot */ - if self.slab_map[index] == SENTINEL { + if self.inner().slab_map[index] == SENTINEL { if !allocate { // drop(guard); return None; } - unsafe { self.mut_self() }.commit_free_slab(index); - self.slab_map[index] = base; + unsafe { + self.commit_free_slab(index); + } + self.inner_mut().slab_map[index] = base; return self.slab_table_for(addr, index); } // lock.release(); @@ -303,8 +316,8 @@ impl FragmentedMapper { } fn slab_table_for(&self, _addr: Address, index: usize) -> Option<&Slab> { - debug_assert!(self.slab_table[index].is_some()); - self.slab_table[index].as_ref().map(|x| x as &Slab) + debug_assert!(self.inner().slab_table[index].is_some()); + self.inner().slab_table[index].as_ref().map(|x| x as &Slab) } /** @@ -312,18 +325,21 @@ impl FragmentedMapper { * at the correct index in the slabTable. * @param index slab table index */ - fn commit_free_slab(&mut self, index: usize) { + /// # Safety + /// + /// Caller must ensure that only one thread is calling this function at a time. + unsafe fn commit_free_slab(&self, index: usize) { assert!( - self.free_slab_index < MAX_SLABS, + self.inner().free_slab_index < MAX_SLABS, "All free slabs used: virtual address space is exhausled." ); - debug_assert!(self.slab_table[index].is_none()); - debug_assert!(self.free_slabs[self.free_slab_index].is_some()); + debug_assert!(self.inner().slab_table[index].is_none()); + debug_assert!(self.inner().free_slabs[self.inner().free_slab_index].is_some()); ::std::mem::swap( - &mut self.slab_table[index], - &mut self.free_slabs[self.free_slab_index], + &mut self.inner_mut().slab_table[index], + &mut self.inner_mut().free_slabs[self.inner().free_slab_index], ); - self.free_slab_index += 1; + self.inner_mut().free_slab_index += 1; } fn chunk_index_to_address(base: Address, chunk: usize) -> Address { diff --git a/src/util/heap/layout/map.rs b/src/util/heap/layout/map.rs index 7b25d3be8f..889800f59c 100644 --- a/src/util/heap/layout/map.rs +++ b/src/util/heap/layout/map.rs @@ -17,9 +17,17 @@ pub trait VMMap: Sync { /// Bind a created freelist with the page resource. /// This must called after create_freelist() or create_parent_freelist(). - fn bind_freelist(&self, pr: &'static CommonFreeListPageResource); - - fn allocate_contiguous_chunks( + /// + /// # Safety + /// + /// * `pr` must be a valid pointer to a CommonFreeListPageResource and be alive + /// for the duration of the VMMap. 
+ unsafe fn bind_freelist(&self, pr: *const CommonFreeListPageResource); + + /// # Safety + /// + /// Caller must ensure that only one thread is calling this method. + unsafe fn allocate_contiguous_chunks( &self, descriptor: SpaceDescriptor, chunks: usize, @@ -42,7 +50,10 @@ pub trait VMMap: Sync { fn free_all_chunks(&self, any_chunk: Address); - fn free_contiguous_chunks(&self, start: Address) -> usize; + /// # Safety + /// + /// Caller must ensure that only one thread is calling this method. + unsafe fn free_contiguous_chunks(&self, start: Address) -> usize; fn boot(&self) {} diff --git a/src/util/heap/layout/map32.rs b/src/util/heap/layout/map32.rs index d25e0a55f7..c4aa08f52f 100644 --- a/src/util/heap/layout/map32.rs +++ b/src/util/heap/layout/map32.rs @@ -8,19 +8,26 @@ use crate::util::heap::layout::vm_layout::*; use crate::util::heap::space_descriptor::SpaceDescriptor; use crate::util::int_array_freelist::IntArrayFreeList; use crate::util::Address; +use std::cell::UnsafeCell; +use std::ptr::NonNull; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Mutex, MutexGuard}; pub struct Map32 { + sync: Mutex<()>, + inner: UnsafeCell, +} + +#[doc(hidden)] +pub struct Map32Inner { prev_link: Vec, next_link: Vec, region_map: IntArrayFreeList, global_page_map: IntArrayFreeList, shared_discontig_fl_count: usize, - shared_fl_map: Vec>, + shared_fl_map: Vec>>, total_available_discontiguous_chunks: usize, finalized: bool, - sync: Mutex<()>, descriptor_map: Vec, // TODO: Is this the right place for this field? @@ -30,30 +37,42 @@ pub struct Map32 { cumulative_committed_pages: AtomicUsize, } +unsafe impl Send for Map32 {} +unsafe impl Sync for Map32 {} + impl Map32 { pub fn new() -> Self { let max_chunks = vm_layout().max_chunks(); Map32 { - prev_link: vec![0; max_chunks], - next_link: vec![0; max_chunks], - region_map: IntArrayFreeList::new(max_chunks, max_chunks as _, 1), - global_page_map: IntArrayFreeList::new(1, 1, MAX_SPACES), - shared_discontig_fl_count: 0, - shared_fl_map: vec![None; MAX_SPACES], - total_available_discontiguous_chunks: 0, - finalized: false, + inner: UnsafeCell::new(Map32Inner { + prev_link: vec![0; max_chunks], + next_link: vec![0; max_chunks], + region_map: IntArrayFreeList::new(max_chunks, max_chunks as _, 1), + global_page_map: IntArrayFreeList::new(1, 1, MAX_SPACES), + shared_discontig_fl_count: 0, + shared_fl_map: vec![None; MAX_SPACES], + total_available_discontiguous_chunks: 0, + finalized: false, + descriptor_map: vec![SpaceDescriptor::UNINITIALIZED; max_chunks], + cumulative_committed_pages: AtomicUsize::new(0), + }), sync: Mutex::new(()), - descriptor_map: vec![SpaceDescriptor::UNINITIALIZED; max_chunks], - cumulative_committed_pages: AtomicUsize::new(0), } } } +impl std::ops::Deref for Map32 { + type Target = Map32Inner; + fn deref(&self) -> &Self::Target { + unsafe { &*self.inner.get() } + } +} + impl VMMap for Map32 { fn insert(&self, start: Address, extent: usize, descriptor: SpaceDescriptor) { // Each space will call this on exclusive address ranges. It is fine to mutate the descriptor map, // as each space will update different indices. 
- let self_mut: &mut Self = unsafe { self.mut_self() }; + let self_mut: &mut Map32Inner = unsafe { self.mut_self() }; let mut e = 0; while e < extent { let index = (start + e).chunk_index(); @@ -88,17 +107,17 @@ impl VMMap for Map32 { Box::new(IntArrayFreeList::new(units, grain, 1)) } - fn bind_freelist(&self, pr: &'static CommonFreeListPageResource) { - let ordinal: usize = pr + unsafe fn bind_freelist(&self, pr: *const CommonFreeListPageResource) { + let ordinal: usize = (*pr) .free_list .downcast_ref::() .unwrap() .get_ordinal() as usize; - let self_mut: &mut Self = unsafe { self.mut_self() }; - self_mut.shared_fl_map[ordinal] = Some(pr); + let self_mut: &mut Map32Inner = self.mut_self(); + self_mut.shared_fl_map[ordinal] = Some(NonNull::new_unchecked(pr as *mut _)); } - fn allocate_contiguous_chunks( + unsafe fn allocate_contiguous_chunks( &self, descriptor: SpaceDescriptor, chunks: usize, @@ -108,7 +127,7 @@ impl VMMap for Map32 { let chunk = self_mut.region_map.alloc(chunks as _); debug_assert!(chunk != 0); if chunk == -1 { - return unsafe { Address::zero() }; + return Address::zero(); } self_mut.total_available_discontiguous_chunks -= chunks; let rtn = conversions::chunk_index_to_address(chunk as _); @@ -151,7 +170,7 @@ impl VMMap for Map32 { fn get_chunk_consumer_count(&self) -> usize { self.shared_discontig_fl_count } - + #[allow(clippy::while_immutable_condition)] fn free_all_chunks(&self, any_chunk: Address) { debug!("free_all_chunks: {}", any_chunk); let (_sync, self_mut) = self.mut_self_with_sync(); @@ -160,28 +179,28 @@ impl VMMap for Map32 { let chunk = any_chunk.chunk_index(); while self_mut.next_link[chunk] != 0 { let x = self_mut.next_link[chunk]; - self_mut.free_contiguous_chunks_no_lock(x); + self.free_contiguous_chunks_no_lock(x); } while self_mut.prev_link[chunk] != 0 { let x = self_mut.prev_link[chunk]; - self_mut.free_contiguous_chunks_no_lock(x); + self.free_contiguous_chunks_no_lock(x); } - self_mut.free_contiguous_chunks_no_lock(chunk as _); + self.free_contiguous_chunks_no_lock(chunk as _); } } - fn free_contiguous_chunks(&self, start: Address) -> usize { + unsafe fn free_contiguous_chunks(&self, start: Address) -> usize { debug!("free_contiguous_chunks: {}", start); - let (_sync, self_mut) = self.mut_self_with_sync(); + let (_sync, _) = self.mut_self_with_sync(); debug_assert!(start == conversions::chunk_align_down(start)); let chunk = start.chunk_index(); - self_mut.free_contiguous_chunks_no_lock(chunk as _) + self.free_contiguous_chunks_no_lock(chunk as _) } fn finalize_static_space_map(&self, from: Address, to: Address) { // This is only called during boot process by a single thread. // It is fine to get a mutable reference. - let self_mut: &mut Self = unsafe { self.mut_self() }; + let self_mut: &mut Map32Inner = unsafe { self.mut_self() }; /* establish bounds of discontiguous space */ let start_address = from; let first_chunk = start_address.chunk_index(); @@ -197,13 +216,9 @@ impl VMMap for Map32 { // Yi: I am not doing this refactoring right now, as I am not familiar with flatten() and // there is no test to ensure the refactoring will be correct. 
#[allow(clippy::manual_flatten)] - for fl in self_mut.shared_fl_map.iter() { - if let Some(fl) = fl { - #[allow(clippy::cast_ref_to_mut)] - let fl_mut: &mut CommonFreeListPageResource = unsafe { - &mut *(*fl as *const CommonFreeListPageResource - as *mut CommonFreeListPageResource) - }; + for fl in self_mut.shared_fl_map.iter().copied() { + if let Some(mut fl) = fl { + let fl_mut = unsafe { fl.as_mut() }; fl_mut.resize_freelist(start_address); } } @@ -262,43 +277,44 @@ impl Map32 { /// The caller needs to guarantee there is no race condition. Either only one single thread /// is using this method, or multiple threads are accessing mutally exclusive data (e.g. different indices in arrays). /// In other cases, use mut_self_with_sync(). - #[allow(clippy::cast_ref_to_mut)] #[allow(clippy::mut_from_ref)] - unsafe fn mut_self(&self) -> &mut Self { - &mut *(self as *const _ as *mut _) + unsafe fn mut_self(&self) -> &mut Map32Inner { + &mut *self.inner.get() } - fn mut_self_with_sync(&self) -> (MutexGuard<()>, &mut Self) { + fn mut_self_with_sync(&self) -> (MutexGuard<()>, &mut Map32Inner) { let guard = self.sync.lock().unwrap(); (guard, unsafe { self.mut_self() }) } - fn free_contiguous_chunks_no_lock(&mut self, chunk: i32) -> usize { - let chunks = self.region_map.free(chunk, false); - self.total_available_discontiguous_chunks += chunks as usize; - let next = self.next_link[chunk as usize]; - let prev = self.prev_link[chunk as usize]; - if next != 0 { - self.prev_link[next as usize] = prev - }; - if prev != 0 { - self.next_link[prev as usize] = next - }; - self.prev_link[chunk as usize] = 0; - self.next_link[chunk as usize] = 0; - for offset in 0..chunks { - let index = (chunk + offset) as usize; - let chunk_start = conversions::chunk_index_to_address(index); - debug!("Clear descriptor for Chunk {}", chunk_start); - self.descriptor_map[index] = SpaceDescriptor::UNINITIALIZED; - unsafe { SFT_MAP.clear(chunk_start) }; + fn free_contiguous_chunks_no_lock(&self, chunk: i32) -> usize { + unsafe { + let chunks = self.mut_self().region_map.free(chunk, false); + self.mut_self().total_available_discontiguous_chunks += chunks as usize; + let next = self.next_link[chunk as usize]; + let prev = self.prev_link[chunk as usize]; + if next != 0 { + self.mut_self().prev_link[next as usize] = prev + }; + if prev != 0 { + self.mut_self().next_link[prev as usize] = next + }; + self.mut_self().prev_link[chunk as usize] = 0; + self.mut_self().next_link[chunk as usize] = 0; + for offset in 0..chunks { + let index = (chunk + offset) as usize; + let chunk_start = conversions::chunk_index_to_address(index); + debug!("Clear descriptor for Chunk {}", chunk_start); + self.mut_self().descriptor_map[index] = SpaceDescriptor::UNINITIALIZED; + SFT_MAP.clear(chunk_start); + } + chunks as _ } - chunks as _ } fn get_discontig_freelist_pr_ordinal(&self) -> usize { // This is only called during creating a page resource/space/plan/mmtk instance, which is single threaded. 
- let self_mut: &mut Self = unsafe { self.mut_self() }; + let self_mut: &mut Map32Inner = unsafe { self.mut_self() }; self_mut.shared_discontig_fl_count += 1; self.shared_discontig_fl_count } diff --git a/src/util/heap/layout/map64.rs b/src/util/heap/layout/map64.rs index aa04f406cf..ea9db40044 100644 --- a/src/util/heap/layout/map64.rs +++ b/src/util/heap/layout/map64.rs @@ -10,13 +10,19 @@ use crate::util::memory::MmapStrategy; use crate::util::raw_memory_freelist::RawMemoryFreeList; use crate::util::rust_util::zeroed_alloc::new_zeroed_vec; use crate::util::Address; +use std::cell::UnsafeCell; +use std::ptr::NonNull; use std::sync::atomic::{AtomicUsize, Ordering}; const NON_MAP_FRACTION: f64 = 1.0 - 8.0 / 4096.0; pub struct Map64 { - fl_page_resources: Vec>, - fl_map: Vec>, + inner: UnsafeCell, +} + +struct Map64Inner { + fl_page_resources: Vec>>, + fl_map: Vec>>, finalized: bool, descriptor_map: Vec, base_address: Vec
, @@ -29,6 +35,9 @@ pub struct Map64 { cumulative_committed_pages: AtomicUsize, } +unsafe impl Send for Map64 {} +unsafe impl Sync for Map64 {} + impl Map64 { pub fn new() -> Self { let mut high_water = vec![Address::ZERO; MAX_SPACES]; @@ -41,18 +50,20 @@ impl Map64 { } Self { - // Note: descriptor_map is very large. Although it is initialized to - // SpaceDescriptor(0), the compiler and the standard library are not smart enough to - // elide the storing of 0 for each of the element. Using standard vector creation, - // such as `vec![SpaceDescriptor::UNINITIALIZED; MAX_CHUNKS]`, will cause severe - // slowdown during start-up. - descriptor_map: unsafe { new_zeroed_vec::(vm_layout().max_chunks()) }, - high_water, - base_address, - fl_page_resources: vec![None; MAX_SPACES], - fl_map: vec![None; MAX_SPACES], - finalized: false, - cumulative_committed_pages: AtomicUsize::new(0), + inner: UnsafeCell::new(Map64Inner { + // Note: descriptor_map is very large. Although it is initialized to + // SpaceDescriptor(0), the compiler and the standard library are not smart enough to + // elide the storing of 0 for each of the element. Using standard vector creation, + // such as `vec![SpaceDescriptor::UNINITIALIZED; MAX_CHUNKS]`, will cause severe + // slowdown during start-up. + descriptor_map: unsafe { new_zeroed_vec::(vm_layout().max_chunks()) }, + high_water, + base_address, + fl_page_resources: vec![None; MAX_SPACES], + fl_map: vec![None; MAX_SPACES], + finalized: false, + cumulative_committed_pages: AtomicUsize::new(0), + }), } } } @@ -89,7 +100,7 @@ impl VMMap for Map64 { let heads = 1; let pages_per_block = RawMemoryFreeList::default_block_size(units as _, heads); - let list = Box::new(RawMemoryFreeList::new( + let mut list = Box::new(RawMemoryFreeList::new( start, start + list_extent, pages_per_block, @@ -99,9 +110,10 @@ impl VMMap for Map64 { MmapStrategy::Normal, )); - self_mut.fl_map[index] = + /*self_mut.fl_map[index] = Some(unsafe { &*(&list as &RawMemoryFreeList as *const RawMemoryFreeList) }); - + */ + self_mut.fl_map[index] = unsafe { Some(NonNull::new_unchecked(&mut *list)) }; /* Adjust the base address and highwater to account for the allocated chunks for the map */ let base = conversions::chunk_align_up(start + list_extent); @@ -110,13 +122,16 @@ impl VMMap for Map64 { list } - fn bind_freelist(&self, pr: &'static CommonFreeListPageResource) { - let index = Self::space_index(pr.get_start()).unwrap(); - let self_mut = unsafe { self.mut_self() }; - self_mut.fl_page_resources[index] = Some(pr); + unsafe fn bind_freelist(&self, pr: *const CommonFreeListPageResource) { + let index = Self::space_index((*pr).get_start()).unwrap(); + let self_mut = self.mut_self(); + self_mut.fl_page_resources[index] = Some(NonNull::new_unchecked(pr as _)); } - fn allocate_contiguous_chunks( + /// # Safety + /// + /// Caller must ensure that only one thread is calling this method. + unsafe fn allocate_contiguous_chunks( &self, descriptor: SpaceDescriptor, chunks: usize, @@ -125,20 +140,19 @@ impl VMMap for Map64 { debug_assert!(Self::space_index(descriptor.get_start()).unwrap() == descriptor.get_index()); // Each space will call this on exclusive address ranges. It is fine to mutate the descriptor map, // as each space will update different indices. 
-        let self_mut = unsafe { self.mut_self() };
+        let self_mut = self.mut_self();
         let index = descriptor.get_index();
-        let rtn = self.high_water[index];
+        let rtn = self.inner().high_water[index];
         let extent = chunks << LOG_BYTES_IN_CHUNK;
         self_mut.high_water[index] = rtn + extent;

         /* Grow the free list to accommodate the new chunks */
-        let free_list = self.fl_map[Self::space_index(descriptor.get_start()).unwrap()];
-        if let Some(free_list) = free_list {
-            let free_list =
-                unsafe { &mut *(free_list as *const _ as usize as *mut RawMemoryFreeList) };
+        let free_list = self.inner().fl_map[Self::space_index(descriptor.get_start()).unwrap()];
+        if let Some(mut free_list) = free_list {
+            let free_list = free_list.as_mut();
             free_list.grow_freelist(conversions::bytes_to_pages(extent) as _);
-            let base_page = conversions::bytes_to_pages(rtn - self.base_address[index]);
+            let base_page = conversions::bytes_to_pages(rtn - self.inner().base_address[index]);
             for offset in (0..(chunks * PAGES_IN_CHUNK)).step_by(PAGES_IN_CHUNK) {
                 free_list.set_uncoalescable((base_page + offset) as _);
                 /* The 32-bit implementation requires that pages are returned allocated to the caller */
@@ -172,18 +186,17 @@ impl VMMap for Map64 {
         unreachable!()
     }

-    fn free_contiguous_chunks(&self, _start: Address) -> usize {
+    unsafe fn free_contiguous_chunks(&self, _start: Address) -> usize {
         unreachable!()
     }

     fn boot(&self) {
         // This is only called during boot process by a single thread.
         // It is fine to get a mutable reference.
-        let self_mut: &mut Self = unsafe { self.mut_self() };
+        let self_mut: &mut Map64Inner = unsafe { self.mut_self() };
         for pr in 0..MAX_SPACES {
-            if let Some(fl) = self_mut.fl_map[pr] {
-                #[allow(clippy::cast_ref_to_mut)]
-                let fl_mut: &mut RawMemoryFreeList = unsafe { &mut *(fl as *const _ as *mut _) };
+            if let Some(mut fl) = self_mut.fl_map[pr] {
+                let fl_mut: &mut RawMemoryFreeList = unsafe { fl.as_mut() };
                 fl_mut.grow_freelist(0);
             }
         }
@@ -192,31 +205,30 @@ impl VMMap for Map64 {
     fn finalize_static_space_map(&self, _from: Address, _to: Address) {
         // This is only called during boot process by a single thread.
         // It is fine to get a mutable reference.
-        let self_mut: &mut Self = unsafe { self.mut_self() };
+        let self_mut: &mut Map64Inner = unsafe { self.mut_self() };
         for pr in 0..MAX_SPACES {
-            if let Some(fl) = self_mut.fl_page_resources[pr] {
-                #[allow(clippy::cast_ref_to_mut)]
-                let fl_mut: &mut CommonFreeListPageResource =
-                    unsafe { &mut *(fl as *const _ as *mut _) };
-                fl_mut.resize_freelist(conversions::chunk_align_up(
-                    self.fl_map[pr].unwrap().get_limit(),
-                ));
+            if let Some(mut fl) = self_mut.fl_page_resources[pr] {
+                let fl_mut = unsafe { fl.as_mut() };
+                fl_mut.resize_freelist(conversions::chunk_align_up(unsafe {
+                    self.inner().fl_map[pr].unwrap().as_ref().get_limit()
+                }));
             }
         }
         self_mut.finalized = true;
     }

     fn is_finalized(&self) -> bool {
-        self.finalized
+        self.inner().finalized
     }

     fn get_descriptor_for_address(&self, address: Address) -> SpaceDescriptor {
         let index = Self::space_index(address).unwrap();
-        self.descriptor_map[index]
+        self.inner().descriptor_map[index]
     }

     fn add_to_cumulative_committed_pages(&self, pages: usize) {
-        self.cumulative_committed_pages
+        self.inner()
+            .cumulative_committed_pages
             .fetch_add(pages, Ordering::Relaxed);
     }
 }
@@ -227,10 +239,13 @@ impl Map64 {
     /// The caller needs to guarantee there is no race condition. Either only one single thread
     /// is using this method, or multiple threads are accessing mutually exclusive data (e.g. different indices in arrays).
     /// In other cases, use mut_self_with_sync().
-    #[allow(clippy::cast_ref_to_mut)]
     #[allow(clippy::mut_from_ref)]
-    unsafe fn mut_self(&self) -> &mut Self {
-        &mut *(self as *const _ as *mut _)
+    unsafe fn mut_self(&self) -> &mut Map64Inner {
+        &mut *self.inner.get()
+    }
+
+    fn inner(&self) -> &Map64Inner {
+        unsafe { &*self.inner.get() }
     }

     fn space_index(addr: Address) -> Option<usize> {
diff --git a/src/util/heap/layout/mod.rs b/src/util/heap/layout/mod.rs
index e303d3f749..41ce58ee94 100644
--- a/src/util/heap/layout/mod.rs
+++ b/src/util/heap/layout/mod.rs
@@ -15,12 +15,12 @@ mod map32;
 mod map64;

 #[cfg(target_pointer_width = "32")]
-pub fn create_vm_map() -> Box<dyn VMMap> {
+pub fn create_vm_map() -> Box<dyn VMMap + Send + Sync> {
     Box::new(map32::Map32::new())
 }

 #[cfg(target_pointer_width = "64")]
-pub fn create_vm_map() -> Box<dyn VMMap> {
+pub fn create_vm_map() -> Box<dyn VMMap + Send + Sync> {
     if !vm_layout().force_use_contiguous_spaces {
         Box::new(map32::Map32::new())
     } else {
@@ -29,12 +29,12 @@ pub fn create_vm_map() -> Box<dyn VMMap> {
 }

 #[cfg(target_pointer_width = "32")]
-pub fn create_mmapper() -> Box<dyn Mmapper> {
+pub fn create_mmapper() -> Box<dyn Mmapper + Send + Sync> {
     Box::new(byte_map_mmapper::ByteMapMmapper::new())
 }

 #[cfg(target_pointer_width = "64")]
-pub fn create_mmapper() -> Box<dyn Mmapper> {
+pub fn create_mmapper() -> Box<dyn Mmapper + Send + Sync> {
     // TODO: ByteMapMmapper for 39-bit or less virtual space
     Box::new(fragmented_mapper::FragmentedMapper::new())
 }
diff --git a/src/util/heap/pageresource.rs b/src/util/heap/pageresource.rs
index 2925d571ac..9154620e0d 100644
--- a/src/util/heap/pageresource.rs
+++ b/src/util/heap/pageresource.rs
@@ -148,11 +148,13 @@ impl CommonPageResource {
     ) -> Address {
         let mut head_discontiguous_region = self.head_discontiguous_region.lock().unwrap();

-        let new_head: Address = self.vm_map.allocate_contiguous_chunks(
-            space_descriptor,
-            chunks,
-            *head_discontiguous_region,
-        );
+        let new_head: Address = unsafe {
+            self.vm_map.allocate_contiguous_chunks(
+                space_descriptor,
+                chunks,
+                *head_discontiguous_region,
+            )
+        };
         if new_head.is_zero() {
             return Address::ZERO;
         }
@@ -169,7 +171,9 @@ impl CommonPageResource {
         if chunk == *head_discontiguous_region {
             *head_discontiguous_region = self.vm_map.get_next_contiguous_region(chunk);
         }
-        self.vm_map.free_contiguous_chunks(chunk);
+        unsafe {
+            self.vm_map.free_contiguous_chunks(chunk);
+        }
     }

     pub fn release_all_chunks(&self) {
diff --git a/src/util/int_array_freelist.rs b/src/util/int_array_freelist.rs
index 20eb9d860a..e8a6e5b7cf 100644
--- a/src/util/int_array_freelist.rs
+++ b/src/util/int_array_freelist.rs
@@ -1,14 +1,17 @@
 use super::freelist::*;
-use std::mem;
+use std::{mem, ptr::NonNull};

 #[derive(Debug)]
 pub struct IntArrayFreeList {
     pub head: i32,
     pub heads: i32,
     pub table: Option<Vec<i32>>,
-    parent: Option<&'static IntArrayFreeList>,
+    parent: Option<NonNull<IntArrayFreeList>>,
 }

+unsafe impl Send for IntArrayFreeList {}
+unsafe impl Sync for IntArrayFreeList {}
+
 impl FreeList for IntArrayFreeList {
     fn head(&self) -> i32 {
         self.head
@@ -53,20 +56,16 @@ impl IntArrayFreeList {
     }
     fn table(&self) -> &Vec<i32> {
         match self.parent {
-            Some(p) => p.table(),
+            Some(p) => unsafe { p.as_ref().table() },
             None => self.table.as_ref().unwrap(),
         }
     }
     // FIXME: We need a safe implementation
-    #[allow(clippy::cast_ref_to_mut)]
+
     fn table_mut(&mut self) -> &mut Vec<i32> {
         match self.parent {
-            Some(p) => {
-                let parent_mut: &mut Self =
-                    unsafe { &mut *(p as *const IntArrayFreeList as *mut IntArrayFreeList) };
-                parent_mut.table_mut()
-            }
+            Some(mut p) => unsafe { p.as_mut().table_mut() },
             None => self.table.as_mut().unwrap(),
         }
     }
diff --git a/src/util/malloc/mod.rs b/src/util/malloc/mod.rs
index 3d538caedc..a224264fc9 100644
--- a/src/util/malloc/mod.rs
+++ b/src/util/malloc/mod.rs
@@ -27,7 +27,7 @@ pub fn malloc(size: usize) -> Address {
 pub fn counted_malloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> Address {
     let res = malloc(size);
     if !res.is_zero() {
-        mmtk.plan.base().increase_malloc_bytes_by(size);
+        mmtk.get_plan().base().increase_malloc_bytes_by(size);
     }
     res
 }
@@ -40,7 +40,7 @@ pub fn calloc(num: usize, size: usize) -> Address {
 pub fn counted_calloc<VM: VMBinding>(mmtk: &MMTK<VM>, num: usize, size: usize) -> Address {
     let res = calloc(num, size);
     if !res.is_zero() {
-        mmtk.plan.base().increase_malloc_bytes_by(num * size);
+        mmtk.get_plan().base().increase_malloc_bytes_by(num * size);
     }
     res
 }
@@ -59,10 +59,10 @@ pub fn realloc_with_old_size<VM: VMBinding>(
     let res = realloc(addr, size);

     if !addr.is_zero() {
-        mmtk.plan.base().decrease_malloc_bytes_by(old_size);
+        mmtk.get_plan().base().decrease_malloc_bytes_by(old_size);
     }
     if size != 0 && !res.is_zero() {
-        mmtk.plan.base().increase_malloc_bytes_by(size);
+        mmtk.get_plan().base().increase_malloc_bytes_by(size);
     }

     res
@@ -76,6 +76,6 @@ pub fn free(addr: Address) {
 pub fn free_with_size<VM: VMBinding>(mmtk: &MMTK<VM>, addr: Address, old_size: usize) {
     free(addr);
     if !addr.is_zero() {
-        mmtk.plan.base().decrease_malloc_bytes_by(old_size);
+        mmtk.get_plan().base().decrease_malloc_bytes_by(old_size);
     }
 }
diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs
index 219cbba0d8..4b2709022a 100644
--- a/src/util/reference_processor.rs
+++ b/src/util/reference_processor.rs
@@ -67,12 +67,15 @@ impl ReferenceProcessors {
     /// plans, this separate step is required.
     pub fn forward_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
         debug_assert!(
-            mmtk.plan.constraints().needs_forward_after_liveness,
+            mmtk.get_plan().constraints().needs_forward_after_liveness,
             "A plan with needs_forward_after_liveness=false does not need a separate forward step"
         );
-        self.soft.forward::<E>(trace, is_nursery_gc(&*mmtk.plan));
-        self.weak.forward::<E>(trace, is_nursery_gc(&*mmtk.plan));
-        self.phantom.forward::<E>(trace, is_nursery_gc(&*mmtk.plan));
+        self.soft
+            .forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
+        self.weak
+            .forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
+        self.phantom
+            .forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
     }

     // Methods for scanning weak references. It needs to be called in a decreasing order of reference strengths, i.e. soft > weak > phantom
@@ -81,18 +84,18 @@ impl ReferenceProcessors {
     pub fn scan_soft_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
         // For soft refs, it is up to the VM to decide when to reclaim this.
         // If this is not an emergency collection, we have no heap stress. We simply retain soft refs.
-        if !mmtk.plan.is_emergency_collection() {
+        if !mmtk.get_plan().is_emergency_collection() {
             // This step only retains the referents (keep the referents alive), it does not update its addresses.
             // We will call soft.scan() again with retain=false to update its addresses based on liveness.
-            self.soft.retain::<E>(trace, is_nursery_gc(&*mmtk.plan));
+            self.soft.retain::<E>(trace, is_nursery_gc(mmtk.get_plan()));
         }
         // This will update the references (and the referents).
-        self.soft.scan::<E>(trace, is_nursery_gc(&*mmtk.plan));
+        self.soft.scan::<E>(trace, is_nursery_gc(mmtk.get_plan()));
     }

     /// Scan weak references.
     pub fn scan_weak_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
-        self.weak.scan::<E>(trace, is_nursery_gc(&*mmtk.plan));
+        self.weak.scan::<E>(trace, is_nursery_gc(mmtk.get_plan()));
     }

     /// Scan phantom references.
@@ -101,7 +104,8 @@ impl ReferenceProcessors {
         trace: &mut E,
         mmtk: &'static MMTK<E::VM>,
     ) {
-        self.phantom.scan::<E>(trace, is_nursery_gc(&*mmtk.plan));
+        self.phantom
+            .scan::<E>(trace, is_nursery_gc(mmtk.get_plan()));
     }
 }
diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs
index 4cb0a19a88..f8f2ffdf88 100644
--- a/src/util/sanity/sanity_checker.rs
+++ b/src/util/sanity/sanity_checker.rs
@@ -63,7 +63,7 @@ impl<P: Plan> ScheduleSanityGC<P> {
 impl<P: Plan> GCWork<P::VM> for ScheduleSanityGC<P> {
     fn do_work(&mut self, worker: &mut GCWorker<P::VM>, mmtk: &'static MMTK<P::VM>) {
         let scheduler = worker.scheduler();
-        let plan = &mmtk.plan;
+        let plan = mmtk.get_plan();

         scheduler.reset_state();
@@ -122,7 +122,7 @@ impl<P: Plan> SanityPrepare<P> {
 impl<P: Plan> GCWork<P::VM> for SanityPrepare<P> {
     fn do_work(&mut self, _worker: &mut GCWorker<P::VM>, mmtk: &'static MMTK<P::VM>) {
         info!("Sanity GC prepare");
-        mmtk.plan.enter_sanity();
+        mmtk.get_plan().enter_sanity();
         {
             let mut sanity_checker = mmtk.sanity_checker.lock().unwrap();
             sanity_checker.refs.clear();
@@ -151,7 +151,7 @@ impl<P: Plan> SanityRelease<P> {
 impl<P: Plan> GCWork<P::VM> for SanityRelease<P> {
     fn do_work(&mut self, _worker: &mut GCWorker<P::VM>, mmtk: &'static MMTK<P::VM>) {
         info!("Sanity GC release");
-        mmtk.plan.leave_sanity();
+        mmtk.get_plan().leave_sanity();
         mmtk.sanity_checker.lock().unwrap().clear_roots_cache();

         for mutator in <P::VM as VMBinding>::VMActivePlan::mutators() {
             mmtk.scheduler.work_buckets[WorkBucketStage::Release]
@@ -205,7 +205,7 @@ impl<VM: VMBinding> ProcessEdgesWork for SanityGCProcessEdges<VM> {
         // Let plan check object
         assert!(
-            self.mmtk().plan.sanity_check_object(object),
+            self.mmtk().get_plan().sanity_check_object(object),
             "Invalid reference {:?}",
             object
         );
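Taken together, the map32.rs and map64.rs hunks follow one pattern: the mutable state moves into an inner struct behind an UnsafeCell, the `&mut *(self as *const _ as *mut _)` casts that previously needed `#[allow(clippy::cast_ref_to_mut)]` become `inner()` / `mut_self()` accessors, and the auto traits lost by adding UnsafeCell are restored with explicit `unsafe impl Send`/`Sync`. Below is a minimal standalone sketch of that shape, assuming the same safety contract the patch documents (a single thread mutating, or threads touching mutually exclusive indices); the `Table`/`TableInner` names are illustrative and are not part of the patch.

    use std::cell::UnsafeCell;

    /// Illustrative stand-in for Map64: all mutable state lives in an inner struct.
    struct TableInner {
        slots: Vec<usize>,
        finalized: bool,
    }

    struct Table {
        inner: UnsafeCell<TableInner>,
    }

    // SAFETY: callers uphold the same contract as Map64::mut_self(): either a single
    // thread mutates the table (e.g. during boot), or concurrent callers only touch
    // mutually exclusive parts of the inner state.
    unsafe impl Send for Table {}
    unsafe impl Sync for Table {}

    impl Table {
        fn new(len: usize) -> Self {
            Self {
                inner: UnsafeCell::new(TableInner {
                    slots: vec![0; len],
                    finalized: false,
                }),
            }
        }

        /// Shared view of the inner state.
        fn inner(&self) -> &TableInner {
            unsafe { &*self.inner.get() }
        }

        /// Mutable view of the inner state.
        ///
        /// # Safety
        /// Only one thread may call this at a time, or callers must only touch
        /// disjoint parts of the inner state.
        #[allow(clippy::mut_from_ref)]
        unsafe fn mut_self(&self) -> &mut TableInner {
            &mut *self.inner.get()
        }

        fn finalize(&self) {
            // Boot-time call on a single thread, mirroring finalize_static_space_map().
            let inner = unsafe { self.mut_self() };
            inner.finalized = true;
        }
    }

    fn main() {
        let table = Table::new(8);
        table.finalize();
        assert_eq!(table.inner().slots.len(), 8);
        assert!(table.inner().finalized);
    }

The usage in main mirrors the boot-time calls in the patch: a single thread mutates through mut_self(), and later reads go through inner().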
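Where a structure used to hold a `&'static` reference to data it does not own (the fl_map and fl_page_resources entries, and IntArrayFreeList::parent), the patch switches to `NonNull<T>` and states the aliasing contract next to the `unsafe impl Send`/`Sync`. A small sketch of that parent-pointer shape, again with illustrative names rather than the real IntArrayFreeList API:

    use std::ptr::NonNull;

    /// Illustrative stand-in for a freelist node with an optional parent link.
    struct FreeListNode {
        table: Option<Vec<i32>>,
        parent: Option<NonNull<FreeListNode>>,
    }

    impl FreeListNode {
        fn root(len: usize) -> Self {
            Self { table: Some(vec![0; len]), parent: None }
        }

        fn child_of(parent: &mut FreeListNode) -> Self {
            Self { table: None, parent: Some(NonNull::from(parent)) }
        }

        fn table(&self) -> &Vec<i32> {
            match self.parent {
                // SAFETY: the parent outlives the child and is not mutated concurrently,
                // mirroring the contract the patch documents at the unsafe impls.
                Some(p) => unsafe { p.as_ref().table() },
                None => self.table.as_ref().unwrap(),
            }
        }

        fn table_mut(&mut self) -> &mut Vec<i32> {
            match self.parent {
                // SAFETY: as above, plus no other reference to the parent's table is live.
                Some(mut p) => unsafe { p.as_mut().table_mut() },
                None => self.table.as_mut().unwrap(),
            }
        }
    }

    fn main() {
        let mut root = FreeListNode::root(4);
        let mut child = FreeListNode::child_of(&mut root);
        child.table_mut()[0] = 42;
        assert_eq!(child.table()[0], 42);
    }

As in the patch, the pointer is only dereferenced inside small unsafe blocks whose comments name the invariant, rather than forging a mutable reference from a shared one.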