diff --git a/src/binding.cc b/src/binding.cc
index a2161d176a..57e1670eda 100644
--- a/src/binding.cc
+++ b/src/binding.cc
@@ -359,6 +359,16 @@ bool v8__Isolate__HasPendingBackgroundTasks(v8::Isolate* isolate) {
   return isolate->HasPendingBackgroundTasks();
 }
 
+void v8__Locker__CONSTRUCT(uninit_t<v8::Locker>* locker, v8::Isolate* isolate) {
+  construct_in_place<v8::Locker>(locker, isolate);
+}
+
+void v8__Locker__DESTRUCT(v8::Locker* locker) { locker->~Locker(); }
+
+bool v8__Locker__IsLocked(v8::Isolate* isolate) {
+  return v8::Locker::IsLocked(isolate);
+}
+
 void v8__Isolate__RequestGarbageCollectionForTesting(
     v8::Isolate* isolate, v8::Isolate::GarbageCollectionType type) {
   isolate->RequestGarbageCollectionForTesting(type);
diff --git a/src/isolate.rs b/src/isolate.rs
index a2de0ba959..aae54cbb26 100644
--- a/src/isolate.rs
+++ b/src/isolate.rs
@@ -8,6 +8,7 @@ use crate::handle::FinalizerCallback;
 use crate::handle::FinalizerMap;
 use crate::isolate_create_params::raw;
 use crate::isolate_create_params::CreateParams;
+use crate::locker::Locker;
 use crate::promise::PromiseRejectMessage;
 use crate::scope::data::ScopeData;
 use crate::snapshot::SnapshotCreator;
@@ -43,6 +44,7 @@ use crate::Value;
 
 use std::any::Any;
 use std::any::TypeId;
+use std::cell::UnsafeCell;
 use std::collections::HashMap;
 use std::ffi::c_void;
 use std::fmt::{self, Debug, Formatter};
@@ -651,6 +653,14 @@
     OwnedIsolate::new(Self::new_impl(params))
   }
 
+  /// Creates a new isolate that can be accessed via lockers.
+  ///
+  /// Unlike V8 lockers, these do not currently support re-entrancy.
+  /// Do not create multiple lockers to the same isolate in the same thread.
+  pub fn new_shared(params: CreateParams) -> SharedIsolate {
+    SharedIsolate::new(Self::new_impl(params))
+  }
+
   #[allow(clippy::new_ret_no_self)]
   pub fn snapshot_creator(
     external_references: Option<&'static ExternalReferences>,
@@ -1626,6 +1636,75 @@
       true
     }
   }
+
+  /// If this isolate is currently locked by the locker api to the current thread.
+  pub fn is_locked(&self) -> bool {
+    Locker::is_locked(unsafe { &*self.0.isolate })
+  }
+}
+
+/// An isolate that can be shared between threads,
+/// only one thread can access the isolate at a time via a locker.
+pub struct SharedIsolate {
+  // We wrap an owned isolate to persist the cleanup operations of an owned isolate.
+  // Lockers having a lifetime parameter ensures this can only be cleaned up after all lockers are dropped.
+  isolate: UnsafeCell<NonNull<Isolate>>,
+}
+
+// OwnedIsolate doesn't support send and sync, but we're guarding them with lockers.
+unsafe impl Send for SharedIsolate {}
+unsafe impl Sync for SharedIsolate {}
+
+impl SharedIsolate {
+  /// Consume an isolate, allowing it to be shared between threads as threads take a locker to the isolate.
+  pub(crate) fn new(cxx_isolate: *mut Isolate) -> Self {
+    let cxx_isolate = NonNull::new(cxx_isolate).unwrap();
+    Self {
+      isolate: UnsafeCell::new(cxx_isolate),
+    }
+  }
+
+  #[allow(clippy::mut_from_ref)]
+  fn internal_unsafe_isolate_mut(&self) -> &mut Isolate {
+    unsafe { (*self.isolate.get()).as_mut() }
+  }
+
+  fn internal_unsafe_isolate(&self) -> &Isolate {
+    unsafe { (*self.isolate.get()).as_ref() }
+  }
+
+  /// Acquire a lock on the isolate, this allows the current thread to use the isolate.
+  /// Threads attempting to lock an already locked isolate will block.
+  ///
+  /// Unlike V8 lockers, these do not currently support re-entrancy.
+  /// Do not create multiple lockers to the same isolate in the same thread.
+  pub fn lock(&self) -> Locker {
+    // Only lock if the isolate is not currently locked in the current thread.
+    // Re-entrant lockers may be supported later.
+    assert!(!self.is_locked());
+    Locker::new(self.internal_unsafe_isolate_mut())
+  }
+
+  /// Gets if the shared isolate is locked by the current thread.
+  pub fn is_locked(&self) -> bool {
+    Locker::is_locked(self.internal_unsafe_isolate())
+  }
+
+  /// Gets a thread safe handle to the isolate, this can be done without acquiring a lock on the isolate.
+  pub fn thread_safe_handle(&self) -> IsolateHandle {
+    self.internal_unsafe_isolate().thread_safe_handle()
+  }
+}
+
+impl Drop for SharedIsolate {
+  fn drop(&mut self) {
+    let isolate = self.internal_unsafe_isolate_mut();
+    unsafe {
+      // Stack roots are disposed by individual lockers.
+      isolate.dispose_annex();
+      isolate.dispose();
+    }
+  }
 }
 
 /// Same as Isolate but gets disposed when it goes out of scope.
diff --git a/src/lib.rs b/src/lib.rs
index c4f1ccb23d..07a0db9a55 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -48,6 +48,7 @@ mod handle;
 pub mod icu;
 mod isolate;
 mod isolate_create_params;
+mod locker;
 mod microtask;
 mod module;
 mod name;
@@ -119,11 +120,13 @@ pub use isolate::OwnedIsolate;
 pub use isolate::PromiseHook;
 pub use isolate::PromiseHookType;
 pub use isolate::PromiseRejectCallback;
+pub use isolate::SharedIsolate;
 pub use isolate::TimeZoneDetection;
 pub use isolate::UseCounterCallback;
 pub use isolate::UseCounterFeature;
 pub use isolate::WasmAsyncSuccess;
 pub use isolate_create_params::CreateParams;
+pub use locker::Locker;
 pub use microtask::MicrotaskQueue;
 pub use module::*;
 pub use object::*;
diff --git a/src/locker.rs b/src/locker.rs
new file mode 100644
index 0000000000..4db75fb736
--- /dev/null
+++ b/src/locker.rs
@@ -0,0 +1,112 @@
+use std::ops::{Deref, DerefMut};
+
+use crate::isolate::Isolate;
+use crate::scope::data::ScopeData;
+use crate::support::Opaque;
+
+#[repr(C)]
+#[derive(Debug)]
+struct LockerHandle(Opaque);
+
+/// A handle to a shared isolate, allowing access to the isolate in a thread safe way.
+///
+/// Unlike V8 lockers, these do not currently support re-entrancy.
+/// Do not create multiple lockers to the same isolate in the same thread.
+#[derive(Debug)]
+pub struct Locker<'a> {
+  _lock: raw::Locker,
+  // We maintain a mut reference to ensure we have exclusive ownership of the isolate during the lock.
+  locked: &'a mut Isolate,
+}
+
+impl<'a> Locker<'a> {
+  /// Claims the isolate, this should only be used from a shared isolate.
+  pub(crate) fn new(isolate: &'a mut Isolate) -> Self {
+    let s = Self {
+      _lock: raw::Locker::new(isolate),
+      locked: isolate,
+    };
+    ScopeData::new_root(s.locked);
+    unsafe { s.locked.enter() };
+    s
+  }
+
+  /// Returns a reference to the locked isolate.
+  pub fn isolate(&self) -> &Isolate {
+    self.locked
+  }
+
+  /// Returns a mutable reference to the locked isolate.
+  pub fn isolate_mut(&mut self) -> &mut Isolate {
+    self.locked
+  }
+
+  /// Returns if the isolate is locked by the current thread.
+  pub fn is_locked(isolate: &Isolate) -> bool {
+    raw::Locker::is_locked(isolate)
+  }
+}
+
+impl<'a> Drop for Locker<'a> {
+  fn drop(&mut self) {
+    // A new locker automatically enters the isolate, so be sure to exit the isolate when the locker is exited.
+    unsafe { self.exit() };
+    ScopeData::drop_root(self);
+  }
+}
+
+impl<'a> Deref for Locker<'a> {
+  type Target = Isolate;
+  fn deref(&self) -> &Self::Target {
+    self.isolate()
+  }
+}
+
+impl<'a> DerefMut for Locker<'a> {
+  fn deref_mut(&mut self) -> &mut Self::Target {
+    self.isolate_mut()
+  }
+}
+
+impl<'a> AsMut<Isolate> for Locker<'a> {
+  fn as_mut(&mut self) -> &mut Isolate {
+    self
+  }
+}
+
+mod raw {
+  use std::mem::MaybeUninit;
+
+  use crate::Isolate;
+
+  #[repr(C)]
+  #[derive(Debug)]
+  pub(super) struct Locker([MaybeUninit<usize>; 2]);
+
+  impl Locker {
+    pub fn new(isolate: &Isolate) -> Self {
+      unsafe {
+        let mut s = Self(MaybeUninit::uninit().assume_init());
+        v8__Locker__CONSTRUCT(&mut s, isolate);
+        // v8-locker.h disallows copying and assigning, but it does not disallow moving so this is hopefully safe.
+        s
+      }
+    }
+
+    pub fn is_locked(isolate: &Isolate) -> bool {
+      unsafe { v8__Locker__IsLocked(isolate) }
+    }
+  }
+
+  impl Drop for Locker {
+    fn drop(&mut self) {
+      unsafe { v8__Locker__DESTRUCT(self) }
+    }
+  }
+
+  extern "C" {
+    fn v8__Locker__CONSTRUCT(locker: *mut Locker, isolate: *const Isolate);
+    fn v8__Locker__DESTRUCT(locker: *mut Locker);
+    fn v8__Locker__IsLocked(isolate: *const Isolate) -> bool;
+  }
+}