diff --git a/src/binding.cc b/src/binding.cc
index a2161d176a..57e1670eda 100644
--- a/src/binding.cc
+++ b/src/binding.cc
@@ -359,6 +359,16 @@ bool v8__Isolate__HasPendingBackgroundTasks(v8::Isolate* isolate) {
   return isolate->HasPendingBackgroundTasks();
 }
 
+void v8__Locker__CONSTRUCT(uninit_t<v8::Locker>* locker, v8::Isolate* isolate) {
+  construct_in_place<v8::Locker>(locker, isolate);
+}
+
+void v8__Locker__DESTRUCT(v8::Locker* locker) { locker->~Locker(); }
+
+bool v8__Locker__IsLocked(v8::Isolate* isolate) {
+  return v8::Locker::IsLocked(isolate);
+}
+
 void v8__Isolate__RequestGarbageCollectionForTesting(
     v8::Isolate* isolate, v8::Isolate::GarbageCollectionType type) {
   isolate->RequestGarbageCollectionForTesting(type);
diff --git a/src/binding.hpp b/src/binding.hpp
index 2f27a6ed59..d025591b00 100644
--- a/src/binding.hpp
+++ b/src/binding.hpp
@@ -20,6 +20,8 @@ static size_t v8__TracedReference_SIZE = sizeof(v8::TracedReference<v8::Data>);
 
 static size_t v8__String__ValueView_SIZE = sizeof(v8::String::ValueView);
 
+static size_t v8__Locker__SIZE = sizeof(v8::Locker);
+
 static int v8__String__kMaxLength = v8::String::kMaxLength;
 
 static size_t v8__TypedArray__kMaxByteLength = v8::TypedArray::kMaxByteLength;
diff --git a/src/handle.rs b/src/handle.rs
index 437f4e95b7..1dcf56e263 100644
--- a/src/handle.rs
+++ b/src/handle.rs
@@ -212,6 +212,10 @@ pub struct Global<T> {
   isolate_handle: IsolateHandle,
 }
 
+// Globals can go between threads as long as a locker was used to acquire a SharedIsolate.
+unsafe impl<T> Send for Global<T> {}
+unsafe impl<T> Sync for Global<T> {}
+
 impl<T> Global<T> {
   /// Construct a new Global from an existing Handle.
   #[inline(always)]
diff --git a/src/isolate.rs b/src/isolate.rs
index a2de0ba959..c9fcaacfe0 100644
--- a/src/isolate.rs
+++ b/src/isolate.rs
@@ -8,6 +8,7 @@ use crate::handle::FinalizerCallback;
 use crate::handle::FinalizerMap;
 use crate::isolate_create_params::raw;
 use crate::isolate_create_params::CreateParams;
+use crate::locker::Locker;
 use crate::promise::PromiseRejectMessage;
 use crate::scope::data::ScopeData;
 use crate::snapshot::SnapshotCreator;
@@ -43,6 +44,7 @@ use crate::Value;
 
 use std::any::Any;
 use std::any::TypeId;
+use std::cell::UnsafeCell;
 use std::collections::HashMap;
 use std::ffi::c_void;
 use std::fmt::{self, Debug, Formatter};
@@ -651,6 +653,14 @@ impl Isolate {
     OwnedIsolate::new(Self::new_impl(params))
   }
 
+  /// Creates a new isolate that can be accessed via lockers.
+  ///
+  /// Unlike V8 isolates, these do not currently support re-entrancy.
+  /// Do not create multiple lockers to the same isolate in the same thread.
+  pub fn new_shared(params: CreateParams) -> SharedIsolate {
+    SharedIsolate::new(Self::new(params))
+  }
+
   #[allow(clippy::new_ret_no_self)]
   pub fn snapshot_creator(
     external_references: Option<&'static ExternalReferences>,
@@ -1626,8 +1636,72 @@
       true
     }
   }
+
+  /// If this isolate is currently locked by the locker api to the current thread.
+  pub fn is_locked(&self) -> bool {
+    Locker::is_locked(unsafe { &*self.0.isolate })
+  }
 }
+
+/// An isolate that can be shared between threads,
+/// only one thread can access the isolate at a time via a locker.
+pub struct SharedIsolate {
+  // We wrap an owned isolate to persist the cleanup operations of an owned isolate.
+  // Lockers having a lifetime parameter ensures this can only be cleaned up after all lockers are dropped.
+  isolate: UnsafeCell<OwnedIsolate>,
+}
+
+// OwnedIsolate doesn't support send and sync, but we're guarding them with lockers.
+unsafe impl Send for SharedIsolate {}
+unsafe impl Sync for SharedIsolate {}
+
+impl SharedIsolate {
+  /// Consume an owned isolate, allowing it to be shared between threads as threads take a locker to it.
+  pub(crate) fn new(owned_isolate: OwnedIsolate) -> Self {
+    Self {
+      isolate: UnsafeCell::new(owned_isolate),
+    }
+  }
+
+  #[allow(clippy::mut_from_ref)]
+  fn internal_unsafe_cxx_isolate_mut(&self) -> &mut Isolate {
+    unsafe { (*self.isolate.get()).deref_mut() }
+  }
+
+  fn internal_unsafe_cxx_isolate(&self) -> &Isolate {
+    unsafe { (*self.isolate.get()).deref() }
+  }
+
+  /// Acquire a lock on the isolate, this allows the current thread to use the isolate.
+  /// Threads attempting to lock an already locked isolate will block.
+  ///
+  /// Unlike V8 lockers, these do not currently support re-entrancy.
+  /// Do not create multiple lockers to the same isolate in the same thread.
+  pub fn lock(&self) -> Locker {
+    // Only lock if the isolate is not currently locked in the current thread.
+    // Re-entrant lockers may be supported later.
+    assert!(!self.is_locked());
+    Locker::new(self.internal_unsafe_cxx_isolate_mut())
+  }
+
+  /// Gets if the shared isolate is locked by the current thread.
+  pub fn is_locked(&self) -> bool {
+    Locker::is_locked(self.internal_unsafe_cxx_isolate())
+  }
+
+  /// Gets a thread safe handle to the isolate, this can be done without acquiring a lock on the isolate.
+  pub fn thread_safe_handle(&self) -> IsolateHandle {
+    self.internal_unsafe_cxx_isolate().thread_safe_handle()
+  }
+}
+
+// SharedIsolate should be automatically implement drop.
+// impl Drop for SharedIsolate {
+//   fn drop(&mut self) {
+//     println!("SharedIsolate dropped!");
+//   }
+// }
 
 /// Same as Isolate but gets disposed when it goes out of scope.
 #[derive(Debug)]
 pub struct OwnedIsolate {
diff --git a/src/lib.rs b/src/lib.rs
index c4f1ccb23d..07a0db9a55 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -48,6 +48,7 @@ mod handle;
 pub mod icu;
 mod isolate;
 mod isolate_create_params;
+mod locker;
 mod microtask;
 mod module;
 mod name;
@@ -119,11 +120,13 @@ pub use isolate::OwnedIsolate;
 pub use isolate::PromiseHook;
 pub use isolate::PromiseHookType;
 pub use isolate::PromiseRejectCallback;
+pub use isolate::SharedIsolate;
 pub use isolate::TimeZoneDetection;
 pub use isolate::UseCounterCallback;
 pub use isolate::UseCounterFeature;
 pub use isolate::WasmAsyncSuccess;
 pub use isolate_create_params::CreateParams;
+pub use locker::Locker;
 pub use microtask::MicrotaskQueue;
 pub use module::*;
 pub use object::*;
diff --git a/src/locker.rs b/src/locker.rs
new file mode 100644
index 0000000000..d4f2a06220
--- /dev/null
+++ b/src/locker.rs
@@ -0,0 +1,112 @@
+use std::ops::{Deref, DerefMut};
+
+use crate::isolate::Isolate;
+use crate::scope::data::ScopeData;
+
+/// A handle to a shared isolate, allowing access to the isolate in a thread safe way.
+///
+/// Unlike V8 isolates, these do not currently support re-entrancy.
+/// Do not create multiple lockers to the same isolate in the same thread.
+#[derive(Debug)]
+pub struct Locker<'a> {
+  _lock: raw::Locker,
+  // We maintain a mut reference to ensure we have exclusive ownership of the isolate during the lock.
+  locked: &'a mut Isolate,
+}
+
+impl<'a> Locker<'a> {
+  /// Claims the isolate, this should only be used from a shared isolate.
+  #[allow(invalid_reference_casting)]
+  pub(crate) fn new(isolate: &Isolate) -> Self {
+    let const_isolate = isolate as *const Isolate;
+    let mut_isolate = const_isolate as *mut Isolate;
+    let s = unsafe {
+      Self {
+        _lock: raw::Locker::new(isolate),
+        locked: &mut *mut_isolate,
+      }
+    };
+    ScopeData::new_root(s.locked);
+    unsafe { s.locked.enter() };
+    s
+  }
+
+  /// Returns a reference to the locked isolate.
+  pub fn isolate(&self) -> &Isolate {
+    self.locked
+  }
+
+  /// Returns a mutable reference to the locked isolate.
+  pub fn isolate_mut(&mut self) -> &mut Isolate {
+    self.locked
+  }
+
+  /// Returns if the isolate is locked by the current thread.
+  pub fn is_locked(isolate: &Isolate) -> bool {
+    raw::Locker::is_locked(isolate)
+  }
+}
+
+impl<'a> Drop for Locker<'a> {
+  fn drop(&mut self) {
+    // A new locker automatically enters the isolate, so be sure to exit the isolate when the locker is exited.
+    unsafe { self.exit() };
+    ScopeData::drop_root(self);
+  }
+}
+
+impl<'a> Deref for Locker<'a> {
+  type Target = Isolate;
+  fn deref(&self) -> &Self::Target {
+    self.isolate()
+  }
+}
+
+impl<'a> DerefMut for Locker<'a> {
+  fn deref_mut(&mut self) -> &mut Self::Target {
+    self.isolate_mut()
+  }
+}
+
+impl<'a> AsMut<Isolate> for Locker<'a> {
+  fn as_mut(&mut self) -> &mut Isolate {
+    self
+  }
+}
+
+mod raw {
+  use std::mem::MaybeUninit;
+
+  use crate::Isolate;
+
+  #[repr(C)]
+  #[derive(Debug)]
+  pub(super) struct Locker([u8; crate::binding::v8__Locker__SIZE]);
+
+  impl Locker {
+    pub fn new(isolate: &Isolate) -> Self {
+      unsafe {
+        let mut this = MaybeUninit::<Self>::uninit();
+        v8__Locker__CONSTRUCT(this.as_mut_ptr(), isolate);
+        // v8-locker.h disallows copying and assigning, but it does not disallow moving so this is hopefully safe.
+        this.assume_init()
+      }
+    }
+
+    pub fn is_locked(isolate: &Isolate) -> bool {
+      unsafe { v8__Locker__IsLocked(isolate) }
+    }
+  }
+
+  impl Drop for Locker {
+    fn drop(&mut self) {
+      unsafe { v8__Locker__DESTRUCT(self) }
+    }
+  }
+
+  extern "C" {
+    fn v8__Locker__CONSTRUCT(locker: *mut Locker, isolate: *const Isolate);
+    fn v8__Locker__DESTRUCT(locker: *mut Locker);
+    fn v8__Locker__IsLocked(isolate: *const Isolate) -> bool;
+  }
+}
diff --git a/tests/test_cross_thread_locker.rs b/tests/test_cross_thread_locker.rs
new file mode 100644
index 0000000000..87642e2038
--- /dev/null
+++ b/tests/test_cross_thread_locker.rs
@@ -0,0 +1,123 @@
+use std::{
+  sync::{Arc, Once},
+  thread::{self, JoinHandle},
+};
+
+static INIT: Once = Once::new();
+
+fn initialize_test() {
+  INIT.call_once(|| {
+    v8::V8::initialize_platform(
+      v8::new_default_platform(0, false).make_shared(),
+    );
+    v8::V8::initialize();
+  });
+}
+
+fn spawn_thread_locked<F, R>(
+  isolate: &Arc<v8::SharedIsolate>,
+  f: F,
+) -> JoinHandle<R>
+where
+  F: FnOnce(&mut v8::Locker) -> R + Send + Sync + 'static,
+  R: Send + 'static,
+{
+  let isolate = isolate.clone();
+  thread::spawn(move || {
+    let mut locker = isolate.lock();
+    f(&mut locker)
+  })
+}
+
+fn spawn_thread_with_scope<F, R>(
+  isolate: &Arc<v8::SharedIsolate>,
+  f: F,
+) -> JoinHandle<R>
+where
+  F: FnOnce(&mut v8::HandleScope) -> R + Send + Sync + 'static,
+  R: Send + 'static,
+{
+  spawn_thread_locked(isolate, |locker| {
+    let scope = &mut v8::HandleScope::new(locker.isolate_mut());
+    let context = v8::Context::new(scope, Default::default());
+    let scope = &mut v8::ContextScope::new(scope, context);
+    f(scope)
+  })
+}
+
+#[test]
+fn isolate_passed_between_threads_with_locker() {
+  initialize_test();
+  let isolate = Arc::new(v8::Isolate::new_shared(Default::default()));
+
+  let global = spawn_thread_with_scope(&isolate, move |scope| {
+    let name = v8::String::new(scope, "Thread 1 value").unwrap();
+    v8::Global::new(scope, name)
+  })
+  .join()
+  .unwrap();
+
+  let found = spawn_thread_with_scope(&isolate, move |scope| {
+    let name = v8::Local::new(scope, global);
+    name.to_rust_string_lossy(scope)
+  })
+  .join()
+  .unwrap();
+
+  assert_eq!(found, "Thread 1 value");
+}
+
+fn single_isolate_cross_thread_operation_spam(isolate: Arc<v8::SharedIsolate>) {
+  let global_handles = (0..100)
+    .map(|i| {
+      let val = i;
+      spawn_thread_with_scope(&isolate, move |scope| {
+        let name = v8::Number::new(scope, val as f64);
+        v8::Global::new(scope, name)
+      })
+    })
+    .collect::<Vec<_>>();
+
+  let globals = global_handles
+    .into_iter()
+    .map(|h| h.join().unwrap())
+    .collect::<Vec<_>>();
+
+  let number_handles = globals
+    .into_iter()
+    .map(|global| {
+      spawn_thread_with_scope(&isolate, move |scope| {
+        let local = v8::Local::new(scope, global);
+        local.number_value(scope).unwrap()
+      })
+    })
+    .collect::<Vec<_>>();
+
+  let numbers = number_handles
+    .into_iter()
+    .map(|h| h.join().unwrap())
+    .collect::<Vec<_>>();
+
+  for (val, item) in numbers.iter().enumerate() {
+    assert_eq!(val as f64, *item);
+  }
+}
+
+#[test]
+fn mass_spam_isolate() {
+  initialize_test();
+
+  // This is done multiple times to verify that disposal of an isolate doesn't raise errors.
+  let t1 = thread::spawn(|| {
+    single_isolate_cross_thread_operation_spam(Arc::new(
+      v8::Isolate::new_shared(Default::default()),
+    ));
+  });
+  let t2 = thread::spawn(|| {
+    single_isolate_cross_thread_operation_spam(Arc::new(
+      v8::Isolate::new_shared(Default::default()),
+    ));
+  });
+  t1.join().unwrap();
+  t2.join().unwrap();
+}