diff --git a/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.cc b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.cc index 0254e278c8..a49a0797be 100644 --- a/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.cc +++ b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.cc @@ -57,7 +57,8 @@ void DecommitPages(uintptr_t address, size_t size) { void AddressPoolManager::Add(pool_handle handle, uintptr_t ptr, size_t length) { PA_DCHECK(!(ptr & kSuperPageOffsetMask)); PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask)); - PA_CHECK(handle > 0 && handle <= std::size(pools_)); + // Checked in GetPool + // PA_CHECK(handle > 0 && handle <= std::size(pools_)); Pool* pool = GetPool(handle); PA_CHECK(!pool->IsInitialized()); @@ -308,7 +309,9 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) { GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats); } #if BUILDFLAG(ENABLE_THREAD_ISOLATION) - GetPoolStats(kThreadIsolatedPoolHandle, &stats->thread_isolated_pool_stats); + for (size_t i = 0; i < kNumCompartments; ++i) { + GetPoolStats(PoolHandleForCompartment(i), &stats->compartment_pool_stats[i]); + } #endif return true; } @@ -555,15 +558,12 @@ void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) { } } +// TODO(SJC): Can we assert layout for the compartments here? #if BUILDFLAG(ENABLE_THREAD_ISOLATION) // This function just exists to static_assert the layout of the private fields // in Pool. void AddressPoolManager::AssertThreadIsolatedLayout() { - constexpr size_t last_pool_offset = - offsetof(AddressPoolManager, pools_) + sizeof(Pool) * (kNumPools - 1); - constexpr size_t alloc_bitset_offset = - last_pool_offset + offsetof(Pool, alloc_bitset_); - static_assert(alloc_bitset_offset % PA_THREAD_ISOLATED_ALIGN_SZ == 0); + static_assert(offsetof(AddressPoolManager, pools_) % PA_THREAD_ISOLATED_ALIGN_SZ == 0); static_assert(sizeof(AddressPoolManager) % PA_THREAD_ISOLATED_ALIGN_SZ == 0); } #endif // BUILDFLAG(ENABLE_THREAD_ISOLATION) diff --git a/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.h b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.h index aeaa29964d..f6f354faa0 100644 --- a/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.h +++ b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.h @@ -172,8 +172,17 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) #endif // BUILDFLAG(ENABLE_THREAD_ISOLATION) }; + struct PA_THREAD_ISOLATED_ALIGN AlignedPool { + Pool pool; + }; + PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) { PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools); +#if BUILDFLAG(ENABLE_THREAD_ISOLATION) + if (handle >= kCompartmentPool0Handle) { + return &protected_pools_[handle - kCompartmentPool0Handle].pool; + } +#endif return &pools_[handle - 1]; } @@ -181,16 +190,15 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) // initialized. void GetPoolStats(pool_handle handle, PoolStats* stats); - // If thread isolation support is enabled, we need to write-protect the - // isolated pool (which needs to be last). For this, we need to add padding in - // front of the pools so that the isolated one starts on a page boundary. - // We also skip the Lock at the beginning of the pool since it needs to be - // used in contexts where we didn't enable write access to the pool memory. 
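// A possible compartment-aware extension of AssertThreadIsolatedLayout() above,
// answering the layout TODO (a sketch only, assuming the protected_pools_ /
// AlignedPool layout introduced in this patch): every compartment pool must
// start on a PA_THREAD_ISOLATED_ALIGN_SZ boundary so that it can be
// write-protected independently of its neighbours.
static_assert(offsetof(AddressPoolManager, protected_pools_) == 0,
              "protected_pools_ must be the first member");
static_assert(sizeof(AddressPoolManager::AlignedPool) %
                      PA_THREAD_ISOLATED_ALIGN_SZ == 0,
              "each AlignedPool must cover whole protection granules");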
- char pad_[PA_THREAD_ISOLATED_ARRAY_PAD_SZ_WITH_OFFSET( - Pool, - kNumPools, - offsetof(Pool, alloc_bitset_))] = {}; +#if BUILDFLAG(ENABLE_THREAD_ISOLATION) + // This should be the first member and the AddressPoolManager must be + // PA_THREAD_ISOLATED_ALIGN. Each AlignedPool is sized such that we can + // protect each independently. + AlignedPool protected_pools_[kNumCompartments]; + Pool pools_[kNumPools - kNumCompartments]; +#else Pool pools_[kNumPools]; +#endif #endif // BUILDFLAG(HAS_64_BIT_POINTERS) diff --git a/external/chromium/src/base/allocator/partition_allocator/address_space_stats.h b/external/chromium/src/base/allocator/partition_allocator/address_space_stats.h index 0c3c205c1d..18eaa72c54 100644 --- a/external/chromium/src/base/allocator/partition_allocator/address_space_stats.h +++ b/external/chromium/src/base/allocator/partition_allocator/address_space_stats.h @@ -9,6 +9,7 @@ #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h" +#include "base/allocator/partition_allocator/partition_alloc_constants.h" namespace partition_alloc { @@ -38,7 +39,8 @@ struct AddressSpaceStats { #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(HAS_64_BIT_POINTERS) #if BUILDFLAG(ENABLE_THREAD_ISOLATION) - PoolStats thread_isolated_pool_stats; + PoolStats compartment_pool_stats[internal::kNumCompartments]; + // PoolStats thread_isolated_pool_stats; #endif }; diff --git a/external/chromium/src/base/allocator/partition_allocator/partition_address_space.cc b/external/chromium/src/base/allocator/partition_allocator/partition_address_space.cc index fe5ebd51f3..4590ee9f95 100644 --- a/external/chromium/src/base/allocator/partition_allocator/partition_address_space.cc +++ b/external/chromium/src/base/allocator/partition_allocator/partition_address_space.cc @@ -293,7 +293,7 @@ void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base, // It's possible that the thread isolated pool has been initialized first, in // which case the setup_ memory has been made read-only. Remove the protection // temporarily. - if (IsThreadIsolatedPoolInitialized()) { + if (IsAnyThreadIsolatedPoolInitialized()) { UnprotectThreadIsolatedGlobals(); } #endif @@ -312,7 +312,7 @@ void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base, #if BUILDFLAG(ENABLE_THREAD_ISOLATION) // Put the metadata protection back in place. - if (IsThreadIsolatedPoolInitialized()) { + if (IsAnyThreadIsolatedPoolInitialized()) { WriteProtectThreadIsolatedGlobals(setup_.thread_isolation_); } #endif @@ -321,35 +321,42 @@ void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base, #if BUILDFLAG(ENABLE_THREAD_ISOLATION) void PartitionAddressSpace::InitThreadIsolatedPool( ThreadIsolationOption thread_isolation) { + size_t compartment = thread_isolation.compartment; + PA_CHECK(compartment < kNumCompartments); + // The ThreadIsolated pool can't be initialized with conflicting settings. 
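// Usage sketch (illustrative only; the pkey value and compartment index below
// are assumptions, not part of this patch): each compartment now initializes
// its own isolated pool, keyed by the compartment index carried in
// ThreadIsolationOption.
int pkey = pkey_alloc(/*flags=*/0, /*access_rights=*/0);
ThreadIsolationOption option(pkey, /*compartment=*/1);
PartitionAddressSpace::InitThreadIsolatedPool(option);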
- if (IsThreadIsolatedPoolInitialized()) { - PA_CHECK(setup_.thread_isolation_ == thread_isolation); + if (IsThreadIsolatedPoolInitialized(compartment)) { + PA_CHECK(setup_.thread_isolation_[compartment] == thread_isolation); return; } size_t pool_size = ThreadIsolatedPoolSize(); - setup_.thread_isolated_pool_base_address_ = + setup_.thread_isolated_pool_base_address_[compartment] = AllocPages(pool_size, pool_size, PageAccessibilityConfiguration( PageAccessibilityConfiguration::kInaccessible), PageTag::kPartitionAlloc); - if (!setup_.thread_isolated_pool_base_address_) { + if (!setup_.thread_isolated_pool_base_address_[compartment]) { HandlePoolAllocFailure(); } - PA_DCHECK(!(setup_.thread_isolated_pool_base_address_ & (pool_size - 1))); - setup_.thread_isolation_ = thread_isolation; + PA_DCHECK(!(setup_.thread_isolated_pool_base_address_[compartment] & (pool_size - 1))); + setup_.thread_isolation_[compartment] = thread_isolation; AddressPoolManager::GetInstance().Add( - kThreadIsolatedPoolHandle, setup_.thread_isolated_pool_base_address_, + PoolHandleForCompartment(compartment), + setup_.thread_isolated_pool_base_address_[compartment], pool_size); - PA_DCHECK( - !IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ - 1)); - PA_DCHECK(IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_)); - PA_DCHECK(IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ + - pool_size - 1)); - PA_DCHECK(!IsInThreadIsolatedPool(setup_.thread_isolated_pool_base_address_ + - pool_size)); + PA_DCHECK(!IsInThreadIsolatedPool( + setup_.thread_isolated_pool_base_address_[compartment] - 1, compartment)); + PA_DCHECK(IsInThreadIsolatedPool( + setup_.thread_isolated_pool_base_address_[compartment], compartment)); + PA_DCHECK(IsInThreadIsolatedPool( + setup_.thread_isolated_pool_base_address_[compartment] + pool_size - 1, + compartment)); + PA_DCHECK(!IsInThreadIsolatedPool( + setup_.thread_isolated_pool_base_address_[compartment] + pool_size, + compartment)); // TODO(1362969): support PA_ENABLE_SHADOW_METADATA } @@ -388,7 +395,7 @@ void PartitionAddressSpace::UninitConfigurablePoolForTesting() { // It's possible that the thread isolated pool has been initialized first, in // which case the setup_ memory has been made read-only. Remove the protection // temporarily. - if (IsThreadIsolatedPoolInitialized()) { + if (IsAnyThreadIsolatedPoolInitialized()) { UnprotectThreadIsolatedGlobals(); } #endif @@ -397,7 +404,7 @@ void PartitionAddressSpace::UninitConfigurablePoolForTesting() { setup_.configurable_pool_base_mask_ = 0; #if BUILDFLAG(ENABLE_THREAD_ISOLATION) // Put the metadata protection back in place. 
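// The pattern used throughout this file (a sketch, using the names from this
// patch): every write to the write-protected setup_ globals is bracketed by an
// unprotect/re-protect pair, and both sides now ask whether *any* compartment
// pool is initialized rather than checking the single thread-isolated pool.
if (IsAnyThreadIsolatedPoolInitialized()) {
  UnprotectThreadIsolatedGlobals();
}
// ... mutate setup_ fields ...
if (IsAnyThreadIsolatedPoolInitialized()) {
  WriteProtectThreadIsolatedGlobals(setup_.thread_isolation_);
}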
- if (IsThreadIsolatedPoolInitialized()) { + if (IsAnyThreadIsolatedPoolInitialized()) { WriteProtectThreadIsolatedGlobals(setup_.thread_isolation_); } #endif @@ -405,17 +412,23 @@ void PartitionAddressSpace::UninitConfigurablePoolForTesting() { #if BUILDFLAG(ENABLE_THREAD_ISOLATION) void PartitionAddressSpace::UninitThreadIsolatedPoolForTesting() { - if (IsThreadIsolatedPoolInitialized()) { + if (IsAnyThreadIsolatedPoolInitialized()) { UnprotectThreadIsolatedGlobals(); #if BUILDFLAG(PA_DCHECK_IS_ON) ThreadIsolationSettings::settings.enabled = false; #endif - - FreePages(setup_.thread_isolated_pool_base_address_, - ThreadIsolatedPoolSize()); - AddressPoolManager::GetInstance().Remove(kThreadIsolatedPoolHandle); - setup_.thread_isolated_pool_base_address_ = kUninitializedPoolBaseAddress; - setup_.thread_isolation_.enabled = false; + for (auto thread_isolation : setup_.thread_isolation_) { + if (!IsThreadIsolatedPoolInitialized(thread_isolation.compartment)) { + continue; + } + Compartment c = thread_isolation.compartment; + FreePages(setup_.thread_isolated_pool_base_address_[c], + ThreadIsolatedPoolSize()); + AddressPoolManager::GetInstance().Remove(PoolHandleForCompartment(c)); + setup_.thread_isolated_pool_base_address_[c] = + kUninitializedPoolBaseAddress; + thread_isolation.enabled = false; + } } } #endif diff --git a/external/chromium/src/base/allocator/partition_allocator/partition_address_space.h b/external/chromium/src/base/allocator/partition_allocator/partition_address_space.h index 260842f02b..a4fe68b693 100644 --- a/external/chromium/src/base/allocator/partition_allocator/partition_address_space.h +++ b/external/chromium/src/base/allocator/partition_allocator/partition_address_space.h @@ -5,7 +5,9 @@ #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_ +#include #include +#include #include #include "base/allocator/partition_allocator/address_pool_manager_types.h" @@ -75,9 +77,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { pool = kConfigurablePoolHandle; base = setup_.configurable_pool_base_address_; #if BUILDFLAG(ENABLE_THREAD_ISOLATION) - } else if (IsInThreadIsolatedPool(address)) { - pool = kThreadIsolatedPoolHandle; - base = setup_.thread_isolated_pool_base_address_; + } else if (auto compartment = GetCompartmentForAddress(address)) { + pool = internal::PoolHandleForCompartment(*compartment); + base = setup_.thread_isolated_pool_base_address_[*compartment]; #endif } else { PA_NOTREACHED(); @@ -126,8 +128,18 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { } #if BUILDFLAG(ENABLE_THREAD_ISOLATION) - PA_ALWAYS_INLINE static bool IsThreadIsolatedPoolInitialized() { - return setup_.thread_isolated_pool_base_address_ != + PA_ALWAYS_INLINE static bool IsAnyThreadIsolatedPoolInitialized() { + for (Compartment c = 0; c < kNumCompartments; ++c) { + if (IsThreadIsolatedPoolInitialized(c)) { + return true; + } + } + return false; + } + + PA_ALWAYS_INLINE static bool + IsThreadIsolatedPoolInitialized(Compartment compartment) { + return setup_.thread_isolated_pool_base_address_[compartment] != kUninitializedPoolBaseAddress; } #endif @@ -203,8 +215,30 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { #if BUILDFLAG(ENABLE_THREAD_ISOLATION) // Returns false for nullptr. 
PA_ALWAYS_INLINE static bool IsInThreadIsolatedPool(uintptr_t address) { + for (size_t i = 0; i < kNumCompartments; ++i) { + if (IsInThreadIsolatedPool(address, i)) { + return true; + } + } + return false; + } + + // Returns false for nullptr. + PA_ALWAYS_INLINE static bool IsInThreadIsolatedPool(uintptr_t address, + size_t compartment) { + PA_CHECK(compartment < kNumCompartments); return (address & kThreadIsolatedPoolBaseMask) == - setup_.thread_isolated_pool_base_address_; + setup_.thread_isolated_pool_base_address_[compartment]; + } + + PA_ALWAYS_INLINE static std::optional + GetCompartmentForAddress(uintptr_t address) { + for (size_t i = 0; i < kNumCompartments; ++i) { + if (IsInThreadIsolatedPool(address, i)) { + return {i}; + } + } + return {}; } #endif @@ -332,8 +366,16 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { uintptr_t brp_pool_base_address_ = kUninitializedPoolBaseAddress; uintptr_t configurable_pool_base_address_ = kUninitializedPoolBaseAddress; #if BUILDFLAG(ENABLE_THREAD_ISOLATION) - uintptr_t thread_isolated_pool_base_address_ = - kUninitializedPoolBaseAddress; + uintptr_t thread_isolated_pool_base_address_[kNumCompartments] = { + kUninitializedPoolBaseAddress, kUninitializedPoolBaseAddress, + kUninitializedPoolBaseAddress, kUninitializedPoolBaseAddress, + kUninitializedPoolBaseAddress, kUninitializedPoolBaseAddress, + kUninitializedPoolBaseAddress, kUninitializedPoolBaseAddress, + kUninitializedPoolBaseAddress, kUninitializedPoolBaseAddress, + kUninitializedPoolBaseAddress, kUninitializedPoolBaseAddress, + kUninitializedPoolBaseAddress, kUninitializedPoolBaseAddress, + kUninitializedPoolBaseAddress, kUninitializedPoolBaseAddress, + }; #endif #if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) uintptr_t regular_pool_base_mask_ = 0; @@ -344,7 +386,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { #endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) uintptr_t configurable_pool_base_mask_ = 0; #if BUILDFLAG(ENABLE_THREAD_ISOLATION) - ThreadIsolationOption thread_isolation_; + std::array thread_isolation_; #endif }; #if BUILDFLAG(ENABLE_THREAD_ISOLATION) diff --git a/external/chromium/src/base/allocator/partition_allocator/partition_alloc_buildflags.h b/external/chromium/src/base/allocator/partition_allocator/partition_alloc_buildflags.h index 779a599236..10ccf71406 100644 --- a/external/chromium/src/base/allocator/partition_allocator/partition_alloc_buildflags.h +++ b/external/chromium/src/base/allocator/partition_allocator/partition_alloc_buildflags.h @@ -35,5 +35,6 @@ #define BUILDFLAG_INTERNAL_PCSCAN_STACK_SUPPORTED() (0) #define BUILDFLAG_INTERNAL_ENABLE_PKEYS() (1) #define BUILDFLAG_INTERNAL_ENABLE_THREAD_ISOLATION() (1) +#define BUILDFLAG_INTERNAL_WRAP_SHIM() (1) #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BUILDFLAGS_H_ diff --git a/external/chromium/src/base/allocator/partition_allocator/partition_alloc_constants.h b/external/chromium/src/base/allocator/partition_allocator/partition_alloc_constants.h index c581cb22e0..1183c75a71 100644 --- a/external/chromium/src/base/allocator/partition_allocator/partition_alloc_constants.h +++ b/external/chromium/src/base/allocator/partition_allocator/partition_alloc_constants.h @@ -14,6 +14,7 @@ #include "base/allocator/partition_allocator/page_allocator_constants.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h" +#include 
"base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "build/build_config.h" @@ -279,9 +280,26 @@ enum pool_handle : unsigned { // New pool_handles will be added here. #if BUILDFLAG(ENABLE_THREAD_ISOLATION) + kCompartmentPool0Handle, + kCompartmentPool1Handle, + kCompartmentPool2Handle, + kCompartmentPool3Handle, + kCompartmentPool4Handle, + kCompartmentPool5Handle, + kCompartmentPool6Handle, + kCompartmentPool7Handle, + kCompartmentPool8Handle, + kCompartmentPool9Handle, + kCompartmentPool10Handle, + kCompartmentPool11Handle, + kCompartmentPool12Handle, + kCompartmentPool13Handle, + kCompartmentPool14Handle, + kCompartmentPool15Handle, + // The thread isolated pool must come last since we write-protect its entry in // the metadata tables, e.g. AddressPoolManager::aligned_pools_ - kThreadIsolatedPoolHandle, + // kThreadIsolatedPoolHandle, #endif kMaxPoolHandle }; @@ -289,6 +307,21 @@ enum pool_handle : unsigned { // kNullPoolHandle doesn't have metadata, hence - 1 constexpr size_t kNumPools = kMaxPoolHandle - 1; +#if BUILDFLAG(ENABLE_THREAD_ISOLATION) +constexpr size_t kNumCompartments = + kCompartmentPool15Handle - kCompartmentPool0Handle + 1; + +PA_ALWAYS_INLINE pool_handle PoolHandleForCompartment(size_t compartment) { + PA_CHECK(compartment < kNumCompartments); + return static_cast(kCompartmentPool0Handle + compartment); +} + +PA_ALWAYS_INLINE bool IsCompartmentPoolHandle(pool_handle pool) { + return pool >= kCompartmentPool0Handle && + pool < kCompartmentPool0Handle + kNumCompartments; +} +#endif + // Maximum pool size. With exception of Configurable Pool, it is also // the actual size, unless PA_DYNAMICALLY_SELECT_POOL_SIZE is set, which // allows to choose a different size at initialization time for certain @@ -312,11 +345,11 @@ constexpr size_t kPoolMaxSize = 4 * kGiB; #endif constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize; -#if BUILDFLAG(ENABLE_THREAD_ISOLATION) -static_assert(kThreadIsolatedPoolHandle == kNumPools, - "The thread isolated pool must come last since we write-protect " - "its metadata."); -#endif +// #if BUILDFLAG(ENABLE_THREAD_ISOLATION) +// static_assert(kThreadIsolatedPoolHandle == kNumPools, +// "The thread isolated pool must come last since we write-protect " +// "its metadata."); +// #endif // Slots larger than this size will not receive MTE protection. 
Pages intended // for allocations larger than this constant should not be backed with PROT_MTE diff --git a/external/chromium/src/base/allocator/partition_allocator/partition_page.cc b/external/chromium/src/base/allocator/partition_allocator/partition_page.cc index 4e8ab54e91..a54aa7144f 100644 --- a/external/chromium/src/base/allocator/partition_allocator/partition_page.cc +++ b/external/chromium/src/base/allocator/partition_allocator/partition_page.cc @@ -322,7 +322,7 @@ void UnmapNow(uintptr_t reservation_start, { PA_DCHECK(pool == kRegularPoolHandle #if BUILDFLAG(ENABLE_THREAD_ISOLATION) - || pool == kThreadIsolatedPoolHandle + || IsCompartmentPoolHandle(pool) #endif #if BUILDFLAG(HAS_64_BIT_POINTERS) || diff --git a/external/chromium/src/base/allocator/partition_allocator/partition_root.cc b/external/chromium/src/base/allocator/partition_allocator/partition_root.cc index 340c4eb002..bd308ff699 100644 --- a/external/chromium/src/base/allocator/partition_allocator/partition_root.cc +++ b/external/chromium/src/base/allocator/partition_allocator/partition_root.cc @@ -852,7 +852,7 @@ void PartitionRoot::DestructForTesting() { #if BUILDFLAG(ENABLE_THREAD_ISOLATION) // The pages managed by thread isolated pool will be free-ed at // UninitThreadIsolatedForTesting(). Don't invoke FreePages() for the pages. - if (pool_handle == internal::kThreadIsolatedPoolHandle) { + if (internal::IsCompartmentPoolHandle(pool_handle)) { return; } PA_DCHECK(pool_handle < internal::kNumPools); diff --git a/external/chromium/src/base/allocator/partition_allocator/partition_root.h b/external/chromium/src/base/allocator/partition_allocator/partition_root.h index a3c96657cf..56269ba1c3 100644 --- a/external/chromium/src/base/allocator/partition_allocator/partition_root.h +++ b/external/chromium/src/base/allocator/partition_allocator/partition_root.h @@ -630,7 +630,9 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot { #endif #if BUILDFLAG(ENABLE_THREAD_ISOLATION) if (settings.thread_isolation.enabled) { - return internal::kThreadIsolatedPoolHandle; + return internal::PoolHandleForCompartment( + settings.thread_isolation.compartment); + // return internal::kThreadIsolatedPoolHandle; } #endif #if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) diff --git a/external/chromium/src/base/allocator/partition_allocator/reservation_offset_table.h b/external/chromium/src/base/allocator/partition_allocator/reservation_offset_table.h index 9baef4b284..6a29cfea4c 100644 --- a/external/chromium/src/base/allocator/partition_allocator/reservation_offset_table.h +++ b/external/chromium/src/base/allocator/partition_allocator/reservation_offset_table.h @@ -97,12 +97,15 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) } }; #if BUILDFLAG(HAS_64_BIT_POINTERS) - // If thread isolation support is enabled, we need to write-protect the tables - // of the thread isolated pool. For this, we need to pad the tables so that - // the thread isolated ones start on a page boundary. 
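// Why the per-compartment alignment matters (a sketch; the real protection path
// is WriteProtectThreadIsolatedMemory/MprotectWithThreadIsolation elsewhere in
// this patch, and `compartment` / `thread_isolation` here are assumed local
// variables): with each table page-aligned and padded to whole pages, a single
// compartment's metadata can be made read-only without touching its neighbours.
pkey_mprotect(&singleton_.protected_tables[compartment],
              sizeof(_AlignedReservationOffsetTable), PROT_READ,
              thread_isolation.pkey);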
- char pad_[PA_THREAD_ISOLATED_ARRAY_PAD_SZ(_ReservationOffsetTable, - kNumPools)] = {}; +#if BUILDFLAG(ENABLE_THREAD_ISOLATION) + struct PA_THREAD_ISOLATED_ALIGN _AlignedReservationOffsetTable { + struct _ReservationOffsetTable table; + }; + struct _AlignedReservationOffsetTable protected_tables[kNumCompartments]; + struct _ReservationOffsetTable tables[kNumPools - kNumCompartments]; +#else struct _ReservationOffsetTable tables[kNumPools]; +#endif static PA_CONSTINIT ReservationOffsetTable singleton_; #else // A single table for the entire 32-bit address space. @@ -113,6 +116,13 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) #if BUILDFLAG(HAS_64_BIT_POINTERS) PA_ALWAYS_INLINE uint16_t* GetReservationOffsetTable(pool_handle handle) { PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools); +#if BUILDFLAG(ENABLE_THREAD_ISOLATION) + if (handle >= kCompartmentPool0Handle) { + return ReservationOffsetTable::singleton_ + .protected_tables[handle - kCompartmentPool0Handle] + .table.offsets; + } +#endif return ReservationOffsetTable::singleton_.tables[handle - 1].offsets; } diff --git a/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim.cc b/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim.cc index fca41408d6..0bd74a62be 100644 --- a/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim.cc +++ b/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim.cc @@ -396,7 +396,7 @@ PA_ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) { #include "base/allocator/partition_allocator/shim/allocator_shim_override_cpp_symbols.h" #endif -#if BUILDFLAG(IS_ANDROID) +#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(WRAP_SHIM) // Android does not support symbol interposition. The way malloc symbols are // intercepted on Android is by using link-time -wrap flags. #include "base/allocator/partition_allocator/shim/allocator_shim_override_linker_wrapped_symbols.h" diff --git a/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h b/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h index 2ca091c909..b707da979c 100644 --- a/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h +++ b/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h @@ -22,6 +22,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocMalloc { static partition_alloc::PartitionRoot* OriginalAllocator(); // May return the same pointer as |Allocator()|. static partition_alloc::PartitionRoot* AlignedAllocator(); + // May return the same pointer as |Allocator()|. 
+ static partition_alloc::PartitionRoot* SharedAllocator(); }; PA_COMPONENT_EXPORT(PARTITION_ALLOC) diff --git a/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim_override_linker_wrapped_symbols.h b/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim_override_linker_wrapped_symbols.h index f074961841..47358f892a 100644 --- a/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim_override_linker_wrapped_symbols.h +++ b/external/chromium/src/base/allocator/partition_allocator/shim/allocator_shim_override_linker_wrapped_symbols.h @@ -18,6 +18,11 @@ #include #include +#if !BUILDFLAG(IS_ANDROID) +// For va_* +#include +#endif + #include "base/allocator/partition_allocator/shim/allocator_shim_internals.h" extern "C" { diff --git a/external/chromium/src/base/allocator/partition_allocator/thread_isolation/pkey.h b/external/chromium/src/base/allocator/partition_allocator/thread_isolation/pkey.h index 418c70c5a7..c4b07f3070 100644 --- a/external/chromium/src/base/allocator/partition_allocator/thread_isolation/pkey.h +++ b/external/chromium/src/base/allocator/partition_allocator/thread_isolation/pkey.h @@ -53,6 +53,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LiftPkeyRestrictionsScope { uint32_t saved_pkey_value_; }; +class PA_COMPONENT_EXPORT(PARTITION_ALLOC) DoNotLiftPkeyRestrictionsScope {}; + #endif // BUILDFLAG(PA_DCHECK_IS_ON) } // namespace partition_alloc::internal diff --git a/external/chromium/src/base/allocator/partition_allocator/thread_isolation/thread_isolation.cc b/external/chromium/src/base/allocator/partition_allocator/thread_isolation/thread_isolation.cc index 3d0c888197..0e326bbee4 100644 --- a/external/chromium/src/base/allocator/partition_allocator/thread_isolation/thread_isolation.cc +++ b/external/chromium/src/base/allocator/partition_allocator/thread_isolation/thread_isolation.cc @@ -52,30 +52,39 @@ int MprotectWithThreadIsolation(void* addr, #endif } +void WriteProtectThreadIsolatedGlobals( + std::array &thread_isolations) { + for (auto thread_isolation : thread_isolations) { + WriteProtectThreadIsolatedGlobals(thread_isolation); + } +} + void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption thread_isolation) { - WriteProtectThreadIsolatedVariable(thread_isolation, - PartitionAddressSpace::setup_); + // TODO(SJC): Protect compartment base addresses + // WriteProtectThreadIsolatedVariable(thread_isolation, + // PartitionAddressSpace::setup_); - AddressPoolManager::Pool* pool = - AddressPoolManager::GetInstance().GetPool(kThreadIsolatedPoolHandle); + pool_handle handle = PoolHandleForCompartment(thread_isolation.compartment); + AddressPoolManager::Pool *pool = + AddressPoolManager::GetInstance().GetPool(handle); WriteProtectThreadIsolatedVariable( - thread_isolation, *pool, - offsetof(AddressPoolManager::Pool, alloc_bitset_)); + thread_isolation, *pool); - uint16_t* pkey_reservation_offset_table = - GetReservationOffsetTable(kThreadIsolatedPoolHandle); + uint16_t *pkey_reservation_offset_table = GetReservationOffsetTable(handle); WriteProtectThreadIsolatedMemory( thread_isolation, pkey_reservation_offset_table, ReservationOffsetTable::kReservationOffsetTableLength); -#if BUILDFLAG(PA_DCHECK_IS_ON) - WriteProtectThreadIsolatedVariable(thread_isolation, - ThreadIsolationSettings::settings); -#endif +// #if BUILDFLAG(PA_DCHECK_IS_ON) +// WriteProtectThreadIsolatedVariable(thread_isolation, +// ThreadIsolationSettings::settings); +// #endif } void UnprotectThreadIsolatedGlobals() { - 
WriteProtectThreadIsolatedGlobals(ThreadIsolationOption(false)); + for (Compartment c = 0; c < kNumCompartments; ++c) { + WriteProtectThreadIsolatedGlobals(ThreadIsolationOption(kInvalidPkey, c)); + } } } // namespace partition_alloc::internal diff --git a/external/chromium/src/base/allocator/partition_allocator/thread_isolation/thread_isolation.h b/external/chromium/src/base/allocator/partition_allocator/thread_isolation/thread_isolation.h index 7c74e0a9bb..b780edac8e 100644 --- a/external/chromium/src/base/allocator/partition_allocator/thread_isolation/thread_isolation.h +++ b/external/chromium/src/base/allocator/partition_allocator/thread_isolation/thread_isolation.h @@ -6,9 +6,11 @@ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_ISOLATION_THREAD_ISOLATION_H_ #include "base/allocator/partition_allocator/partition_alloc_buildflags.h" +#include "base/allocator/partition_allocator/partition_alloc_constants.h" #if BUILDFLAG(ENABLE_THREAD_ISOLATION) +#include #include #include @@ -25,15 +27,19 @@ namespace partition_alloc { +using Compartment = size_t; + struct ThreadIsolationOption { constexpr ThreadIsolationOption() = default; explicit ThreadIsolationOption(bool enabled) : enabled(enabled) {} #if BUILDFLAG(ENABLE_PKEYS) - explicit ThreadIsolationOption(int pkey) : pkey(pkey) { + explicit ThreadIsolationOption(int pkey, size_t compartment) + : pkey(pkey), compartment(compartment) { enabled = pkey != internal::kInvalidPkey; } int pkey = -1; + Compartment compartment = 0; #endif // BUILDFLAG(ENABLE_PKEYS) bool enabled = false; @@ -61,11 +67,13 @@ struct PA_THREAD_ISOLATED_ALIGN ThreadIsolationSettings { #if BUILDFLAG(ENABLE_PKEYS) -using LiftThreadIsolationScope = LiftPkeyRestrictionsScope; +using LiftThreadIsolationScope = DoNotLiftPkeyRestrictionsScope; #endif // BUILDFLAG(ENABLE_PKEYS) #endif // BUILDFLAG(PA_DCHECK_IS_ON) +void WriteProtectThreadIsolatedGlobals( + std::array &thread_isolations); void WriteProtectThreadIsolatedGlobals(ThreadIsolationOption thread_isolation); void UnprotectThreadIsolatedGlobals(); [[nodiscard]] int MprotectWithThreadIsolation( diff --git a/libia2/ia2.c b/libia2/ia2.c index 8114eefede..11bd56ee3a 100644 --- a/libia2/ia2.c +++ b/libia2/ia2.c @@ -2,10 +2,10 @@ #define _GNU_SOURCE #endif #include -#include #include #include +#include "ia2_internal.h" #include "ia2.h" __attribute__((__used__)) uint32_t ia2_get_pkru() { @@ -78,17 +78,9 @@ size_t ia2_get_pkey() { } } -static const char *const shared_sections[][2] = { - {"__start_ia2_shared_data", "__stop_ia2_shared_data"}, -}; - -// The number of special ELF sections that may be shared by protect_pages -#define NUM_SHARED_SECTIONS \ - (sizeof(shared_sections) / sizeof(shared_sections[0])) - -// Reserve one extra shared range for the entire read-only segment that we are -// also sharing, in addition to the special-cased sections above. -#define NUM_SHARED_RANGES (NUM_SHARED_SECTIONS + 1) +// Reserve one extra shared range for the RELRO segment that we are +// also sharing, in addition to the special shared sections in PhdrSearchArgs. +#define NUM_SHARED_RANGES IA2_MAX_NUM_SHARED_SECTION_COUNT + 1 // The number of program headers to allocate space for in protect_pages. This is // only an estimate of the maximum value of dlpi_phnum below. 
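/* Caller-side sketch (condensed from ia2_compartment_init.inc further below;
 * the pkey value and the function name are placeholders, not part of this
 * patch): shared ranges are now handed to protect_pages() through
 * PhdrSearchArgs instead of being recovered with dlopen()/dlsym(); the list is
 * {NULL, NULL}-terminated and holds at most IA2_MAX_NUM_SHARED_SECTION_COUNT
 * entries. */
extern char __start_ia2_shared_data, __stop_ia2_shared_data;

static void example_protect_compartment(void) {
  struct IA2SharedSection shared_sections[2] = {
      {&__start_ia2_shared_data, &__stop_ia2_shared_data},
      {NULL, NULL},
  };
  struct PhdrSearchArgs args = {
      .pkey = 1, /* placeholder compartment pkey */
      .address = &example_protect_compartment,
      .shared_sections = shared_sections,
  };
  dl_iterate_phdr(protect_pages, &args);
}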
@@ -290,48 +282,30 @@ int protect_pages(struct dl_phdr_info *info, size_t size, void *data) { // printf("protecting library: %s\n", basename(info->dlpi_name)); - void *lib = dlopen(info->dlpi_name, RTLD_NOW); - if (!lib) - exit(-1); - struct AddressRange shared_ranges[NUM_SHARED_RANGES] = {0}; size_t shared_range_count = 0; - for (size_t i = 0; i < NUM_SHARED_SECTIONS; i++) { - struct AddressRange *cur_range = &shared_ranges[shared_range_count]; + for (size_t i = 0; i < IA2_MAX_NUM_SHARED_SECTION_COUNT; i++) { + if (!search_args->shared_sections || + !search_args->shared_sections[i].start || + !search_args->shared_sections[i].end) { + break; + } - // Clear any potential old error conditions - dlerror(); + struct AddressRange *cur_range = &shared_ranges[shared_range_count]; - cur_range->start = (uint64_t)dlsym(lib, shared_sections[i][0]); - if (!cur_range->start) { - // We didn't find the start symbol for this shared section. Either the - // user didn't mark any shared global data or we didn't link with the - // correct linker script. We can't distinguish these cases here and the - // first shouldn't be an error, so let's continue. - continue; - } + cur_range->start = (uint64_t)search_args->shared_sections[i].start; uint64_t aligned_start = cur_range->start & ~0xFFFUL; if (aligned_start != cur_range->start) { - printf("Start of section %s is not page-aligned\n", - shared_sections[i][0]); + printf("Start of section %p is not page-aligned\n", + search_args->shared_sections[i].start); exit(-1); } - cur_range->end = (uint64_t)dlsym(lib, shared_sections[i][1]); - char *dl_err = dlerror(); - if (dl_err) { - printf("Could not find end symbol of shared section %s: %s\n", - shared_sections[i][1], dl_err); - exit(-1); - } - if (!cur_range->end) { - printf("End symbol of shared section %s was unexpectedly NULL\n", - shared_sections[i][1]); - exit(-1); - } + cur_range->end = (uint64_t)search_args->shared_sections[i].end; uint64_t aligned_end = (cur_range->end + 0xFFFUL) & ~0xFFFUL; if (aligned_end != cur_range->end) { - printf("End of section %s is not page-aligned\n", shared_sections[i][1]); + printf("End of section %p is not page-aligned\n", + search_args->shared_sections[i].end); exit(-1); } diff --git a/libia2/include/ia2_compartment_init.inc b/libia2/include/ia2_compartment_init.inc index 55c917857c..9315e73993 100644 --- a/libia2/include/ia2_compartment_init.inc +++ b/libia2/include/ia2_compartment_init.inc @@ -10,6 +10,7 @@ #include #include +#include #include #ifndef IA2_COMPARTMENT_LIBRARIES @@ -46,14 +47,23 @@ __thread void *COMPARTMENT_IDENT(ia2_stackptr) __attribute__((used)); **/ extern int ia2_n_pkeys_to_alloc; +extern char __start_ia2_shared_data __attribute__((visibility("hidden"))), + __stop_ia2_shared_data __attribute__((visibility("hidden"))); + void ensure_pkeys_allocated(int *n_to_alloc); __attribute__((constructor)) static void COMPARTMENT_IDENT(init_pkey)() { ensure_pkeys_allocated(&ia2_n_pkeys_to_alloc); + struct IA2SharedSection shared_sections[2] = {{ + &__start_ia2_shared_data, + &__stop_ia2_shared_data, + }, + {NULL, NULL}}; struct PhdrSearchArgs args = { .pkey = IA2_COMPARTMENT, .address = &COMPARTMENT_IDENT(init_pkey), .extra_libraries = IA2_COMPARTMENT_LIBRARIES, .found_library_count = 0, + .shared_sections = shared_sections, }; __asm__ volatile( diff --git a/libia2/include/ia2_internal.h b/libia2/include/ia2_internal.h index a666530532..6c14097c3e 100644 --- a/libia2/include/ia2_internal.h +++ b/libia2/include/ia2_internal.h @@ -100,6 +100,13 @@ instead of `fn`. 
*/ int protect_pages(struct dl_phdr_info *info, size_t size, void *data); int protect_tls_pages(struct dl_phdr_info *info, size_t size, void *data); +struct IA2SharedSection { + const void *start; + const void *end; +}; + +#define IA2_MAX_NUM_SHARED_SECTION_COUNT 4 + // The data argument each time dl_iterate_phdr calls protect_pages struct PhdrSearchArgs { // The compartment pkey to use when the segments are found @@ -116,6 +123,12 @@ struct PhdrSearchArgs { // Number of other libraries from extra_libraries that were located and // protected. int found_library_count; + + // Array of shared section(s) to skip when protecting RW ranges. List is + // terminated with a double NULL entry. List must be at most + // IA2_MAX_SHARED_SECTION_COUNT elements (not including NULL terminating + // pair). Pointer may be NULL if no shared ranges are used. + const struct IA2SharedSection *shared_sections; }; // This emits the 5 bytes correponding to the movl $PKRU, %eax instruction diff --git a/partition-alloc/CMakeLists.txt b/partition-alloc/CMakeLists.txt index d05f1d8490..68861644b4 100644 --- a/partition-alloc/CMakeLists.txt +++ b/partition-alloc/CMakeLists.txt @@ -20,6 +20,7 @@ set(PA_SRCS partition_alloc_base/debug/stack_trace_posix.cc partition_alloc_base/logging.cc partition_alloc_base/log_message.cc + partition_alloc_base/memory/page_size_posix.cc partition_alloc_base/memory/ref_counted.cc partition_alloc_base/rand_util.cc partition_alloc_base/strings/cstring_builder.cc @@ -42,6 +43,8 @@ set(PA_SRCS thread_isolation/thread_isolation.cc random.cc reservation_offset_table.cc + shim/allocator_shim.cc + shim/nonscannable_allocator.cc spinning_mutex.cc starscan/metadata_allocator.cc starscan/pcscan.cc @@ -63,7 +66,8 @@ set(PA_SRCS list(TRANSFORM PA_SRCS PREPEND ${EXTERNAL_DIR}/chromium/src/base/allocator/partition_allocator/) add_library(partition-alloc SHARED - src/allocator_shim.cc + src/allocator_shim_default_dispatch_to_partition_alloc.cc + src/shared_allocator.cc ${PA_SRCS}) target_link_libraries(partition-alloc libia2) @@ -82,14 +86,43 @@ target_compile_options(partition-alloc "-Wno-invalid-offsetof" "-Wno-return-type" ) +target_compile_options(partition-alloc + PUBLIC + "-fno-exceptions" +) target_link_options(partition-alloc - INTERFACE - "-Wl,--wrap=malloc" - "-Wl,--wrap=calloc" - "-Wl,--wrap=realloc" - "-Wl,--wrap=free" PRIVATE "-Wl,-z,now" ) +# Wrapping options from upstream BUILD.gn +target_link_options(partition-alloc + PUBLIC + "-Wl,-wrap,calloc" + "-Wl,-wrap,free" + "-Wl,-wrap,malloc" + "-Wl,-wrap,memalign" + "-Wl,-wrap,posix_memalign" + "-Wl,-wrap,pvalloc" + "-Wl,-wrap,realloc" + "-Wl,-wrap,valloc" + + # Not allocating memory, but part of the API + "-Wl,-wrap,malloc_usable_size" + + # functions + "-Wl,-wrap,realpath" + + # functions + "-Wl,-wrap,strdup" + "-Wl,-wrap,strndup" + + # functions + "-Wl,-wrap,getcwd" + + # functions + "-Wl,-wrap,asprintf" + "-Wl,-wrap,vasprintf" +) + target_link_libraries(partition-alloc libia2) diff --git a/partition-alloc/src/allocator_shim.cc b/partition-alloc/src/allocator_shim.cc deleted file mode 100644 index 6dc3288061..0000000000 --- a/partition-alloc/src/allocator_shim.cc +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2020 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// This shim is a simplified version of the one in chromium in -// src/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc. 
- -#include "allocator_shim.h" -#include "base/allocator/partition_allocator/partition_alloc.h" -#include "base/allocator/partition_allocator/partition_root.h" -#include - -using namespace partition_alloc::internal; -using partition_alloc::PartitionOptions; -using partition_alloc::PartitionRoot; -using partition_alloc::PartitionAllocator; - -class SimpleScopedSpinLocker { -public: - explicit SimpleScopedSpinLocker(std::atomic &lock) : lock_(lock) { - bool expected = false; - while (!lock_.compare_exchange_weak( - expected, true, std::memory_order_acquire, std::memory_order_relaxed)) { - expected = false; - } - } - - ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); } - -private: - std::atomic &lock_; -}; - -// This is the LeakySingleton class in -// allocator_shim_default_dispatch_to_partition_alloc.cc, but it's renamed here -// since we don't need it to be generic over the type of partition root. -class SingletonPartition { -public: - PartitionRoot *Get() { - auto *instance = instance_.load(std::memory_order_acquire); - if (instance) { - return instance->root(); - } - return GetSlowPath(); - } - -private: - PartitionRoot *GetSlowPath(); - std::atomic instance_; - alignas(PartitionAllocator) uint8_t - instance_buffer_[sizeof(PartitionAllocator)] = {0}; - std::atomic initialization_lock_; -}; - -static PartitionAllocator *NewPartition(void *buffer) { - auto *new_heap = new (buffer) PartitionAllocator(); - new_heap->init(partition_alloc::PartitionOptions{}); - return new_heap; -} - -PartitionRoot *SingletonPartition::GetSlowPath() { - SimpleScopedSpinLocker scoped_lock{initialization_lock_}; - - PartitionAllocator *instance = - instance_.load(std::memory_order_relaxed); - - if (instance) { - return instance->root(); - } - - instance = ::NewPartition(reinterpret_cast(instance_buffer_)); - instance_.store(instance, std::memory_order_release); - - return instance->root(); -} - -SingletonPartition g_partitions[16]; - -void *ShimMalloc(size_t bytes) { - size_t pkey = ::ia2_get_pkey(); - return ShimMallocWithPkey(bytes, pkey); -} - -void *ShimMallocWithPkey(size_t bytes, size_t pkey) { - if (pkey == 0) { - return malloc(bytes); - } else { - PartitionRoot *root = g_partitions[pkey].Get(); - return root->Alloc(bytes, nullptr); - } -} - -void ShimFree(void *ptr) { - size_t pkey = ::ia2_get_pkey(); - ShimFreeWithPkey(ptr, pkey); -} - -void ShimFreeWithPkey(void *ptr, size_t pkey) { - if (pkey == 0) { - free(ptr); - } else { - PartitionRoot *root = g_partitions[pkey].Get(); - root->Free(ptr); - } -} - -void *ShimRealloc(void *ptr, size_t size) { - size_t pkey = ::ia2_get_pkey(); - return ShimReallocWithPkey(ptr, size, pkey); -} - -void *ShimReallocWithPkey(void *ptr, size_t size, size_t pkey) { - if (pkey == 0) { - return realloc(ptr, size); - } else { - PartitionRoot *root = g_partitions[pkey].Get(); - return root->Realloc(ptr, size, nullptr); - } -} - -void *ShimCalloc(size_t num, size_t size) { - size_t pkey = ::ia2_get_pkey(); - return ShimCallocWithPkey(num, size, pkey); -} - -void *ShimCallocWithPkey(size_t num, size_t size, size_t pkey) { - if (pkey == 0) { - return calloc(num, size); - } else { - PartitionRoot *root = g_partitions[pkey].Get(); - size_t total; - if (__builtin_mul_overflow(num, size, &total)) { - abort(); - } - void *ret = root->Alloc(total, nullptr); - if (ret != nullptr) { - memset(ret, 0, total); - } - return ret; - } -} diff --git a/partition-alloc/src/allocator_shim.h b/partition-alloc/src/allocator_shim.h deleted file mode 100644 index 
d4d5640063..0000000000 --- a/partition-alloc/src/allocator_shim.h +++ /dev/null @@ -1,63 +0,0 @@ -#pragma once - -#include -#include -#include - -#ifndef __THROW // Not a glibc system -#ifdef _NOEXCEPT // LLVM libc++ uses noexcept instead -#define __THROW _NOEXCEPT -#else -#define __THROW -#endif -#endif - -#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline)) - -#ifdef __cplusplus -extern "C" { -#endif - -void *ShimMalloc(size_t bytes); -void ShimFree(void *ptr); -void *ShimRealloc(void *ptr, size_t size); -void *ShimCalloc(size_t num, size_t size); - -void *ShimMallocWithPkey(size_t bytes, size_t pkey); -void ShimFreeWithPkey(void *ptr, size_t pkey); -void *ShimReallocWithPkey(void *ptr, size_t size, size_t pkey); -void *ShimCallocWithPkey(size_t num, size_t size, size_t pkey); - -SHIM_ALWAYS_EXPORT void *__wrap_malloc(size_t size) __THROW { - return ShimMalloc(size); -} - -SHIM_ALWAYS_EXPORT void __wrap_free(void *ptr) __THROW { ShimFree(ptr); } - -SHIM_ALWAYS_EXPORT void *__wrap_realloc(void *ptr, size_t size) __THROW { - return ShimRealloc(ptr, size); -} - -SHIM_ALWAYS_EXPORT void *__wrap_calloc(size_t num, size_t size) __THROW { - return ShimCalloc(num, size); -} - -SHIM_ALWAYS_EXPORT void *shared_malloc(size_t size) __THROW { - return ShimMallocWithPkey(size, 0); -} - -SHIM_ALWAYS_EXPORT void shared_free(void *ptr) __THROW { - ShimFreeWithPkey(ptr, 0); -} - -SHIM_ALWAYS_EXPORT void *shared_realloc(void *ptr, size_t size) __THROW { - return ShimReallocWithPkey(ptr, size, 0); -} - -SHIM_ALWAYS_EXPORT void *shared_calloc(size_t num, size_t size) __THROW { - return ShimCallocWithPkey(num, size, 0); -} - -#ifdef __cplusplus -} /* extern "C" */ -#endif diff --git a/partition-alloc/src/allocator_shim_default_dispatch_to_partition_alloc.cc b/partition-alloc/src/allocator_shim_default_dispatch_to_partition_alloc.cc new file mode 100644 index 0000000000..65f991b7da --- /dev/null +++ b/partition-alloc/src/allocator_shim_default_dispatch_to_partition_alloc.cc @@ -0,0 +1,684 @@ +// Copyright 2020 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
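// Routing overview (an illustrative sketch, not part of this patch;
// RootForCurrentCompartment is a hypothetical helper, and the real selection
// logic lives in Allocator()/SharedAllocator() further below): compartment
// pkeys 1..15 each map to their own thread-isolated PartitionRoot, while pkey 0
// falls back to the shared, unprotected root.
static partition_alloc::PartitionRoot* RootForCurrentCompartment() {
  size_t pkey = ::ia2_get_pkey();
  return pkey == 0 ? g_shared_root.Get() : g_root[pkey - 1].Get();
}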
+ +#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h" + +#include +#include +#include +#include +#include + +#include "base/allocator/partition_allocator/allocation_guard.h" +#include "base/allocator/partition_allocator/chromecast_buildflags.h" +#include "base/allocator/partition_allocator/memory_reclaimer.h" +#include "base/allocator/partition_allocator/partition_alloc.h" +#include "base/allocator/partition_allocator/partition_alloc_base/bits.h" +#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" +#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h" +#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h" +#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h" +#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" +#include "base/allocator/partition_allocator/partition_alloc_check.h" +#include "base/allocator/partition_allocator/partition_alloc_constants.h" +#include "base/allocator/partition_allocator/partition_root.h" +#include "base/allocator/partition_allocator/partition_stats.h" +#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h" +#include "base/allocator/partition_allocator/shim/nonscannable_allocator.h" +#include "base/allocator/partition_allocator/thread_isolation/thread_isolation.h" +#include "build/build_config.h" + +#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) +#include +#endif + +#include + +using allocator_shim::AllocatorDispatch; + +namespace { + +class SimpleScopedSpinLocker { + public: + explicit SimpleScopedSpinLocker(std::atomic& lock) : lock_(lock) { + // Lock. Semantically equivalent to base::Lock::Acquire(). + bool expected = false; + // Weak CAS since we are in a retry loop, relaxed ordering for failure since + // in this case we don't imply any ordering. + // + // This matches partition_allocator/spinning_mutex.h fast path on Linux. + while (!lock_.compare_exchange_weak( + expected, true, std::memory_order_acquire, std::memory_order_relaxed)) { + expected = false; + } + } + + ~SimpleScopedSpinLocker() { lock_.store(false, std::memory_order_release); } + + private: + std::atomic& lock_; +}; + +// We can't use a "static local" or a base::LazyInstance, as: +// - static local variables call into the runtime on Windows, which is not +// prepared to handle it, as the first allocation happens during CRT init. +// - We don't want to depend on base::LazyInstance, which may be converted to +// static locals one day. +// +// Nevertheless, this provides essentially the same thing. +template +class LeakySingleton { + public: + constexpr LeakySingleton() = default; + + PA_ALWAYS_INLINE T* Get() { + auto* instance = instance_.load(std::memory_order_acquire); + if (PA_LIKELY(instance)) { + return instance; + } + + return GetSlowPath(); + } + + // Replaces the instance pointer with a new one. + void Replace(T* new_instance) { + SimpleScopedSpinLocker scoped_lock{initialization_lock_}; + + // Modify under the lock to avoid race between |if (instance)| and + // |instance_.store()| in GetSlowPath(). + instance_.store(new_instance, std::memory_order_release); + } + + private: + T* GetSlowPath(); + + std::atomic instance_; + // Before C++20, having an initializer here causes a "variable does not have a + // constant initializer" error. In C++20, omitting it causes a similar error. 
+ // Presumably this is due to the C++20 changes to make atomic initialization + // (of the other members of this class) sane, so guarding under that + // feature-test. +#if !defined(__cpp_lib_atomic_value_initialization) || \ + __cpp_lib_atomic_value_initialization < 201911L + alignas(T) uint8_t instance_buffer_[sizeof(T)]; +#else + alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0}; +#endif + std::atomic initialization_lock_; +}; + +template +T* LeakySingleton::GetSlowPath() { + // The instance has not been set, the proper way to proceed (correct + // double-checked locking) is: + // + // auto* instance = instance_.load(std::memory_order_acquire); + // if (!instance) { + // ScopedLock initialization_lock; + // root = instance_.load(std::memory_order_relaxed); + // if (root) + // return root; + // instance = Create new root; + // instance_.store(instance, std::memory_order_release); + // return instance; + // } + // + // However, we don't want to use a base::Lock here, so instead we use + // compare-and-exchange on a lock variable, which provides the same + // guarantees. + SimpleScopedSpinLocker scoped_lock{initialization_lock_}; + + T* instance = instance_.load(std::memory_order_relaxed); + // Someone beat us. + if (instance) { + return instance; + } + + instance = Constructor::New(reinterpret_cast(instance_buffer_)); + instance_.store(instance, std::memory_order_release); + + return instance; +} + +class MainPartitionConstructor { + public: + static partition_alloc::PartitionRoot* New(void* buffer) { + size_t pkey = ::ia2_get_pkey(); + auto *new_root = new (buffer) + partition_alloc::PartitionRoot(partition_alloc::PartitionOptions{ + .aligned_alloc = partition_alloc::PartitionOptions::kAllowed, + .thread_cache = partition_alloc::PartitionOptions::kDisabled, + .backup_ref_ptr = + partition_alloc::PartitionOptions::kDisabled, + .thread_isolation = partition_alloc::ThreadIsolationOption(pkey, pkey), + }); + + return new_root; + } +}; + +LeakySingleton + g_root[partition_alloc::internal::kNumCompartments - 1] PA_CONSTINIT = {}; + +class SharedPartitionConstructor { + public: + static partition_alloc::PartitionRoot* New(void* buffer) { + auto *new_root = new (buffer) + partition_alloc::PartitionRoot(partition_alloc::PartitionOptions{ + .aligned_alloc = + partition_alloc::PartitionOptions::kAllowed, + .thread_cache = partition_alloc::PartitionOptions::kDisabled, + .backup_ref_ptr = + partition_alloc::PartitionOptions::kDisabled, + }); + + return new_root; + } +}; + +LeakySingleton + g_shared_root PA_CONSTINIT = {}; + +partition_alloc::PartitionRoot *Allocator() { + size_t pkey = ::ia2_get_pkey(); + if (pkey == 0) { + return g_shared_root.Get(); + } + return g_root[pkey - 1].Get(); +} + +partition_alloc::PartitionRoot *AlignedAllocator() { + size_t pkey = ::ia2_get_pkey(); + if (pkey == 0) { + return g_shared_root.Get(); + } + return g_root[pkey - 1].Get(); +} + +partition_alloc::PartitionRoot *SharedAllocator() { + return g_shared_root.Get(); +} + +void* AllocateAlignedMemory(size_t alignment, size_t size) { + // Memory returned by the regular allocator *always* respects |kAlignment|, + // which is a power of two, and any valid alignment is also a power of two. So + // we can directly fulfill these requests with the main allocator. 
+ // + // This has several advantages: + // - The thread cache is supported on the main partition + // - Reduced fragmentation + // - Better coverage for MiraclePtr variants requiring extras + // + // There are several call sites in Chromium where base::AlignedAlloc is called + // with a small alignment. Some may be due to overly-careful code, some are + // because the client code doesn't know the required alignment at compile + // time. + // + // Note that all "AlignedFree()" variants (_aligned_free() on Windows for + // instance) directly call PartitionFree(), so there is no risk of + // mismatch. (see below the default_dispatch definition). + if (alignment <= partition_alloc::internal::kAlignment) { + // This is mandated by |posix_memalign()| and friends, so should never fire. + PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment)); + // TODO(bartekn): See if the compiler optimizes branches down the stack on + // Mac, where PartitionPageSize() isn't constexpr. + return Allocator()->AllocInline( + size); + } + + return AlignedAllocator() + ->AlignedAllocInline(alignment, + size); +} + +} // namespace + +namespace allocator_shim::internal { + +void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) { + partition_alloc::ScopedDisallowAllocations guard{}; + return Allocator()->AllocInline(size); +} + +void* PartitionMallocUnchecked(const AllocatorDispatch*, + size_t size, + void* context) { + partition_alloc::ScopedDisallowAllocations guard{}; + return Allocator() + ->AllocInline(size); +} + +void* PartitionCalloc(const AllocatorDispatch*, + size_t n, + size_t size, + void* context) { + partition_alloc::ScopedDisallowAllocations guard{}; + const size_t total = + partition_alloc::internal::base::CheckMul(n, size).ValueOrDie(); + return Allocator() + ->AllocInline(total); +} + +void* PartitionMemalign(const AllocatorDispatch*, + size_t alignment, + size_t size, + void* context) { + partition_alloc::ScopedDisallowAllocations guard{}; + return AllocateAlignedMemory(alignment, size); +} + +void* PartitionAlignedAlloc(const AllocatorDispatch* dispatch, + size_t size, + size_t alignment, + void* context) { + partition_alloc::ScopedDisallowAllocations guard{}; + return AllocateAlignedMemory(alignment, size); +} + +// aligned_realloc documentation is +// https://docs.microsoft.com/ja-jp/cpp/c-runtime-library/reference/aligned-realloc +// TODO(tasak): Expand the given memory block to the given size if possible. +// This realloc always free the original memory block and allocates a new memory +// block. +// TODO(tasak): Implement PartitionRoot::AlignedReallocWithFlags +// and use it. +void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch, + void* address, + size_t size, + size_t alignment, + void* context) { + partition_alloc::ScopedDisallowAllocations guard{}; + void* new_ptr = nullptr; + if (size > 0) { + new_ptr = AllocateAlignedMemory(alignment, size); + } else { + // size == 0 and address != null means just "free(address)". + if (address) { + partition_alloc::PartitionRoot::FreeInlineInUnknownRoot< + partition_alloc::FreeFlags::kNoHooks>(address); + } + } + // The original memory block (specified by address) is unchanged if ENOMEM. + if (!new_ptr) { + return nullptr; + } + // TODO(tasak): Need to compare the new alignment with the address' alignment. + // If the two alignments are not the same, need to return nullptr with EINVAL. 
+ if (address) { + size_t usage = partition_alloc::PartitionRoot::GetUsableSize(address); + size_t copy_size = usage > size ? size : usage; + memcpy(new_ptr, address, copy_size); + + partition_alloc::PartitionRoot::FreeInlineInUnknownRoot< + partition_alloc::FreeFlags::kNoHooks>(address); + } + return new_ptr; +} + +void* PartitionRealloc(const AllocatorDispatch*, + void* address, + size_t size, + void* context) { + partition_alloc::ScopedDisallowAllocations guard{}; +#if BUILDFLAG(IS_APPLE) + if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc( + reinterpret_cast(address)) && + address)) { + // A memory region allocated by the system allocator is passed in this + // function. Forward the request to `realloc` which supports zone- + // dispatching so that it appropriately selects the right zone. + return realloc(address, size); + } +#endif // BUILDFLAG(IS_APPLE) + + return Allocator()->Realloc(address, + size, ""); +} + +#if BUILDFLAG(PA_IS_CAST_ANDROID) +extern "C" { +void __real_free(void*); +} // extern "C" +#endif // BUILDFLAG(PA_IS_CAST_ANDROID) + +void PartitionFree(const AllocatorDispatch*, void* object, void* context) { + partition_alloc::ScopedDisallowAllocations guard{}; +#if BUILDFLAG(IS_APPLE) + // TODO(bartekn): Add MTE unmasking here (and below). + if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc( + reinterpret_cast(object)) && + object)) { + // A memory region allocated by the system allocator is passed in this + // function. Forward the request to `free` which supports zone- + // dispatching so that it appropriately selects the right zone. + return free(object); + } +#endif // BUILDFLAG(IS_APPLE) + + // On Android Chromecast devices, there is at least one case where a system + // malloc() pointer can be passed to PartitionAlloc's free(). If we don't own + // the pointer, pass it along. This should not have a runtime cost vs regular + // Android, since on Android we have a PA_CHECK() rather than the branch here. +#if BUILDFLAG(PA_IS_CAST_ANDROID) + if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc( + reinterpret_cast(object)) && + object)) { + // A memory region allocated by the system allocator is passed in this + // function. Forward the request to `free()`, which is `__real_free()` + // here. + return __real_free(object); + } +#endif // BUILDFLAG(PA_IS_CAST_ANDROID) + + partition_alloc::PartitionRoot::FreeInlineInUnknownRoot< + partition_alloc::FreeFlags::kNoHooks>(object); +} + +#if BUILDFLAG(IS_APPLE) +// Normal free() path on Apple OSes: +// 1. size = GetSizeEstimate(ptr); +// 2. if (size) FreeDefiniteSize(ptr, size) +// +// So we don't need to re-check that the pointer is owned in Free(), and we +// can use the size. +void PartitionFreeDefiniteSize(const AllocatorDispatch*, + void* address, + size_t size, + void* context) { + partition_alloc::ScopedDisallowAllocations guard{}; + // TODO(lizeb): Optimize PartitionAlloc to use the size information. This is + // still useful though, as we avoid double-checking that the address is owned. + partition_alloc::PartitionRoot::FreeInlineInUnknownRoot< + partition_alloc::FreeFlags::kNoHooks>(address); +} +#endif // BUILDFLAG(IS_APPLE) + + +size_t PartitionGetSizeEstimate(const AllocatorDispatch*, + void* address, + void* context) { + // This is used to implement malloc_usable_size(3). Per its man page, "if ptr + // is NULL, 0 is returned". 
+  if (!address) {
+    return 0;
+  }
+
+#if BUILDFLAG(IS_APPLE)
+  if (!partition_alloc::IsManagedByPartitionAlloc(
+          reinterpret_cast<uintptr_t>(address))) {
+    // The object pointed to by `address` is not allocated by the
+    // PartitionAlloc. The return value `0` means that the pointer does not
+    // belong to this malloc zone.
+    return 0;
+  }
+#endif  // BUILDFLAG(IS_APPLE)
+
+  // TODO(lizeb): Returns incorrect values for aligned allocations.
+  const size_t size =
+      partition_alloc::PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(
+          address);
+#if BUILDFLAG(IS_APPLE)
+  // The object pointed to by `address` is allocated by the PartitionAlloc.
+  // So, this function must not return zero so that the malloc zone dispatcher
+  // finds the appropriate malloc zone.
+  PA_DCHECK(size);
+#endif  // BUILDFLAG(IS_APPLE)
+  return size;
+}
+
+#if BUILDFLAG(IS_APPLE)
+bool PartitionClaimedAddress(const AllocatorDispatch*,
+                             void* address,
+                             void* context) {
+  return partition_alloc::IsManagedByPartitionAlloc(
+      reinterpret_cast<uintptr_t>(address));
+}
+#endif  // BUILDFLAG(IS_APPLE)
+
+unsigned PartitionBatchMalloc(const AllocatorDispatch*,
+                              size_t size,
+                              void** results,
+                              unsigned num_requested,
+                              void* context) {
+  // No real batching: we could only acquire the lock once for instance, keep it
+  // simple for now.
+  for (unsigned i = 0; i < num_requested; i++) {
+    // No need to check the results, we crash if it fails.
+    results[i] = PartitionMalloc(nullptr, size, nullptr);
+  }
+
+  // Either all succeeded, or we crashed.
+  return num_requested;
+}
+
+void PartitionBatchFree(const AllocatorDispatch*,
+                        void** to_be_freed,
+                        unsigned num_to_be_freed,
+                        void* context) {
+  // No real batching: we could only acquire the lock once for instance, keep it
+  // simple for now.
+  for (unsigned i = 0; i < num_to_be_freed; i++) {
+    PartitionFree(nullptr, to_be_freed[i], nullptr);
+  }
+}
+
+#if BUILDFLAG(IS_APPLE)
+void PartitionTryFreeDefault(const AllocatorDispatch*,
+                             void* address,
+                             void* context) {
+  partition_alloc::ScopedDisallowAllocations guard{};
+
+  if (PA_UNLIKELY(!partition_alloc::IsManagedByPartitionAlloc(
+          reinterpret_cast<uintptr_t>(address)))) {
+    // The object pointed to by `address` is not allocated by the
+    // PartitionAlloc. Call find_zone_and_free.
+    return allocator_shim::TryFreeDefaultFallbackToFindZoneAndFree(address);
+  }
+
+  partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
+      partition_alloc::FreeFlags::kNoHooks>(address);
+}
+#endif  // BUILDFLAG(IS_APPLE)
+
+// static
+bool PartitionAllocMalloc::AllocatorConfigurationFinalized() {
+  return true;
+}
+
+// static
+partition_alloc::PartitionRoot* PartitionAllocMalloc::Allocator() {
+  return ::Allocator();
+}
+
+// static
+partition_alloc::PartitionRoot* PartitionAllocMalloc::OriginalAllocator() {
+  return ::Allocator();
+}
+
+// static
+partition_alloc::PartitionRoot* PartitionAllocMalloc::AlignedAllocator() {
+  return ::AlignedAllocator();
+}
+
+// static
+partition_alloc::PartitionRoot* PartitionAllocMalloc::SharedAllocator() {
+  return ::SharedAllocator();
+}
+
+}  // namespace allocator_shim::internal
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+namespace allocator_shim {
+
+void EnablePartitionAllocMemoryReclaimer() {
+  // Unlike other partitions, Allocator() does not register its PartitionRoot to
+  // the memory reclaimer, because doing so may allocate memory. Thus, the
+  // registration to the memory reclaimer has to be done some time later, when
+  // the main root is fully configured.
+  ::partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
+      Allocator());
+
+  PA_DCHECK(AlignedAllocator() == Allocator());
+}
+
+// No synchronization provided: `PartitionRoot.flags` is only written
+// to in `PartitionRoot::Init()`.
+uint32_t GetMainPartitionRootExtrasSize() {
+#if PA_CONFIG(EXTRAS_REQUIRED)
+  return Allocator()->settings.extras_size;
+#else
+  return 0;
+#endif  // PA_CONFIG(EXTRAS_REQUIRED)
+}
+
+#if BUILDFLAG(USE_STARSCAN)
+void EnablePCScan(partition_alloc::internal::PCScan::InitConfig config) {
+  partition_alloc::internal::PCScan::Initialize(config);
+
+  partition_alloc::internal::PCScan::RegisterScannableRoot(Allocator());
+  if (Allocator() != AlignedAllocator()) {
+    partition_alloc::internal::PCScan::RegisterScannableRoot(
+        AlignedAllocator());
+  }
+
+  allocator_shim::NonScannableAllocator::Instance().NotifyPCScanEnabled();
+  allocator_shim::NonQuarantinableAllocator::Instance().NotifyPCScanEnabled();
+}
+#endif  // BUILDFLAG(USE_STARSCAN)
+}  // namespace allocator_shim
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &allocator_shim::internal::PartitionMalloc,  // alloc_function
+    &allocator_shim::internal::
+        PartitionMallocUnchecked,  // alloc_unchecked_function
+    &allocator_shim::internal::
+        PartitionCalloc,  // alloc_zero_initialized_function
+    &allocator_shim::internal::PartitionMemalign,  // alloc_aligned_function
+    &allocator_shim::internal::PartitionRealloc,   // realloc_function
+    &allocator_shim::internal::PartitionFree,      // free_function
+    &allocator_shim::internal::
+        PartitionGetSizeEstimate,  // get_size_estimate_function
+#if BUILDFLAG(IS_APPLE)
+    &allocator_shim::internal::PartitionClaimedAddress,  // claimed_address
+#else
+    nullptr,  // claimed_address
+#endif
+    &allocator_shim::internal::PartitionBatchMalloc,  // batch_malloc_function
+    &allocator_shim::internal::PartitionBatchFree,    // batch_free_function
+#if BUILDFLAG(IS_APPLE)
+    // On Apple OSes, free_definite_size() is always called from free(), since
+    // get_size_estimate() is used to determine whether an allocation belongs to
+    // the current zone. It makes sense to optimize for it.
+    &allocator_shim::internal::PartitionFreeDefiniteSize,
+    // On Apple OSes, try_free_default() is sometimes called as an optimization
+    // of free().
+    &allocator_shim::internal::PartitionTryFreeDefault,
+#else
+    nullptr,  // free_definite_size_function
+    nullptr,  // try_free_default_function
+#endif
+    &allocator_shim::internal::
+        PartitionAlignedAlloc,  // aligned_malloc_function
+    &allocator_shim::internal::
+        PartitionAlignedRealloc,  // aligned_realloc_function
+    &allocator_shim::internal::PartitionFree,  // aligned_free_function
+    nullptr,  // next
+};
+
+// Intercept diagnostics symbols as well, even though they are not part of the
+// unified shim layer.
+//
+// TODO(lizeb): Implement the ones that are doable.
+
+extern "C" {
+
+#if !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
+
+SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}
+
+SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
+  return 0;
+}
+
+#endif  // !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
+
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
+  partition_alloc::SimplePartitionStatsDumper allocator_dumper;
+  Allocator()->DumpStats("malloc", true, &allocator_dumper);
+  // TODO(bartekn): Dump OriginalAllocator() into "malloc" as well.
+
+  partition_alloc::SimplePartitionStatsDumper aligned_allocator_dumper;
+  if (AlignedAllocator() != Allocator()) {
+    AlignedAllocator()->DumpStats("posix_memalign", true,
+                                  &aligned_allocator_dumper);
+  }
+
+  // Dump stats for nonscannable and nonquarantinable allocators.
+  auto& nonscannable_allocator =
+      allocator_shim::NonScannableAllocator::Instance();
+  partition_alloc::SimplePartitionStatsDumper nonscannable_allocator_dumper;
+  if (auto* nonscannable_root = nonscannable_allocator.root()) {
+    nonscannable_root->DumpStats("malloc", true,
+                                 &nonscannable_allocator_dumper);
+  }
+  auto& nonquarantinable_allocator =
+      allocator_shim::NonQuarantinableAllocator::Instance();
+  partition_alloc::SimplePartitionStatsDumper nonquarantinable_allocator_dumper;
+  if (auto* nonquarantinable_root = nonquarantinable_allocator.root()) {
+    nonquarantinable_root->DumpStats("malloc", true,
+                                     &nonquarantinable_allocator_dumper);
+  }
+
+  struct mallinfo info = {0};
+  info.arena = 0;  // Memory *not* allocated with mmap().
+
+  // Memory allocated with mmap(), aka virtual size.
+  info.hblks =
+      partition_alloc::internal::base::checked_cast<decltype(info.hblks)>(
+          allocator_dumper.stats().total_mmapped_bytes +
+          aligned_allocator_dumper.stats().total_mmapped_bytes +
+          nonscannable_allocator_dumper.stats().total_mmapped_bytes +
+          nonquarantinable_allocator_dumper.stats().total_mmapped_bytes);
+  // Resident bytes.
+  info.hblkhd =
+      partition_alloc::internal::base::checked_cast<decltype(info.hblkhd)>(
+          allocator_dumper.stats().total_resident_bytes +
+          aligned_allocator_dumper.stats().total_resident_bytes +
+          nonscannable_allocator_dumper.stats().total_resident_bytes +
+          nonquarantinable_allocator_dumper.stats().total_resident_bytes);
+  // Allocated bytes.
+  info.uordblks =
+      partition_alloc::internal::base::checked_cast<decltype(info.uordblks)>(
+          allocator_dumper.stats().total_active_bytes +
+          aligned_allocator_dumper.stats().total_active_bytes +
+          nonscannable_allocator_dumper.stats().total_active_bytes +
+          nonquarantinable_allocator_dumper.stats().total_active_bytes);
+
+  return info;
+}
+#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+}  // extern "C"
+
+#if BUILDFLAG(IS_APPLE)
+
+namespace allocator_shim {
+
+void InitializeDefaultAllocatorPartitionRoot() {
+  // On OS_APPLE, the initialization of PartitionRoot uses memory allocations
+  // internally, e.g. __builtin_available, and it's not easy to avoid it.
+  // Thus, we initialize the PartitionRoot by using the system default
+  // allocator before we intercept the system default allocator.
+ std::ignore = Allocator(); +} + +} // namespace allocator_shim + +#endif // BUILDFLAG(IS_APPLE) + +#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) diff --git a/partition-alloc/src/shared_allocator.cc b/partition-alloc/src/shared_allocator.cc new file mode 100644 index 0000000000..7c30222a42 --- /dev/null +++ b/partition-alloc/src/shared_allocator.cc @@ -0,0 +1,33 @@ +#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h" +#include "base/allocator/partition_allocator/shim/allocator_shim_internals.h" + +extern "C" { + +SHIM_ALWAYS_EXPORT void *shared_malloc(size_t size) __THROW { + partition_alloc::ScopedDisallowAllocations guard{}; + return allocator_shim::internal::PartitionAllocMalloc::SharedAllocator() + ->AllocInline(size); +} + +SHIM_ALWAYS_EXPORT void shared_free(void *object) __THROW { + partition_alloc::ScopedDisallowAllocations guard{}; + partition_alloc::PartitionRoot::FreeInlineInUnknownRoot< + partition_alloc::FreeFlags::kNoHooks>(object); +} + +SHIM_ALWAYS_EXPORT void *shared_realloc(void *object, size_t size) __THROW { + partition_alloc::ScopedDisallowAllocations guard{}; + return allocator_shim::internal::PartitionAllocMalloc::SharedAllocator() + ->Realloc(object, + size, ""); +} + +SHIM_ALWAYS_EXPORT void *shared_calloc(size_t n, size_t size) __THROW { + partition_alloc::ScopedDisallowAllocations guard{}; + const size_t total = + partition_alloc::internal::base::CheckMul(n, size).ValueOrDie(); + return allocator_shim::internal::PartitionAllocMalloc::SharedAllocator() + ->AllocInline(total); +} +} diff --git a/rewriter/tests/heap_two_keys/Output/plugin.out b/rewriter/tests/heap_two_keys/Output/fault.out similarity index 100% rename from rewriter/tests/heap_two_keys/Output/plugin.out rename to rewriter/tests/heap_two_keys/Output/fault.out diff --git a/rewriter/tests/heap_two_keys/Output/main.out b/rewriter/tests/heap_two_keys/Output/main.out deleted file mode 100644 index 243708f7c0..0000000000 --- a/rewriter/tests/heap_two_keys/Output/main.out +++ /dev/null @@ -1 +0,0 @@ -CHECK_VIOLATION: did not seg fault as expected diff --git a/rewriter/tests/heap_two_keys/include/plugin/plugin.h b/rewriter/tests/heap_two_keys/include/plugin/plugin.h index 1c5299f778..f8a382739d 100644 --- a/rewriter/tests/heap_two_keys/include/plugin/plugin.h +++ b/rewriter/tests/heap_two_keys/include/plugin/plugin.h @@ -4,13 +4,7 @@ void trigger_compartment_init(void); -#define DECLARE_FUNCTIONS(ty) \ - ty read_##ty(ty *ptr); \ - ty read_##ty##_expect_fault(ty *ptr); \ - void write_##ty(ty *ptr, ty value); \ - void write_##ty##_expect_fault(ty *ptr, ty value) - -DECLARE_FUNCTIONS(uint8_t); -DECLARE_FUNCTIONS(uint16_t); -DECLARE_FUNCTIONS(uint32_t); -DECLARE_FUNCTIONS(uint64_t); +uint8_t read_from_plugin(uint8_t *ptr); +uint8_t read_from_plugin_expect_fault(uint8_t *ptr); +void write_from_plugin(uint8_t *ptr, uint8_t value); +void write_from_plugin_expect_fault(uint8_t *ptr, uint8_t value); diff --git a/rewriter/tests/heap_two_keys/main.c b/rewriter/tests/heap_two_keys/main.c index e06c2ad554..4b14fbbb98 100644 --- a/rewriter/tests/heap_two_keys/main.c +++ b/rewriter/tests/heap_two_keys/main.c @@ -1,9 +1,9 @@ /* RUN: sh -c 'if [ ! -s "heap_two_keys_call_gates_0.ld" ]; then echo "No link args as expected"; exit 0; fi; echo "Unexpected link args"; exit 1;' -TODO: %binary_dir/tests/heap_two_keys/heap_two_keys_main_wrapped 0 | diff %S/Output/plugin.out - -// TODO(src_rewriter_wip): had to change the output here, why? 
-RUN: %binary_dir/tests/heap_two_keys/heap_two_keys_main_wrapped 1 | diff %S/Output/main.out - -TODO: %binary_dir/tests/heap_two_keys/heap_two_keys_main_wrapped 2 | diff %S/Output/clean_exit.out - +RUN: %binary_dir/tests/heap_two_keys/heap_two_keys_main_wrapped 0 | diff %S/Output/clean_exit.out - +RUN: %binary_dir/tests/heap_two_keys/heap_two_keys_main_wrapped 1 | diff %S/Output/fault.out - +RUN: %binary_dir/tests/heap_two_keys/heap_two_keys_main_wrapped 2 | diff %S/Output/fault.out - +RUN: %binary_dir/tests/heap_two_keys/heap_two_keys_main_wrapped 3 | diff %S/Output/clean_exit.out - */ #include #include @@ -20,7 +20,6 @@ INIT_RUNTIME(2); #include // Test that the program can exit without error -// TODO(#112): it cannot. int test_0() { return 0; } @@ -33,7 +32,7 @@ int test_1() { return -1; } *x = 0x09431233; - read_uint32_t_expect_fault(x); + read_from_plugin_expect_fault((uint8_t*)x); free(x); // This test shouldn't return return -1; @@ -47,7 +46,7 @@ int test_2() { LOG("Failed to allocate memory on the heap"); return -1; } - write_uint8_t_expect_fault(x, 12); + write_from_plugin_expect_fault(x, 12); free(x); // This test shouldn't return return -1; @@ -61,7 +60,7 @@ int test_3() { return -1; } *x = 0xffed; - assert(read_uint16_t(x) == 0xffed); + assert(read_from_plugin((uint8_t*)x) == 0xed); shared_free(x); return 0; } diff --git a/rewriter/tests/heap_two_keys/plugin.c b/rewriter/tests/heap_two_keys/plugin.c index 602aa067ef..29e67c4c81 100644 --- a/rewriter/tests/heap_two_keys/plugin.c +++ b/rewriter/tests/heap_two_keys/plugin.c @@ -9,39 +9,39 @@ RUN: cat heap_two_keys_call_gates_1.ld | FileCheck --check-prefix=LINKARGS %s #define IA2_COMPARTMENT 2 #include +// LINKARGS: --wrap=read_from_plugin +uint8_t read_from_plugin(uint8_t *ptr) { + if (ptr == NULL) { + return -1; + } + uint8_t read = *ptr; + return read; +} + +// LINKARGS: --wrap=read_from_plugin_expect_fault +uint8_t read_from_plugin_expect_fault(uint8_t *ptr) { + if (ptr == NULL) { + return -1; + } + uint8_t read = CHECK_VIOLATION(*ptr); + return read; +} + // LINKARGS: --wrap=trigger_compartment_init void trigger_compartment_init(void) {} -#define DEFINE_FUNCTIONS(ty) \ - ty read_##ty(ty *ptr) { \ - if (ptr == NULL) { \ - return -1; \ - } \ - ty read = *ptr; \ - return read; \ - } \ - ty read_##ty##_expect_fault(ty *ptr) { \ - if (ptr == NULL) { \ - return -1; \ - } \ - ty read = CHECK_VIOLATION(*ptr); \ - return read; \ - } \ - void write_##ty(ty *ptr, ty value) { \ - if (ptr == NULL) { \ - return; \ - } \ - *ptr = value; \ - } \ - void write_##ty##_expect_fault(ty *ptr, ty value) { \ - if (ptr == NULL) { \ - return; \ - } \ - CHECK_VIOLATION(*ptr = value); \ - return; \ - } +// LINKARGS: --wrap=write_from_plugin +void write_from_plugin(uint8_t *ptr, uint8_t value) { + if (ptr == NULL) { + return; + } + *ptr = value; +} -DEFINE_FUNCTIONS(uint8_t); -DEFINE_FUNCTIONS(uint16_t); -DEFINE_FUNCTIONS(uint32_t); -DEFINE_FUNCTIONS(uint64_t); +// LINKARGS: --wrap=write_from_plugin_expect_fault +void write_from_plugin_expect_fault(uint8_t *ptr, uint8_t value) { + if (ptr == NULL) { + return; + } + CHECK_VIOLATION(*ptr = value); +}
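Below is an illustrative sketch, not part of the patch itself, of how a follow-up heap_two_keys test could combine the new shared allocator with the plugin accessors introduced above; the test_4 name and its harness wiring are hypothetical, while the declarations mirror shared_allocator.cc and plugin.h from this patch.

/*
 * Sketch only: memory obtained from shared_malloc() should be readable and
 * writable by the plugin compartment, unlike memory from the process-local
 * malloc(), which the tests above expect to fault on.
 */
#include <stddef.h>
#include <stdint.h>

/* From shared_allocator.cc (this patch). */
void *shared_malloc(size_t size);
void shared_free(void *object);

/* From the plugin compartment (plugin.h in this patch). */
uint8_t read_from_plugin(uint8_t *ptr);
void write_from_plugin(uint8_t *ptr, uint8_t value);

int test_4(void) {
  uint8_t *x = shared_malloc(sizeof(*x));
  if (x == NULL) {
    return -1;
  }
  /* The plugin can write shared-heap memory without faulting... */
  write_from_plugin(x, 0x5a);
  /* ...and the main compartment observes the plugin's write directly. */
  int ok = (*x == 0x5a) && (read_from_plugin(x) == 0x5a);
  shared_free(x);
  return ok ? 0 : -1;
}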