diff --git a/chromium_commit b/chromium_commit new file mode 100644 index 000000000..06e943cda --- /dev/null +++ b/chromium_commit @@ -0,0 +1 @@ +8c743918c94d02cc0ea2ee81c89bd52c8f54613d diff --git a/external/chromium/src/base/allocator/partition_allocator/.gn b/external/chromium/src/base/allocator/partition_allocator/.gn new file mode 100644 index 000000000..ee3f91814 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/.gn @@ -0,0 +1,10 @@ +# Copyright 2022 The Chromium Authors +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# The python interpreter to use by default. On Windows, this will look +# for python3.exe and python3.bat. +script_executable = "python3" + +# The location of the build configuration file. +buildconfig = "//build/config/BUILDCONFIG.gn" diff --git a/external/chromium/src/base/allocator/partition_allocator/BUILD.gn b/external/chromium/src/base/allocator/partition_allocator/BUILD.gn new file mode 100644 index 000000000..c38d654a2 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/BUILD.gn @@ -0,0 +1,464 @@ +# Copyright 2022 The Chromium Authors +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//base/allocator/partition_allocator/partition_alloc.gni") +import("//build/buildflag_header.gni") +import("//build/config/chromecast_build.gni") +import("//build/config/chromeos/ui_mode.gni") +import("//build/config/dcheck_always_on.gni") +import("//build/config/logging.gni") + +# Add partition_alloc.gni and import it for partition_alloc configs. + +config("partition_alloc_implementation") { + # See also: `partition_alloc_base/component_export.h` + defines = [ "IS_PARTITION_ALLOC_IMPL" ] +} + +config("memory_tagging") { + if (current_cpu == "arm64" && is_clang && + (is_linux || is_chromeos || is_android || is_fuchsia)) { + # base/ has access to the MTE intrinsics because it needs to use them, + # but they're not backwards compatible. Use base::CPU::has_mte() + # beforehand to confirm or use indirect functions (ifuncs) to select + # an MTE-specific implementation at dynamic link-time. + cflags = [ + "-Xclang", + "-target-feature", + "-Xclang", + "+mte", + ] + } +} + +if (is_fuchsia) { + config("fuchsia_sync_lib") { + libs = [ + "sync", # Used by spinning_mutex.h. 
+ ] + } +} + +if (enable_pkeys && is_debug) { + config("no_stack_protector") { + cflags = [ "-fno-stack-protector" ] + } +} + +component("partition_alloc") { + sources = [ + "address_pool_manager.cc", + "address_pool_manager.h", + "address_pool_manager_bitmap.cc", + "address_pool_manager_bitmap.h", + "address_pool_manager_types.h", + "address_space_randomization.cc", + "address_space_randomization.h", + "address_space_stats.h", + "allocation_guard.cc", + "allocation_guard.h", + "compressed_pointer.cc", + "compressed_pointer.h", + "dangling_raw_ptr_checks.cc", + "dangling_raw_ptr_checks.h", + "freeslot_bitmap.h", + "freeslot_bitmap_constants.h", + "gwp_asan_support.cc", + "gwp_asan_support.h", + "memory_reclaimer.cc", + "memory_reclaimer.h", + "oom.cc", + "oom.h", + "oom_callback.cc", + "oom_callback.h", + "page_allocator.cc", + "page_allocator.h", + "page_allocator_constants.h", + "page_allocator_internal.h", + "partition_address_space.cc", + "partition_address_space.h", + "partition_alloc-inl.h", + "partition_alloc.cc", + "partition_alloc.h", + "partition_alloc_base/atomic_ref_count.h", + "partition_alloc_base/augmentations/compiler_specific.h", + "partition_alloc_base/bit_cast.h", + "partition_alloc_base/bits.h", + "partition_alloc_base/check.cc", + "partition_alloc_base/check.h", + "partition_alloc_base/compiler_specific.h", + "partition_alloc_base/component_export.h", + "partition_alloc_base/cpu.cc", + "partition_alloc_base/cpu.h", + "partition_alloc_base/cxx17_backports.h", + "partition_alloc_base/debug/alias.cc", + "partition_alloc_base/debug/alias.h", + "partition_alloc_base/gtest_prod_util.h", + "partition_alloc_base/immediate_crash.h", + "partition_alloc_base/logging.cc", + "partition_alloc_base/logging.h", + "partition_alloc_base/memory/ref_counted.cc", + "partition_alloc_base/memory/ref_counted.h", + "partition_alloc_base/memory/scoped_policy.h", + "partition_alloc_base/memory/scoped_refptr.h", + "partition_alloc_base/migration_adapter.h", + "partition_alloc_base/no_destructor.h", + "partition_alloc_base/numerics/checked_math.h", + "partition_alloc_base/numerics/checked_math_impl.h", + "partition_alloc_base/numerics/clamped_math.h", + "partition_alloc_base/numerics/clamped_math_impl.h", + "partition_alloc_base/numerics/math_constants.h", + "partition_alloc_base/numerics/ostream_operators.h", + "partition_alloc_base/numerics/ranges.h", + "partition_alloc_base/numerics/safe_conversions.h", + "partition_alloc_base/numerics/safe_conversions_arm_impl.h", + "partition_alloc_base/numerics/safe_conversions_impl.h", + "partition_alloc_base/numerics/safe_math.h", + "partition_alloc_base/numerics/safe_math_arm_impl.h", + "partition_alloc_base/numerics/safe_math_clang_gcc_impl.h", + "partition_alloc_base/numerics/safe_math_shared_impl.h", + "partition_alloc_base/posix/eintr_wrapper.h", + "partition_alloc_base/rand_util.cc", + "partition_alloc_base/rand_util.h", + "partition_alloc_base/scoped_clear_last_error.h", + "partition_alloc_base/strings/stringprintf.cc", + "partition_alloc_base/strings/stringprintf.h", + "partition_alloc_base/system/sys_info.h", + "partition_alloc_base/thread_annotations.h", + "partition_alloc_base/threading/platform_thread.cc", + "partition_alloc_base/threading/platform_thread.h", + "partition_alloc_base/threading/platform_thread_ref.h", + "partition_alloc_base/time/time.cc", + "partition_alloc_base/time/time.h", + "partition_alloc_base/time/time_override.cc", + "partition_alloc_base/time/time_override.h", + "partition_alloc_base/types/strong_alias.h", + 
"partition_alloc_base/win/win_handle_types.h", + "partition_alloc_base/win/win_handle_types_list.inc", + "partition_alloc_base/win/windows_types.h", + "partition_alloc_check.h", + "partition_alloc_config.h", + "partition_alloc_constants.h", + "partition_alloc_forward.h", + "partition_alloc_hooks.cc", + "partition_alloc_hooks.h", + "partition_alloc_notreached.h", + "partition_bucket.cc", + "partition_bucket.h", + "partition_bucket_lookup.h", + "partition_cookie.h", + "partition_direct_map_extent.h", + "partition_freelist_entry.h", + "partition_lock.h", + "partition_oom.cc", + "partition_oom.h", + "partition_page.cc", + "partition_page.h", + "partition_ref_count.h", + "partition_root.cc", + "partition_root.h", + "partition_stats.cc", + "partition_stats.h", + "partition_tag.h", + "partition_tag_bitmap.h", + "partition_tag_types.h", + "partition_tls.h", + "pkey.cc", + "pkey.h", + "random.cc", + "random.h", + "reservation_offset_table.cc", + "reservation_offset_table.h", + "reverse_bytes.h", + "spinning_mutex.cc", + "spinning_mutex.h", + "tagging.cc", + "tagging.h", + "thread_cache.cc", + "thread_cache.h", + "yield_processor.h", + ] + + # Add *Scan sources if building inside Chromium. Currently, + # we see no need to add a more dedicated buildflag for this, as + # we don't anticipate Chromium-external usage of *Scan. + if (build_with_chromium) { + sources += [ + "starscan/logging.h", + "starscan/metadata_allocator.cc", + "starscan/metadata_allocator.h", + "starscan/pcscan.cc", + "starscan/pcscan.h", + "starscan/pcscan_internal.cc", + "starscan/pcscan_internal.h", + "starscan/pcscan_scheduling.cc", + "starscan/pcscan_scheduling.h", + "starscan/raceful_worklist.h", + "starscan/scan_loop.h", + "starscan/snapshot.cc", + "starscan/snapshot.h", + "starscan/stack/stack.cc", + "starscan/stack/stack.h", + "starscan/starscan_fwd.h", + "starscan/state_bitmap.h", + "starscan/stats_collector.cc", + "starscan/stats_collector.h", + "starscan/stats_reporter.h", + "starscan/write_protector.cc", + "starscan/write_protector.h", + ] + } + + defines = [] + if (is_win) { + sources += [ + "page_allocator_internals_win.h", + "partition_alloc_base/rand_util_win.cc", + "partition_alloc_base/scoped_clear_last_error_win.cc", + "partition_alloc_base/threading/platform_thread_win.cc", + "partition_alloc_base/time/time_win.cc", + "partition_tls_win.cc", + ] + } else if (is_posix) { + sources += [ + "page_allocator_internals_posix.cc", + "page_allocator_internals_posix.h", + "partition_alloc_base/files/file_util.h", + "partition_alloc_base/files/file_util_posix.cc", + "partition_alloc_base/posix/safe_strerror.cc", + "partition_alloc_base/posix/safe_strerror.h", + "partition_alloc_base/rand_util_posix.cc", + "partition_alloc_base/threading/platform_thread_internal_posix.h", + "partition_alloc_base/threading/platform_thread_posix.cc", + "partition_alloc_base/time/time_conversion_posix.cc", + ] + + if (is_android || is_chromeos_ash) { + sources += [ "partition_alloc_base/time/time_android.cc" ] + } + if (is_apple) { + sources += [ "partition_alloc_base/time/time_mac.mm" ] + } else { + sources += [ "partition_alloc_base/time/time_now_posix.cc" ] + } + } else if (is_fuchsia) { + sources += [ + "page_allocator_internals_fuchsia.h", + "partition_alloc_base/fuchsia/fuchsia_logging.cc", + "partition_alloc_base/fuchsia/fuchsia_logging.h", + "partition_alloc_base/posix/safe_strerror.cc", + "partition_alloc_base/posix/safe_strerror.h", + "partition_alloc_base/rand_util_fuchsia.cc", + 
"partition_alloc_base/threading/platform_thread_internal_posix.h", + "partition_alloc_base/threading/platform_thread_posix.cc", + "partition_alloc_base/time/time_conversion_posix.cc", + "partition_alloc_base/time/time_fuchsia.cc", + ] + } + if (is_android) { + # Only android build requires native_library, and native_library depends + # on file_path. So file_path is added if is_android = true. + sources += [ + "partition_alloc_base/files/file_path.cc", + "partition_alloc_base/files/file_path.h", + "partition_alloc_base/native_library.cc", + "partition_alloc_base/native_library.h", + "partition_alloc_base/native_library_posix.cc", + ] + } + if (is_apple) { + # Apple-specific utilities + sources += [ + "partition_alloc_base/mac/foundation_util.h", + "partition_alloc_base/mac/foundation_util.mm", + "partition_alloc_base/mac/scoped_cftyperef.h", + "partition_alloc_base/mac/scoped_typeref.h", + ] + if (is_ios) { + sources += [ + "partition_alloc_base/ios/ios_util.h", + "partition_alloc_base/ios/ios_util.mm", + "partition_alloc_base/system/sys_info_ios.mm", + ] + } + if (is_mac) { + sources += [ + "partition_alloc_base/mac/mac_util.h", + "partition_alloc_base/mac/mac_util.mm", + "partition_alloc_base/system/sys_info_mac.mm", + ] + } + } + if (build_with_chromium) { + if (current_cpu == "x64") { + defines += [ "PA_PCSCAN_STACK_SUPPORTED" ] + sources += [ "starscan/stack/asm/x64/push_registers_asm.cc" ] + } else if (current_cpu == "x86") { + defines += [ "PA_PCSCAN_STACK_SUPPORTED" ] + sources += [ "starscan/stack/asm/x86/push_registers_asm.cc" ] + } else if (current_cpu == "arm") { + defines += [ "PA_PCSCAN_STACK_SUPPORTED" ] + sources += [ "starscan/stack/asm/arm/push_registers_asm.cc" ] + } else if (current_cpu == "arm64") { + defines += [ "PA_PCSCAN_STACK_SUPPORTED" ] + sources += [ "starscan/stack/asm/arm64/push_registers_asm.cc" ] + } else { + # To support a trampoline for another arch, please refer to v8/src/heap/base. + } + } + public_deps = [ + ":chromecast_buildflags", + ":chromeos_buildflags", + ":debugging_buildflags", + ":logging_buildflags", + ":partition_alloc_buildflags", + ] + + configs += [ + ":partition_alloc_implementation", + ":memory_tagging", + ] + deps = [] + public_configs = [] + if (is_android) { + # tagging.cc requires __arm_mte_set_* functions. + deps += [ "//third_party/android_ndk:cpu_features" ] + } + if (is_fuchsia) { + public_deps += [ + "//third_party/fuchsia-sdk/sdk/pkg/fit", + "//third_party/fuchsia-sdk/sdk/pkg/sync", + "//third_party/fuchsia-sdk/sdk/pkg/zx", + ] + + # Needed for users of spinning_mutex.h, which for performance reasons, + # contains inlined calls to `libsync` inside the header file. + # It appends an entry to the "libs" section of the dependent target. + public_configs += [ ":fuchsia_sync_lib" ] + } + + frameworks = [] + if (is_mac) { + # SecTaskGetCodeSignStatus needs: + frameworks += [ "Security.framework" ] + } + + if (is_apple) { + frameworks += [ + "CoreFoundation.framework", + "Foundation.framework", + ] + } + + configs += [ "//build/config/compiler:wexit_time_destructors" ] + + # Partition alloc is relatively hot (>1% of cycles for users of CrOS). Use speed-focused + # optimizations for it. + if (!is_debug) { + configs -= [ "//build/config/compiler:default_optimization" ] + configs += [ "//build/config/compiler:optimize_speed" ] + } + + # We want to be able to test pkey mode without access to the default pkey. + # This is incompatible with stack protectors since the TLS won't be pkey-tagged. 
+ if (enable_pkeys && is_debug) { + configs += [ ":no_stack_protector" ] + } +} + +buildflag_header("partition_alloc_buildflags") { + header = "partition_alloc_buildflags.h" + + _record_alloc_info = false + + # GWP-ASan is tied to BRP's "refcount in previous slot" mode, whose + # enablement is already gated on BRP enablement. + _enable_gwp_asan_support = put_ref_count_in_previous_slot + + # TODO(crbug.com/1151236): Need to refactor the following buildflags. + # The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and + # partition alloc. For partition alloc, + # gen/base/allocator/partition_allocator/partition_alloc_buildflags.h + # defines and partition alloc includes the header file. For chrome, + # gen/base/allocator/buildflags.h defines and chrome includes. + flags = [ + "USE_PARTITION_ALLOC=$use_partition_alloc", + "USE_PARTITION_ALLOC_AS_MALLOC=$use_partition_alloc_as_malloc", + + "ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support", + "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks", + "ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks", + "ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment", + "BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr", + "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot", + "USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr", + "USE_ASAN_UNOWNED_PTR=$use_asan_unowned_ptr", + "USE_HOOKABLE_RAW_PTR=$use_hookable_raw_ptr", + "ENABLE_GWP_ASAN_SUPPORT=$_enable_gwp_asan_support", + + # Not to be used directly - instead use + # defined(PA_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) + "ENABLE_MTE_CHECKED_PTR_SUPPORT=$enable_mte_checked_ptr_support", + + "RECORD_ALLOC_INFO=$_record_alloc_info", + "USE_FREESLOT_BITMAP=$use_freeslot_bitmap", + "GLUE_CORE_POOLS=$glue_core_pools", + "ENABLE_POINTER_COMPRESSION=$enable_pointer_compression_support", + "ENABLE_SHADOW_METADATA_FOR_64_BITS_POINTERS=$enable_shadow_metadata", + + # *Scan is currently only used by Chromium. + "STARSCAN=$build_with_chromium", + + "ENABLE_PKEYS=$enable_pkeys", + ] +} + +buildflag_header("chromecast_buildflags") { + header = "chromecast_buildflags.h" + + flags = [ + "PA_IS_CAST_ANDROID=$is_cast_android", + "PA_IS_CASTOS=$is_castos", + ] +} + +buildflag_header("chromeos_buildflags") { + header = "chromeos_buildflags.h" + + flags = [ "PA_IS_CHROMEOS_ASH=$is_chromeos_ash" ] +} + +buildflag_header("logging_buildflags") { + header = "logging_buildflags.h" + + flags = [ "PA_ENABLE_LOG_ERROR_NOT_REACHED=$enable_log_error_not_reached" ] +} + +buildflag_header("debugging_buildflags") { + header = "debugging_buildflags.h" + header_dir = rebase_path(".", "//") + "/partition_alloc_base/debug" + + # Duplicates the setup Chromium uses to define `DCHECK_IS_ON()`, + # but avails it as a buildflag. + _dcheck_is_on = is_debug || dcheck_always_on + + flags = [ + "PA_DCHECK_IS_ON=$_dcheck_is_on", + "PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks", + "PA_DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable", + ] +} + +group("buildflags") { + public_deps = [ + ":chromecast_buildflags", + ":chromeos_buildflags", + ":debugging_buildflags", + ":logging_buildflags", + ":partition_alloc_buildflags", + ] +} +# TODO(crbug.com/1151236): After making partition_alloc a standalone library, +# move test code here. i.e. test("partition_alloc_tests") { ... } and +# test("partition_alloc_perftests"). 
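Each `buildflag_header()` target above generates a header in which every `NAME=$value` entry becomes a macro queried with `BUILDFLAG(NAME)` from `build/buildflag.h`; the allocator sources later in this patch consume them exactly this way (e.g. `#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)` in `address_pool_manager.cc`). A minimal consumer sketch, with `IsBrpCompiledIn()` being a hypothetical helper used only for illustration:

```cpp
// Sketch of consuming the generated buildflag header; the include paths mirror
// the ones already used by the allocator sources in this patch.
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "build/buildflag.h"

// Hypothetical helper: reports whether BackupRefPtr support was compiled in,
// i.e. whether the GN arg enable_backup_ref_ptr_support was true at build time.
bool IsBrpCompiledIn() {
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  return true;
#else
  return false;
#endif
}
```

The same pattern applies to the `chromecast_buildflags`, `chromeos_buildflags`, `logging_buildflags`, and `debugging_buildflags` headers defined above.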
diff --git a/external/chromium/src/base/allocator/partition_allocator/DEPS b/external/chromium/src/base/allocator/partition_allocator/DEPS new file mode 100644 index 000000000..1e2b78b46 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/DEPS @@ -0,0 +1,157 @@ +# Copyright 2021 The Chromium Authors +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# PartitionAlloc is planned to be extracted into a standalone library, and +# therefore dependencies need to be strictly controlled and minimized. + +gclient_gn_args_file = 'partition_allocator/build/config/gclient_args.gni' + +# Only these hosts are allowed for dependencies in this DEPS file. +# This is a subset of chromium/src/DEPS's allowed_hosts. +allowed_hosts = [ + 'chromium.googlesource.com', +] + +vars = { + 'chromium_git': 'https://chromium.googlesource.com', +} + +deps = { + 'partition_allocator/build': + Var('chromium_git') + '/chromium/src/build.git', + 'partition_allocator/buildtools': + Var('chromium_git') + '/chromium/src/buildtools.git', + 'partition_allocator/buildtools/clang_format/script': + Var('chromium_git') + + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git', + 'partition_allocator/buildtools/linux64': { + 'packages': [ + { + 'package': 'gn/gn/linux-${{arch}}', + 'version': 'latest', + } + ], + 'dep_type': 'cipd', + 'condition': 'host_os == "linux"', + }, + 'partition_allocator/buildtools/mac': { + 'packages': [ + { + 'package': 'gn/gn/mac-${{arch}}', + 'version': 'latest', + } + ], + 'dep_type': 'cipd', + 'condition': 'host_os == "mac"', + }, + 'partition_allocator/buildtools/win': { + 'packages': [ + { + 'package': 'gn/gn/windows-amd64', + 'version': 'latest', + } + ], + 'dep_type': 'cipd', + 'condition': 'host_os == "win"', + }, + 'partition_allocator/buildtools/third_party/libc++/trunk': + Var('chromium_git') + '/external/github.com/llvm/llvm-project/libcxx.git', + 'partition_allocator/buildtools/third_party/libc++abi/trunk': + Var('chromium_git') + + '/external/github.com/llvm/llvm-project/libcxxabi.git', + 'partition_allocator/tools/clang': + Var('chromium_git') + '/chromium/src/tools/clang.git', +} + +hooks = [ + { + 'name': 'sysroot_arm', + 'pattern': '.', + 'condition': 'checkout_linux and checkout_arm', + 'action': [ + 'python3', + 'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py', + '--arch=arm'], + }, + { + 'name': 'sysroot_arm64', + 'pattern': '.', + 'condition': 'checkout_linux and checkout_arm64', + 'action': [ + 'python3', + 'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py', + '--arch=arm64'], + }, + { + 'name': 'sysroot_x86', + 'pattern': '.', + 'condition': 'checkout_linux and (checkout_x86 or checkout_x64)', + 'action': [ + 'python3', + 'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py', + '--arch=x86'], + }, + { + 'name': 'sysroot_mips', + 'pattern': '.', + 'condition': 'checkout_linux and checkout_mips', + 'action': [ + 'python3', + 'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py', + '--arch=mips'], + }, + { + 'name': 'sysroot_mips64', + 'pattern': '.', + 'condition': 'checkout_linux and checkout_mips64', + 'action': [ + 'python3', + 'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py', + '--arch=mips64el'], + }, + { + 'name': 'sysroot_x64', + 'pattern': '.', + 'condition': 'checkout_linux and checkout_x64', + 'action': [ + 'python3', + 'partition_allocator/build/linux/sysroot_scripts/install-sysroot.py', + 
'--arch=x64'], + }, + { + # Update the prebuilt clang toolchain. + # Note: On Win, this should run after win_toolchain, as it may use it. + 'name': 'clang', + 'pattern': '.', + 'action': ['python3', 'partition_allocator/tools/clang/scripts/update.py'], + }, +] + +noparent = True + +include_rules = [ + "+build/build_config.h", + "+build/buildflag.h", + "+third_party/lss/linux_syscall_support.h", +] + +specific_include_rules = { + ".*_(perf|unit)test\.cc$": [ + "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h", + "+base/debug/proc_maps_linux.h", + "+base/system/sys_info.h", + "+base/test/gtest_util.h", + "+base/timer/lap_timer.h", + "+base/win/windows_version.h", + "+testing/gmock/include/gmock/gmock.h", + "+testing/gtest/include/gtest/gtest.h", + "+testing/perf/perf_result_reporter.h", + ], + "extended_api\.cc$": [ + "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h", + ], + "gtest_prod_util\.h$": [ + "+testing/gtest/include/gtest/gtest_prod.h", + ], +} diff --git a/external/chromium/src/base/allocator/partition_allocator/DIR_METADATA b/external/chromium/src/base/allocator/partition_allocator/DIR_METADATA new file mode 100644 index 000000000..41685caa4 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/DIR_METADATA @@ -0,0 +1,6 @@ +monorail { + component: "Blink>MemoryAllocator>Partition" +} + +# Also security-dev@chromium.org +team_email: "platform-architecture-dev@chromium.org" diff --git a/external/chromium/src/base/allocator/partition_allocator/OWNERS b/external/chromium/src/base/allocator/partition_allocator/OWNERS new file mode 100644 index 000000000..dd2d30768 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/OWNERS @@ -0,0 +1,4 @@ +bartekn@chromium.org +haraken@chromium.org +lizeb@chromium.org +tasak@google.com diff --git a/external/chromium/src/base/allocator/partition_allocator/PartitionAlloc.md b/external/chromium/src/base/allocator/partition_allocator/PartitionAlloc.md new file mode 100644 index 000000000..4cc3a4883 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/PartitionAlloc.md @@ -0,0 +1,203 @@ +# PartitionAlloc Design + +This document describes PartitionAlloc at a high level, with some architectural +details. For implementation details, see the comments in +`partition_alloc_constants.h`. + +## Quick Links + +* [Glossary](./glossary.md): Definitions of terms commonly used in + PartitionAlloc. The present document largely avoids defining terms. + +* [Build Config](./build_config.md): Pertinent GN args, buildflags, and + macros. + +* [Chrome-External Builds](./external_builds.md): Further considerations + for standalone PartitionAlloc, plus an embedder's guide for some extra + GN args. + +## Overview + +PartitionAlloc is a memory allocator optimized for space efficiency, +allocation latency, and security. + +### Performance + +PartitionAlloc is designed to be extremely fast in its fast paths. The fast +paths of allocation and deallocation require very few (reasonably predictable) +branches. The number of operations in the fast paths is minimal, leading to the +possibility of inlining. + +![The central allocator manages slots and spans. It is locked on a + per-partition basis. Separately, the thread cache consumes slots + from the central allocator, allowing it to hand out memory + quickly to individual threads.](./dot/layers.png) + +However, even the fast path isn't the fastest, because it requires taking +a per-partition lock. 
Although we optimized the lock, there was still room for +improvement; to this end, we introduced the thread cache. +The thread cache has been tailored to satisfy a vast majority of requests by +allocating from and releasing memory to the main allocator in batches, +amortizing lock acquisition and further improving locality while not trapping +excess memory. + +### Security + +Security is one of the important goals of PartitionAlloc. + +PartitionAlloc guarantees that different partitions exist in different regions +of the process's address space. When the caller has freed all objects contained +in a page in a partition, PartitionAlloc returns the physical memory to the +operating system, but continues to reserve the region of address space. +PartitionAlloc will only reuse an address space region for the same partition. + +Similarly, one page can contain only objects from the same bucket. +When freed, PartitionAlloc returns the physical memory, but continues to reserve +the region for this very bucket. + +The above techniques help avoid type confusion attacks. Note, however, these +apply only to normal buckets and not to direct map, as it'd waste too much +address space. + +PartitionAlloc also guarantees that: + +* Linear overflows/underflows cannot corrupt into, out of, or between + partitions. There are guard pages at the beginning and the end of each memory + region owned by a partition. + +* Linear overflows/underflows cannot corrupt the allocation metadata. + PartitionAlloc records metadata in a dedicated, out-of-line region (not + adjacent to objects), surrounded by guard pages. (Freelist pointers are an + exception.) + +* Partial pointer overwrite of freelist pointer should fault. + +* Direct map allocations have guard pages at the beginning and the end. + +### Alignment + +PartitionAlloc guarantees that returned pointers are aligned on +`partition_alloc::internal::kAlignment` boundary (typically 16B on +64-bit systems, and 8B on 32-bit). + +PartitionAlloc also supports higher levels of alignment, that can be requested +via `PartitionAlloc::AlignedAllocWithFlags()` or platform-specific APIs (such as +`posix_memalign()`). The requested +alignment has to be a power of two. PartitionAlloc reserves the right to round +up the requested size to the nearest power of two, greater than or equal to the +requested alignment. This may be wasteful, but allows taking advantage of +natural PartitionAlloc alignment guarantees. Allocations with an alignment +requirement greater than `partition_alloc::internal::kAlignment` are expected +to be very rare. + +## Architecture + +### Layout in Memory + +PartitionAlloc handles normal buckets by reserving (not committing) 2MiB super +pages. Each super page is split into partition pages. +The first and the last partition page are permanently inaccessible and serve +as guard pages, with the exception of one system page in the middle of the first +partition page that holds metadata (32B struct per partition page). + +![A super page is shown full of slot spans. The slot spans are logically + strung together to form buckets. At both extremes of the super page + are guard pages. PartitionAlloc metadata is hidden inside the + guard pages at the "front."](./dot/super-page.png) + +* The slot span numbers provide a visual hint of their size (in partition + pages). +* Colors provide a visual hint of the bucket to which the slot span belongs. 
+ * Although only five colors are shown, in reality, a super page holds + tens of slot spans, some of which belong to the same bucket. +* The system page that holds metadata tracks each partition page with one 32B + [`PartitionPage` struct][PartitionPage], which is either + * a [`SlotSpanMetadata`][SlotSpanMetadata] ("v"s in the diagram) or + * a [`SubsequentPageMetadata`][SubsequentPageMetadata] ("+"s in the + diagram). +* Gray fill denotes guard pages (one partition page each at the head and tail + of each super page). +* In some configurations, PartitionAlloc stores more metadata than can + fit in the one system page at the front. These are the bitmaps for + StarScan and `MTECheckedPtr`, and they are relegated to the head of + what would otherwise be usable space for slot spans. One, both, or + none of these bitmaps may be present, depending on build + configuration, runtime configuration, and type of allocation. + See [`SuperPagePayloadBegin()`][payload-start] for details. + +As allocation requests arrive, there is eventually a need to allocate a new slot +span. +Address space for such a slot span is carved out from the last super page. If +not enough space, a new super page is allocated. Due to varying sizes of slot +span, this may lead to leaving space unused (we never go back to fill previous +super pages), which is fine because this memory is merely reserved, which is far +less precious than committed memory. Note also that address space reserved for a +slot span is never released, even if the slot span isn't used for a long time. + +All slots in a newly allocated slot span are *free*, i.e. available for +allocation. + +### Freelist Pointers + +All free slots within a slot span are chained into a singly-linked free-list, +by writing the *next* pointer at the beginning of each slot, and the head of the +list is written in the metadata struct. + +However, writing a pointer in each free slot of a newly allocated span would +require committing and faulting in physical pages upfront, which would be +unacceptable. Therefore, PartitionAlloc has a concept of *provisioning slots*. +Only provisioned slots are chained into the freelist. +Once provisioned slots in a span are depleted, then another page worth of slots +is provisioned (note, a slot that crosses a page boundary only gets +provisioned with slots of the next page). See +`PartitionBucket::ProvisionMoreSlotsAndAllocOne()` for more details. + +Freelist pointers are stored at the beginning of each free slot. As such, they +are the only metadata that is inline, i.e. stored among the +objects. This makes them prone to overruns. On little-endian systems, the +pointers are encoded by reversing byte order, so that partial overruns will very +likely result in destroying the pointer, as opposed to forming a valid pointer +to a nearby location. + +Furthermore, a shadow of a freelist pointer is stored next to it, encoded in a +different manner. This helps PartitionAlloc detect corruptions. + +### Slot Span States + +A slot span can be in any of 4 states: +* *Full*. A full span has no free slots. +* *Empty*. An empty span has no allocated slots, only free slots. +* *Active*. An active span is anything in between the above two. +* *Decommitted*. A decommitted span is a special case of an empty span, where + all pages are decommitted from memory. + +PartitionAlloc prioritizes getting an available slot from an active span, over +an empty one, in hope that the latter can be soon transitioned into a +decommitted state, thus releasing memory. 
There is no mechanism, however, to +prioritize selection of a slot span based on the number of already allocated +slots. + +An empty span becomes decommitted either when there are too many empty spans +(FIFO), or when `PartitionRoot::PurgeMemory()` gets invoked periodically (or in +low memory pressure conditions). An allocation can be satisfied from +a decommitted span if there are no active or empty spans available. The slot +provisioning mechanism kicks back in, committing the pages gradually as needed, +and the span becomes active. (There is currently no other way +to unprovision slots than decommitting the entire span). + +As mentioned above, a bucket is a collection of slot spans containing slots of +the same size. In fact, each bucket has 3 linked-lists, chaining active, empty +and decommitted spans (see `PartitionBucket::*_slot_spans_head`). +There is no need for a full span list. The lists are updated lazily. An empty, +decommitted or full span may stay on the active list for some time, until +`PartitionBucket::SetNewActiveSlotSpan()` encounters it. +A decommitted span may stay on the empty list for some time, +until `PartitionBucket::SlowPathAlloc()` encounters it. However, +the inaccuracy can't happen in the other direction, i.e. an active span can only +be on the active list, and an empty span can only be on the active or empty +list. + +[PartitionPage]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=314;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4 +[SlotSpanMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=120;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4 +[SubsequentPageMetadata]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_page.h;l=295;drc=e5b03e85ea180d1d1ab0dec471c7fd5d1706a9e4 +[payload-start]: https://source.chromium.org/chromium/chromium/src/+/35b2deed603dedd4abb37f204d516ed62aa2b85c:base/allocator/partition_allocator/partition_page.h;l=454 diff --git a/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.cc b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.cc new file mode 100644 index 000000000..0467a2504 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.cc @@ -0,0 +1,543 @@ +// Copyright 2020 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "base/allocator/partition_allocator/address_pool_manager.h" + +#include +#include +#include +#include + +#include "base/allocator/partition_allocator/address_space_stats.h" +#include "base/allocator/partition_allocator/page_allocator.h" +#include "base/allocator/partition_allocator/page_allocator_constants.h" +#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" +#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" +#include "base/allocator/partition_allocator/partition_alloc_check.h" +#include "base/allocator/partition_allocator/partition_alloc_constants.h" +#include "base/allocator/partition_allocator/partition_alloc_notreached.h" +#include "base/allocator/partition_allocator/pkey.h" +#include "base/allocator/partition_allocator/reservation_offset_table.h" +#include "build/build_config.h" + +#if BUILDFLAG(IS_APPLE) || BUILDFLAG(ENABLE_PKEYS) +#include +#endif + +namespace partition_alloc::internal { + +AddressPoolManager AddressPoolManager::singleton_; + +// static +AddressPoolManager& AddressPoolManager::GetInstance() { + return singleton_; +} + +#if defined(PA_HAS_64_BITS_POINTERS) + +namespace { + +// This will crash if the range cannot be decommitted. +void DecommitPages(uintptr_t address, size_t size) { + // Callers rely on the pages being zero-initialized when recommitting them. + // |DecommitSystemPages| doesn't guarantee this on all operating systems, in + // particular on macOS, but |DecommitAndZeroSystemPages| does. + DecommitAndZeroSystemPages(address, size); +} + +} // namespace + +void AddressPoolManager::Add(pool_handle handle, uintptr_t ptr, size_t length) { + PA_DCHECK(!(ptr & kSuperPageOffsetMask)); + PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask)); + PA_CHECK(handle > 0 && handle <= std::size(aligned_pools_.pools_)); + + Pool* pool = GetPool(handle); + PA_CHECK(!pool->IsInitialized()); + pool->Initialize(ptr, length); +} + +void AddressPoolManager::GetPoolUsedSuperPages( + pool_handle handle, + std::bitset& used) { + Pool* pool = GetPool(handle); + if (!pool) + return; + + pool->GetUsedSuperPages(used); +} + +uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) { + Pool* pool = GetPool(handle); + if (!pool) + return 0; + + return pool->GetBaseAddress(); +} + +void AddressPoolManager::ResetForTesting() { + for (pool_handle i = 0; i < std::size(aligned_pools_.pools_); ++i) + aligned_pools_.pools_[i].Reset(); +} + +void AddressPoolManager::Remove(pool_handle handle) { + Pool* pool = GetPool(handle); + PA_DCHECK(pool->IsInitialized()); + pool->Reset(); +} + +uintptr_t AddressPoolManager::Reserve(pool_handle handle, + uintptr_t requested_address, + size_t length) { + Pool* pool = GetPool(handle); + if (!requested_address) + return pool->FindChunk(length); + const bool is_available = pool->TryReserveChunk(requested_address, length); + if (is_available) + return requested_address; + return pool->FindChunk(length); +} + +void AddressPoolManager::UnreserveAndDecommit(pool_handle handle, + uintptr_t address, + size_t length) { + PA_DCHECK(0 < handle && handle <= kNumPools); + Pool* pool = GetPool(handle); + PA_DCHECK(pool->IsInitialized()); + DecommitPages(address, length); + pool->FreeChunk(address, length); +} + +void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) { + PA_CHECK(ptr != 0); + PA_CHECK(!(ptr & kSuperPageOffsetMask)); + PA_CHECK(!(length & kSuperPageOffsetMask)); + address_begin_ = ptr; +#if BUILDFLAG(PA_DCHECK_IS_ON) + address_end_ = ptr + length; + 
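+  // |address_end_| is tracked only in DCHECK builds; besides the check just
+  // below, it backs the bounds assertions in FindChunk() and FreeChunk().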
PA_DCHECK(address_begin_ < address_end_); +#endif + + total_bits_ = length / kSuperPageSize; + PA_CHECK(total_bits_ <= kMaxSuperPagesInPool); + + ScopedGuard scoped_lock(lock_); + alloc_bitset_.reset(); + bit_hint_ = 0; +} + +bool AddressPoolManager::Pool::IsInitialized() { + return address_begin_ != 0; +} + +void AddressPoolManager::Pool::Reset() { + address_begin_ = 0; +} + +void AddressPoolManager::Pool::GetUsedSuperPages( + std::bitset& used) { + ScopedGuard scoped_lock(lock_); + + PA_DCHECK(IsInitialized()); + used = alloc_bitset_; +} + +uintptr_t AddressPoolManager::Pool::GetBaseAddress() { + PA_DCHECK(IsInitialized()); + return address_begin_; +} + +uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) { + ScopedGuard scoped_lock(lock_); + + PA_DCHECK(!(requested_size & kSuperPageOffsetMask)); + const size_t need_bits = requested_size >> kSuperPageShift; + + // Use first-fit policy to find an available chunk from free chunks. Start + // from |bit_hint_|, because we know there are no free chunks before. + size_t beg_bit = bit_hint_; + size_t curr_bit = bit_hint_; + while (true) { + // |end_bit| points 1 past the last bit that needs to be 0. If it goes past + // |total_bits_|, return |nullptr| to signal no free chunk was found. + size_t end_bit = beg_bit + need_bits; + if (end_bit > total_bits_) + return 0; + + bool found = true; + for (; curr_bit < end_bit; ++curr_bit) { + if (alloc_bitset_.test(curr_bit)) { + // The bit was set, so this chunk isn't entirely free. Set |found=false| + // to ensure the outer loop continues. However, continue the inner loop + // to set |beg_bit| just past the last set bit in the investigated + // chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the + // next outer loop pass from checking the same bits. + beg_bit = curr_bit + 1; + found = false; + if (bit_hint_ == curr_bit) + ++bit_hint_; + } + } + + // An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to + // mark as allocated) and return the allocated address. + if (found) { + for (size_t i = beg_bit; i < end_bit; ++i) { + PA_DCHECK(!alloc_bitset_.test(i)); + alloc_bitset_.set(i); + } + if (bit_hint_ == beg_bit) { + bit_hint_ = end_bit; + } + uintptr_t address = address_begin_ + beg_bit * kSuperPageSize; +#if BUILDFLAG(PA_DCHECK_IS_ON) + PA_DCHECK(address + requested_size <= address_end_); +#endif + return address; + } + } + + PA_NOTREACHED(); + return 0; +} + +bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address, + size_t requested_size) { + ScopedGuard scoped_lock(lock_); + PA_DCHECK(!(address & kSuperPageOffsetMask)); + PA_DCHECK(!(requested_size & kSuperPageOffsetMask)); + const size_t begin_bit = (address - address_begin_) / kSuperPageSize; + const size_t need_bits = requested_size / kSuperPageSize; + const size_t end_bit = begin_bit + need_bits; + // Check that requested address is not too high. + if (end_bit > total_bits_) + return false; + // Check if any bit of the requested region is set already. + for (size_t i = begin_bit; i < end_bit; ++i) { + if (alloc_bitset_.test(i)) + return false; + } + // Otherwise, set the bits. 
+ for (size_t i = begin_bit; i < end_bit; ++i) { + alloc_bitset_.set(i); + } + return true; +} + +void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) { + ScopedGuard scoped_lock(lock_); + + PA_DCHECK(!(address & kSuperPageOffsetMask)); + PA_DCHECK(!(free_size & kSuperPageOffsetMask)); + + PA_DCHECK(address_begin_ <= address); +#if BUILDFLAG(PA_DCHECK_IS_ON) + PA_DCHECK(address + free_size <= address_end_); +#endif + + const size_t beg_bit = (address - address_begin_) / kSuperPageSize; + const size_t end_bit = beg_bit + free_size / kSuperPageSize; + for (size_t i = beg_bit; i < end_bit; ++i) { + PA_DCHECK(alloc_bitset_.test(i)); + alloc_bitset_.reset(i); + } + bit_hint_ = std::min(bit_hint_, beg_bit); +} + +void AddressPoolManager::Pool::GetStats(PoolStats* stats) { + std::bitset pages; + size_t i; + { + ScopedGuard scoped_lock(lock_); + pages = alloc_bitset_; + i = bit_hint_; + } + + stats->usage = pages.count(); + + size_t largest_run = 0; + size_t current_run = 0; + for (; i < total_bits_; ++i) { + if (!pages[i]) { + current_run += 1; + continue; + } else if (current_run > largest_run) { + largest_run = current_run; + } + current_run = 0; + } + + // Fell out of the loop with last bit being zero. Check once more. + if (current_run > largest_run) { + largest_run = current_run; + } + stats->largest_available_reservation = largest_run; +} + +void AddressPoolManager::GetPoolStats(const pool_handle handle, + PoolStats* stats) { + Pool* pool = GetPool(handle); + if (!pool->IsInitialized()) { + return; + } + pool->GetStats(stats); +} + +bool AddressPoolManager::GetStats(AddressSpaceStats* stats) { + // Get 64-bit pool stats. + GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats); +#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats); +#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + if (IsConfigurablePoolAvailable()) { + GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats); + } +#if BUILDFLAG(ENABLE_PKEYS) + GetPoolStats(kPkeyPoolHandle, &stats->pkey_pool_stats); +#endif + return true; +} + +#else // defined(PA_HAS_64_BITS_POINTERS) + +static_assert( + kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap == + 0, + "kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap."); +static_assert( + kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0, + "kSuperPageSize must be larger than kBytesPer1BitOfBRPPoolBitmap."); +static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >= + AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap, + "kGuardBitsOfBRPPoolBitmap must be larger than or equal to " + "kGuardOffsetOfBRPPoolBitmap."); + +template +void SetBitmap(std::bitset& bitmap, + size_t start_bit, + size_t bit_length) { + const size_t end_bit = start_bit + bit_length; + PA_DCHECK(start_bit <= bitsize); + PA_DCHECK(end_bit <= bitsize); + + for (size_t i = start_bit; i < end_bit; ++i) { + PA_DCHECK(!bitmap.test(i)); + bitmap.set(i); + } +} + +template +void ResetBitmap(std::bitset& bitmap, + size_t start_bit, + size_t bit_length) { + const size_t end_bit = start_bit + bit_length; + PA_DCHECK(start_bit <= bitsize); + PA_DCHECK(end_bit <= bitsize); + + for (size_t i = start_bit; i < end_bit; ++i) { + PA_DCHECK(bitmap.test(i)); + bitmap.reset(i); + } +} + +uintptr_t AddressPoolManager::Reserve(pool_handle handle, + uintptr_t requested_address, + size_t length) { + PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask())); + uintptr_t 
address = + AllocPages(requested_address, length, kSuperPageSize, + PageAccessibilityConfiguration( + PageAccessibilityConfiguration::kInaccessible), + PageTag::kPartitionAlloc); + return address; +} + +void AddressPoolManager::UnreserveAndDecommit(pool_handle handle, + uintptr_t address, + size_t length) { + PA_DCHECK(!(address & kSuperPageOffsetMask)); + PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask())); + FreePages(address, length); +} + +void AddressPoolManager::MarkUsed(pool_handle handle, + uintptr_t address, + size_t length) { + ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock()); + // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used. +#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + if (handle == kBRPPoolHandle) { + PA_DCHECK( + (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0); + + // Make IsManagedByBRPPoolPool() return false when an address inside the + // first or the last PartitionPageSize()-bytes block is given: + // + // ------+---+---------------+---+---- + // memory ..... | B | managed by PA | B | ... + // regions ------+---+---------------+---+---- + // + // B: PartitionPageSize()-bytes block. This is used internally by the + // allocator and is not available for callers. + // + // This is required to avoid crash caused by the following code: + // { + // // Assume this allocation happens outside of PartitionAlloc. + // raw_ptr ptr = new T[20]; + // for (size_t i = 0; i < 20; i ++) { ptr++; } + // // |ptr| may point to an address inside 'B'. + // } + // + // Suppose that |ptr| points to an address inside B after the loop. If + // IsManagedByBRPPoolPool(ptr) were to return true, ~raw_ptr() would + // crash, since the memory is not allocated by PartitionAlloc. + SetBitmap(AddressPoolManagerBitmap::brp_pool_bits_, + (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) + + AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap, + (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) - + AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap); + } else +#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + { + PA_DCHECK(handle == kRegularPoolHandle); + PA_DCHECK( + (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) == + 0); + SetBitmap(AddressPoolManagerBitmap::regular_pool_bits_, + address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap, + length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap); + } +} + +void AddressPoolManager::MarkUnused(pool_handle handle, + uintptr_t address, + size_t length) { + // Address regions allocated for normal buckets are never released, so this + // function can only be called for direct map. However, do not DCHECK on + // IsManagedByDirectMap(address), because many tests test this function using + // small allocations. + + ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock()); + // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used. +#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + if (handle == kBRPPoolHandle) { + PA_DCHECK( + (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0); + + // Make IsManagedByBRPPoolPool() return false when an address inside the + // first or the last PartitionPageSize()-bytes block is given. 
+ // (See MarkUsed comment) + ResetBitmap( + AddressPoolManagerBitmap::brp_pool_bits_, + (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) + + AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap, + (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) - + AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap); + } else +#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + { + PA_DCHECK(handle == kRegularPoolHandle); + PA_DCHECK( + (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) == + 0); + ResetBitmap( + AddressPoolManagerBitmap::regular_pool_bits_, + address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap, + length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap); + } +} + +void AddressPoolManager::ResetForTesting() { + ScopedGuard guard(AddressPoolManagerBitmap::GetLock()); + AddressPoolManagerBitmap::regular_pool_bits_.reset(); + AddressPoolManagerBitmap::brp_pool_bits_.reset(); +} + +namespace { + +// Counts super pages in use represented by `bitmap`. +template +size_t CountUsedSuperPages(const std::bitset& bitmap, + const size_t bits_per_super_page) { + size_t count = 0; + size_t bit_index = 0; + + // Stride over super pages. + for (size_t super_page_index = 0; bit_index < bitsize; ++super_page_index) { + // Stride over the bits comprising the super page. + for (bit_index = super_page_index * bits_per_super_page; + bit_index < (super_page_index + 1) * bits_per_super_page && + bit_index < bitsize; + ++bit_index) { + if (bitmap[bit_index]) { + count += 1; + // Move on to the next super page. + break; + } + } + } + return count; +} + +} // namespace + +bool AddressPoolManager::GetStats(AddressSpaceStats* stats) { + std::bitset regular_pool_bits; + std::bitset brp_pool_bits; + { + ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock()); + regular_pool_bits = AddressPoolManagerBitmap::regular_pool_bits_; + brp_pool_bits = AddressPoolManagerBitmap::brp_pool_bits_; + } // scoped_lock + + // Pool usage is read out from the address pool bitmaps. + // The output stats are sized in super pages, so we interpret + // the bitmaps into super page usage. + static_assert( + kSuperPageSize % + AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap == + 0, + "information loss when calculating metrics"); + constexpr size_t kRegularPoolBitsPerSuperPage = + kSuperPageSize / + AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap; + + // Get 32-bit pool usage. + stats->regular_pool_stats.usage = + CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage); +#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + static_assert( + kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap == + 0, + "information loss when calculating metrics"); + constexpr size_t kBRPPoolBitsPerSuperPage = + kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap; + stats->brp_pool_stats.usage = + CountUsedSuperPages(brp_pool_bits, kBRPPoolBitsPerSuperPage); + + // Get blocklist size. + for (const auto& blocked : + AddressPoolManagerBitmap::brp_forbidden_super_page_map_) { + if (blocked.load(std::memory_order_relaxed)) + stats->blocklist_size += 1; + } + + // Count failures in finding non-blocklisted addresses. 
+ stats->blocklist_hit_count = + AddressPoolManagerBitmap::blocklist_hit_count_.load( + std::memory_order_relaxed); +#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + return true; +} + +#endif // defined(PA_HAS_64_BITS_POINTERS) + +void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) { + AddressSpaceStats stats{}; + if (GetStats(&stats)) { + dumper->DumpStats(&stats); + } +} + +} // namespace partition_alloc::internal diff --git a/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.h b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.h new file mode 100644 index 000000000..7b4d4bae8 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager.h @@ -0,0 +1,178 @@ +// Copyright 2020 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_ +#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_ + +#include +#include + +#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h" +#include "base/allocator/partition_allocator/address_pool_manager_types.h" +#include "base/allocator/partition_allocator/partition_address_space.h" +#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" +#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" +#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" +#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h" +#include "base/allocator/partition_allocator/partition_alloc_check.h" +#include "base/allocator/partition_allocator/partition_alloc_config.h" +#include "base/allocator/partition_allocator/partition_alloc_constants.h" +#include "base/allocator/partition_allocator/partition_lock.h" +#include "build/build_config.h" + +namespace partition_alloc { + +class AddressSpaceStatsDumper; +struct AddressSpaceStats; +struct PoolStats; + +} // namespace partition_alloc + +namespace partition_alloc::internal { + +// (64bit version) +// AddressPoolManager takes a reserved virtual address space and manages address +// space allocation. +// +// AddressPoolManager (currently) supports up to 4 pools. Each pool manages a +// contiguous reserved address space. Alloc() takes a pool_handle and returns +// address regions from the specified pool. Free() also takes a pool_handle and +// returns the address region back to the manager. +// +// (32bit version) +// AddressPoolManager wraps AllocPages and FreePages and remembers allocated +// address regions using bitmaps. IsManagedByPartitionAlloc*Pool use the bitmaps +// to judge whether a given address is in a pool that supports BackupRefPtr or +// in a pool that doesn't. All PartitionAlloc allocations must be in either of +// the pools. +class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManager { + public: + static AddressPoolManager& GetInstance(); + + AddressPoolManager(const AddressPoolManager&) = delete; + AddressPoolManager& operator=(const AddressPoolManager&) = delete; + +#if defined(PA_HAS_64_BITS_POINTERS) + void Add(pool_handle handle, uintptr_t address, size_t length); + void Remove(pool_handle handle); + + // Populate a |used| bitset of superpages currently in use. + void GetPoolUsedSuperPages(pool_handle handle, + std::bitset& used); + + // Return the base address of a pool. 
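+  // Like GetPoolUsedSuperPages(), this is only compiled for the 64-bit
+  // configuration, where each pool owns a single contiguous reservation.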
+ uintptr_t GetPoolBaseAddress(pool_handle handle); +#endif + + // Reserves address space from the pool. + uintptr_t Reserve(pool_handle handle, + uintptr_t requested_address, + size_t length); + + // Frees address space back to the pool and decommits underlying system pages. + void UnreserveAndDecommit(pool_handle handle, + uintptr_t address, + size_t length); + void ResetForTesting(); + +#if !defined(PA_HAS_64_BITS_POINTERS) + void MarkUsed(pool_handle handle, uintptr_t address, size_t size); + void MarkUnused(pool_handle handle, uintptr_t address, size_t size); + + static bool IsManagedByRegularPool(uintptr_t address) { + return AddressPoolManagerBitmap::IsManagedByRegularPool(address); + } + + static bool IsManagedByBRPPool(uintptr_t address) { + return AddressPoolManagerBitmap::IsManagedByBRPPool(address); + } +#endif // !defined(PA_HAS_64_BITS_POINTERS) + + void DumpStats(AddressSpaceStatsDumper* dumper); + + private: + friend class AddressPoolManagerForTesting; +#if BUILDFLAG(ENABLE_PKEYS) + // If we use a pkey pool, we need to tag its metadata with the pkey. Allow the + // function to get access to the pool pointer. + friend void TagGlobalsWithPkey(int pkey); +#endif + + constexpr AddressPoolManager() = default; + ~AddressPoolManager() = default; + + // Populates `stats` if applicable. + // Returns whether `stats` was populated. (They might not be, e.g. + // if PartitionAlloc is wholly unused in this process.) + bool GetStats(AddressSpaceStats* stats); + +#if defined(PA_HAS_64_BITS_POINTERS) + class Pool { + public: + constexpr Pool() = default; + ~Pool() = default; + + Pool(const Pool&) = delete; + Pool& operator=(const Pool&) = delete; + + void Initialize(uintptr_t ptr, size_t length); + bool IsInitialized(); + void Reset(); + + uintptr_t FindChunk(size_t size); + void FreeChunk(uintptr_t address, size_t size); + + bool TryReserveChunk(uintptr_t address, size_t size); + + void GetUsedSuperPages(std::bitset& used); + uintptr_t GetBaseAddress(); + + void GetStats(PoolStats* stats); + + private: + Lock lock_; + + // The bitset stores the allocation state of the address pool. 1 bit per + // super-page: 1 = allocated, 0 = free. + std::bitset alloc_bitset_ PA_GUARDED_BY(lock_); + + // An index of a bit in the bitset before which we know for sure there all + // 1s. This is a best-effort hint in the sense that there still may be lots + // of 1s after this index, but at least we know there is no point in + // starting the search before it. + size_t bit_hint_ PA_GUARDED_BY(lock_) = 0; + + size_t total_bits_ = 0; + uintptr_t address_begin_ = 0; +#if BUILDFLAG(PA_DCHECK_IS_ON) + uintptr_t address_end_ = 0; +#endif + }; + + PA_ALWAYS_INLINE Pool* GetPool(pool_handle handle) { + PA_DCHECK(0 < handle && handle <= kNumPools); + return &aligned_pools_.pools_[handle - 1]; + } + + // Gets the stats for the pool identified by `handle`, if + // initialized. + void GetPoolStats(pool_handle handle, PoolStats* stats); + + // If pkey support is enabled, we need to pkey-tag the pkey pool (which needs + // to be last). For this, we need to add padding in front of the pools so that + // pkey one starts on a page boundary. 
+ struct { + char pad_[PA_PKEY_ARRAY_PAD_SZ(Pool, kNumPools)] = {}; + Pool pools_[kNumPools]; + char pad_after_[PA_PKEY_FILL_PAGE_SZ(sizeof(Pool))] = {}; + } aligned_pools_ PA_PKEY_ALIGN; + +#endif // defined(PA_HAS_64_BITS_POINTERS) + + static PA_CONSTINIT AddressPoolManager singleton_; +}; + +} // namespace partition_alloc::internal + +#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_ diff --git a/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_bitmap.cc b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_bitmap.cc new file mode 100644 index 000000000..a85714fab --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_bitmap.cc @@ -0,0 +1,37 @@ +// Copyright 2021 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h" + +#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" +#include "base/allocator/partition_allocator/partition_alloc_constants.h" + +#if !defined(PA_HAS_64_BITS_POINTERS) + +namespace partition_alloc::internal { + +namespace { + +Lock g_lock; + +} // namespace + +Lock& AddressPoolManagerBitmap::GetLock() { + return g_lock; +} + +std::bitset + AddressPoolManagerBitmap::regular_pool_bits_; // GUARDED_BY(GetLock()) +std::bitset + AddressPoolManagerBitmap::brp_pool_bits_; // GUARDED_BY(GetLock()) +#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) +std::array + AddressPoolManagerBitmap::brp_forbidden_super_page_map_; +std::atomic_size_t AddressPoolManagerBitmap::blocklist_hit_count_; +#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + +} // namespace partition_alloc::internal + +#endif // !defined(PA_HAS_64_BITS_POINTERS) diff --git a/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_bitmap.h b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_bitmap.h new file mode 100644 index 000000000..0aa51ceb4 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_bitmap.h @@ -0,0 +1,190 @@ +// Copyright 2021 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_ +#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_ + +#include +#include +#include +#include + +#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" +#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" +#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" +#include "base/allocator/partition_allocator/partition_alloc_check.h" +#include "base/allocator/partition_allocator/partition_alloc_config.h" +#include "base/allocator/partition_allocator/partition_alloc_constants.h" +#include "base/allocator/partition_allocator/partition_lock.h" +#include "build/build_config.h" + +#if !defined(PA_HAS_64_BITS_POINTERS) + +namespace partition_alloc { + +namespace internal { + +// AddressPoolManagerBitmap is a set of bitmaps that track whether a given +// address is in a pool that supports BackupRefPtr, or in a pool that doesn't +// support it. All PartitionAlloc allocations must be in either of the pools. +// +// This code is specific to 32-bit systems. 
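+//
+// The IsManagedByPartitionAlloc*() free functions at the end of this header
+// are the public entry points that consult these bitmaps.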
+class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressPoolManagerBitmap { + public: + static constexpr uint64_t kGiB = 1024 * 1024 * 1024ull; + static constexpr uint64_t kAddressSpaceSize = 4ull * kGiB; + + // For BRP pool, we use partition page granularity to eliminate the guard + // pages from the bitmap at the ends: + // - Eliminating the guard page at the beginning is needed so that pointers + // to the end of an allocation that immediately precede a super page in BRP + // pool don't accidentally fall into that pool. + // - Eliminating the guard page at the end is to ensure that the last page + // of the address space isn't in the BRP pool. This allows using sentinels + // like reinterpret_cast(-1) without a risk of triggering BRP logic + // on an invalid address. (Note, 64-bit systems don't have this problem as + // the upper half of the address space always belongs to the OS.) + // + // Note, direct map allocations also belong to this pool. The same logic as + // above applies. It is important to note, however, that the granularity used + // here has to be a minimum of partition page size and direct map allocation + // granularity. Since DirectMapAllocationGranularity() is no smaller than + // PageAllocationGranularity(), we don't need to decrease the bitmap + // granularity any further. + static constexpr size_t kBitShiftOfBRPPoolBitmap = PartitionPageShift(); + static constexpr size_t kBytesPer1BitOfBRPPoolBitmap = PartitionPageSize(); + static_assert(kBytesPer1BitOfBRPPoolBitmap == 1 << kBitShiftOfBRPPoolBitmap, + ""); + static constexpr size_t kGuardOffsetOfBRPPoolBitmap = 1; + static constexpr size_t kGuardBitsOfBRPPoolBitmap = 2; + static constexpr size_t kBRPPoolBits = + kAddressSpaceSize / kBytesPer1BitOfBRPPoolBitmap; + + // Regular pool may include both normal bucket and direct map allocations, so + // the bitmap granularity has to be at least as small as + // DirectMapAllocationGranularity(). No need to eliminate guard pages at the + // ends, as this is a BackupRefPtr-specific concern, hence no need to lower + // the granularity to partition page size. + static constexpr size_t kBitShiftOfRegularPoolBitmap = + DirectMapAllocationGranularityShift(); + static constexpr size_t kBytesPer1BitOfRegularPoolBitmap = + DirectMapAllocationGranularity(); + static_assert(kBytesPer1BitOfRegularPoolBitmap == + 1 << kBitShiftOfRegularPoolBitmap, + ""); + static constexpr size_t kRegularPoolBits = + kAddressSpaceSize / kBytesPer1BitOfRegularPoolBitmap; + + // Returns false for nullptr. + static bool IsManagedByRegularPool(uintptr_t address) { + static_assert( + std::numeric_limits::max() >> kBitShiftOfRegularPoolBitmap < + regular_pool_bits_.size(), + "The bitmap is too small, will result in unchecked out of bounds " + "accesses."); + // It is safe to read |regular_pool_bits_| without a lock since the caller + // is responsible for guaranteeing that the address is inside a valid + // allocation and the deallocation call won't race with this call. + return PA_TS_UNCHECKED_READ( + regular_pool_bits_)[address >> kBitShiftOfRegularPoolBitmap]; + } + + // Returns false for nullptr. 
+ static bool IsManagedByBRPPool(uintptr_t address) { + static_assert(std::numeric_limits::max() >> + kBitShiftOfBRPPoolBitmap < brp_pool_bits_.size(), + "The bitmap is too small, will result in unchecked out of " + "bounds accesses."); + // It is safe to read |brp_pool_bits_| without a lock since the caller + // is responsible for guaranteeing that the address is inside a valid + // allocation and the deallocation call won't race with this call. + return PA_TS_UNCHECKED_READ( + brp_pool_bits_)[address >> kBitShiftOfBRPPoolBitmap]; + } + +#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + static void BanSuperPageFromBRPPool(uintptr_t address) { + brp_forbidden_super_page_map_[address >> kSuperPageShift].store( + true, std::memory_order_relaxed); + } + + static bool IsAllowedSuperPageForBRPPool(uintptr_t address) { + // The only potentially dangerous scenario, in which this check is used, is + // when the assignment of the first raw_ptr object for an address + // allocated outside the BRP pool is racing with the allocation of a new + // super page at the same address. We assume that if raw_ptr is being + // initialized with a raw pointer, the associated allocation is "alive"; + // otherwise, the issue should be fixed by rewriting the raw pointer + // variable as raw_ptr. In the worst case, when such a fix is + // impossible, we should just undo the raw pointer -> raw_ptr rewrite of + // the problematic field. If the above assumption holds, the existing + // allocation will prevent us from reserving the super-page region and, + // thus, having the race condition. Since we rely on that external + // synchronization, the relaxed memory ordering should be sufficient. + return !brp_forbidden_super_page_map_[address >> kSuperPageShift].load( + std::memory_order_relaxed); + } + + static void IncrementBlocklistHitCount() { ++blocklist_hit_count_; } +#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + + private: + friend class AddressPoolManager; + + static Lock& GetLock(); + + static std::bitset regular_pool_bits_ + PA_GUARDED_BY(GetLock()); + static std::bitset brp_pool_bits_ PA_GUARDED_BY(GetLock()); +#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + static std::array + brp_forbidden_super_page_map_; + static std::atomic_size_t blocklist_hit_count_; +#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) +}; + +} // namespace internal + +// Returns false for nullptr. +PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) { + // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used. + // No need to add IsManagedByConfigurablePool, because Configurable Pool + // doesn't exist on 32-bit. +#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + PA_DCHECK(!internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address)); +#endif + return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address) +#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + || internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address) +#endif + ; +} + +// Returns false for nullptr. +PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) { + return internal::AddressPoolManagerBitmap::IsManagedByRegularPool(address); +} + +// Returns false for nullptr. +PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) { + return internal::AddressPoolManagerBitmap::IsManagedByBRPPool(address); +} + +// Returns false for nullptr. +PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool( + uintptr_t address) { + // The Configurable Pool is only available on 64-bit builds. 
+ return false; +} + +PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() { + // The Configurable Pool is only available on 64-bit builds. + return false; +} + +} // namespace partition_alloc + +#endif // !defined(PA_HAS_64_BITS_POINTERS) + +#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_BITMAP_H_ diff --git a/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_types.h b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_types.h new file mode 100644 index 000000000..d14304671 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_types.h @@ -0,0 +1,14 @@ +// Copyright 2020 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_ +#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_ + +namespace partition_alloc::internal { + +using pool_handle = unsigned; + +} // namespace partition_alloc::internal + +#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_TYPES_H_ diff --git a/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_unittest.cc b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_unittest.cc new file mode 100644 index 000000000..743b23886 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/address_pool_manager_unittest.cc @@ -0,0 +1,407 @@ +// Copyright 2020 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/allocator/partition_allocator/address_pool_manager.h" + +#include + +#include "base/allocator/partition_allocator/address_space_stats.h" +#include "base/allocator/partition_allocator/page_allocator.h" +#include "base/allocator/partition_allocator/partition_alloc_base/bits.h" +#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" +#include "base/allocator/partition_allocator/partition_alloc_constants.h" +#include "build/build_config.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace partition_alloc::internal { + +class AddressSpaceStatsDumperForTesting final : public AddressSpaceStatsDumper { + public: + AddressSpaceStatsDumperForTesting() = default; + ~AddressSpaceStatsDumperForTesting() = default; + + void DumpStats( + const partition_alloc::AddressSpaceStats* address_space_stats) override { + regular_pool_usage_ = address_space_stats->regular_pool_stats.usage; +#if defined(PA_HAS_64_BITS_POINTERS) + regular_pool_largest_reservation_ = + address_space_stats->regular_pool_stats.largest_available_reservation; +#endif // defined(PA_HAS_64_BITS_POINTERS) +#if !defined(PA_HAS_64_BITS_POINTERS) && \ + BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + blocklist_size_ = address_space_stats->blocklist_size; +#endif // !defined(PA_HAS_64_BITS_POINTERS) && + // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + } + + size_t regular_pool_usage_ = 0; + size_t regular_pool_largest_reservation_ = 0; + size_t blocklist_size_ = 0; +}; + +#if defined(PA_HAS_64_BITS_POINTERS) + +class AddressPoolManagerForTesting : public AddressPoolManager { + public: + AddressPoolManagerForTesting() = default; + ~AddressPoolManagerForTesting() = default; +}; + +class PartitionAllocAddressPoolManagerTest : public testing::Test { + protected: + PartitionAllocAddressPoolManagerTest() = default; + ~PartitionAllocAddressPoolManagerTest() 
override = default; + + void SetUp() override { + manager_ = std::make_unique(); + base_address_ = + AllocPages(kPoolSize, kSuperPageSize, + PageAccessibilityConfiguration( + PageAccessibilityConfiguration::kInaccessible), + PageTag::kPartitionAlloc); + ASSERT_TRUE(base_address_); + manager_->Add(kRegularPoolHandle, base_address_, kPoolSize); + pool_ = kRegularPoolHandle; + } + + void TearDown() override { + manager_->Remove(pool_); + FreePages(base_address_, kPoolSize); + manager_.reset(); + } + + AddressPoolManager* GetAddressPoolManager() { return manager_.get(); } + + static constexpr size_t kPoolSize = kPoolMaxSize; + static constexpr size_t kPageCnt = kPoolSize / kSuperPageSize; + + std::unique_ptr manager_; + uintptr_t base_address_; + pool_handle pool_; +}; + +TEST_F(PartitionAllocAddressPoolManagerTest, TooLargePool) { + uintptr_t base_addr = 0x4200000; + const pool_handle extra_pool = 2; + static_assert(kNumPools >= 2); + + EXPECT_DEATH_IF_SUPPORTED( + GetAddressPoolManager()->Add(extra_pool, base_addr, + kPoolSize + kSuperPageSize), + ""); +} + +TEST_F(PartitionAllocAddressPoolManagerTest, ManyPages) { + EXPECT_EQ( + GetAddressPoolManager()->Reserve(pool_, 0, kPageCnt * kSuperPageSize), + base_address_); + EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u); + GetAddressPoolManager()->UnreserveAndDecommit(pool_, base_address_, + kPageCnt * kSuperPageSize); + + EXPECT_EQ( + GetAddressPoolManager()->Reserve(pool_, 0, kPageCnt * kSuperPageSize), + base_address_); + GetAddressPoolManager()->UnreserveAndDecommit(pool_, base_address_, + kPageCnt * kSuperPageSize); +} + +TEST_F(PartitionAllocAddressPoolManagerTest, PagesFragmented) { + uintptr_t addrs[kPageCnt]; + for (size_t i = 0; i < kPageCnt; ++i) { + addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize); + EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize); + } + EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u); + // Free other other super page, so that we have plenty of free space, but none + // of the empty spaces can fit 2 super pages. + for (size_t i = 1; i < kPageCnt; i += 2) { + GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i], + kSuperPageSize); + } + EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize), 0u); + // Reserve freed super pages back, so that there are no free ones. + for (size_t i = 1; i < kPageCnt; i += 2) { + addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize); + EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize); + } + EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u); + // Lastly, clean up. + for (uintptr_t addr : addrs) { + GetAddressPoolManager()->UnreserveAndDecommit(pool_, addr, kSuperPageSize); + } +} + +TEST_F(PartitionAllocAddressPoolManagerTest, GetUsedSuperpages) { + uintptr_t addrs[kPageCnt]; + for (size_t i = 0; i < kPageCnt; ++i) { + addrs[i] = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize); + EXPECT_EQ(addrs[i], base_address_ + i * kSuperPageSize); + } + EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize), 0u); + + std::bitset used_super_pages; + GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages); + + // We expect every bit to be set. + for (size_t i = 0; i < kPageCnt; ++i) { + ASSERT_TRUE(used_super_pages.test(i)); + } + + // Free every other super page, so that we have plenty of free space, but none + // of the empty spaces can fit 2 super pages. 
+ for (size_t i = 1; i < kPageCnt; i += 2) { + GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i], + kSuperPageSize); + } + + EXPECT_EQ(GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize), 0u); + + GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages); + + // We expect every other bit to be set. + for (size_t i = 0; i < kPageCnt; i++) { + if (i % 2 == 0) { + ASSERT_TRUE(used_super_pages.test(i)); + } else { + ASSERT_FALSE(used_super_pages.test(i)); + } + } + + // Free the even numbered super pages. + for (size_t i = 0; i < kPageCnt; i += 2) { + GetAddressPoolManager()->UnreserveAndDecommit(pool_, addrs[i], + kSuperPageSize); + } + + // Finally check to make sure all bits are zero in the used superpage bitset. + GetAddressPoolManager()->GetPoolUsedSuperPages(pool_, used_super_pages); + + for (size_t i = 0; i < kPageCnt; i++) { + ASSERT_FALSE(used_super_pages.test(i)); + } +} + +TEST_F(PartitionAllocAddressPoolManagerTest, IrregularPattern) { + uintptr_t a1 = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize); + EXPECT_EQ(a1, base_address_); + uintptr_t a2 = GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize); + EXPECT_EQ(a2, base_address_ + 1 * kSuperPageSize); + uintptr_t a3 = GetAddressPoolManager()->Reserve(pool_, 0, 3 * kSuperPageSize); + EXPECT_EQ(a3, base_address_ + 3 * kSuperPageSize); + uintptr_t a4 = GetAddressPoolManager()->Reserve(pool_, 0, 4 * kSuperPageSize); + EXPECT_EQ(a4, base_address_ + 6 * kSuperPageSize); + uintptr_t a5 = GetAddressPoolManager()->Reserve(pool_, 0, 5 * kSuperPageSize); + EXPECT_EQ(a5, base_address_ + 10 * kSuperPageSize); + + GetAddressPoolManager()->UnreserveAndDecommit(pool_, a4, 4 * kSuperPageSize); + uintptr_t a6 = GetAddressPoolManager()->Reserve(pool_, 0, 6 * kSuperPageSize); + EXPECT_EQ(a6, base_address_ + 15 * kSuperPageSize); + + GetAddressPoolManager()->UnreserveAndDecommit(pool_, a5, 5 * kSuperPageSize); + uintptr_t a7 = GetAddressPoolManager()->Reserve(pool_, 0, 7 * kSuperPageSize); + EXPECT_EQ(a7, base_address_ + 6 * kSuperPageSize); + uintptr_t a8 = GetAddressPoolManager()->Reserve(pool_, 0, 3 * kSuperPageSize); + EXPECT_EQ(a8, base_address_ + 21 * kSuperPageSize); + uintptr_t a9 = GetAddressPoolManager()->Reserve(pool_, 0, 2 * kSuperPageSize); + EXPECT_EQ(a9, base_address_ + 13 * kSuperPageSize); + + GetAddressPoolManager()->UnreserveAndDecommit(pool_, a7, 7 * kSuperPageSize); + GetAddressPoolManager()->UnreserveAndDecommit(pool_, a9, 2 * kSuperPageSize); + GetAddressPoolManager()->UnreserveAndDecommit(pool_, a6, 6 * kSuperPageSize); + uintptr_t a10 = + GetAddressPoolManager()->Reserve(pool_, 0, 15 * kSuperPageSize); + EXPECT_EQ(a10, base_address_ + 6 * kSuperPageSize); + + // Clean up. 
+ GetAddressPoolManager()->UnreserveAndDecommit(pool_, a1, kSuperPageSize); + GetAddressPoolManager()->UnreserveAndDecommit(pool_, a2, 2 * kSuperPageSize); + GetAddressPoolManager()->UnreserveAndDecommit(pool_, a3, 3 * kSuperPageSize); + GetAddressPoolManager()->UnreserveAndDecommit(pool_, a8, 3 * kSuperPageSize); + GetAddressPoolManager()->UnreserveAndDecommit(pool_, a10, + 15 * kSuperPageSize); +} + +TEST_F(PartitionAllocAddressPoolManagerTest, DecommittedDataIsErased) { + uintptr_t address = + GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize); + ASSERT_TRUE(address); + RecommitSystemPages(address, kSuperPageSize, + PageAccessibilityConfiguration( + PageAccessibilityConfiguration::kReadWrite), + PageAccessibilityDisposition::kRequireUpdate); + + memset(reinterpret_cast(address), 42, kSuperPageSize); + GetAddressPoolManager()->UnreserveAndDecommit(pool_, address, kSuperPageSize); + + uintptr_t address2 = + GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize); + ASSERT_EQ(address, address2); + RecommitSystemPages(address2, kSuperPageSize, + PageAccessibilityConfiguration( + PageAccessibilityConfiguration::kReadWrite), + PageAccessibilityDisposition::kRequireUpdate); + + uint32_t sum = 0; + for (size_t i = 0; i < kSuperPageSize; i++) { + sum += reinterpret_cast(address2)[i]; + } + EXPECT_EQ(0u, sum) << sum / 42 << " bytes were not zeroed"; + + GetAddressPoolManager()->UnreserveAndDecommit(pool_, address2, + kSuperPageSize); +} + +TEST_F(PartitionAllocAddressPoolManagerTest, RegularPoolUsageChanges) { + AddressSpaceStatsDumperForTesting dumper{}; + + GetAddressPoolManager()->DumpStats(&dumper); + ASSERT_EQ(dumper.regular_pool_usage_, 0ull); + ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt); + + // Bisect the pool by reserving a super page in the middle. 
+ const uintptr_t midpoint_address = + base_address_ + (kPageCnt / 2) * kSuperPageSize; + ASSERT_EQ( + GetAddressPoolManager()->Reserve(pool_, midpoint_address, kSuperPageSize), + midpoint_address); + + GetAddressPoolManager()->DumpStats(&dumper); + ASSERT_EQ(dumper.regular_pool_usage_, 1ull); + ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt / 2); + + GetAddressPoolManager()->UnreserveAndDecommit(pool_, midpoint_address, + kSuperPageSize); + + GetAddressPoolManager()->DumpStats(&dumper); + ASSERT_EQ(dumper.regular_pool_usage_, 0ull); + ASSERT_EQ(dumper.regular_pool_largest_reservation_, kPageCnt); +} + +#else // defined(PA_HAS_64_BITS_POINTERS) + +TEST(PartitionAllocAddressPoolManagerTest, IsManagedByRegularPool) { + constexpr size_t kAllocCount = 8; + static const size_t kNumPages[kAllocCount] = {1, 4, 7, 8, 13, 16, 31, 60}; + uintptr_t addrs[kAllocCount]; + for (size_t i = 0; i < kAllocCount; ++i) { + addrs[i] = AddressPoolManager::GetInstance().Reserve( + kRegularPoolHandle, 0, + AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap * + kNumPages[i]); + EXPECT_TRUE(addrs[i]); + EXPECT_TRUE(!(addrs[i] & kSuperPageOffsetMask)); + AddressPoolManager::GetInstance().MarkUsed( + kRegularPoolHandle, addrs[i], + AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap * + kNumPages[i]); + } + for (size_t i = 0; i < kAllocCount; ++i) { + uintptr_t address = addrs[i]; + size_t num_pages = + base::bits::AlignUp( + kNumPages[i] * + AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap, + kSuperPageSize) / + AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap; + for (size_t j = 0; j < num_pages; ++j) { + if (j < kNumPages[i]) { + EXPECT_TRUE(AddressPoolManager::IsManagedByRegularPool(address)); + } else { + EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(address)); + } + EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(address)); + address += AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap; + } + } + for (size_t i = 0; i < kAllocCount; ++i) { + AddressPoolManager::GetInstance().MarkUnused( + kRegularPoolHandle, addrs[i], + AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap * + kNumPages[i]); + AddressPoolManager::GetInstance().UnreserveAndDecommit( + kRegularPoolHandle, addrs[i], + AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap * + kNumPages[i]); + EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(addrs[i])); + EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(addrs[i])); + } +} + +#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) +TEST(PartitionAllocAddressPoolManagerTest, IsManagedByBRPPool) { + constexpr size_t kAllocCount = 4; + // Totally (1+3+7+11) * 2MB = 44MB allocation + static const size_t kNumPages[kAllocCount] = {1, 3, 7, 11}; + uintptr_t addrs[kAllocCount]; + for (size_t i = 0; i < kAllocCount; ++i) { + addrs[i] = AddressPoolManager::GetInstance().Reserve( + kBRPPoolHandle, 0, kSuperPageSize * kNumPages[i]); + EXPECT_TRUE(addrs[i]); + EXPECT_TRUE(!(addrs[i] & kSuperPageOffsetMask)); + AddressPoolManager::GetInstance().MarkUsed(kBRPPoolHandle, addrs[i], + kSuperPageSize * kNumPages[i]); + } + + constexpr size_t first_guard_size = + AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap * + AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap; + constexpr size_t last_guard_size = + AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap * + (AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap - + AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap); + + for (size_t i = 0; i < kAllocCount; ++i) { + uintptr_t 
address = addrs[i]; + size_t num_allocated_size = kNumPages[i] * kSuperPageSize; + size_t num_system_pages = num_allocated_size / SystemPageSize(); + for (size_t j = 0; j < num_system_pages; ++j) { + size_t offset = address - addrs[i]; + if (offset < first_guard_size || + offset >= (num_allocated_size - last_guard_size)) { + EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(address)); + } else { + EXPECT_TRUE(AddressPoolManager::IsManagedByBRPPool(address)); + } + EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(address)); + address += SystemPageSize(); + } + } + for (size_t i = 0; i < kAllocCount; ++i) { + AddressPoolManager::GetInstance().MarkUnused(kBRPPoolHandle, addrs[i], + kSuperPageSize * kNumPages[i]); + AddressPoolManager::GetInstance().UnreserveAndDecommit( + kBRPPoolHandle, addrs[i], kSuperPageSize * kNumPages[i]); + EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(addrs[i])); + EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(addrs[i])); + } +} +#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + +TEST(PartitionAllocAddressPoolManagerTest, RegularPoolUsageChanges) { + AddressSpaceStatsDumperForTesting dumper{}; + AddressPoolManager::GetInstance().DumpStats(&dumper); + const size_t usage_before = dumper.regular_pool_usage_; + + const uintptr_t address = AddressPoolManager::GetInstance().Reserve( + kRegularPoolHandle, 0, kSuperPageSize); + ASSERT_TRUE(address); + AddressPoolManager::GetInstance().MarkUsed(kRegularPoolHandle, address, + kSuperPageSize); + + AddressPoolManager::GetInstance().DumpStats(&dumper); + EXPECT_GT(dumper.regular_pool_usage_, usage_before); + + AddressPoolManager::GetInstance().MarkUnused(kRegularPoolHandle, address, + kSuperPageSize); + AddressPoolManager::GetInstance().UnreserveAndDecommit( + kRegularPoolHandle, address, kSuperPageSize); + + AddressPoolManager::GetInstance().DumpStats(&dumper); + EXPECT_EQ(dumper.regular_pool_usage_, usage_before); +} + +#endif // defined(PA_HAS_64_BITS_POINTERS) + +} // namespace partition_alloc::internal diff --git a/external/chromium/src/base/allocator/partition_allocator/address_space_randomization.cc b/external/chromium/src/base/allocator/partition_allocator/address_space_randomization.cc new file mode 100644 index 000000000..b327666c0 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/address_space_randomization.cc @@ -0,0 +1,67 @@ +// Copyright 2014 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/allocator/partition_allocator/address_space_randomization.h" + +#include "base/allocator/partition_allocator/partition_alloc_check.h" +#include "base/allocator/partition_allocator/partition_alloc_config.h" +#include "base/allocator/partition_allocator/random.h" +#include "build/build_config.h" + +#if BUILDFLAG(IS_WIN) +#include // Must be in front of other Windows header files. + +#include +#endif + +namespace partition_alloc { + +uintptr_t GetRandomPageBase() { + uintptr_t random = static_cast(internal::RandomValue()); + +#if defined(PA_HAS_64_BITS_POINTERS) + random <<= 32ULL; + random |= static_cast(internal::RandomValue()); + +// The ASLRMask() and ASLROffset() constants will be suitable for the +// OS and build configuration. +#if BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) + // Windows >= 8.1 has the full 47 bits. Use them where available. 
+ static bool windows_81 = false; + static bool windows_81_initialized = false; + if (!windows_81_initialized) { + windows_81 = IsWindows8Point1OrGreater(); + windows_81_initialized = true; + } + if (!windows_81) { + random &= internal::ASLRMaskBefore8_10(); + } else { + random &= internal::ASLRMask(); + } + random += internal::ASLROffset(); +#else + random &= internal::ASLRMask(); + random += internal::ASLROffset(); +#endif // BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) +#else // defined(PA_HAS_64_BITS_POINTERS) +#if BUILDFLAG(IS_WIN) + // On win32 host systems the randomization plus huge alignment causes + // excessive fragmentation. Plus most of these systems lack ASLR, so the + // randomization isn't buying anything. In that case we just skip it. + // TODO(palmer): Just dump the randomization when HE-ASLR is present. + static BOOL is_wow64 = -1; + if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64)) + is_wow64 = FALSE; + if (!is_wow64) + return 0; +#endif // BUILDFLAG(IS_WIN) + random &= internal::ASLRMask(); + random += internal::ASLROffset(); +#endif // defined(PA_HAS_64_BITS_POINTERS) + + PA_DCHECK(!(random & internal::PageAllocationGranularityOffsetMask())); + return random; +} + +} // namespace partition_alloc diff --git a/external/chromium/src/base/allocator/partition_allocator/address_space_randomization.h b/external/chromium/src/base/allocator/partition_allocator/address_space_randomization.h new file mode 100644 index 000000000..e76125677 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/address_space_randomization.h @@ -0,0 +1,290 @@ +// Copyright 2014 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_ +#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_ + +#include + +#include "base/allocator/partition_allocator/page_allocator_constants.h" +#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" +#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" +#include "build/build_config.h" + +namespace partition_alloc { + +// Calculates a random preferred mapping address. In calculating an address, we +// balance good ASLR against not fragmenting the address space too badly. +PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t GetRandomPageBase(); + +namespace internal { + +PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t +AslrAddress(uintptr_t mask) { + return mask & PageAllocationGranularityBaseMask(); +} +PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t +AslrMask(uintptr_t bits) { + return AslrAddress((1ULL << bits) - 1ULL); +} + +// Turn off formatting, because the thicket of nested ifdefs below is +// incomprehensible without indentation. It is also incomprehensible with +// indentation, but the only other option is a combinatorial explosion of +// *_{win,linux,mac,foo}_{32,64}.h files. +// +// clang-format off + +#if defined(ARCH_CPU_64_BITS) + + #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) + + // We shouldn't allocate system pages at all for sanitizer builds. However, + // we do, and if random hint addresses interfere with address ranges + // hard-coded in those tools, bad things happen. This address range is + // copied from TSAN source but works with all tools. See + // https://crbug.com/539863. 
+ PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t + ASLRMask() { + return AslrAddress(0x007fffffffffULL); + } + PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t + ASLROffset() { + return AslrAddress(0x7e8000000000ULL); + } + + #elif BUILDFLAG(IS_WIN) + + // Windows 8.10 and newer support the full 48 bit address range. Older + // versions of Windows only support 44 bits. Since ASLROffset() is non-zero + // and may cause a carry, use 47 and 43 bit masks. See + // http://www.alex-ionescu.com/?p=246 + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() { + return AslrMask(47); + } + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMaskBefore8_10() { + return AslrMask(43); + } + // Try not to map pages into the range where Windows loads DLLs by default. + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return 0x80000000ULL; + } + + #elif BUILDFLAG(IS_APPLE) + + // macOS as of 10.12.5 does not clean up entries in page map levels 3/4 + // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region + // is destroyed. Using a virtual address space that is too large causes a + // leak of about 1 wired [can never be paged out] page per call to mmap. The + // page is only reclaimed when the process is killed. Confine the hint to a + // 39-bit section of the virtual address space. + // + // This implementation adapted from + // https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference + // is that here we clamp to 39 bits, not 32. + // + // TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior + // changes. + PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t + ASLRMask() { + return AslrMask(38); + } + PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t + ASLROffset() { + // Be careful, there is a zone where macOS will not map memory, at least + // on ARM64. From an ARM64 machine running 12.3, the range seems to be + // [0x1000000000, 0x7000000000). Make sure that the range we use is + // outside these bounds. In 12.3, there is a reserved area between + // MACH_VM_MIN_GPU_CARVEOUT_ADDRESS and MACH_VM_MAX_GPU_CARVEOUT_ADDRESS, + // which is reserved on ARM64. See these constants in XNU's source code + // for details (xnu-8019.80.24/osfmk/mach/arm/vm_param.h). + return AslrAddress(0x10000000000ULL); + } + + #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA) + + #if defined(ARCH_CPU_X86_64) + + // Linux (and macOS) support the full 47-bit user space of x64 processors. + // Use only 46 to allow the kernel a chance to fulfill the request. + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() { + return AslrMask(46); + } + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0); + } + + #elif defined(ARCH_CPU_ARM64) + + #if BUILDFLAG(IS_ANDROID) + + // Restrict the address range on Android to avoid a large performance + // regression in single-process WebViews. See https://crbug.com/837640. + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() { + return AslrMask(30); + } + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0x20000000ULL); + } + + #elif BUILDFLAG(IS_LINUX) + + // Linux on arm64 can use 39, 42, 48, or 52-bit user space, depending on + // page size and number of levels of translation pages used. We use + // 39-bit as base as all setups should support this, lowered to 38-bit + // as ASLROffset() could cause a carry. 
+ PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t + ASLRMask() { + return AslrMask(38); + } + PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t + ASLROffset() { + return AslrAddress(0x1000000000ULL); + } + + #else + + // ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset() + // could cause a carry. + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() { + return AslrMask(38); + } + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0x1000000000ULL); + } + + #endif + + #elif defined(ARCH_CPU_PPC64) + + #if BUILDFLAG(IS_AIX) + + // AIX has 64 bits of virtual addressing, but we limit the address range + // to (a) minimize segment lookaside buffer (SLB) misses; and (b) use + // extra address space to isolate the mmap regions. + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() { + return AslrMask(30); + } + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0x400000000000ULL); + } + + #elif defined(ARCH_CPU_BIG_ENDIAN) + + // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42. + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() { + return AslrMask(42); + } + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0); + } + + #else // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN) + + // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46. + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() { + return AslrMask(46); + } + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0); + } + + #endif // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN) + + #elif defined(ARCH_CPU_S390X) + + // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to + // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a + // chance to fulfill the request. + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() { + return AslrMask(40); + } + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0); + } + + #elif defined(ARCH_CPU_S390) + + // 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel + // a chance to fulfill the request. + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() { + return AslrMask(29); + } + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0); + } + + #else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) && + // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390) + + // For all other POSIX variants, use 30 bits. + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() { + return AslrMask(30); + } + + #if BUILDFLAG(IS_SOLARIS) + + // For our Solaris/illumos mmap hint, we pick a random address in the + // bottom half of the top half of the address space (that is, the third + // quarter). Because we do not MAP_FIXED, this will be treated only as a + // hint -- the system will not fail to mmap because something else + // happens to already be mapped at our random address. We deliberately + // set the hint high enough to get well above the system's break (that + // is, the heap); Solaris and illumos will try the hint and if that + // fails allocate as if there were no hint at all. The high hint + // prevents the break from getting hemmed in at low values, ceding half + // of the address space to the system heap. + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0x80000000ULL); + } + + #elif BUILDFLAG(IS_AIX) + + // The range 0x30000000 - 0xD0000000 is available on AIX; choose the + // upper range. 
+ constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0x90000000ULL); + } + + #else // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX) + + // The range 0x20000000 - 0x60000000 is relatively unpopulated across a + // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS + // 10.6 and 10.7. + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0x20000000ULL); + } + + #endif // !BUILDFLAG(IS_SOLARIS) && !BUILDFLAG(IS_AIX) + + #endif // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) && + // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390) + + #endif // BUILDFLAG(IS_POSIX) + +#elif defined(ARCH_CPU_32_BITS) + + // This is a good range on 32-bit Windows and Android (the only platforms on + // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There + // is no issue with carries here. + constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() { + return AslrMask(30); + } + constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() { + return AslrAddress(0x20000000ULL); + } + +#else + + #error Please tell us about your exotic hardware! Sounds interesting. + +#endif // defined(ARCH_CPU_32_BITS) + +// clang-format on + +} // namespace internal + +} // namespace partition_alloc + +#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_ diff --git a/external/chromium/src/base/allocator/partition_allocator/address_space_randomization_unittest.cc b/external/chromium/src/base/allocator/partition_allocator/address_space_randomization_unittest.cc new file mode 100644 index 000000000..d8211de8d --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/address_space_randomization_unittest.cc @@ -0,0 +1,281 @@ +// Copyright 2017 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/allocator/partition_allocator/address_space_randomization.h" + +#include +#include + +#include "base/allocator/partition_allocator/page_allocator.h" +#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" +#include "base/allocator/partition_allocator/partition_alloc_check.h" +#include "base/allocator/partition_allocator/random.h" +#include "build/build_config.h" +#include "testing/gtest/include/gtest/gtest.h" + +#if BUILDFLAG(IS_WIN) +#include +#include "base/win/windows_version.h" +// versionhelpers.h must be included after windows.h. +#include +#endif + +namespace partition_alloc { + +namespace { + +uintptr_t GetMask() { + uintptr_t mask = internal::ASLRMask(); +#if defined(ARCH_CPU_64_BITS) +// Sanitizers use their own ASLR mask constant. +#if BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) + if (!IsWindows8Point1OrGreater()) { + mask = internal::ASLRMaskBefore8_10(); + } +#endif // BUILDFLAG(IS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)) +#elif defined(ARCH_CPU_32_BITS) +#if BUILDFLAG(IS_WIN) + BOOL is_wow64 = FALSE; + if (!IsWow64Process(GetCurrentProcess(), &is_wow64)) + is_wow64 = FALSE; + if (!is_wow64) { + mask = 0; + } +#endif // BUILDFLAG(IS_WIN) +#endif // defined(ARCH_CPU_32_BITS) + return mask; +} + +const size_t kSamples = 100; + +uintptr_t GetAddressBits() { + return GetRandomPageBase(); +} + +uintptr_t GetRandomBits() { + return GetAddressBits() - internal::ASLROffset(); +} + +} // namespace + +// Configurations without ASLR are tested here. 
+TEST(PartitionAllocAddressSpaceRandomizationTest, DisabledASLR) { + uintptr_t mask = GetMask(); + if (!mask) { +#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_32_BITS) + // ASLR should be turned off on 32-bit Windows. + EXPECT_EQ(0u, GetRandomPageBase()); +#else + // Otherwise, 0 is very unexpected. + EXPECT_NE(0u, GetRandomPageBase()); +#endif + } +} + +TEST(PartitionAllocAddressSpaceRandomizationTest, Alignment) { + uintptr_t mask = GetMask(); + if (!mask) + return; + + for (size_t i = 0; i < kSamples; ++i) { + uintptr_t address = GetAddressBits(); + EXPECT_EQ(0ULL, + (address & internal::PageAllocationGranularityOffsetMask())); + } +} + +TEST(PartitionAllocAddressSpaceRandomizationTest, Range) { + uintptr_t mask = GetMask(); + if (!mask) + return; + + uintptr_t min = internal::ASLROffset(); + uintptr_t max = internal::ASLROffset() + internal::ASLRMask(); + for (size_t i = 0; i < kSamples; ++i) { + uintptr_t address = GetAddressBits(); + EXPECT_LE(min, address); + EXPECT_GE(max + mask, address); + } +} + +TEST(PartitionAllocAddressSpaceRandomizationTest, Predictable) { + uintptr_t mask = GetMask(); + if (!mask) + return; + + const uint64_t kInitialSeed = 0xfeed5eedULL; + SetMmapSeedForTesting(kInitialSeed); + + std::vector sequence; + for (size_t i = 0; i < kSamples; ++i) { + sequence.push_back(GetRandomPageBase()); + } + + SetMmapSeedForTesting(kInitialSeed); + + for (size_t i = 0; i < kSamples; ++i) { + EXPECT_EQ(GetRandomPageBase(), sequence[i]); + } +} + +// This randomness test is adapted from V8's PRNG tests. + +// Chi squared for getting m 0s out of n bits. +double ChiSquared(int m, int n) { + double ys_minus_np1 = (m - n / 2.0); + double chi_squared_1 = ys_minus_np1 * ys_minus_np1 * 2.0 / n; + double ys_minus_np2 = ((n - m) - n / 2.0); + double chi_squared_2 = ys_minus_np2 * ys_minus_np2 * 2.0 / n; + return chi_squared_1 + chi_squared_2; +} + +// Test for correlations between recent bits from the PRNG, or bits that are +// biased. +void RandomBitCorrelation(int random_bit) { + uintptr_t mask = GetMask(); + if ((mask & (1ULL << random_bit)) == 0) + return; // bit is always 0. + +#if BUILDFLAG(PA_DCHECK_IS_ON) + // Do fewer checks when BUILDFLAG(PA_DCHECK_IS_ON). Exercized code only + // changes when the random number generator does, which should be almost + // never. However it's expensive to run all the tests. So keep iterations + // faster for local development builds, while having the stricter version run + // on official build testers. + constexpr int kHistory = 2; + constexpr int kRepeats = 1000; +#else + constexpr int kHistory = 8; + constexpr int kRepeats = 10000; +#endif + constexpr int kPointerBits = 8 * sizeof(void*); + uintptr_t history[kHistory]; + // The predictor bit is either constant 0 or 1, or one of the bits from the + // history. + for (int predictor_bit = -2; predictor_bit < kPointerBits; predictor_bit++) { + // The predicted bit is one of the bits from the PRNG. + for (int ago = 0; ago < kHistory; ago++) { + // We don't want to check whether each bit predicts itself. + if (ago == 0 && predictor_bit == random_bit) + continue; + + // Enter the new random value into the history. + for (int i = ago; i >= 0; i--) { + history[i] = GetRandomBits(); + } + + // Find out how many of the bits are the same as the prediction bit. 
+ int m = 0; + for (int i = 0; i < kRepeats; i++) { + uintptr_t random = GetRandomBits(); + for (int j = ago - 1; j >= 0; j--) + history[j + 1] = history[j]; + history[0] = random; + + int predicted; + if (predictor_bit >= 0) { + predicted = (history[ago] >> predictor_bit) & 1; + } else { + predicted = predictor_bit == -2 ? 0 : 1; + } + int bit = (random >> random_bit) & 1; + if (bit == predicted) + m++; + } + + // Chi squared analysis for k = 2 (2, states: same/not-same) and one + // degree of freedom (k - 1). + double chi_squared = ChiSquared(m, kRepeats); + // For k=2 probability of Chi^2 < 35 is p=3.338e-9. This condition is + // tested ~19000 times, so probability of it failing randomly per one + // base_unittests run is (1 - (1 - p) ^ 19000) ~= 6e-5. + PA_CHECK(chi_squared <= 35.0); + // If the predictor bit is a fixed 0 or 1 then it makes no sense to + // repeat the test with a different age. + if (predictor_bit < 0) + break; + } + } +} + +// Tests are fairly slow, so give each random bit its own test. +#define TEST_RANDOM_BIT(BIT) \ + TEST(PartitionAllocAddressSpaceRandomizationTest, \ + RandomBitCorrelations##BIT) { \ + RandomBitCorrelation(BIT); \ + } + +// The first 12 bits on all platforms are always 0. +TEST_RANDOM_BIT(12) +TEST_RANDOM_BIT(13) +TEST_RANDOM_BIT(14) +TEST_RANDOM_BIT(15) +TEST_RANDOM_BIT(16) +TEST_RANDOM_BIT(17) +TEST_RANDOM_BIT(18) +TEST_RANDOM_BIT(19) +TEST_RANDOM_BIT(20) +TEST_RANDOM_BIT(21) +TEST_RANDOM_BIT(22) +TEST_RANDOM_BIT(23) +TEST_RANDOM_BIT(24) +TEST_RANDOM_BIT(25) +TEST_RANDOM_BIT(26) +TEST_RANDOM_BIT(27) +TEST_RANDOM_BIT(28) +TEST_RANDOM_BIT(29) +TEST_RANDOM_BIT(30) +TEST_RANDOM_BIT(31) +#if defined(ARCH_CPU_64_BITS) +TEST_RANDOM_BIT(32) +TEST_RANDOM_BIT(33) +TEST_RANDOM_BIT(34) +TEST_RANDOM_BIT(35) +TEST_RANDOM_BIT(36) +TEST_RANDOM_BIT(37) +TEST_RANDOM_BIT(38) +TEST_RANDOM_BIT(39) +TEST_RANDOM_BIT(40) +TEST_RANDOM_BIT(41) +TEST_RANDOM_BIT(42) +TEST_RANDOM_BIT(43) +TEST_RANDOM_BIT(44) +TEST_RANDOM_BIT(45) +TEST_RANDOM_BIT(46) +TEST_RANDOM_BIT(47) +TEST_RANDOM_BIT(48) +// No platforms have more than 48 address bits. +#endif // defined(ARCH_CPU_64_BITS) + +#undef TEST_RANDOM_BIT + +// Checks that we can actually map memory in the requested range. +// TODO(crbug.com/1318466): Extend to all operating systems once they are fixed. +#if BUILDFLAG(IS_MAC) +TEST(PartitionAllocAddressSpaceRandomizationTest, CanMapInAslrRange) { + int tries = 0; + // This is overly generous, but we really don't want to make the test flaky. 
+ constexpr int kMaxTries = 1000; + + for (tries = 0; tries < kMaxTries; tries++) { + uintptr_t requested_address = GetRandomPageBase(); + size_t size = internal::PageAllocationGranularity(); + + uintptr_t address = AllocPages( + requested_address, size, internal::PageAllocationGranularity(), + PageAccessibilityConfiguration( + PageAccessibilityConfiguration::kReadWrite), + PageTag::kPartitionAlloc); + ASSERT_NE(address, 0u); + FreePages(address, size); + + if (address == requested_address) + break; + } + + EXPECT_LT(tries, kMaxTries); +} +#endif // BUILDFLAG(IS_MAC) + +} // namespace partition_alloc diff --git a/external/chromium/src/base/allocator/partition_allocator/address_space_stats.h b/external/chromium/src/base/allocator/partition_allocator/address_space_stats.h new file mode 100644 index 000000000..a11b4f8a7 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/address_space_stats.h @@ -0,0 +1,55 @@ +// Copyright 2022 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_ +#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_ + +#include + +#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" +#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" +#include "base/allocator/partition_allocator/partition_alloc_config.h" + +namespace partition_alloc { + +// All members are measured in super pages. +struct PoolStats { + size_t usage = 0; + + // On 32-bit, pools are mainly logical entities, intermingled with + // allocations not managed by PartitionAlloc. The "largest available + // reservation" is not possible to measure in that case. +#if defined(PA_HAS_64_BITS_POINTERS) + size_t largest_available_reservation = 0; +#endif // defined(PA_HAS_64_BITS_POINTERS) +}; + +struct AddressSpaceStats { + PoolStats regular_pool_stats; +#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + PoolStats brp_pool_stats; +#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) +#if defined(PA_HAS_64_BITS_POINTERS) + PoolStats configurable_pool_stats; +#else +#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) + size_t blocklist_size; // measured in super pages + size_t blocklist_hit_count; +#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) +#endif // defined(PA_HAS_64_BITS_POINTERS) +#if BUILDFLAG(ENABLE_PKEYS) + PoolStats pkey_pool_stats; +#endif +}; + +// Interface passed to `AddressPoolManager::DumpStats()` to mediate +// for `AddressSpaceDumpProvider`. +class PA_COMPONENT_EXPORT(PARTITION_ALLOC) AddressSpaceStatsDumper { + public: + virtual void DumpStats(const AddressSpaceStats* address_space_stats) = 0; +}; + +} // namespace partition_alloc + +#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_STATS_H_ diff --git a/external/chromium/src/base/allocator/partition_allocator/allocation_guard.cc b/external/chromium/src/base/allocator/partition_allocator/allocation_guard.cc new file mode 100644 index 000000000..f0da0dcaa --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/allocation_guard.cc @@ -0,0 +1,41 @@ +// Copyright 2021 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "base/allocator/partition_allocator/allocation_guard.h" +#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h" +#include "base/allocator/partition_allocator/partition_alloc_config.h" + +#if defined(PA_HAS_ALLOCATION_GUARD) + +namespace partition_alloc { + +namespace { +thread_local bool g_disallow_allocations; +} // namespace + +ScopedDisallowAllocations::ScopedDisallowAllocations() { + if (g_disallow_allocations) + PA_IMMEDIATE_CRASH(); + + g_disallow_allocations = true; +} + +ScopedDisallowAllocations::~ScopedDisallowAllocations() { + g_disallow_allocations = false; +} + +ScopedAllowAllocations::ScopedAllowAllocations() { + // Save the previous value, as ScopedAllowAllocations is used in all + // partitions, not just the malloc() ones(s). + saved_value_ = g_disallow_allocations; + g_disallow_allocations = false; +} + +ScopedAllowAllocations::~ScopedAllowAllocations() { + g_disallow_allocations = saved_value_; +} + +} // namespace partition_alloc + +#endif // defined(PA_HAS_ALLOCATION_GUARD) diff --git a/external/chromium/src/base/allocator/partition_allocator/allocation_guard.h b/external/chromium/src/base/allocator/partition_allocator/allocation_guard.h new file mode 100644 index 000000000..8e6d2df31 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/allocation_guard.h @@ -0,0 +1,49 @@ +// Copyright 2021 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_ +#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_ + +#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" +#include "base/allocator/partition_allocator/partition_alloc_config.h" +#include "build/build_config.h" + +namespace partition_alloc { + +#if defined(PA_HAS_ALLOCATION_GUARD) + +// Disallow allocations in the scope. Does not nest. +class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedDisallowAllocations { + public: + ScopedDisallowAllocations(); + ~ScopedDisallowAllocations(); +}; + +// Disallow allocations in the scope. Does not nest. +class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedAllowAllocations { + public: + ScopedAllowAllocations(); + ~ScopedAllowAllocations(); + + private: + bool saved_value_; +}; + +#else + +struct [[maybe_unused]] ScopedDisallowAllocations{}; +struct [[maybe_unused]] ScopedAllowAllocations{}; + +#endif // defined(PA_HAS_ALLOCATION_GUARD) + +} // namespace partition_alloc + +namespace base::internal { + +using ::partition_alloc::ScopedAllowAllocations; +using ::partition_alloc::ScopedDisallowAllocations; + +} // namespace base::internal + +#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ALLOCATION_GUARD_H_ diff --git a/external/chromium/src/base/allocator/partition_allocator/arm_bti_test_functions.S b/external/chromium/src/base/allocator/partition_allocator/arm_bti_test_functions.S new file mode 100644 index 000000000..bf04e9350 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/arm_bti_test_functions.S @@ -0,0 +1,50 @@ +# Copyright 2021 The Chromium Authors +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This file contains a test function for checking Arm's branch target +# identification (BTI) feature, which helps mitigate jump-oriented +# programming. 
To get it working, BTI instructions must be executed +# on a compatible core, and the executable pages must be mapped with +# PROT_BTI. To validate that pages mapped with PROT_BTI are working +# correctly: +# 1) Allocate a read-write page. +# 2) Copy between the start and end symbols into that page. +# 3) Set the page to read-execute with PROT_BTI. +# 4) Call the first offset of the page, verify the result. +# 5) Call the second offset of the page (skipping the landing pad). +# Verify that it crashes as expected. +# This test works irrespective of whether BTI is enabled for C/C++ +# objects via -mbranch-protection=standard. + +.text +.global arm_bti_test_function +.global arm_bti_test_function_invalid_offset +.global arm_bti_test_function_end +arm_bti_test_function: + # Mark the start of this function as a valid call target. + bti jc + add x0, x0, #1 +arm_bti_test_function_invalid_offset: + # This label simulates calling an incomplete function. + # Jumping here should crash systems which support BTI. + add x0, x0, #2 + ret +arm_bti_test_function_end: + nop + +// For details see section "6.2 Program Property" in +// "ELF for the Arm 64-bit Architecture (AArch64)" +// https://github.com/ARM-software/abi-aa/blob/main/aaelf64/aaelf64.rst#62program-property +.pushsection .note.gnu.property, "a"; +.balign 8; +.long 4; +.long 0x10; +.long 0x5; +.asciz "GNU"; +.long 0xc0000000; /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */ +.long 4; +.long 1; /* GNU_PROPERTY_AARCH64_BTI */; +.long 0; +.popsection + diff --git a/external/chromium/src/base/allocator/partition_allocator/arm_bti_test_functions.h b/external/chromium/src/base/allocator/partition_allocator/arm_bti_test_functions.h new file mode 100644 index 000000000..485a67b28 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/arm_bti_test_functions.h @@ -0,0 +1,31 @@ +// Copyright 2021 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_ +#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_ + +#include "build/build_config.h" + +#if defined(ARCH_CPU_ARM64) +extern "C" { +/** + * A valid BTI function. Jumping to this funtion should not cause any problem in + * a BTI enabled environment. + **/ +int64_t arm_bti_test_function(int64_t); + +/** + * A function without proper BTI landing pad. Jumping here should crash the + * program on systems which support BTI. + **/ +int64_t arm_bti_test_function_invalid_offset(int64_t); + +/** + * A simple function which immediately returns to sender. + **/ +void arm_bti_test_function_end(void); +} +#endif // defined(ARCH_CPU_ARM64) + +#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ARM_BTI_TEST_FUNCTIONS_H_ diff --git a/external/chromium/src/base/allocator/partition_allocator/build_config.md b/external/chromium/src/base/allocator/partition_allocator/build_config.md new file mode 100644 index 000000000..aa905c736 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/build_config.md @@ -0,0 +1,118 @@ +# Build Config + +PartitionAlloc's behavior and operation can be influenced by many +different settings. Broadly, these are controlled at the top-level by +[GN args][gn-declare-args], which propagate via +[buildflags][buildflag-header] and `#defined` clauses. 
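*** aside
As a concrete illustration of that propagation (an editor's sketch, not an
excerpt from the tree): a boolean GN arg is typically written into a generated
`*_buildflags.h` header, and C++ code then branches on it with `BUILDFLAG()`.
The exact flag and header names below are assumptions used for illustration.

```c++
// Editor's sketch of the usual GN arg -> buildflag -> C++ pattern.
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Allocator-shim code path: malloc()/free() are routed into PartitionAlloc.
#else
// The system allocator stays in charge; PartitionAlloc can still be used
// explicitly by callers that opt in.
#endif
```
***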
+ +*** promo +Most of what you'll want to know exists between + +* [`//base/allocator/partition_allocator/BUILD.gn`][pa-build-gn], +* Everything else ending in `.gn` or `.gni` in + `//base/allocator/partition_allocator/`, +* [`allocator.gni`][allocator-gni], +* [`//base/allocator/BUILD.gn`][base-allocator-build-gn], and +* [`//base/BUILD.gn`][base-build-gn]. +*** + +*** aside +While Chromium promotes the `#if BUILDFLAG(FOO)` construct, some of +PartitionAlloc's behavior is governed by compound conditions `#defined` +in [`partition_alloc_config.h`][partition-alloc-config]. +*** + +*** promo +PartitionAlloc targets C++17. As the team develops standalone +PartitionAlloc, this may diverge from what the rest of Chrome browser +does, as we will be obligated to support external clients that +may not yet support newer C++ standards. + +See [Chrome-External Builds](./external_builds.md) for more. +*** + +## Select GN Args + +### `use_partition_alloc` + +Defines whether PartitionAlloc is at all available. + +Setting this `false` will entirely remove PartitionAlloc from the +Chromium build. _You probably do not want this._ + +*** note +Back when PartitionAlloc was the dedicated allocator in Blink, disabling +it was logically identical to wholly disabling it in Chromium. This GN +arg organically grew in scope with the advent of +PartitionAlloc-Everywhere and must be `true` as a prerequisite for +enabling PA-E. +*** + +### `use_partition_alloc_as_malloc` + +Does nothing special when value is `false`. Enables +[PartitionAlloc-Everywhere (PA-E)][pae-public-doc] when value is `true`. + +*** note +* While "everywhere" (in "PartitionAlloc-Everywhere") tautologically + includes Blink where PartitionAlloc originated, setting + `use_partition_alloc_as_malloc = false` does not disable PA usage in Blink, + which invokes PA explicitly (not via malloc). +* `use_partition_alloc_as_malloc = true` must not be confused + with `use_partition_alloc` (see above). +*** + +## Note: Component Builds + +When working on PartitionAlloc, know that `is_debug` defaults to +implying `is_component_build`, which interferes with the allocator +shim. A typical set of GN args should include + +```none +is_debug = true +is_component_build = false +``` + +Conversely, build configurations that have `is_component_build = true` +without explicitly specifying PA-specific args will not build with PA-E +enabled. + +## Notable Macros + +There is an ongoing effort +[to break out PartitionAlloc into a standalone library][pa-ee-crbug]. +Once PartitionAlloc stands alone from the larger Chrome build apparatus, +the code loses access to some macros. This is not an immediate concern, +but the team needs to decide either + +* how to propagate these macros in place, or +* how to remove them, replacing them with PA-specific build config. + +A non-exhaustive list of work items: + +* `OFFICIAL_BUILD` - influences crash macros and + `PA_THREAD_CACHE_ALLOC_STATS`. These are conceptually distinct enough + to be worth separating into dedicated build controls. +* `IS_PARTITION_ALLOC_IMPL` - must be defined when PartitionAlloc is + built as a shared library. This is required to export symbols. +* `COMPONENT_BUILD` - component builds (as per + `//docs/component_build.md`) must `#define COMPONENT_BUILD`. + Additionally, to build Win32, invoker must `#define WIN32`. +* `MEMORY_TOOL_REPLACES_ALLOCATOR` +* `*_SANITIZER` - mainly influences unit tests. 
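*** aside
On the `IS_PARTITION_ALLOC_IMPL` and `COMPONENT_BUILD` items above: the export
machinery follows the usual Chromium component-export shape. The sketch below
is an editor's simplification with an illustrative macro name; the real
definitions live in `partition_alloc_base/component_export.h`.

```c++
// Simplified sketch (not the actual header). When compiling the
// PartitionAlloc component itself, IS_PARTITION_ALLOC_IMPL is defined and
// symbols are exported; consumers of the shared library import them instead.
#if defined(COMPONENT_BUILD)
#if defined(WIN32)
#if defined(IS_PARTITION_ALLOC_IMPL)
#define PA_EXPORT_ANNOTATION __declspec(dllexport)
#else
#define PA_EXPORT_ANNOTATION __declspec(dllimport)
#endif
#else  // !defined(WIN32)
#define PA_EXPORT_ANNOTATION __attribute__((visibility("default")))
#endif
#else  // !defined(COMPONENT_BUILD)
#define PA_EXPORT_ANNOTATION
#endif
```
***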
+ +*** note +Over time, the above list should evolve into a list of macros / GN args +that influence PartitionAlloc's behavior. +*** + +[gn-declare-args]: https://gn.googlesource.com/gn/+/refs/heads/main/docs/reference.md#func_declare_args +[buildflag-header]: https://source.chromium.org/chromium/chromium/src/+/main:build/buildflag_header.gni +[pa-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/BUILD.gn +[allocator-gni]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/allocator.gni +[base-allocator-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/BUILD.gn +[base-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/BUILD.gn +[partition-alloc-config]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/partition_alloc_config.h +[pae-public-doc]: https://docs.google.com/document/d/1R1H9z5IVUAnXJgDjnts3nTJVcRbufWWT9ByXLgecSUM/preview +[miracleptr-doc]: https://docs.google.com/document/d/1pnnOAIz_DMWDI4oIOFoMAqLnf_MZ2GsrJNb_dbQ3ZBg/preview +[pa-ee-crbug]: https://crbug.com/1151236 diff --git a/external/chromium/src/base/allocator/partition_allocator/build_overrides/build.gni b/external/chromium/src/base/allocator/partition_allocator/build_overrides/build.gni new file mode 100644 index 000000000..30c0b6490 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/build_overrides/build.gni @@ -0,0 +1,9 @@ +# Copyright 2022 The Chromium Authors +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This file will be used to check out PartitionAlloc and to build it as +# standalone library. In this case, PartitionAlloc needs to define +# build_with_chromium. If building PartitionAlloc as a part of chromium, +# chromium will provide build_with_chromium=true. +build_with_chromium = false diff --git a/external/chromium/src/base/allocator/partition_allocator/build_overrides/partition_alloc.gni b/external/chromium/src/base/allocator/partition_allocator/build_overrides/partition_alloc.gni new file mode 100644 index 000000000..7a169bce7 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/build_overrides/partition_alloc.gni @@ -0,0 +1,19 @@ +# Copyright 2022 The Chromium Authors +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import("//build_overrides/build.gni") + +# This is the default build configuration when building PartitionAlloc +# as a standalone library. +# If embedders want to use PartitionAlloc, they need to create their own +# //build_overrides/partition_alloc.gni and define their own PartitionAlloc +# configuration. 
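+#
+# Illustrative sketch (not part of the upstream file): an embedder's own
+# //build_overrides/partition_alloc.gni would typically just assign the same
+# "*_default" variables that follow, with values chosen for that product,
+# e.g.:
+#
+#   use_partition_alloc_as_malloc_default = true
+#   use_allocator_shim_default = true
+#
+# The values shown are hypothetical examples, not recommendations.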
+ +use_partition_alloc_as_malloc_default = false +use_allocator_shim_default = false +enable_backup_ref_ptr_support_default = false +enable_mte_checked_ptr_support_default = false +put_ref_count_in_previous_slot_default = false +enable_backup_ref_ptr_slow_checks_default = false +enable_dangling_raw_ptr_checks_default = false diff --git a/external/chromium/src/base/allocator/partition_allocator/chromecast_buildflags.h b/external/chromium/src/base/allocator/partition_allocator/chromecast_buildflags.h new file mode 100644 index 000000000..5e39d24fc --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/chromecast_buildflags.h @@ -0,0 +1,12 @@ +// Generated by build/write_buildflag_header.py +// From "//base/allocator/partition_allocator:chromecast_buildflags" + +#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_CHROMECAST_BUILDFLAGS_H_ +#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_CHROMECAST_BUILDFLAGS_H_ + +#include "build/buildflag.h" // IWYU pragma: export + +#define BUILDFLAG_INTERNAL_PA_IS_CAST_ANDROID() (0) +#define BUILDFLAG_INTERNAL_PA_IS_CASTOS() (0) + +#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_CHROMECAST_BUILDFLAGS_H_ diff --git a/external/chromium/src/base/allocator/partition_allocator/chromeos_buildflags.h b/external/chromium/src/base/allocator/partition_allocator/chromeos_buildflags.h new file mode 100644 index 000000000..67bc3033f --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/chromeos_buildflags.h @@ -0,0 +1,11 @@ +// Generated by build/write_buildflag_header.py +// From "//base/allocator/partition_allocator:chromeos_buildflags" + +#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_CHROMEOS_BUILDFLAGS_H_ +#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_CHROMEOS_BUILDFLAGS_H_ + +#include "build/buildflag.h" // IWYU pragma: export + +#define BUILDFLAG_INTERNAL_PA_IS_CHROMEOS_ASH() (0) + +#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_CHROMEOS_BUILDFLAGS_H_ diff --git a/external/chromium/src/base/allocator/partition_allocator/compressed_pointer.cc b/external/chromium/src/base/allocator/partition_allocator/compressed_pointer.cc new file mode 100644 index 000000000..0350be9fa --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/compressed_pointer.cc @@ -0,0 +1,28 @@ +// Copyright 2022 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/allocator/partition_allocator/compressed_pointer.h" + +#if defined(PA_POINTER_COMPRESSION) + +namespace partition_alloc::internal { + +// We keep the useful part in |g_base_| as 1s to speed up decompression. 
+alignas(kPartitionCachelineSize)
+    PA_COMPONENT_EXPORT(PARTITION_ALLOC) CompressedPointerBaseGlobal::Base
+        CompressedPointerBaseGlobal::g_base_ = {.base = kUsefulBitsMask};
+
+void CompressedPointerBaseGlobal::SetBase(uintptr_t base) {
+  PA_DCHECK(!IsSet());
+  PA_DCHECK((base & kUsefulBitsMask) == 0);
+  g_base_.base = base | kUsefulBitsMask;
+}
+
+void CompressedPointerBaseGlobal::ResetBaseForTesting() {
+  g_base_.base = kUsefulBitsMask;
+}
+
+}  // namespace partition_alloc::internal
+
+#endif  // defined(PA_POINTER_COMPRESSION)
diff --git a/external/chromium/src/base/allocator/partition_allocator/compressed_pointer.h b/external/chromium/src/base/allocator/partition_allocator/compressed_pointer.h
new file mode 100644
index 000000000..45e838b3e
--- /dev/null
+++ b/external/chromium/src/base/allocator/partition_allocator/compressed_pointer.h
@@ -0,0 +1,666 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_COMPRESSED_POINTER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_COMPRESSED_POINTER_H_
+
+#include <climits>
+#include <type_traits>
+
+#include "base/allocator/partition_allocator/partition_address_space.h"
+#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
+#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
+#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
+
+#if defined(PA_POINTER_COMPRESSION)
+
+#if !defined(PA_GLUE_CORE_POOLS)
+#error "Pointer compression only works with glued pools"
+#endif  // !defined(PA_GLUE_CORE_POOLS)
+#if defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+#error "Pointer compression currently supports constant pool size"
+#endif  // defined(PA_DYNAMICALLY_SELECT_POOL_SIZE)
+
+#endif  // defined(PA_POINTER_COMPRESSION)
+
+namespace partition_alloc {
+
+namespace internal {
+
+template <typename T1, typename T2>
+constexpr bool IsDecayedSame =
+    std::is_same_v<std::decay_t<T1>, std::decay_t<T2>>;
+
+#if defined(PA_POINTER_COMPRESSION)
+
+// Pointer compression works by storing only the 'useful' 32-bit part of the
+// pointer. The other half (the base) is stored in a global variable
+// (CompressedPointerBaseGlobal::g_base_), which is used on decompression. To
+// support fast branchless decompression of nullptr, we use the most
+// significant bit in the compressed pointer to leverage sign-extension (for
+// non-nullptr pointers, the most significant bit is set, whereas for nullptr
+// it's not). Using this bit and supporting heaps larger than 4GB relies on
+// having alignment bits in pointers. Assuming that all pointers point to
+// objects with at least 8-byte alignment, pointer compression can support
+// heaps of size <= 16GB.
+// ((3 alignment bits) = (1 bit for sign-extension) + (2 bits for 16GB heap)).
+//
+// Example: heap base: 0x4b0'ffffffff
+//  - g_base: 0x4b3'ffffffff (lower 34 bits set)
+//  - normal pointer: 0x4b2'a08b6480
+//    - compression:
+//      - shift right by 3: 0x96'54116c90
+//      - truncate: 0x54116c90
+//      - mark MSB: 0xd4116c90
+//    - decompression:
+//      - sign-extend: 0xffffffff'd4116c90
+//      - shift left by 3: 0xfffffffe'a08b6480
+//      - 'and' with g_base: 0x000004b2'a08b6480
+//
+//  - nullptr: 0x00000000'00000000
+//    - compression:
+//      - shift right by 3: 0x00000000'00000000
+//      - truncate: 0x00000000
+//      - (don't mark MSB for nullptr)
+//    - decompression:
+//      - sign-extend: 0x00000000'00000000
+//      - shift left by 3: 0x00000000'00000000
+//      - 'and' with g_base: 0x00000000'00000000
+//
+// Pointer compression relies on having both the regular and the BRP pool (core
+// pools) 'glued', so that the same base could be used for both. For simplicity,
+// the configurations with dynamically selected pool size are not supported.
+// However, they can be at the cost of performing an extra load for
+// core-pools-shift-size on both compression and decompression.
+
+class CompressedPointerBaseGlobal final {
+ public:
+  static constexpr size_t kUsefulBits =
+      base::bits::CountTrailingZeroBits(PartitionAddressSpace::CorePoolsSize());
+  static_assert(kUsefulBits >= sizeof(uint32_t) * CHAR_BIT);
+  static constexpr size_t kBitsToShift =
+      kUsefulBits - sizeof(uint32_t) * CHAR_BIT;
+
+  CompressedPointerBaseGlobal() = delete;
+
+  // Attribute const allows the compiler to assume that
+  // CompressedPointerBaseGlobal::g_base_ doesn't change (e.g. across calls)
+  // and thereby avoid redundant loads.
+  PA_ALWAYS_INLINE __attribute__((const)) static uintptr_t Get() {
+    PA_DCHECK(IsBaseConsistent());
+    return g_base_.base;
+  }
+
+  PA_ALWAYS_INLINE static bool IsSet() {
+    PA_DCHECK(IsBaseConsistent());
+    return (g_base_.base & ~kUsefulBitsMask) != 0;
+  }
+
+ private:
+  static constexpr uintptr_t kUsefulBitsMask =
+      PartitionAddressSpace::CorePoolsSize() - 1;
+
+  static union alignas(kPartitionCachelineSize)
+      PA_COMPONENT_EXPORT(PARTITION_ALLOC) Base {
+    uintptr_t base;
+    char cache_line[kPartitionCachelineSize];
+  } g_base_ PA_CONSTINIT;
+
+  PA_ALWAYS_INLINE static bool IsBaseConsistent() {
+    return kUsefulBitsMask == (g_base_.base & kUsefulBitsMask);
+  }
+
+  static void SetBase(uintptr_t base);
+  static void ResetBaseForTesting();
+
+  friend class PartitionAddressSpace;
+};
+
+#endif  // defined(PA_POINTER_COMPRESSION)
+
+}  // namespace internal
+
+#if defined(PA_POINTER_COMPRESSION)
+
+template <typename T>
+class PA_TRIVIAL_ABI CompressedPointer final {
+ public:
+  using UnderlyingType = uint32_t;
+
+  PA_ALWAYS_INLINE constexpr CompressedPointer() = default;
+  PA_ALWAYS_INLINE explicit CompressedPointer(T* ptr) : value_(Compress(ptr)) {}
+  PA_ALWAYS_INLINE constexpr explicit CompressedPointer(std::nullptr_t)
+      : value_(0u) {}
+
+  PA_ALWAYS_INLINE constexpr CompressedPointer(const CompressedPointer&) =
+      default;
+  PA_ALWAYS_INLINE constexpr CompressedPointer(
+      CompressedPointer&& other) noexcept = default;
+
+  template <typename U,
+            std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
+  PA_ALWAYS_INLINE constexpr CompressedPointer(
+      const CompressedPointer<U>& other) {
+    if constexpr (internal::IsDecayedSame<T, U>) {
+      // When pointers have the same type modulo constness, avoid the
+      // compress-decompress round.
+      value_ = other.value_;
+    } else {
+      // When the types are different, perform the round, because the pointer
+      // may need to be adjusted.
+      // TODO(1376980): Avoid the cycle here.
+ value_ = Compress(other.get()); + } + } + + template >* = nullptr> + PA_ALWAYS_INLINE constexpr CompressedPointer( + CompressedPointer&& other) noexcept + : CompressedPointer(other) {} + + ~CompressedPointer() = default; + + PA_ALWAYS_INLINE constexpr CompressedPointer& operator=( + const CompressedPointer&) = default; + PA_ALWAYS_INLINE constexpr CompressedPointer& operator=( + CompressedPointer&& other) noexcept = default; + + template >* = nullptr> + PA_ALWAYS_INLINE constexpr CompressedPointer& operator=( + const CompressedPointer& other) { + CompressedPointer copy(other); + value_ = copy.value_; + return *this; + } + + template >* = nullptr> + PA_ALWAYS_INLINE constexpr CompressedPointer& operator=( + CompressedPointer&& other) noexcept { + *this = other; + return *this; + } + + // Don't perform compression when assigning to nullptr. + PA_ALWAYS_INLINE constexpr CompressedPointer& operator=(std::nullptr_t) { + value_ = 0u; + return *this; + } + + PA_ALWAYS_INLINE T* get() const { return Decompress(value_); } + + PA_ALWAYS_INLINE constexpr bool is_nonnull() const { return value_; } + + PA_ALWAYS_INLINE constexpr UnderlyingType GetAsIntegral() const { + return value_; + } + + PA_ALWAYS_INLINE constexpr explicit operator bool() const { + return is_nonnull(); + } + + template >>* = nullptr> + PA_ALWAYS_INLINE U& operator*() const { + PA_DCHECK(is_nonnull()); + return *get(); + } + + PA_ALWAYS_INLINE T* operator->() const { + PA_DCHECK(is_nonnull()); + return get(); + } + + PA_ALWAYS_INLINE constexpr void swap(CompressedPointer& other) { + std::swap(value_, other.value_); + } + + private: + template + friend class CompressedPointer; + + static constexpr size_t kBitsForSignExtension = 1; + static constexpr size_t kOverallBitsToShift = + internal::CompressedPointerBaseGlobal::kBitsToShift + + kBitsForSignExtension; + + static PA_ALWAYS_INLINE UnderlyingType Compress(T* ptr) { + static constexpr size_t kMinimalRequiredAlignment = 8; + static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment); + +#if BUILDFLAG(PA_DCHECK_IS_ON) + PA_DCHECK(reinterpret_cast(ptr) % kMinimalRequiredAlignment == + 0); + PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet()); + + const uintptr_t base = internal::CompressedPointerBaseGlobal::Get(); + static constexpr size_t kCorePoolsBaseMask = + ~(internal::PartitionAddressSpace::CorePoolsSize() - 1); + PA_DCHECK(!ptr || + (base & kCorePoolsBaseMask) == + (reinterpret_cast(ptr) & kCorePoolsBaseMask)); +#endif // BUILDFLAG(PA_DCHECK_IS_ON) + + const auto uptr = reinterpret_cast(ptr); + // Shift the pointer and truncate. + auto compressed = static_cast(uptr >> kOverallBitsToShift); + // If the pointer is non-null, mark the most-significant-bit to sign-extend + // it on decompression. Assuming compression is a significantly less + // frequent operation, we let more work here in favor of faster + // decompression. + // TODO(1376980): Avoid this by overreserving the heap. + if (compressed) + compressed |= (1u << (sizeof(uint32_t) * CHAR_BIT - 1)); + + return compressed; + } + + static PA_ALWAYS_INLINE T* Decompress(UnderlyingType ptr) { + PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet()); + const uintptr_t base = internal::CompressedPointerBaseGlobal::Get(); + // Treat compressed pointer as signed and cast it to uint64_t, which will + // sign-extend it. Then, shift the result by one. It's important to shift + // the already unsigned value, as otherwise it would result in undefined + // behavior. 
+ const uint64_t mask = static_cast(static_cast(ptr)) + << (kOverallBitsToShift); + return reinterpret_cast(mask & base); + } + + UnderlyingType value_; +}; + +template +PA_ALWAYS_INLINE constexpr void swap(CompressedPointer& a, + CompressedPointer& b) { + a.swap(b); +} + +// operators==. +template +PA_ALWAYS_INLINE bool operator==(CompressedPointer a, + CompressedPointer b) { + if constexpr (internal::IsDecayedSame) { + // When pointers have the same type modulo constness, simply compare + // compressed values. + return a.GetAsIntegral() == b.GetAsIntegral(); + } else { + // When the types are different, compare decompressed pointers, because the + // pointers may need to be adjusted. + // TODO(1376980): Avoid decompression here. + return a.get() == b.get(); + } +} + +template +PA_ALWAYS_INLINE constexpr bool operator==(CompressedPointer a, U* b) { + // Do compression, since it is less expensive. + return a == static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator==(T* a, CompressedPointer b) { + return b == a; +} + +template +PA_ALWAYS_INLINE constexpr bool operator==(CompressedPointer a, + std::nullptr_t) { + return !a.is_nonnull(); +} + +template +PA_ALWAYS_INLINE constexpr bool operator==(std::nullptr_t, + CompressedPointer b) { + return b == nullptr; +} + +// operators!=. +template +PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer a, + CompressedPointer b) { + return !(a == b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer a, U* b) { + // Do compression, since it is less expensive. + return a != static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator!=(T* a, CompressedPointer b) { + return b != a; +} + +template +PA_ALWAYS_INLINE constexpr bool operator!=(CompressedPointer a, + std::nullptr_t) { + return a.is_nonnull(); +} + +template +PA_ALWAYS_INLINE constexpr bool operator!=(std::nullptr_t, + CompressedPointer b) { + return b != nullptr; +} + +// operators<. +template +PA_ALWAYS_INLINE constexpr bool operator<(CompressedPointer a, + CompressedPointer b) { + if constexpr (internal::IsDecayedSame) { + // When pointers have the same type modulo constness, simply compare + // compressed values. + return a.GetAsIntegral() < b.GetAsIntegral(); + } else { + // When the types are different, compare decompressed pointers, because the + // pointers may need to be adjusted. + // TODO(1376980): Avoid decompression here. + return a.get() < b.get(); + } +} + +template +PA_ALWAYS_INLINE constexpr bool operator<(CompressedPointer a, U* b) { + // Do compression, since it is less expensive. + return a < static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator<(T* a, CompressedPointer b) { + // Do compression, since it is less expensive. + return static_cast>(a) < b; +} + +// operators<=. +template +PA_ALWAYS_INLINE constexpr bool operator<=(CompressedPointer a, + CompressedPointer b) { + if constexpr (internal::IsDecayedSame) { + // When pointers have the same type modulo constness, simply compare + // compressed values. + return a.GetAsIntegral() <= b.GetAsIntegral(); + } else { + // When the types are different, compare decompressed pointers, because the + // pointers may need to be adjusted. + // TODO(1376980): Avoid decompression here. + return a.get() <= b.get(); + } +} + +template +PA_ALWAYS_INLINE constexpr bool operator<=(CompressedPointer a, U* b) { + // Do compression, since it is less expensive. 
+ return a <= static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator<=(T* a, CompressedPointer b) { + // Do compression, since it is less expensive. + return static_cast>(a) <= b; +} + +// operators>. +template +PA_ALWAYS_INLINE constexpr bool operator>(CompressedPointer a, + CompressedPointer b) { + return !(a <= b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator>(CompressedPointer a, U* b) { + // Do compression, since it is less expensive. + return a > static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator>(T* a, CompressedPointer b) { + // Do compression, since it is less expensive. + return static_cast>(a) > b; +} + +// operators>=. +template +PA_ALWAYS_INLINE constexpr bool operator>=(CompressedPointer a, + CompressedPointer b) { + return !(a < b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator>=(CompressedPointer a, U* b) { + // Do compression, since it is less expensive. + return a >= static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator>=(T* a, CompressedPointer b) { + // Do compression, since it is less expensive. + return static_cast>(a) >= b; +} + +#endif // defined(PA_POINTER_COMPRESSION) + +// Simple wrapper over the raw pointer. +template +class PA_TRIVIAL_ABI UncompressedPointer final { + public: + PA_ALWAYS_INLINE constexpr UncompressedPointer() = default; + PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(T* ptr) : ptr_(ptr) {} + PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(std::nullptr_t) + : ptr_(nullptr) {} + + PA_ALWAYS_INLINE constexpr UncompressedPointer(const UncompressedPointer&) = + default; + PA_ALWAYS_INLINE constexpr UncompressedPointer( + UncompressedPointer&& other) noexcept = default; + + template >* = nullptr> + PA_ALWAYS_INLINE constexpr UncompressedPointer( + const UncompressedPointer& other) + : ptr_(other.ptr_) {} + + template >* = nullptr> + PA_ALWAYS_INLINE constexpr UncompressedPointer( + UncompressedPointer&& other) noexcept + : ptr_(std::move(other.ptr_)) {} + + ~UncompressedPointer() = default; + + PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=( + const UncompressedPointer&) = default; + PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=( + UncompressedPointer&& other) noexcept = default; + + template >* = nullptr> + PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=( + const UncompressedPointer& other) { + ptr_ = other.ptr_; + return *this; + } + + template >* = nullptr> + PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=( + UncompressedPointer&& other) noexcept { + ptr_ = std::move(other.ptr_); + return *this; + } + + PA_ALWAYS_INLINE constexpr UncompressedPointer& operator=(std::nullptr_t) { + ptr_ = nullptr; + return *this; + } + + PA_ALWAYS_INLINE constexpr T* get() const { return ptr_; } + + PA_ALWAYS_INLINE constexpr bool is_nonnull() const { return ptr_; } + + PA_ALWAYS_INLINE constexpr explicit operator bool() const { + return is_nonnull(); + } + + template >>* = nullptr> + PA_ALWAYS_INLINE constexpr U& operator*() const { + PA_DCHECK(is_nonnull()); + return *get(); + } + + PA_ALWAYS_INLINE constexpr T* operator->() const { + PA_DCHECK(is_nonnull()); + return get(); + } + + PA_ALWAYS_INLINE constexpr void swap(UncompressedPointer& other) { + std::swap(ptr_, other.ptr_); + } + + private: + template + friend class UncompressedPointer; + + T* ptr_; +}; + +template +PA_ALWAYS_INLINE constexpr void swap(UncompressedPointer& a, + UncompressedPointer& b) { + a.swap(b); +} + +// operators==. 
+template +PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer a, + UncompressedPointer b) { + return a.get() == b.get(); +} + +template +PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer a, U* b) { + return a == static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator==(T* a, UncompressedPointer b) { + return b == a; +} + +template +PA_ALWAYS_INLINE constexpr bool operator==(UncompressedPointer a, + std::nullptr_t) { + return !a.is_nonnull(); +} + +template +PA_ALWAYS_INLINE constexpr bool operator==(std::nullptr_t, + UncompressedPointer b) { + return b == nullptr; +} + +// operators!=. +template +PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer a, + UncompressedPointer b) { + return !(a == b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer a, U* b) { + return a != static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator!=(T* a, UncompressedPointer b) { + return b != a; +} + +template +PA_ALWAYS_INLINE constexpr bool operator!=(UncompressedPointer a, + std::nullptr_t) { + return a.is_nonnull(); +} + +template +PA_ALWAYS_INLINE constexpr bool operator!=(std::nullptr_t, + UncompressedPointer b) { + return b != nullptr; +} + +// operators<. +template +PA_ALWAYS_INLINE constexpr bool operator<(UncompressedPointer a, + UncompressedPointer b) { + return a.get() < b.get(); +} + +template +PA_ALWAYS_INLINE constexpr bool operator<(UncompressedPointer a, U* b) { + return a < static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator<(T* a, UncompressedPointer b) { + return static_cast>(a) < b; +} + +// operators<=. +template +PA_ALWAYS_INLINE constexpr bool operator<=(UncompressedPointer a, + UncompressedPointer b) { + return a.get() <= b.get(); +} + +template +PA_ALWAYS_INLINE constexpr bool operator<=(UncompressedPointer a, U* b) { + return a <= static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator<=(T* a, UncompressedPointer b) { + return static_cast>(a) <= b; +} + +// operators>. +template +PA_ALWAYS_INLINE constexpr bool operator>(UncompressedPointer a, + UncompressedPointer b) { + return !(a <= b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator>(UncompressedPointer a, U* b) { + return a > static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator>(T* a, UncompressedPointer b) { + return static_cast>(a) > b; +} + +// operators>=. +template +PA_ALWAYS_INLINE constexpr bool operator>=(UncompressedPointer a, + UncompressedPointer b) { + return !(a < b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator>=(UncompressedPointer a, U* b) { + return a >= static_cast>(b); +} + +template +PA_ALWAYS_INLINE constexpr bool operator>=(T* a, UncompressedPointer b) { + return static_cast>(a) >= b; +} + +} // namespace partition_alloc + +#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_COMPRESSED_POINTER_H_ diff --git a/external/chromium/src/base/allocator/partition_allocator/compressed_pointer_unittest.cc b/external/chromium/src/base/allocator/partition_allocator/compressed_pointer_unittest.cc new file mode 100644 index 000000000..7a608e739 --- /dev/null +++ b/external/chromium/src/base/allocator/partition_allocator/compressed_pointer_unittest.cc @@ -0,0 +1,439 @@ +// Copyright 2022 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "base/allocator/partition_allocator/compressed_pointer.h" + +#include "base/allocator/partition_allocator/partition_alloc.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace partition_alloc { + +namespace { + +struct Base { + double a; +}; +struct Derived : Base { + double b; +}; +struct Mixin { + double c; +}; +struct DerivedWithMixin : Base, Mixin { + double d; +}; + +using PAAllocator = internal::PartitionAllocator; + +struct PADeleter final { + void operator()(void* ptr) const { allocator_.root()->Free(ptr); } + PAAllocator& allocator_; +}; + +template +std::unique_ptr make_pa_unique(PAAllocator& alloc, + Args&&... args) { + T* result = new (alloc.root()->Alloc(sizeof(T), nullptr)) + T(std::forward(args)...); + return std::unique_ptr(result, PADeleter{alloc}); +} + +template +std::unique_ptr make_pa_array_unique(PAAllocator& alloc, + size_t num) { + T* result = new (alloc.root()->Alloc(sizeof(T) * num, nullptr)) T(); + return std::unique_ptr(result, PADeleter{alloc}); +} + +// Test that pointer types are trivial. +#if defined(PA_POINTER_COMPRESSION) +static_assert( + std::is_trivially_default_constructible_v>); +static_assert(std::is_trivially_copy_constructible_v>); +static_assert(std::is_trivially_move_constructible_v>); +static_assert(std::is_trivially_copy_assignable_v>); +static_assert(std::is_trivially_move_assignable_v>); +#endif // if defined(PA_POINTER_COMPRESSION) +static_assert( + std::is_trivially_default_constructible_v>); +static_assert( + std::is_trivially_copy_constructible_v>); +static_assert( + std::is_trivially_move_constructible_v>); +static_assert(std::is_trivially_copy_assignable_v>); +static_assert(std::is_trivially_move_assignable_v>); + +} // namespace + +struct UncompressedTypeTag {}; +struct CompressedTypeTag {}; + +template +class CompressedPointerTest : public ::testing::Test { + public: +#if defined(PA_POINTER_COMPRESSION) + template + using PointerType = + std::conditional_t, + CompressedPointer, + UncompressedPointer>; +#else + template + using PointerType = UncompressedPointer; +#endif + + CompressedPointerTest() { + allocator_.init({PartitionOptions::AlignedAlloc::kDisallowed, + PartitionOptions::ThreadCache::kDisabled, + PartitionOptions::Quarantine::kDisallowed, + PartitionOptions::Cookie::kDisallowed, + PartitionOptions::BackupRefPtr::kDisabled, + PartitionOptions::BackupRefPtrZapping::kDisabled, + PartitionOptions::UseConfigurablePool::kNo}); + } + + protected: + internal::PartitionAllocator allocator_; +}; + +#if defined(PA_POINTER_COMPRESSION) +using ObjectTypes = ::testing::Types; +#else // !defined(PA_POINTER_COMPRESSION) +using ObjectTypes = ::testing::Types; +#endif // !defined(PA_POINTER_COMPRESSION) + +TYPED_TEST_SUITE(CompressedPointerTest, ObjectTypes); + +TYPED_TEST(CompressedPointerTest, NullConstruction) { + using DoublePointer = typename TestFixture::template PointerType; + { + DoublePointer p = static_cast(nullptr); + EXPECT_FALSE(p.is_nonnull()); + EXPECT_FALSE(p.get()); + EXPECT_EQ(p, nullptr); + } + { + DoublePointer p1 = static_cast(nullptr); + DoublePointer p2 = p1; + EXPECT_FALSE(p2.is_nonnull()); + EXPECT_FALSE(p2.get()); + EXPECT_EQ(p2, nullptr); + } + { + DoublePointer p1 = static_cast(nullptr); + DoublePointer p2 = std::move(p1); + EXPECT_FALSE(p2.is_nonnull()); + EXPECT_FALSE(p2.get()); + EXPECT_EQ(p2, nullptr); + } +} + +TYPED_TEST(CompressedPointerTest, NullAssignment) { + using DoublePointer = typename TestFixture::template PointerType; + { + DoublePointer p; + p = static_cast(nullptr); + 
EXPECT_FALSE(p.is_nonnull()); + EXPECT_FALSE(p.get()); + EXPECT_EQ(p.get(), nullptr); + EXPECT_EQ(p, nullptr); + } + { + DoublePointer p1 = DoublePointer(nullptr), p2; + p2 = p1; + EXPECT_FALSE(p2.is_nonnull()); + EXPECT_FALSE(p2.get()); + EXPECT_EQ(p2.get(), nullptr); + EXPECT_EQ(p2, nullptr); + } + { + DoublePointer p1 = DoublePointer(nullptr), p2; + p2 = std::move(p1); + EXPECT_FALSE(p2.is_nonnull()); + EXPECT_FALSE(p2.get()); + EXPECT_EQ(p2.get(), nullptr); + EXPECT_EQ(p2, nullptr); + } +} + +TYPED_TEST(CompressedPointerTest, SameTypeValueConstruction) { + using DoublePointer = typename TestFixture::template PointerType; + auto d = make_pa_unique(this->allocator_); + { + DoublePointer p = static_cast(d.get()); + EXPECT_TRUE(p.is_nonnull()); + EXPECT_EQ(p.get(), d.get()); + EXPECT_EQ(p, d.get()); + } + { + DoublePointer p1 = static_cast(d.get()); + DoublePointer p2 = p1; + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, p1); + EXPECT_EQ(p2, d.get()); + } + { + DoublePointer p1 = static_cast(d.get()); + DoublePointer p2 = std::move(p1); + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, d.get()); + } +} + +TYPED_TEST(CompressedPointerTest, SameTypeValueAssignment) { + using DoublePointer = typename TestFixture::template PointerType; + auto d = make_pa_unique(this->allocator_); + { + DoublePointer p; + p = static_cast(d.get()); + EXPECT_TRUE(p.is_nonnull()); + EXPECT_EQ(p.get(), d.get()); + EXPECT_EQ(p, d.get()); + } + { + DoublePointer p1 = static_cast(d.get()); + DoublePointer p2; + p2 = p1; + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, p1); + EXPECT_EQ(p2, d.get()); + } + { + DoublePointer p1 = static_cast(d.get()); + DoublePointer p2; + p2 = std::move(p1); + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, d.get()); + } +} + +TYPED_TEST(CompressedPointerTest, + HeterogeneousValueConstructionSamePointerValue) { + using BasePointer = typename TestFixture::template PointerType; + auto d = make_pa_unique(this->allocator_); + { + BasePointer p = static_cast(d.get()); + EXPECT_TRUE(p.is_nonnull()); + EXPECT_EQ(p.get(), d.get()); + } + { + BasePointer p1 = static_cast(d.get()); + BasePointer p2 = p1; + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, p1); + EXPECT_EQ(p2, d.get()); + } + { + BasePointer p1 = static_cast(d.get()); + BasePointer p2 = std::move(p1); + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, d.get()); + } +} + +TYPED_TEST(CompressedPointerTest, + HeterogeneousValueAssignmentSamePointerValue) { + using BasePointer = typename TestFixture::template PointerType; + auto d = make_pa_unique(this->allocator_); + { + BasePointer p; + p = static_cast(d.get()); + EXPECT_TRUE(p.is_nonnull()); + EXPECT_EQ(p.get(), d.get()); + } + { + BasePointer p1 = static_cast(d.get()); + BasePointer p2; + p2 = p1; + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, p1); + EXPECT_EQ(p2, d.get()); + } + { + BasePointer p1 = static_cast(d.get()); + BasePointer p2; + p2 = std::move(p1); + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, d.get()); + } +} + +TYPED_TEST(CompressedPointerTest, + HeterogeneousValueConstructionDifferentPointerValues) { + using MixinPointer = typename TestFixture::template PointerType; + auto d = make_pa_unique(this->allocator_); + { + MixinPointer p = static_cast(d.get()); + ASSERT_NE(static_cast(p.get()), static_cast(d.get())); + } + { + 
MixinPointer p = static_cast(d.get()); + EXPECT_TRUE(p.is_nonnull()); + EXPECT_EQ(p.get(), d.get()); + } + { + MixinPointer p1 = static_cast(d.get()); + MixinPointer p2 = p1; + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, p1); + EXPECT_EQ(p2, d.get()); + } + { + MixinPointer p1 = static_cast(d.get()); + MixinPointer p2 = std::move(p1); + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, d.get()); + } +} + +TYPED_TEST(CompressedPointerTest, + HeterogeneousValueAssignmentDifferentPointerValue) { + using MixinPointer = typename TestFixture::template PointerType; + auto d = make_pa_unique(this->allocator_); + { + MixinPointer p; + p = static_cast(d.get()); + ASSERT_NE(static_cast(p.get()), static_cast(d.get())); + } + { + MixinPointer p; + p = static_cast(d.get()); + EXPECT_TRUE(p.is_nonnull()); + EXPECT_EQ(p.get(), d.get()); + } + { + MixinPointer p1 = static_cast(d.get()); + MixinPointer p2; + p2 = p1; + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, p1); + EXPECT_EQ(p2, d.get()); + } + { + MixinPointer p1 = static_cast(d.get()); + MixinPointer p2; + p2 = std::move(p1); + EXPECT_TRUE(p2.is_nonnull()); + EXPECT_EQ(p2.get(), d.get()); + EXPECT_EQ(p2, d.get()); + } +} + +namespace { + +template