diff --git a/include/runtime/arena.h b/include/runtime/arena.h
index afbfca21b..eea6f9aff 100644
--- a/include/runtime/arena.h
+++ b/include/runtime/arena.h
@@ -12,7 +12,6 @@ extern "C" {
 
-size_t const HYPERBLOCK_SIZE = (size_t)BLOCK_SIZE * 1024 * 1024;
 
 // An arena can be used to allocate objects that can then be deallocated all at
 // once.
@@ -24,9 +23,9 @@ class arena {
   }
 
   ~arena() {
-    munmap(current_addr_ptr, HYPERBLOCK_SIZE);
+    munmap(current_addr_ptr, hyperblock_size);
     if (collection_addr_ptr)
-      munmap(collection_addr_ptr, HYPERBLOCK_SIZE);
+      munmap(collection_addr_ptr, hyperblock_size);
   }
 
   char *evacuate(char *scan_ptr);
@@ -124,6 +123,10 @@ class arena {
       = nullptr; // pointer to start of collection address space
   size_t num_collection_blocks = 0; // notional number of BLOCK_SIZE blocks in collection semispace
+  //
+  // hyperblock_size is set to a non-zero value just once, at runtime, by any thread.
+  //
+  static size_t hyperblock_size;
 };
 
 inline char arena::get_arena_semispace_id_of_object(void *ptr) {
@@ -135,7 +138,7 @@ inline char arena::get_arena_semispace_id_of_object(void *ptr) {
   // Set the low bits to 1 to get the address of the last byte in the hyperblock.
   //
   uintptr_t end_address
-      = reinterpret_cast<uintptr_t>(ptr) | (HYPERBLOCK_SIZE - 1);
+      = reinterpret_cast<uintptr_t>(ptr) | (hyperblock_size - 1);
   return *reinterpret_cast<char *>(end_address);
 }
@@ -166,7 +169,7 @@ inline void *arena::kore_arena_alloc(size_t requested) {
     // We move the tripwire to 1 past the end of our hyperblock so that we have
     // a well defined comparison that will always be false until the next arena swap.
     //
-    tripwire = current_addr_ptr + HYPERBLOCK_SIZE;
+    tripwire = current_addr_ptr + hyperblock_size;
   }
   void *result = allocation_ptr;
   allocation_ptr += requested;
diff --git a/runtime/alloc/arena.cpp b/runtime/alloc/arena.cpp
index fbfd7e527..1dd10bb2a 100644
--- a/runtime/alloc/arena.cpp
+++ b/runtime/alloc/arena.cpp
@@ -23,11 +23,38 @@ thread_local bool time_for_collection = false;
 thread_local bool gc_enabled = true;
 #endif
 
+size_t arena::hyperblock_size = 0;
+
 void arena::initialize_semispace() {
+  if (hyperblock_size == 0) {
+    //
+    // No thread has initialized hyperblock_size yet, so this thread will.
+    // We get the size of physical memory from the OS as number of physical pages * page size.
+    //
+    size_t pages = sysconf(_SC_PHYS_PAGES);
+    size_t page_size = sysconf(_SC_PAGE_SIZE);
+    size_t v = pages * page_size;
+    //
+    // We require hyperblock_size to be a power of 2 for bit twiddling, so we
+    // round up to the next power of 2.
+    //
+    --v;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v |= v >> 32;
+    v++;
+    //
+    // Writing to a class static variable might race with another thread storing the same value.
+    //
+    hyperblock_size = v;
+  }
   //
   // Current semispace is uninitialized so mmap() a big chunk of address space.
   //
-  size_t request = 2 * HYPERBLOCK_SIZE;
+  size_t request = 2 * hyperblock_size;
   void *addr = mmap(
       nullptr, // let OS choose the address
       request, // Linux and MacOS both allow up to 64TB
@@ -46,12 +73,12 @@ void arena::initialize_semispace() {
   auto *start_block = reinterpret_cast<char *>(addr);
   auto *end_block = start_block + request;
   //
-  // We allocated 2 * HYPERBLOCK_SIZE worth of address space but we're only going to use 1, aligned on a
-  // HYPERBLOCK_SIZE boundry. This is so we can get end of the hyperblock by setting the low bits of any
+  // We allocated 2 * hyperblock_size worth of address space but we're only going to use 1, aligned on a
+  // hyperblock_size boundary. This is so we can get the end of the hyperblock by setting the low bits of any
   // address within the space to 1.
   //
   current_addr_ptr = reinterpret_cast<char *>(
-      std::align(HYPERBLOCK_SIZE, HYPERBLOCK_SIZE, addr, request));
+      std::align(hyperblock_size, hyperblock_size, addr, request));
   //
   // Release any unused address space at the start of the mmap()ed block.
   //
@@ -61,7 +88,7 @@ void arena::initialize_semispace() {
   //
   // Release any unused address space at the end of the mmap()ed block.
   //
-  auto *end_aligned = current_addr_ptr + HYPERBLOCK_SIZE;
+  auto *end_aligned = current_addr_ptr + hyperblock_size;
   if (size_t back_slop = end_block - end_aligned) {
     munmap(end_aligned, back_slop);
   }
@@ -69,7 +96,7 @@ void arena::initialize_semispace() {
   // We put a semispace id in the last byte of the hyperblock so we can identify which semispace an address
   // belongs to by setting the low bits to 1 to access this id.
   //
-  current_addr_ptr[HYPERBLOCK_SIZE - 1] = allocation_semispace_id;
+  current_addr_ptr[hyperblock_size - 1] = allocation_semispace_id;
   allocation_ptr = current_addr_ptr;
   //
   // We set the tripwire for this space so we trigger a garbage collection when we pass BLOCK_SIZE of memory
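
A quick standalone check of the two bit tricks this patch leans on (not part of
the change; round_up_to_pow2 and main are illustrative, and a 64-bit size_t is
assumed, as the patch's v >> 32 already requires): the or-shift cascade that
rounds the sysconf() memory size up to a power of 2, and the low-bits mask that
get_arena_semispace_id_of_object uses to reach the hyperblock's last byte.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <unistd.h>

// Same or-shift cascade as arena::initialize_semispace.
static size_t round_up_to_pow2(size_t v) {
  --v;
  v |= v >> 1;
  v |= v >> 2;
  v |= v >> 4;
  v |= v >> 8;
  v |= v >> 16;
  v |= v >> 32;
  return v + 1;
}

int main() {
  // The same physical-memory query the patch performs.
  size_t hyperblock_size = round_up_to_pow2(
      (size_t)sysconf(_SC_PHYS_PAGES) * (size_t)sysconf(_SC_PAGE_SIZE));
  assert((hyperblock_size & (hyperblock_size - 1)) == 0); // power of 2

  // With a power-of-2 size, hyperblock_size - 1 is an all-ones mask, so OR-ing
  // any address inside an aligned hyperblock with it yields the address of the
  // hyperblock's last byte, which is where the patch stores the semispace id.
  uintptr_t some_addr = 0x7f1234567890;
  uintptr_t base = some_addr & ~(uintptr_t)(hyperblock_size - 1); // align down
  assert((some_addr | (hyperblock_size - 1)) == base + hyperblock_size - 1);

  printf("hyperblock_size = %zu bytes\n", hyperblock_size);
}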
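
The patch's own comment flags the unguarded write to arena::hyperblock_size as
a possible race. Every thread that takes the branch stores identical bytes, so
it is benign in practice, but it remains a formal data race under the C++
memory model. A minimal sketch of a race-free variant, assuming std::call_once
is acceptable here (the names below are illustrative stand-ins, not the
patch's):

#include <cstddef>
#include <mutex>
#include <unistd.h>

namespace {
std::once_flag hyperblock_size_once; // hypothetical guard
size_t hyperblock_size = 0;          // stand-in for arena::hyperblock_size

void init_hyperblock_size() {
  std::call_once(hyperblock_size_once, [] {
    size_t v = (size_t)sysconf(_SC_PHYS_PAGES) * (size_t)sysconf(_SC_PAGE_SIZE);
    // Round up to the next power of 2, exactly as the patch does.
    --v;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    hyperblock_size = v + 1;
  });
}
} // namespace

An std::atomic<size_t> would work equally well and keeps the post-initialization
fast path free of locks.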