From b24b5213ee3e788bf269b5b0e98a55860100f399 Mon Sep 17 00:00:00 2001
From: Steven Eker
Date: Tue, 19 Nov 2024 04:25:34 +0100
Subject: [PATCH] deleted free_all_memory(), global variable hyperblock_ptr

---
 include/runtime/arena.h | 11 +++++------
 runtime/alloc/arena.cpp |  9 ---------
 2 files changed, 5 insertions(+), 15 deletions(-)

diff --git a/include/runtime/arena.h b/include/runtime/arena.h
index 95ce46819..9b40f66ca 100644
--- a/include/runtime/arena.h
+++ b/include/runtime/arena.h
@@ -14,6 +14,11 @@ class arena {
 public:
   arena(char id)
       : allocation_semispace_id(id) { }
+
+  // Allocates the requested number of bytes as a contiguous region and returns a
+  // pointer to the first allocated byte.
+  // If called with a requested size greater than the maximum single allocation
+  // size, the space is allocated in a general (not garbage collected) pool.
   void *kore_arena_alloc(size_t requested);
 
   // Returns the address of the first byte that belongs in the given arena.
@@ -114,10 +119,6 @@ extern thread_local bool time_for_collection;
 
 size_t get_gc_threshold();
 
-// Allocates the requested number of bytes as a contiguous region and returns a
-// pointer to the first allocated byte.
-// If called with requested size greater than the maximun single allocation
-// size, the space is allocated in a general (not garbage collected pool).
 inline void *arena::kore_arena_alloc(size_t requested) {
   if (block + requested > block_end) {
     return do_alloc_slow(requested);
@@ -130,8 +131,6 @@ inline void *arena::kore_arena_alloc(size_t requested) {
   return result;
 }
 
-// Deallocates all the memory allocated for registered arenas.
-void free_all_memory(void);
 }
 
 #endif // ARENA_H
diff --git a/runtime/alloc/arena.cpp b/runtime/alloc/arena.cpp
index 6e9bea097..961b2c3be 100644
--- a/runtime/alloc/arena.cpp
+++ b/runtime/alloc/arena.cpp
@@ -33,7 +33,6 @@ arena::get_arena_semispace_id_of_object(void *ptr) {
 // We will reserve enough address space for 1 million 1MB blocks. Might want to increase this on a > 1TB server.
 //
 size_t const HYPERBLOCK_SIZE = (size_t)BLOCK_SIZE * 1024 * 1024;
-static thread_local void *hyperblock_ptr = nullptr; // only needed for munmap()
 
 static void *megabyte_malloc() {
   //
@@ -64,7 +63,6 @@ static void *megabyte_malloc() {
     perror("mmap()");
     abort();
   }
-  hyperblock_ptr = addr;
   //
   // We ask for one block worth of address space less than we allocated so alignment will always succeed.
   // We don't worry about unused address space either side of our aligned address space because there will be no
@@ -76,13 +74,6 @@ static void *megabyte_malloc() {
   return currentblock_ptr;
 }
 
-void free_all_memory() {
-  //
-  // Frees all memory that was demand paged into this address range.
-  //
-  munmap(hyperblock_ptr, HYPERBLOCK_SIZE);
-}
-
 #ifdef __MACH__
 //
 // thread_local disabled for Apple
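
Editorial note, not part of the patch: the doc comment moved into arena.h describes a
bump-pointer allocator with a slow path taken when the current block overflows or the
request is oversized. A minimal self-contained sketch of that scheme follows; the names
block, block_end and the do_alloc_slow() role come from the diff, while the class name
toy_arena, the malloc-backed slow path and the 1 MB block size here are illustrative
assumptions, not the backend's actual implementation.

    #include <cstddef>
    #include <cstdlib>

    class toy_arena {
    public:
      void *alloc(size_t requested) {
        // Fast path: the current block has room, so just bump the cursor.
        if (block + requested > block_end) {
          return alloc_slow(requested);
        }
        void *result = block; // first byte of this allocation
        block += requested;   // bump past it for the next caller
        return result;
      }

    private:
      // Stand-in for the patch's do_alloc_slow(): start a fresh block and
      // retry, or satisfy oversized requests straight from a general pool
      // (plain malloc here). The first call always lands on this path.
      void *alloc_slow(size_t requested) {
        if (requested > block_size) {
          return std::malloc(requested); // too big for any block
        }
        block = static_cast<char *>(std::malloc(block_size));
        block_end = block + block_size;
        void *result = block;
        block += requested;
        return result;
      }

      static constexpr size_t block_size = 1024 * 1024; // 1 MB, as in the patch
      char *block = nullptr;     // next free byte of the current block
      char *block_end = nullptr; // one past the last usable byte
    };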
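
The comments in megabyte_malloc() describe a reserve-then-align idiom: mmap() one huge,
demand-paged region up front, then hand out 1 MB blocks from a block-aligned cursor
inside it. A sketch of that idiom under stated assumptions: the helper name
reserve_hyperblock and the mmap flags are mine (the diff only shows the
perror("mmap()")/abort() failure handling), and only HYPERBLOCK_SIZE - BLOCK_SIZE bytes
are treated as usable, so the alignment step can never run off the end of the mapping.

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <sys/mman.h>

    size_t const BLOCK_SIZE = 1024 * 1024;                           // 1 MB blocks
    size_t const HYPERBLOCK_SIZE = (size_t)BLOCK_SIZE * 1024 * 1024; // ~1 TB of address space

    // Reserve the hyperblock and return its first BLOCK_SIZE-aligned byte.
    // Rounding up costs at most BLOCK_SIZE - 1 bytes at the front, which is
    // covered by reserving one block more than is ever handed out.
    static void *reserve_hyperblock() {
      void *addr = mmap(nullptr, HYPERBLOCK_SIZE, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (addr == MAP_FAILED) {
        perror("mmap()");
        abort();
      }
      uintptr_t start = reinterpret_cast<uintptr_t>(addr);
      uintptr_t aligned = (start + BLOCK_SIZE - 1) & ~(uintptr_t)(BLOCK_SIZE - 1);
      return reinterpret_cast<void *>(aligned);
    }

A reservation like this lives until the process exits, at which point the kernel reclaims
the whole address range anyway, so an explicit munmap() just before termination buys
nothing; that is presumably why the patch can drop free_all_memory() together with the
hyperblock_ptr bookkeeping that existed only to support it.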