diff --git a/include/runtime/arena.h b/include/runtime/arena.h
index 6248b243d..fc72155fb 100644
--- a/include/runtime/arena.h
+++ b/include/runtime/arena.h
@@ -10,27 +10,100 @@ extern "C" {
 
 // An arena can be used to allocate objects that can then be deallocated all at
 // once.
-struct arena {
-  char *first_block;
-  char *block;
-  char *block_start;
-  char *block_end;
-  char *first_collection_block;
-  size_t num_blocks;
-  size_t num_collection_blocks;
-  char allocation_semispace_id;
-};
-
-using memory_block_header = struct {
-  char *next_block;
-  char *next_superblock;
-  char semispace;
+class arena {
+public:
+  arena(char id)
+      : allocation_semispace_id(id) { }
+
+  // Allocates the requested number of bytes as a contiguous region and returns a
+  // pointer to the first allocated byte.
+  // If called with a requested size greater than the maximum single allocation
+  // size, the space is allocated in a general (not garbage-collected) pool.
+  void *kore_arena_alloc(size_t requested);
+
+  // Returns the address of the first byte that belongs in the given arena.
+  // Returns 0 if nothing has ever been allocated in that arena.
+  char *arena_start_ptr() const;
+
+  // Returns a pointer to a location holding the address of the last allocated
+  // byte in the given arena plus 1.
+  // This address is 0 if nothing has ever been allocated in that arena.
+  char **arena_end_ptr();
+
+  // Returns the total number of allocatable bytes currently in the arena in its
+  // active semispace.
+  size_t arena_size() const;
+
+  // Clears the current allocation space by setting its start back to its first
+  // block. It is used during garbage collection to effectively collect all of the
+  // arena.
+  void arena_clear();
+
+  // Resizes the last allocation as long as the resize does not require a new
+  // block allocation.
+  // Returns the address of the byte following the last newly allocated byte when
+  // the resize succeeds; returns 0 otherwise.
+  void *arena_resize_last_alloc(ssize_t increase);
+
+  // Returns the given arena's current collection semispace ID.
+  // Each arena has 2 semispace IDs: one equal to the arena ID and the other equal
+  // to the ones' complement of the arena ID. At any time one of these semispaces
+  // is used for allocation and the other is used for collection.
+  char get_arena_collection_semispace_id() const;
+
+  // Exchanges the current allocation and collection semispaces and clears the new
+  // current allocation semispace by setting its start back to its first block.
+  // It is used before garbage collection.
+  void arena_swap_and_clear();
+
+  // Given two pointers to objects allocated in the same arena, returns the number
+  // of bytes they are separated by within the virtual block of memory represented
+  // by the blocks of that arena. This difference will include blocks containing
+  // sentinel bytes. Undefined behavior will result if the pointers belong to
+  // different arenas.
+  static ssize_t ptr_diff(char *ptr1, char *ptr2);
+
+  // Given a starting pointer to an address allocated in an arena and a size in
+  // bytes, this function returns a pointer to an address allocated in the
+  // same arena after size bytes from the starting pointer.
+  //
+  // 1st argument: the starting pointer
+  // 2nd argument: the size in bytes to add to the starting pointer
+  // 3rd argument: the address of the last allocated byte in the arena plus 1
+  // Return value: the address allocated in the arena after size bytes from the
+  // starting pointer, or 0 if this is equal to the 3rd argument.
+  static char *move_ptr(char *ptr, size_t size, char const *arena_end_ptr);
+
+  // Returns the ID of the semispace where the given address was allocated.
+  // The behavior is undefined if called with an address that has not been
+  // allocated within an arena.
+  static char get_arena_semispace_id_of_object(void *ptr);
+
+private:
+  struct memory_block_header {
+    char *next_block;
+    char semispace;
+  };
+
+  void fresh_block();
+  static memory_block_header *mem_block_header(void *ptr);
+
+  // helper function for `kore_arena_alloc`. Do not call directly.
+  void *do_alloc_slow(size_t requested);
+
+  char *first_block; // beginning of first block
+  char *block; // where allocations are being made in current block
+  char *block_start; // start of current block
+  char *block_end; // 1 past end of current block
+  char *first_collection_block; // beginning of other semispace
+  size_t num_blocks; // number of blocks in current semispace
+  size_t num_collection_blocks; // number of blocks in other semispace
+  char allocation_semispace_id; // id of current semispace
 };
 
 // Macro to define a new arena with the given ID. Supports IDs ranging from 0 to
 // 127.
-#define REGISTER_ARENA(name, id) \
-  static thread_local struct arena name = {.allocation_semispace_id = (id)}
+#define REGISTER_ARENA(name, id) static thread_local arena name(id)
 
 #define MEM_BLOCK_START(ptr) \
   ((char *)(((uintptr_t)(ptr)-1) & ~(BLOCK_SIZE - 1)))
@@ -46,92 +119,17 @@ extern thread_local bool time_for_collection;
 
 size_t get_gc_threshold();
 
-// Resets the given arena.
-void arena_reset(struct arena *);
-
-// Returns the given arena's current allocation semispace ID.
-// Each arena has 2 semispace IDs one equal to the arena ID and the other equal
-// to the 1's complement of the arena ID. At any time one of these semispaces
-// is used for allocation and the other is used for collection.
-char get_arena_allocation_semispace_id(const struct arena *);
-
-// Returns the given arena's current collection semispace ID.
-// See above for details.
-char get_arena_collection_semispace_id(const struct arena *);
-
-// Returns the ID of the semispace where the given address was allocated.
-// The behavior is undefined if called with an address that has not been
-// allocated within an arena.
-char get_arena_semispace_id_of_object(void *);
-
-// helper function for `kore_arena_alloc`. Do not call directly.
-void *do_alloc_slow(size_t, struct arena *);
-
-// Allocates the requested number of bytes as a contiguous region and returns a
-// pointer to the first allocated byte.
-// If called with requested size greater than the maximun single allocation
-// size, the space is allocated in a general (not garbage collected pool).
-inline void *kore_arena_alloc(struct arena *arena, size_t requested) {
-  if (arena->block + requested > arena->block_end) {
-    return do_alloc_slow(requested, arena);
+inline void *arena::kore_arena_alloc(size_t requested) {
+  if (block + requested > block_end) {
+    return do_alloc_slow(requested);
   }
-  void *result = arena->block;
-  arena->block += requested;
+  void *result = block;
+  block += requested;
   MEM_LOG(
       "Allocation at %p (size %zd), next alloc at %p (if it fits)\n", result,
-      requested, arena->block);
+      requested, block);
   return result;
 }
-
-// Resizes the last allocation as long as the resize does not require a new
-// block allocation.
-// Returns the address of the byte following the last newlly allocated byte when
-// the resize succeeds, returns 0 otherwise.
-void *arena_resize_last_alloc(struct arena *, ssize_t);
-
-// Exchanges the current allocation and collection semispaces and clears the new
-// current allocation semispace by setting its start back to its first block.
-// It is used before garbage collection.
-void arena_swap_and_clear(struct arena *);
-
-// Clears the current allocation space by setting its start back to its first
-// block. It is used during garbage collection to effectively collect all of the
-// arena.
-void arena_clear(struct arena *);
-
-// Returns the address of the first byte that belongs in the given arena.
-// Returns 0 if nothing has been allocated ever in that arena.
-char *arena_start_ptr(const struct arena *);
-
-// Returns a pointer to a location holding the address of last allocated
-// byte in the given arena plus 1.
-// This address is 0 if nothing has been allocated ever in that arena.
-char **arena_end_ptr(struct arena *);
-
-// Given a starting pointer to an address allocated in an arena and a size in
-// bytes, this function returns a pointer to an address allocated in the
-// same arena after size bytes from the starting pointer.
-//
-// 1st argument: the starting pointer
-// 2nd argument: the size in bytes to add to the starting pointer
-// 3rd argument: the address of last allocated byte in the arena plus 1
-// Return value: the address allocated in the arena after size bytes from the
-// starting pointer, or 0 if this is equal to the 3rd argument.
-char *move_ptr(char *, size_t, char const *);
-
-// Given two pointers to objects allocated in the same arena, return the number
-// of bytes they are separated by within the virtual block of memory represented
-// by the blocks of that arena. This difference will include blocks containing
-// sentinel bytes. Undefined behavior will result if the pointers belong to
-// different arenas.
-ssize_t ptr_diff(char *, char *);
-
-// return the total number of allocatable bytes currently in the arena in its
-// active semispace.
-size_t arena_size(const struct arena *);
-
-// Deallocates all the memory allocated for registered arenas.
-void free_all_memory(void);
 }
 
 #endif // ARENA_H
diff --git a/runtime/alloc/arena.cpp b/runtime/alloc/arena.cpp
index 0e1e4de15..c4384642e 100644
--- a/runtime/alloc/arena.cpp
+++ b/runtime/alloc/arena.cpp
@@ -12,40 +12,20 @@
 
 extern size_t const VAR_BLOCK_SIZE = BLOCK_SIZE;
 
-__attribute__((always_inline)) memory_block_header *
-mem_block_header(void *ptr) {
+__attribute__((always_inline)) arena::memory_block_header *
+arena::mem_block_header(void *ptr) {
   // NOLINTNEXTLINE(*-reinterpret-cast)
-  return reinterpret_cast<memory_block_header *>(
+  return reinterpret_cast<arena::memory_block_header *>(
       ((uintptr_t)(ptr)-1) & ~(BLOCK_SIZE - 1));
 }
 
-__attribute__((always_inline)) void arena_reset(struct arena *arena) {
-  char id = arena->allocation_semispace_id;
-  if (id < 0) {
-    id = ~arena->allocation_semispace_id;
-  }
-  arena->first_block = nullptr;
-  arena->block = nullptr;
-  arena->block_start = nullptr;
-  arena->block_end = nullptr;
-  arena->first_collection_block = nullptr;
-  arena->num_blocks = 0;
-  arena->num_collection_blocks = 0;
-  arena->allocation_semispace_id = id;
-}
-
-__attribute__((always_inline)) char
-get_arena_allocation_semispace_id(const struct arena *arena) {
-  return arena->allocation_semispace_id;
-}
-
 __attribute__((always_inline)) char
-get_arena_collection_semispace_id(const struct arena *arena) {
-  return ~arena->allocation_semispace_id;
+arena::get_arena_collection_semispace_id() const {
+  return ~allocation_semispace_id;
 }
 
 __attribute__((always_inline)) char
-get_arena_semispace_id_of_object(void *ptr) {
+arena::get_arena_semispace_id_of_object(void *ptr) {
   return mem_block_header(ptr)->semispace;
 }
 
@@ -53,7 +33,6 @@ get_arena_semispace_id_of_object(void *ptr) {
 // We will reserve enough address space for 1 million 1MB blocks. Might want to increase this on a > 1TB server.
 //
 size_t const HYPERBLOCK_SIZE = (size_t)BLOCK_SIZE * 1024 * 1024;
-static thread_local void *hyperblock_ptr = nullptr; // only needed for munmap()
 
 static void *megabyte_malloc() {
   //
@@ -84,7 +63,6 @@ static void *megabyte_malloc() {
     perror("mmap()");
    abort();
   }
-  hyperblock_ptr = addr;
   //
   // We ask for one block worth of address space less than we allocated so alignment will always succeed.
   // We don't worry about unused address space either side of our aligned address space because there will be no
@@ -96,13 +74,6 @@ static void *megabyte_malloc() {
   return currentblock_ptr;
 }
 
-void free_all_memory() {
-  //
-  // Frees all memory that was demand paged into this address range.
-  //
-  munmap(hyperblock_ptr, HYPERBLOCK_SIZE);
-}
-
 #ifdef __MACH__
 //
 // thread_local disabled for Apple
@@ -112,48 +83,47 @@ bool time_for_collection;
 thread_local bool time_for_collection;
 #endif
 
-static void fresh_block(struct arena *arena) {
+void arena::fresh_block() {
   char *next_block = nullptr;
-  if (arena->block_start == nullptr) {
+  if (block_start == nullptr) {
     next_block = (char *)megabyte_malloc();
-    arena->first_block = next_block;
-    auto *next_header = (memory_block_header *)next_block;
+    first_block = next_block;
+    auto *next_header = (arena::memory_block_header *)next_block;
     next_header->next_block = nullptr;
-    next_header->semispace = arena->allocation_semispace_id;
-    arena->num_blocks++;
+    next_header->semispace = allocation_semispace_id;
+    num_blocks++;
   } else {
-    next_block = *(char **)arena->block_start;
-    if (arena->block != arena->block_end) {
-      if (arena->block_end - arena->block == 8) {
-        *(uint64_t *)arena->block
-            = NOT_YOUNG_OBJECT_BIT; // 8 bit sentinel value
+    next_block = *(char **)block_start;
+    if (block != block_end) {
+      if (block_end - block == 8) {
+        *(uint64_t *)block = NOT_YOUNG_OBJECT_BIT; // 8 bit sentinel value
       } else {
-        *(uint64_t *)arena->block = arena->block_end - arena->block
-            - 8; // 16-bit or more sentinel value
+        *(uint64_t *)block
+            = block_end - block - 8; // 16-bit or more sentinel value
       }
     }
     if (!next_block) {
       MEM_LOG(
           "Allocating new block for the first time in arena %d\n",
-          arena->allocation_semispace_id);
+          allocation_semispace_id);
       next_block = (char *)megabyte_malloc();
-      *(char **)arena->block_start = next_block;
-      auto *next_header = (memory_block_header *)next_block;
+      *(char **)block_start = next_block;
+      auto *next_header = (arena::memory_block_header *)next_block;
       next_header->next_block = nullptr;
-      next_header->semispace = arena->allocation_semispace_id;
-      arena->num_blocks++;
+      next_header->semispace = allocation_semispace_id;
+      num_blocks++;
       time_for_collection = true;
     }
   }
-  if (!*(char **)next_block && arena->num_blocks >= get_gc_threshold()) {
+  if (!*(char **)next_block && num_blocks >= get_gc_threshold()) {
     time_for_collection = true;
   }
-  arena->block = next_block + sizeof(memory_block_header);
-  arena->block_start = next_block;
-  arena->block_end = next_block + BLOCK_SIZE;
+  block = next_block + sizeof(arena::memory_block_header);
+  block_start = next_block;
+  block_end = next_block + BLOCK_SIZE;
   MEM_LOG(
-      "New block at %p (remaining %zd)\n", arena->block,
-      BLOCK_SIZE - sizeof(memory_block_header));
+      "New block at %p (remaining %zd)\n", block,
+      BLOCK_SIZE - sizeof(arena::memory_block_header));
 }
 
 #ifdef __MACH__
@@ -165,63 +135,59 @@ bool gc_enabled = true;
 thread_local bool gc_enabled = true;
 #endif
 
-__attribute__((noinline)) void *
-do_alloc_slow(size_t requested, struct arena *arena) {
+__attribute__((noinline)) void *arena::do_alloc_slow(size_t requested) {
   MEM_LOG(
-      "Block at %p too small, %zd remaining but %zd needed\n", arena->block,
-      arena->block_end - arena->block, requested);
-  if (requested > BLOCK_SIZE - sizeof(memory_block_header)) {
+      "Block at %p too small, %zd remaining but %zd needed\n", block,
+      block_end - block, requested);
+  if (requested > BLOCK_SIZE - sizeof(arena::memory_block_header)) {
     return malloc(requested);
   }
-  fresh_block(arena);
-  void *result = arena->block;
-  arena->block += requested;
+  fresh_block();
+  void *result = block;
+  block += requested;
   MEM_LOG(
       "Allocation at %p (size %zd), next alloc at %p (if it fits)\n", result,
-      requested, arena->block);
+      requested, block);
   return result;
 }
 
 __attribute__((always_inline)) void *
-arena_resize_last_alloc(struct arena *arena, ssize_t increase) {
-  if (arena->block + increase <= arena->block_end) {
-    arena->block += increase;
-    return arena->block;
+arena::arena_resize_last_alloc(ssize_t increase) {
+  if (block + increase <= block_end) {
+    block += increase;
+    return block;
   }
   return nullptr;
 }
 
-__attribute__((always_inline)) void arena_swap_and_clear(struct arena *arena) {
-  char *tmp = arena->first_block;
-  arena->first_block = arena->first_collection_block;
-  arena->first_collection_block = tmp;
-  size_t tmp2 = arena->num_blocks;
-  arena->num_blocks = arena->num_collection_blocks;
-  arena->num_collection_blocks = tmp2;
-  arena->allocation_semispace_id = ~arena->allocation_semispace_id;
-  arena_clear(arena);
+__attribute__((always_inline)) void arena::arena_swap_and_clear() {
+  char *tmp = first_block;
+  first_block = first_collection_block;
+  first_collection_block = tmp;
+  size_t tmp2 = num_blocks;
+  num_blocks = num_collection_blocks;
+  num_collection_blocks = tmp2;
+  allocation_semispace_id = ~allocation_semispace_id;
+  arena_clear();
 }
 
-__attribute__((always_inline)) void arena_clear(struct arena *arena) {
-  arena->block = arena->first_block
-                     ? arena->first_block + sizeof(memory_block_header)
-                     : nullptr;
-  arena->block_start = arena->first_block;
-  arena->block_end
-      = arena->first_block ? arena->first_block + BLOCK_SIZE : nullptr;
+__attribute__((always_inline)) void arena::arena_clear() {
+  block = first_block ? first_block + sizeof(arena::memory_block_header)
                       : nullptr;
+  block_start = first_block;
+  block_end = first_block ? first_block + BLOCK_SIZE : nullptr;
 }
 
-__attribute__((always_inline)) char *
-arena_start_ptr(const struct arena *arena) {
-  return arena->first_block ? arena->first_block + sizeof(memory_block_header)
-                            : nullptr;
+__attribute__((always_inline)) char *arena::arena_start_ptr() const {
+  return first_block ? first_block + sizeof(arena::memory_block_header)
+                     : nullptr;
 }
 
-__attribute__((always_inline)) char **arena_end_ptr(struct arena *arena) {
-  return &arena->block;
+__attribute__((always_inline)) char **arena::arena_end_ptr() {
+  return &block;
 }
 
-char *move_ptr(char *ptr, size_t size, char const *arena_end_ptr) {
+char *arena::move_ptr(char *ptr, size_t size, char const *arena_end_ptr) {
   char *next_ptr = ptr + size;
   if (next_ptr == arena_end_ptr) {
     return nullptr;
@@ -233,23 +199,23 @@ char *move_ptr(char *ptr, size_t size, char const *arena_end_ptr) {
   if (!next_block) {
     return nullptr;
   }
-  return next_block + sizeof(memory_block_header);
+  return next_block + sizeof(arena::memory_block_header);
 }
 
-ssize_t ptr_diff(char *ptr1, char *ptr2) {
+ssize_t arena::ptr_diff(char *ptr1, char *ptr2) {
   if (MEM_BLOCK_START(ptr1) == MEM_BLOCK_START(ptr2)) {
     return ptr1 - ptr2;
   }
-  memory_block_header *hdr = mem_block_header(ptr2);
+  arena::memory_block_header *hdr = mem_block_header(ptr2);
   ssize_t result = 0;
   while (hdr != mem_block_header(ptr1) && hdr->next_block) {
     if (ptr2) {
       result += ((char *)hdr + BLOCK_SIZE) - ptr2;
       ptr2 = nullptr;
     } else {
-      result += (BLOCK_SIZE - sizeof(memory_block_header));
+      result += (BLOCK_SIZE - sizeof(arena::memory_block_header));
     }
-    hdr = (memory_block_header *)hdr->next_block;
+    hdr = (arena::memory_block_header *)hdr->next_block;
   }
   if (hdr == mem_block_header(ptr1)) {
     result += ptr1 - (char *)(hdr + 1);
@@ -264,9 +230,8 @@ ssize_t ptr_diff(char *ptr1, char *ptr2) {
   return -ptr_diff(ptr2, ptr1);
 }
 
-size_t arena_size(const struct arena *arena) {
-  return (arena->num_blocks > arena->num_collection_blocks
-              ? arena->num_blocks
-              : arena->num_collection_blocks)
-         * (BLOCK_SIZE - sizeof(memory_block_header));
+size_t arena::arena_size() const {
+  return (num_blocks > num_collection_blocks ? num_blocks
                                              : num_collection_blocks)
+         * (BLOCK_SIZE - sizeof(arena::memory_block_header));
 }
diff --git a/runtime/collect/collect.cpp b/runtime/collect/collect.cpp
index b519bc15b..cc596d205 100644
--- a/runtime/collect/collect.cpp
+++ b/runtime/collect/collect.cpp
@@ -85,9 +85,9 @@ void migrate_once(block **block_ptr) {
     return;
   }
   if (youngspace_collection_id()
-          == get_arena_semispace_id_of_object((void *)curr_block)
+          == arena::get_arena_semispace_id_of_object((void *)curr_block)
       || oldspace_collection_id()
-             == get_arena_semispace_id_of_object((void *)curr_block)) {
+             == arena::get_arena_semispace_id_of_object((void *)curr_block)) {
     migrate(block_ptr);
   }
 }
@@ -255,7 +255,7 @@ static char *evacuate(char *scan_ptr, char **alloc_ptr) {
       migrate_child(curr_block, layout_data->args, i, false);
     }
   }
-  return move_ptr(scan_ptr, get_size(hdr, layout_int), *alloc_ptr);
+  return arena::move_ptr(scan_ptr, get_size(hdr, layout_int), *alloc_ptr);
 }
 
 // Contains the decision logic for collecting the old generation.
@@ -325,9 +325,10 @@ void kore_collect(
     // allocation pointer is invalid and does not actually point to the next
     // address that would have been allocated at, according to the logic of
     // kore_arena_alloc, which will have allocated a fresh memory block and put
-    // the allocation at the start of it. Thus, we use move_ptr with a size
+    // the allocation at the start of it. Thus, we use arena::move_ptr with a size
     // of zero to adjust and get the true address of the allocation.
-    scan_ptr = move_ptr(previous_oldspace_alloc_ptr, 0, *old_alloc_ptr());
+    scan_ptr
+        = arena::move_ptr(previous_oldspace_alloc_ptr, 0, *old_alloc_ptr());
   } else {
     scan_ptr = previous_oldspace_alloc_ptr;
   }
@@ -340,7 +341,7 @@ void kore_collect(
   }
 #ifdef GC_DBG
   ssize_t numBytesAllocedSinceLastCollection
-      = ptr_diff(current_alloc_ptr, last_alloc_ptr);
+      = arena::ptr_diff(current_alloc_ptr, last_alloc_ptr);
   assert(numBytesAllocedSinceLastCollection >= 0);
   fwrite(&numBytesAllocedSinceLastCollection, sizeof(ssize_t), 1, stderr);
   last_alloc_ptr = *young_alloc_ptr();
diff --git a/runtime/collect/migrate_collection.cpp b/runtime/collect/migrate_collection.cpp
index e2870de3e..c6e644e0e 100644
--- a/runtime/collect/migrate_collection.cpp
+++ b/runtime/collect/migrate_collection.cpp
@@ -7,9 +7,9 @@ void migrate_collection_node(void **node_ptr) {
   string *curr_block = STRUCT_BASE(string, data, *node_ptr);
   if (youngspace_collection_id()
-          != get_arena_semispace_id_of_object((void *)curr_block)
+          != arena::get_arena_semispace_id_of_object((void *)curr_block)
       && oldspace_collection_id()
-             != get_arena_semispace_id_of_object((void *)curr_block)) {
+             != arena::get_arena_semispace_id_of_object((void *)curr_block)) {
     return;
   }
   uint64_t const hdr = curr_block->h.hdr;
diff --git a/runtime/lto/alloc.cpp b/runtime/lto/alloc.cpp
index bcb2601e2..0cd79a3f8 100644
--- a/runtime/lto/alloc.cpp
+++ b/runtime/lto/alloc.cpp
@@ -16,47 +16,42 @@ REGISTER_ARENA(oldspace, OLDSPACE_ID);
 REGISTER_ARENA(alwaysgcspace, ALWAYSGCSPACE_ID);
 
 char *youngspace_ptr() {
-  return arena_start_ptr(&youngspace);
+  return youngspace.arena_start_ptr();
 }
 
 char *oldspace_ptr() {
-  return arena_start_ptr(&oldspace);
+  return oldspace.arena_start_ptr();
 }
 
 char **young_alloc_ptr() {
-  return arena_end_ptr(&youngspace);
+  return youngspace.arena_end_ptr();
 }
 
 char **old_alloc_ptr() {
-  return arena_end_ptr(&oldspace);
+  return oldspace.arena_end_ptr();
 }
 
 char youngspace_collection_id() {
-  return get_arena_collection_semispace_id(&youngspace);
+  return youngspace.get_arena_collection_semispace_id();
 }
 
 char oldspace_collection_id() {
-  return get_arena_collection_semispace_id(&oldspace);
+  return oldspace.get_arena_collection_semispace_id();
 }
 
 size_t youngspace_size(void) {
-  return arena_size(&youngspace);
-}
-
-bool youngspace_almost_full(size_t threshold) {
-  char *next_block = *(char **)youngspace.block_start;
-  return !next_block;
+  return youngspace.arena_size();
 }
 
 void kore_alloc_swap(bool swap_old) {
-  arena_swap_and_clear(&youngspace);
+  youngspace.arena_swap_and_clear();
   if (swap_old) {
-    arena_swap_and_clear(&oldspace);
+    oldspace.arena_swap_and_clear();
   }
 }
 
 void kore_clear() {
-  arena_clear(&alwaysgcspace);
+  alwaysgcspace.arena_clear();
 }
 
 void set_kore_memory_functions_for_gmp() {
@@ -64,25 +59,25 @@ void set_kore_memory_functions_for_gmp() {
 }
 
 __attribute__((always_inline)) void *kore_alloc(size_t requested) {
-  return kore_arena_alloc(&youngspace, requested);
+  return youngspace.kore_arena_alloc(requested);
 }
 
 __attribute__((always_inline)) void *kore_alloc_token(size_t requested) {
   size_t size = (requested + 7) & ~7;
-  return kore_arena_alloc(&youngspace, size < 16 ? 16 : size);
+  return youngspace.kore_arena_alloc(size < 16 ? 16 : size);
 }
 
 __attribute__((always_inline)) void *kore_alloc_old(size_t requested) {
-  return kore_arena_alloc(&oldspace, requested);
+  return oldspace.kore_arena_alloc(requested);
 }
 
 __attribute__((always_inline)) void *kore_alloc_token_old(size_t requested) {
   size_t size = (requested + 7) & ~7;
-  return kore_arena_alloc(&oldspace, size < 16 ? 16 : size);
+  return oldspace.kore_arena_alloc(size < 16 ? 16 : size);
 }
 
 __attribute__((always_inline)) void *kore_alloc_always_gc(size_t requested) {
-  return kore_arena_alloc(&alwaysgcspace, requested);
+  return alwaysgcspace.kore_arena_alloc(requested);
 }
 
 void *
@@ -90,7 +85,7 @@ kore_resize_last_alloc(void *oldptr, size_t newrequest, size_t last_size) {
   newrequest = (newrequest + 7) & ~7;
   last_size = (last_size + 7) & ~7;
 
-  if (oldptr != *arena_end_ptr(&youngspace) - last_size) {
+  if (oldptr != *(youngspace.arena_end_ptr()) - last_size) {
     MEM_LOG(
         "May only reallocate last allocation. Tried to reallocate %p to %zd\n",
        oldptr, newrequest);
@@ -98,7 +93,7 @@ kore_resize_last_alloc(void *oldptr, size_t newrequest, size_t last_size) {
   }
 
   ssize_t increase = newrequest - last_size;
-  if (arena_resize_last_alloc(&youngspace, increase)) {
+  if (youngspace.arena_resize_last_alloc(increase)) {
     return oldptr;
   }
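
A minimal usage sketch of the new member-function API follows (illustration only, not part of the diff above). The arena ID `SCRATCHSPACE_ID`, the `walk_scratchspace` helper, and the fixed 16-byte object size are assumptions made up for this example; real callers go through the kore_alloc* wrappers in runtime/lto/alloc.cpp, and the scan loop below mirrors how collect.cpp drives arena::move_ptr.

#include "runtime/arena.h"

// Hypothetical arena ID for this sketch only; a real ID must stay in the
// 0..127 range and must not collide with YOUNGSPACE_ID, OLDSPACE_ID, or
// ALWAYSGCSPACE_ID.
static constexpr char SCRATCHSPACE_ID = 3;
REGISTER_ARENA(scratchspace, SCRATCHSPACE_ID);

void walk_scratchspace() {
  // Allocate a few fixed-size objects in the arena's active semispace.
  for (int i = 0; i < 4; ++i) {
    void *obj = scratchspace.kore_arena_alloc(16);
    (void)obj;
  }
  // Walk the allocations the way the collector scans a semispace:
  // arena::move_ptr follows the block chain and returns null once it reaches
  // the end pointer (the address one past the last allocated byte).
  char *end = *scratchspace.arena_end_ptr();
  for (char *scan = scratchspace.arena_start_ptr(); scan != nullptr;
       scan = arena::move_ptr(scan, 16, end)) {
    // ... inspect the 16-byte object at `scan` ...
  }
  // Reclaim everything at once by swapping semispaces, as the collector does
  // via kore_alloc_swap before evacuating live objects.
  scratchspace.arena_swap_and_clear();
}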