WIP make shrinkFn optional in Allocator interface
This is work-in-progress because it's blocked by coroutines depending on
the Allocator interface, which will be solved with the coroutine rewrite
(#2377).

closes #2292
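
For illustration only (not part of this commit), here is a minimal sketch of what the new contract looks like from an implementer's point of view: an allocator that can never give memory back sets shrinkFn to null, and its reallocFn may then assume every call is a fresh allocation. The BumpAllocator name is hypothetical; the pattern mirrors the FixedBufferAllocator change in the diff below and uses the std API roughly as it existed around this commit.

const std = @import("std");
const mem = std.mem;
const Allocator = mem.Allocator;
const assert = std.debug.assert;

const BumpAllocator = struct {
    allocator: Allocator,
    buffer: []u8,
    end_index: usize,

    pub fn init(buffer: []u8) BumpAllocator {
        return BumpAllocator{
            .allocator = Allocator{
                .reallocFn = realloc,
                // This allocator never reclaims memory, so it opts out of shrinking.
                .shrinkFn = null,
            },
            .buffer = buffer,
            .end_index = 0,
        };
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, n: usize, alignment: u29) ![]u8 {
        const self = @fieldParentPtr(BumpAllocator, "allocator", allocator);
        // With shrinkFn == null the interface guarantees old_mem.len == 0,
        // so every call is a plain allocation.
        assert(old_mem.len == 0);
        const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
        const adjusted_addr = mem.alignForward(addr, alignment);
        const adjusted_index = self.end_index + (adjusted_addr - addr);
        const new_end_index = adjusted_index + n;
        if (new_end_index > self.buffer.len) return error.OutOfMemory;
        self.end_index = new_end_index;
        return self.buffer[adjusted_index..new_end_index];
    }
};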
andrewrk committed Apr 30, 2019
1 parent 81767a6 commit 92122f9
Showing 2 changed files with 61 additions and 80 deletions.
97 changes: 21 additions & 76 deletions std/heap.zig
@@ -18,7 +18,7 @@ var c_allocator_state = Allocator{
};

fn cRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
assert(new_align <= @alignOf(c_longdouble));
if (new_align > @alignOf(c_longdouble)) return error.OutOfMemory;
const old_ptr = if (old_mem.len == 0) null else @ptrCast(*c_void, old_mem.ptr);
const buf = c.realloc(old_ptr, new_size) orelse return error.OutOfMemory;
return @ptrCast([*]u8, buf)[0..new_size];
@@ -143,7 +143,7 @@ pub const DirectAllocator = struct {
const result = try alloc(allocator, new_size, new_align);
if (old_mem.len != 0) {
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
_ = os.posix.munmap(@ptrToInt(old_mem.ptr), old_mem.len);
_ = os.posix.munmap(@ptrToInt(old_mem.ptr), old_mem.len);
}
return result;
},
@@ -155,12 +155,12 @@ pub const DirectAllocator = struct {
const old_record_addr = old_adjusted_addr + old_mem.len;
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
const old_ptr = @intToPtr(*c_void, root_addr);
if(new_size == 0) {

if (new_size == 0) {
if (os.windows.HeapFree(self.heap_handle.?, 0, old_ptr) == 0) unreachable;
return old_mem[0..0];
}

const amt = new_size + new_align + @sizeOf(usize);
const new_ptr = os.windows.HeapReAlloc(
self.heap_handle.?,
@@ -178,11 +178,7 @@ pub const DirectAllocator = struct {
// or the memory starting at the old offset would be outside of the new allocation,
// then we need to copy the memory to a valid aligned address and use that
const new_aligned_addr = mem.alignForward(new_root_addr, new_align);
@memcpy(
@intToPtr([*]u8, new_aligned_addr),
@intToPtr([*]u8, new_adjusted_addr),
std.math.min(old_mem.len, new_size),
);
@memcpy(@intToPtr([*]u8, new_aligned_addr), @intToPtr([*]u8, new_adjusted_addr), std.math.min(old_mem.len, new_size));
new_adjusted_addr = new_aligned_addr;
}
const new_record_addr = new_adjusted_addr + new_size;
@@ -209,7 +205,7 @@ pub const ArenaAllocator = struct {
return ArenaAllocator{
.allocator = Allocator{
.reallocFn = realloc,
.shrinkFn = shrink,
.shrinkFn = null,
},
.child_allocator = child_allocator,
.buffer_list = std.LinkedList([]u8).init(),
@@ -231,8 +227,7 @@ pub const ArenaAllocator = struct {
const actual_min_size = minimum_size + @sizeOf(BufNode);
var len = prev_len;
while (true) {
len += len / 2;
len += os.page_size - @rem(len, os.page_size);
len = mem.alignForward(len + len / 2, os.page_size);
if (len >= actual_min_size) break;
}
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
@@ -248,9 +243,11 @@ pub const ArenaAllocator = struct {
return buf_node;
}

fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);

assert(old_mem.len == 0); // because shrinkFn == null

var cur_node = if (self.buffer_list.last) |last_node| last_node else try self.createNode(0, n + alignment);
while (true) {
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
@@ -267,21 +264,6 @@ pub const ArenaAllocator = struct {
return result;
}
}

fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
if (new_size <= old_mem.len and new_align <= new_size) {
// We can't do anything with the memory, so tell the client to keep it.
return error.OutOfMemory;
} else {
const result = try alloc(allocator, new_size, new_align);
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
return result;
}
}

fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
return old_mem[0..new_size];
}
};

pub const FixedBufferAllocator = struct {
@@ -293,15 +275,17 @@ pub const FixedBufferAllocator = struct {
return FixedBufferAllocator{
.allocator = Allocator{
.reallocFn = realloc,
.shrinkFn = shrink,
.shrinkFn = null,
},
.buffer = buffer,
.end_index = 0,
};
}

fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
assert(old_mem.len == 0); // because shrinkFn == null

const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
const adjusted_addr = mem.alignForward(addr, alignment);
const adjusted_index = self.end_index + (adjusted_addr - addr);
@@ -314,39 +298,14 @@ pub const FixedBufferAllocator = struct {

return result;
}

fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
assert(old_mem.len <= self.end_index);
if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len and
mem.alignForward(@ptrToInt(old_mem.ptr), new_align) == @ptrToInt(old_mem.ptr))
{
const start_index = self.end_index - old_mem.len;
const new_end_index = start_index + new_size;
if (new_end_index > self.buffer.len) return error.OutOfMemory;
const result = self.buffer[start_index..new_end_index];
self.end_index = new_end_index;
return result;
} else if (new_size <= old_mem.len and new_align <= old_align) {
// We can't do anything with the memory, so tell the client to keep it.
return error.OutOfMemory;
} else {
const result = try alloc(allocator, new_size, new_align);
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
return result;
}
}

fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
return old_mem[0..new_size];
}
};

// FIXME: Exposed LLVM intrinsics is a bug
// TODO: Exposed LLVM intrinsics is a bug
// See: https://github.com/ziglang/zig/issues/2291
extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;

/// TODO Currently this allocator does not actually reclaim memory, but it should.
pub const wasm_allocator = &wasm_allocator_state.allocator;
var wasm_allocator_state = WasmAllocator{
.allocator = Allocator{
@@ -433,11 +392,11 @@ const WasmAllocator = struct {
}
};

/// Lock free
pub const ThreadSafeFixedBufferAllocator = blk: {
if (builtin.single_threaded) {
break :blk FixedBufferAllocator;
} else {
// lock free
break :blk struct {
allocator: Allocator,
end_index: usize,
@@ -447,14 +406,15 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
return ThreadSafeFixedBufferAllocator{
.allocator = Allocator{
.reallocFn = realloc,
.shrinkFn = shrink,
.shrinkFn = null,
},
.buffer = buffer,
.end_index = 0,
};
}

fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, n: usize, alignment: u29) ![]u8 {
assert(old_mem.len == 0); // because shrinkFn == null
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
while (true) {
@@ -468,21 +428,6 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
}
}

fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
if (new_size <= old_mem.len and new_align <= old_align) {
// We can't do anything useful with the memory, tell the client to keep it.
return error.OutOfMemory;
} else {
const result = try alloc(allocator, new_size, new_align);
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
return result;
}
}

fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
return old_mem[0..new_size];
}
};
}
};
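As a hedged, caller-side illustration of the behavior change above (not part of the diff): roughly what the new contract means for users of FixedBufferAllocator. The test uses the std API as it existed around this commit and may differ in detail.

const std = @import("std");
const assert = std.debug.assert;

test "allocator that cannot reclaim memory" {
    var buf: [1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(buf[0..]);
    const a = &fba.allocator;

    const slice = try a.alloc(u8, 100);

    // shrink still works with shrinkFn == null: it simply returns the
    // prefix, without making the trailing bytes reusable.
    const smaller = a.shrink(slice, 10);
    assert(smaller.len == 10);

    // realloc to a smaller size cannot reclaim anything, so the interface
    // reports OutOfMemory and the caller keeps the original memory.
    if (a.realloc(smaller, 5)) |_| unreachable else |err| assert(err == error.OutOfMemory);
}
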
44 changes: 40 additions & 4 deletions std/mem.zig
@@ -38,6 +38,7 @@ pub const Allocator = struct {
/// `reallocFn` or `shrinkFn`.
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
/// is guaranteed to be >= 1.
/// If `shrinkFn` is `null` then it is guaranteed that `old_mem.len == 0`.
old_mem: []u8,
/// If `old_mem.len == 0` then this is `undefined`, otherwise:
/// Guaranteed to be the same as what was returned from most recent call to
@@ -55,7 +56,12 @@ pub const Allocator = struct {
) Error![]u8,

/// This function deallocates memory. It must succeed.
shrinkFn: fn (
/// If this function is null, it means the allocator implementation cannot
/// reclaim memory. The shrink functions of the
/// Allocator interface will still work; they will trivially return the
/// old memory with adjusted length. In this case, `reallocFn` with a smaller
/// `new_byte_count` will always return `error.OutOfMemory`.
shrinkFn: ?fn (
self: *Allocator,
/// Guaranteed to be the same as what was returned from most recent call to
/// `reallocFn` or `shrinkFn`.
@@ -82,8 +88,9 @@ pub const Allocator = struct {
pub fn destroy(self: *Allocator, ptr: var) void {
const T = @typeOf(ptr).Child;
if (@sizeOf(T) == 0) return;
const shrinkFn = self.shrinkFn orelse return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
const shrink_result = self.shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
const shrink_result = shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
assert(shrink_result.len == 0);
}

@@ -147,6 +154,20 @@ pub const Allocator = struct {
self.free(old_mem);
return ([*]align(new_alignment) T)(undefined)[0..0];
}
if (!self.canReclaimMemory()) {
if (new_n <= old_mem.len and new_alignment <= Slice.alignment) {
// Cannot reclaim memory; tell the client to keep it.
return error.OutOfMemory;
}
const result = try self.alignedAlloc(T, new_alignment, new_n);
const end_len = std.math.min(old_mem.len, new_n);
mem.copy(T, result, old_mem[0..end_len]);
// This loop gets optimized out in ReleaseFast mode
for (result[end_len..]) |*elem| {
elem.* = undefined;
}
return result;
}

const old_byte_slice = @sliceToBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
@@ -185,6 +206,7 @@ pub const Allocator = struct {
) []align(new_alignment) @typeInfo(@typeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@typeOf(old_mem)).Pointer;
const T = Slice.child;
const shrinkFn = self.shrinkFn orelse return old_mem[0..new_n];

if (new_n == 0) {
self.free(old_mem);
@@ -199,19 +221,33 @@ pub const Allocator = struct {
const byte_count = @sizeOf(T) * new_n;

const old_byte_slice = @sliceToBytes(old_mem);
const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
const byte_slice = shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
assert(byte_slice.len == byte_count);
return @bytesToSlice(T, @alignCast(new_alignment, byte_slice));
}

pub fn free(self: *Allocator, memory: var) void {
const shrinkFn = self.shrinkFn orelse return;
const Slice = @typeInfo(@typeOf(memory)).Pointer;
const bytes = @sliceToBytes(memory);
if (bytes.len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
const shrink_result = self.shrinkFn(self, non_const_ptr[0..bytes.len], Slice.alignment, 0, 1);
const shrink_result = shrinkFn(self, non_const_ptr[0..bytes.len], Slice.alignment, 0, 1);
assert(shrink_result.len == 0);
}

/// If this returns `false`, it means that the allocator implementation
/// will only ever increase memory usage. In this case, `free` and `shrink`
/// are no-ops and will not make the freed bytes available for use.
/// It also means using `realloc` to resize downwards will always result
/// in `error.OutOfMemory`.
/// When creating an arena allocator on top of a backing allocator, it is
/// best practice to check if the backing allocator can reclaim memory.
/// If it cannot, then the backing allocator should be used directly, to
/// avoid pointless overhead.
pub fn canReclaimMemory(self: *Allocator) bool {
return self.shrinkFn != null;
}
};

pub const Compare = enum {
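Finally, a hedged sketch of how the new canReclaimMemory() query might be used when layering allocators, as the doc comment above recommends. The function and variable names here are placeholders; only Allocator.canReclaimMemory() is new in this change, and ArenaAllocator usage follows the std API of this era.

const std = @import("std");

fn withScratchMemory(backing: *std.mem.Allocator) !void {
    if (backing.canReclaimMemory()) {
        // Worth stacking an arena on top: individual frees become cheap no-ops
        // and everything goes back to the backing allocator in one deinit().
        var arena = std.heap.ArenaAllocator.init(backing);
        defer arena.deinit();
        const buf = try arena.allocator.alloc(u8, 256);
        _ = buf;
    } else {
        // The backing allocator never reclaims anyway (shrinkFn == null), so an
        // arena would only add bookkeeping overhead; use it directly.
        const buf = try backing.alloc(u8, 256);
        defer backing.free(buf);
    }
}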
