Skip to content

Commit

Permalink
Re-add a default constructor for the pool_allocator.
Browse files Browse the repository at this point in the history
  • Loading branch information
ptal committed Oct 9, 2024
1 parent b19c84e commit 8060250
Show file tree
Hide file tree
Showing 2 changed files with 36 additions and 4 deletions.
31 changes: 27 additions & 4 deletions include/battery/allocator.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -227,6 +227,8 @@ class pool_allocator {
control_block* block;

public:
CUDA NI pool_allocator() = default;

CUDA NI pool_allocator(const pool_allocator& other):
block(other.block)
{
Expand All @@ -235,17 +237,38 @@ class pool_allocator {
}
}

CUDA NI pool_allocator(pool_allocator&& other):
block(other.block)
{
other.block = nullptr;
}

CUDA NI pool_allocator(unsigned char* mem, size_t capacity, size_t alignment = alignof(std::max_align_t))
: block(::new control_block(mem, capacity, alignment))
{}

private:
CUDA void destroy() {
block->counter--;
if(block->counter == 0) {
::delete block;
}
}

public:
CUDA NI ~pool_allocator() {
if(block != nullptr) {
block->counter--;
if(block->counter == 0) {
::delete block;
}
destroy();
}
}

CUDA NI pool_allocator& operator=(pool_allocator&& other) {
if(block != nullptr) {
destroy();
}
block = other.block;
other.block = nullptr;
return *this;
}

CUDA size_t align_at(size_t alignment) {
Expand Down
9 changes: 9 additions & 0 deletions tests/allocator_test_gpu.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,13 @@ __global__ void kernel_compute(int measured_mem_usage, int mem_usage) {
assert(measured_mem_usage == pool_mem.used());
}

// Exercises the empty/rebind path: a shared_ptr first built over a pool with a
// null buffer, then reassigned to one allocated from a real global-memory pool.
__global__ void test_empty_pool() {
  shared_ptr<int, pool_allocator> ptr(pool_allocator(nullptr, 0));
  void* raw = global_allocator{}.allocate(10);
  pool_allocator backing(static_cast<unsigned char*>(raw), 10);
  ptr = allocate_shared<int, pool_allocator>(backing);
}

void shared_memory_max_usage(int shared_memory_size) {
const int mem_usage = shared_memory_size / 4 - 2;
shared_ptr<int, managed_allocator> measured_mem_usage = make_shared<int, managed_allocator>(0);
Expand Down Expand Up @@ -153,5 +160,7 @@ int main() {
global_memory_vector_passing();
}
shared_memory_with_precomputation();
test_empty_pool<<<1, 1>>>();
CUDAEX(cudaDeviceSynchronize());
return 0;
}

0 comments on commit 8060250

Please sign in to comment.