Commit

Merge pull request #5491 from lplewa/stable-1.12
Stable 1.12
lplewa authored Aug 12, 2022
2 parents 38ebb59 + 03d0729 commit e1677b6
Showing 11 changed files with 295 additions and 11 deletions.
7 changes: 7 additions & 0 deletions src/PMDK.sln
@@ -929,6 +929,8 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "obj_list_remove", "test\obj
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "obj_defrag", "test\obj_defrag\obj_defrag.vcxproj", "{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA80}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "obj_heap_reopen", "test\obj_heap_reopen\obj_heap_reopen.vcxproj", "{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA81}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|x64 = Debug|x64
@@ -2100,6 +2102,10 @@ Global
{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA80}.Debug|x64.Build.0 = Debug|x64
{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA80}.Release|x64.ActiveCfg = Release|x64
{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA80}.Release|x64.Build.0 = Release|x64
{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA81}.Debug|x64.ActiveCfg = Debug|x64
{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA81}.Debug|x64.Build.0 = Debug|x64
{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA81}.Release|x64.ActiveCfg = Release|x64
{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA81}.Release|x64.Build.0 = Release|x64
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@@ -2435,6 +2441,7 @@ Global
{FEA09B48-34C2-4963-8A5A-F97BDA136D72} = {B870D8A6-12CD-4DD0-B843-833695C2310A}
{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA79} = {63C9B3F8-437D-4AD9-B32D-D04AE38C35B6}
{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA80} = {63C9B3F8-437D-4AD9-B32D-D04AE38C35B6}
{FF6E5B0C-DC00-4C93-B9C2-63D1E858BA81} = {63C9B3F8-437D-4AD9-B32D-D04AE38C35B6}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {5E690324-2D48-486A-8D3C-DCB520D3F693}
66 changes: 58 additions & 8 deletions src/libpmemobj/heap.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2021, Intel Corporation */
/* Copyright 2015-2022, Intel Corporation */

/*
* heap.c -- heap implementation
@@ -15,6 +15,7 @@
#include "memblock.h"
#include "queue.h"
#include "heap.h"
#include "heap_layout.h"
#include "out.h"
#include "util.h"
#include "sys_util.h"
@@ -93,7 +94,7 @@ struct heap_rt {
unsigned nlocks;

unsigned nzones;
unsigned zones_exhausted;
int *zone_reclaimed_map;
};

/*
@@ -807,6 +808,41 @@ heap_reclaim_zone_garbage(struct palloc_heap *heap, struct bucket *bucket,
}
}

/*
* heap_ensure_zone_reclaimed -- make sure that the specified zone has
* already been reclaimed.
*/
void
heap_ensure_zone_reclaimed(struct palloc_heap *heap, uint32_t zone_id)
{
int zone_reclaimed;
util_atomic_load_explicit32(&heap->rt->zone_reclaimed_map[zone_id],
&zone_reclaimed, memory_order_acquire);
if (zone_reclaimed)
return;

struct bucket *defb = heap_bucket_acquire(heap,
DEFAULT_ALLOC_CLASS_ID,
HEAP_ARENA_PER_THREAD);

struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
ASSERTeq(z->header.magic, ZONE_HEADER_MAGIC);

/* check a second time just to make sure no other thread was first */
util_atomic_load_explicit32(&heap->rt->zone_reclaimed_map[zone_id],
&zone_reclaimed, memory_order_acquire);
if (zone_reclaimed)
goto out;

util_atomic_store_explicit32(&heap->rt->zone_reclaimed_map[zone_id], 1,
memory_order_release);

heap_reclaim_zone_garbage(heap, defb, zone_id);

out:
heap_bucket_release(defb);
}

/*
* heap_populate_bucket -- (internal) creates volatile state of memory blocks
*/
@@ -815,11 +851,19 @@ heap_populate_bucket(struct palloc_heap *heap, struct bucket *bucket)
{
struct heap_rt *h = heap->rt;

unsigned zone_id; /* first not reclaimed zone */
for (zone_id = 0; zone_id < h->nzones; ++zone_id) {
if (h->zone_reclaimed_map[zone_id] == 0)
break;
}

/* at this point we are sure that there's no more memory in the heap */
if (h->zones_exhausted == h->nzones)
if (zone_id == h->nzones)
return ENOMEM;

uint32_t zone_id = h->zones_exhausted++;
util_atomic_store_explicit32(&heap->rt->zone_reclaimed_map[zone_id], 1,
memory_order_release);

struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);

/* ignore zone and chunk headers */
@@ -1562,6 +1606,13 @@ heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
goto error_heap_malloc;
}

h->nzones = heap_max_zone(heap_size);
h->zone_reclaimed_map = Zalloc(sizeof(int) * h->nzones);
if (h->zone_reclaimed_map == NULL) {
err = ENOMEM;
goto err_reclaimed_map_malloc;
}

if ((err = arena_thread_assignment_init(&h->arenas.assignment,
Default_arenas_assignment_type)) != 0) {
goto error_assignment_init;
@@ -1581,10 +1632,6 @@ heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
goto error_arenas_malloc;
}

h->nzones = heap_max_zone(heap_size);

h->zones_exhausted = 0;

h->nlocks = On_valgrind ? MAX_RUN_LOCKS_VG : MAX_RUN_LOCKS;
for (unsigned i = 0; i < h->nlocks; ++i)
util_mutex_init(&h->run_locks[i]);
@@ -1621,6 +1668,8 @@ heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
error_alloc_classes_new:
arena_thread_assignment_fini(&h->arenas.assignment);
error_assignment_init:
Free(h->zone_reclaimed_map);
err_reclaimed_map_malloc:
Free(h);
heap->rt = NULL;
error_heap_malloc:
@@ -1716,6 +1765,7 @@ heap_cleanup(struct palloc_heap *heap)

VALGRIND_DO_DESTROY_MEMPOOL(heap->layout);

Free(rt->zone_reclaimed_map);
Free(rt);
heap->rt = NULL;
}
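Note on the heap.c change above: the per-heap zones_exhausted counter is replaced by the zone_reclaimed_map flag array, so zones are no longer consumed strictly in order; instead heap_ensure_zone_reclaimed() rebuilds a zone's volatile state on demand, for example when a block in that zone is freed right after the pool is reopened. Below is a minimal, self-contained sketch of that "reclaim each zone at most once" idea. It uses C11 <stdatomic.h> and a plain mutex rather than PMDK's util_atomic_* wrappers and bucket locking, and MAX_ZONES and reclaim_zone_garbage() are illustrative stand-ins, so treat it as an approximation of the pattern rather than the exact heap.c logic:

#include <stdatomic.h>
#include <pthread.h>

#define MAX_ZONES 1024	/* illustrative; the real count comes from heap_max_zone() */

static atomic_int zone_reclaimed_map[MAX_ZONES];
static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for heap_reclaim_zone_garbage(): rebuild the zone's volatile state */
static void
reclaim_zone_garbage(unsigned zone_id)
{
	(void) zone_id;
}

static void
ensure_zone_reclaimed(unsigned zone_id)
{
	/* fast path: the zone was already processed */
	if (atomic_load_explicit(&zone_reclaimed_map[zone_id],
			memory_order_acquire))
		return;

	pthread_mutex_lock(&zone_lock);

	/* check again so the zone is reclaimed at most once */
	if (!atomic_load_explicit(&zone_reclaimed_map[zone_id],
			memory_order_acquire)) {
		reclaim_zone_garbage(zone_id);
		atomic_store_explicit(&zone_reclaimed_map[zone_id], 1,
				memory_order_release);
	}

	pthread_mutex_unlock(&zone_lock);
}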
5 changes: 4 additions & 1 deletion src/libpmemobj/heap.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright 2015-2021, Intel Corporation */
/* Copyright 2015-2022, Intel Corporation */

/*
* heap.h -- internal definitions for heap
@@ -72,6 +72,9 @@ heap_discard_run(struct palloc_heap *heap, struct memory_block *m);
void
heap_memblock_on_free(struct palloc_heap *heap, const struct memory_block *m);

void
heap_ensure_zone_reclaimed(struct palloc_heap *heap, uint32_t zone_id);

int
heap_free_chunk_reuse(struct palloc_heap *heap,
struct bucket *bucket, struct memory_block *m);
7 changes: 5 additions & 2 deletions src/libpmemobj/palloc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2021, Intel Corporation */
/* Copyright 2015-2022, Intel Corporation */

/*
* palloc.c -- implementation of pmalloc POSIX-like API
@@ -270,7 +270,8 @@ palloc_heap_action_exec(struct palloc_heap *heap,
SUPPRESS_UNUSED(heap);

#ifdef DEBUG
if (act->m.m_ops->get_state(&act->m) == act->new_state) {
enum memblock_state s = act->m.m_ops->get_state(&act->m);
if (s == act->new_state || s == MEMBLOCK_STATE_UNKNOWN) {
ERR("invalid operation or heap corruption");
ASSERT(0);
}
@@ -616,6 +617,8 @@ palloc_defer_free_create(struct palloc_heap *heap, uint64_t off,
out->offset = off;
out->m = memblock_from_offset(heap, off);

heap_ensure_zone_reclaimed(heap, out->m.zone_id);

/*
* For the duration of free we may need to protect surrounding
* metadata from being modified.
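The palloc.c hunk above makes palloc_defer_free_create() call heap_ensure_zone_reclaimed() for the block's zone, so a deferred free issued right after pool reopen (before any allocation has touched that zone) sees valid volatile metadata. A small sketch of the public action API around this code path, assuming pop and oid come from an earlier pmemobj_open()/pmemobj_alloc() and with error handling omitted:

#include <libpmemobj.h>

static void
defer_free_example(PMEMobjpool *pop, PMEMoid oid)
{
	struct pobj_action act;

	/* record the free as an action; per the change above, this also
	 * ensures the object's zone has been reclaimed */
	pmemobj_defer_free(pop, oid, &act);

	/* ... the object is still allocated and readable here ... */

	pmemobj_publish(pop, &act, 1);	/* the free takes effect */
}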
1 change: 1 addition & 0 deletions src/test/Makefile
@@ -69,6 +69,7 @@ OBJ_TESTS = \
obj_fragmentation2\
obj_heap\
obj_heap_interrupt\
obj_heap_reopen\
obj_heap_state\
obj_include\
obj_lane\
1 change: 1 addition & 0 deletions src/test/obj_heap_reopen/.gitignore
@@ -0,0 +1 @@
obj_heap_reopen
13 changes: 13 additions & 0 deletions src/test/obj_heap_reopen/Makefile
@@ -0,0 +1,13 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2022, Intel Corporation

#
# src/test/obj_heap_reopen/Makefile -- build obj_heap_reopen test
#
TARGET = obj_heap_reopen
OBJS = obj_heap_reopen.o

LIBPMEMOBJ=y

include ../Makefile.inc
INCS += -I../../libpmemobj
20 changes: 20 additions & 0 deletions src/test/obj_heap_reopen/TESTS.py
@@ -0,0 +1,20 @@
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2022, Intel Corporation


import testframework as t
from testframework import granularity as g


class BASIC(t.Test):
test_type = t.Medium

def run(self, ctx):
filepath = ctx.create_holey_file(16 * t.MiB, 'testfile1')
ctx.exec('obj_heap_reopen', filepath)


@g.require_granularity(g.BYTE, g.CACHELINE)
class TEST0(BASIC):
pass
62 changes: 62 additions & 0 deletions src/test/obj_heap_reopen/obj_heap_reopen.c
@@ -0,0 +1,62 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2022, Intel Corporation */

/*
* obj_heap_reopen.c -- test for reopening an existing heap and deallocating
* objects prior to any allocations to validate the memory reclamation process.
*/

#include <stddef.h>

#include "libpmemobj/action_base.h"
#include "libpmemobj/atomic_base.h"
#include "out.h"
#include "unittest.h"
#include "obj.h"

#define TEST_OBJECT_SIZE (4 << 20)

int
main(int argc, char *argv[])
{
START(argc, argv, "obj_heap_reopen");

if (argc < 2)
UT_FATAL("usage: %s file-name", argv[0]);

const char *path = argv[1];
PMEMobjpool *pop = NULL;

if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(basic),
0, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);

PMEMoid oid;
pmemobj_alloc(pop, &oid, 4 << 20, 0, NULL, NULL);

pmemobj_close(pop);

if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(basic))) == NULL)
UT_FATAL("!pmemobj_open: %s", path);

uint64_t freed_oid_off = oid.off;
pmemobj_free(&oid);

struct pobj_action act;
oid = pmemobj_reserve(pop, &act, TEST_OBJECT_SIZE, 0);
UT_ASSERTeq(oid.off, freed_oid_off);

for (;;) {
PMEMoid oid2;
if (pmemobj_alloc(pop, &oid2, 1, 0, NULL, NULL) != 0)
break;
UT_ASSERT(!(oid2.off >= oid.off &&
oid2.off <= oid.off + TEST_OBJECT_SIZE));
}

pmemobj_publish(pop, &act, 1);

pmemobj_close(pop);

DONE(NULL);
}
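The test above relies on two properties: freeing the only object right after reopen must trigger reclamation of its zone (so the reserved block reuses freed_oid_off), and a block obtained with pmemobj_reserve() must stay off-limits to other allocations until the action is published. A short, hedged illustration of that reserve/publish/cancel contract (pool setup and error handling omitted; the helper name is made up for this sketch):

#include <libpmemobj.h>

static PMEMoid
reserve_example(PMEMobjpool *pop, size_t size, int keep)
{
	struct pobj_action act;
	PMEMoid oid = pmemobj_reserve(pop, &act, size, 0);

	if (OID_IS_NULL(oid))
		return OID_NULL;	/* no space available */

	if (keep)
		pmemobj_publish(pop, &act, 1);	/* becomes a regular allocation */
	else
		pmemobj_cancel(pop, &act, 1);	/* space returns to the heap */

	return keep ? oid : OID_NULL;
}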