8346011: [Lilliput] Compact Full-GC Forwarding

Roman Kennke committed Dec 11, 2024
1 parent 26b675f commit eb5ff17

Showing 13 changed files with 515 additions and 79 deletions.
3 changes: 0 additions & 3 deletions src/hotspot/share/gc/g1/g1Arguments.cpp
@@ -34,7 +34,6 @@
 #include "gc/g1/g1HeapRegionRemSet.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/shared/cardTable.hpp"
-#include "gc/shared/fullGCForwarding.hpp"
 #include "gc/shared/gcArguments.hpp"
 #include "gc/shared/workerPolicy.hpp"
 #include "runtime/globals.hpp"
@@ -244,8 +243,6 @@ void G1Arguments::initialize() {
   if (max_parallel_refinement_threads > UINT_MAX / divisor) {
     vm_exit_during_initialization("Too large parallelism for remembered sets.");
   }
-
-  FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
 }
 
 CollectedHeap* G1Arguments::create_heap() {
4 changes: 4 additions & 0 deletions src/hotspot/share/gc/g1/g1FullCollector.cpp
@@ -212,6 +212,8 @@ void G1FullCollector::collect() {
   // Don't add any more derived pointers during later phases
   deactivate_derived_pointers();
 
+  FullGCForwarding::begin();
+
   phase2_prepare_compaction();
 
   if (has_compaction_targets()) {
@@ -224,6 +226,8 @@
     log_info(gc, phases) ("No Regions selected for compaction. Skipping Phase 3: Adjust pointers and Phase 4: Compact heap");
   }
 
+  FullGCForwarding::end();
+
   phase5_reset_metadata();
 
   G1CollectedHeap::finish_codecache_marking_cycle();
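The same begin()/end() bracketing recurs in each collector this commit touches (G1 above, Parallel and Serial below): begin() allocates the per-full-GC forwarding structures before new addresses are computed, and end() releases them once compaction is done. A minimal, self-contained sketch of that lifecycle follows; ForwardingTable and its forward_to()/forwardee() methods are hypothetical stand-ins, since the real FullGCForwarding class is static and its per-object entry points live in fullGCForwarding.hpp, which this page does not show.

// Sketch of the begin()/forward/adjust/end lifecycle added by this commit.
// ForwardingTable is a simplified stand-in, not HotSpot's implementation.
#include <cassert>
#include <cstdint>
#include <unordered_map>

using Addr = uintptr_t;  // stand-in for HeapWord*

struct ForwardingTable {
  std::unordered_map<Addr, Addr> table;  // real code: per-block bases array + fallback hash table
  void begin() { table.clear(); }                             // allocate per-GC structures
  void forward_to(Addr from, Addr to) { table[from] = to; }   // record a destination
  Addr forwardee(Addr from) const { return table.at(from); }  // look a destination up
  void end() { table.clear(); }                               // release per-GC structures
};

int main() {
  ForwardingTable fwd;
  fwd.begin();                             // as in G1FullCollector::collect()
  fwd.forward_to(0x1000, 0x800);           // prepare/forward phase
  assert(fwd.forwardee(0x1000) == 0x800);  // adjust-pointers phase
  fwd.end();                               // after compaction
  return 0;
}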
3 changes: 0 additions & 3 deletions src/hotspot/share/gc/parallel/parallelArguments.cpp
@@ -27,7 +27,6 @@
 #include "gc/parallel/parallelArguments.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/shared/adaptiveSizePolicy.hpp"
-#include "gc/shared/fullGCForwarding.hpp"
 #include "gc/shared/gcArguments.hpp"
 #include "gc/shared/genArguments.hpp"
 #include "gc/shared/workerPolicy.hpp"
@@ -83,8 +82,6 @@ void ParallelArguments::initialize() {
   if (FLAG_IS_DEFAULT(ParallelRefProcEnabled) && ParallelGCThreads > 1) {
     FLAG_SET_DEFAULT(ParallelRefProcEnabled, true);
   }
-
-  FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
 }
 
 // The alignment used for boundary between young gen and old gen
4 changes: 4 additions & 0 deletions src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -1049,12 +1049,16 @@ bool PSParallelCompact::invoke_no_policy(bool clear_all_soft_refs) {
   DerivedPointerTable::set_active(false);
 #endif
 
+  FullGCForwarding::begin();
+
   forward_to_new_addr();
 
   adjust_pointers();
 
   compact();
 
+  FullGCForwarding::end();
+
   ParCompactionManager::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
 
   ParCompactionManager::verify_all_region_stack_empty();
7 changes: 0 additions & 7 deletions src/hotspot/share/gc/serial/serialArguments.cpp
@@ -23,16 +23,9 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/shared/fullGCForwarding.hpp"
-#include "gc/shared/gcArguments.hpp"
 #include "gc/serial/serialArguments.hpp"
 #include "gc/serial/serialHeap.hpp"
 
-void SerialArguments::initialize() {
-  GCArguments::initialize();
-  FullGCForwarding::initialize_flags(MaxHeapSize);
-}
-
 CollectedHeap* SerialArguments::create_heap() {
   return new SerialHeap();
 }
1 change: 0 additions & 1 deletion src/hotspot/share/gc/serial/serialArguments.hpp
@@ -31,7 +31,6 @@ class CollectedHeap;
 
 class SerialArguments : public GenArguments {
 private:
-  virtual void initialize();
   virtual CollectedHeap* create_heap();
 };
 
4 changes: 4 additions & 0 deletions src/hotspot/share/gc/serial/serialFullGC.cpp
@@ -697,6 +697,8 @@ void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
 
   phase1_mark(clear_all_softrefs);
 
+  FullGCForwarding::begin();
+
   Compacter compacter{gch};
 
   {
@@ -740,6 +742,8 @@ void SerialFullGC::invoke_at_safepoint(bool clear_all_softrefs) {
 
   restore_marks();
 
+  FullGCForwarding::end();
+
   deallocate_stacks();
 
   SerialFullGC::_string_dedup_requests->flush();
174 changes: 153 additions & 21 deletions src/hotspot/share/gc/shared/fullGCForwarding.cpp
@@ -24,34 +24,166 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/fullGCForwarding.hpp"
-#include "memory/memRegion.hpp"
-#include "runtime/globals_extension.hpp"
+#include "logging/log.hpp"
+#include "nmt/memTag.hpp"
+#include "utilities/ostream.hpp"
+#include "utilities/concurrentHashTable.inline.hpp"
+#include "utilities/fastHash.hpp"
+#include "utilities/powerOfTwo.hpp"
 
-HeapWord* FullGCForwarding::_heap_base = nullptr;
-int FullGCForwarding::_num_low_bits = 0;
-
-void FullGCForwarding::initialize_flags(size_t max_heap_size) {
-#ifdef _LP64
-  size_t max_narrow_heap_size = right_n_bits(NumLowBitsNarrow - Shift);
-  if (UseCompactObjectHeaders && max_heap_size > max_narrow_heap_size * HeapWordSize) {
-    warning("Compact object headers require a java heap size smaller than " SIZE_FORMAT
-            "%s (given: " SIZE_FORMAT "%s). Disabling compact object headers.",
-            byte_size_in_proper_unit(max_narrow_heap_size * HeapWordSize),
-            proper_unit_for_byte_size(max_narrow_heap_size * HeapWordSize),
-            byte_size_in_proper_unit(max_heap_size),
-            proper_unit_for_byte_size(max_heap_size));
-    FLAG_SET_ERGO(UseCompactObjectHeaders, false);
-  }
-#endif
-}
+static uintx hash(HeapWord* const& addr) {
+  uint64_t val = reinterpret_cast<uint64_t>(addr);
+  uint32_t hash = FastHash::get_hash32((uint32_t)val, (uint32_t)(val >> 32));
+  return hash;
+}
+
+struct ForwardingEntry {
+  HeapWord* _from;
+  HeapWord* _to;
+  ForwardingEntry(HeapWord* from, HeapWord* to) : _from(from), _to(to) {}
+};
+
+struct FallbackTableConfig {
+  using Value = ForwardingEntry;
+  static uintx get_hash(Value const& entry, bool* is_dead) {
+    return hash(entry._from);
+  }
+  static void* allocate_node(void* context, size_t size, Value const& value) {
+    return AllocateHeap(size, mtGC);
+  }
+  static void free_node(void* context, void* memory, Value const& value) {
+    FreeHeap(memory);
+  }
+};
+
+class FallbackTable : public ConcurrentHashTable<FallbackTableConfig, mtGC> {
+};
+
+class FallbackTableLookup : public StackObj {
+  ForwardingEntry const _entry;
+public:
+  explicit FallbackTableLookup(HeapWord* from) : _entry(from, nullptr) {}
+  uintx get_hash() const {
+    return hash(_entry._from);
+  }
+  bool equals(ForwardingEntry* value) {
+    return _entry._from == value->_from;
+  }
+  bool is_dead(ForwardingEntry* value) { return false; }
+};
+
+// We cannot use 0, because that may already be a valid base address in zero-based heaps.
+// 0x1 is safe because heap base addresses must be aligned to a much larger boundary.
+HeapWord* const FullGCForwarding::UNUSED_BASE = reinterpret_cast<HeapWord*>(0x1);
+
+HeapWord* FullGCForwarding::_heap_start = nullptr;
+size_t FullGCForwarding::_heap_start_region_bias = 0;
+size_t FullGCForwarding::_num_regions = 0;
+uintptr_t FullGCForwarding::_region_mask = 0;
+HeapWord** FullGCForwarding::_biased_bases = nullptr;
+HeapWord** FullGCForwarding::_bases_table = nullptr;
+FallbackTable* FullGCForwarding::_fallback_table = nullptr;
+#ifndef PRODUCT
+volatile uint64_t FullGCForwarding::_num_forwardings = 0;
+volatile uint64_t FullGCForwarding::_num_fallback_forwardings = 0;
+#endif

 void FullGCForwarding::initialize(MemRegion heap) {
 #ifdef _LP64
-  _heap_base = heap.start();
-  if (UseCompactObjectHeaders) {
-    _num_low_bits = NumLowBitsNarrow;
-  } else {
-    _num_low_bits = NumLowBitsWide;
-  }
+  _heap_start = heap.start();
+
+  size_t rounded_heap_size = round_up_power_of_2(heap.byte_size());
+
+  _num_regions = (rounded_heap_size / BytesPerWord) / BLOCK_SIZE_WORDS;
+
+  _heap_start_region_bias = (uintptr_t)_heap_start >> BLOCK_SIZE_BYTES_SHIFT;
+  _region_mask = ~((uintptr_t(1) << BLOCK_SIZE_BYTES_SHIFT) - 1);
+
+  guarantee((_heap_start_region_bias << BLOCK_SIZE_BYTES_SHIFT) == (uintptr_t)_heap_start,
+            "must be aligned: _heap_start_region_bias: " SIZE_FORMAT ", BLOCK_SIZE_BYTES_SHIFT: %u, _heap_start: " PTR_FORMAT,
+            _heap_start_region_bias, BLOCK_SIZE_BYTES_SHIFT, p2i(_heap_start));
+
+  assert(_bases_table == nullptr, "should not be initialized yet");
+  assert(_fallback_table == nullptr, "should not be initialized yet");
 #endif
 }

+void FullGCForwarding::begin() {
+#ifdef _LP64
+  assert(_bases_table == nullptr, "should not be initialized yet");
+  assert(_fallback_table == nullptr, "should not be initialized yet");
+
+  _fallback_table = new FallbackTable();
+
+#ifndef PRODUCT
+  _num_forwardings = 0;
+  _num_fallback_forwardings = 0;
+#endif
+
+  size_t max = _num_regions;
+  _bases_table = NEW_C_HEAP_ARRAY(HeapWord*, max, mtGC);
+  HeapWord** biased_start = _bases_table - _heap_start_region_bias;
+  _biased_bases = biased_start;
+  for (size_t i = 0; i < max; i++) {
+    _bases_table[i] = UNUSED_BASE;
+  }
+#endif
+}
+
+void FullGCForwarding::end() {
+#ifndef PRODUCT
+  log_info(gc)("Total forwardings: " UINT64_FORMAT ", fallback forwardings: " UINT64_FORMAT
+               ", ratio: %f, memory used by fallback table: " SIZE_FORMAT "%s, memory used by bases table: " SIZE_FORMAT "%s",
+               _num_forwardings, _num_fallback_forwardings, (float)_num_forwardings / (float)_num_fallback_forwardings,
+               byte_size_in_proper_unit(_fallback_table->get_mem_size(Thread::current())),
+               proper_unit_for_byte_size(_fallback_table->get_mem_size(Thread::current())),
+               byte_size_in_proper_unit(sizeof(HeapWord*) * _num_regions),
+               proper_unit_for_byte_size(sizeof(HeapWord*) * _num_regions));
+#endif
+#ifdef _LP64
+  assert(_bases_table != nullptr, "should be initialized");
+  FREE_C_HEAP_ARRAY(HeapWord*, _bases_table);
+  _bases_table = nullptr;
+  delete _fallback_table;
+  _fallback_table = nullptr;
+#endif
+}

+void FullGCForwarding::fallback_forward_to(HeapWord* from, HeapWord* to) {
+  assert(to != nullptr, "no null forwarding");
+  assert(_fallback_table != nullptr, "should be initialized");
+  FallbackTableLookup lookup_f(from);
+  ForwardingEntry entry(from, to);
+  auto found_f = [&](ForwardingEntry* found) {
+    // If a duplicate is found, override it with the new value.
+    // This is also called when a new entry has been successfully inserted.
+    if (found->_to != to) {
+      found->_to = to;
+    }
+  };
+  Thread* current_thread = Thread::current();
+  bool grow;
+  bool added = _fallback_table->insert_get(current_thread, lookup_f, entry, found_f, &grow);
+  NOT_PRODUCT(Atomic::inc(&_num_fallback_forwardings);)
+#ifdef ASSERT
+  assert(fallback_forwardee(from) != nullptr, "must have entered forwarding");
+  assert(fallback_forwardee(from) == to, "forwarding must be correct, added: %s, from: " PTR_FORMAT ", to: " PTR_FORMAT ", fwd: " PTR_FORMAT,
+         BOOL_TO_STR(added), p2i(from), p2i(to), p2i(fallback_forwardee(from)));
+#endif
+  if (grow) {
+    _fallback_table->grow(current_thread);
+    tty->print_cr("grow fallback table to size: " SIZE_FORMAT " bytes",
+                  _fallback_table->get_mem_size(current_thread));
+  }
+}
+
+HeapWord* FullGCForwarding::fallback_forwardee(HeapWord* from) {
+  assert(_fallback_table != nullptr, "fallback table must be present");
+  HeapWord* result;
+  FallbackTableLookup lookup_f(from);
+  auto found_f = [&](ForwardingEntry* found) {
+    result = found->_to;
+  };
+  bool found = _fallback_table->get(Thread::current(), lookup_f, found_f);
+  assert(found, "something must have been found");
+  assert(result != nullptr, "must have found forwarding");
+  return result;
+}
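Taken together, this file implements the compact forwarding scheme in two tiers: a flat _bases_table holding one compaction-target base per fixed-size heap block, biased by _heap_start_region_bias so a from-address indexes it with a single shift, plus the concurrent fallback hash table above for forwardings the per-block encoding cannot represent. The per-object offset encoding itself lives in fullGCForwarding.hpp/.inline.hpp, which this page does not show. A simplified, self-contained sketch of the biased-indexing arithmetic, assuming an illustrative 1 KB block size in place of the real BLOCK_SIZE_BYTES_SHIFT:

// Sketch of the biased per-block bases table; constants are illustrative.
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr unsigned kBlockBytesShift = 10;  // assumed block size: 1 KB
constexpr uintptr_t kUnusedBase = 0x1;     // like UNUSED_BASE: never a valid heap base

struct BiasedBases {
  std::vector<uintptr_t> bases;  // one entry per block, like _bases_table
  size_t start_bias;             // heap_start >> shift, like _heap_start_region_bias

  BiasedBases(uintptr_t heap_start, size_t heap_bytes)
    : bases(heap_bytes >> kBlockBytesShift, kUnusedBase),
      start_bias(heap_start >> kBlockBytesShift) {}

  // (addr >> shift) - bias: the effect of the
  // _biased_bases = _bases_table - _heap_start_region_bias trick.
  uintptr_t& base_for(uintptr_t addr) {
    return bases[(addr >> kBlockBytesShift) - start_bias];
  }
};

int main() {
  uintptr_t heap_start = 0x100000;  // block-aligned, as the guarantee() checks
  BiasedBases b(heap_start, 1u << 20);
  b.base_for(heap_start + 0x2400) = 0x80000;  // first forwarding in a block records its base
  std::printf("base for block: %#lx\n", (unsigned long)b.base_for(heap_start + 0x2400));
  return 0;
}

One design note visible in fallback_forward_to(): per its own comment, insert_get() runs the found_f functor both when a duplicate entry already exists and right after a successful insert, so recording a forwarding is idempotent when several GC worker threads enter the same entry concurrently.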
(Diff for the remaining 5 changed files was not loaded.)