Skip to content

Commit

Permalink
8320761: [Lilliput] Implement compact identity hashcode
Browse files Browse the repository at this point in the history
  • Loading branch information
rkennke committed Jul 26, 2024
1 parent c6c93f7 commit dd5be45
Show file tree
Hide file tree
Showing 68 changed files with 1,059 additions and 240 deletions.
15 changes: 7 additions & 8 deletions src/hotspot/cpu/x86/sharedRuntime_x86.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,11 @@ void SharedRuntime::inline_check_hashcode_from_object_header(MacroAssembler* mas
__ bind(Continue);
}

if (UseCompactObjectHeaders) {
// Don't generate anything else and always take the slow-path for now.
return;
}

__ movptr(result, Address(obj_reg, oopDesc::mark_offset_in_bytes()));


Expand All @@ -78,14 +83,8 @@ void SharedRuntime::inline_check_hashcode_from_object_header(MacroAssembler* mas
// Read the header and build a mask to get its hash field.
// Depend on hash_mask being at most 32 bits and avoid the use of hash_mask_in_place
// because it could be larger than 32 bits in a 64-bit vm. See markWord.hpp.
if (UseCompactObjectHeaders) {
STATIC_ASSERT(markWord::hash_mask_compact < nth_bit(32));
__ shrptr(result, markWord::hash_shift_compact);
__ andptr(result, markWord::hash_mask_compact);
} else {
__ shrptr(result, markWord::hash_shift);
__ andptr(result, markWord::hash_mask);
}
__ shrptr(result, markWord::hash_shift);
__ andptr(result, markWord::hash_mask);
#else
__ andptr(result, markWord::hash_mask_in_place);
#endif //_LP64
Expand Down
4 changes: 3 additions & 1 deletion src/hotspot/share/cds/archiveBuilder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1143,7 +1143,9 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
if (source_oop != nullptr) {
// This is a regular oop that got archived.
print_oop_with_requested_addr_cr(&st, source_oop, false);
byte_size = source_oop->size() * BytesPerWord;
size_t old_size = source_oop->size();
size_t new_size = source_oop->copy_size(old_size, source_oop->mark());
byte_size = new_size * BytesPerWord;
} else if (start == ArchiveHeapWriter::buffered_heap_roots_addr()) {
// HeapShared::roots() is copied specially, so it doesn't exist in
// ArchiveHeapWriter::BufferOffsetToSourceObjectTable.
Expand Down
40 changes: 30 additions & 10 deletions src/hotspot/share/cds/archiveHeapWriter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -394,7 +394,9 @@ void update_buffered_object_field(address buffered_obj, int field_offset, T valu

size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
assert(!is_too_large_to_archive(src_obj), "already checked");
size_t byte_size = src_obj->size() * HeapWordSize;
size_t old_size = src_obj->size();
size_t new_size = src_obj->copy_size(old_size, src_obj->mark());
size_t byte_size = new_size * HeapWordSize;
assert(byte_size > 0, "no zero-size objects");

// For region-based collectors such as G1, the archive heap may be mapped into
Expand All @@ -413,9 +415,11 @@ size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {

address from = cast_from_oop<address>(src_obj);
address to = offset_to_buffered_address<address>(_buffer_used);
log_info(gc)("Copying obj: " PTR_FORMAT ", to: " PTR_FORMAT ", old_size: " SIZE_FORMAT ", new_size: " SIZE_FORMAT, p2i(src_obj), p2i(to), old_size, new_size);

assert(is_object_aligned(_buffer_used), "sanity");
assert(is_object_aligned(byte_size), "sanity");
memcpy(to, from, byte_size);
memcpy(to, from, old_size * HeapWordSize);

// These native pointers will be restored explicitly at run time.
if (java_lang_Module::is_instance(src_obj)) {
Expand Down Expand Up @@ -539,24 +543,40 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s

oop fake_oop = cast_to_oop(buffered_addr);
if (UseCompactObjectHeaders) {
fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
if (src_obj == nullptr) {
// Other case below.
fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
}
} else {
fake_oop->set_narrow_klass(nk);
}

// We need to retain the identity_hash, because it may have been used by some hashtables
// in the shared heap.
if (src_obj != nullptr && !src_obj->fast_no_hash_check()) {
intptr_t src_hash = src_obj->identity_hash();
// in the shared heap. This also has the side effect of pre-initializing the
// identity_hash for all shared objects, so they are less likely to be written
// into during run time, increasing the potential of memory sharing.
if (src_obj != nullptr) {
if (UseCompactObjectHeaders) {
fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
markWord m = markWord::prototype().set_narrow_klass(nk);
m = m.hash_copy_hashctrl_from(src_obj->mark());
assert(m.hashctrl() == src_obj->mark().hashctrl(), "hashctrl must match");
if (m.has_no_hash()) {
m.hash_set_hashed();
} else if (m.hash_is_hashed()) {
intptr_t src_hash = src_obj->identity_hash();
log_info(gc)("init_hash: old: " PTR_FORMAT ", new: " PTR_FORMAT, p2i(src_obj), p2i(fake_oop));
m = fake_oop->initialize_hash_if_necessary(src_obj, src_klass, m);
fake_oop->set_mark(m);
assert(src_hash == fake_oop->identity_hash(), "i-hash must match");
}
fake_oop->set_mark(m);
} else {
intptr_t src_hash = src_obj->identity_hash();
fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
}
assert(fake_oop->mark().is_unlocked(), "sanity");

DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
}
}

Expand Down
4 changes: 4 additions & 0 deletions src/hotspot/share/ci/ciInstanceKlass.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -296,6 +296,10 @@ class ciInstanceKlass : public ciKlass {
}
GrowableArray<ciInstanceKlass*>* transitive_interfaces() const;

// Byte offset of the identity hash field within instances of this klass.
// Delegates to the underlying InstanceKlass; the nullptr argument
// presumably selects the object-independent (layout-time) offset —
// verify against InstanceKlass::hash_offset_in_bytes.
int hash_offset_in_bytes() const {
  auto* ik = get_instanceKlass();
  return ik->hash_offset_in_bytes(nullptr);
}

// Replay support

// Dump the current state of this klass for compilation replay.
Expand Down
3 changes: 3 additions & 0 deletions src/hotspot/share/ci/ciKlass.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,9 @@ class ciKlass : public ciType {
public:
ciKlass(Klass* k);

// Whether the wrapped Klass is a mirror instance klass.
bool is_mirror_instance_klass() {
  return get_Klass()->is_mirror_instance_klass();
}
// Whether the wrapped Klass is a reference instance klass.
bool is_reference_instance_klass() {
  return get_Klass()->is_reference_instance_klass();
}

// What is the name of this klass?
ciSymbol* name() const { return _name; }

Expand Down
3 changes: 3 additions & 0 deletions src/hotspot/share/classfile/altHashing.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,9 @@
// objects. We don't want to call the synchronizer hash code to install
// this value because it may safepoint.
// Identity hash for the klass's Java mirror: the value already installed in
// the mirror's mark word if present, otherwise a random value. With compact
// object headers the mark word is not consulted at all and a random value is
// always returned.
static intptr_t object_hash(Klass* k) {
  if (UseCompactObjectHeaders) {
    return os::random();
  }
  const intptr_t hash = k->java_mirror()->mark().hash();
  if (hash != markWord::no_hash) {
    return hash;
  }
  return os::random();
}
Expand Down
4 changes: 4 additions & 0 deletions src/hotspot/share/classfile/classFileParser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5086,6 +5086,10 @@ jint ClassFileParser::layout_size() const {
return _field_info->_instance_size;
}

// In-object byte offset reserved for the identity hash, as computed
// during field layout.
int ClassFileParser::hash_offset() const {
  const int offset = _field_info->_hash_offset;
  return offset;
}

static void check_methods_for_intrinsics(const InstanceKlass* ik,
const Array<Method*>* methods) {
assert(ik != nullptr, "invariant");
Expand Down
2 changes: 2 additions & 0 deletions src/hotspot/share/classfile/classFileParser.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@ class FieldLayoutInfo : public ResourceObj {
public:
OopMapBlocksBuilder* oop_map_blocks;
int _instance_size;
int _hash_offset;
int _nonstatic_field_size;
int _static_field_size;
bool _has_nonstatic_fields;
Expand Down Expand Up @@ -549,6 +550,7 @@ class ClassFileParser {
int static_field_size() const;
int total_oop_map_count() const;
jint layout_size() const;
int hash_offset() const;

int vtable_size() const { return _vtable_size; }
int itable_size() const { return _itable_size; }
Expand Down
20 changes: 20 additions & 0 deletions src/hotspot/share/classfile/fieldLayoutBuilder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,23 @@ void FieldLayout::add(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* star
}
}

// Finds a slot for the identity hash-code.
// Same basic algorithm as above add() method, but simplified
// and does not actually insert the field.
int FieldLayout::find_hash_offset() {
LayoutRawBlock* start = this->_start;
LayoutRawBlock* last = last_block();
LayoutRawBlock* cursor = start;
while (cursor != last) {
assert(cursor != nullptr, "Sanity check");
if (cursor->kind() == LayoutRawBlock::EMPTY && cursor->fit(4, 1)) {
break;
}
cursor = cursor->next_block();
}
return cursor->offset();
}

// Used for classes with hard-coded field offsets; inserts a field at the specified offset.
void FieldLayout::add_field_at_offset(LayoutRawBlock* block, int offset, LayoutRawBlock* start) {
assert(block != nullptr, "Sanity check");
Expand Down Expand Up @@ -688,6 +705,9 @@ void FieldLayoutBuilder::epilogue() {

_info->oop_map_blocks = nonstatic_oop_maps;
_info->_instance_size = align_object_size(instance_end / wordSize);
if (UseCompactObjectHeaders) {
_info->_hash_offset = _layout->find_hash_offset();
}
_info->_static_field_size = static_fields_size;
_info->_nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
_info->_has_nonstatic_fields = _has_nonstatic_fields;
Expand Down
1 change: 1 addition & 0 deletions src/hotspot/share/classfile/fieldLayoutBuilder.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -189,6 +189,7 @@ class FieldLayout : public ResourceObj {

LayoutRawBlock* first_field_block();
void add(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start = nullptr);
int find_hash_offset();
void add_field_at_offset(LayoutRawBlock* blocks, int offset, LayoutRawBlock* start = nullptr);
void add_contiguously(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start = nullptr);
LayoutRawBlock* insert_field_block(LayoutRawBlock* slot, LayoutRawBlock* block);
Expand Down
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1051,7 +1051,8 @@ void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id)
Prefetch::read(curr, interval);
oop obj = cast_to_oop(curr);
size_t size = obj->oop_iterate_size(&cl);
assert(size == obj->size(), "sanity");
if (UseCompactObjectHeaders) log_trace(gc)("Scan object : " PTR_FORMAT ", with size: " SIZE_FORMAT, p2i(obj), size);
assert(size == obj->size(), "sanity: size: " SIZE_FORMAT ", obj-size: " SIZE_FORMAT, size, obj->size());
curr += size;
}
}
Expand Down
2 changes: 2 additions & 0 deletions src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,10 +60,12 @@ void G1FullGCCompactTask::copy_object_to_new_location(oop obj) {
// Copy object and reinit its mark.
HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
HeapWord* destination = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee(obj));
assert(obj_addr != destination, "only copy actually-moving objects");
Copy::aligned_conjoint_words(obj_addr, destination, size);

// There is no need to transform stack chunks - marking already did that.
cast_to_oop(destination)->init_mark();
cast_to_oop(destination)->initialize_hash_if_necessary(obj);
assert(cast_to_oop(destination)->klass() != nullptr, "should have a class");
}

Expand Down
12 changes: 10 additions & 2 deletions src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -97,9 +97,14 @@ void G1FullGCCompactionPoint::switch_region() {
void G1FullGCCompactionPoint::forward(oop object, size_t size) {
assert(_current_region != nullptr, "Must have been initialized");

size_t old_size = size;
size_t new_size = object->copy_size(old_size, object->mark());
size = cast_from_oop<HeapWord*>(object) != _compaction_top ? new_size : old_size;

// Ensure the object fit in the current region.
while (!object_will_fit(size)) {
switch_region();
size = cast_from_oop<HeapWord*>(object) != _compaction_top ? new_size : old_size;
}

// Store a forwarding pointer if the object should be moved.
Expand Down Expand Up @@ -154,8 +159,10 @@ void G1FullGCCompactionPoint::forward_humongous(G1HeapRegion* hr) {
assert(hr->is_starts_humongous(), "Sanity!");

oop obj = cast_to_oop(hr->bottom());
size_t obj_size = obj->size();
uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size);
size_t old_size = obj->size();
size_t new_size = obj->copy_size(old_size, obj->mark());

uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(new_size);

if (!has_regions()) {
return;
Expand All @@ -173,6 +180,7 @@ void G1FullGCCompactionPoint::forward_humongous(G1HeapRegion* hr) {
preserved_stack()->push_if_necessary(obj, obj->mark());

G1HeapRegion* dest_hr = _compaction_regions->at(range_begin);
assert(hr->bottom() != dest_hr->bottom(), "assuming actual humongous move");
SlidingForwarding::forward_to(obj, cast_to_oop(dest_hr->bottom()));
assert(SlidingForwarding::is_forwarded(obj), "Object must be forwarded!");

Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
if (SlidingForwarding::is_forwarded(obj)) {
oop forwardee = SlidingForwarding::forwardee(obj);
// Forwarded, just update.
assert(G1CollectedHeap::heap()->is_in_reserved(forwardee), "should be in object space");
assert(G1CollectedHeap::heap()->is_in_reserved(forwardee), "should be in object space, obj: " PTR_FORMAT ", forwardee: " PTR_FORMAT ", mark: " INTPTR_FORMAT ", pre: " INTPTR_FORMAT ", post: " INTPTR_FORMAT, p2i(obj), p2i(forwardee), obj->mark().value(), *(cast_from_oop<intptr_t*>(obj)) - 1, *(cast_from_oop<intptr_t*>(obj) + 1));
RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);
}

Expand Down
1 change: 1 addition & 0 deletions src/hotspot/share/gc/g1/g1HeapRegion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -731,6 +731,7 @@ void G1HeapRegion::object_iterate(ObjectClosure* blk) {
HeapWord* p = bottom();
while (p < top()) {
if (block_is_obj(p, parsable_bottom())) {
log_trace(gc)("Iterate object: " PTR_FORMAT, p2i(p));
blk->do_object(cast_to_oop(p));
}
p += block_size(p);
Expand Down
14 changes: 12 additions & 2 deletions src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -454,6 +454,8 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
assert(region_attr.is_in_cset(),
"Unexpected region attr type: %s", region_attr.get_type_str());

assert(!old_mark.is_marked(), "must not yet be forwarded");

// Get the klass once. We'll need it again later, and this avoids
// re-decoding when it's compressed.
// NOTE: With compact headers, it is not safe to load the Klass* from o, because
Expand All @@ -463,7 +465,8 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
// the mark-word that we have already loaded. This is safe, because we have checked
// that this is not yet forwarded in the caller.
Klass* klass = old->forward_safe_klass(old_mark);
const size_t word_sz = old->size_given_klass(klass);
const size_t old_size = old->size_given_mark_and_klass(old_mark, klass);
const size_t word_sz = old->copy_size(old_size, old_mark);

// JNI only allows pinning of typeArrays, so we only need to keep those in place.
if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
Expand Down Expand Up @@ -499,9 +502,13 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
}

if (old_size != word_sz) {
log_trace(gc)("expanding obj: " PTR_FORMAT ", old_size: " SIZE_FORMAT ", new object: " PTR_FORMAT ", word_sz: " SIZE_FORMAT, p2i(old), old_size, p2i(obj_ptr), word_sz);
}

// We're going to allocate linearly, so might as well prefetch ahead.
Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, old_size);

const oop obj = cast_to_oop(obj_ptr);
// Because the forwarding is done with memory_order_relaxed there is no
Expand All @@ -518,6 +525,9 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
_surviving_young_words[young_index] += word_sz;
}

// Initialize i-hash if necessary
obj->initialize_hash_if_necessary(old);

if (dest_attr.is_young()) {
if (age < markWord::max_age) {
age++;
Expand Down
4 changes: 3 additions & 1 deletion src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,9 @@ inline void ParCompactionManager::mark_and_push(T* p) {
ContinuationGCSupport::transform_stack_chunk(obj);

assert(_marking_stats_cache != nullptr, "inv");
_marking_stats_cache->push(obj, obj->size());
size_t old_size = obj->size();
size_t new_size = obj->copy_size(old_size, obj->mark());
_marking_stats_cache->push(obj, new_size);
push(obj);
}
}
Expand Down
9 changes: 7 additions & 2 deletions src/hotspot/share/gc/parallel/psParallelCompact.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1602,12 +1602,17 @@ void PSParallelCompact::forward_to_new_addr() {
assert(mark_bitmap()->is_marked(cur_addr), "inv");
HeapWord* new_addr = destination + live_words;
oop obj = cast_to_oop(cur_addr);
size_t obj_size = obj->size();
size_t new_size = obj_size;
if (new_addr != cur_addr) {
cm->preserved_marks()->push_if_necessary(obj, obj->mark());
SlidingForwarding::forward_to(obj, cast_to_oop(new_addr));
new_size = obj->copy_size(obj_size, obj->mark());
if (new_size != obj_size) {
Unimplemented();
}
}
size_t obj_size = obj->size();
live_words += obj_size;
live_words += new_size;
cur_addr += obj_size;
}
}
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/parallel/psParallelCompact.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -234,7 +234,7 @@ class ParallelCompactData
// in this region (words). This does not include the partial object
// extending onto the region (if any), or the part of an object that extends
// onto the next region (if any).
size_t live_obj_size() const {
    const size_t los = (size_t)_dc_and_los & los_mask;
    // Clamp to the space left in the region after the incoming partial
    // object, so live data is never over-reported.
    return MIN2(los, RegionSize - partial_obj_size());
  }
size_t live_obj_size() const { return MIN2((size_t)_dc_and_los & los_mask, RegionSize - partial_obj_size()); }

// Total live data that lies within the region (words).
size_t data_size() const { return partial_obj_size() + live_obj_size(); }
Expand Down
6 changes: 4 additions & 2 deletions src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,8 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
// the mark-word that we have already loaded. This is safe, because we have checked
// that this is not yet forwarded in the caller.
Klass* klass = o->forward_safe_klass(test_mark);
size_t new_obj_size = o->size_given_klass(klass);
size_t old_obj_size = o->size_given_mark_and_klass(test_mark, klass);
size_t new_obj_size = o->copy_size(old_obj_size, test_mark);

// Find the objects age, MT safe.
uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
Expand Down Expand Up @@ -258,14 +259,15 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
assert(new_obj != nullptr, "allocation should have succeeded");

// Copy obj
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), old_obj_size);

if (UseCompactObjectHeaders) {
// The copy above is not atomic. Make sure we have seen the proper mark
// and re-install it into the copy, so that Klass* is guaranteed to be correct.
markWord mark = o->mark();
if (!mark.is_forwarded()) {
new_obj->set_mark(mark);
new_obj->initialize_hash_if_necessary(o);
ContinuationGCSupport::transform_stack_chunk(new_obj);
} else {
// If we copied a mark-word that indicates 'forwarded' state, the object
Expand Down
Loading

0 comments on commit dd5be45

Please sign in to comment.