diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86.cpp
index 78330962d1a..c3ac5c9cedb 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86.cpp
@@ -58,6 +58,11 @@ void SharedRuntime::inline_check_hashcode_from_object_header(MacroAssembler* mas
__ bind(Continue);
}
+ if (UseCompactObjectHeaders) {
+ // Don't generate anything else and always take the slow-path for now.
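+ // (With compact headers the i-hash may live in a field outside the mark
+ // word, which the inline fast-path below does not know how to read.)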
+ return;
+ }
+
__ movptr(result, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp
index 1cd9d13c3ba..768a2a9baa0 100644
--- a/src/hotspot/share/cds/archiveBuilder.cpp
+++ b/src/hotspot/share/cds/archiveBuilder.cpp
@@ -1298,7 +1298,9 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
// Example:
// 0x00000007ffd27938: @@ Object (0xfffa4f27) java.util.HashMap
print_oop_info_cr(&st, source_oop, /*print_requested_addr=*/false);
- byte_size = source_oop->size() * BytesPerWord;
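+ // The archived copy may be one word larger than the source object when
+ // the i-hash field still needs to be expanded (see copy_size_cds()).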
+ size_t old_size = source_oop->size();
+ size_t new_size = source_oop->copy_size_cds(old_size, source_oop->mark());
+ byte_size = new_size * BytesPerWord;
} else if ((byte_size = ArchiveHeapWriter::get_filler_size_at(start)) > 0) {
// We have a filler oop, which also does not exist in BufferOffsetToSourceObjectTable.
// Example:
diff --git a/src/hotspot/share/cds/archiveHeapWriter.cpp b/src/hotspot/share/cds/archiveHeapWriter.cpp
index be821044a96..393a053e864 100644
--- a/src/hotspot/share/cds/archiveHeapWriter.cpp
+++ b/src/hotspot/share/cds/archiveHeapWriter.cpp
@@ -417,7 +417,9 @@ void update_buffered_object_field(address buffered_obj, int field_offset, T valu
size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
assert(!is_too_large_to_archive(src_obj), "already checked");
- size_t byte_size = src_obj->size() * HeapWordSize;
+ size_t old_size = src_obj->size();
+ size_t new_size = src_obj->copy_size_cds(old_size, src_obj->mark());
+ size_t byte_size = new_size * HeapWordSize;
assert(byte_size > 0, "no zero-size objects");
// For region-based collectors such as G1, the archive heap may be mapped into
@@ -436,9 +438,11 @@ size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
address from = cast_from_oop<address>(src_obj);
address to = offset_to_buffered_address(_buffer_used);
+ log_info(gc)("Copying obj: " PTR_FORMAT ", to: " PTR_FORMAT ", old_size: " SIZE_FORMAT ", new_size: " SIZE_FORMAT, p2i(src_obj), p2i(to), old_size, new_size);
+
assert(is_object_aligned(_buffer_used), "sanity");
assert(is_object_aligned(byte_size), "sanity");
- memcpy(to, from, byte_size);
+ memcpy(to, from, old_size * HeapWordSize);
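+ // Only the original old_size words are copied; a hash word added by
+ // copy_size_cds() is filled in when the buffered header is updated.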
// These native pointers will be restored explicitly at run time.
if (java_lang_Module::is_instance(src_obj)) {
@@ -574,6 +578,7 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s
oop fake_oop = cast_to_oop(buffered_addr);
if (UseCompactObjectHeaders) {
fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
+ assert(fake_oop->mark().narrow_klass() != 0, "must not be null");
} else {
fake_oop->set_narrow_klass(nk);
}
@@ -586,15 +591,22 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s
if (!src_obj->fast_no_hash_check()) {
intptr_t src_hash = src_obj->identity_hash();
if (UseCompactObjectHeaders) {
- fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
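+ // Carry over only the hashctrl state instead of the hash value itself;
+ // a hashed-but-not-yet-expanded source gets its hash materialized into
+ // the reserved field of the buffered copy below.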
+ markWord m = markWord::prototype().set_narrow_klass(nk);
+ m = m.copy_hashctrl_from(src_obj->mark());
+ fake_oop->set_mark(m);
+ if (m.is_hashed_not_expanded()) {
+ fake_oop->initialize_hash_if_necessary(src_obj, src_klass, m);
+ }
} else {
fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
}
assert(fake_oop->mark().is_unlocked(), "sanity");
- DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
- assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
+ //log_trace(gc)("fake_oop: " PTR_FORMAT, p2i(fake_oop));
+ //DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
+ //assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
}
+ assert(!UseCompactObjectHeaders || (!fake_oop->mark().is_not_hashed_expanded() && !fake_oop->mark().is_hashed_not_expanded()), "must be neither not-hashed-expanded nor hashed-not-expanded");
// Strip age bits.
fake_oop->set_mark(fake_oop->mark().set_age(0));
}
diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp
index d2ab109cc72..00b2f353141 100644
--- a/src/hotspot/share/cds/heapShared.cpp
+++ b/src/hotspot/share/cds/heapShared.cpp
@@ -378,7 +378,7 @@ void HeapShared::init_scratch_objects(TRAPS) {
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
BasicType bt = (BasicType)i;
if (!is_reference_type(bt)) {
- oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
+ oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, true, CHECK);
_scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
}
}
@@ -530,18 +530,38 @@ void HeapShared::copy_aot_initialized_mirror(Klass* orig_k, oop orig_mirror, oop
static void copy_java_mirror_hashcode(oop orig_mirror, oop scratch_m) {
// We need to retain the identity_hash, because it may have been used by some hashtables
// in the shared heap.
+ assert(!UseCompactObjectHeaders || scratch_m->mark().is_not_hashed_expanded(), "scratch mirror must have not-hashed-expanded state");
if (!orig_mirror->fast_no_hash_check()) {
+ intptr_t orig_mark = orig_mirror->mark().value();
intptr_t src_hash = orig_mirror->identity_hash();
if (UseCompactObjectHeaders) {
- narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
- scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
+ // We leave the cases not_hashed/not_hashed_expanded as they are.
+ assert(orig_mirror->mark().is_hashed_not_expanded() || orig_mirror->mark().is_hashed_expanded(), "must be hashed");
+ Klass* orig_klass = orig_mirror->klass();
+ narrowKlass nk = CompressedKlassPointers::encode(orig_klass);
+ markWord mark = markWord::prototype().set_narrow_klass(nk);
+ mark = mark.copy_hashctrl_from(orig_mirror->mark());
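+ // Either let initialize_hash_if_necessary() compute and store the hash
+ // (hashed-not-expanded), or copy the already-stored hash field over
+ // directly (hashed-expanded).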
+ if (mark.is_hashed_not_expanded()) {
+ scratch_m->initialize_hash_if_necessary(orig_mirror, orig_klass, mark);
+ } else {
+ assert(mark.is_hashed_expanded(), "must be hashed-expanded");
+ int offset = orig_klass->hash_offset_in_bytes(orig_mirror);
+ assert(offset >= 8, "hash offset must not be in header");
+ scratch_m->int_field_put(offset, (jint) src_hash);
+ scratch_m->set_mark(mark);
+ }
+ assert(scratch_m->mark().is_hashed_expanded(), "must be hashed-expanded");
} else {
scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
}
assert(scratch_m->mark().is_unlocked(), "sanity");
DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
- assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
+ assert(src_hash == archived_hash, "Different hash codes, orig_mark: " INTPTR_FORMAT ", scratch mark: " INTPTR_FORMAT ", orig hash: " INTPTR_FORMAT ", new hash: " INTPTR_FORMAT, orig_mark, scratch_m->mark().value(), src_hash, archived_hash);
+ }
+ assert(!UseCompactObjectHeaders || scratch_m->mark().is_not_hashed_expanded() || scratch_m->mark().is_hashed_expanded(), "must be not-hashed-expanded or hashed-expanded");
+ if (UseCompactObjectHeaders) {
+ log_trace(gc)("Updated hashctrl of scratch mirror: " PTR_FORMAT ", mark: " INTPTR_FORMAT, p2i(scratch_m), scratch_m->mark().value());
}
}
diff --git a/src/hotspot/share/ci/ciInstanceKlass.hpp b/src/hotspot/share/ci/ciInstanceKlass.hpp
index 4c327e1f32d..7b8de35784a 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp
@@ -296,6 +296,10 @@ class ciInstanceKlass : public ciKlass {
}
GrowableArray<ciInstanceKlass*>* transitive_interfaces() const;
+ int hash_offset_in_bytes() const {
+ return get_instanceKlass()->hash_offset_in_bytes(nullptr);
+ }
+
// Replay support
// Dump the current state of this klass for compilation replay.
diff --git a/src/hotspot/share/ci/ciKlass.hpp b/src/hotspot/share/ci/ciKlass.hpp
index 7b8d871eb56..79fa0e8d10b 100644
--- a/src/hotspot/share/ci/ciKlass.hpp
+++ b/src/hotspot/share/ci/ciKlass.hpp
@@ -75,6 +75,9 @@ class ciKlass : public ciType {
public:
ciKlass(Klass* k);
+ bool is_mirror_instance_klass() { return get_Klass()->is_mirror_instance_klass(); }
+ bool is_reference_instance_klass() { return get_Klass()->is_reference_instance_klass(); }
+
// What is the name of this klass?
ciSymbol* name() const { return _name; }
diff --git a/src/hotspot/share/classfile/altHashing.cpp b/src/hotspot/share/classfile/altHashing.cpp
index 1d43d6ebf1e..7baf18ba30a 100644
--- a/src/hotspot/share/classfile/altHashing.cpp
+++ b/src/hotspot/share/classfile/altHashing.cpp
@@ -56,6 +56,9 @@
// objects. We don't want to call the synchronizer hash code to install
// this value because it may safepoint.
static intptr_t object_hash(Klass* k) {
+ if (UseCompactObjectHeaders) {
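+ // With compact headers the mark word no longer carries the hash value,
+ // so this seed-mixing path simply uses a random component.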
+ return os::random();
+ }
intptr_t hc = k->java_mirror()->mark().hash();
return hc != markWord::no_hash ? hc : os::random();
}
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index a26831cd783..16872a21796 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -4886,6 +4886,14 @@ jint ClassFileParser::layout_size() const {
return _field_info->_instance_size;
}
+int ClassFileParser::hash_offset() const {
+ return _field_info->_hash_offset;
+}
+
+int ClassFileParser::static_hash_offset() const {
+ return _field_info->_static_hash_offset;
+}
+
static void check_methods_for_intrinsics(const InstanceKlass* ik,
const Array<Method*>* methods) {
assert(ik != nullptr, "invariant");
diff --git a/src/hotspot/share/classfile/classFileParser.hpp b/src/hotspot/share/classfile/classFileParser.hpp
index e993120d140..e6165568c40 100644
--- a/src/hotspot/share/classfile/classFileParser.hpp
+++ b/src/hotspot/share/classfile/classFileParser.hpp
@@ -72,6 +72,8 @@ class FieldLayoutInfo : public ResourceObj {
public:
OopMapBlocksBuilder* oop_map_blocks;
int _instance_size;
+ int _hash_offset;
+ int _static_hash_offset;
int _nonstatic_field_size;
int _static_field_size;
bool _has_nonstatic_fields;
@@ -500,6 +502,8 @@ class ClassFileParser {
int static_field_size() const;
int total_oop_map_count() const;
jint layout_size() const;
+ int hash_offset() const;
+ int static_hash_offset() const;
int vtable_size() const { return _vtable_size; }
int itable_size() const { return _itable_size; }
diff --git a/src/hotspot/share/classfile/fieldLayoutBuilder.cpp b/src/hotspot/share/classfile/fieldLayoutBuilder.cpp
index f9353465ca7..d26e19c2f81 100644
--- a/src/hotspot/share/classfile/fieldLayoutBuilder.cpp
+++ b/src/hotspot/share/classfile/fieldLayoutBuilder.cpp
@@ -209,6 +209,23 @@ void FieldLayout::add(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* star
}
}
+// Finds a slot for the identity hash-code.
+// Same basic algorithm as above add() method, but simplified
+// and does not actually insert the field.
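+// Returns the offset of the first empty slot that can hold the 4-byte
+// i-hash, or the offset of the trailing block when no gap fits.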
+int FieldLayout::find_hash_offset() {
+ LayoutRawBlock* start = this->_start;
+ LayoutRawBlock* last = last_block();
+ LayoutRawBlock* cursor = start;
+ while (cursor != last) {
+ assert(cursor != nullptr, "Sanity check");
+ if (cursor->kind() == LayoutRawBlock::EMPTY && cursor->fit(4, 1)) {
+ break;
+ }
+ cursor = cursor->next_block();
+ }
+ return cursor->offset();
+}
+
// Used for classes with hard coded field offsets, insert a field at the specified offset */
void FieldLayout::add_field_at_offset(LayoutRawBlock* block, int offset, LayoutRawBlock* start) {
assert(block != nullptr, "Sanity check");
@@ -674,6 +691,10 @@ void FieldLayoutBuilder::epilogue() {
_info->oop_map_blocks = nonstatic_oop_maps;
_info->_instance_size = align_object_size(instance_end / wordSize);
+ if (UseCompactObjectHeaders) {
+ _info->_hash_offset = _layout->find_hash_offset();
+ _info->_static_hash_offset = _static_layout->find_hash_offset();
+ }
_info->_static_field_size = static_fields_size;
_info->_nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
_info->_has_nonstatic_fields = _has_nonstatic_fields;
diff --git a/src/hotspot/share/classfile/fieldLayoutBuilder.hpp b/src/hotspot/share/classfile/fieldLayoutBuilder.hpp
index 9b0d80b2a55..71d9f6fe4c9 100644
--- a/src/hotspot/share/classfile/fieldLayoutBuilder.hpp
+++ b/src/hotspot/share/classfile/fieldLayoutBuilder.hpp
@@ -185,6 +185,7 @@ class FieldLayout : public ResourceObj {
LayoutRawBlock* first_field_block();
void add(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start = nullptr);
+ int find_hash_offset();
void add_field_at_offset(LayoutRawBlock* blocks, int offset, LayoutRawBlock* start = nullptr);
void add_contiguously(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start = nullptr);
LayoutRawBlock* insert_field_block(LayoutRawBlock* slot, LayoutRawBlock* block);
diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp
index c8f6276cb01..27fb05e2929 100644
--- a/src/hotspot/share/classfile/javaClasses.cpp
+++ b/src/hotspot/share/classfile/javaClasses.cpp
@@ -951,6 +951,8 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
int java_fields;
int injected_fields;
InstanceKlass* ik = InstanceKlass::cast(k);
+ ik->fix_static_hash_offset();
+
GrowableArray<FieldInfo>* fields =
FieldInfoStream::create_FieldInfoArray(ik->fieldinfo_stream(),
&java_fields, &injected_fields);
@@ -1055,7 +1057,7 @@ void java_lang_Class::allocate_fixup_lists() {
void java_lang_Class::allocate_mirror(Klass* k, bool is_scratch, Handle protection_domain, Handle classData,
Handle& mirror, Handle& comp_mirror, TRAPS) {
// Allocate mirror (java.lang.Class instance)
- oop mirror_oop = InstanceMirrorKlass::cast(vmClasses::Class_klass())->allocate_instance(k, CHECK);
+ oop mirror_oop = InstanceMirrorKlass::cast(vmClasses::Class_klass())->allocate_instance(k, is_scratch, CHECK);
mirror = Handle(THREAD, mirror_oop);
// Setup indirection from mirror->klass
@@ -1349,10 +1351,10 @@ void java_lang_Class::set_source_file(oop java_class, oop source_file) {
java_class->obj_field_put(_source_file_offset, source_file);
}
-oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
+oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, bool is_scratch, TRAPS) {
// This should be improved by adding a field at the Java level or by
// introducing a new VM klass (see comment in ClassFileParser)
- oop java_class = InstanceMirrorKlass::cast(vmClasses::Class_klass())->allocate_instance(nullptr, CHECK_NULL);
+ oop java_class = InstanceMirrorKlass::cast(vmClasses::Class_klass())->allocate_instance(nullptr, is_scratch, CHECK_NULL);
if (type != T_VOID) {
Klass* aklass = Universe::typeArrayKlass(type);
assert(aklass != nullptr, "correct bootstrap");
diff --git a/src/hotspot/share/classfile/javaClasses.hpp b/src/hotspot/share/classfile/javaClasses.hpp
index 0d0fa5954b1..5cfa8b0251e 100644
--- a/src/hotspot/share/classfile/javaClasses.hpp
+++ b/src/hotspot/share/classfile/javaClasses.hpp
@@ -280,7 +280,7 @@ class java_lang_Class : AllStatic {
static void create_mirror(Klass* k, Handle class_loader, Handle module,
Handle protection_domain, Handle classData, TRAPS);
static void fixup_mirror(Klass* k, TRAPS);
- static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
+ static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, bool is_scratch, TRAPS);
// Archiving
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index d0879e9967c..ef518383df8 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -1051,7 +1051,8 @@ void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id)
Prefetch::read(curr, interval);
oop obj = cast_to_oop(curr);
size_t size = obj->oop_iterate_size(&cl);
- assert(size == obj->size(), "sanity");
+ if (UseCompactObjectHeaders) {
+ log_trace(gc)("Scan object: " PTR_FORMAT ", with size: " SIZE_FORMAT, p2i(obj), size);
+ }
+ assert(size == obj->size(), "sanity: size: " SIZE_FORMAT ", obj-size: " SIZE_FORMAT, size, obj->size());
curr += size;
}
}
diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
index bee3656ead5..fa0d1fcd408 100644
--- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
@@ -60,10 +60,12 @@ void G1FullGCCompactTask::copy_object_to_new_location(oop obj) {
// Copy object and reinit its mark.
HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
HeapWord* destination = cast_from_oop<HeapWord*>(FullGCForwarding::forwardee(obj));
+ assert(obj_addr != destination, "only copy actually-moving objects");
Copy::aligned_conjoint_words(obj_addr, destination, size);
// There is no need to transform stack chunks - marking already did that.
cast_to_oop(destination)->init_mark();
+ cast_to_oop(destination)->initialize_hash_if_necessary(obj);
assert(cast_to_oop(destination)->klass() != nullptr, "should have a class");
}
diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
index ddd1b7c0999..c06312fa97e 100644
--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp
@@ -97,9 +97,14 @@ void G1FullGCCompactionPoint::switch_region() {
void G1FullGCCompactionPoint::forward(oop object, size_t size) {
assert(_current_region != nullptr, "Must have been initialized");
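+ // An object that will not move (it is already at the compaction top)
+ // cannot grow, so use its old size in that case; otherwise reserve the
+ // (possibly larger) copy size for the i-hash expansion.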
+ size_t old_size = size;
+ size_t new_size = object->copy_size(old_size, object->mark());
+ size = cast_from_oop<HeapWord*>(object) != _compaction_top ? new_size : old_size;
+
// Ensure the object fit in the current region.
while (!object_will_fit(size)) {
switch_region();
+ size = cast_from_oop<HeapWord*>(object) != _compaction_top ? new_size : old_size;
}
// Store a forwarding pointer if the object should be moved.
@@ -154,8 +159,10 @@ void G1FullGCCompactionPoint::forward_humongous(G1HeapRegion* hr) {
assert(hr->is_starts_humongous(), "Sanity!");
oop obj = cast_to_oop(hr->bottom());
- size_t obj_size = obj->size();
- uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size);
+ size_t old_size = obj->size();
+ size_t new_size = obj->copy_size(old_size, obj->mark());
+
+ uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(new_size);
if (!has_regions()) {
return;
@@ -173,6 +180,7 @@ void G1FullGCCompactionPoint::forward_humongous(G1HeapRegion* hr) {
preserved_stack()->push_if_necessary(obj, obj->mark());
G1HeapRegion* dest_hr = _compaction_regions->at(range_begin);
+ assert(hr->bottom() != dest_hr->bottom(), "assuming actual humongous move");
FullGCForwarding::forward_to(obj, cast_to_oop(dest_hr->bottom()));
assert(FullGCForwarding::is_forwarded(obj), "Object must be forwarded!");
diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
index b20593ac290..3a5bf585720 100644
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp
@@ -69,7 +69,7 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
if (FullGCForwarding::is_forwarded(obj)) {
oop forwardee = FullGCForwarding::forwardee(obj);
// Forwarded, just update.
- assert(G1CollectedHeap::heap()->is_in_reserved(forwardee), "should be in object space");
+ assert(G1CollectedHeap::heap()->is_in_reserved(forwardee), "should be in object space, obj: " PTR_FORMAT ", forwardee: " PTR_FORMAT ", mark: " INTPTR_FORMAT ", pre: " INTPTR_FORMAT ", post: " INTPTR_FORMAT, p2i(obj), p2i(forwardee), obj->mark().value(), *(cast_from_oop<intptr_t*>(obj) - 1), *(cast_from_oop<intptr_t*>(obj) + 1));
RawAccess::oop_store(p, forwardee);
}
diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.cpp b/src/hotspot/share/gc/g1/g1HeapRegion.cpp
index 9cb2650f820..e6111b438c1 100644
--- a/src/hotspot/share/gc/g1/g1HeapRegion.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapRegion.cpp
@@ -738,6 +738,7 @@ void G1HeapRegion::object_iterate(ObjectClosure* blk) {
HeapWord* p = bottom();
while (p < top()) {
if (block_is_obj(p, parsable_bottom())) {
+ log_trace(gc)("Iterate object: " PTR_FORMAT, p2i(p));
blk->do_object(cast_to_oop(p));
}
p += block_size(p);
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
index f3b7e87bc78..05704360b65 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
@@ -478,7 +478,8 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
? old_mark.klass()
: old->klass();
- const size_t word_sz = old->size_given_klass(klass);
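+ // old_size is the from-space size; word_sz additionally reserves room
+ // for the i-hash field if the object is hashed but not yet expanded.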
+ const size_t old_size = old->size_given_mark_and_klass(old_mark, klass);
+ const size_t word_sz = old->copy_size(old_size, old_mark);
// JNI only allows pinning of typeArrays, so we only need to keep those in place.
if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
@@ -514,9 +515,13 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
}
+ if (old_size != word_sz) {
+ log_trace(gc)("expanding obj: " PTR_FORMAT ", old_size: " SIZE_FORMAT ", new object: " PTR_FORMAT ", word_sz: " SIZE_FORMAT, p2i(old), old_size, p2i(obj_ptr), word_sz);
+ }
+
// We're going to allocate linearly, so might as well prefetch ahead.
Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
- Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);
+ Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, old_size);
const oop obj = cast_to_oop(obj_ptr);
// Because the forwarding is done with memory_order_relaxed there is no
@@ -533,6 +538,9 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
_surviving_young_words[young_index] += word_sz;
}
+ // Initialize i-hash if necessary
+ obj->initialize_hash_if_necessary(old);
+
if (dest_attr.is_young()) {
if (age < markWord::max_age) {
age++;
diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
index ed517c06a40..bf69003ef2c 100644
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
@@ -177,7 +177,8 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
? test_mark.klass()
: o->klass();
- size_t new_obj_size = o->size_given_klass(klass);
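+ // The to-space allocation may need one extra word for the i-hash field
+ // of a hashed-but-not-yet-expanded object.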
+ size_t old_obj_size = o->size_given_mark_and_klass(test_mark, klass);
+ size_t new_obj_size = o->copy_size(old_obj_size, test_mark);
// Find the objects age, MT safe.
uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp
index 3792bb5a721..89d8fe2cf6f 100644
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp
@@ -41,7 +41,7 @@
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
-#include "gc/shared/space.hpp"
+#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
@@ -698,8 +698,8 @@ void DefNewGeneration::remove_forwarding_pointers() {
// Will enter Full GC soon due to failed promotion. Must reset the mark word
// of objs in young-gen so that no objs are marked (forwarded) when Full GC
// starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
- struct ResetForwardedMarkWord : ObjectClosure {
- void do_object(oop obj) override {
+ struct ResetForwardedMarkWord {
+ size_t do_object(oop obj) {
if (obj->is_self_forwarded()) {
obj->unset_self_forwarded();
} else if (obj->is_forwarded()) {
@@ -707,10 +707,11 @@ void DefNewGeneration::remove_forwarding_pointers() {
// Needed for object iteration to work properly.
obj->set_mark(obj->forwardee()->prototype_mark());
}
+ return obj->size();
}
} cl;
- eden()->object_iterate(&cl);
- from()->object_iterate(&cl);
+ eden()->object_iterate_sized(&cl);
+ from()->object_iterate_sized(&cl);
}
void DefNewGeneration::handle_promotion_failure(oop old) {
@@ -737,7 +738,9 @@ void DefNewGeneration::handle_promotion_failure(oop old) {
oop DefNewGeneration::copy_to_survivor_space(oop old) {
assert(is_in_reserved(old) && !old->is_forwarded(),
"shouldn't be scavenging this oop");
- size_t s = old->size();
+ size_t old_size = old->size();
+ size_t s = old->copy_size(old_size, old->mark());
+
oop obj = nullptr;
// Try allocating obj in to-space (unless too old)
@@ -762,7 +765,7 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
Prefetch::write(obj, interval);
// Copy obj
- Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);
+ Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), old_size);
ContinuationGCSupport::transform_stack_chunk(obj);
@@ -772,8 +775,10 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
age_table()->add(obj, s);
}
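+ // initialize_hash_if_necessary() reports whether the copy grew by a hash
+ // word; the forwarding below records that transition.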
+ bool expanded = obj->initialize_hash_if_necessary(old);
+
// Done, insert forward pointer to obj in this header
- old->forward_to(obj);
+ old->forward_to(obj, expanded);
if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
// Record old; request adds a new weak reference, which reference
diff --git a/src/hotspot/share/gc/serial/serialFullGC.cpp b/src/hotspot/share/gc/serial/serialFullGC.cpp
index 1a2544003d5..f4d77b528a5 100644
--- a/src/hotspot/share/gc/serial/serialFullGC.cpp
+++ b/src/hotspot/share/gc/serial/serialFullGC.cpp
@@ -191,7 +191,8 @@ class Compacter {
_spaces[index]._first_dead = first_dead;
}
- HeapWord* alloc(size_t words) {
+ HeapWord* alloc(size_t old_size, size_t new_size, HeapWord* old_obj) {
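+ // If the object stays where it is (old_obj is already at the compaction
+ // top), it keeps its old size; only a moving object may use the expanded
+ // size.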
+ size_t words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
while (true) {
if (words <= pointer_delta(_spaces[_index]._space->end(),
_spaces[_index]._compaction_top)) {
@@ -207,6 +208,7 @@ class Compacter {
// out-of-memory in this space
_index++;
assert(_index < max_num_spaces - 1, "the last space should not be used");
+ words = (old_obj == _spaces[_index]._compaction_top) ? old_size : new_size;
}
}
@@ -258,12 +260,16 @@ class Compacter {
oop obj = cast_to_oop(addr);
oop new_obj = FullGCForwarding::forwardee(obj);
HeapWord* new_addr = cast_from_oop(new_obj);
- assert(addr != new_addr, "inv");
- prefetch_write_copy(new_addr);
size_t obj_size = obj->size();
- Copy::aligned_conjoint_words(addr, new_addr, obj_size);
+ if (addr != new_addr) {
+ prefetch_write_copy(new_addr);
+ Copy::aligned_conjoint_words(addr, new_addr, obj_size);
+ }
new_obj->init_mark();
+ if (addr != new_addr) {
+ new_obj->initialize_hash_if_necessary(obj);
+ }
return obj_size;
}
@@ -299,21 +305,27 @@ class Compacter {
while (cur_addr < top) {
oop obj = cast_to_oop(cur_addr);
size_t obj_size = obj->size();
+ size_t new_size = obj->copy_size(obj_size, obj->mark());
if (obj->is_gc_marked()) {
- HeapWord* new_addr = alloc(obj_size);
+ HeapWord* new_addr = alloc(obj_size, new_size, cur_addr);
forward_obj(obj, new_addr);
+ assert(obj->size() == obj_size, "size must not change after forwarding");
+ log_trace(gc)("Regular alloc move: " PTR_FORMAT ", size: " SIZE_FORMAT ", mark after: " INTPTR_FORMAT, p2i(cur_addr), obj_size, obj->mark().value());
cur_addr += obj_size;
} else {
// Skipping the current known-unmarked obj
HeapWord* next_live_addr = find_next_live_addr(cur_addr + obj_size, top);
if (dead_spacer.insert_deadspace(cur_addr, next_live_addr)) {
// Register space for the filler obj
- alloc(pointer_delta(next_live_addr, cur_addr));
+ size_t size = pointer_delta(next_live_addr, cur_addr);
+ log_trace(gc)("Fill dead: " PTR_FORMAT ", next_live: " PTR_FORMAT, p2i(cur_addr), p2i(next_live_addr));
+ alloc(size, size, cur_addr);
} else {
if (!record_first_dead_done) {
record_first_dead(i, cur_addr);
record_first_dead_done = true;
}
+ log_trace(gc)("Skip dead: " PTR_FORMAT ", next_live: " PTR_FORMAT, p2i(cur_addr), p2i(next_live_addr));
*(HeapWord**)cur_addr = next_live_addr;
}
cur_addr = next_live_addr;
@@ -337,9 +349,11 @@ class Compacter {
prefetch_write_scan(cur_addr);
if (cur_addr < first_dead || cast_to_oop(cur_addr)->is_gc_marked()) {
size_t size = cast_to_oop(cur_addr)->oop_iterate_size(&SerialFullGC::adjust_pointer_closure);
+ log_trace(gc)("adjust oop: " PTR_FORMAT ", size: " SIZE_FORMAT, p2i(cur_addr), size);
cur_addr += size;
} else {
assert(*(HeapWord**)cur_addr > cur_addr, "forward progress");
+ log_trace(gc)("adjust oop: " PTR_FORMAT ", next obj: " PTR_FORMAT, p2i(cur_addr), p2i(*(HeapWord**)cur_addr));
cur_addr = *(HeapWord**)cur_addr;
}
}
@@ -594,7 +608,8 @@ void SerialFullGC::mark_object(oop obj) {
// some marks may contain information we need to preserve so we store them away
// and overwrite the mark. We'll restore it at the end of serial full GC.
markWord mark = obj->mark();
- obj->set_mark(obj->prototype_mark().set_marked());
+ assert((!UseCompactObjectHeaders) || mark.narrow_klass() != 0, "null narrowKlass: " INTPTR_FORMAT, mark.value());
+ obj->set_mark(mark.set_marked());
ContinuationGCSupport::transform_stack_chunk(obj);
diff --git a/src/hotspot/share/gc/serial/tenuredGeneration.cpp b/src/hotspot/share/gc/serial/tenuredGeneration.cpp
index a00eb369980..43c333e6f79 100644
--- a/src/hotspot/share/gc/serial/tenuredGeneration.cpp
+++ b/src/hotspot/share/gc/serial/tenuredGeneration.cpp
@@ -391,7 +391,7 @@ bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes)
}
oop TenuredGeneration::allocate_for_promotion(oop obj, size_t obj_size) {
- assert(obj_size == obj->size(), "bad obj_size passed in");
+ assert(obj_size == obj->size() || UseCompactObjectHeaders, "bad obj_size passed in");
#ifndef PRODUCT
if (SerialHeap::heap()->promotion_should_fail()) {
diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp
index 036bc0230c8..1f86f281ff2 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -282,7 +282,7 @@ class CollectedHeap : public CHeapObj<mtGC> {
oop obj_allocate(Klass* klass, size_t size, TRAPS);
virtual oop array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS);
- oop class_allocate(Klass* klass, size_t size, TRAPS);
+ oop class_allocate(Klass* klass, size_t size, size_t base_size, TRAPS);
// Utilities for turning raw memory into filler objects.
//
diff --git a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp
index cd8a2b89d32..d671fdb7736 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp
@@ -41,8 +41,8 @@ inline oop CollectedHeap::array_allocate(Klass* klass, size_t size, int length,
return allocator.allocate();
}
-inline oop CollectedHeap::class_allocate(Klass* klass, size_t size, TRAPS) {
- ClassAllocator allocator(klass, size, THREAD);
+inline oop CollectedHeap::class_allocate(Klass* klass, size_t size, size_t base_size, TRAPS) {
+ ClassAllocator allocator(klass, size, base_size, THREAD);
return allocator.allocate();
}
diff --git a/src/hotspot/share/gc/shared/memAllocator.cpp b/src/hotspot/share/gc/shared/memAllocator.cpp
index f96ec50e3b0..34ac6945061 100644
--- a/src/hotspot/share/gc/shared/memAllocator.cpp
+++ b/src/hotspot/share/gc/shared/memAllocator.cpp
@@ -436,8 +436,8 @@ oop ClassAllocator::initialize(HeapWord* mem) const {
// Set oop_size field before setting the _klass field because a
// non-null _klass field indicates that the object is parsable by
// concurrent GC.
- assert(_word_size > 0, "oop_size must be positive.");
+ assert(_base_size > 0, "oop_size must be positive.");
mem_clear(mem);
- java_lang_Class::set_oop_size(mem, _word_size);
+ java_lang_Class::set_oop_size(mem, _base_size);
return finish(mem);
}
diff --git a/src/hotspot/share/gc/shared/memAllocator.hpp b/src/hotspot/share/gc/shared/memAllocator.hpp
index ec67616adba..2c1ce1230ac 100644
--- a/src/hotspot/share/gc/shared/memAllocator.hpp
+++ b/src/hotspot/share/gc/shared/memAllocator.hpp
@@ -108,9 +108,11 @@ class ObjArrayAllocator: public MemAllocator {
};
class ClassAllocator: public MemAllocator {
+ size_t _base_size;
public:
- ClassAllocator(Klass* klass, size_t word_size, Thread* thread = Thread::current())
- : MemAllocator(klass, word_size, thread) {}
+ ClassAllocator(Klass* klass, size_t word_size, size_t base_size, Thread* thread = Thread::current())
+ : MemAllocator(klass, word_size, thread),
+ _base_size(base_size) {}
virtual oop initialize(HeapWord* mem) const;
};
diff --git a/src/hotspot/share/gc/shared/preservedMarks.inline.hpp b/src/hotspot/share/gc/shared/preservedMarks.inline.hpp
index fc732fee534..54082289d3f 100644
--- a/src/hotspot/share/gc/shared/preservedMarks.inline.hpp
+++ b/src/hotspot/share/gc/shared/preservedMarks.inline.hpp
@@ -57,7 +57,7 @@ inline PreservedMarks::PreservedMarks()
0 /* max_cache_size */) { }
void PreservedMark::set_mark() const {
- _o->set_mark(_m);
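+ // Do not let the preserved mark clobber the hashctrl bits: the hash
+ // state may have changed (e.g. been expanded) while the mark was stored.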
+ _o->set_mark(_m.copy_hashctrl_from(_o->mark()));
}
#endif // SHARE_GC_SHARED_PRESERVEDMARKS_INLINE_HPP
diff --git a/src/hotspot/share/gc/shared/space.cpp b/src/hotspot/share/gc/shared/space.cpp
index 0eb9d2520b7..026123a918d 100644
--- a/src/hotspot/share/gc/shared/space.cpp
+++ b/src/hotspot/share/gc/shared/space.cpp
@@ -97,10 +97,13 @@ void ContiguousSpace::verify() const {
void ContiguousSpace::object_iterate(ObjectClosure* blk) {
HeapWord* addr = bottom();
+ oop last = nullptr;
while (addr < top()) {
oop obj = cast_to_oop(addr);
blk->do_object(obj);
+ assert(!UseCompactObjectHeaders || obj->mark().narrow_klass() != 0, "null narrow klass, mark: " INTPTR_FORMAT ", last mark: " INTPTR_FORMAT, obj->mark().value(), last != nullptr ? last->mark().value() : 0);
addr += obj->size();
+ last = obj;
}
}
diff --git a/src/hotspot/share/gc/shared/space.hpp b/src/hotspot/share/gc/shared/space.hpp
index 5d361ce8e50..1e3b9f0d711 100644
--- a/src/hotspot/share/gc/shared/space.hpp
+++ b/src/hotspot/share/gc/shared/space.hpp
@@ -120,6 +120,8 @@ class ContiguousSpace: public CHeapObj {
// Iteration
void object_iterate(ObjectClosure* blk);
+ template <typename CL>
+ void object_iterate_sized(CL* blk);
// Addresses for inlined allocation
HeapWord** top_addr() { return &_top; }
diff --git a/src/hotspot/share/gc/shared/space.inline.hpp b/src/hotspot/share/gc/shared/space.inline.hpp
new file mode 100644
index 00000000000..7a0cf7dca42
--- /dev/null
+++ b/src/hotspot/share/gc/shared/space.inline.hpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_SPACE_INLINE_HPP
+#define SHARE_GC_SHARED_SPACE_INLINE_HPP
+
+#include "gc/shared/space.hpp"
+
+template <typename CL>
+void ContiguousSpace::object_iterate_sized(CL* blk) {
+ HeapWord* addr = bottom();
+ oop last = nullptr;
+ while (addr < top()) {
+ oop obj = cast_to_oop(addr);
+ size_t size = blk->do_object(obj);
+ assert(!UseCompactObjectHeaders || obj->mark().narrow_klass() != 0, "null narrow klass, mark: " INTPTR_FORMAT ", last mark: " INTPTR_FORMAT, obj->mark().value(), last != nullptr ? last->mark().value() : 0);
+ addr += size;
+ last = obj;
+ }
+}
+
+#endif // SHARE_GC_SHARED_SPACE_INLINE_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.hpp b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.hpp
index fb57c55e09a..2a7b01575ac 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.hpp
@@ -29,6 +29,9 @@
#include "utilities/globalDefinitions.hpp"
class ShenandoahForwarding {
+private:
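+ // All three low mark bits set (both lock bits plus the self-forwarded
+ // bit): tags a forwarding that also transitions the object from
+ // hashed-not-expanded to hashed-expanded.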
+ static const uintptr_t FWDED_HASH_TRANSITION = 0b111;
+
public:
/* Gets forwardee from the given object.
*/
@@ -51,6 +54,7 @@ class ShenandoahForwarding {
* Returns true if the object is forwarded, false otherwise.
*/
static inline bool is_forwarded(oop obj);
+ static inline bool is_forwarded(markWord m);
/* Tries to atomically update forwardee in $holder object to $update.
* Assumes $holder points at itself.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp
index ccdbb81f33b..222b052541e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp
@@ -28,6 +28,7 @@
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "oops/klass.hpp"
#include "oops/markWord.hpp"
#include "runtime/javaThread.hpp"
@@ -71,17 +72,24 @@ inline oop ShenandoahForwarding::get_forwardee(oop obj) {
return get_forwardee_raw_unchecked(obj);
}
+inline bool ShenandoahForwarding::is_forwarded(markWord m) {
+ return (m.value() & (markWord::lock_mask_in_place | markWord::self_fwd_mask_in_place)) > markWord::monitor_value;
+}
+
inline bool ShenandoahForwarding::is_forwarded(oop obj) {
- return obj->mark().is_marked();
+ return is_forwarded(obj->mark());
}
inline oop ShenandoahForwarding::try_update_forwardee(oop obj, oop update) {
markWord old_mark = obj->mark();
- if (old_mark.is_marked()) {
+ if (is_forwarded(old_mark)) {
return cast_to_oop(old_mark.clear_lock_bits().to_pointer());
}
markWord new_mark = markWord::encode_pointer_as_mark(update);
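+ // Tag the forwarding if this copy also expands the i-hash, so that
+ // size() can tell that the from-space copy still has the smaller size.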
+ if (UseCompactObjectHeaders && old_mark.is_hashed_not_expanded()) {
+ new_mark = markWord(new_mark.value() | FWDED_HASH_TRANSITION);
+ }
markWord prev_mark = obj->cas_set_mark(new_mark, old_mark, memory_order_conservative);
if (prev_mark == old_mark) {
return update;
@@ -104,7 +112,25 @@ inline Klass* ShenandoahForwarding::klass(oop obj) {
}
inline size_t ShenandoahForwarding::size(oop obj) {
- return obj->size_given_klass(klass(obj));
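+ // Returns the size of the from-space copy. For a forwarded object the
+ // to-space mark is authoritative; but when the hash expansion happens as
+ // part of this forwarding (FWDED_HASH_TRANSITION), the from-space copy
+ // has not grown yet and keeps its base size.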
+ markWord mark = obj->mark();
+ if (is_forwarded(mark)) {
+ oop fwd = obj->forwardee(mark);
+ markWord fwd_mark = fwd->mark();
+ Klass* klass = UseCompactObjectHeaders ? fwd_mark.klass() : fwd->klass();
+ size_t size = fwd->base_size_given_klass(klass);
+ if (UseCompactObjectHeaders) {
+ if ((mark.value() & FWDED_HASH_TRANSITION) != FWDED_HASH_TRANSITION) {
+ if (fwd_mark.is_hashed_expanded() && klass->expand_for_hash(fwd)) {
+ // log_trace(gc)("Extended size for object: " PTR_FORMAT " base-size: " SIZE_FORMAT ", mark: " PTR_FORMAT, p2i(fwd), size, fwd_mark.value());
+ size = align_object_size(size + 1);
+ }
+ }
+ }
+ return size;
+ } else {
+ Klass* klass = UseCompactObjectHeaders ? mark.klass() : obj->klass();
+ return obj->size_given_mark_and_klass(mark, klass);
+ }
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_INLINE_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index 8aee12923eb..4e9ddaba73e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -370,7 +370,9 @@ class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
- size_t obj_size = p->size();
+ size_t old_size = p->size();
+ size_t new_size = p->copy_size(old_size, p->mark());
+ size_t obj_size = _compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
if (_compact_point + obj_size > _to_region->end()) {
finish();
@@ -388,6 +390,7 @@ class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
assert(new_to_region != nullptr, "must not be null");
_to_region = new_to_region;
_compact_point = _to_region->bottom();
+ obj_size = _compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
}
// Object fits into current region, record new location, if object does not move:
@@ -907,6 +910,7 @@ class ShenandoahCompactObjectsClosure : public ObjectClosure {
ContinuationGCSupport::relativize_stack_chunk(new_obj);
new_obj->init_mark();
+ new_obj->initialize_hash_if_necessary(p);
}
}
};
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index c1bc9dc6616..ffe3f3027af 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -1204,12 +1204,18 @@ oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapReg
assert(from_region->is_young(), "Only expect evacuations from young in this mode");
bool alloc_from_lab = true;
HeapWord* copy = nullptr;
- size_t size = ShenandoahForwarding::size(p);
+
+ markWord mark = p->mark();
+ if (ShenandoahForwarding::is_forwarded(mark)) {
+ return ShenandoahForwarding::get_forwardee(p);
+ }
+ size_t old_size = ShenandoahForwarding::size(p);
+ size_t size = p->copy_size(old_size, mark);
#ifdef ASSERT
if (ShenandoahOOMDuringEvacALot &&
(os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
- copy = nullptr;
+ copy = nullptr;
} else {
#endif
if (UseTLAB) {
@@ -1234,13 +1240,14 @@ oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapReg
}
// Copy the object:
- Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
+ Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, old_size);
// Try to install the new forwarding pointer.
oop copy_val = cast_to_oop(copy);
oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
if (result == copy_val) {
// Successfully evacuated. Our copy is now the public one!
+ copy_val->initialize_hash_if_necessary(p);
ContinuationGCSupport::relativize_stack_chunk(copy_val);
shenandoah_assert_correct(nullptr, copy_val);
return copy_val;
diff --git a/src/hotspot/share/gc/z/zPage.hpp b/src/hotspot/share/gc/z/zPage.hpp
index 9b6c155f77d..93081781dfc 100644
--- a/src/hotspot/share/gc/z/zPage.hpp
+++ b/src/hotspot/share/gc/z/zPage.hpp
class ZPage : public CHeapObj<mtGC> {
zoffset_end end() const;
size_t size() const;
zoffset_end top() const;
+ zaddress top_addr() const;
size_t remaining() const;
size_t used() const;
diff --git a/src/hotspot/share/gc/z/zPage.inline.hpp b/src/hotspot/share/gc/z/zPage.inline.hpp
index d8ecad57190..ea4ea2838b8 100644
--- a/src/hotspot/share/gc/z/zPage.inline.hpp
+++ b/src/hotspot/share/gc/z/zPage.inline.hpp
@@ -474,6 +474,10 @@ inline zaddress ZPage::alloc_object(size_t size) {
return ZOffset::address(to_zoffset(addr));
}
+inline zaddress ZPage::top_addr() const {
+ return ZOffset::address(to_zoffset(top()));
+}
+
inline zaddress ZPage::alloc_object_atomic(size_t size) {
assert(is_allocating(), "Invalid state");
diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp
index 7f69c0752bc..cb8ab57946a 100644
--- a/src/hotspot/share/gc/z/zRelocate.cpp
+++ b/src/hotspot/share/gc/z/zRelocate.cpp
@@ -324,7 +324,8 @@ static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_add
assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");
// Allocate object
- const size_t size = ZUtils::object_size(from_addr);
+ const size_t old_size = ZUtils::object_size(from_addr);
+ const size_t size = ZUtils::copy_size(from_addr, old_size);
ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());
@@ -334,9 +335,11 @@ static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_add
// Allocation failed
return zaddress::null;
}
+ assert(to_addr != from_addr, "addresses must be different");
// Copy object
- ZUtils::object_copy_disjoint(from_addr, to_addr, size);
+ ZUtils::object_copy_disjoint(from_addr, to_addr, old_size);
+ ZUtils::initialize_hash_if_necessary(to_addr, from_addr);
// Insert forwarding
const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);
@@ -588,11 +591,13 @@ class ZRelocateWork : public StackObj {
}
}
- zaddress try_relocate_object_inner(zaddress from_addr) {
+ zaddress try_relocate_object_inner(zaddress from_addr, size_t old_size) {
ZForwardingCursor cursor;
- const size_t size = ZUtils::object_size(from_addr);
ZPage* const to_page = target(_forwarding->to_age());
+ const size_t new_size = ZUtils::copy_size(from_addr, old_size);
+ const zaddress top = to_page != nullptr ? to_page->top_addr() : zaddress::null;
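+ // Relocating an object that sits exactly at the to-page top means an
+ // in-place copy, which cannot grow.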
+ const size_t size = top == from_addr ? old_size : new_size;
// Lookup forwarding
{
@@ -610,13 +615,20 @@ class ZRelocateWork : public StackObj {
// Allocation failed
return zaddress::null;
}
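+ // The in-place decision above was based on the pre-allocation top; if
+ // the actual allocation disagrees (another thread raced us), undo the
+ // allocation and treat it as a failure.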
+ if (old_size != new_size && ((top == from_addr) != (allocated_addr == from_addr))) {
+ _allocator->undo_alloc_object(to_page, allocated_addr, size);
+ return zaddress::null;
+ }
// Copy object. Use conjoint copying if we are relocating
// in-place and the new object overlaps with the old object.
- if (_forwarding->in_place_relocation() && allocated_addr + size > from_addr) {
- ZUtils::object_copy_conjoint(from_addr, allocated_addr, size);
+ if (_forwarding->in_place_relocation() && allocated_addr + old_size > from_addr) {
+ ZUtils::object_copy_conjoint(from_addr, allocated_addr, old_size);
} else {
- ZUtils::object_copy_disjoint(from_addr, allocated_addr, size);
+ ZUtils::object_copy_disjoint(from_addr, allocated_addr, old_size);
+ }
+ if (from_addr != allocated_addr) {
+ ZUtils::initialize_hash_if_necessary(allocated_addr, from_addr);
}
// Insert forwarding
@@ -630,7 +642,7 @@ class ZRelocateWork : public StackObj {
return to_addr;
}
- void update_remset_old_to_old(zaddress from_addr, zaddress to_addr) const {
+ void update_remset_old_to_old(zaddress from_addr, zaddress to_addr, size_t size) const {
// Old-to-old relocation - move existing remset bits
// If this is called for an in-place relocated page, then this code has the
@@ -652,10 +664,8 @@ class ZRelocateWork : public StackObj {
assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
assert(to_page->is_in(to_addr), "Must be");
-
- // Read the size from the to-object, since the from-object
- // could have been overwritten during in-place relocation.
- const size_t size = ZUtils::object_size(to_addr);
+ assert(size <= ZUtils::object_size(to_addr), "old size must be <= new size");
+ assert(size > 0, "size must be set");
// If a young generation collection started while the old generation
// relocated objects, the remember set bits were flipped from "current"
@@ -780,7 +790,7 @@ class ZRelocateWork : public StackObj {
ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
}
- void update_remset_for_fields(zaddress from_addr, zaddress to_addr) const {
+ void update_remset_for_fields(zaddress from_addr, zaddress to_addr, size_t size) const {
if (_forwarding->to_age() != ZPageAge::old) {
// No remembered set in young pages
return;
@@ -788,7 +798,7 @@ class ZRelocateWork : public StackObj {
// Need to deal with remset when moving objects to the old generation
if (_forwarding->from_age() == ZPageAge::old) {
- update_remset_old_to_old(from_addr, to_addr);
+ update_remset_old_to_old(from_addr, to_addr, size);
return;
}
@@ -797,13 +807,14 @@ class ZRelocateWork : public StackObj {
}
bool try_relocate_object(zaddress from_addr) {
- const zaddress to_addr = try_relocate_object_inner(from_addr);
+ size_t size = ZUtils::object_size(from_addr);
+ const zaddress to_addr = try_relocate_object_inner(from_addr, size);
if (is_null(to_addr)) {
return false;
}
- update_remset_for_fields(from_addr, to_addr);
+ update_remset_for_fields(from_addr, to_addr, size);
return true;
}
diff --git a/src/hotspot/share/gc/z/zUtils.hpp b/src/hotspot/share/gc/z/zUtils.hpp
index 59e789d5b38..21bc184f722 100644
--- a/src/hotspot/share/gc/z/zUtils.hpp
+++ b/src/hotspot/share/gc/z/zUtils.hpp
@@ -42,6 +42,9 @@ class ZUtils : public AllStatic {
// Object
static size_t object_size(zaddress addr);
+ static size_t copy_size(zaddress addr, size_t size);
+ static void initialize_hash_if_necessary(zaddress to_addr, zaddress from_addr);
+
static void object_copy_disjoint(zaddress from, zaddress to, size_t size);
static void object_copy_conjoint(zaddress from, zaddress to, size_t size);
diff --git a/src/hotspot/share/gc/z/zUtils.inline.hpp b/src/hotspot/share/gc/z/zUtils.inline.hpp
index b6acf12df30..be769cc21af 100644
--- a/src/hotspot/share/gc/z/zUtils.inline.hpp
+++ b/src/hotspot/share/gc/z/zUtils.inline.hpp
@@ -59,6 +59,15 @@ inline size_t ZUtils::object_size(zaddress addr) {
return words_to_bytes(to_oop(addr)->size());
}
+inline size_t ZUtils::copy_size(zaddress addr, size_t old_size) {
+ oop obj = to_oop(addr);
+ return words_to_bytes(obj->copy_size(bytes_to_words(old_size), obj->mark()));
+}
+
+inline void ZUtils::initialize_hash_if_necessary(zaddress to_addr, zaddress from_addr) {
+ to_oop(to_addr)->initialize_hash_if_necessary(to_oop(from_addr));
+}
+
inline void ZUtils::object_copy_disjoint(zaddress from, zaddress to, size_t size) {
Copy::aligned_disjoint_words((HeapWord*)untype(from), (HeapWord*)untype(to), bytes_to_words(size));
}
diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
index ad4dd045bcb..8bdd2cf3a0f 100644
--- a/src/hotspot/share/memory/universe.cpp
+++ b/src/hotspot/share/memory/universe.cpp
@@ -565,7 +565,7 @@ void Universe::initialize_basic_type_mirrors(TRAPS) {
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
BasicType bt = (BasicType)i;
if (!is_reference_type(bt)) {
- oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
+ oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, false, CHECK);
_basic_type_mirrors[i] = OopHandle(vm_global(), m);
}
CDS_JAVA_HEAP_ONLY(_archived_basic_type_mirror_indices[i] = -1);
diff --git a/src/hotspot/share/oops/arrayKlass.cpp b/src/hotspot/share/oops/arrayKlass.cpp
index fd362ae8a06..6d8b311b33a 100644
--- a/src/hotspot/share/oops/arrayKlass.cpp
+++ b/src/hotspot/share/oops/arrayKlass.cpp
@@ -305,3 +305,10 @@ void ArrayKlass::oop_verify_on(oop obj, outputStream* st) {
arrayOop a = arrayOop(obj);
guarantee(a->length() >= 0, "array with negative length?");
}
+
+int ArrayKlass::hash_offset_in_bytes(oop obj) const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ arrayOop ary = arrayOop(obj);
+ BasicType type = element_type();
+ return ary->base_offset_in_bytes(type) + (ary->length() << log2_element_size());
+}
diff --git a/src/hotspot/share/oops/arrayKlass.hpp b/src/hotspot/share/oops/arrayKlass.hpp
index 1c1d01fc32a..278d344810d 100644
--- a/src/hotspot/share/oops/arrayKlass.hpp
+++ b/src/hotspot/share/oops/arrayKlass.hpp
@@ -122,6 +122,8 @@ class ArrayKlass: public Klass {
// jvm support
jint compute_modifier_flags() const;
+ int hash_offset_in_bytes(oop obj) const;
+
// JVMTI support
jint jvmti_class_status() const;
diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp
index 9cf48da91b6..79240f5120b 100644
--- a/src/hotspot/share/oops/instanceKlass.cpp
+++ b/src/hotspot/share/oops/instanceKlass.cpp
@@ -529,7 +529,9 @@ InstanceKlass::InstanceKlass(const ClassFileParser& parser, KlassKind kind, Refe
_nest_host_index(0),
_init_state(allocated),
_reference_type(reference_type),
- _init_thread(nullptr)
+ _init_thread(nullptr),
+ _hash_offset(parser.hash_offset()),
+ _static_hash_offset(parser.static_hash_offset())
{
set_vtable_length(parser.vtable_size());
set_access_flags(parser.access_flags());
@@ -4539,3 +4541,7 @@ void ClassHierarchyIterator::next() {
_current = _current->next_sibling();
return; // visit next sibling subclass
}
+
+void InstanceKlass::fix_static_hash_offset() {
+ _static_hash_offset += InstanceMirrorKlass::offset_of_static_fields();
+}
diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp
index 13b50859ee3..9ec71e69a1e 100644
--- a/src/hotspot/share/oops/instanceKlass.hpp
+++ b/src/hotspot/share/oops/instanceKlass.hpp
@@ -238,6 +238,9 @@ class InstanceKlass: public Klass {
JavaThread* volatile _init_thread; // Pointer to current thread doing initialization (to handle recursive initialization)
+ int _hash_offset; // Offset of hidden field for i-hash
+ int _static_hash_offset; // Offset of hidden field for i-hash in corresponding Class
+
OopMapCache* volatile _oop_map_cache; // OopMapCache for all methods in the klass (allocated lazily)
JNIid* _jni_ids; // First JNI identifier for static fields in this class
jmethodID* volatile _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or null if none
@@ -947,6 +950,19 @@ class InstanceKlass: public Klass {
return layout_helper_to_size_helper(layout_helper());
}
+ virtual int hash_offset_in_bytes(oop obj) const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ return _hash_offset;
+ }
+ static int hash_offset_offset_in_bytes() {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ return (int)offset_of(InstanceKlass, _hash_offset);
+ }
+ int static_hash_offset_in_bytes() const {
+ return _static_hash_offset;
+ }
+ void fix_static_hash_offset();
+
// This bit is initialized in classFileParser.cpp.
// It is false under any of the following conditions:
// - the class is abstract (including any interface)
diff --git a/src/hotspot/share/oops/instanceMirrorKlass.cpp b/src/hotspot/share/oops/instanceMirrorKlass.cpp
index a90c9284b1a..490f4a77dc7 100644
--- a/src/hotspot/share/oops/instanceMirrorKlass.cpp
+++ b/src/hotspot/share/oops/instanceMirrorKlass.cpp
@@ -44,21 +44,33 @@ InstanceMirrorKlass::InstanceMirrorKlass() {
assert(CDSConfig::is_dumping_static_archive() || CDSConfig::is_using_archive(), "only for CDS");
}
-size_t InstanceMirrorKlass::instance_size(Klass* k) {
+size_t InstanceMirrorKlass::instance_size(Klass* k) const {
if (k != nullptr && k->is_instance_klass()) {
return align_object_size(size_helper() + InstanceKlass::cast(k)->static_field_size());
}
return size_helper();
}
-instanceOop InstanceMirrorKlass::allocate_instance(Klass* k, TRAPS) {
+instanceOop InstanceMirrorKlass::allocate_instance(Klass* k, bool extend, TRAPS) {
// Query before forming handle.
- size_t size = instance_size(k);
- assert(size > 0, "total object size must be non-zero: " SIZE_FORMAT, size);
+ size_t base_size = instance_size(k);
+ size_t size = base_size;
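+ // If the 4-byte i-hash field does not fit into the slack between the
+ // hash offset and the end of the base size, allocate one extra
+ // (aligned) word.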
+ if (extend && UseCompactObjectHeaders) {
+ size_t base_size_bytes = base_size * BytesPerWord;
+ assert(checked_cast<int>(base_size_bytes) >= hash_offset(k), "hash_offset must be <= base size");
+ if (base_size_bytes - hash_offset(k) < BytesPerInt) {
+ size = align_object_size(size + 1);
+ }
+ }
+ assert(base_size > 0, "base object size must be non-zero: " SIZE_FORMAT, base_size);
// Since mirrors can be variable sized because of the static fields, store
// the size in the mirror itself.
- return (instanceOop)Universe::heap()->class_allocate(this, size, THREAD);
+ instanceOop obj = (instanceOop)Universe::heap()->class_allocate(this, size, base_size, THREAD);
+ if (extend && UseCompactObjectHeaders) {
+ obj->set_mark(obj->mark().set_not_hashed_expanded());
+ }
+ return obj;
}
size_t InstanceMirrorKlass::oop_size(oop obj) const {
@@ -73,6 +85,31 @@ int InstanceMirrorKlass::compute_static_oop_field_count(oop obj) {
return 0;
}
+int InstanceMirrorKlass::hash_offset(Klass* klass) const {
+ if (_offset_of_static_fields - _hash_offset > BytesPerInt) {
+ // There is a usable gap between the fields of this Class and the static fields block. Use it.
+ return _hash_offset;
+ } else if (klass != nullptr && klass->is_instance_klass()) {
+ // Use the static field offset of the corresponding Klass*.
+ return InstanceKlass::cast(klass)->static_hash_offset_in_bytes();
+ } else {
+ // Append the hash behind the instance; instance_size() is in words, so
+ // convert to a byte offset.
+ // TODO: we could be more clever here and try to use gaps that are
+ // left after the static fields. Unfortunately, static_field_size
+ // is only in words; using it would require a careful rewrite to
+ // work in bytes.
+ return checked_cast<int>(instance_size(klass) * BytesPerWord);
+ }
+}
+
+int InstanceMirrorKlass::hash_offset_in_bytes(oop obj) const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ assert(obj != nullptr, "expect object");
+ assert(instance_size(java_lang_Class::as_Klass(obj)) == obj->base_size_given_klass(this), "must match");
+ return hash_offset(java_lang_Class::as_Klass(obj));
+}
+
#if INCLUDE_CDS
void InstanceMirrorKlass::serialize_offsets(SerializeClosure* f) {
f->do_int(&_offset_of_static_fields);
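The three-way choice in hash_offset() above can be checked against a standalone model. Everything below is hypothetical (pick_hash_offset and the layout numbers are made up); only the branch structure mirrors the patch:

    #include <cassert>

    static const int BytesPerInt = 4;

    // Hypothetical model of InstanceMirrorKlass::hash_offset(): returns the byte
    // offset at which the mirror's i-hash is stored.
    static int pick_hash_offset(int offset_of_static_fields, int hash_offset,
                                bool mirrors_instance_klass, int static_hash_offset,
                                int base_size_bytes) {
      if (offset_of_static_fields - hash_offset > BytesPerInt) {
        return hash_offset;            // gap before the static field block
      } else if (mirrors_instance_klass) {
        return static_hash_offset;     // slot reserved inside the static field block
      } else {
        return base_size_bytes;        // e.g. primitive-type mirror: append behind the instance
      }
    }

    int main() {
      assert(pick_hash_offset(120, 112, false,   0, 160) == 112); // 8-byte gap: use it
      assert(pick_hash_offset(116, 112, true,  144, 160) == 144); // no usable gap: use static slot
      assert(pick_hash_offset(116, 112, false,   0, 160) == 160); // no gap, no statics: append
      return 0;
    }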
diff --git a/src/hotspot/share/oops/instanceMirrorKlass.hpp b/src/hotspot/share/oops/instanceMirrorKlass.hpp
index 9783d416a1d..c64def86ff5 100644
--- a/src/hotspot/share/oops/instanceMirrorKlass.hpp
+++ b/src/hotspot/share/oops/instanceMirrorKlass.hpp
@@ -66,6 +66,8 @@ class InstanceMirrorKlass: public InstanceKlass {
// Returns the size of the instance including the extra static fields.
virtual size_t oop_size(oop obj) const;
+ int hash_offset_in_bytes(oop obj) const;
+ int hash_offset(Klass* klass) const;
// Static field offset is an offset into the Heap, should be converted by
// based on UseCompressedOop for traversal
@@ -86,10 +88,10 @@ class InstanceMirrorKlass: public InstanceKlass {
int compute_static_oop_field_count(oop obj);
// Given a Klass return the size of the instance
- size_t instance_size(Klass* k);
+ size_t instance_size(Klass* k) const;
// allocation
- instanceOop allocate_instance(Klass* k, TRAPS);
+ instanceOop allocate_instance(Klass* k, bool extend, TRAPS);
static void serialize_offsets(class SerializeClosure* f) NOT_CDS_RETURN;
diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp
index 884816764a0..97b801faf4d 100644
--- a/src/hotspot/share/oops/klass.cpp
+++ b/src/hotspot/share/oops/klass.cpp
@@ -1324,3 +1324,9 @@ void Klass::on_secondary_supers_verification_failure(Klass* super, Klass* sub, b
fatal("%s: %s implements %s: linear_search: %d; table_lookup: %d",
msg, sub->external_name(), super->external_name(), linear_result, table_result);
}
+
+bool Klass::expand_for_hash(oop obj) const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ assert((size_t)hash_offset_in_bytes(obj) <= (obj->base_size_given_klass(this) * HeapWordSize), "hash offset must be eq or lt base size: hash offset: %d, base size: " SIZE_FORMAT, hash_offset_in_bytes(obj), obj->base_size_given_klass(this) * HeapWordSize);
+ return obj->base_size_given_klass(this) * HeapWordSize - hash_offset_in_bytes(obj) < (int)sizeof(uint32_t);
+}
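Worked instances of the predicate above, as a standalone sketch (expands is a hypothetical stand-in for Klass::expand_for_hash):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // A copy needs an extra word only when fewer than 4 bytes remain between the
    // hash offset and the end of the base object.
    static bool expands(size_t base_size_bytes, int hash_offset_bytes) {
      return base_size_bytes - hash_offset_bytes < sizeof(uint32_t);
    }

    int main() {
      assert(!expands(16, 12)); // 4-byte tail gap: the jint hash fits in place
      assert( expands(16, 16)); // no gap: grow the copy by one heap word
      assert( expands(24, 22)); // 2-byte gap: too small for a jint
      return 0;
    }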
diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp
index 2c75d6da3b8..6c38b8dc09a 100644
--- a/src/hotspot/share/oops/klass.hpp
+++ b/src/hotspot/share/oops/klass.hpp
@@ -789,6 +789,10 @@ class Klass : public Metadata {
// Returns true if this Klass needs to be addressable via narrow Klass ID.
inline bool needs_narrow_id() const;
+ virtual int hash_offset_in_bytes(oop obj) const = 0;
+ static int kind_offset_in_bytes() { return (int)offset_of(Klass, _kind); }
+
+ bool expand_for_hash(oop obj) const;
};
#endif // SHARE_OOPS_KLASS_HPP
diff --git a/src/hotspot/share/oops/markWord.cpp b/src/hotspot/share/oops/markWord.cpp
index a9b1a7b026a..c519280e2af 100644
--- a/src/hotspot/share/oops/markWord.cpp
+++ b/src/hotspot/share/oops/markWord.cpp
@@ -91,6 +91,9 @@ void markWord::print_on(outputStream* st, bool print_monitor_info) const {
st->print("is_unlocked");
if (has_no_hash()) {
st->print(" no_hash");
+ } else if (UseCompactObjectHeaders) {
+ st->print(" hash is-hashed=%s is-copied=%s", BOOL_TO_STR(is_hashed_not_expanded()), BOOL_TO_STR(
+ is_hashed_expanded()));
} else {
st->print(" hash=" INTPTR_FORMAT, hash());
}
diff --git a/src/hotspot/share/oops/markWord.hpp b/src/hotspot/share/oops/markWord.hpp
index 1e1b8d77a90..f9b2c5a27da 100644
--- a/src/hotspot/share/oops/markWord.hpp
+++ b/src/hotspot/share/oops/markWord.hpp
@@ -46,13 +46,25 @@
//
// 64 bits (with compact headers):
// -------------------------------
-// klass:22 hash:31 -->| unused_gap:4 age:4 self-fwd:1 lock:2 (normal object)
+// klass:22 unused_gap:29 hashctrl:2 -->| unused_gap:4 age:4 self-fwd:1 lock:2 (normal object)
//
// - hash contains the identity hash value: largest value is
// 31 bits, see os::random(). Also, 64-bit vm's require
// a hash value no bigger than 32 bits because they will not
// properly generate a mask larger than that: see library_call.cpp
//
+// - With +UseCompactObjectHeaders:
+// hashctrl bits indicate whether the object has been hashed (bit 0) and/or expanded (bit 1):
+// 00 - never hashed
+// 01 - hashed, but not yet moved by GC: hash is recomputed from the object address
+// 11 - hashed and moved by GC: the hash code has been installed in the appended/gap field
+// 10 - not hashed, but expanded: only used for CDS scratch mirrors, see below
+//
+// When identityHashCode() is called, the transitions work as follows:
+// 00 - set the hashctrl bits to 01, and compute the identity hash
+// 01 - recompute identity hash. When GC encounters 01 while moving an object, it allocates an extra
+// word, if necessary, for the object copy, and installs 11.
+// 11 - fast-path: read the hash code from the field
+//
// - the two lock bits are used to describe three states: locked/unlocked and monitor.
//
// [ptr | 00] locked ptr points to real header on stack (stack-locking in use)
@@ -113,11 +125,13 @@ class markWord {
static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - self_fwd_bits;
static const int hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits;
static const int unused_gap_bits = LP64_ONLY(4) NOT_LP64(0); // Reserved for Valhalla.
+ static const int hashctrl_bits = 2;
static const int lock_shift = 0;
static const int self_fwd_shift = lock_shift + lock_bits;
static const int age_shift = self_fwd_shift + self_fwd_bits;
static const int hash_shift = age_shift + age_bits + unused_gap_bits;
+ static const int hashctrl_shift = age_shift + age_bits + unused_gap_bits;
static const uintptr_t lock_mask = right_n_bits(lock_bits);
static const uintptr_t lock_mask_in_place = lock_mask << lock_shift;
@@ -127,6 +141,10 @@ class markWord {
static const uintptr_t age_mask_in_place = age_mask << age_shift;
static const uintptr_t hash_mask = right_n_bits(hash_bits);
static const uintptr_t hash_mask_in_place = hash_mask << hash_shift;
+ static const uintptr_t hashctrl_mask = right_n_bits(hashctrl_bits);
+ static const uintptr_t hashctrl_mask_in_place = hashctrl_mask << hashctrl_shift;
+ static const uintptr_t hashctrl_hashed_mask_in_place = ((uintptr_t)1) << hashctrl_shift;
+ static const uintptr_t hashctrl_expanded_mask_in_place = ((uintptr_t)2) << hashctrl_shift;
#ifdef _LP64
// Used only with compact headers:
@@ -146,6 +164,7 @@ class markWord {
static const uintptr_t unlocked_value = 1;
static const uintptr_t monitor_value = 2;
static const uintptr_t marked_value = 3;
+ static const uintptr_t forward_expanded_value = 0b111;
static const uintptr_t no_hash = 0 ; // no hash value assigned
static const uintptr_t no_hash_in_place = (uintptr_t)no_hash << hash_shift;
@@ -164,7 +183,7 @@ class markWord {
return (mask_bits(value(), lock_mask_in_place) == unlocked_value);
}
bool is_marked() const {
- return (mask_bits(value(), lock_mask_in_place) == marked_value);
+ return (value() & (self_fwd_mask_in_place | lock_mask_in_place)) > monitor_value;
}
bool is_forwarded() const {
// Returns true for normal forwarded (0b011) and self-forwarded (0b1xx).
@@ -174,6 +193,15 @@ class markWord {
return (mask_bits(value(), lock_mask_in_place) == unlocked_value);
}
+ markWord set_forward_expanded() {
+ assert((value() & (lock_mask_in_place | self_fwd_mask_in_place)) == marked_value, "must be normal-forwarded here");
+ return markWord(value() | forward_expanded_value);
+ }
+
+ bool is_forward_expanded() {
+ return (value() & (lock_mask_in_place | self_fwd_mask_in_place)) == forward_expanded_value;
+ }
+
// Special temporary state of the markWord while being inflated.
// Code that looks at mark outside a lock need to take this into account.
bool is_being_inflated() const { return (value() == 0); }
@@ -189,7 +217,7 @@ class markWord {
// Should this header be preserved during GC?
bool must_be_preserved(const oopDesc* obj) const {
- return (!is_unlocked() || !has_no_hash());
+ return UseCompactObjectHeaders ? !is_unlocked() : (!is_unlocked() || !has_no_hash());
}
// WARNING: The following routines are used EXCLUSIVELY by
@@ -236,6 +264,7 @@ class markWord {
markWord displaced_mark_helper() const;
void set_displaced_mark_helper(markWord m) const;
markWord copy_set_hash(intptr_t hash) const {
+ assert(!UseCompactObjectHeaders, "Do not use with compact i-hash");
uintptr_t tmp = value() & (~hash_mask_in_place);
tmp |= ((hash & hash_mask) << hash_shift);
return markWord(tmp);
@@ -261,7 +290,7 @@ class markWord {
}
// used to encode pointers during GC
- markWord clear_lock_bits() const { return markWord(value() & ~lock_mask_in_place); }
+ markWord clear_lock_bits() const { return markWord(value() & ~(lock_mask_in_place | self_fwd_mask_in_place)); }
// age operations
markWord set_marked() { return markWord((value() & ~lock_mask_in_place) | marked_value); }
@@ -276,11 +305,68 @@ class markWord {
// hash operations
intptr_t hash() const {
+ assert(!UseCompactObjectHeaders, "only without compact i-hash");
return mask_bits(value() >> hash_shift, hash_mask);
}
bool has_no_hash() const {
- return hash() == no_hash;
+ if (UseCompactObjectHeaders) {
+ return !is_hashed();
+ } else {
+ return hash() == no_hash;
+ }
+ }
+
+ inline bool is_hashed_not_expanded() const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ return (value() & hashctrl_mask_in_place) == hashctrl_hashed_mask_in_place;
+ }
+ inline markWord set_hashed_not_expanded() const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ return markWord((value() & ~hashctrl_mask_in_place) | hashctrl_hashed_mask_in_place);
+ }
+
+ inline bool is_hashed_expanded() const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ return (value() & hashctrl_mask_in_place) == (hashctrl_hashed_mask_in_place | hashctrl_expanded_mask_in_place);
+ }
+ inline markWord set_hashed_expanded() const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ return markWord((value() & ~hashctrl_mask_in_place) | (hashctrl_hashed_mask_in_place | hashctrl_expanded_mask_in_place));
+ }
+
+// This is a special hashctrl state (10) that is only used
+// during CDS archive dumping. There we allocate 'scratch mirrors' for
+// the real mirrors. We allocate those scratch mirrors
+// in a pre-expanded form, but without being hashed. When the
+// real mirror gets hashed, we turn the scratch mirror into the
+// hashed_expanded state; otherwise we leave it in this special state,
+// which indicates that the archived copy will be allocated in the
+// unhashed form.
+ inline bool is_not_hashed_expanded() const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ return (value() & hashctrl_mask_in_place) == hashctrl_expanded_mask_in_place;
+ }
+ inline markWord set_not_hashed_expanded() const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ return markWord((value() & ~hashctrl_mask_in_place) | hashctrl_expanded_mask_in_place);
+ }
+ // Returns true when the object is either hashed_expanded or not_hashed_expanded.
+ inline bool is_expanded() const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ return (value() & hashctrl_expanded_mask_in_place) != 0;
+ }
+ inline bool is_hashed() const {
+ assert(UseCompactObjectHeaders, "only with compact i-hash");
+ return (value() & hashctrl_hashed_mask_in_place) != 0;
+ }
+
+ inline markWord copy_hashctrl_from(markWord m) const {
+ if (UseCompactObjectHeaders) {
+ return markWord((value() & ~hashctrl_mask_in_place) | (m.value() & hashctrl_mask_in_place));
+ } else {
+ return markWord(value());
+ }
}
inline Klass* klass() const;
@@ -291,7 +377,11 @@ class markWord {
// Prototype mark for initialization
static markWord prototype() {
- return markWord( no_hash_in_place | no_lock_in_place );
+ if (UseCompactObjectHeaders) {
+ return markWord(no_lock_in_place);
+ } else {
+ return markWord(no_hash_in_place | no_lock_in_place);
+ }
}
// Debugging
@@ -305,7 +395,8 @@ class markWord {
inline bool is_self_forwarded() const {
NOT_LP64(assert(LockingMode != LM_LEGACY, "incorrect with LM_LEGACY on 32 bit");)
- return mask_bits(value(), self_fwd_mask_in_place) != 0;
+ // Match 100, 101, 110 but not 111.
+ return mask_bits(value() + 1, (lock_mask_in_place | self_fwd_mask_in_place)) > 4;
}
inline markWord set_self_forwarded() const {
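Taken together, the accessors added above implement the following life cycle for the hashctrl bits (illustrative sketch using the new markWord API):

    markWord m = markWord::prototype();     // hashctrl == 00: never hashed
    assert(m.has_no_hash(), "fresh object");

    m = m.set_hashed_not_expanded();        // 00 -> 01: first identityHashCode() call
    assert(m.is_hashed() && !m.is_expanded(), "hash recomputed from address");

    m = m.set_hashed_expanded();            // 01 -> 11: GC moved the object
    assert(m.is_hashed_expanded(), "hash now read from the appended/gap field");

    markWord s = markWord::prototype().set_not_hashed_expanded(); // 10: CDS scratch mirrors only
    assert(s.is_expanded() && !s.is_hashed(), "pre-expanded, not yet hashed");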
diff --git a/src/hotspot/share/oops/oop.cpp b/src/hotspot/share/oops/oop.cpp
index 11cab4c043b..3fc67eb008b 100644
--- a/src/hotspot/share/oops/oop.cpp
+++ b/src/hotspot/share/oops/oop.cpp
@@ -38,6 +38,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/synchronizer.hpp"
+#include "runtime/lightweightSynchronizer.hpp"
#include "utilities/macros.hpp"
void oopDesc::print_on(outputStream* st) const {
@@ -129,6 +130,23 @@ bool oopDesc::is_oop(oop obj, bool ignore_mark_word) {
return LockingMode == LM_LIGHTWEIGHT || !SafepointSynchronize::is_at_safepoint();
}
+void oopDesc::initialize_hash_if_necessary(oop obj, Klass* k, markWord m) {
+ assert(UseCompactObjectHeaders, "only with compact object headers");
+ assert(!m.has_displaced_mark_helper(), "must not be displaced header");
+ assert(m.is_hashed_not_expanded(), "must be hashed but not moved");
+ assert(!m.is_hashed_expanded(), "must not be moved: " INTPTR_FORMAT, m.value());
+ uint32_t hash = static_cast<uint32_t>(ObjectSynchronizer::get_next_hash(nullptr, obj));
+ int offset = k->hash_offset_in_bytes(cast_to_oop(this));
+ assert(offset >= 8, "hash offset must not be in header");
+ //log_info(gc)("Initializing hash for " PTR_FORMAT ", old: " PTR_FORMAT ", hash: %d, offset: %d", p2i(this), p2i(obj), hash, offset);
+ int_field_put(offset, (jint) hash);
+ m = m.set_hashed_expanded();
+ assert(static_cast<uint32_t>(LightweightSynchronizer::get_hash(m, cast_to_oop(this), k)) == hash,
+ "hash must remain the same");
+ assert(m.narrow_klass() != 0, "must not be null");
+ set_mark(m);
+}
+
// used only for asserts and guarantees
bool oopDesc::is_oop_or_null(oop obj, bool ignore_mark_word) {
return obj == nullptr ? true : is_oop(obj, ignore_mark_word);
diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp
index f52baab0de6..61df26141b9 100644
--- a/src/hotspot/share/oops/oop.hpp
+++ b/src/hotspot/share/oops/oop.hpp
@@ -72,6 +72,7 @@ class oopDesc {
inline markWord mark() const;
inline markWord mark_acquire() const;
inline markWord* mark_addr() const;
+ inline markWord forward_safe_mark() const;
inline void set_mark(markWord m);
static inline void set_mark(HeapWord* mem, markWord m);
@@ -116,9 +117,23 @@ class oopDesc {
// Returns the actual oop size of the object in machine words
inline size_t size();
+ // Returns the size that a copy of this object requires, in machine words.
+ // It can be one word larger than the current size, to accommodate
+ // an additional 4-byte field for the identity hash code.
+ //
+ // size: the current size of this object, we're passing this here for performance
+ // reasons, because all callers compute this anyway, and we want to avoid
+ // recomputing it.
+ // mark: the mark-word of this object. Some callers (e.g. G1ParScanThreadState::do_copy_to_survivor_space())
+ // need to use a known markWord because of racing GC threads that can change
+ // the markWord at any time.
+ inline size_t copy_size(size_t size, markWord mark) const;
+ inline size_t copy_size_cds(size_t size, markWord mark) const;
+
// Sometimes (for complicated concurrency-related reasons), it is useful
// to be able to figure out the size of an object knowing its klass.
- inline size_t size_given_klass(Klass* klass);
+ inline size_t base_size_given_klass(const Klass* klass);
+ inline size_t size_given_mark_and_klass(markWord mrk, const Klass* kls);
// type test operations (inlined in oop.inline.hpp)
inline bool is_instance() const;
@@ -272,7 +287,7 @@ class oopDesc {
inline bool is_forwarded() const;
inline bool is_self_forwarded() const;
- inline void forward_to(oop p);
+ inline void forward_to(oop p, bool expanded = false);
inline void forward_to_self();
// Like "forward_to", but inserts the forwarding pointer atomically.
@@ -311,11 +326,22 @@ class oopDesc {
inline static bool is_instanceof_or_null(oop obj, Klass* klass);
+private:
+ inline intptr_t hash_from_field() const;
+ size_t hash_offset_in_bytes() const;
+
+public:
// identity hash; returns the identity hash key (computes it if necessary)
inline intptr_t identity_hash();
intptr_t slow_identity_hash();
inline bool fast_no_hash_check();
+ // Initialize the identity hash code in the hash field of this object (a fresh
+ // copy), taking the hash state from the original object 'obj'.
+ // Returns true if the object has been expanded, false otherwise.
+ inline bool initialize_hash_if_necessary(oop obj);
+ // For CDS only.
+ void initialize_hash_if_necessary(oop obj, Klass* k, markWord m);
+
// marks are forwarded to stack when object is locked
inline bool has_displaced_mark() const;
inline markWord displaced_mark() const;
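A sketch of how a GC copy path is expected to combine copy_size() with the mark it read (simplified; allocate() is a hypothetical stand-in for the collector's allocation routine):

    markWord m = obj->mark();                       // read once: racing GC threads may change it
    size_t old_size = obj->size();
    size_t new_size = obj->copy_size(old_size, m);  // old_size, or +1 word for state 01 with no gap
    HeapWord* dst = allocate(new_size);             // hypothetical allocator call
    // Only the original payload is copied; the extra hash word (if any) is
    // initialized later, when the hash is installed in the copy.
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), dst, old_size);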
diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp
index 45902e63147..82a3924b132 100644
--- a/src/hotspot/share/oops/oop.inline.hpp
+++ b/src/hotspot/share/oops/oop.inline.hpp
@@ -43,6 +43,7 @@
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "logging/log.hpp"
// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references
@@ -59,27 +60,49 @@ markWord* oopDesc::mark_addr() const {
return (markWord*) &_mark;
}
+static void assert_correct_hash_transition(markWord old_mark, markWord new_mark) {
+#ifdef ASSERT
+ if (UseCompactObjectHeaders) {
+ if (new_mark.is_marked()) return; // Install forwardee.
+ if (old_mark.is_forwarded()) return; // Restoration of forwarded object.
+ assert(!(new_mark.is_hashed_not_expanded() && new_mark.is_hashed_expanded()), "must not be simultaneously hashed and copied state");
+ if (old_mark.is_hashed_not_expanded()) assert(new_mark.is_hashed(), "incorrect hash state transition");
+ if (old_mark.is_hashed_expanded()) assert(new_mark.is_hashed_expanded(), "incorrect hash state transition, old_mark: " INTPTR_FORMAT ", new_mark: " INTPTR_FORMAT, old_mark.value(), new_mark.value());
+ }
+#endif
+}
+
void oopDesc::set_mark(markWord m) {
+ //assert_correct_hash_transition(mark(), m);
Atomic::store(&_mark, m);
}
void oopDesc::set_mark(HeapWord* mem, markWord m) {
+ if (UseCompactObjectHeaders) {
+ assert(!(m.is_hashed_not_expanded() && m.is_hashed_expanded()), "must not be simultaneously hashed and copied state");
+ }
*(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
}
void oopDesc::release_set_mark(HeapWord* mem, markWord m) {
+ if (UseCompactObjectHeaders) {
+ assert(!(m.is_hashed_not_expanded() && m.is_hashed_expanded()), "must not be simultaneously hashed and copied state");
+ }
Atomic::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m);
}
void oopDesc::release_set_mark(markWord m) {
+ assert_correct_hash_transition(mark(), m);
Atomic::release_store(&_mark, m);
}
markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
+ assert_correct_hash_transition(old_mark, new_mark);
return Atomic::cmpxchg(&_mark, old_mark, new_mark);
}
markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) {
+ assert_correct_hash_transition(old_mark, new_mark);
return Atomic::cmpxchg(&_mark, old_mark, new_mark, order);
}
@@ -92,7 +115,14 @@ markWord oopDesc::prototype_mark() const {
}
void oopDesc::init_mark() {
- set_mark(prototype_mark());
+ if (UseCompactObjectHeaders) {
+ markWord m = prototype_mark().copy_hashctrl_from(mark());
+ //log_info(gc)("Init mark: oop: " PTR_FORMAT ", mark: " INTPTR_FORMAT, p2i(this), m.value());
+ assert(m.is_neutral(), "must be neutral");
+ set_mark(m);
+ } else {
+ set_mark(prototype_mark());
+ }
}
Klass* oopDesc::klass() const {
@@ -172,11 +202,66 @@ bool oopDesc::is_a(Klass* k) const {
return klass()->is_subtype_of(k);
}
+size_t oopDesc::size_given_mark_and_klass(markWord mrk, const Klass* kls) {
+ size_t sz = base_size_given_klass(kls);
+ if (UseCompactObjectHeaders) {
+ assert(!mrk.has_displaced_mark_helper(), "must not be displaced");
+ if (mrk.is_expanded() && kls->expand_for_hash(cast_to_oop(this))) {
+ log_trace(gc)("Extended size for object: " PTR_FORMAT " base-size: " SIZE_FORMAT ", mark: " PTR_FORMAT ", klass: " PTR_FORMAT ", expand: %s", p2i(this), sz, mrk.value(), p2i(kls), BOOL_TO_STR(kls->expand_for_hash(cast_to_oop(this))));
+ sz = align_object_size(sz + 1);
+ }
+ }
+ return sz;
+}
+
+size_t oopDesc::copy_size(size_t size, markWord mark) const {
+ if (UseCompactObjectHeaders) {
+ assert(!mark.has_displaced_mark_helper(), "must not be displaced");
+ Klass* klass = mark.klass();
+ if (mark.is_hashed_not_expanded() && klass->expand_for_hash(cast_to_oop(this))) {
+ size = align_object_size(size + 1);
+ }
+ }
+ assert(is_object_aligned(size), "Oop size is not properly aligned: " SIZE_FORMAT, size);
+ return size;
+}
+
+size_t oopDesc::copy_size_cds(size_t size, markWord mark) const {
+ if (UseCompactObjectHeaders) {
+ assert(!mark.has_displaced_mark_helper(), "must not be displaced");
+ Klass* klass = mark.klass();
+ if (mark.is_hashed_not_expanded() && klass->expand_for_hash(cast_to_oop(this))) {
+ size = align_object_size(size + 1);
+ }
+ if (mark.is_not_hashed_expanded() && klass->expand_for_hash(cast_to_oop(this))) {
+ size = align_object_size(size - 1);
+ }
+ }
+ assert(is_object_aligned(size), "Oop size is not properly aligned: " SIZE_FORMAT, size);
+ return size;
+}
+
+#ifdef _LP64
+markWord oopDesc::forward_safe_mark() const {
+ assert(UseCompactObjectHeaders, "Only get here with compact headers");
+ markWord m = mark();
+ if (m.is_marked()) {
+ oop fwd = forwardee(m);
+ markWord m2 = fwd->mark();
+ assert(!m2.is_marked() || m2.is_self_forwarded(), "no double forwarding: this: " PTR_FORMAT " (" INTPTR_FORMAT "), fwd: " PTR_FORMAT " (" INTPTR_FORMAT ")", p2i(this), m.value(), p2i(fwd), m2.value());
+ m = m2;
+ }
+ return m;
+}
+#endif
+
size_t oopDesc::size() {
- return size_given_klass(klass());
+ markWord m = UseCompactObjectHeaders ? mark() : markWord::unused_mark();
+ return size_given_mark_and_klass(m, klass());
}
-size_t oopDesc::size_given_klass(Klass* klass) {
+size_t oopDesc::base_size_given_klass(const Klass* klass) {
int lh = klass->layout_helper();
size_t s;
@@ -295,10 +380,13 @@ bool oopDesc::is_self_forwarded() const {
}
// Used by scavengers
-void oopDesc::forward_to(oop p) {
+void oopDesc::forward_to(oop p, bool expanded) {
assert(cast_from_oop<oopDesc*>(p) != this,
"must not be used for self-forwarding, use forward_to_self() instead");
markWord m = markWord::encode_pointer_as_mark(p);
+ if (expanded) {
+ m = m.set_forward_expanded();
+ }
assert(m.decode_pointer() == p, "encoding must be reversible");
set_mark(m);
}
@@ -384,16 +472,19 @@ void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
template <typename OopClosureType>
size_t oopDesc::oop_iterate_size(OopClosureType* cl) {
+ markWord m = UseCompactObjectHeaders ? mark() : markWord::unused_mark();
+ assert((!UseCompactObjectHeaders) || m.narrow_klass() != 0, "null narrowKlass: " INTPTR_FORMAT, m.value());
Klass* k = klass();
- size_t size = size_given_klass(k);
+ size_t size = size_given_mark_and_klass(m, k);
OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
return size;
}
template <typename OopClosureType>
size_t oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
+ markWord m = UseCompactObjectHeaders ? mark() : markWord::unused_mark();
Klass* k = klass();
- size_t size = size_given_klass(k);
+ size_t size = size_given_mark_and_klass(m, k);
OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
return size;
}
@@ -417,14 +508,23 @@ bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
intptr_t oopDesc::identity_hash() {
// Fast case; if the object is unlocked and the hash value is set, no locking is needed
// Note: The mark must be read into local variable to avoid concurrent updates.
- markWord mrk = mark();
- if (mrk.is_unlocked() && !mrk.has_no_hash()) {
- return mrk.hash();
- } else if (mrk.is_marked()) {
- return mrk.hash();
+ if (UseCompactObjectHeaders) {
+ markWord mrk = mark();
+ if (mrk.is_hashed_expanded()) {
+ Klass* klass = mrk.klass();
+ return int_field(klass->hash_offset_in_bytes(cast_to_oop(this)));
+ }
+ // Fall-through to slow-case.
} else {
- return slow_identity_hash();
+ markWord mrk = mark();
+ if (mrk.is_unlocked() && !mrk.has_no_hash()) {
+ return mrk.hash();
+ } else if (mrk.is_marked()) {
+ return mrk.hash();
+ }
+ // Fall-through to slow-case.
}
+ return slow_identity_hash();
}
// This checks fast simple case of whether the oop has_no_hash,
@@ -432,7 +532,7 @@ intptr_t oopDesc::identity_hash() {
bool oopDesc::fast_no_hash_check() {
markWord mrk = mark_acquire();
assert(!mrk.is_marked(), "should never be marked");
- return mrk.is_unlocked() && mrk.has_no_hash();
+ return (UseCompactObjectHeaders || mrk.is_unlocked()) && mrk.has_no_hash();
}
bool oopDesc::has_displaced_mark() const {
@@ -455,4 +555,18 @@ bool oopDesc::mark_must_be_preserved(markWord m) const {
return m.must_be_preserved(this);
}
+inline bool oopDesc::initialize_hash_if_necessary(oop obj) {
+ if (!UseCompactObjectHeaders) {
+ return false;
+ }
+ markWord m = mark();
+ assert(!m.has_displaced_mark_helper(), "must not be displaced header");
+ if (m.is_hashed_not_expanded()) {
+ initialize_hash_if_necessary(obj, m.klass(), m);
+ return true;
+ }
+ return false;
+}
+
#endif // SHARE_OOPS_OOP_INLINE_HPP
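Stripped of the interleaved legacy path, the compact-header dispatch in identity_hash() boils down to this sketch:

    static intptr_t identity_hash_sketch(oop obj) {
      markWord m = obj->mark();
      if (m.is_hashed_expanded()) {       // 11: a single load, no locking, no mark update
        return obj->int_field(m.klass()->hash_offset_in_bytes(obj));
      }
      // 00 or 01: slow_identity_hash() recomputes the address-based hash and,
      // for 00, publishes the 01 state with a mark CAS.
      return obj->slow_identity_hash();
    }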
diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp
index 1072a5d6a24..6398b13e86d 100644
--- a/src/hotspot/share/opto/library_call.cpp
+++ b/src/hotspot/share/opto/library_call.cpp
@@ -4665,7 +4665,7 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
assert(is_static == callee()->is_static(), "correct intrinsic selection");
assert(!(is_virtual && is_static), "either virtual, special, or static");
- enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
+ enum { _slow_path = 1, _null_path, _fast_path, _fast_path2, PATH_LIMIT };
RegionNode* result_reg = new RegionNode(PATH_LIMIT);
PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
@@ -4713,63 +4713,209 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
generate_virtual_guard(obj_klass, slow_region);
}
- // Get the header out of the object, use LoadMarkNode when available
- Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
- // The control of the load must be null. Otherwise, the load can move before
- // the null check after castPP removal.
- Node* no_ctrl = nullptr;
- Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+ if (UseCompactObjectHeaders) {
+ // Get the header out of the object.
+ Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
+ // The control of the load must be null. Otherwise, the load can move before
+ // the null check after castPP removal.
+ Node* no_ctrl = nullptr;
+ Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+
+ // Extract the hashctrl bits to see whether the object has been hashed and/or expanded.
+ Node* hashctrl_mask = _gvn.MakeConX(markWord::hashctrl_mask_in_place);
+ Node* masked_header = _gvn.transform(new AndXNode(header, hashctrl_mask));
+
+ // Take slow-path when the object has not been hashed.
+ Node* not_hashed_val = _gvn.MakeConX(0);
+ Node* chk_hashed = _gvn.transform(new CmpXNode(masked_header, not_hashed_val));
+ Node* test_hashed = _gvn.transform(new BoolNode(chk_hashed, BoolTest::eq));
+
+ generate_slow_guard(test_hashed, slow_region);
+
+ // Test whether the object is hashed or hashed&copied.
+ Node* hashed_copied = _gvn.MakeConX(markWord::hashctrl_expanded_mask_in_place | markWord::hashctrl_hashed_mask_in_place);
+ Node* chk_copied = _gvn.transform(new CmpXNode(masked_header, hashed_copied));
+ // If true, then object has been hashed&copied, otherwise it's only hashed.
+ Node* test_copied = _gvn.transform(new BoolNode(chk_copied, BoolTest::eq));
+ IfNode* if_copied = create_and_map_if(control(), test_copied, PROB_FAIR, COUNT_UNKNOWN);
+ Node* if_true = _gvn.transform(new IfTrueNode(if_copied));
+ Node* if_false = _gvn.transform(new IfFalseNode(if_copied));
+
+ // Hashed&Copied path: read hash-code out of the object.
+ set_control(if_true);
+ // result_val->del_req(_fast_path2);
+ // result_reg->del_req(_fast_path2);
+ // result_io->del_req(_fast_path2);
+ // result_mem->del_req(_fast_path2);
- if (!UseObjectMonitorTable) {
- // Test the header to see if it is safe to read w.r.t. locking.
- Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
- Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
- if (LockingMode == LM_LIGHTWEIGHT) {
- Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
- Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
- Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
+ Node* obj_klass = load_object_klass(obj);
+ Node* hash_addr;
+ const TypeKlassPtr* klass_t = _gvn.type(obj_klass)->isa_klassptr();
+ bool load_offset_runtime = true;
+
+ if (klass_t != nullptr) {
+ if (klass_t->klass_is_exact() && klass_t->isa_instklassptr()) {
+ ciInstanceKlass* ciKlass = reinterpret_cast<ciInstanceKlass*>(klass_t->is_instklassptr()->exact_klass());
+ if (!ciKlass->is_mirror_instance_klass() && !ciKlass->is_reference_instance_klass()) {
+ // We know the InstanceKlass, load hash_offset from there at compile-time.
+ int hash_offset = ciKlass->hash_offset_in_bytes();
+ hash_addr = basic_plus_adr(obj, hash_offset);
+ Node* loaded_hash = make_load(control(), hash_addr, TypeInt::INT, T_INT, MemNode::unordered);
+ result_val->init_req(_fast_path2, loaded_hash);
+ result_reg->init_req(_fast_path2, control());
+ load_offset_runtime = false;
+ }
+ }
+ }
- generate_slow_guard(test_monitor, slow_region);
- } else {
- Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
- Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
- Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
+ //tty->print_cr("Load hash-offset at runtime: %s", BOOL_TO_STR(load_offset_runtime));
+
+ if (load_offset_runtime) {
+ // We don't know if it is an array or an exact type, figure it out at run-time.
+ // If not an ordinary instance, then we need to take slow-path.
+ Node* kind_addr = basic_plus_adr(obj_klass, Klass::kind_offset_in_bytes());
+ Node* kind = make_load(control(), kind_addr, TypeInt::INT, T_INT, MemNode::unordered);
+ Node* instance_val = _gvn.intcon(Klass::InstanceKlassKind);
+ Node* chk_inst = _gvn.transform(new CmpINode(kind, instance_val));
+ Node* test_inst = _gvn.transform(new BoolNode(chk_inst, BoolTest::ne));
+ generate_slow_guard(test_inst, slow_region);
+
+ // Otherwise it's an instance and we can read the hash_offset from the InstanceKlass.
+ Node* hash_offset_addr = basic_plus_adr(obj_klass, InstanceKlass::hash_offset_offset_in_bytes());
+ Node* hash_offset = make_load(control(), hash_offset_addr, TypeInt::INT, T_INT, MemNode::unordered);
+ hash_addr = basic_plus_adr(obj, ConvI2X(hash_offset));
+ Compile::current()->set_has_unsafe_access(true);
+ Node* loaded_hash = make_load(control(), hash_addr, TypeInt::INT, T_INT, MemNode::unordered);
+ result_val->init_req(_fast_path2, loaded_hash);
+ result_reg->init_req(_fast_path2, control());
+ }
+
+ // Hashed-only path: recompute hash-code from object address.
+ set_control(if_false);
+ // Our constants.
+ Node* M = _gvn.intcon(0x337954D5);
+ Node* A = _gvn.intcon(0xAAAAAAAA);
+ // Split object address into lo and hi 32 bits.
+ Node* obj_addr = _gvn.transform(new CastP2XNode(nullptr, obj));
+ Node* x = _gvn.transform(new ConvL2INode(obj_addr));
+ Node* upper_addr = _gvn.transform(new URShiftLNode(obj_addr, _gvn.intcon(32)));
+ Node* y = _gvn.transform(new ConvL2INode(upper_addr));
+
+ Node* H0 = _gvn.transform(new XorINode(x, y));
+ Node* L0 = _gvn.transform(new XorINode(x, A));
+
+ // Full multiplication of two 32 bit values L0 and M into a hi/lo result in two 32 bit values V0 and U0.
+ Node* L0_64 = _gvn.transform(new ConvI2LNode(L0));
+ L0_64 = _gvn.transform(new AndLNode(L0_64, _gvn.longcon(0xFFFFFFFF)));
+ Node* M_64 = _gvn.transform(new ConvI2LNode(M));
+ // No need to mask M_64: M is a positive 32-bit constant, so ConvI2L leaves its upper bits zero.
+ Node* prod64 = _gvn.transform(new MulLNode(L0_64, M_64));
+ Node* V0 = _gvn.transform(new ConvL2INode(prod64));
+ Node* prod_upper = _gvn.transform(new URShiftLNode(prod64, _gvn.intcon(32)));
+ Node* U0 = _gvn.transform(new ConvL2INode(prod_upper));
+
+ Node* Q0 = _gvn.transform(new MulINode(H0, M));
+ Node* L1 = _gvn.transform(new XorINode(Q0, U0));
+
+ // Full multiplication of two 32 bit values L1 and M into a hi/lo result in two 32 bit values V1 and U1.
+ Node* L1_64 = _gvn.transform(new ConvI2LNode(L1));
+ L1_64 = _gvn.transform(new AndLNode(L1_64, _gvn.longcon(0xFFFFFFFF)));
+ prod64 = _gvn.transform(new MulLNode(L1_64, M_64));
+ Node* V1 = _gvn.transform(new ConvL2INode(prod64));
+ prod_upper = _gvn.transform(new URShiftLNode(prod64, _gvn.intcon(32)));
+ Node* U1 = _gvn.transform(new ConvL2INode(prod_upper));
+
+ Node* P1 = _gvn.transform(new XorINode(V0, M));
+
+ // Right rotate P1 by distance L1.
+ Node* distance = _gvn.transform(new AndINode(L1, _gvn.intcon(32 - 1)));
+ Node* inverse_distance = _gvn.transform(new SubINode(_gvn.intcon(32), distance));
+ Node* ror_part1 = _gvn.transform(new URShiftINode(P1, distance));
+ Node* ror_part2 = _gvn.transform(new LShiftINode(P1, inverse_distance));
+ Node* Q1 = _gvn.transform(new OrINode(ror_part1, ror_part2));
+
+ Node* L2 = _gvn.transform(new XorINode(Q1, U1));
+ Node* hash = _gvn.transform(new XorINode(V1, L2));
+ Node* hash_truncated = _gvn.transform(new AndINode(hash, _gvn.intcon(markWord::hash_mask)));
+
+ // TODO: We could generate a fast case here under the following conditions:
+ // - The hashctrl is set to hashed_expanded (see markWord::is_hashed_expanded())
+ // - The type of the object is known
+ // Then we can load the identity hashcode from the int field at Klass::hash_offset_in_bytes() of the object.
+ result_val->init_req(_fast_path, hash_truncated);
+ } else {
+ // Get the header out of the object, use LoadMarkNode when available
+ Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
+ // The control of the load must be null. Otherwise, the load can move before
+ // the null check after castPP removal.
+ Node* no_ctrl = nullptr;
+ Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+
+ if (!UseObjectMonitorTable) {
+ // Test the header to see if it is safe to read w.r.t. locking.
+ Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place);
+ Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
+ if (LockingMode == LM_LIGHTWEIGHT) {
+ Node *monitor_val = _gvn.MakeConX(markWord::monitor_value);
+ Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val));
+ Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq));
+
+ generate_slow_guard(test_monitor, slow_region);
+ } else {
+ Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
+ Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val));
+ Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne));
- generate_slow_guard(test_not_unlocked, slow_region);
+ generate_slow_guard(test_not_unlocked, slow_region);
+ }
}
- }
- // Get the hash value and check to see that it has been properly assigned.
- // We depend on hash_mask being at most 32 bits and avoid the use of
- // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
- // vm: see markWord.hpp.
- Node *hash_mask = _gvn.intcon(markWord::hash_mask);
- Node *hash_shift = _gvn.intcon(markWord::hash_shift);
- Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
- // This hack lets the hash bits live anywhere in the mark object now, as long
- // as the shift drops the relevant bits into the low 32 bits. Note that
- // Java spec says that HashCode is an int so there's no point in capturing
- // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
- hshifted_header = ConvX2I(hshifted_header);
- Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
+ // Get the hash value and check to see that it has been properly assigned.
+ // We depend on hash_mask being at most 32 bits and avoid the use of
+ // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
+ // vm: see markWord.hpp.
+ Node *hash_mask = _gvn.intcon(markWord::hash_mask);
+ Node *hash_shift = _gvn.intcon(markWord::hash_shift);
+ Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
+ // This hack lets the hash bits live anywhere in the mark object now, as long
+ // as the shift drops the relevant bits into the low 32 bits. Note that
+ // Java spec says that HashCode is an int so there's no point in capturing
+ // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
+ hshifted_header = ConvX2I(hshifted_header);
+ Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
+
+ Node *no_hash_val = _gvn.intcon(markWord::no_hash);
+ Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
+ Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
- Node *no_hash_val = _gvn.intcon(markWord::no_hash);
- Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
- Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
+ generate_slow_guard(test_assigned, slow_region);
- generate_slow_guard(test_assigned, slow_region);
+ result_val->init_req(_fast_path, hash_val);
+
+ // _fast_path2 is not used here.
+ result_val->del_req(_fast_path2);
+ result_reg->del_req(_fast_path2);
+ result_io->del_req(_fast_path2);
+ result_mem->del_req(_fast_path2);
+ }
Node* init_mem = reset_memory();
// fill in the rest of the null path:
result_io ->init_req(_null_path, i_o());
result_mem->init_req(_null_path, init_mem);
- result_val->init_req(_fast_path, hash_val);
result_reg->init_req(_fast_path, control());
result_io ->init_req(_fast_path, i_o());
result_mem->init_req(_fast_path, init_mem);
+ if (UseCompactObjectHeaders) {
+ result_io->init_req(_fast_path2, i_o());
+ result_mem->init_req(_fast_path2, init_mem);
+ }
+
// Generate code for the slow case. We make a call to hashCode().
+ assert(slow_region != nullptr, "must have slow_region");
set_control(_gvn.transform(slow_region));
if (!stopped()) {
// No need for PreserveJVMState, because we're using up the present state.
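The node sequence emitted in the hashed-only path computes the same mixing function as the runtime's FastHash::get_hash32. A scalar restatement, useful for checking the IR against (the rotate shift is masked here to avoid C++ undefined behavior, where the IR can rely on the JVM's shift-masking semantics):

    static uint32_t ihash_of_address(uint64_t addr) {
      const uint32_t M = 0x337954D5, A = 0xAAAAAAAA;
      uint32_t x  = (uint32_t)addr, y = (uint32_t)(addr >> 32);
      uint32_t H0 = x ^ y, L0 = x ^ A;
      uint64_t p0 = (uint64_t)L0 * M;                 // full 32x32->64 multiply
      uint32_t V0 = (uint32_t)p0, U0 = (uint32_t)(p0 >> 32);
      uint32_t L1 = (H0 * M) ^ U0;                    // Q0 ^ U0
      uint64_t p1 = (uint64_t)L1 * M;
      uint32_t V1 = (uint32_t)p1, U1 = (uint32_t)(p1 >> 32);
      uint32_t P1 = V0 ^ M;
      uint32_t d  = L1 & 31;                          // rotate P1 right by d
      uint32_t Q1 = (P1 >> d) | (P1 << ((32 - d) & 31));
      return (V1 ^ (Q1 ^ U1)) & 0x7FFFFFFF;           // markWord::hash_mask (31 bits)
    }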
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index 11481954e10..10b30356d4d 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -3690,6 +3690,9 @@ void Arguments::set_compact_headers_flags() {
if (UseCompactObjectHeaders && !UseCompressedClassPointers) {
FLAG_SET_DEFAULT(UseCompressedClassPointers, true);
}
+ if (UseCompactObjectHeaders && FLAG_IS_DEFAULT(hashCode)) {
+ FLAG_SET_DEFAULT(hashCode, 6);
+ }
#endif
}
@@ -3810,6 +3813,12 @@ jint Arguments::apply_ergo() {
LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(valuebasedclasses));
}
}
+
+ if (UseObjectMonitorTable && LockingMode != LM_LIGHTWEIGHT) {
+ // ObjectMonitorTable requires lightweight locking.
+ FLAG_SET_DEFAULT(LockingMode, LM_LIGHTWEIGHT);
+ }
+
return JNI_OK;
}
diff --git a/src/hotspot/share/runtime/lightweightSynchronizer.cpp b/src/hotspot/share/runtime/lightweightSynchronizer.cpp
index b3bcd5f7029..15d6924e1ab 100644
--- a/src/hotspot/share/runtime/lightweightSynchronizer.cpp
+++ b/src/hotspot/share/runtime/lightweightSynchronizer.cpp
@@ -51,6 +51,18 @@
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/globalDefinitions.hpp"
+static uintx objhash(oop obj) {
+ if (UseCompactObjectHeaders) {
+ uintx hash = LightweightSynchronizer::get_hash(obj->mark(), obj);
+ assert(hash != 0, "should have a hash");
+ return hash;
+ } else {
+ uintx hash = obj->mark().hash();
+ assert(hash != 0, "should have a hash");
+ return hash;
+ }
+}
+
// ConcurrentHashTable storing links from objects to ObjectMonitors
class ObjectMonitorTable : AllStatic {
struct Config {
@@ -81,9 +93,7 @@ class ObjectMonitorTable : AllStatic {
explicit Lookup(oop obj) : _obj(obj) {}
uintx get_hash() const {
- uintx hash = _obj->mark().hash();
- assert(hash != 0, "should have a hash");
- return hash;
+ return objhash(_obj);
}
bool equals(ObjectMonitor** value) {
@@ -285,6 +295,7 @@ class ObjectMonitorTable : AllStatic {
Lookup lookup_f(obj);
auto found_f = [&](ObjectMonitor** found) {
assert((*found)->object_peek() == obj, "must be");
+ assert(objhash(obj) == (uintx)(*found)->hash(), "hash must match");
result = *found;
};
bool grow;
@@ -317,7 +328,7 @@ class ObjectMonitorTable : AllStatic {
oop obj = om->object_peek();
st->print("monitor=" PTR_FORMAT ", ", p2i(om));
st->print("object=" PTR_FORMAT, p2i(obj));
- assert(obj->mark().hash() == om->hash(), "hash must match");
+ assert(objhash(obj) == (uintx)om->hash(), "hash must match");
st->cr();
return true;
};
@@ -406,7 +417,7 @@ ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectM
assert(UseObjectMonitorTable, "must be");
assert(obj == monitor->object(), "must be");
- intptr_t hash = obj->mark().hash();
+ intptr_t hash = objhash(obj);
assert(hash != 0, "must be set when claiming the object monitor");
monitor->set_hash(hash);
@@ -1217,3 +1228,60 @@ bool LightweightSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread*
// Slow-path.
return false;
}
+
+uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj, Klass* klass) {
+ assert(UseCompactObjectHeaders, "Only with compact i-hash");
+ assert(mark.is_hashed(), "only from hashed or copied object");
+ if (mark.is_hashed_expanded()) {
+ return obj->int_field(klass->hash_offset_in_bytes(obj));
+ } else {
+ assert(mark.is_hashed_not_expanded(), "must be hashed");
+ assert(hashCode == 6 || hashCode == 2, "must have idempotent hashCode");
+ // Already marked as hashed, but not yet copied. Recompute hash and return it.
+ return ObjectSynchronizer::get_next_hash(nullptr, obj); // recompute hash
+ }
+}
+
+uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj) {
+ return get_hash(mark, obj, mark.klass());
+}
+
+intptr_t LightweightSynchronizer::FastHashCode(Thread* current, oop obj) {
+ assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+
+ markWord mark = obj->mark_acquire();
+ for (;;) {
+ intptr_t hash;
+ markWord old_mark = mark;
+ markWord new_mark;
+ if (UseCompactObjectHeaders) {
+ if (mark.is_hashed()) {
+ return get_hash(mark, obj);
+ }
+ hash = ObjectSynchronizer::get_next_hash(current, obj); // get a new hash
+ if (mark.is_not_hashed_expanded()) {
+ new_mark = mark.set_hashed_expanded();
+ int offset = mark.klass()->hash_offset_in_bytes(obj);
+ obj->int_field_put(offset, (jint) hash);
+ } else {
+ new_mark = mark.set_hashed_not_expanded();
+ }
+ // Note: we could let i-hashed objects promote immediately, e.g. via
+ // new_mark.set_age(markWord::max_age), to avoid young-gen overflow
+ // through i-hash expansion. This is currently not done.
+ } else {
+ hash = mark.hash();
+ if (hash != 0) {
+ return hash;
+ }
+ hash = ObjectSynchronizer::get_next_hash(current, obj);
+ new_mark = old_mark.copy_set_hash(hash);
+ }
+
+ mark = obj->cas_set_mark(new_mark, old_mark);
+ if (old_mark == mark) {
+ return hash;
+ }
+ }
+}
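Note the ordering in the not_hashed_expanded -> hashed_expanded transition above: the hash field is written with a plain store before the mark CAS publishes the new state. A reader-side sketch of why that is sufficient, assuming cas_set_mark's default conservative ordering (at least a release for the preceding store):

    static uint32_t read_ihash_sketch(oop obj, Klass* klass) {
      markWord m = obj->mark_acquire();
      assert(m.is_hashed_expanded(), "caller checked");
      // The writer performed: int_field_put(offset, hash); then cas_set_mark(...).
      // The CAS orders the field store before the mark update, so once the 11
      // state is visible here, the field below holds the installed hash.
      return (uint32_t)obj->int_field(klass->hash_offset_in_bytes(obj));
    }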
diff --git a/src/hotspot/share/runtime/lightweightSynchronizer.hpp b/src/hotspot/share/runtime/lightweightSynchronizer.hpp
index fdd753e9b9c..5786e282ea6 100644
--- a/src/hotspot/share/runtime/lightweightSynchronizer.hpp
+++ b/src/hotspot/share/runtime/lightweightSynchronizer.hpp
@@ -75,6 +75,12 @@ class LightweightSynchronizer : AllStatic {
static bool contains_monitor(Thread* current, ObjectMonitor* monitor);
static bool quick_enter(oop obj, BasicLock* Lock, JavaThread* current);
+
+ // NOTE: must not cause monitor inflation.
+ static uint32_t get_hash(markWord mark, oop obj);
+ // For CDS path.
+ static uint32_t get_hash(markWord mark, oop obj, Klass* klass);
+ static intptr_t FastHashCode(Thread* current, oop obj);
};
#endif // SHARE_RUNTIME_LIGHTWEIGHTSYNCHRONIZER_HPP
diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp
index 941309ca2ac..f956c3eaa29 100644
--- a/src/hotspot/share/runtime/synchronizer.cpp
+++ b/src/hotspot/share/runtime/synchronizer.cpp
@@ -64,6 +64,7 @@
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/fastHash.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/preserveException.hpp"
@@ -932,7 +933,7 @@ static markWord read_stable_mark(oop obj) {
// There are simple ways to "diffuse" the middle address bits over the
// generated hashCode values:
-static intptr_t get_next_hash(Thread* current, oop obj) {
+intptr_t ObjectSynchronizer::get_next_hash(Thread* current, oop obj) {
intptr_t value = 0;
if (hashCode == 0) {
// This form uses global Park-Miller RNG.
@@ -951,7 +952,7 @@ static intptr_t get_next_hash(Thread* current, oop obj) {
value = ++GVars.hc_sequence;
} else if (hashCode == 4) {
value = cast_from_oop<intptr_t>(obj);
- } else {
+ } else if (hashCode == 5) {
// Marsaglia's xor-shift scheme with thread-specific state
// This is probably the best overall implementation -- we'll
// likely make this the default in future releases.
@@ -964,11 +965,21 @@ static intptr_t get_next_hash(Thread* current, oop obj) {
v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
current->_hashStateW = v;
value = v;
+ } else {
+ assert(UseCompactObjectHeaders, "Only with compact i-hash");
+#ifdef _LP64
+ uint64_t val = cast_from_oop<uint64_t>(obj);
+ uint32_t hash = FastHash::get_hash32((uint32_t)val, (uint32_t)(val >> 32));
+#else
+ uint32_t val = cast_from_oop<uint32_t>(obj);
+ uint32_t hash = FastHash::get_hash32(val, UCONST64(0xAAAAAAAA));
+#endif
+ value = static_cast<intptr_t>(hash);
}
value &= markWord::hash_mask;
- if (value == 0) value = 0xBAD;
- assert(value != markWord::no_hash, "invariant");
+ if (hashCode != 6 && value == 0) value = 0xBAD;
+ assert(value != markWord::no_hash || hashCode == 6, "invariant");
return value;
}
@@ -977,18 +988,38 @@ static intptr_t install_hash_code(Thread* current, oop obj) {
markWord mark = obj->mark_acquire();
for (;;) {
- intptr_t hash = mark.hash();
- if (hash != 0) {
- return hash;
- }
+ if (UseCompactObjectHeaders) {
+ if (mark.is_hashed()) {
+ return LightweightSynchronizer::get_hash(mark, obj);
+ }
+ intptr_t hash = ObjectSynchronizer::get_next_hash(current, obj); // get a new hash
+ markWord new_mark;
+ if (mark.is_not_hashed_expanded()) {
+ new_mark = mark.set_hashed_expanded();
+ int offset = mark.klass()->hash_offset_in_bytes(obj);
+ obj->int_field_put(offset, (jint) hash);
+ } else {
+ new_mark = mark.set_hashed_not_expanded();
+ }
+ markWord old_mark = obj->cas_set_mark(new_mark, mark);
+ if (old_mark == mark) {
+ return hash;
+ }
+ mark = old_mark;
+ } else {
+ intptr_t hash = mark.hash();
+ if (hash != 0) {
+ return hash;
+ }
- hash = get_next_hash(current, obj);
- const markWord old_mark = mark;
- const markWord new_mark = old_mark.copy_set_hash(hash);
+ hash = ObjectSynchronizer::get_next_hash(current, obj);
+ const markWord old_mark = mark;
+ const markWord new_mark = old_mark.copy_set_hash(hash);
- mark = obj->cas_set_mark(new_mark, old_mark);
- if (old_mark == mark) {
- return hash;
+ mark = obj->cas_set_mark(new_mark, old_mark);
+ if (old_mark == mark) {
+ return hash;
+ }
}
}
}
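The special-casing of hashCode == 6 at the end of get_next_hash() is what makes the hashed-not-expanded state workable: the hash must be a pure, stable function of the current object address, and 0 may not be remapped away. A sketch of the invariant:

    static void check_ihash_idempotent(Thread* t, oop obj) {
      assert(hashCode == 6, "only the address-based i-hash mode is idempotent");
      intptr_t h1 = ObjectSynchronizer::get_next_hash(t, obj);
      intptr_t h2 = ObjectSynchronizer::get_next_hash(t, obj);
      // Same address in, same hash out -- and no 0 -> 0xBAD remapping, which
      // would otherwise have to be replayed consistently on every recomputation.
      assert(h1 == h2, "i-hash must be recomputable until the object moves");
    }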
diff --git a/src/hotspot/share/runtime/synchronizer.hpp b/src/hotspot/share/runtime/synchronizer.hpp
index d3d1f4ee4ce..81e56b9f850 100644
--- a/src/hotspot/share/runtime/synchronizer.hpp
+++ b/src/hotspot/share/runtime/synchronizer.hpp
@@ -209,6 +209,8 @@ class ObjectSynchronizer : AllStatic {
static void do_final_audit_and_print_stats();
static void log_in_use_monitor_details(outputStream* out, bool log_all);
+ static intptr_t get_next_hash(Thread* current, oop obj);
+
private:
friend class SynchronizerTest;
friend class LightweightSynchronizer;
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index eaf259cedba..c871197da38 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -248,6 +248,7 @@
volatile_nonstatic_field(InstanceKlass, _init_thread, JavaThread*) \
nonstatic_field(InstanceKlass, _itable_len, int) \
nonstatic_field(InstanceKlass, _nest_host_index, u2) \
+ nonstatic_field(InstanceKlass, _hash_offset, int) \
nonstatic_field(InstanceKlass, _reference_type, u1) \
volatile_nonstatic_field(InstanceKlass, _oop_map_cache, OopMapCache*) \
nonstatic_field(InstanceKlass, _jni_ids, JNIid*) \
@@ -2511,10 +2512,12 @@
declare_constant(markWord::lock_bits) \
declare_constant(markWord::max_hash_bits) \
declare_constant(markWord::hash_bits) \
+ declare_constant(markWord::hashctrl_bits) \
\
declare_constant(markWord::lock_shift) \
declare_constant(markWord::age_shift) \
declare_constant(markWord::hash_shift) \
+ declare_constant(markWord::hashctrl_shift) \
LP64_ONLY(declare_constant(markWord::klass_shift)) \
\
declare_constant(markWord::lock_mask) \
@@ -2523,6 +2526,8 @@
declare_constant(markWord::age_mask_in_place) \
declare_constant(markWord::hash_mask) \
declare_constant(markWord::hash_mask_in_place) \
+ declare_constant(markWord::hashctrl_mask) \
+ declare_constant(markWord::hashctrl_mask_in_place) \
\
declare_constant(markWord::locked_value) \
declare_constant(markWord::unlocked_value) \
diff --git a/src/hotspot/share/utilities/resourceHash.hpp b/src/hotspot/share/utilities/resourceHash.hpp
index a99239b21a0..852c52a0728 100644
--- a/src/hotspot/share/utilities/resourceHash.hpp
+++ b/src/hotspot/share/utilities/resourceHash.hpp
@@ -178,6 +178,7 @@ class ResourceHashtableBase : public STORAGE {
} else {
*ptr = new Node(hv, key, value);
}
+ assert(*ptr != nullptr, "allocation failed");
_number_of_entries ++;
return true;
}
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java
index c4eeaf4a367..f9e200ed352 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java
@@ -104,6 +104,13 @@ public long getObjectSize() {
// object size.
long s = getLength() << klass.getLog2ElementSize();
s += klass.getArrayHeaderInBytes();
+ if (VM.getVM().isCompactObjectHeadersEnabled()) {
+ Mark mark = getMark();
+ if (mark.isCopiedHash()) {
+ // Needs extra 4 bytes for identity hash-code.
+ s += 4;
+ }
+ }
s = Oop.alignObjectSize(s);
return s;
}
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java
index 0f05b15af76..4bd20eaecc6 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java
@@ -84,6 +84,7 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc
initState = new CIntField(type.getCIntegerField("_init_state"), 0);
itableLen = new CIntField(type.getCIntegerField("_itable_len"), 0);
nestHostIndex = new CIntField(type.getCIntegerField("_nest_host_index"), 0);
+ hashOffset = new CIntField(type.getCIntegerField("_hash_offset"), 0);
if (VM.getVM().isJvmtiSupported()) {
breakpoints = type.getAddressField("_breakpoints");
}
@@ -150,6 +151,7 @@ public InstanceKlass(Address addr) {
private static CIntField initState;
private static CIntField itableLen;
private static CIntField nestHostIndex;
+ private static CIntField hashOffset;
private static AddressField breakpoints;
// type safe enum for ClassState from instanceKlass.hpp
@@ -240,7 +242,15 @@ public int getClassStatus() {
private static long headerSize;
public long getObjectSize(Oop object) {
- return getSizeHelper() * VM.getVM().getAddressSize();
+ long baseSize = getSizeHelper() * VM.getVM().getAddressSize();
+ if (VM.getVM().isCompactObjectHeadersEnabled()) {
+ Mark mark = object.getMark();
+ if (mark.isCopiedHash() && (getHashOffset() + 4 /* size of hash field */) > baseSize) {
+ // Needs extra word for identity hash-code.
+ return baseSize + VM.getVM().getBytesPerWord();
+ }
+ }
+ return baseSize;
}
public long getSize() { // in number of bytes
@@ -374,6 +384,7 @@ public int getAllFieldsCount() {
public long getNonstaticOopMapSize() { return nonstaticOopMapSize.getValue(this); }
public long getItableLen() { return itableLen.getValue(this); }
public short getNestHostIndex() { return (short) nestHostIndex.getValue(this); }
+ public long getHashOffset() { return hashOffset.getValue(this); }
public long majorVersion() { return getConstants().majorVersion(); }
public long minorVersion() { return getConstants().minorVersion(); }
public Symbol getGenericSignature() { return getConstants().getGenericSignature(); }
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceMirrorKlass.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceMirrorKlass.java
index cfd85938f7d..486b8723215 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceMirrorKlass.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceMirrorKlass.java
@@ -55,7 +55,15 @@ public InstanceMirrorKlass(Address addr) {
}
public long getObjectSize(Oop o) {
- return java_lang_Class.getOopSize(o) * VM.getVM().getAddressSize();
+ long s = java_lang_Class.getOopSize(o) * VM.getVM().getAddressSize();
+ if (VM.getVM().isCompactObjectHeadersEnabled()) {
+ Mark mark = o.getMark();
+ if (mark.isCopiedHash()) {
+ // Needs extra 4 bytes for identity hash-code (and align-up to whole word).
+ s += VM.getVM().getAddressSize();
+ }
+ }
+ return s;
}
public void iterateNonStaticFields(OopVisitor visitor, Oop obj) {
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java
index edffd56d2cd..3754f3a1053 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java
@@ -51,9 +51,11 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc
lockBits = db.lookupLongConstant("markWord::lock_bits").longValue();
maxHashBits = db.lookupLongConstant("markWord::max_hash_bits").longValue();
hashBits = db.lookupLongConstant("markWord::hash_bits").longValue();
+ hashCtrlBits = db.lookupLongConstant("markWord::hashctrl_bits").longValue();
lockShift = db.lookupLongConstant("markWord::lock_shift").longValue();
ageShift = db.lookupLongConstant("markWord::age_shift").longValue();
hashShift = db.lookupLongConstant("markWord::hash_shift").longValue();
+ hashCtrlShift = db.lookupLongConstant("markWord::hashctrl_shift").longValue();
if (VM.getVM().isLP64()) {
klassShift = db.lookupLongConstant("markWord::klass_shift").longValue();
}
@@ -63,6 +65,8 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc
ageMaskInPlace = db.lookupLongConstant("markWord::age_mask_in_place").longValue();
hashMask = db.lookupLongConstant("markWord::hash_mask").longValue();
hashMaskInPlace = db.lookupLongConstant("markWord::hash_mask_in_place").longValue();
+ hashCtrlMask = db.lookupLongConstant("markWord::hashctrl_mask").longValue();
+ hashCtrlMaskInPlace = db.lookupLongConstant("markWord::hashctrl_mask_in_place").longValue();
lockedValue = db.lookupLongConstant("markWord::locked_value").longValue();
unlockedValue = db.lookupLongConstant("markWord::unlocked_value").longValue();
monitorValue = db.lookupLongConstant("markWord::monitor_value").longValue();
@@ -81,10 +85,12 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc
private static long lockBits;
private static long maxHashBits;
private static long hashBits;
+ private static long hashCtrlBits;
private static long lockShift;
private static long ageShift;
private static long hashShift;
+ private static long hashCtrlShift;
private static long klassShift;
private static long lockMask;
@@ -93,6 +99,8 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc
private static long ageMaskInPlace;
private static long hashMask;
private static long hashMaskInPlace;
+ private static long hashCtrlMask;
+ private static long hashCtrlMaskInPlace;
private static long lockedValue;
private static long unlockedValue;
@@ -192,13 +200,22 @@ public Mark displacedMarkHelper() {
// hash operations
public long hash() {
- return Bits.maskBitsLong(value() >> hashShift, hashMask);
+ if (VM.getVM().isCompactObjectHeadersEnabled()) {
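+ // Under compact headers the mark word no longer carries the hash bits;
+ // a copied hash would have to be read from the object body instead.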
+ throw new RuntimeException("Compact I-Hash not yet implemented");
+ } else {
+ return Bits.maskBitsLong(value() >> hashShift, hashMask);
+ }
}
public boolean hasNoHash() {
return hash() == noHash;
}
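+ // A hashctrl field value of 2 indicates that the identity hash has been
+ // computed and copied into the object body.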
+ public boolean isCopiedHash() {
+ assert(VM.getVM().isCompactObjectHeadersEnabled());
+ return (Bits.maskBitsLong(value(), hashCtrlMaskInPlace) >> hashCtrlShift) == 2;
+ }
+
public Klass getKlass() {
assert(VM.getVM().isCompactObjectHeadersEnabled());
return (Klass)Metadata.instantiateWrapperFor(addr.getCompKlassAddressAt(0));
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java
index 825b1bf1437..b5b618cbf24 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java
@@ -126,12 +126,15 @@ public boolean equals(Object obj) {
return getHandle().equals(other.getHandle());
}
return false;
- }
+ }
public int hashCode() { return getHandle().hashCode(); }
/** Identity hash in the target VM */
public long identityHash() {
+ if (VM.getVM().isCompactObjectHeadersEnabled()) {
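+ // With compact headers the hash may live in the object body rather than
+ // in the mark word; the SA cannot read it from there yet.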
+ throw new InternalError("Not yet implemented");
+ }
Mark mark = getMark();
if (mark.isUnlocked() && (!mark.hasNoHash())) {
return (int) mark.hash();
diff --git a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp
index 29352ebdaa4..bed9f070aba 100644
--- a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp
+++ b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp
@@ -28,7 +28,7 @@
#include "unittest.hpp"
static markWord originalMark() { return markWord(markWord::lock_mask_in_place); }
-static markWord changedMark() { return markWord(0x4711); }
+static markWord changedMark() { return markWord(0x4712); }
#define ASSERT_MARK_WORD_EQ(a, b) ASSERT_EQ((a).value(), (b).value())
diff --git a/test/hotspot/gtest/oops/test_markWord.cpp b/test/hotspot/gtest/oops/test_markWord.cpp
index 36abcce3e44..1a8a00d218b 100644
--- a/test/hotspot/gtest/oops/test_markWord.cpp
+++ b/test/hotspot/gtest/oops/test_markWord.cpp
@@ -92,7 +92,7 @@ TEST_VM(markWord, printing) {
// Hash the object then print it.
intx hash = h_obj->identity_hash();
- assert_test_pattern(h_obj, "is_unlocked hash=0x");
+ assert_test_pattern(h_obj, "is_unlocked hash is-hashed=true is-copied=false");
// Wait gets the lock inflated.
{
diff --git a/test/hotspot/jtreg/runtime/cds/DeterministicDump.java b/test/hotspot/jtreg/runtime/cds/DeterministicDump.java
index 92643d648b7..832d40c4e13 100644
--- a/test/hotspot/jtreg/runtime/cds/DeterministicDump.java
+++ b/test/hotspot/jtreg/runtime/cds/DeterministicDump.java
@@ -74,6 +74,8 @@ public static void doTest(boolean compressed) throws Exception {
// This option is available only on 64-bit.
String sign = (compressed) ? "+" : "-";
baseArgs.add("-XX:" + sign + "UseCompressedOops");
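+ // Pin the (experimental) compact object headers off so that every
+ // compared dump is produced with the same object layout.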
+ baseArgs.add("-XX:+UnlockExperimentalVMOptions");
+ baseArgs.add("-XX:-UseCompactObjectHeaders");
}
String baseArchive = dump(baseArgs);