From 878ba487e8693492f6c614da16e2e510740fff59 Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 15 May 2024 10:57:02 +0000 Subject: [PATCH] 19 bit tiny classpointers --- .../cpu/aarch64/c2_MacroAssembler_aarch64.cpp | 2 +- src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp | 2 +- src/hotspot/share/oops/compressedKlass.cpp | 9 +++++++++ src/hotspot/share/oops/compressedKlass.hpp | 2 +- src/hotspot/share/oops/markWord.cpp | 6 ------ src/hotspot/share/oops/markWord.hpp | 12 ++---------- src/hotspot/share/oops/oop.hpp | 12 ++---------- src/hotspot/share/opto/compile.cpp | 4 ++-- src/hotspot/share/opto/doCall.cpp | 4 ++-- src/hotspot/share/opto/escape.cpp | 4 ++-- src/hotspot/share/opto/graphKit.cpp | 4 ++-- src/hotspot/share/opto/macro.cpp | 4 ++-- src/hotspot/share/opto/memnode.cpp | 10 +++++----- src/hotspot/share/opto/parse1.cpp | 2 +- src/hotspot/share/opto/parse2.cpp | 2 +- src/hotspot/share/opto/parseHelper.cpp | 2 +- src/hotspot/share/opto/subnode.cpp | 2 +- src/hotspot/share/opto/subtypenode.cpp | 4 ++-- src/hotspot/share/opto/type.cpp | 4 ++-- src/hotspot/share/opto/type.hpp | 13 ++++++++++++- src/hotspot/share/runtime/globals.hpp | 2 +- .../CompressedOops/CompressedClassSpaceSize.java | 13 +------------ .../runtime/locking/TestRecursiveMonitorChurn.java | 2 +- 23 files changed, 54 insertions(+), 67 deletions(-) diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp index dc454faa79c..57cea805232 100644 --- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp @@ -2572,7 +2572,7 @@ void C2_MacroAssembler::load_nklass_compact(Register dst, Register obj, Register // emits code that pre-computes obj-start + klass_offset_in_bytes into a register, and // then passes that register as obj and 0 in disp. The following code extracts the base // and offset to load the mark-word. 
- int offset = oopDesc::mark_offset_in_bytes() + disp - oopDesc::klass_offset_in_bytes(); + int offset = oopDesc::mark_offset_in_bytes() + disp - Type::klass_offset(); if (index == noreg) { ldr(dst, Address(obj, offset)); } else { diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp index df3538badf8..c4733b193ff 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp @@ -6407,7 +6407,7 @@ void C2_MacroAssembler::load_nklass_compact_c2(Register dst, Register obj, Regis // emits code that pre-computes obj-start + klass_offset_in_bytes into a register, and // then passes that register as obj and 0 in disp. The following code extracts the base // and offset to load the mark-word. - int offset = oopDesc::mark_offset_in_bytes() + disp - oopDesc::klass_offset_in_bytes(); + int offset = oopDesc::mark_offset_in_bytes() + disp - Type::klass_offset(); movq(dst, Address(obj, index, scale, offset)); shrq(dst, markWord::klass_shift); } diff --git a/src/hotspot/share/oops/compressedKlass.cpp b/src/hotspot/share/oops/compressedKlass.cpp index afb76e39bb9..3c931a25e25 100644 --- a/src/hotspot/share/oops/compressedKlass.cpp +++ b/src/hotspot/share/oops/compressedKlass.cpp @@ -84,6 +84,10 @@ void CompressedKlassPointers::sanity_check_after_initialization() { #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp); #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp); + // There is no technical reason preventing us from using other klass pointer bit lengths, + // but it should be a deliberate choice + ASSERT_HERE(_narrow_klass_pointer_bits == 32 || _narrow_klass_pointer_bits == 19); + // All values must be inited ASSERT_HERE(_max_shift != -1); ASSERT_HERE(_klass_range_start != (address)-1); @@ -223,6 +227,11 @@ void CompressedKlassPointers::initialize(address addr, size_t len) { if (tiny_classpointer_mode()) { + // This handles the case that we - experimentally - 
reduce the number of + // class pointer bits further, such that (shift + num bits) < 32. + assert(len <= (size_t)nth_bit(narrow_klass_pointer_bits() + max_shift()), + "klass range size exceeds encoding, len: " SIZE_FORMAT ", narrow_klass_pointer_bits: %d, max_shift: %d", len, narrow_klass_pointer_bits(), max_shift()); + // In tiny classpointer mode, we don't attempt for zero-based mode. // Instead, we set the base to the start of the klass range and then try // for the smallest shift possible that still covers the whole range. diff --git a/src/hotspot/share/oops/compressedKlass.hpp b/src/hotspot/share/oops/compressedKlass.hpp index d9f36d79ea0..69bd7012153 100644 --- a/src/hotspot/share/oops/compressedKlass.hpp +++ b/src/hotspot/share/oops/compressedKlass.hpp @@ -48,7 +48,7 @@ class CompressedKlassPointers : public AllStatic { // Narrow klass pointer bits for an unshifted narrow Klass pointer. static constexpr int narrow_klass_pointer_bits_legacy = 32; - static constexpr int narrow_klass_pointer_bits_tinycp = 22; + static constexpr int narrow_klass_pointer_bits_tinycp = 19; static int _narrow_klass_pointer_bits; diff --git a/src/hotspot/share/oops/markWord.cpp b/src/hotspot/share/oops/markWord.cpp index 0a7699afa54..8d57ecfbcc2 100644 --- a/src/hotspot/share/oops/markWord.cpp +++ b/src/hotspot/share/oops/markWord.cpp @@ -29,12 +29,6 @@ #include "runtime/objectMonitor.inline.hpp" #include "utilities/ostream.hpp" -#ifdef _LP64 -STATIC_ASSERT((markWord::klass_shadow_mask_inplace & markWord::klass_mask_in_place) == 0); -STATIC_ASSERT((markWord::klass_load_shift + markWord::klass_shadow_bits) == markWord::klass_shift); -STATIC_ASSERT(markWord::klass_shift + markWord::klass_bits == 64); -#endif - markWord markWord::displaced_mark_helper() const { assert(has_displaced_mark_helper(), "check"); if (has_monitor()) { diff --git a/src/hotspot/share/oops/markWord.hpp b/src/hotspot/share/oops/markWord.hpp index 2eab764c7ba..3e592acc89d 100644 --- 
a/src/hotspot/share/oops/markWord.hpp +++ b/src/hotspot/share/oops/markWord.hpp @@ -160,17 +160,9 @@ class markWord { - // We store nKlass in the upper 22 bits of the markword. When extracting, we need to read the upper - // 32 bits and rightshift by the lower 10 foreign bits. + // We store the nKlass in bits [13, 32) of the markword: 19 bits, above the 13 foreign lower bits. + // When extracting, we right-shift by klass_shift and mask with the 19-bit klass_mask. - // These are for loading the nKlass with a 32-bit load and subsequent masking of the lower - // shadow bits - static constexpr int klass_load_shift = 32; - static constexpr int klass_load_bits = 32; - static constexpr int klass_shadow_bits = 10; - static constexpr uintptr_t klass_shadow_mask = right_n_bits(klass_shadow_bits); - static constexpr uintptr_t klass_shadow_mask_inplace = klass_shadow_mask << klass_load_shift; - // These are for bit-precise extraction of the nKlass from the 64-bit Markword - static constexpr int klass_shift = 42; - static constexpr int klass_bits = 22; + static constexpr int klass_shift = 13; + static constexpr int klass_bits = 19; static constexpr uintptr_t klass_mask = right_n_bits(klass_bits); static constexpr uintptr_t klass_mask_in_place = klass_mask << klass_shift; #endif diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp index 547368caaa3..9970c903e65 100644 --- a/src/hotspot/share/oops/oop.hpp +++ b/src/hotspot/share/oops/oop.hpp @@ -359,16 +359,8 @@ class oopDesc { // for code generation static int mark_offset_in_bytes() { return (int)offset_of(oopDesc, _mark); } static int klass_offset_in_bytes() { -#ifdef _LP64 - if (UseCompactObjectHeaders) { - constexpr int load_shift = markWord::klass_load_shift; - STATIC_ASSERT(load_shift % 8 == 0); - return mark_offset_in_bytes() + load_shift / 8; - } else -#endif - { - return (int)offset_of(oopDesc, _metadata._klass); - } + assert(!UseCompactObjectHeaders, "don't use klass_offset_in_bytes with compact headers"); + return (int)offset_of(oopDesc, _metadata._klass); } static int klass_gap_offset_in_bytes() { assert(has_klass_gap(), "only applicable to compressed klass pointers"); diff --git 
a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index fbfa37b4a99..c3cc7a3b224 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -1362,7 +1362,7 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const { } else if( offset == arrayOopDesc::length_offset_in_bytes() ) { // range is OK as-is. tj = ta = TypeAryPtr::RANGE; - } else if( offset == oopDesc::klass_offset_in_bytes() ) { + } else if( offset == Type::klass_offset() ) { tj = TypeInstPtr::KLASS; // all klass loads look alike ta = TypeAryPtr::RANGE; // generic ignored junk ptr = TypePtr::BotPTR; @@ -1529,7 +1529,7 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const { (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) || (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) || (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) || - (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) || + (offset == Type::klass_offset() && tj->base() == Type::AryPtr) || (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr), "For oops, klasses, raw offset must be constant; for arrays the offset is never known" ); assert( tj->ptr() != TypePtr::TopPTR && diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp index 0a5e27ed5b1..c2f9874821e 100644 --- a/src/hotspot/share/opto/doCall.cpp +++ b/src/hotspot/share/opto/doCall.cpp @@ -942,7 +942,7 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) { // Get the exception oop klass from its header Node* ex_klass_node = nullptr; if (has_exception_handler() && !ex_type->klass_is_exact()) { - Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes()); + Node* p = basic_plus_adr( ex_node, ex_node, Type::klass_offset()); ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT)); // 
Compute the exception klass a little more cleverly. @@ -960,7 +960,7 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) { ex_klass_node->init_req(i, top()); continue; } - Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes()); + Node* p = basic_plus_adr(ex_in, ex_in, Type::klass_offset()); Node* k = _gvn.transform( LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT)); ex_klass_node->init_req( i, k ); } diff --git a/src/hotspot/share/opto/escape.cpp b/src/hotspot/share/opto/escape.cpp index 8a80392d5c7..3305c01c089 100644 --- a/src/hotspot/share/opto/escape.cpp +++ b/src/hotspot/share/opto/escape.cpp @@ -3375,7 +3375,7 @@ bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { bt = T_OBJECT; } } - } else if (offset != oopDesc::klass_offset_in_bytes()) { + } else if (offset != Type::klass_offset()) { if (adr_type->isa_instptr()) { ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); if (field != nullptr) { @@ -4374,7 +4374,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, // the header emitted during macro expansion wouldn't have // correct memory state otherwise. 
_compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes())); - _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes())); + _compile->get_alias_index(tinst->add_offset(Type::klass_offset())); if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { // First, put on the worklist all Field edges from Connection Graph diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp index 134d21e5bad..6b3de3b49ac 100644 --- a/src/hotspot/share/opto/graphKit.cpp +++ b/src/hotspot/share/opto/graphKit.cpp @@ -1198,7 +1198,7 @@ Node* GraphKit::load_object_klass(Node* obj) { // Special-case a fresh allocation to avoid building nodes: Node* akls = AllocateNode::Ideal_klass(obj, &_gvn); if (akls != nullptr) return akls; - Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes()); + Node* k_adr = basic_plus_adr(obj, Type::klass_offset()); return _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), k_adr, TypeInstPtr::KLASS)); } @@ -3661,7 +3661,7 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc, // Add an edge in the MergeMem for the header fields so an access // to one of those has correct memory state set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes()))); - set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes()))); + set_memory(minit_out, C->get_alias_index(oop_type->add_offset(Type::klass_offset()))); if (oop_type->isa_aryptr()) { const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot); int elemidx = C->get_alias_index(telemref); diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index 90b1aaf2c3f..c245be84c5e 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -1707,7 +1707,7 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc, rawmem = make_store(control, rawmem, object, 
oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type()); if (!UseCompactObjectHeaders) { - rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA); + rawmem = make_store(control, rawmem, object, Type::klass_offset(), klass_node, T_METADATA); } int header_size = alloc->minimum_header_size(); // conservatively small @@ -2334,7 +2334,7 @@ void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) { if (_igvn.type(obj_or_subklass)->isa_klassptr()) { subklass = obj_or_subklass; } else { - Node* k_adr = basic_plus_adr(obj_or_subklass, oopDesc::klass_offset_in_bytes()); + Node* k_adr = basic_plus_adr(obj_or_subklass, Type::klass_offset()); subklass = _igvn.transform(LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), k_adr, TypeInstPtr::KLASS)); } diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp index 580fc85dbb2..fb8472e6540 100644 --- a/src/hotspot/share/opto/memnode.cpp +++ b/src/hotspot/share/opto/memnode.cpp @@ -260,7 +260,7 @@ static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const T tp->isa_aryptr() && tp->offset() == Type::OffsetBot && adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot && ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() || - adr_check->offset() == oopDesc::klass_offset_in_bytes() || + adr_check->offset() == Type::klass_offset() || adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) { // don't assert if it is dead code. 
consistent = true; @@ -922,7 +922,7 @@ Node* LoadNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypeP // sanity check the alias category against the created node type assert(!(adr_type->isa_oopptr() && - adr_type->offset() == oopDesc::klass_offset_in_bytes()), + adr_type->offset() == Type::klass_offset()), "use LoadKlassNode instead"); assert(!(adr_type->isa_aryptr() && adr_type->offset() == arrayOopDesc::length_offset_in_bytes()), @@ -2432,7 +2432,7 @@ const Type* LoadNode::klass_value_common(PhaseGVN* phase) const { } if (!tinst->is_loaded()) return _type; // Bail out if not loaded - if (offset == oopDesc::klass_offset_in_bytes()) { + if (offset == Type::klass_offset()) { return tinst->as_klass_type(true); } } @@ -2440,7 +2440,7 @@ const Type* LoadNode::klass_value_common(PhaseGVN* phase) const { // Check for loading klass from an array const TypeAryPtr *tary = tp->isa_aryptr(); if (tary != nullptr && - tary->offset() == oopDesc::klass_offset_in_bytes()) { + tary->offset() == Type::klass_offset()) { return tary->as_klass_type(true); } @@ -2506,7 +2506,7 @@ Node* LoadNode::klass_identity_common(PhaseGVN* phase) { // We can fetch the klass directly through an AllocateNode. // This works even if the klass is not constant (clone or newArray). - if (offset == oopDesc::klass_offset_in_bytes()) { + if (offset == Type::klass_offset()) { Node* allocated_klass = AllocateNode::Ideal_klass(base, phase); if (allocated_klass != nullptr) { return allocated_klass; diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp index 3989020451e..90870dda2a0 100644 --- a/src/hotspot/share/opto/parse1.cpp +++ b/src/hotspot/share/opto/parse1.cpp @@ -2127,7 +2127,7 @@ void Parse::call_register_finalizer() { // Insert a dynamic test for whether the instance needs // finalization. In general this will fold up since the concrete // class is often visible so the access flags are constant. 
- Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() ); + Node* klass_addr = basic_plus_adr( receiver, receiver, Type::klass_offset() ); Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), klass_addr, TypeInstPtr::KLASS)); Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset())); diff --git a/src/hotspot/share/opto/parse2.cpp b/src/hotspot/share/opto/parse2.cpp index d43b8335ffd..51af0e497f5 100644 --- a/src/hotspot/share/opto/parse2.cpp +++ b/src/hotspot/share/opto/parse2.cpp @@ -1651,7 +1651,7 @@ static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) { Node* adr = ldk->in(MemNode::Address); intptr_t off = 0; Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off); - if (obj == nullptr || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass? + if (obj == nullptr || off != Type::klass_offset()) // loading oopDesc::_klass? return nullptr; const TypePtr* tp = gvn->type(obj)->is_ptr(); if (tp == nullptr || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr? 
diff --git a/src/hotspot/share/opto/parseHelper.cpp b/src/hotspot/share/opto/parseHelper.cpp index ba4cc612cc3..1660b604602 100644 --- a/src/hotspot/share/opto/parseHelper.cpp +++ b/src/hotspot/share/opto/parseHelper.cpp @@ -154,7 +154,7 @@ void Parse::array_store_check() { } // Extract the array klass type - int klass_offset = oopDesc::klass_offset_in_bytes(); + int klass_offset = Type::klass_offset(); Node* p = basic_plus_adr( ary, ary, klass_offset ); // p's type is array-of-OOPS plus klass_offset Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeInstPtr::KLASS)); diff --git a/src/hotspot/share/opto/subnode.cpp b/src/hotspot/share/opto/subnode.cpp index 7eb7922c5fb..cf5d3633992 100644 --- a/src/hotspot/share/opto/subnode.cpp +++ b/src/hotspot/share/opto/subnode.cpp @@ -1161,7 +1161,7 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) { Node* ldk2 = AddPNode::Ideal_base_and_offset(adr1, phase, con2); if (ldk2 == nullptr) return nullptr; - if (con2 == oopDesc::klass_offset_in_bytes()) { + if (con2 == Type::klass_offset()) { // We are inspecting an object's concrete class. // Short-circuit the check if the query is abstract. 
if (superklass->is_interface() || diff --git a/src/hotspot/share/opto/subtypenode.cpp b/src/hotspot/share/opto/subtypenode.cpp index 77e46614f21..2dcba77f6ae 100644 --- a/src/hotspot/share/opto/subtypenode.cpp +++ b/src/hotspot/share/opto/subtypenode.cpp @@ -94,7 +94,7 @@ Node *SubTypeCheckNode::Ideal(PhaseGVN* phase, bool can_reshape) { if (addr != nullptr) { intptr_t con = 0; Node* obj = AddPNode::Ideal_base_and_offset(addr, phase, con); - if (con == oopDesc::klass_offset_in_bytes() && obj != nullptr) { + if (con == Type::klass_offset() && obj != nullptr) { assert(is_oop(phase, obj), "only for oop input"); set_req_X(ObjOrSubKlass, obj, phase); return this; @@ -218,7 +218,7 @@ Node* SubTypeCheckNode::load_klass(PhaseGVN* phase) const { const Type* sub_t = phase->type(obj_or_subklass); Node* subklass = nullptr; if (sub_t->isa_oopptr()) { - Node* adr = phase->transform(new AddPNode(obj_or_subklass, obj_or_subklass, phase->MakeConX(oopDesc::klass_offset_in_bytes()))); + Node* adr = phase->transform(new AddPNode(obj_or_subklass, obj_or_subklass, phase->MakeConX(Type::klass_offset()))); subklass = phase->transform(LoadKlassNode::make(*phase, nullptr, phase->C->immutable_memory(), adr, TypeInstPtr::KLASS)); record_for_cleanup(subklass, phase); } else { diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp index 690107f169c..c78d799603b 100644 --- a/src/hotspot/share/opto/type.cpp +++ b/src/hotspot/share/opto/type.cpp @@ -556,7 +556,7 @@ void Type::Initialize_shared(Compile* current) { TypeInstPtr::MARK = TypeInstPtr::make(TypePtr::BotPTR, current->env()->Object_klass(), false, 0, oopDesc::mark_offset_in_bytes()); TypeInstPtr::KLASS = TypeInstPtr::make(TypePtr::BotPTR, current->env()->Object_klass(), - false, 0, oopDesc::klass_offset_in_bytes()); + false, 0, Type::klass_offset()); TypeOopPtr::BOTTOM = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot, TypeOopPtr::InstanceBot); TypeMetadataPtr::BOTTOM = TypeMetadataPtr::make(TypePtr::BotPTR, nullptr, 
OffsetBot); @@ -3530,7 +3530,7 @@ TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, const TypeInterfaces* inter } #ifdef _LP64 if (_offset > 0 || _offset == Type::OffsetTop || _offset == Type::OffsetBot) { - if (_offset == oopDesc::klass_offset_in_bytes()) { + if (_offset == Type::klass_offset()) { _is_ptr_to_narrowklass = UseCompressedClassPointers; } else if (klass() == nullptr) { // Array with unknown body type diff --git a/src/hotspot/share/opto/type.hpp b/src/hotspot/share/opto/type.hpp index b9883d51391..3dd2feec70a 100644 --- a/src/hotspot/share/opto/type.hpp +++ b/src/hotspot/share/opto/type.hpp @@ -192,6 +192,17 @@ class Type { public: + // This is used as a marker to identify narrow Klass* loads, which + // are really extracted from the mark-word, but we still want to + // distinguish it. + static int klass_offset() { + if (UseCompactObjectHeaders) { + return 1; + } else { + return oopDesc::klass_offset_in_bytes(); + } + } + inline void* operator new( size_t x ) throw() { Compile* compile = Compile::current(); compile->set_type_last_size(x); @@ -1404,7 +1415,7 @@ class TypeAryPtr : public TypeOopPtr { if (UseCompressedOops && (elem()->make_oopptr() != nullptr && !top_or_bottom) && _offset != 0 && _offset != arrayOopDesc::length_offset_in_bytes() && - _offset != arrayOopDesc::klass_offset_in_bytes()) { + _offset != Type::klass_offset()) { _is_ptr_to_narrowoop = true; } diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index 3f4255fce10..60e7b88a0fa 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -1400,7 +1400,7 @@ const int ObjectAlignmentInBytes = 8; "Maximum size of Metaspaces (in bytes)") \ constraint(MaxMetaspaceSizeConstraintFunc,AfterErgo) \ \ - product(size_t, CompressedClassSpaceSize, 1*G, \ + product(size_t, CompressedClassSpaceSize, 128*M, \ "Maximum size of class area in Metaspace when compressed " \ "class pointers are used") \ range(1*M, 
LP64_ONLY(4*G) NOT_LP64(max_uintx)) \ diff --git a/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassSpaceSize.java b/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassSpaceSize.java index 53b8e5aed4d..e4e794e10f4 100644 --- a/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassSpaceSize.java +++ b/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassSpaceSize.java @@ -54,17 +54,6 @@ * @run driver CompressedClassSpaceSize valid_large_nocds */ -/* - * @test id=valid_large_cds - * @bug 8022865 - * @summary Tests for the -XX:CompressedClassSpaceSize command line option - * @requires vm.bits == 64 & vm.opt.final.UseCompressedOops == true & vm.cds - * @requires vm.flagless - * @library /test/lib - * @modules java.base/jdk.internal.misc java.management - * @run driver CompressedClassSpaceSize valid_large_cds - */ - import jdk.test.lib.process.ProcessTools; import jdk.test.lib.process.OutputAnalyzer; @@ -74,7 +63,7 @@ public class CompressedClassSpaceSize { final static long minAllowedClassSpaceSize = MB; final static long minRealClassSpaceSize = 16 * MB; - final static long maxClassSpaceSize = 4096 * MB; + final static long maxClassSpaceSize = 512 * MB; // For the valid_large_cds sub test: we need to have a notion of what archive size to // maximally expect, with a generous fudge factor to avoid having to tweak this test diff --git a/test/hotspot/jtreg/runtime/locking/TestRecursiveMonitorChurn.java b/test/hotspot/jtreg/runtime/locking/TestRecursiveMonitorChurn.java index 47cb5613171..d6a8564b81f 100644 --- a/test/hotspot/jtreg/runtime/locking/TestRecursiveMonitorChurn.java +++ b/test/hotspot/jtreg/runtime/locking/TestRecursiveMonitorChurn.java @@ -83,7 +83,7 @@ public static void main(String[] args) throws IOException { long reserved = Long.parseLong(m.group(1)); long committed = Long.parseLong(m.group(2)); System.out.println(">>>>> " + line + ": " + reserved + " - " + committed); - if (committed > 1000) { + if (committed > 300000) { throw new 
RuntimeException("Allocated too many monitors"); } return;