8331572: Allow using OopMapCache outside of STW GC phases #610

Open · wants to merge 4 commits into master
6 changes: 3 additions & 3 deletions src/hotspot/share/gc/shared/gcVMOperations.cpp
@@ -130,9 +130,9 @@ bool VM_GC_Operation::doit_prologue() {


void VM_GC_Operation::doit_epilogue() {
// Clean up old interpreter OopMap entries that were replaced
// during the GC thread root traversal.
OopMapCache::cleanup_old_entries();
// GC thread root traversal likely used OopMapCache a lot, which
// might have created lots of old entries. Trigger the cleanup now.
OopMapCache::trigger_cleanup();
if (Universe::has_reference_pending_list()) {
Heap_lock->notify_all();
}
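The same epilogue change repeats below for Shenandoah and both ZGC variants (gc/x and gc/z): the pause no longer frees stale entries itself, it only signals that cleanup is pending. A minimal sketch of the intended division of labor, using only the functions this patch introduces (illustrative, not part of the diff; the real wiring is in oopMapCache.cpp and serviceThread.cpp further down):

    // Inside a GC pause (VM operation epilogue): cheap and bounded.
    OopMapCache::trigger_cleanup();      // notifies Service_lock only if _old_entries is non-empty

    // Later, on the ServiceThread, outside the pause: the potentially expensive part.
    if (OopMapCache::has_cleanup_work()) {
      OopMapCache::cleanup();            // write_synchronize(), then deallocate the old entries
    }
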
4 changes: 3 additions & 1 deletion src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
@@ -42,6 +42,9 @@ bool VM_ShenandoahOperation::doit_prologue() {

void VM_ShenandoahOperation::doit_epilogue() {
assert(!ShenandoahHeap::heap()->has_gc_state_changed(), "GC State was not synchronized to java threads.");
// GC thread root traversal likely used OopMapCache a lot, which
// might have created lots of old entries. Trigger the cleanup now.
OopMapCache::trigger_cleanup();
}

bool VM_ShenandoahReferenceOperation::doit_prologue() {
@@ -52,7 +55,6 @@ bool VM_ShenandoahReferenceOperation::doit_prologue() {

void VM_ShenandoahReferenceOperation::doit_epilogue() {
VM_ShenandoahOperation::doit_epilogue();
OopMapCache::cleanup_old_entries();
if (Universe::has_reference_pending_list()) {
Heap_lock->notify_all();
}
5 changes: 5 additions & 0 deletions src/hotspot/share/gc/x/xDriver.cpp
@@ -35,6 +35,7 @@
#include "gc/x/xServiceability.hpp"
#include "gc/x/xStat.hpp"
#include "gc/x/xVerify.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "runtime/threads.hpp"
@@ -130,6 +131,10 @@ class VM_XOperation : public VM_Operation {

virtual void doit_epilogue() {
Heap_lock->unlock();

// GC thread root traversal likely used OopMapCache a lot, which
// might have created lots of old entries. Trigger the cleanup now.
OopMapCache::trigger_cleanup();
}

bool gc_locked() const {
5 changes: 5 additions & 0 deletions src/hotspot/share/gc/z/zGeneration.cpp
@@ -52,6 +52,7 @@
#include "gc/z/zUncoloredRoot.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "prims/jvmtiTagMap.hpp"
@@ -452,6 +453,10 @@ class VM_ZOperation : public VM_Operation {

virtual void doit_epilogue() {
Heap_lock->unlock();

// GC thread root traversal likely used OopMapCache a lot, which
// might have created lots of old entries. Trigger the cleanup now.
OopMapCache::trigger_cleanup();
}

bool success() const {
75 changes: 49 additions & 26 deletions src/hotspot/share/interpreter/oopMapCache.cpp
@@ -33,6 +33,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/signature.hpp"
#include "utilities/globalCounter.inline.hpp"

class OopMapCacheEntry: private InterpreterOopMap {
friend class InterpreterOopMap;
@@ -502,15 +503,11 @@ void OopMapCache::flush_obsolete_entries() {
}
}

// Called by GC for thread root scan during a safepoint only. The other interpreted frame oopmaps
// are generated locally and not cached.
// Lookup or compute/cache the entry.
void OopMapCache::lookup(const methodHandle& method,
int bci,
InterpreterOopMap* entry_for) {
assert(SafepointSynchronize::is_at_safepoint(), "called by GC in a safepoint");
int probe = hash_value_for(method, bci);
int i;
OopMapCacheEntry* entry = nullptr;

if (log_is_enabled(Debug, interpreter, oopmap)) {
static int count = 0;
@@ -520,14 +517,18 @@ void OopMapCache::lookup(const methodHandle& method,
method()->name_and_sig_as_C_string(), probe);
}

// Search hashtable for match
for(i = 0; i < _probe_depth; i++) {
entry = entry_at(probe + i);
if (entry != nullptr && !entry->is_empty() && entry->match(method, bci)) {
entry_for->resource_copy(entry);
assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
return;
// Search hashtable for match.
// Need a critical section to avoid race against concurrent reclamation.
{
GlobalCounter::CriticalSection cs(Thread::current());
for (int i = 0; i < _probe_depth; i++) {
OopMapCacheEntry* entry = entry_at(probe + i);
if (entry != nullptr && !entry->is_empty() && entry->match(method, bci)) {
entry_for->resource_copy(entry);
assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
return;
}
}
}

@@ -549,8 +550,8 @@
}

// First search for an empty slot
for(i = 0; i < _probe_depth; i++) {
entry = entry_at(probe + i);
for (int i = 0; i < _probe_depth; i++) {
OopMapCacheEntry* entry = entry_at(probe + i);
if (entry == nullptr) {
if (put_at(probe + i, tmp, nullptr)) {
assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
@@ -565,6 +566,10 @@
// where the first entry in the collision array is replaced with the new one.
OopMapCacheEntry* old = entry_at(probe + 0);
if (put_at(probe + 0, tmp, old)) {
// Cannot deallocate the old entry on the spot: it can still be used by readers
// that got a reference to it before we were able to replace it in the map.
// Instead of synchronizing on GlobalCounter here and incurring a heavy thread
// walk, we do this cleanup out of band.
enqueue_for_cleanup(old);
} else {
OopMapCacheEntry::deallocate(tmp);
@@ -575,13 +580,14 @@
}

void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
bool success = false;
OopMapCacheEntry* head;
do {
head = _old_entries;
while (true) {
OopMapCacheEntry* head = Atomic::load(&_old_entries);
entry->_next = head;
success = Atomic::cmpxchg(&_old_entries, head, entry) == head;
} while (!success);
if (Atomic::cmpxchg(&_old_entries, head, entry) == head) {
// Enqueued successfully.
break;
}
}

if (log_is_enabled(Debug, interpreter, oopmap)) {
ResourceMark rm;
@@ -590,11 +596,28 @@
}
}

// This is called after GC threads are done and nothing is accessing the old_entries
// list, so no synchronization needed.
void OopMapCache::cleanup_old_entries() {
OopMapCacheEntry* entry = _old_entries;
_old_entries = nullptr;
bool OopMapCache::has_cleanup_work() {
return Atomic::load(&_old_entries) != nullptr;
}

void OopMapCache::trigger_cleanup() {
if (has_cleanup_work()) {
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
Service_lock->notify_all();
}
}

void OopMapCache::cleanup() {
OopMapCacheEntry* entry = Atomic::xchg(&_old_entries, (OopMapCacheEntry*)nullptr);
if (entry == nullptr) {
// No work.
return;
}

// About to delete the entries that might still be accessed by other threads
// on the lookup path. Need to sync up with them before proceeding.
GlobalCounter::write_synchronize();

while (entry != nullptr) {
if (log_is_enabled(Debug, interpreter, oopmap)) {
ResourceMark rm;
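The lookup/cleanup changes above amount to an epoch-based deferred-reclamation scheme: readers touch cache entries only inside a GlobalCounter::CriticalSection, a writer that evicts a busy bucket pushes the old entry onto the lock-free _old_entries list, and the cleaner calls GlobalCounter::write_synchronize() before freeing anything. The following is a simplified, self-contained analogue in plain std::atomic C++, a sketch of the technique only, not HotSpot's GlobalCounter implementation; every identifier in it (Entry, g_slot, g_old_entries, kReaders, reader, writer, cleanup) is invented for the example:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct Entry { int payload; Entry* next = nullptr; };

    constexpr int kReaders = 2;
    std::atomic<uint64_t> g_epoch{1};
    std::atomic<uint64_t> g_reader_epoch[kReaders] = {};  // 0 = reader not in a critical section
    std::atomic<Entry*>   g_slot{new Entry{42}};           // stands in for one hash bucket
    std::atomic<Entry*>   g_old_entries{nullptr};          // stands in for OopMapCache::_old_entries

    // Analogue of GlobalCounter::CriticalSection: publish the epoch we entered under.
    struct CriticalSection {
      int idx;
      explicit CriticalSection(int i) : idx(i) { g_reader_epoch[i].store(g_epoch.load()); }
      ~CriticalSection() { g_reader_epoch[idx].store(0); }
    };

    // Analogue of GlobalCounter::write_synchronize: bump the epoch, then wait until every
    // reader is either outside a critical section or inside one started after the bump;
    // either way it can no longer hold a stale Entry*.
    void write_synchronize() {
      uint64_t next = g_epoch.fetch_add(1) + 1;
      for (int i = 0; i < kReaders; i++) {
        while (true) {
          uint64_t e = g_reader_epoch[i].load();
          if (e == 0 || e >= next) break;
          std::this_thread::yield();
        }
      }
    }

    // Analogue of OopMapCache::enqueue_for_cleanup: lock-free push onto the old-entries list.
    void enqueue_for_cleanup(Entry* e) {
      Entry* head = g_old_entries.load();
      do {
        e->next = head;
      } while (!g_old_entries.compare_exchange_weak(head, e));
    }

    // Analogue of OopMapCache::cleanup, normally run by the ServiceThread.
    void cleanup() {
      Entry* list = g_old_entries.exchange(nullptr);
      if (list == nullptr) return;      // no work
      write_synchronize();              // after this, no reader can still see these entries
      while (list != nullptr) { Entry* next = list->next; delete list; list = next; }
    }

    // Analogue of the lookup fast path: dereference the bucket only inside the
    // critical section and never keep the pointer past it.
    void reader(int idx) {
      for (int i = 0; i < 200000; i++) {
        CriticalSection cs(idx);
        volatile int sink = g_slot.load()->payload;
        (void)sink;
      }
    }

    // Analogue of replacing a busy bucket: swap in a new entry, defer freeing the old one.
    void writer() {
      for (int i = 1; i <= 2000; i++) {
        enqueue_for_cleanup(g_slot.exchange(new Entry{i}));
        if (i % 100 == 0) cleanup();    // the real code defers this to the ServiceThread
      }
    }

    int main() {
      std::vector<std::thread> threads;
      for (int i = 0; i < kReaders; i++) threads.emplace_back(reader, i);
      threads.emplace_back(writer);
      for (auto& t : threads) t.join();
      cleanup();
      delete g_slot.load();
      printf("done\n");
      return 0;
    }

The writer never blocks readers, and the only global synchronization (write_synchronize) happens on the cleanup path, which this patch moves off the GC pause.
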
10 changes: 9 additions & 1 deletion src/hotspot/share/interpreter/oopMapCache.hpp
@@ -176,7 +176,15 @@ class OopMapCache : public CHeapObj<mtClass> {

// Compute an oop map without updating the cache or grabbing any locks (for debugging)
static void compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry);
static void cleanup_old_entries();

// Check if we need to clean up old entries
static bool has_cleanup_work();

// Request cleanup if work is needed
static void trigger_cleanup();

// Clean up the old entries
static void cleanup();
};

#endif // SHARE_INTERPRETER_OOPMAPCACHE_HPP
13 changes: 5 additions & 8 deletions src/hotspot/share/oops/method.cpp
@@ -308,16 +308,13 @@ int Method::fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_kla

void Method::mask_for(int bci, InterpreterOopMap* mask) {
methodHandle h_this(Thread::current(), this);
// Only GC uses the OopMapCache during thread stack root scanning
// any other uses generate an oopmap but do not save it in the cache.
if (Universe::heap()->is_stw_gc_active()) {
method_holder()->mask_for(h_this, bci, mask);
} else {
OopMapCache::compute_one_oop_map(h_this, bci, mask);
}
return;
mask_for(h_this, bci, mask);
}

void Method::mask_for(const methodHandle& this_mh, int bci, InterpreterOopMap* mask) {
assert(this_mh() == this, "Sanity");
method_holder()->mask_for(this_mh, bci, mask);
}

int Method::bci_from(address bcp) const {
if (is_native() && bcp == 0) {
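The new overload lets callers that already hold a methodHandle (such as frame::oops_interpreted_do below) avoid constructing a second handle per lookup. A hypothetical caller, for illustration only:

    // Illustrative only; walk_one_frame is not part of the patch.
    void walk_one_frame(const methodHandle& mh, int bci, InterpreterOopMap* mask) {
      mh()->mask_for(mh, bci, mask);   // handle-taking overload: no extra methodHandle created
    }
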
4 changes: 3 additions & 1 deletion src/hotspot/share/oops/method.hpp
@@ -499,8 +499,10 @@ class Method : public Metadata {
address signature_handler() const { return *(signature_handler_addr()); }
void set_signature_handler(address handler);

// Interpreter oopmap support
// Interpreter oopmap support.
// If handle is already available, call with it for better performance.
void mask_for(int bci, InterpreterOopMap* mask);
void mask_for(const methodHandle& this_mh, int bci, InterpreterOopMap* mask);

// operations on invocation counter
void print_invocation_count();
2 changes: 1 addition & 1 deletion src/hotspot/share/runtime/frame.cpp
@@ -954,7 +954,7 @@ void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool quer
// process locals & expression stack
InterpreterOopMap mask;
if (query_oop_map_cache) {
m->mask_for(bci, &mask);
m->mask_for(m, bci, &mask);
} else {
OopMapCache::compute_one_oop_map(m, bci, &mask);
}
8 changes: 8 additions & 0 deletions src/hotspot/share/runtime/safepoint.cpp
@@ -39,6 +39,7 @@
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
@@ -610,6 +611,13 @@ class ParallelCleanupTask : public WorkerTask {
OopStorage::trigger_cleanup_if_needed();
}

if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_REQUEST_OOPMAPCACHE_CLEANUP)) {
if (OopMapCache::has_cleanup_work()) {
Tracer t("triggering oopmap cache cleanup");
OopMapCache::trigger_cleanup();
}
}

_subtasks.all_tasks_claimed();
}
};
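The new SAFEPOINT_CLEANUP_REQUEST_OOPMAPCACHE_CLEANUP subtask follows the existing claim pattern: whichever cleanup worker claims it first checks for pending work and at most pokes the ServiceThread; the freeing still happens after the safepoint. A simplified, one-shot stand-alone analogue of "claim exactly once across workers" (a sketch, not HotSpot's SubTasksDone; the names are invented, and SubTasksDone is re-armed per safepoint):

    #include <atomic>
    #include <cstdio>

    // One flag per subtask; the first worker to flip it from false to true owns the task.
    std::atomic<bool> g_oopmap_cleanup_claimed{false};

    void worker_run_cleanup_phase() {
      if (!g_oopmap_cleanup_claimed.exchange(true)) {
        // Exactly one worker reaches this point; in the patch it checks
        // OopMapCache::has_cleanup_work() and then calls trigger_cleanup().
        printf("claimed the oopmap cache cleanup request\n");
      }
    }
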
1 change: 1 addition & 0 deletions src/hotspot/share/runtime/safepoint.hpp
@@ -75,6 +75,7 @@ class SafepointSynchronize : AllStatic {
SAFEPOINT_CLEANUP_SYMBOL_TABLE_REHASH,
SAFEPOINT_CLEANUP_STRING_TABLE_REHASH,
SAFEPOINT_CLEANUP_REQUEST_OOPSTORAGE_CLEANUP,
SAFEPOINT_CLEANUP_REQUEST_OOPMAPCACHE_CLEANUP,
// Leave this one last.
SAFEPOINT_CLEANUP_NUM_TASKS
};
9 changes: 8 additions & 1 deletion src/hotspot/share/runtime/serviceThread.cpp
@@ -33,6 +33,7 @@
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "memory/universe.hpp"
#include "interpreter/oopMapCache.hpp"
#include "oops/oopHandle.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -95,6 +96,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
bool oop_handles_to_release = false;
bool cldg_cleanup_work = false;
bool jvmti_tagmap_work = false;
bool oopmap_cache_work = false;
{
// Need state transition ThreadBlockInVM so that this thread
// will be handled by safepoint correctly when this thread is
@@ -124,7 +126,8 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
(oopstorage_work = OopStorage::has_cleanup_work_and_reset()) |
(oop_handles_to_release = JavaThread::has_oop_handles_to_release()) |
(cldg_cleanup_work = ClassLoaderDataGraph::should_clean_metaspaces_and_reset()) |
(jvmti_tagmap_work = JvmtiTagMap::has_object_free_events_and_reset())
(jvmti_tagmap_work = JvmtiTagMap::has_object_free_events_and_reset()) |
(oopmap_cache_work = OopMapCache::has_cleanup_work())
) == 0) {
// Wait until notified that there is some work to do.
ml.wait();
@@ -195,6 +198,10 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
if (jvmti_tagmap_work) {
JvmtiTagMap::flush_all_object_free_events();
}

if (oopmap_cache_work) {
OopMapCache::cleanup();
}
}
}

Expand Down