--- old/src/hotspot/share/classfile/classLoaderData.cpp	2018-10-29 23:13:07.610228267 -0400
+++ new/src/hotspot/share/classfile/classLoaderData.cpp	2018-10-29 23:13:07.366228255 -0400
@@ -484,7 +484,7 @@
 // Remove a klass from the _klasses list for scratch_class during redefinition
 // or parsed class in the case of an error.
 void ClassLoaderData::remove_class(Klass* scratch_class) {
-  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);

   // Adjust global class iterator.
   ClassLoaderDataGraph::adjust_saved_class(scratch_class);
@@ -804,7 +804,8 @@
 // Deallocate free metadata on the free list.  How useful the PermGen was!
 void ClassLoaderData::free_deallocate_list() {
-  // Don't need lock, at safepoint
+  // This must be called at a safepoint because it depends on metadata walking at
+  // safepoint cleanup time.
   assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
   assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
   if (_deallocate_list == NULL) {
@@ -844,8 +845,7 @@
 // classes.  The metadata is removed with the unloading metaspace.
 // There isn't C heap memory allocated for methods, so nothing is done for them.
 void ClassLoaderData::free_deallocate_list_C_heap_structures() {
-  // Don't need lock, at safepoint
-  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   assert(is_unloading(), "only called for ClassLoaderData that are unloading");
   if (_deallocate_list == NULL) {
     return;
--- old/src/hotspot/share/classfile/classLoaderDataGraph.cpp	2018-10-29 23:13:07.890228281 -0400
+++ new/src/hotspot/share/classfile/classLoaderDataGraph.cpp	2018-10-29 23:13:07.674228270 -0400
@@ -580,7 +580,7 @@
 }

 void ClassLoaderDataGraph::purge() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   ClassLoaderData* list = _unloading;
   _unloading = NULL;
   ClassLoaderData* next = list;
--- old/src/hotspot/share/classfile/moduleEntry.cpp	2018-10-29 23:13:08.158228294 -0400
+++ new/src/hotspot/share/classfile/moduleEntry.cpp	2018-10-29 23:13:07.950228284 -0400
@@ -204,7 +204,7 @@
 // Purge dead module entries out of reads list.
 void ModuleEntry::purge_reads() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(Module_lock);

   if (_must_walk_reads && has_reads_list()) {
     // This module's _must_walk_reads flag will be reset based
@@ -245,7 +245,6 @@
 }

 void ModuleEntry::delete_reads() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   delete _reads;
   _reads = NULL;
 }
@@ -319,8 +318,6 @@
 }

 ModuleEntryTable::~ModuleEntryTable() {
-  assert_locked_or_safepoint(Module_lock);
-
   // Walk through all buckets and all entries in each bucket,
   // freeing each entry.
   for (int i = 0; i < table_size(); ++i) {
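Note on the assertion substituted throughout these files: assert_locked_or_safepoint() passes when the current thread owns the named lock or when the VM is at a safepoint, which is what lets these paths run both concurrently and inside a safepoint operation. A rough sketch of the contract it checks, not the exact HotSpot implementation (the real version also tolerates, e.g., the invoking thread of a VM operation):

// Debug-only sketch of what assert_locked_or_safepoint() verifies.
void assert_locked_or_safepoint_sketch(const Monitor* lock) {
#ifdef ASSERT
  if (lock->owned_by_self()) return;                    // caller holds the lock
  if (SafepointSynchronize::is_at_safepoint()) return;  // all Java threads stopped
  fatal("must own lock %s or be at a safepoint", lock->name());
#endif
}
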
--- old/src/hotspot/share/classfile/packageEntry.cpp	2018-10-29 23:13:08.426228307 -0400
+++ new/src/hotspot/share/classfile/packageEntry.cpp	2018-10-29 23:13:08.210228296 -0400
@@ -125,7 +125,7 @@
 // get deleted.  This prevents the package from illegally transitioning from
 // exported to non-exported.
 void PackageEntry::purge_qualified_exports() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(Module_lock);
   if (_must_walk_exports &&
       _qualified_exports != NULL &&
       !_qualified_exports->is_empty()) {
@@ -160,7 +160,6 @@
 }

 void PackageEntry::delete_qualified_exports() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   if (_qualified_exports != NULL) {
     delete _qualified_exports;
   }
@@ -228,29 +227,20 @@
 }

 PackageEntry* PackageEntryTable::lookup(Symbol* name, ModuleEntry* module) {
+  MutexLocker ml(Module_lock);
   PackageEntry* p = lookup_only(name);
   if (p != NULL) {
     return p;
   } else {
-    // If not found, add to table. Grab the PackageEntryTable lock first.
-    MutexLocker ml(Module_lock);
-
-    // Since look-up was done lock-free, we need to check if another thread beat
-    // us in the race to insert the package.
-    PackageEntry* test = lookup_only(name);
-    if (test != NULL) {
-      // A race occurred and another thread introduced the package.
-      return test;
-    } else {
-      assert(module != NULL, "module should never be null");
-      PackageEntry* entry = new_entry(compute_hash(name), name, module);
-      add_entry(index_for(name), entry);
-      return entry;
-    }
+    assert(module != NULL, "module should never be null");
+    PackageEntry* entry = new_entry(compute_hash(name), name, module);
+    add_entry(index_for(name), entry);
+    return entry;
   }
 }

 PackageEntry* PackageEntryTable::lookup_only(Symbol* name) {
+  MutexLockerEx ml(Module_lock->owned_by_self() ? NULL : Module_lock);
   int index = index_for(name);
   for (PackageEntry* p = bucket(index); p != NULL; p = p->next()) {
     if (p->name()->fast_compare(name) == 0) {
@@ -296,7 +286,7 @@
 }

 bool PackageEntry::exported_pending_delete() const {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(Module_lock);
   return (is_unqual_exported() && _qualified_exports != NULL);
 }
--- old/src/hotspot/share/classfile/packageEntry.hpp	2018-10-29 23:13:08.706228320 -0400
+++ new/src/hotspot/share/classfile/packageEntry.hpp	2018-10-29 23:13:08.474228309 -0400
@@ -253,7 +253,7 @@
   // lookup Package with loader's package entry table, if not found add
   PackageEntry* lookup(Symbol* name, ModuleEntry* module);

-  // Only lookup Package within loader's package entry table.  The table read is lock-free.
+  // Only lookup Package within loader's package entry table.
   PackageEntry* lookup_only(Symbol* Package);

   void verify_javabase_packages(GrowableArray<Symbol*> *pkg_list);
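The lookup_only() change above relies on MutexLockerEx treating a NULL mutex as "do not lock". HotSpot mutexes are not self-reentrant, so a function reachable both with and without Module_lock held must only acquire the lock when the current thread does not already own it. A minimal hypothetical illustration of the idiom (the function and its body are made up for this sketch):

// Hypothetical caller that may or may not already hold Module_lock.
void do_module_cleanup(ModuleEntry* module) {
  // Acquire Module_lock only if this thread does not own it already;
  // MutexLockerEx(NULL) is a no-op, which avoids self-deadlock.
  MutexLockerEx ml(Module_lock->owned_by_self() ? NULL : Module_lock);
  module->purge_reads();  // safe: Module_lock is held on either path
}

This also explains why lookup() no longer needs the double-check it used to do: with lookup() taking Module_lock up front and lookup_only() locking conditionally, the lock-free read and its re-check-after-locking race are gone.
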
--- old/src/hotspot/share/classfile/systemDictionary.cpp	2018-10-29 23:13:08.990228334 -0400
+++ new/src/hotspot/share/classfile/systemDictionary.cpp	2018-10-29 23:13:08.758228323 -0400
@@ -1855,18 +1855,26 @@
                                     bool do_cleaning) {
   bool unloading_occurred;
+  bool is_concurrent = !SafepointSynchronize::is_at_safepoint();
   {
     GCTraceTime(Debug, gc, phases) t("ClassLoaderData", gc_timer);
-
+    assert_locked_or_safepoint(ClassLoaderDataGraph_lock);  // caller locks.
     // First, mark for unload all ClassLoaderData referencing a dead class loader.
     unloading_occurred = ClassLoaderDataGraph::do_unloading(do_cleaning);
+    if (unloading_occurred) {
+      MutexLockerEx ml2(is_concurrent ? Module_lock : NULL);
       JFR_ONLY(Jfr::on_unloading_classes();)
+      MutexLockerEx ml1(is_concurrent ? SystemDictionary_lock : NULL);
       ClassLoaderDataGraph::clean_module_and_package_info();
+    }
   }

-  // TODO: just return if !unloading_occurred.
+  if (do_cleaning) {
+    GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
+    ResolvedMethodTable::trigger_cleanup();
+  }
+
   if (unloading_occurred) {
     {
       GCTraceTime(Debug, gc, phases) t("SymbolTable", gc_timer);
@@ -1875,23 +1883,21 @@
     }

     {
+      MutexLockerEx ml(is_concurrent ? SystemDictionary_lock : NULL);
       GCTraceTime(Debug, gc, phases) t("Dictionary", gc_timer);
       constraints()->purge_loader_constraints();
       resolution_errors()->purge_resolution_errors();
     }
-  }

-  {
-    GCTraceTime(Debug, gc, phases) t("ProtectionDomainCacheTable", gc_timer);
-    // Oops referenced by the protection domain cache table may get unreachable independently
-    // of the class loader (eg. cached protection domain oops). So we need to
-    // explicitly unlink them here.
-    _pd_cache_table->trigger_cleanup();
-  }
-
-  if (do_cleaning) {
-    GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
-    ResolvedMethodTable::trigger_cleanup();
+    {
+      GCTraceTime(Debug, gc, phases) t("ProtectionDomainCacheTable", gc_timer);
+      // Oops referenced by the protection domain cache table may get unreachable independently
+      // of the class loader (eg. cached protection domain oops). So we need to
+      // explicitly unlink them here.
+      // All protection domain oops are linked to the caller class, so if nothing
+      // unloads, this is not needed.
+      _pd_cache_table->trigger_cleanup();
+    }
   }

   return unloading_occurred;
--- old/src/hotspot/share/memory/metaspace.cpp	2018-10-29 23:13:09.278228348 -0400
+++ new/src/hotspot/share/memory/metaspace.cpp	2018-10-29 23:13:09.066228338 -0400
@@ -1518,7 +1518,8 @@
   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));

-  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
+  MutexLockerEx ml(vsm()->lock()->owned_by_self() ? NULL : vsm()->lock(),
+                   Mutex::_no_safepoint_check_flag);

   if (is_class && Metaspace::using_class_space()) {
     class_vsm()->deallocate(ptr, word_size);
--- old/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp	2018-10-29 23:13:09.558228362 -0400
+++ new/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp	2018-10-29 23:13:09.342228351 -0400
@@ -89,7 +89,6 @@
 // nodes with a 0 container_count.  Remove Metachunks in
 // the node from their respective freelists.
 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
   assert_lock_strong(MetaspaceExpand_lock);
   // Don't use a VirtualSpaceListIterator because this
   // list is being changed and a straightforward use of an iterator is not safe.
@@ -142,6 +141,8 @@
 // The chunks are added with store ordering and not deleted except for at
 // unloading time during a safepoint.
 VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) {
+  MutexLockerEx cl(MetaspaceExpand_lock,
+                   Mutex::_no_safepoint_check_flag);
   // List should be stable enough to use an iterator here because removing virtual
   // space nodes is only allowed at a safepoint.
   VirtualSpaceListIterator iter(virtual_space_list());
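The systemDictionary.cpp hunk conditions each MutexLockerEx on is_concurrent (true only outside a safepoint), so the cleaning code takes Module_lock and SystemDictionary_lock itself when unloading runs concurrently, and passes NULL (lock nothing) inside a safepoint where the world is already stopped. The metaspace hunks use Mutex::_no_safepoint_check_flag because those paths can be entered by the VM thread or GC workers, which must not block on a safepoint check while acquiring a lock. A hypothetical wrapper combining both shapes, for illustration only:

// Sketch (hypothetical function) of the locking shape used above:
// skip acquisition when the lock is already held, and never perform
// a safepoint check while acquiring.
void with_metaspace_expand_lock(void (*body)()) {
  MutexLockerEx ml(MetaspaceExpand_lock->owned_by_self() ? NULL : MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  body();  // runs with MetaspaceExpand_lock held on either path
}
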