/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
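  // The clock is a millisecond timestamp. j.l.r.SoftReference records this
  // value in its timestamp field when the referent is accessed, and the LRU
  // policies created below roughly compare (clock - timestamp) against a
  // budget derived from the amount of free heap and SoftRefLRUPolicyMSPerMB
  // when deciding whether a softly reachable referent should be cleared
  // (see referencePolicy.hpp for the exact policies).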
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  if (is_server_compilation_mode_vm()) {
    _default_soft_ref_policy = new LRUMaxHeapPolicy();
  } else {
    _default_soft_ref_policy = new LRUCurrentHeapPolicy();
  }
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

void ReferenceProcessor::enable_discovery(bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs. Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                                       bool mt_processing,
                                       uint mt_processing_degree,
                                       bool mt_discovery,
                                       uint mt_discovery_degree,
                                       bool atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool adjust_no_of_processing_threads) :
  _is_subject_to_discovery(is_subject_to_discovery),
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _processing_is_mt(mt_processing),
  _next_id(0),
  _adjust_no_of_processing_threads(adjust_no_of_processing_threads),
  _is_alive_non_header(is_alive_non_header)
{
  assert(is_subject_to_discovery != NULL, "must be set");

  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_queues          = MAX2(1U, mt_processing_degree);
  _max_num_queues      = MAX2(_num_queues, mt_discovery_degree);
  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
                                          _max_num_queues * number_of_subclasses_of_ref(), mtGC);

  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_queues];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_queues];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_queues];

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].clear();
  }

  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list at %u", i);
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
                    _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const {
  size_t total = 0;
  for (uint i = 0; i < _max_num_queues; ++i) {
    total += lists[i].length();
  }
  return total;
}

#ifdef ASSERT
void ReferenceProcessor::verify_total_count_zero(DiscoveredList lists[], const char* type) {
  size_t count = total_count(lists);
  assert(count == 0, "%ss must be empty but has " SIZE_FORMAT " elements", type, count);
}
#endif

ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*            is_alive,
  OopClosure*                   keep_alive,
  VoidClosure*                  complete_gc,
  AbstractRefProcTaskExecutor*  task_executor,
  ReferenceProcessorPhaseTimes* phase_times) {

  double start_time = os::elapsedTime();

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.
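  // (The re-read below pairs with update_soft_ref_master_clock(), which is
  // called after phase 1 and publishes the possibly advanced clock back to
  // j.l.r.SoftReference.)
  //
  // Processing then proceeds in four phases, each timed separately below:
  //   Phase 1: re-examine SoftReferences and, depending on the current
  //            policy, keep their referents alive instead of clearing them.
  //   Phase 2: drop Soft/Weak/Final references whose referents are still
  //            alive; enqueue and clear dead Soft/Weak references.
  //   Phase 3: keep alive the referents (and everything reachable from them)
  //            of the remaining FinalReferences, then enqueue them.
  //   Phase 4: enqueue and clear dead PhantomReferences.
  // This summary only mirrors the process_* calls below; see each method for
  // the precise semantics.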

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  ReferenceProcessorStats stats(total_count(_discoveredSoftRefs),
                                total_count(_discoveredWeakRefs),
                                total_count(_discoveredFinalRefs),
                                total_count(_discoveredPhantomRefs));

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase1, phase_times, this);
    process_soft_ref_reconsider(is_alive, keep_alive, complete_gc,
                                task_executor, phase_times);
  }

  update_soft_ref_master_clock();

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase2, phase_times, this);
    process_soft_weak_final_refs(is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase3, phase_times, this);
    process_final_keep_alive(keep_alive, complete_gc, task_executor, phase_times);
  }

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase4, phase_times, this);
    process_phantom_refs(is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  if (task_executor != NULL) {
    // Record the work done by the parallel workers.
    task_executor->set_single_threaded_mode();
  }

  phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);

  return stats;
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered);
  oop discovered = java_lang_ref_Reference::discovered(_current_discovered);
  assert(_current_discovered_addr && oopDesc::is_oop_or_null(discovered),
         "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  _next_discovered = discovered;

  _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered);
  _referent = java_lang_ref_Reference::referent(_current_discovered);
  assert(Universe::heap()->is_in_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             oopDesc::is_oop_or_null(_referent)
           : oopDesc::is_oop(_referent),
         "Expected an oop%s for referent field at " PTR_FORMAT,
         (allow_null_referent ? " or NULL" : ""),
         p2i(_referent));
}

void DiscoveredListIterator::remove() {
  assert(oopDesc::is_oop(_current_discovered), "Dropping a bad reference");
  RawAccess<>::oop_store(_current_discovered_addr, oop(NULL));

  // For the first element, _prev_discovered_addr actually points into the
  // DiscoveredList itself (gross).
  oop new_next;
  if (_next_discovered == _current_discovered) {
    // At the end of the list, we should make _prev_discovered point to itself.
    // If _current_discovered is the first element, then _prev_discovered_addr
    // will be in the DiscoveredList, and _prev_discovered will be NULL.
    new_next = _prev_discovered;
  } else {
    new_next = _next_discovered;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  RawAccess<>::oop_store(_prev_discovered_addr, new_next);
  _removed++;
  _refs_list.dec_length(1);
}

void DiscoveredListIterator::clear_referent() {
  RawAccess<>::oop_store(_referent_addr, oop(NULL));
}

void DiscoveredListIterator::enqueue() {
  HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_current_discovered,
                                            java_lang_ref_Reference::discovered_offset,
                                            _next_discovered);
}

void DiscoveredListIterator::complete_enqueue() {
  if (_prev_discovered != NULL) {
    // This is the last object.
    // Swap refs_list into pending list and set obj's
    // discovered to what we read from the pending list.
    oop old = Universe::swap_reference_pending_list(_refs_list.head());
    HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_prev_discovered, java_lang_ref_Reference::discovered_offset, old);
  }
}

inline void log_dropped_ref(const DiscoveredListIterator& iter, const char* reason) {
  if (log_develop_is_enabled(Trace, gc, ref)) {
    ResourceMark rm;
    log_develop_trace(gc, ref)("Dropping %s reference " PTR_FORMAT ": %s",
                               reason, p2i(iter.obj()),
                               iter.obj()->klass()->internal_name());
  }
}

inline void log_enqueued_ref(const DiscoveredListIterator& iter, const char* reason) {
  if (log_develop_is_enabled(Trace, gc, ref)) {
    ResourceMark rm;
    log_develop_trace(gc, ref)("Enqueue %s reference (" INTPTR_FORMAT ": %s)",
                               reason, p2i(iter.obj()), iter.obj()->klass()->internal_name());
  }
  assert(oopDesc::is_oop(iter.obj()), "Adding a bad reference");
}

size_t ReferenceProcessor::process_soft_ref_reconsider_work(DiscoveredList&    refs_list,
                                                            ReferencePolicy*   policy,
                                                            BoolObjectClosure* is_alive,
                                                            OopClosure*        keep_alive,
                                                            VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      log_dropped_ref(iter, "by policy");
      // Remove Reference object from list
      iter.remove();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(&refs_list));
  return iter.removed();
}

size_t ReferenceProcessor::process_soft_weak_final_refs_work(DiscoveredList&    refs_list,
                                                             BoolObjectClosure* is_alive,
                                                             OopClosure*        keep_alive,
                                                             bool               do_enqueue_and_clear) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    if (iter.referent() == NULL) {
      // Reference has been cleared since discovery; only possible if
      // discovery is not atomic (checked by load_ptrs). Remove
      // reference from list.
      log_dropped_ref(iter, "cleared");
      iter.remove();
      iter.move_to_next();
    } else if (iter.is_referent_alive()) {
      // The referent is reachable after all.
      // Remove reference from list.
      log_dropped_ref(iter, "reachable");
      iter.remove();
      // Update the referent pointer as necessary. Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      if (do_enqueue_and_clear) {
        iter.clear_referent();
        iter.enqueue();
        log_enqueued_ref(iter, "cleared");
      }
      // Keep in discovered list
      iter.next();
    }
  }
  if (do_enqueue_and_clear) {
    iter.complete_enqueue();
    refs_list.clear();
  }

  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                             " Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(&refs_list));
  return iter.removed();
}

size_t ReferenceProcessor::process_final_keep_alive_work(DiscoveredList& refs_list,
                                                         OopClosure*     keep_alive,
                                                         VoidClosure*    complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    // keep the referent and followers around
    iter.make_referent_alive();

    // Self-loop next, to mark the FinalReference not active.
    assert(java_lang_ref_Reference::next(iter.obj()) == NULL, "enqueued FinalReference");
    java_lang_ref_Reference::set_next_raw(iter.obj(), iter.obj());

    iter.enqueue();
    log_enqueued_ref(iter, "Final");
    iter.next();
  }
  iter.complete_enqueue();
  // Close the reachable set
  complete_gc->do_void();
  refs_list.clear();

  assert(iter.removed() == 0, "This phase does not remove anything.");
  return iter.removed();
}

size_t ReferenceProcessor::process_phantom_refs_work(DiscoveredList&    refs_list,
                                                     BoolObjectClosure* is_alive,
                                                     OopClosure*        keep_alive,
                                                     VoidClosure*       complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));

    oop const referent = iter.referent();

    if (referent == NULL || iter.is_referent_alive()) {
      iter.make_referent_alive();
      iter.remove();
      iter.move_to_next();
    } else {
      iter.clear_referent();
      iter.enqueue();
      log_enqueued_ref(iter, "cleared Phantom");
      iter.next();
    }
  }
  iter.complete_enqueue();
  // Close the reachable set; needed for collectors whose keep_alive closures
  // do not immediately complete their work.
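  // (Typically the keep_alive closure above only records the referents to be
  // scanned; complete_gc then drains those marking/copying stacks so that the
  // transitive closure over the kept-alive referents is finished before the
  // list is cleared.)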
  complete_gc->do_void();
  refs_list.clear();

  return iter.removed();
}

void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.clear();
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if ((i % _max_num_queues) == 0) {
      log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
    }
    clear_discovered_references(_discovered_refs[i]);
  }
}

size_t ReferenceProcessor::total_reference_count(ReferenceType type) const {
  DiscoveredList* list = NULL;

  switch (type) {
    case REF_SOFT:
      list = _discoveredSoftRefs;
      break;
    case REF_WEAK:
      list = _discoveredWeakRefs;
      break;
    case REF_FINAL:
      list = _discoveredFinalRefs;
      break;
    case REF_PHANTOM:
      list = _discoveredPhantomRefs;
      break;
    case REF_OTHER:
    case REF_NONE:
    default:
      ShouldNotReachHere();
  }
  return total_count(list);
}

class RefProcPhase1Task : public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times,
                    ReferencePolicy* policy)
    : ProcessTask(ref_processor, true /* marks_oops_alive */, phase_times),
      _policy(policy) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::SoftRefSubPhase1, _phase_times, worker_id);
    size_t const removed = _ref_processor.process_soft_ref_reconsider_work(_ref_processor._discoveredSoftRefs[worker_id],
                                                                           _policy,
                                                                           &is_alive,
                                                                           &keep_alive,
                                                                           &complete_gc);
    _phase_times->add_ref_cleared(REF_SOFT, removed);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
  void run_phase2(uint worker_id,
                  DiscoveredList list[],
                  BoolObjectClosure& is_alive,
                  OopClosure& keep_alive,
                  bool do_enqueue_and_clear,
                  ReferenceType ref_type) {
    size_t const removed = _ref_processor.process_soft_weak_final_refs_work(list[worker_id],
                                                                            &is_alive,
                                                                            &keep_alive,
                                                                            do_enqueue_and_clear);
    _phase_times->add_ref_cleared(ref_type, removed);
  }

public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, false /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) {
    RefProcWorkerTimeTracker t(_phase_times->phase2_worker_time_sec(), worker_id);
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::SoftRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredSoftRefs, is_alive, keep_alive, true /* do_enqueue_and_clear */, REF_SOFT);
    }
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::WeakRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredWeakRefs, is_alive, keep_alive, true /* do_enqueue_and_clear */, REF_WEAK);
    }
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::FinalRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredFinalRefs, is_alive, keep_alive, false /* do_enqueue_and_clear */, REF_FINAL);
    }
    // Close the reachable set; needed for collectors whose keep_alive closures
    // do not immediately complete their work.
    complete_gc.do_void();
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, true /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::FinalRefSubPhase3, _phase_times, worker_id);
    _ref_processor.process_final_keep_alive_work(_ref_processor._discoveredFinalRefs[worker_id], &keep_alive, &complete_gc);
  }
};

class RefProcPhase4Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase4Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, false /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::PhantomRefSubPhase4, _phase_times, worker_id);
    size_t const removed = _ref_processor.process_phantom_refs_work(_ref_processor._discoveredPhantomRefs[worker_id],
                                                                    &is_alive,
                                                                    &keep_alive,
                                                                    &complete_gc);
    _phase_times->add_ref_cleared(REF_PHANTOM, removed);
  }
};

void ReferenceProcessor::log_reflist(const char* prefix, DiscoveredList list[], uint num_active_queues) {
  LogTarget(Trace, gc, ref) lt;

  if (!lt.is_enabled()) {
    return;
  }

  size_t total = 0;

  LogStream ls(lt);
  ls.print("%s", prefix);
  for (uint i = 0; i < num_active_queues; i++) {
    ls.print(SIZE_FORMAT " ", list[i].length());
    total += list[i].length();
  }
  ls.print_cr("(" SIZE_FORMAT ")", total);
}

#ifndef PRODUCT
void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint num_active_queues) {
  if (!log_is_enabled(Trace, gc, ref)) {
    return;
  }

  log_reflist("", ref_lists, num_active_queues);
#ifdef ASSERT
  for (uint i = num_active_queues; i < _max_num_queues; i++) {
    assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u",
           ref_lists[i].length(), i);
  }
#endif
}
#endif

void ReferenceProcessor::set_active_mt_degree(uint v) {
  _num_queues = v;
  _next_id = 0;
}

bool ReferenceProcessor::need_balance_queues(DiscoveredList refs_lists[]) {
  assert(_processing_is_mt, "why balance non-mt processing?");
  // _num_queues is the processing degree. Only list entries up to
  // _num_queues will be processed, so any non-empty lists beyond
  // that must be redistributed to lists in that range. Even if not
  // needed for that, balancing may be desirable to eliminate poor
  // distribution of references among the lists.
  if (ParallelRefProcBalancingEnabled) {
    return true;                // Configuration says do it.
  } else {
    // Configuration says don't balance, but if there are non-empty
    // lists beyond the processing degree, then must ignore the
    // configuration and balance anyway.
    for (uint i = _num_queues; i < _max_num_queues; ++i) {
      if (!refs_lists[i].is_empty()) {
        return true;            // Must balance despite configuration.
      }
    }
    return false;               // Safe to obey configuration and not balance.
  }
}

void ReferenceProcessor::maybe_balance_queues(DiscoveredList refs_lists[]) {
  assert(_processing_is_mt, "Should not call this otherwise");
  if (need_balance_queues(refs_lists)) {
    balance_queues(refs_lists);
  }
}

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_queues-1] to
// queues[0, 1, ..., _num_queues-1] because only the first _num_queues
// lists, corresponding to the active workers, will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  log_develop_trace(gc, ref)("Balance ref_lists ");

  log_reflist_counts(ref_lists, _max_num_queues);

  for (uint i = 0; i < _max_num_queues; ++i) {
    total_refs += ref_lists[i].length();
  }
  size_t avg_refs = total_refs / _num_queues + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_queues) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_queues, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Refs if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
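          // (move_tail == new_head can only happen when move_tail was the last
          // element, i.e. the whole from list was moved: the final Reference on
          // a discovered list has its discovered field pointing to itself.)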
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_queues;
      }
    }
  }
#ifdef ASSERT
  log_reflist_counts(ref_lists, _num_queues);
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _num_queues; ++i) {
    balanced_total_refs += ref_lists[i].length();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

bool ReferenceProcessor::is_mt_processing_set_up(AbstractRefProcTaskExecutor* task_executor) const {
  return task_executor != NULL && _processing_is_mt;
}

void ReferenceProcessor::process_soft_ref_reconsider(BoolObjectClosure* is_alive,
                                                     OopClosure* keep_alive,
                                                     VoidClosure* complete_gc,
                                                     AbstractRefProcTaskExecutor* task_executor,
                                                     ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_soft_refs = total_count(_discoveredSoftRefs);
  phase_times->set_ref_discovered(REF_SOFT, num_soft_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_soft_refs == 0 || _current_soft_ref_policy == NULL) {
    log_debug(gc, ref)("Skipped phase1 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase1, num_soft_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase1, phase_times);
    maybe_balance_queues(_discoveredSoftRefs);
  }

  RefProcPhaseTimeTracker tt(RefPhase1, phase_times);

  log_reflist("Phase1 Soft before", _discoveredSoftRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase1Task phase1(*this, phase_times, _current_soft_ref_policy);
    task_executor->execute(phase1, num_queues());
  } else {
    size_t removed = 0;

    RefProcSubPhasesWorkerTimeTracker tt2(SoftRefSubPhase1, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      removed += process_soft_ref_reconsider_work(_discoveredSoftRefs[i], _current_soft_ref_policy,
                                                  is_alive, keep_alive, complete_gc);
    }

    phase_times->add_ref_cleared(REF_SOFT, removed);
  }
  log_reflist("Phase1 Soft after", _discoveredSoftRefs, _max_num_queues);
}

void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_alive,
                                                      OopClosure* keep_alive,
                                                      VoidClosure* complete_gc,
                                                      AbstractRefProcTaskExecutor* task_executor,
                                                      ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_soft_refs = total_count(_discoveredSoftRefs);
  size_t const num_weak_refs = total_count(_discoveredWeakRefs);
  size_t const num_final_refs = total_count(_discoveredFinalRefs);
  size_t const num_total_refs = num_soft_refs + num_weak_refs + num_final_refs;
  phase_times->set_ref_discovered(REF_WEAK, num_weak_refs);
  phase_times->set_ref_discovered(REF_FINAL, num_final_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_total_refs == 0) {
    log_debug(gc, ref)("Skipped phase2 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase2, num_total_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase2, phase_times);
    maybe_balance_queues(_discoveredSoftRefs);
    maybe_balance_queues(_discoveredWeakRefs);
    maybe_balance_queues(_discoveredFinalRefs);
  }

  RefProcPhaseTimeTracker tt(RefPhase2, phase_times);

  log_reflist("Phase2 Soft before", _discoveredSoftRefs, _max_num_queues);
  log_reflist("Phase2 Weak before", _discoveredWeakRefs, _max_num_queues);
  log_reflist("Phase2 Final before", _discoveredFinalRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase2Task phase2(*this, phase_times);
    task_executor->execute(phase2, num_queues());
  } else {
    RefProcWorkerTimeTracker t(phase_times->phase2_worker_time_sec(), 0);
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(SoftRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredSoftRefs[i], is_alive, keep_alive, true /* do_enqueue_and_clear */);
      }

      phase_times->add_ref_cleared(REF_SOFT, removed);
    }
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(WeakRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredWeakRefs[i], is_alive, keep_alive, true /* do_enqueue_and_clear */);
      }

      phase_times->add_ref_cleared(REF_WEAK, removed);
    }
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(FinalRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredFinalRefs[i], is_alive, keep_alive, false /* do_enqueue_and_clear */);
      }

      phase_times->add_ref_cleared(REF_FINAL, removed);
    }
    complete_gc->do_void();
  }
  verify_total_count_zero(_discoveredSoftRefs, "SoftReference");
  verify_total_count_zero(_discoveredWeakRefs, "WeakReference");
  log_reflist("Phase2 Final after", _discoveredFinalRefs, _max_num_queues);
}

void ReferenceProcessor::process_final_keep_alive(OopClosure* keep_alive,
                                                  VoidClosure* complete_gc,
                                                  AbstractRefProcTaskExecutor* task_executor,
                                                  ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_final_refs = total_count(_discoveredFinalRefs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_final_refs == 0) {
    log_debug(gc, ref)("Skipped phase3 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase3, num_final_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase3, phase_times);
    maybe_balance_queues(_discoveredFinalRefs);
  }

  // Phase 3:
  // . Traverse referents of final references and keep them and followers alive.
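  // The referents (and everything reachable from them) must survive this
  // collection: the FinalReferences enqueued here are handed over to the Java
  // reference handler, and the referents' finalize() methods run only after
  // the GC completes. Self-looping the next field in
  // process_final_keep_alive_work() marks each FinalReference inactive so it
  // is not discovered again.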
  RefProcPhaseTimeTracker tt(RefPhase3, phase_times);

  if (_processing_is_mt) {
    RefProcPhase3Task phase3(*this, phase_times);
    task_executor->execute(phase3, num_queues());
  } else {
    RefProcSubPhasesWorkerTimeTracker tt2(FinalRefSubPhase3, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      process_final_keep_alive_work(_discoveredFinalRefs[i], keep_alive, complete_gc);
    }
  }
  verify_total_count_zero(_discoveredFinalRefs, "FinalReference");
}

void ReferenceProcessor::process_phantom_refs(BoolObjectClosure* is_alive,
                                              OopClosure* keep_alive,
                                              VoidClosure* complete_gc,
                                              AbstractRefProcTaskExecutor* task_executor,
                                              ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_phantom_refs = total_count(_discoveredPhantomRefs);
  phase_times->set_ref_discovered(REF_PHANTOM, num_phantom_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_phantom_refs == 0) {
    log_debug(gc, ref)("Skipped phase4 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase4, num_phantom_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase4, phase_times);
    maybe_balance_queues(_discoveredPhantomRefs);
  }

  // Phase 4: Walk phantom references appropriately.
  RefProcPhaseTimeTracker tt(RefPhase4, phase_times);

  log_reflist("Phase4 Phantom before", _discoveredPhantomRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase4Task phase4(*this, phase_times);
    task_executor->execute(phase4, num_queues());
  } else {
    size_t removed = 0;

    RefProcSubPhasesWorkerTimeTracker tt(PhantomRefSubPhase4, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      removed += process_phantom_refs_work(_discoveredPhantomRefs[i], is_alive, keep_alive, complete_gc);
    }

    phase_times->add_ref_cleared(REF_PHANTOM, removed);
  }
  verify_total_count_zero(_discoveredPhantomRefs, "PhantomReference");
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
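    // Round-robin is only useful when processing will be multi-threaded: it
    // spreads the discovered references across the lists that the parallel
    // processing workers will later consume. With single-threaded processing
    // everything simply stays on list 0.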
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(id < _max_num_queues, "Id is out of bounds (id %u and max id %u)", id, _max_num_queues);

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  log_develop_trace(gc, ref)("Thread %u gets list " INTPTR_FORMAT, id, p2i(list));
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a
  // non-NULL value at discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(discovered_addr, oop(NULL), next_discovered);

  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  } else {
    // If retest was non-NULL, another thread beat us to it:
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, because they
// may be cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? oopDesc::is_oop(referent) : oopDesc::is_oop_or_null(referent),
         "Bad referent " INTPTR_FORMAT " found in Reference "
         INTPTR_FORMAT " during %satomic discovery ",
         p2i(referent), p2i(obj), da ? "" : "non-");
}
#endif

bool ReferenceProcessor::is_subject_to_discovery(oop const obj) const {
  return _is_subject_to_discovery->do_object_b(obj);
}

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span.
//     This is the simplest, most "local" and most conservative approach,
//     albeit one that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }

  if ((rt == REF_FINAL) && (java_lang_ref_Reference::next(obj) != NULL)) {
    // Don't rediscover non-active FinalReferences.
    return false;
  }

  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !is_subject_to_discovery(obj)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a full collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;              // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr_raw(obj);
  const oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  if (discovered != NULL) {
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it has already been discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that
      // may trace the same Reference object twice.
      assert(UseG1GC || UseShenandoahGC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (is_subject_to_discovery(obj) ||
        (discovery_is_atomic() &&
         is_subject_to_discovery(java_lang_ref_Reference::referent(obj)))) {
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           is_subject_to_discovery(obj), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    RawAccess<>::oop_store(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name());
  }
  assert(oopDesc::is_oop(obj), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

bool ReferenceProcessor::has_discovered_references() {
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if (!_discovered_refs[i].is_empty()) {
      return true;
    }
  }
  return false;
}

void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_alive,
                                                        OopClosure* keep_alive,
                                                        VoidClosure* complete_gc,
                                                        YieldClosure* yield,
                                                        GCTimer* gc_timer) {
  // These lists can be handled here in any order and, indeed, concurrently.
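  // Precleaning is typically run by a concurrent collector while the
  // application is still executing, in order to shrink the discovered lists
  // before the final reference-processing pause. The walk returns early
  // (logged as "abort" below) as soon as the yield closure asks us to stop.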

  // Soft references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer);
    log_reflist("SoftRef before: ", _discoveredSoftRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("SoftRef abort: ", _discoveredSoftRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("SoftRef after: ", _discoveredSoftRefs, _max_num_queues);
  }

  // Weak references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer);
    log_reflist("WeakRef before: ", _discoveredWeakRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("WeakRef abort: ", _discoveredWeakRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("WeakRef after: ", _discoveredWeakRefs, _max_num_queues);
  }

  // Final references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer);
    log_reflist("FinalRef before: ", _discoveredFinalRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("FinalRef abort: ", _discoveredFinalRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("FinalRef after: ", _discoveredFinalRefs, _max_num_queues);
  }

  // Phantom references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer);
    log_reflist("PhantomRef before: ", _discoveredPhantomRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("PhantomRef abort: ", _discoveredPhantomRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("PhantomRef after: ", _discoveredPhantomRefs, _max_num_queues);
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honor the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
bool ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                     BoolObjectClosure* is_alive,
                                                     OopClosure*        keep_alive,
                                                     VoidClosure*       complete_gc,
                                                     YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    if (yield->should_return_fine_grain()) {
      return true;
    }
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    if (iter.referent() == NULL || iter.is_referent_alive()) {
      // The referent has been cleared, or is alive; we need to trace
      // and mark its cohort.
      log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  if (iter.processed() > 0) {
    log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
                               iter.removed(), iter.processed(), p2i(&refs_list));
  }
  return false;
}

const char* ReferenceProcessor::list_name(uint i) {
  assert(i < _max_num_queues * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_queues;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

uint RefProcMTDegreeAdjuster::ergo_proc_thread_count(size_t ref_count,
                                                     uint max_threads,
                                                     RefProcPhases phase) const {
  assert(0 < max_threads, "must allow at least one thread");

  if (use_max_threads(phase) || (ReferencesPerThread == 0)) {
    return max_threads;
  }

  size_t thread_count = 1 + (ref_count / ReferencesPerThread);
  return (uint)MIN3(thread_count,
                    static_cast<size_t>(max_threads),
                    (size_t)os::active_processor_count());
}

bool RefProcMTDegreeAdjuster::use_max_threads(RefProcPhases phase) const {
  // Even a small number of references in either of those cases could produce large amounts of work.
  return (phase == ReferenceProcessor::RefPhase1 || phase == ReferenceProcessor::RefPhase3);
}

RefProcMTDegreeAdjuster::RefProcMTDegreeAdjuster(ReferenceProcessor* rp,
                                                 RefProcPhases phase,
                                                 size_t ref_count):
    _rp(rp),
    _saved_mt_processing(_rp->processing_is_mt()),
    _saved_num_queues(_rp->num_queues()) {
  if (!_rp->processing_is_mt() || !_rp->adjust_no_of_processing_threads() || (ReferencesPerThread == 0)) {
    return;
  }

  uint workers = ergo_proc_thread_count(ref_count, _rp->num_queues(), phase);

  _rp->set_mt_processing(workers > 1);
  _rp->set_active_mt_degree(workers);
}

RefProcMTDegreeAdjuster::~RefProcMTDegreeAdjuster() {
  // Revert to previous status.
  _rp->set_mt_processing(_saved_mt_processing);
  _rp->set_active_mt_degree(_saved_num_queues);
}
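
// A rough sketch of how a collector typically drives a ReferenceProcessor
// (names refer to the methods in this file; the exact call sites live in the
// individual collectors, not here):
//
//   rp->enable_discovery(true /* check_no_refs */);   // before marking starts
//   ... trace the heap; encountering a Reference ends up calling
//       rp->discover_reference(obj, rt) ...
//   rp->preclean_discovered_references(...);          // optional, concurrent collectors only
//   ReferenceProcessorStats stats =
//     rp->process_discovered_references(is_alive, keep_alive, complete_gc,
//                                       task_executor, phase_times);  // also disables discovery
//   rp->verify_no_references_recorded();              // debug builds only
//
// abandon_partial_discovery() is the escape hatch for collections that give up
// before processing the lists they have discovered.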