1326 ShenandoahHeapRegion* current = get_region(i);
1327 blk->heap_region_do(current);
1328 }
1329 }
1330
1331 class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
1332 private:
1333 ShenandoahHeap* const _heap;
1334 ShenandoahHeapRegionClosure* const _blk;
1335
1336 shenandoah_padding(0);
1337 volatile size_t _index;
1338 shenandoah_padding(1);
1339
1340 public:
1341 ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1342 AbstractGangTask("Parallel Region Task"),
1343 _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1344
1345 void work(uint worker_id) {
1346 size_t stride = ShenandoahParallelRegionStride;
1347
1348 size_t max = _heap->num_regions();
1349 while (_index < max) {
1350 size_t cur = Atomic::fetch_and_add(&_index, stride);
1351 size_t start = cur;
1352 size_t end = MIN2(cur + stride, max);
1353 if (start >= max) break;
1354
1355 for (size_t i = cur; i < end; i++) {
1356 ShenandoahHeapRegion* current = _heap->get_region(i);
1357 _blk->heap_region_do(current);
1358 }
1359 }
1360 }
1361 };
1362
1363 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1364 assert(blk->is_thread_safe(), "Only thread-safe closures here");
1365 if (num_regions() > ShenandoahParallelRegionStride) {
1395
// STW operation for the Init Mark pause: verify pre-mark invariants, enable
// concurrent marking (activates the SATB barrier), reset TLABs, prepare
// per-region state, and scan GC roots. Runs in the VM thread at a
// Shenandoah safepoint.
void ShenandoahHeap::op_init_mark() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!marking_context()->is_complete(), "should not be complete");
  assert(!has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  {
    ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::make_parsable);
    make_parsable(true);
  }

  {
    ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);
  }

  // Make above changes visible to worker threads
  OrderAccess::fence();

  concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);

  if (UseTLAB) {
    ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::resize_tlabs);
    resize_tlabs();
  }

  if (ShenandoahPacing) {
    pacer()->setup_for_mark();
  }

  // Arm nmethods for concurrent marking. When a nmethod is about to be executed,
  // we need to make sure that all its metadata are marked. alternative is to remark
  // thread roots at final mark pause, but it can be potential latency killer.
  if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
    ShenandoahCodeRoots::arm_nmethods();
  }
}
1446
// Concurrent marking phase: traverse the heap transitively from the roots
// that were scanned during the Init Mark pause.
void ShenandoahHeap::op_mark() {
  concurrent_mark()->mark_from_roots();
}
1450
1451 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1500 assert(!has_forwarded_objects(), "No forwarded objects on this path");
1501
1502 // It is critical that we
1503 // evacuate roots right after finishing marking, so that we don't
1504 // get unmarked objects in the roots.
1505
1506 if (!cancelled_gc()) {
1507 concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
1508
1509 // Marking is completed, deactivate SATB barrier
1510 set_concurrent_mark_in_progress(false);
1511 mark_complete_marking_context();
1512
1513 parallel_cleaning(false /* full gc*/);
1514
1515 if (ShenandoahVerify) {
1516 verifier()->verify_roots_no_forwarded();
1517 }
1518
1519 {
1520 ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_region_states);
1521 ShenandoahFinalMarkUpdateRegionStateClosure cl;
1522 parallel_heap_region_iterate(&cl);
1523
1524 assert_pinned_region_status();
1525 }
1526
1527 // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
1528 // This is needed for two reasons. Strong one: new allocations would be with new freeset,
1529 // which would be outside the collection set, so no cset writes would happen there.
1530 // Weaker one: new allocations would happen past update watermark, and so less work would
1531 // be needed for reference updates (would update the large filler instead).
1532 {
1533 ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::retire_tlabs);
1534 make_parsable(true);
1535 }
1536
1537 {
1538 ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::choose_cset);
1539 ShenandoahHeapLocker locker(lock());
1540 _collection_set->clear();
1541 heuristics()->choose_collection_set(_collection_set);
1542 }
1543
1544 {
1545 ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);
1546 ShenandoahHeapLocker locker(lock());
1547 _free_set->rebuild();
1548 }
1549
1550 if (!is_degenerated_gc_in_progress()) {
1551 prepare_concurrent_roots();
1552 prepare_concurrent_unloading();
1553 }
1554
1555 // If collection set has candidates, start evacuation.
1556 // Otherwise, bypass the rest of the cycle.
1557 if (!collection_set()->is_empty()) {
1558 ShenandoahGCSubPhase init_evac(ShenandoahPhaseTimings::init_evac);
1559
1560 if (ShenandoahVerify) {
1561 verifier()->verify_before_evacuation();
1562 }
1563
1564 set_evacuation_in_progress(true);
1565 // From here on, we need to update references.
1566 set_has_forwarded_objects(true);
1567
1568 if (!is_degenerated_gc_in_progress()) {
1569 if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1570 ShenandoahCodeRoots::arm_nmethods();
1571 }
1572 evacuate_and_update_roots();
1573 }
1574
1575 if (ShenandoahPacing) {
1576 pacer()->setup_for_evac();
1577 }
1578
1634 free_set()->recycle_trash();
1635 }
1636
// Concurrent cleanup at the end of the cycle: return trashed regions to the
// free set so they become available for allocation.
void ShenandoahHeap::op_cleanup_complete() {
  free_set()->recycle_trash();
}
1640
1641 class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
1642 private:
1643 ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
1644 ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
1645 ShenandoahConcurrentStringDedupRoots _dedup_roots;
1646
1647 public:
1648 ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1649 AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots Task"),
1650 _vm_roots(phase),
1651 _cld_roots(phase) {}
1652
1653 void work(uint worker_id) {
1654 ShenandoahEvacOOMScope oom;
1655 {
1656 // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
1657 // may race against OopStorage::release() calls.
1658 ShenandoahEvacUpdateOopStorageRootsClosure cl;
1659 _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl, worker_id);
1660 }
1661
1662 {
1663 ShenandoahEvacuateUpdateRootsClosure<> cl;
1664 CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1665 _cld_roots.cld_do(&clds, worker_id);
1666 }
1667
1668 {
1669 ShenandoahForwardedIsAliveClosure is_alive;
1670 ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
1671 _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
1672 }
1673 }
1772 _nmethod_itr(ShenandoahCodeRoots::table()),
1773 _concurrent_class_unloading(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1774 StringTable::reset_dead_counter();
1775 ResolvedMethodTable::reset_dead_counter();
1776 if (_concurrent_class_unloading) {
1777 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1778 _nmethod_itr.nmethods_do_begin();
1779 }
1780 }
1781
1782 ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
1783 StringTable::finish_dead_counter();
1784 ResolvedMethodTable::finish_dead_counter();
1785 if (_concurrent_class_unloading) {
1786 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1787 _nmethod_itr.nmethods_do_end();
1788 }
1789 }
1790
1791 void work(uint worker_id) {
1792 {
1793 ShenandoahEvacOOMScope oom;
1794 // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
1795 // may race against OopStorage::release() calls.
1796 ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
1797 _jni_roots.oops_do(&cl, worker_id);
1798 _vm_roots.oops_do(&cl, worker_id);
1799
1800 cl.reset_dead_counter();
1801 _string_table_roots.oops_do(&cl, worker_id);
1802 StringTable::inc_dead_counter(cl.dead_counter());
1803
1804 cl.reset_dead_counter();
1805 _resolved_method_table_roots.oops_do(&cl, worker_id);
1806 ResolvedMethodTable::inc_dead_counter(cl.dead_counter());
1807 }
1808
1809 // If we are going to perform concurrent class unloading later on, we need to
1810 // cleanup the weak oops in CLD and determinate nmethod's unloading state, so that we
1811 // can cleanup immediate garbage sooner.
1875 }
1876 reset_mark_bitmap();
1877
1878 ShenandoahResetUpdateRegionStateClosure cl;
1879 parallel_heap_region_iterate(&cl);
1880 }
1881
// Concurrent precleaning of weak references; sets up the pacer first when
// pacing is enabled.
void ShenandoahHeap::op_preclean() {
  if (ShenandoahPacing) {
    pacer()->setup_for_preclean();
  }
  concurrent_mark()->preclean_weak_refs();
}
1888
// STW Full GC: run the full collection for the given cause, resize TLABs
// afterwards, and record whether the cycle reclaimed enough memory so the
// allocation path knows whether it is allowed to finally fail.
void ShenandoahHeap::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  full_gc()->do_it(cause);
  if (UseTLAB) {
    ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
    resize_all_tlabs();
  }

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    _progress_last_gc.set();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    _progress_last_gc.unset();
  }
}
1909
1910 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1911 // Degenerated GC is STW, but it can also fail. Current mechanics communicates
1912 // GC failure via cancelled_concgc() flag. So, if we detect the failure after
1913 // some phase, we have to upgrade the Degenerate GC to Full GC.
1914
1915 clear_cancelled_gc();
2186 // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2187 control_thread()->prepare_for_graceful_shutdown();
2188
2189 // Step 2. Notify GC workers that we are cancelling GC.
2190 cancel_gc(GCCause::_shenandoah_stop_vm);
2191
2192 // Step 3. Wait until GC worker exits normally.
2193 control_thread()->stop();
2194
2195 // Step 4. Stop String Dedup thread if it is active
2196 if (ShenandoahStringDedup::is_enabled()) {
2197 ShenandoahStringDedup::stop();
2198 }
2199 }
2200
// STW class unloading: unload dead classes, unlink code/metadata, purge the
// ClassLoaderDataGraph, then resize and verify metaspace. No-op unless this
// cycle requested class unloading. Phase timings differ for full vs. regular GC.
void ShenandoahHeap::stw_unload_classes(bool full_gc) {
  if (!unload_classes()) return;

  // Unload classes and purge SystemDictionary.
  {
    ShenandoahGCSubPhase phase(full_gc ?
                               ShenandoahPhaseTimings::full_gc_purge_class_unload :
                               ShenandoahPhaseTimings::purge_class_unload);
    bool purged_class = SystemDictionary::do_unloading(gc_timer());

    ShenandoahIsAliveSelector is_alive;
    uint num_workers = _workers->active_workers();
    ShenandoahClassUnloadingTask unlink_task(is_alive.is_alive_closure(), num_workers, purged_class);
    _workers->run_task(&unlink_task);
  }

  {
    ShenandoahGCSubPhase phase(full_gc ?
                               ShenandoahPhaseTimings::full_gc_purge_cldg :
                               ShenandoahPhaseTimings::purge_cldg);
    ClassLoaderDataGraph::purge();
  }
  // Resize and verify metaspace
  MetaspaceGC::compute_new_size();
  MetaspaceUtils::verify_metrics();
}
2227
2228 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
2229 // so they should not have forwarded oops.
2230 // However, we do need to "null" dead oops in the roots, if can not be done
2231 // in concurrent cycles.
2232 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2233 ShenandoahGCSubPhase root_phase(full_gc ?
2234 ShenandoahPhaseTimings::full_gc_purge :
2235 ShenandoahPhaseTimings::purge);
2236 uint num_workers = _workers->active_workers();
2237 ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2238 ShenandoahPhaseTimings::full_gc_purge_weak_par :
2239 ShenandoahPhaseTimings::purge_weak_par;
2240 ShenandoahGCSubPhase phase(timing_phase);
2241 ShenandoahGCWorkerPhase worker_phase(timing_phase);
2242
2243 // Cleanup weak roots
2244 if (has_forwarded_objects()) {
2245 ShenandoahForwardedIsAliveClosure is_alive;
2246 ShenandoahUpdateRefsClosure keep_alive;
2247 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2248 cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
2249 _workers->run_task(&cleaning_task);
2250 } else {
2251 ShenandoahIsAliveClosure is_alive;
2252 #ifdef ASSERT
2253 ShenandoahAssertNotForwardedClosure verify_cl;
2254 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2255 cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
2256 #else
2257 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2258 cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
2259 #endif
2260 _workers->run_task(&cleaning_task);
2476 }
2477 if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2478 return;
2479 }
2480 r = _regions->next();
2481 }
2482 }
2483 };
2484
// Update heap references to point at to-space copies, distributing regions
// among the GC workers via the shared update-refs iterator.
void ShenandoahHeap::update_heap_references(bool concurrent) {
  ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
  workers()->run_task(&task);
}
2489
// STW operation for the Init Update Refs pause: end evacuation, retire
// GCLABs, verify roots, and arm the region iterator and pacer for the
// concurrent update-references phase.
void ShenandoahHeap::op_init_updaterefs() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  set_evacuation_in_progress(false);

  {
    ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::init_update_refs_retire_gclabs);
    retire_and_reset_gclabs();
  }

  if (ShenandoahVerify) {
    if (!is_degenerated_gc_in_progress()) {
      // Thread roots are excluded: they may still hold from-space references here.
      verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
    }
    verifier()->verify_before_updaterefs();
  }

  set_update_refs_in_progress(true);

  _update_refs_iterator.reset();

  if (ShenandoahPacing) {
    pacer()->setup_for_updaterefs();
  }
}
2515
2516 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2532 }
2533 } else {
2534 if (r->pin_count() > 0) {
2535 ShenandoahHeapLocker locker(_lock);
2536 r->make_pinned();
2537 }
2538 }
2539 }
2540 }
2541
2542 bool is_thread_safe() { return true; }
2543 };
2544
// STW operation for the Final Update Refs pause: finish any leftover
// reference-update work, update roots, retire the collection set, and
// rebuild the free set. Order of steps is significant throughout.
void ShenandoahHeap::op_final_updaterefs() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  finish_concurrent_unloading();

  // Check if there is left-over work, and finish it
  if (_update_refs_iterator.has_next()) {
    ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);

    // Finish updating references where we left off.
    clear_cancelled_gc();
    update_heap_references(false);
  }

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything. On degenerated paths, cancelled gc would not be set anyway.
  if (cancelled_gc()) {
    clear_cancelled_gc();
  }
  assert(!cancelled_gc(), "Should have been done right before");

  if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
    verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
  }

  if (is_degenerated_gc_in_progress()) {
    concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
  } else {
    concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);
  }

  // Has to be done before cset is clear
  if (ShenandoahVerify) {
    verifier()->verify_roots_in_to_space();
  }

  {
    ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states);
    ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);
    trash_cset_regions();
  }

  set_has_forwarded_objects(false);
  set_update_refs_in_progress(false);

  if (ShenandoahVerify) {
    verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}
2611
// Extended heap printout: general heap summary followed by per-region details.
void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  print_heap_regions_on(st);
}
2616
2617 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2618 size_t slice = r->index() / _bitmap_regions_per_slice;
2619
2620 size_t regions_from = _bitmap_regions_per_slice * slice;
2621 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2622 for (size_t g = regions_from; g < regions_to; g++) {
2623 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2624 if (skip_self && g == r->index()) continue;
2625 if (get_region(g)->is_committed()) {
2626 return true;
2674 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2675 return false;
2676 }
2677 return true;
2678 }
2679
// Called when a global safepoint begins: park suspendible worker threads so
// they do not run while the safepoint operation is in progress.
void ShenandoahHeap::safepoint_synchronize_begin() {
  if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}
2685
// Called when a global safepoint ends: let suspendible worker threads resume.
void ShenandoahHeap::safepoint_synchronize_end() {
  if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}
2691
// Schedule the Init Mark pause: account gross (scheduling-inclusive) pause
// time, optionally inject an allocation failure for testing, and execute the
// VM operation that reaches entry_init_mark() at a safepoint.
void ShenandoahHeap::vmop_entry_init_mark() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitMark op;
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}
2700
// Schedule the Final Mark pause; gross timing includes safepoint scheduling.
void ShenandoahHeap::vmop_entry_final_mark() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op;
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}
2709
// Schedule the Init Update Refs pause; gross timing includes safepoint scheduling.
void ShenandoahHeap::vmop_entry_init_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op;
  VMThread::execute(&op);
}
2718
// Schedule the Final Update Refs pause; gross timing includes safepoint scheduling.
void ShenandoahHeap::vmop_entry_final_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op;
  VMThread::execute(&op);
}
2727
// Schedule a Full GC pause for the given cause; uses full-STW counters.
void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause);
  VMThread::execute(&op);
}
2736
2737 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2738 TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2739 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2740
2741 VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2742 VMThread::execute(°enerated_gc);
2743 }
2744
// Pause entry point for Init Mark: set up logging/event marks, net pause
// timing and the worker count, then run the pause operation.
void ShenandoahHeap::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}
2758
// Pause entry point for Final Mark: set up logging/event marks, net pause
// timing and the worker count, then run the pause operation.
void ShenandoahHeap::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}
2772
// Pause entry point for Init Update Refs. This pause uses no worker threads.
void ShenandoahHeap::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);

  // No workers used in this phase, no setup required

  op_init_updaterefs();
}
2784
// Pause entry point for Final Update Refs: set up logging, net pause timing
// and workers, then run the pause operation.
void ShenandoahHeap::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}
2798
// Pause entry point for Full GC: set up logging (with heap usage), timing
// and workers, then run the full collection.
void ShenandoahHeap::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}
2812
// Pause entry point for Degenerated GC: run the degenerated cycle from the
// given point, with the degenerated-GC flag raised for its duration.
void ShenandoahHeap::entry_degenerated(int point) {
  ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
  const char* msg = degen_event_message(dpoint);
  ShenandoahPausePhase gc_phase(msg, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  set_degenerated_gc_in_progress(true);
  op_degenerated(dpoint);
  set_degenerated_gc_in_progress(false);
}
2829
// Concurrent entry point for marking: set up counters, logging, timing and
// workers, optionally inject an allocation failure, then mark.
void ShenandoahHeap::entry_mark() {
  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg);
  EventMark em("%s", msg);

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  try_inject_alloc_failure();
  op_mark();
}
2846
// Concurrent entry point for evacuation: set up counters, logging, timing
// and workers, optionally inject an allocation failure, then evacuate.
void ShenandoahHeap::entry_evac() {
  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg);
  EventMark em("%s", msg);

  ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  try_inject_alloc_failure();
  op_conc_evac();
}
2863
// Concurrent entry point for reference updating: set up logging, timing and
// workers, optionally inject an allocation failure, then update references.
void ShenandoahHeap::entry_updaterefs() {
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  try_inject_alloc_failure();
  op_updaterefs();
}
2878
// Concurrent entry point for weak root processing: set up logging, timing
// (including per-worker timing), and workers, then process weak roots.
void ShenandoahHeap::entry_weak_roots() {
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_weak_roots);
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  try_inject_alloc_failure();
  op_weak_roots();
}
2894
// Concurrent entry point for class unloading: set up logging, timing and
// workers, then unload classes concurrently.
void ShenandoahHeap::entry_class_unloading() {
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_class_unloading);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  try_inject_alloc_failure();
  op_class_unloading();
}
2909
// Concurrent entry point for strong root processing: set up logging, timing
// (including per-worker timing), and workers, then process strong roots.
void ShenandoahHeap::entry_strong_roots() {
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_strong_roots);
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  try_inject_alloc_failure();
  op_strong_roots();
}
2925
// Concurrent entry point for early cleanup (after final mark). This phase
// uses no worker threads.
void ShenandoahHeap::entry_cleanup_early() {
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_cleanup_early);

  // This phase does not use workers, no need for setup

  try_inject_alloc_failure();
  op_cleanup_early();
}
2938
// Concurrent entry point for final cleanup (end of cycle). This phase uses
// no worker threads.
void ShenandoahHeap::entry_cleanup_complete() {
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_cleanup_complete);

  // This phase does not use workers, no need for setup

  try_inject_alloc_failure();
  op_cleanup_complete();
}
2951
// Concurrent entry point for the reset phase (prepare for the next cycle):
// set up logging, timing and workers, then reset.
void ShenandoahHeap::entry_reset() {
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg);
  EventMark em("%s", msg);

  ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_reset);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  try_inject_alloc_failure();
  op_reset();
}
2966
// Concurrent entry point for reference precleaning. Only runs when
// precleaning is enabled and this cycle processes references; worker count
// is not enforced (check_workers = false).
void ShenandoahHeap::entry_preclean() {
  if (ShenandoahPreclean && process_references()) {
    static const char* msg = "Concurrent precleaning";
    ShenandoahConcurrentPhase gc_phase(msg);
    EventMark em("%s", msg);

    ShenandoahGCSubPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);

    ShenandoahWorkerScope scope(workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
                                "concurrent preclean",
                                /* check_workers = */ false);

    try_inject_alloc_failure();
    op_preclean();
  }
}
2984
// Concurrent entry point for uncommitting unused heap memory back to the OS.
// No allocation-failure injection here; uncommit is not part of a GC cycle.
void ShenandoahHeap::entry_uncommit(double shrink_before) {
  static const char *msg = "Concurrent uncommit";
  ShenandoahConcurrentPhase gc_phase(msg, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_uncommit);

  op_uncommit(shrink_before);
}
2994
2995 void ShenandoahHeap::try_inject_alloc_failure() {
2996 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2997 _inject_alloc_failure.set();
2998 os::naked_short_sleep(1);
2999 if (cancelled_gc()) {
3000 log_info(gc)("Allocation failure was successfully injected");
3001 }
3002 }
3003 }
3004
// Returns true at most once per injected failure: checks the flag and
// atomically consumes it so only a single allocation observes the failure.
bool ShenandoahHeap::should_inject_alloc_failure() {
  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
}
3008
3009 void ShenandoahHeap::initialize_serviceability() {
3010 _memory_pool = new ShenandoahMemoryPool(this);
|
1326 ShenandoahHeapRegion* current = get_region(i);
1327 blk->heap_region_do(current);
1328 }
1329 }
1330
1331 class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
1332 private:
1333 ShenandoahHeap* const _heap;
1334 ShenandoahHeapRegionClosure* const _blk;
1335
1336 shenandoah_padding(0);
1337 volatile size_t _index;
1338 shenandoah_padding(1);
1339
1340 public:
1341 ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1342 AbstractGangTask("Parallel Region Task"),
1343 _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1344
1345 void work(uint worker_id) {
1346 ShenandoahParallelWorkerSession worker_session(worker_id);
1347 size_t stride = ShenandoahParallelRegionStride;
1348
1349 size_t max = _heap->num_regions();
1350 while (_index < max) {
1351 size_t cur = Atomic::fetch_and_add(&_index, stride);
1352 size_t start = cur;
1353 size_t end = MIN2(cur + stride, max);
1354 if (start >= max) break;
1355
1356 for (size_t i = cur; i < end; i++) {
1357 ShenandoahHeapRegion* current = _heap->get_region(i);
1358 _blk->heap_region_do(current);
1359 }
1360 }
1361 }
1362 };
1363
1364 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1365 assert(blk->is_thread_safe(), "Only thread-safe closures here");
1366 if (num_regions() > ShenandoahParallelRegionStride) {
1396
// STW operation for the Init Mark pause: verify pre-mark invariants, enable
// concurrent marking (activates the SATB barrier), reset TLABs, prepare
// per-region state, and scan GC roots. Runs in the VM thread at a
// Shenandoah safepoint.
void ShenandoahHeap::op_init_mark() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!marking_context()->is_complete(), "should not be complete");
  assert(!has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
    make_parsable(true);
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);
  }

  // Make above changes visible to worker threads
  OrderAccess::fence();

  concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
    resize_tlabs();
  }

  if (ShenandoahPacing) {
    pacer()->setup_for_mark();
  }

  // Arm nmethods for concurrent marking. When a nmethod is about to be executed,
  // we need to make sure that all its metadata are marked. alternative is to remark
  // thread roots at final mark pause, but it can be potential latency killer.
  if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
    ShenandoahCodeRoots::arm_nmethods();
  }
}
1447
// Concurrent marking proper: trace the heap from the roots scanned at Init Mark.
void ShenandoahHeap::op_mark() {
  concurrent_mark()->mark_from_roots();
}
1451
1452 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1501 assert(!has_forwarded_objects(), "No forwarded objects on this path");
1502
1503 // It is critical that we
1504 // evacuate roots right after finishing marking, so that we don't
1505 // get unmarked objects in the roots.
1506
1507 if (!cancelled_gc()) {
1508 concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
1509
1510 // Marking is completed, deactivate SATB barrier
1511 set_concurrent_mark_in_progress(false);
1512 mark_complete_marking_context();
1513
1514 parallel_cleaning(false /* full gc*/);
1515
1516 if (ShenandoahVerify) {
1517 verifier()->verify_roots_no_forwarded();
1518 }
1519
1520 {
1521 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_region_states);
1522 ShenandoahFinalMarkUpdateRegionStateClosure cl;
1523 parallel_heap_region_iterate(&cl);
1524
1525 assert_pinned_region_status();
1526 }
1527
1528 // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
1529 // This is needed for two reasons. Strong one: new allocations would be with new freeset,
1530 // which would be outside the collection set, so no cset writes would happen there.
1531 // Weaker one: new allocations would happen past update watermark, and so less work would
1532 // be needed for reference updates (would update the large filler instead).
1533 {
1534 ShenandoahGCPhase phase(ShenandoahPhaseTimings::retire_tlabs);
1535 make_parsable(true);
1536 }
1537
1538 {
1539 ShenandoahGCPhase phase(ShenandoahPhaseTimings::choose_cset);
1540 ShenandoahHeapLocker locker(lock());
1541 _collection_set->clear();
1542 heuristics()->choose_collection_set(_collection_set);
1543 }
1544
1545 {
1546 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);
1547 ShenandoahHeapLocker locker(lock());
1548 _free_set->rebuild();
1549 }
1550
1551 if (!is_degenerated_gc_in_progress()) {
1552 prepare_concurrent_roots();
1553 prepare_concurrent_unloading();
1554 }
1555
1556 // If collection set has candidates, start evacuation.
1557 // Otherwise, bypass the rest of the cycle.
1558 if (!collection_set()->is_empty()) {
1559 ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1560
1561 if (ShenandoahVerify) {
1562 verifier()->verify_before_evacuation();
1563 }
1564
1565 set_evacuation_in_progress(true);
1566 // From here on, we need to update references.
1567 set_has_forwarded_objects(true);
1568
1569 if (!is_degenerated_gc_in_progress()) {
1570 if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1571 ShenandoahCodeRoots::arm_nmethods();
1572 }
1573 evacuate_and_update_roots();
1574 }
1575
1576 if (ShenandoahPacing) {
1577 pacer()->setup_for_evac();
1578 }
1579
1635 free_set()->recycle_trash();
1636 }
1637
// Final concurrent cleanup: return trashed regions to the free set.
void ShenandoahHeap::op_cleanup_complete() {
  free_set()->recycle_trash();
}
1641
1642 class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
1643 private:
1644 ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
1645 ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
1646 ShenandoahConcurrentStringDedupRoots _dedup_roots;
1647
1648 public:
1649 ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1650 AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots Task"),
1651 _vm_roots(phase),
1652 _cld_roots(phase) {}
1653
1654 void work(uint worker_id) {
1655 ShenandoahConcurrentWorkerSession worker_session(worker_id);
1656 ShenandoahEvacOOMScope oom;
1657 {
1658 // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
1659 // may race against OopStorage::release() calls.
1660 ShenandoahEvacUpdateOopStorageRootsClosure cl;
1661 _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl, worker_id);
1662 }
1663
1664 {
1665 ShenandoahEvacuateUpdateRootsClosure<> cl;
1666 CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1667 _cld_roots.cld_do(&clds, worker_id);
1668 }
1669
1670 {
1671 ShenandoahForwardedIsAliveClosure is_alive;
1672 ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
1673 _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
1674 }
1675 }
1774 _nmethod_itr(ShenandoahCodeRoots::table()),
1775 _concurrent_class_unloading(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1776 StringTable::reset_dead_counter();
1777 ResolvedMethodTable::reset_dead_counter();
1778 if (_concurrent_class_unloading) {
1779 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1780 _nmethod_itr.nmethods_do_begin();
1781 }
1782 }
1783
  // Balances the constructor: publish the dead counters that were reset there,
  // and close the nmethod iteration (under CodeCache_lock, matching the
  // nmethods_do_begin() in the constructor) when class unloading is concurrent.
  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    StringTable::finish_dead_counter();
    ResolvedMethodTable::finish_dead_counter();
    if (_concurrent_class_unloading) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
  }
1792
1793 void work(uint worker_id) {
1794 ShenandoahConcurrentWorkerSession worker_session(worker_id);
1795 {
1796 ShenandoahEvacOOMScope oom;
1797 // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
1798 // may race against OopStorage::release() calls.
1799 ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
1800 _jni_roots.oops_do(&cl, worker_id);
1801 _vm_roots.oops_do(&cl, worker_id);
1802
1803 cl.reset_dead_counter();
1804 _string_table_roots.oops_do(&cl, worker_id);
1805 StringTable::inc_dead_counter(cl.dead_counter());
1806
1807 cl.reset_dead_counter();
1808 _resolved_method_table_roots.oops_do(&cl, worker_id);
1809 ResolvedMethodTable::inc_dead_counter(cl.dead_counter());
1810 }
1811
1812 // If we are going to perform concurrent class unloading later on, we need to
1813 // cleanup the weak oops in CLD and determinate nmethod's unloading state, so that we
1814 // can cleanup immediate garbage sooner.
1878 }
1879 reset_mark_bitmap();
1880
1881 ShenandoahResetUpdateRegionStateClosure cl;
1882 parallel_heap_region_iterate(&cl);
1883 }
1884
// Concurrent precleaning of weak references, with optional pacing setup.
void ShenandoahHeap::op_preclean() {
  if (ShenandoahPacing) {
    pacer()->setup_for_preclean();
  }
  concurrent_mark()->preclean_weak_refs();
}
1891
// STW Full GC. Snapshots heap metrics around the cycle and records whether
// the collection made progress, which the allocation path consults before
// finally failing an allocation.
void ShenandoahHeap::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  full_gc()->do_it(cause);
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
    resize_all_tlabs();
  }

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    _progress_last_gc.set();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    _progress_last_gc.unset();
  }
}
1912
1913 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1914 // Degenerated GC is STW, but it can also fail. Current mechanics communicates
1915 // GC failure via cancelled_concgc() flag. So, if we detect the failure after
1916 // some phase, we have to upgrade the Degenerate GC to Full GC.
1917
1918 clear_cancelled_gc();
2189 // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2190 control_thread()->prepare_for_graceful_shutdown();
2191
2192 // Step 2. Notify GC workers that we are cancelling GC.
2193 cancel_gc(GCCause::_shenandoah_stop_vm);
2194
2195 // Step 3. Wait until GC worker exits normally.
2196 control_thread()->stop();
2197
2198 // Step 4. Stop String Dedup thread if it is active
2199 if (ShenandoahStringDedup::is_enabled()) {
2200 ShenandoahStringDedup::stop();
2201 }
2202 }
2203
// STW class unloading: unload classes, unlink dead code/metadata with the
// worker gang, purge the CLD graph, then resize/verify metaspace. No-op when
// this cycle does not unload classes. Phase timings pick the full-GC or
// concurrent-cycle counters depending on full_gc.
void ShenandoahHeap::stw_unload_classes(bool full_gc) {
  if (!unload_classes()) return;

  // Unload classes and purge SystemDictionary.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_class_unload :
                            ShenandoahPhaseTimings::purge_class_unload);
    bool purged_class = SystemDictionary::do_unloading(gc_timer());

    ShenandoahIsAliveSelector is_alive;
    uint num_workers = _workers->active_workers();
    ShenandoahClassUnloadingTask unlink_task(is_alive.is_alive_closure(), num_workers, purged_class);
    _workers->run_task(&unlink_task);
  }

  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_cldg :
                            ShenandoahPhaseTimings::purge_cldg);
    ClassLoaderDataGraph::purge();
  }
  // Resize and verify metaspace
  MetaspaceGC::compute_new_size();
  MetaspaceUtils::verify_metrics();
}
2230
2231 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
2232 // so they should not have forwarded oops.
2233 // However, we do need to "null" dead oops in the roots, if can not be done
2234 // in concurrent cycles.
2235 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2236 ShenandoahGCPhase root_phase(full_gc ?
2237 ShenandoahPhaseTimings::full_gc_purge :
2238 ShenandoahPhaseTimings::purge);
2239 uint num_workers = _workers->active_workers();
2240 ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2241 ShenandoahPhaseTimings::full_gc_purge_weak_par :
2242 ShenandoahPhaseTimings::purge_weak_par;
2243 ShenandoahGCPhase phase(timing_phase);
2244 ShenandoahGCWorkerPhase worker_phase(timing_phase);
2245
2246 // Cleanup weak roots
2247 if (has_forwarded_objects()) {
2248 ShenandoahForwardedIsAliveClosure is_alive;
2249 ShenandoahUpdateRefsClosure keep_alive;
2250 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2251 cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
2252 _workers->run_task(&cleaning_task);
2253 } else {
2254 ShenandoahIsAliveClosure is_alive;
2255 #ifdef ASSERT
2256 ShenandoahAssertNotForwardedClosure verify_cl;
2257 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2258 cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
2259 #else
2260 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2261 cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
2262 #endif
2263 _workers->run_task(&cleaning_task);
2479 }
2480 if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2481 return;
2482 }
2483 r = _regions->next();
2484 }
2485 }
2486 };
2487
2488 void ShenandoahHeap::update_heap_references(bool concurrent) {
2489 ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2490 workers()->run_task(&task);
2491 }
2492
// Init Update Refs pause: evacuation is over, so retire GCLABs, optionally
// verify, then enable the update-refs phase and reset the region iterator
// used by the concurrent reference updaters.
void ShenandoahHeap::op_init_updaterefs() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  set_evacuation_in_progress(false);

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_retire_gclabs);
    retire_and_reset_gclabs();
  }

  if (ShenandoahVerify) {
    // Thread roots are excluded here; on degenerated paths they may still
    // point into from-space at this point.
    if (!is_degenerated_gc_in_progress()) {
      verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
    }
    verifier()->verify_before_updaterefs();
  }

  set_update_refs_in_progress(true);

  _update_refs_iterator.reset();

  if (ShenandoahPacing) {
    pacer()->setup_for_updaterefs();
  }
}
2518
2519 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2535 }
2536 } else {
2537 if (r->pin_count() > 0) {
2538 ShenandoahHeapLocker locker(_lock);
2539 r->make_pinned();
2540 }
2541 }
2542 }
2543 }
2544
2545 bool is_thread_safe() { return true; }
2546 };
2547
// Final Update Refs pause: finish any leftover reference updates, update
// roots, recompute region states, trash the collection set, and rebuild the
// free set. After this pause there are no forwarded objects in the heap.
void ShenandoahHeap::op_final_updaterefs() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  finish_concurrent_unloading();

  // Check if there is left-over work, and finish it
  if (_update_refs_iterator.has_next()) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);

    // Finish updating references where we left off.
    clear_cancelled_gc();
    update_heap_references(false);
  }

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything. On degenerated paths, cancelled gc would not be set anyway.
  if (cancelled_gc()) {
    clear_cancelled_gc();
  }
  assert(!cancelled_gc(), "Should have been done right before");

  if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
    verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
  }

  if (is_degenerated_gc_in_progress()) {
    concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
  } else {
    concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);
  }

  // Has to be done before cset is clear
  if (ShenandoahVerify) {
    verifier()->verify_roots_in_to_space();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states);
    ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);
    trash_cset_regions();
  }

  set_has_forwarded_objects(false);
  set_update_refs_in_progress(false);

  if (ShenandoahVerify) {
    verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}
2614
// Extended heap printout: the regular summary followed by per-region details.
void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  print_heap_regions_on(st);
}
2619
2620 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2621 size_t slice = r->index() / _bitmap_regions_per_slice;
2622
2623 size_t regions_from = _bitmap_regions_per_slice * slice;
2624 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2625 for (size_t g = regions_from; g < regions_to; g++) {
2626 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2627 if (skip_self && g == r->index()) continue;
2628 if (get_region(g)->is_committed()) {
2629 return true;
2677 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2678 return false;
2679 }
2680 return true;
2681 }
2682
// Safepoint begin hook: park suspendible threads (GC workers and/or string
// dedup) so the safepoint can proceed.
void ShenandoahHeap::safepoint_synchronize_begin() {
  if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}
2688
// Safepoint end hook: release the suspendible threads parked in
// safepoint_synchronize_begin().
void ShenandoahHeap::safepoint_synchronize_end() {
  if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}
2694
// Schedule the Init Mark pause as a VM operation on the VMThread.
// Gross timing includes time-to-safepoint; net timing is taken inside the op.
void ShenandoahHeap::vmop_entry_init_mark() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitMark op;
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}
2703
// Schedule the Final Mark pause (which may also start evacuation) on the VMThread.
void ShenandoahHeap::vmop_entry_final_mark() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op;
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}
2712
// Schedule the Init Update Refs pause on the VMThread.
void ShenandoahHeap::vmop_entry_init_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op;
  VMThread::execute(&op);
}
2721
// Schedule the Final Update Refs pause on the VMThread.
void ShenandoahHeap::vmop_entry_final_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op;
  VMThread::execute(&op);
}
2730
// Schedule a STW Full GC on the VMThread, recording the triggering cause.
void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause);
  VMThread::execute(&op);
}
2739
2740 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2741 TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2742 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
2743
2744 VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2745 VMThread::execute(°enerated_gc);
2746 }
2747
// Safepoint side of Init Mark: set up pause timing/JFR event and worker
// count, then run the pause work.
void ShenandoahHeap::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}
2759
// Safepoint side of Final Mark: set up pause timing/JFR event and worker
// count, then run the pause work.
void ShenandoahHeap::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}
2771
// Safepoint side of Init Update Refs. This pause is single-threaded, so no
// worker scope is installed.
void ShenandoahHeap::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required

  op_init_updaterefs();
}
2781
// Safepoint side of Final Update Refs: set up pause timing/JFR event and
// worker count, then run the pause work.
void ShenandoahHeap::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}
2793
// Safepoint side of Full GC: set up pause timing (with heap usage logging)
// and worker count, then run the collection.
void ShenandoahHeap::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}
2805
// Safepoint side of Degenerated GC: set up timing/JFR event and workers,
// and bracket the cycle with the degenerated-in-progress flag so downstream
// code can tell it is running on the degenerated path.
void ShenandoahHeap::entry_degenerated(int point) {
  ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
  const char* msg = degen_event_message(dpoint);
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  set_degenerated_gc_in_progress(true);
  op_degenerated(dpoint);
  set_degenerated_gc_in_progress(false);
}
2820
// Concurrent-phase wrapper for marking: timing/JFR/worker setup, optional
// alloc-failure injection, then the marking work.
void ShenandoahHeap::entry_mark() {
  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  try_inject_alloc_failure();
  op_mark();
}
2835
// Concurrent-phase wrapper for evacuation: timing/JFR/worker setup, optional
// alloc-failure injection, then the evacuation work.
void ShenandoahHeap::entry_evac() {
  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  try_inject_alloc_failure();
  op_conc_evac();
}
2850
// Concurrent-phase wrapper for reference updating.
void ShenandoahHeap::entry_updaterefs() {
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  try_inject_alloc_failure();
  op_updaterefs();
}
2863
// Concurrent-phase wrapper for weak root processing; also installs the
// per-worker timing phase used by the root-processing tasks.
void ShenandoahHeap::entry_weak_roots() {
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  try_inject_alloc_failure();
  op_weak_roots();
}
2878
// Concurrent-phase wrapper for class unloading.
void ShenandoahHeap::entry_class_unloading() {
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unloading);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  try_inject_alloc_failure();
  op_class_unloading();
}
2891
// Concurrent-phase wrapper for strong root processing; also installs the
// per-worker timing phase used by the root-processing tasks.
void ShenandoahHeap::entry_strong_roots() {
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  try_inject_alloc_failure();
  op_strong_roots();
}
2906
// Concurrent-phase wrapper for the early cleanup (after final mark); logs
// heap usage. Single-threaded, so no worker scope.
void ShenandoahHeap::entry_cleanup_early() {
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup

  try_inject_alloc_failure();
  op_cleanup_early();
}
2917
// Concurrent-phase wrapper for the final cleanup (after update refs); logs
// heap usage. Single-threaded, so no worker scope.
void ShenandoahHeap::entry_cleanup_complete() {
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup

  try_inject_alloc_failure();
  op_cleanup_complete();
}
2928
// Concurrent-phase wrapper for the pre-cycle reset.
void ShenandoahHeap::entry_reset() {
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  try_inject_alloc_failure();
  op_reset();
}
2941
// Concurrent-phase wrapper for reference precleaning. Only runs when
// precleaning is enabled and this cycle processes references; worker-count
// checking is disabled because precleaning is single-threaded.
void ShenandoahHeap::entry_preclean() {
  if (ShenandoahPreclean && process_references()) {
    static const char* msg = "Concurrent precleaning";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_preclean);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
                                "concurrent preclean",
                                /* check_workers = */ false);

    try_inject_alloc_failure();
    op_preclean();
  }
}
2957
// Concurrent-phase wrapper for uncommitting unused memory; logs heap usage.
// No workers, and no alloc-failure injection (not part of the GC cycle).
void ShenandoahHeap::entry_uncommit(double shrink_before) {
  static const char *msg = "Concurrent uncommit";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
  EventMark em("%s", msg);

  op_uncommit(shrink_before);
}
2965
2966 void ShenandoahHeap::try_inject_alloc_failure() {
2967 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2968 _inject_alloc_failure.set();
2969 os::naked_short_sleep(1);
2970 if (cancelled_gc()) {
2971 log_info(gc)("Allocation failure was successfully injected");
2972 }
2973 }
2974 }
2975
// Consumes the injected-failure flag: returns true at most once per
// injection (try_unset clears the flag when it succeeds).
bool ShenandoahHeap::should_inject_alloc_failure() {
  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
}
2979
2980 void ShenandoahHeap::initialize_serviceability() {
2981 _memory_pool = new ShenandoahMemoryPool(this);
|