src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
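
--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp (before the change under review)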

   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"

  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorState.hpp"
  31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1HeapVerifier.hpp"
  34 #include "gc/g1/g1OopClosures.inline.hpp"
  35 #include "gc/g1/g1Policy.hpp"
  36 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/heapRegion.inline.hpp"
  39 #include "gc/g1/heapRegionRemSet.hpp"
  40 #include "gc/g1/heapRegionSet.inline.hpp"
  41 #include "gc/shared/adaptiveSizePolicy.hpp"
  42 #include "gc/shared/gcId.hpp"
  43 #include "gc/shared/gcTimer.hpp"
  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/genOopClosures.inline.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/strongRootsScope.hpp"


 388   _accum_task_vtime(NULL),
 389 
 390   _concurrent_workers(NULL),
 391   _num_concurrent_workers(0),
 392   _max_concurrent_workers(0),
 393 
 394   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 395   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 396 {
 397   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 398   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 399 
 400   // Create & start ConcurrentMark thread.
 401   _cm_thread = new G1ConcurrentMarkThread(this);
 402   if (_cm_thread->osthread() == NULL) {
 403     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 404   }
 405 
 406   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 407 
 408   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
 409   satb_qs.set_buffer_size(G1SATBBufferSize);
 410 
 411   _root_regions.init(_g1h->survivor(), this);
 412 
 413   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 414     // Calculate the number of concurrent worker threads by scaling
 415     // the number of parallel GC threads.
 416     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 417     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 418   }
 419 
 420   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 421   if (ConcGCThreads > ParallelGCThreads) {
 422     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 423                     ConcGCThreads, ParallelGCThreads);
 424     return;
 425   }
 426 
 427   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 428   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
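For reference, the ergonomics above pick roughly one marking thread per four parallel GC threads. A minimal sketch of such a scaling helper, assuming the MAX2 utility (the authoritative definition of scale_concurrent_worker_threads lives elsewhere in this file):

    // Derive a ConcGCThreads default from ParallelGCThreads, never
    // returning fewer than one marking worker.
    static uint scale_concurrent_worker_threads(uint num_gc_workers) {
      return MAX2((num_gc_workers + 2) / 4, 1u);
    }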


 740   }
 741 };
 742 
 743 void G1ConcurrentMark::pre_initial_mark() {
 744   // Initialize marking structures. This has to be done in a STW phase.
 745   reset();
 746 
 747   // For each region, note the start of marking.
 748   NoteStartOfMarkHRClosure startcl;
 749   _g1h->heap_region_iterate(&startcl);
 750 }
 751 
 752 
 753 void G1ConcurrentMark::post_initial_mark() {
 754   // Start Concurrent Marking weak-reference discovery.
 755   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 756   // enable ("weak") refs discovery
 757   rp->enable_discovery();
 758   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 759 
 760   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 761   // This is the start of the marking cycle; we expect all
 762   // threads to have SATB queues with active set to false.
 763   satb_mq_set.set_active_all_threads(true, /* new active value */
 764                                      false /* expected_active */);
 765 
 766   _root_regions.prepare_for_scan();
 767 
 768   // update_g1_committed() will be called at the end of an evac pause
 769   // when marking is on. So, it's also called at the end of the
 770   // initial-mark pause to update the heap end, if the heap expands
 771   // during it. No need to call it here.
 772 }
 773 
 774 /*
 775  * Notice that in the next two methods, we actually leave the STS
 776  * during the barrier sync and join it immediately afterwards. If we
 777  * do not do this, the following deadlock can occur: one thread could
 778  * be in the barrier sync code, waiting for the other thread to also
 779  * sync up, whereas another one could be trying to yield, while also
 780  * waiting for the other threads to sync up too.
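As a hedged illustration of the leave-then-rejoin pattern this comment describes, the barrier entry can be wrapped in an RAII helper that temporarily leaves the suspendible thread set (sketch assuming the SuspendibleThreadSetLeaver utility; the real methods follow in the file):

    void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
      bool barrier_aborted;
      {
        // Leave the STS while blocked in the barrier, so a thread
        // requesting a safepoint never waits on us indefinitely.
        SuspendibleThreadSetLeaver sts_leave(concurrent());
        barrier_aborted = !_first_overflow_barrier_sync.enter();
      }
      // Rejoined the STS here; barrier_aborted tells us whether the
      // sync was torn down (e.g. because marking aborted) while we waited.
    }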


1051   }
1052 
1053   G1Policy* g1p = _g1h->g1_policy();
1054   g1p->record_concurrent_mark_remark_start();
1055 
1056   double start = os::elapsedTime();
1057 
1058   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1059 
1060   {
1061     GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1062     finalize_marking();
1063   }
1064 
1065   double mark_work_end = os::elapsedTime();
1066 
1067   bool const mark_finished = !has_overflown();
1068   if (mark_finished) {
1069     weak_refs_work(false /* clear_all_soft_refs */);
1070 
1071     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1072     // We're done with marking.
1073   // This is the end of the marking cycle; we expect all
1074     // threads to have SATB queues with active set to true.
1075     satb_mq_set.set_active_all_threads(false, /* new active value */
1076                                        true /* expected_active */);
1077 
1078     {
1079       GCTraceTime(Debug, gc, phases)("Flush Task Caches");
1080       flush_all_task_caches();
1081     }
1082 
1083     {
1084       GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
1085       G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
1086       _g1h->heap_region_iterate(&cl);
1087       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1088                                       _g1h->num_regions(), cl.num_selected_for_rebuild());
1089     }
1090 
1091     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "Remark after");


1673     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1674     _thread_parity(Threads::thread_claim_parity()) {}
1675 
1676   void do_thread(Thread* thread) {
1677     if (thread->is_Java_thread()) {
1678       if (thread->claim_oops_do(true, _thread_parity)) {
1679         JavaThread* jt = (JavaThread*)thread;
1680 
1681         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1682         // however, oops reachable from nmethods have very complex lifecycles:
1683         // * Alive if on the stack of an executing method
1684         // * Weakly reachable otherwise
1685         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1686         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1687         jt->nmethods_do(&_code_cl);
1688 
1689         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1690       }
1691     } else if (thread->is_VM_thread()) {
1692       if (thread->claim_oops_do(true, _thread_parity)) {
1693         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1694       }
1695     }
1696   }
1697 };
1698 
1699 class G1CMRemarkTask : public AbstractGangTask {
1700   G1ConcurrentMark* _cm;
1701 public:
1702   void work(uint worker_id) {
1703     G1CMTask* task = _cm->task(worker_id);
1704     task->record_start_time();
1705     {
1706       ResourceMark rm;
1707       HandleMark hm;
1708 
1709       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1710       Threads::threads_do(&threads_f);
1711     }
1712 
1713     do {


1733   _g1h->ensure_parsability(false);
1734 
1735   // This is remark, so we'll use all active threads.
1736   uint active_workers = _g1h->workers()->active_workers();
1737   set_concurrency_and_phase(active_workers, false /* concurrent */);
1738   // Leave _parallel_marking_threads at its
1739   // value originally calculated in the G1ConcurrentMark
1740   // constructor and pass the number of active workers
1741   // through the gang in the task.
1742 
1743   {
1744     StrongRootsScope srs(active_workers);
1745 
1746     G1CMRemarkTask remarkTask(this, active_workers);
1747     // We will start all available threads, even if we decide that the
1748     // active_workers will be fewer. The extra ones will just bail out
1749     // immediately.
1750     _g1h->workers()->run_task(&remarkTask);
1751   }
1752 
1753   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1754   guarantee(has_overflown() ||
1755             satb_mq_set.completed_buffers_num() == 0,
1756             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1757             BOOL_TO_STR(has_overflown()),
1758             satb_mq_set.completed_buffers_num());
1759 
1760   print_stats();
1761 }
1762 
1763 void G1ConcurrentMark::flush_all_task_caches() {
1764   size_t hits = 0;
1765   size_t misses = 0;
1766   for (uint i = 0; i < _max_num_tasks; i++) {
1767     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1768     hits += stats.first;
1769     misses += stats.second;
1770   }
1771   size_t sum = hits + misses;
1772   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1773                        hits, misses, percent_of(hits, sum));
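Each task keeps a small per-region cache of mark statistics, and flushing folds those counts into the global _region_mark_stats array. A hedged sketch of the per-entry flush (field name _live_words assumed from g1RegionMarkStatsCache):

    // Atomically add a task's cached live-word count for one region into
    // the shared statistics, so concurrent flushes do not lose updates.
    static void flush_entry(G1RegionMarkStats* global, uint region_idx, size_t live_words) {
      Atomic::add(live_words, &global[region_idx]._live_words);
    }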


1915 
1916   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
1917   // concurrent bitmap clearing.
1918   {
1919     GCTraceTime(Debug, gc)("Clear Next Bitmap");
1920     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
1921   }
1922   // Note we cannot clear the previous marking bitmap here
1923   // since VerifyDuringGC verifies the objects marked during
1924   // a full GC against the previous bitmap.
1925 
1926   // Empty mark stack
1927   reset_marking_for_restart();
1928   for (uint i = 0; i < _max_num_tasks; ++i) {
1929     _tasks[i]->clear_region_fields();
1930   }
1931   _first_overflow_barrier_sync.abort();
1932   _second_overflow_barrier_sync.abort();
1933   _has_aborted = true;
1934 
1935   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1936   satb_mq_set.abandon_partial_marking();
1937   // This can be called either during or outside marking; we'll read
1938   // the expected_active value from the SATB queue set.
1939   satb_mq_set.set_active_all_threads(
1940                                  false, /* new active value */
1941                                  satb_mq_set.is_active() /* expected_active */);
1942 }
1943 
1944 static void print_ms_time_info(const char* prefix, const char* name,
1945                                NumberSeq& ns) {
1946   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
1947                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
1948   if (ns.num() > 0) {
1949     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
1950                            prefix, ns.sd(), ns.maximum());
1951   }
1952 }
1953 
1954 void G1ConcurrentMark::print_summary_info() {
1955   Log(gc, marking) log;


2125 
2126   // (4) We check whether we should yield. If we have to, then we abort.
2127   if (SuspendibleThreadSet::should_yield()) {
2128     // We should yield. To do this we abort the task. The caller is
2129     // responsible for yielding.
2130     set_has_aborted();
2131     return;
2132   }
2133 
2134   // (5) We check whether we've reached our time quota. If we have,
2135   // then we abort.
2136   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2137   if (elapsed_time_ms > _time_target_ms) {
2138     set_has_aborted();
2139     _has_timed_out = true;
2140     return;
2141   }
2142 
2143   // (6) Finally, we check whether there are enough completed SATB
2144   // buffers available for processing. If there are, we abort.
2145   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2146   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2147     // we do need to process SATB buffers, we'll abort and restart
2148     // the marking task to do so
2149     set_has_aborted();
2150     return;
2151   }
2152 }
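The aborts set in cases (4) to (6) are acted on by the concurrent driver, not here. A simplified sketch of that caller-side loop (patterned after the concurrent marking task in this file; details hedged):

    // Re-run the step after the task aborts (e.g. to yield) until the
    // step really finishes or the whole marking cycle has aborted.
    do {
      task->do_marking_step(G1ConcMarkStepDurationMillis,
                            true  /* do_termination */,
                            false /* is_serial */);
      _cm->do_yield_check();  // yields to a pending safepoint if requested
    } while (!_cm->has_aborted() && task->has_aborted());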
2153 
2154 void G1CMTask::recalculate_limits() {
2155   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2156   _words_scanned_limit      = _real_words_scanned_limit;
2157 
2158   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2159   _refs_reached_limit       = _real_refs_reached_limit;
2160 }
2161 
2162 void G1CMTask::decrease_limits() {
2163   // This is called when we believe that we're going to do an infrequent
2164   // operation which will increase the per-byte scanned cost (i.e. move
2165   // entries to/from the global stack). It basically tries to decrease the


2280     }
2281   }
2282 }
2283 
2284 // The SATB queue has several assumptions on whether to call the par or
2285 // non-par versions of its methods. This is why some of the code is
2286 // replicated. We should really get rid of the single-threaded version
2287 // of the code to simplify things.
2288 void G1CMTask::drain_satb_buffers() {
2289   if (has_aborted()) {
2290     return;
2291   }
2292 
2293   // We set this so that the regular clock knows that we're in the
2294   // middle of draining buffers and doesn't set the abort flag when it
2295   // notices that SATB buffers are available for draining. It'd be
2296   // very counterproductive if it did that. :-)
2297   _draining_satb_buffers = true;
2298 
2299   G1CMSATBBufferClosure satb_cl(this, _g1h);
2300   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2301 
2302   // This keeps claiming and applying the closure to completed buffers
2303   // until we run out of buffers or we need to abort.
2304   while (!has_aborted() &&
2305          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2306     regular_clock_call();
2307   }
2308 
2309   _draining_satb_buffers = false;
2310 
2311   assert(has_aborted() ||
2312          _cm->concurrent() ||
2313          satb_mq_set.completed_buffers_num() == 0, "invariant");
2314 
2315   // Again, this was a potentially expensive operation; decrease the
2316   // limits to get the regular clock call early.
2317   decrease_limits();
2318 }
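For context, the closure applied in the loop above greys every object the pre-write barrier recorded; a hedged sketch of its per-buffer work (the real G1CMSATBBufferClosure, defined earlier in this file, performs additional filtering):

    // Mark each SATB-recorded object so the task will eventually scan it.
    virtual void do_buffer(void** buffer, size_t size) {
      for (size_t i = 0; i < size; i++) {
        _task->make_reference_grey((oop)buffer[i]);
      }
    }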
2319 
2320 void G1CMTask::clear_mark_stats_cache(uint region_idx) {



   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/g1BarrierSet.hpp"
  30 #include "gc/g1/g1CollectedHeap.inline.hpp"
  31 #include "gc/g1/g1CollectorState.hpp"
  32 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  33 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  34 #include "gc/g1/g1HeapVerifier.hpp"
  35 #include "gc/g1/g1OopClosures.inline.hpp"
  36 #include "gc/g1/g1Policy.hpp"
  37 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionRemSet.hpp"
  41 #include "gc/g1/heapRegionSet.inline.hpp"
  42 #include "gc/shared/adaptiveSizePolicy.hpp"
  43 #include "gc/shared/gcId.hpp"
  44 #include "gc/shared/gcTimer.hpp"
  45 #include "gc/shared/gcTrace.hpp"
  46 #include "gc/shared/gcTraceTime.inline.hpp"
  47 #include "gc/shared/genOopClosures.inline.hpp"
  48 #include "gc/shared/referencePolicy.hpp"
  49 #include "gc/shared/strongRootsScope.hpp"


 389   _accum_task_vtime(NULL),
 390 
 391   _concurrent_workers(NULL),
 392   _num_concurrent_workers(0),
 393   _max_concurrent_workers(0),
 394 
 395   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
 396   _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_regions(), mtGC))
 397 {
 398   _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
 399   _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);
 400 
 401   // Create & start ConcurrentMark thread.
 402   _cm_thread = new G1ConcurrentMarkThread(this);
 403   if (_cm_thread->osthread() == NULL) {
 404     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
 405   }
 406 
 407   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 408 
 409   SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set();
 410   satb_qs.set_buffer_size(G1SATBBufferSize);
 411 
 412   _root_regions.init(_g1h->survivor(), this);
 413 
 414   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
 415     // Calculate the number of concurrent worker threads by scaling
 416     // the number of parallel GC threads.
 417     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
 418     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 419   }
 420 
 421   assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
 422   if (ConcGCThreads > ParallelGCThreads) {
 423     log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
 424                     ConcGCThreads, ParallelGCThreads);
 425     return;
 426   }
 427 
 428   log_debug(gc)("ConcGCThreads: %u offset %u", ConcGCThreads, _worker_id_offset);
 429   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);


 741   }
 742 };
 743 
 744 void G1ConcurrentMark::pre_initial_mark() {
 745   // Initialize marking structures. This has to be done in a STW phase.
 746   reset();
 747 
 748   // For each region, note the start of marking.
 749   NoteStartOfMarkHRClosure startcl;
 750   _g1h->heap_region_iterate(&startcl);
 751 }
 752 
 753 
 754 void G1ConcurrentMark::post_initial_mark() {
 755   // Start Concurrent Marking weak-reference discovery.
 756   ReferenceProcessor* rp = _g1h->ref_processor_cm();
 757   // enable ("weak") refs discovery
 758   rp->enable_discovery();
 759   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 760 
 761   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
 762   // This is the start of the marking cycle; we expect all
 763   // threads to have SATB queues with active set to false.
 764   satb_mq_set.set_active_all_threads(true, /* new active value */
 765                                      false /* expected_active */);
 766 
 767   _root_regions.prepare_for_scan();
 768 
 769   // update_g1_committed() will be called at the end of an evac pause
 770   // when marking is on. So, it's also called at the end of the
 771   // initial-mark pause to update the heap end, if the heap expands
 772   // during it. No need to call it here.
 773 }
 774 
 775 /*
 776  * Notice that in the next two methods, we actually leave the STS
 777  * during the barrier sync and join it immediately afterwards. If we
 778  * do not do this, the following deadlock can occur: one thread could
 779  * be in the barrier sync code, waiting for the other thread to also
 780  * sync up, whereas another one could be trying to yield, while also
 781  * waiting for the other threads to sync up too.


1052   }
1053 
1054   G1Policy* g1p = _g1h->g1_policy();
1055   g1p->record_concurrent_mark_remark_start();
1056 
1057   double start = os::elapsedTime();
1058 
1059   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1060 
1061   {
1062     GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1063     finalize_marking();
1064   }
1065 
1066   double mark_work_end = os::elapsedTime();
1067 
1068   bool const mark_finished = !has_overflown();
1069   if (mark_finished) {
1070     weak_refs_work(false /* clear_all_soft_refs */);
1071 
1072     SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1073     // We're done with marking.
1074   // This is the end of the marking cycle; we expect all
1075     // threads to have SATB queues with active set to true.
1076     satb_mq_set.set_active_all_threads(false, /* new active value */
1077                                        true /* expected_active */);
1078 
1079     {
1080       GCTraceTime(Debug, gc, phases)("Flush Task Caches");
1081       flush_all_task_caches();
1082     }
1083 
1084     {
1085       GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking Before Rebuild");
1086       G1UpdateRemSetTrackingBeforeRebuild cl(_g1h, this);
1087       _g1h->heap_region_iterate(&cl);
1088       log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
1089                                       _g1h->num_regions(), cl.num_selected_for_rebuild());
1090     }
1091 
1092     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "Remark after");


1674     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1675     _thread_parity(Threads::thread_claim_parity()) {}
1676 
1677   void do_thread(Thread* thread) {
1678     if (thread->is_Java_thread()) {
1679       if (thread->claim_oops_do(true, _thread_parity)) {
1680         JavaThread* jt = (JavaThread*)thread;
1681 
1682         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1683         // however, oops reachable from nmethods have very complex lifecycles:
1684         // * Alive if on the stack of an executing method
1685         // * Weakly reachable otherwise
1686         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1687         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1688         jt->nmethods_do(&_code_cl);
1689 
1690         jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1691       }
1692     } else if (thread->is_VM_thread()) {
1693       if (thread->claim_oops_do(true, _thread_parity)) {
1694         G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1695       }
1696     }
1697   }
1698 };
1699 
1700 class G1CMRemarkTask : public AbstractGangTask {
1701   G1ConcurrentMark* _cm;
1702 public:
1703   void work(uint worker_id) {
1704     G1CMTask* task = _cm->task(worker_id);
1705     task->record_start_time();
1706     {
1707       ResourceMark rm;
1708       HandleMark hm;
1709 
1710       G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1711       Threads::threads_do(&threads_f);
1712     }
1713 
1714     do {


1734   _g1h->ensure_parsability(false);
1735 
1736   // This is remark, so we'll use all active threads.
1737   uint active_workers = _g1h->workers()->active_workers();
1738   set_concurrency_and_phase(active_workers, false /* concurrent */);
1739   // Leave _parallel_marking_threads at its
1740   // value originally calculated in the G1ConcurrentMark
1741   // constructor and pass the number of active workers
1742   // through the gang in the task.
1743 
1744   {
1745     StrongRootsScope srs(active_workers);
1746 
1747     G1CMRemarkTask remarkTask(this, active_workers);
1748     // We will start all available threads, even if we decide that the
1749     // active_workers will be fewer. The extra ones will just bail out
1750     // immediately.
1751     _g1h->workers()->run_task(&remarkTask);
1752   }
1753 
1754   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1755   guarantee(has_overflown() ||
1756             satb_mq_set.completed_buffers_num() == 0,
1757             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1758             BOOL_TO_STR(has_overflown()),
1759             satb_mq_set.completed_buffers_num());
1760 
1761   print_stats();
1762 }
1763 
1764 void G1ConcurrentMark::flush_all_task_caches() {
1765   size_t hits = 0;
1766   size_t misses = 0;
1767   for (uint i = 0; i < _max_num_tasks; i++) {
1768     Pair<size_t, size_t> stats = _tasks[i]->flush_mark_stats_cache();
1769     hits += stats.first;
1770     misses += stats.second;
1771   }
1772   size_t sum = hits + misses;
1773   log_debug(gc, stats)("Mark stats cache hits " SIZE_FORMAT " misses " SIZE_FORMAT " ratio %1.3lf",
1774                        hits, misses, percent_of(hits, sum));


1916 
1917   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
1918   // concurrent bitmap clearing.
1919   {
1920     GCTraceTime(Debug, gc)("Clear Next Bitmap");
1921     clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
1922   }
1923   // Note we cannot clear the previous marking bitmap here
1924   // since VerifyDuringGC verifies the objects marked during
1925   // a full GC against the previous bitmap.
1926 
1927   // Empty mark stack
1928   reset_marking_for_restart();
1929   for (uint i = 0; i < _max_num_tasks; ++i) {
1930     _tasks[i]->clear_region_fields();
1931   }
1932   _first_overflow_barrier_sync.abort();
1933   _second_overflow_barrier_sync.abort();
1934   _has_aborted = true;
1935 
1936   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1937   satb_mq_set.abandon_partial_marking();
1938   // This can be called either during or outside marking; we'll read
1939   // the expected_active value from the SATB queue set.
1940   satb_mq_set.set_active_all_threads(
1941                                  false, /* new active value */
1942                                  satb_mq_set.is_active() /* expected_active */);
1943 }
1944 
1945 static void print_ms_time_info(const char* prefix, const char* name,
1946                                NumberSeq& ns) {
1947   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
1948                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
1949   if (ns.num() > 0) {
1950     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
1951                            prefix, ns.sd(), ns.maximum());
1952   }
1953 }
1954 
1955 void G1ConcurrentMark::print_summary_info() {
1956   Log(gc, marking) log;


2126 
2127   // (4) We check whether we should yield. If we have to, then we abort.
2128   if (SuspendibleThreadSet::should_yield()) {
2129     // We should yield. To do this we abort the task. The caller is
2130     // responsible for yielding.
2131     set_has_aborted();
2132     return;
2133   }
2134 
2135   // (5) We check whether we've reached our time quota. If we have,
2136   // then we abort.
2137   double elapsed_time_ms = curr_time_ms - _start_time_ms;
2138   if (elapsed_time_ms > _time_target_ms) {
2139     set_has_aborted();
2140     _has_timed_out = true;
2141     return;
2142   }
2143 
2144   // (6) Finally, we check whether there are enough completed SATB
2145   // buffers available for processing. If there are, we abort.
2146   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2147   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2148     // We need to process SATB buffers, so we'll abort and restart
2149     // the marking task to do so.
2150     set_has_aborted();
2151     return;
2152   }
2153 }
2154 
2155 void G1CMTask::recalculate_limits() {
2156   _real_words_scanned_limit = _words_scanned + words_scanned_period;
2157   _words_scanned_limit      = _real_words_scanned_limit;
2158 
2159   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
2160   _refs_reached_limit       = _real_refs_reached_limit;
2161 }
2162 
2163 void G1CMTask::decrease_limits() {
2164   // This is called when we believe that we're going to do an infrequent
2165   // operation which will increase the per-byte scanned cost (i.e. move
2166   // entries to/from the global stack). It basically tries to decrease the


2281     }
2282   }
2283 }
2284 
2285 // The SATB queue has several assumptions on whether to call the par or
2286 // non-par versions of its methods. This is why some of the code is
2287 // replicated. We should really get rid of the single-threaded version
2288 // of the code to simplify things.
2289 void G1CMTask::drain_satb_buffers() {
2290   if (has_aborted()) {
2291     return;
2292   }
2293 
2294   // We set this so that the regular clock knows that we're in the
2295   // middle of draining buffers and doesn't set the abort flag when it
2296   // notices that SATB buffers are available for draining. It'd be
2297   // very counterproductive if it did that. :-)
2298   _draining_satb_buffers = true;
2299 
2300   G1CMSATBBufferClosure satb_cl(this, _g1h);
2301   SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
2302 
2303   // This keeps claiming and applying the closure to completed buffers
2304   // until we run out of buffers or we need to abort.
2305   while (!has_aborted() &&
2306          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2307     regular_clock_call();
2308   }
2309 
2310   _draining_satb_buffers = false;
2311 
2312   assert(has_aborted() ||
2313          _cm->concurrent() ||
2314          satb_mq_set.completed_buffers_num() == 0, "invariant");
2315 
2316   // Again, this was a potentially expensive operation; decrease the
2317   // limits to get the regular clock call early.
2318   decrease_limits();
2319 }
2320 
2321 void G1CMTask::clear_mark_stats_cache(uint region_idx) {

