  // Setting next fields of discovered
  // lists requires a barrier.
}

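// Note: capacity() returns the currently committed size of the heap;
// the full reserved range is reported by max_capacity() (cf. the
// committed/reserved asserts in the failed-expansion path below).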
size_t G1CollectedHeap::capacity() const {
  return _g1_committed.byte_size();
}

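// Drains the completed buffers of the global dirty card queue set,
// applying "cl" to each one, and records how many buffers were
// processed in the UpdateRS phase times. The hot card cache is
// cleaned up first.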
void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                 DirtyCardQueue* into_cset_dcq,
                                                 bool concurrent,
                                                 int worker_i) {
  // Clean cards in the hot card cache
  concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);

  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  int n_completed_buffers = 0;
  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
    n_completed_buffers++;
  }
  g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
  dcqs.clear_n_completed_buffers();
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}


// Computes the sum of the storage used by the various regions.

size_t G1CollectedHeap::used() const {
  assert(Heap_lock->owner() != NULL,
         "Should be owned on this thread's behalf.");
  size_t result = _summary_bytes_used;
  // Read only once in case it is set to NULL concurrently
  HeapRegion* hr = _mutator_alloc_region.get();
  if (hr != NULL)
    result += hr->used();
  return result;
}

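// Lock-free variant of used(): reads _summary_bytes_used without
// asserting Heap_lock ownership, so the result may be slightly stale.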
size_t G1CollectedHeap::used_unlocked() const {
  size_t result = _summary_bytes_used;
  return result;
}
// ... [intervening source elided; the fragment below is from the heap
// expansion step near the end of an evacuation pause] ...

      size_t bytes_before = capacity();
      // No need for an ergo verbose message here;
      // expansion_amount() does this when it returns a value > 0.
      if (!expand(expand_bytes)) {
        // We failed to expand the heap, so verify that the
        // committed/uncommitted amounts match the backing store.
        assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
        assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
      }
    }
  }

  // We redo the verification, but now with respect to the new CSet, which
  // has just been initialized after the previous CSet was freed.
  _cm->verify_no_cset_oops(true /* verify_stacks */,
                           true /* verify_enqueued_buffers */,
                           true /* verify_thread_buffers */,
                           true /* verify_fingers */);
  _cm->note_end_of_gc();

  // This timing is only used by the ergonomics to handle our pause target.
  // It is unclear why this should not include the full pause. We will
  // investigate this in CR 7178365.
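  // MILLIUNITS (1000) converts the elapsed seconds into the
  // milliseconds that record_collection_pause_end() expects.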
  double sample_end_time_sec = os::elapsedTime();
  double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
  g1_policy()->record_collection_pause_end(pause_time_ms);

  MemoryService::track_memory_usage();

  // In prepare_for_verify() below we'll need to scan the deferred
  // update buffers to bring the RSets up-to-date if
  // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
  // the update buffers we'll probably need to scan cards on the
  // regions we just allocated to (i.e., the GC alloc
  // regions). However, during the last GC we called
  // set_saved_mark() on all the GC alloc regions, so card
  // scanning might skip the [saved_mark_word()...top()] area of
  // those regions (i.e., the area we allocated objects into
  // during the last GC). But it shouldn't. Given that
  // saved_mark_word() is conditional on whether the GC time stamp

// ... [comment truncated; intervening source elided. The fragment below is
// from a per-worker evacuation task] ...
      scan_perm_cl = &scan_mark_perm_cl;
    }

    G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);

    pss.start_strong_roots();
    _g1h->g1_process_strong_roots(/* not collecting perm */ false,
                                  SharedHeap::SO_AllClasses,
                                  scan_root_cl,
                                  &push_heap_rs_cl,
                                  scan_perm_cl,
                                  worker_id);
    pss.end_strong_roots();

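    // Drain the task queues: everything timed in this block except
    // termination is attributed to object copying.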
    {
      double start = os::elapsedTime();
      G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
      evac.do_void();
      double elapsed_ms = (os::elapsedTime() - start) * 1000.0;
      double term_ms = pss.term_time() * 1000.0;
      _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms - term_ms);
      _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
    }
    _g1h->g1_policy()->record_thread_age_table(pss.age_table());
    _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
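    // Note: the "+1" above is intentional; it skips the first element of
    // the per-thread surviving-young-words array.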

    if (ParallelGCVerbose) {
      MutexLocker x(stats_lock());
      pss.print_termination_stats(worker_id);
    }

    assert(pss.refs()->is_empty(), "should be empty");

    // Close the inner scope so that the ResourceMark and HandleMark
    // destructors are executed here and are included as part of the
    // "GC Worker Time".
  }

  double end_time_ms = os::elapsedTime() * 1000.0;
  _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
}

// ... [intervening source elided; the fragment below is from
// G1CollectedHeap::g1_process_strong_roots()] ...

  process_strong_roots(false, // no scoping; this is parallel code
                       collecting_perm_gen, so,
                       &buf_scan_non_heap_roots,
                       &eager_scan_code_roots,
                       &buf_scan_perm);

  // Now the CM ref_processor roots.
  if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
    // We need to treat the discovered reference lists of the
    // concurrent mark ref processor as roots and keep entries
    // (which are added by the marking threads) on them live
    // until they can be processed at the end of marking.
    ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
  }

  // Finish up any enqueued closure apps (attributed as object copy time).
  buf_scan_non_heap_roots.done();
  buf_scan_perm.done();

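  // The buffered closures time their own applications; that time is
  // booked as object copy and subtracted from the external root scan
  // time computed below.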
  double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
                             buf_scan_non_heap_roots.closure_app_seconds();
  g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);

  double ext_root_time_ms =
    ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;

  g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);

  // During conc marking we have to filter the per-thread SATB buffers
  // to make sure we remove any oops into the CSet (which will show up
  // as implicitly live).
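  // satb_filtering_ms stays 0.0 if this worker does not claim the
  // filtering task (or marking is not in progress), so every worker
  // reports a value to the phase times.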
  double satb_filtering_ms = 0.0;
  if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
    if (mark_in_progress()) {
      double satb_filter_start = os::elapsedTime();

      JavaThread::satb_mark_queue_set().filter_thread_buffers();

      satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
    }
  }
  g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);

  // Now scan the complement of the collection set.
  if (scan_rs != NULL) {
    g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
  }

  _process_strong_tasks->all_tasks_completed();
}

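// Delegates weak-root processing to SharedHeap::process_weak_roots(),
// wrapping "root_closure" in a (non-marking) CodeBlobToOopClosure so
// that oops embedded in code blobs are visited as well.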
void
G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
                                       OopClosure* non_root_closure) {
  CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
}

// Weak Reference Processing support

// An always "is_alive" closure that is used to preserve referents.