/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "logging/logStream.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"

// ...

    update_young_list_max_and_target_length(rs_lengths_prediction);
  }
}

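// Refresh the cached prediction of the remembered set lengths expected at the
// next GC. The value is only cached during the young-only phase and only when
// the young list length is sized adaptively, since that is when it feeds the
// young generation sizing above.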
void G1Policy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(_analytics->predict_rs_lengths());
}

void G1Policy::update_rs_lengths_prediction(size_t prediction) {
  if (collector_state()->in_young_only_phase() && adaptive_young_list_length()) {
    _rs_lengths_prediction = prediction;
  }
}

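// Record the start of a full collection: note the start time, leave the
// young-only phase, and drop the current collection set candidates.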
void G1Policy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_in_young_only_phase(false);
  collector_state()->set_in_full_gc(true);
  _collection_set->clear_candidates();
}

void G1Policy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_in_full_gc(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_in_young_only_phase(true);
  collector_state()->set_in_young_gc_before_mixed(false);
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_in_initial_mark_gc(false);
  collector_state()->set_mark_or_rebuild_in_progress(false);

// ...
}

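// The helpers below break the time of the last pause into the components used
// by the pause time predictions: choosing and freeing the young and non-young
// parts of the collection set, and the residual "other" time.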
double G1Policy::young_other_time_ms() const {
  return phase_times()->young_cset_choice_time_ms() +
         phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet);
}

double G1Policy::non_young_other_time_ms() const {
  return phase_times()->non_young_cset_choice_time_ms() +
         phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
}

double G1Policy::other_time_ms(double pause_time_ms) const {
  return pause_time_ms - phase_times()->cur_collection_par_time_ms();
}

double G1Policy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms();
}

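// A mixed phase is considered imminent if concurrent marking is still in
// progress or the last young-only GC before the mixed phase is pending.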
bool G1Policy::about_to_start_mixed_phase() const {
  return _g1h->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
}

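// Decide whether a concurrent marking cycle should be started: returns true
// if the current old generation occupancy plus the pending allocation would
// exceed the (possibly adaptive) IHOP threshold while we are in the
// young-only phase. Never requests a cycle when a mixed phase is imminent.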
bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    return false;
  }

  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
// ...
    // This skews the predicted marking length towards smaller values, which
    // might cause the mark start to be too late.
    _initial_mark_to_mixed.reset();
  }

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;

  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
                                "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
                                update_rs_time_goal_ms, scan_hcc_time_ms);

    update_rs_time_goal_ms = 0;
  } else {
    update_rs_time_goal_ms -= scan_hcc_time_ms;
  }
  _g1h->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS),
                                    phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                                    update_rs_time_goal_ms);
}

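// Create the IHOP (Initiating Heap Occupancy Percent) control that determines
// the old generation occupancy at which concurrent marking should start:
// adaptive if G1UseAdaptiveIHOP is enabled, otherwise fixed at
// InitiatingHeapOccupancyPercent.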
G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor) {
  if (G1UseAdaptiveIHOP) {
    return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
                                     predictor,
                                     G1ReservePercent,
                                     G1HeapWastePercent);
  } else {
    return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
  }
}

void G1Policy::update_ihop_prediction(double mutator_time_s,
                                      size_t mutator_alloc_bytes,
                                      size_t young_gen_size,
                                      bool this_gc_was_young_only) {
  // Always try to update IHOP prediction. Even evacuation failures give information
  // about e.g. whether to start IHOP earlier next time.

// ...
      log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we started one right now, the two cycles
      // would overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now would be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
    }
  }
}

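// Called at the end of the concurrent mark cleanup phase: (re)build the
// collection set candidates from the marking results and decide whether the
// upcoming GCs should be mixed. If mixed GCs are not worthwhile, the
// candidates are dropped again and time-to-mixed tracking is aborted.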
void G1Policy::record_concurrent_mark_cleanup_end() {
  G1CollectionSetCandidates* candidates = CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions());
  _collection_set->set_candidates(candidates);

  bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
  if (!mixed_gc_pending) {
    clear_collection_set_candidates();
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
  collector_state()->set_mark_or_rebuild_in_progress(false);

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}

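// Express the given number of reclaimable bytes as a percentage of the
// current heap capacity.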
double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
  return percent_of(reclaimable_bytes, _g1h->capacity());
}

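// Closure that clears the remembered set (card set only) of every region it
// is applied to; used to clean up remaining collection set candidates.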
class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
  virtual bool do_heap_region(HeapRegion* r) {
    r->rem_set()->clear_locked(true /* only_cardset */);
    return false;
  }
};

void G1Policy::clear_collection_set_candidates() {
  // Clear remembered sets of remaining candidate regions and the actual candidate
  // set.
  G1ClearCollectionSetCandidateRemSets cl;
  _collection_set->candidates()->iterate(&cl);
  _collection_set->clear_candidates();
}

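// Called at the end of a collection pause: if the IHOP-based heuristic in
// need_to_start_conc_mark() says so, request that an upcoming pause initiate
// a concurrent mark.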
void G1Policy::maybe_start_marking() {
  if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }
}

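// Classify the young GC pause just taken from the collector state, e.g. an
// initial-mark pause or the last young-only pause before a mixed phase. The
// result feeds the pause recording below.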
G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
  assert(!collector_state()->in_full_gc(), "must be");
  if (collector_state()->in_initial_mark_gc()) {
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return InitialMarkGC;
  } else if (collector_state()->in_young_gc_before_mixed()) {
    assert(!collector_state()->in_initial_mark_gc(), "must be");
    return LastYoungGC;
  } else if (collector_state()->in_mixed_phase()) {
// ...
      break;
    case InitialMarkGC:
      if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
        _initial_mark_to_mixed.record_initial_mark_end(end);
      }
      break;
    case MixedGC:
      _initial_mark_to_mixed.record_mixed_gc_start(start);
      break;
    default:
      ShouldNotReachHere();
  }
}

void G1Policy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}

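// Determine whether the next GC should be a mixed GC: there must be old
// region candidates left, and the space reclaimable from them must exceed
// G1HeapWastePercent of the heap. The action strings are only used when
// logging the decision.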
bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
                                       const char* false_action_str) const {
  G1CollectionSetCandidates* candidates = _collection_set->candidates();

  if (candidates->is_empty()) {
    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
  double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_percent <= threshold) {
    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                        false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
    return false;
  }
  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                      true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
  return true;
}

uint G1Policy::calc_min_old_cset_length() const {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet candidates in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = _collection_set->candidates()->num_regions();
  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  size_t result = region_num / gc_num;
  // emulate ceiling
  if (result * gc_num < region_num) {
    result += 1;
  }
  return (uint) result;
}

uint G1Policy::calc_max_old_cset_length() const {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.

  const G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->num_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling