rev 6670 : 8048085: Aborting marking just before remark results in useless additional clearing of the next mark bitmap
Summary: After a concurrent mark abort caused by a Full GC, G1 unnecessarily clears the next mark bitmap a second time concurrently, even though the Full GC mark abort procedure has already done so. Before clearing the next mark bitmap, check whether a mark abort occurred to avoid this redundant work.
Reviewed-by: tbd
rev 6671 : imported patch bengt-fixes
rev 6672 : imported patch bengt-fixes2
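The core of the change is the guard at the end of the marking cycle in ConcurrentMarkThread::run() below: the next mark bitmap is only cleared concurrently if marking was not aborted, since the Full GC abort path has already cleared it. The relevant shape of the code, as it appears later in this file, is:

  // We may have aborted just before the remark. Do not bother clearing the
  // bitmap then, as it has been done during mark abort.
  if (!cm()->has_aborted()) {
    SuspendibleThreadSetJoiner sts;
    _cm->clearNextBitmap();
  } else {
    assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
  }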
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/vmThread.hpp"

// ======= Concurrent Mark Thread ========

// The CM thread is created when the G1 garbage collector is used

SurrogateLockerThread*
     ConcurrentMarkThread::_slt = NULL;

ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
  ConcurrentGCThread(),
  _cm(cm),
  _started(false),
  _in_progress(false),
  _vtime_accum(0.0),
  _vtime_mark_accum(0.0) {

  set_name("G1 Main Concurrent Mark GC Thread");
  create_and_start();
}

class CMCheckpointRootsFinalClosure: public VoidClosure {

  ConcurrentMark* _cm;
public:

  CMCheckpointRootsFinalClosure(ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void(){
    _cm->checkpointRootsFinal(false); // !clear_all_soft_refs
  }
};

class CMCleanUp: public VoidClosure {
  ConcurrentMark* _cm;
public:

  CMCleanUp(ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void(){
    _cm->cleanup();
  }
};



void ConcurrentMarkThread::run() {
  initialize_in_thread();
  _vtime_start = os::elapsedVTime();
  wait_for_universe_init();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1_policy = g1h->g1_policy();
  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
  Thread *current_thread = Thread::current();

  while (!_should_terminate) {
    // wait until started is set.
    sleepBeforeNextCycle();
    if (_should_terminate) {
      break;
    }

    {
      ResourceMark rm;
      HandleMark hm;
      double cycle_start = os::elapsedVTime();

      // We have to ensure that we finish scanning the root regions
      // before the next GC takes place. To ensure this we have to
      // make sure that we do not join the STS until the root regions
      // have been scanned. If we did, then it's possible that a
      // subsequent GC could block us from joining the STS and proceed
      // without the root regions having been scanned, which would be a
      // correctness issue.

      double scan_start = os::elapsedTime();
      if (!cm()->has_aborted()) {
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
        }

        _cm->scanRootRegions();

        double scan_end = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
                                 scan_end - scan_start);
        }
      }

      double mark_start_sec = os::elapsedTime();
      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-start]");
      }

      int iter = 0;
      do {
        iter++;
        if (!cm()->has_aborted()) {
          _cm->markFromRoots();
        }

        double mark_end_time = os::elapsedVTime();
        double mark_end_sec = os::elapsedTime();
        _vtime_mark_accum += (mark_end_time - cycle_start);
        if (!cm()->has_aborted()) {
          if (g1_policy->adaptive_young_list_length()) {
            double now = os::elapsedTime();
            double remark_prediction_ms = g1_policy->predict_remark_time_ms();
            jlong sleep_time_ms = mmu_tracker->when_ms(now, remark_prediction_ms);
            os::sleep(current_thread, sleep_time_ms, false);
          }

          if (G1Log::fine()) {
            gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
            gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]",
                                   mark_end_sec - mark_start_sec);
          }

          CMCheckpointRootsFinalClosure final_cl(_cm);
          VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
          VMThread::execute(&op);
        }
        if (cm()->restart_for_overflow()) {
          if (G1TraceMarkStackOverflow) {
            gclog_or_tty->print_cr("Restarting conc marking because of MS overflow "
                                   "in remark (restart #%d).", iter);
          }
          if (G1Log::fine()) {
            gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
            gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
          }
        }
      } while (cm()->restart_for_overflow());

      double end_time = os::elapsedVTime();
      // Update the total virtual time before doing this, since it will try
      // to measure it to get the vtime for this marking. We purposely
      // neglect the presumably-short "completeCleanup" phase here.
      _vtime_accum = (end_time - _vtime_start);

      if (!cm()->has_aborted()) {
        if (g1_policy->adaptive_young_list_length()) {
          double now = os::elapsedTime();
          double cleanup_prediction_ms = g1_policy->predict_cleanup_time_ms();
          jlong sleep_time_ms = mmu_tracker->when_ms(now, cleanup_prediction_ms);
          os::sleep(current_thread, sleep_time_ms, false);
        }

        CMCleanUp cl_cl(_cm);
        VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
        VMThread::execute(&op);
      } else {
        // We don't want to update the marking status if a GC pause
        // is already underway.
        SuspendibleThreadSetJoiner sts;
        g1h->set_marking_complete();
      }

      // Check if cleanup set the free_regions_coming flag. If it
      // hasn't, we can just skip the next step.
      if (g1h->free_regions_coming()) {
        // The following will finish freeing up any regions that we
        // found to be empty during cleanup. We'll do this part
        // without joining the suspendible set. If an evacuation pause
        // takes place, then we would carry on freeing regions in
        // case they are needed by the pause. If a Full GC takes
        // place, it would wait for us to process the regions
        // reclaimed by cleanup.

        double cleanup_start_sec = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
        }

        // Now do the concurrent cleanup operation.
        _cm->completeCleanup();

        // Notify anyone who's waiting that there are no more free
        // regions coming. We have to do this before we join the STS
        // (in fact, we should not attempt to join the STS in the
        // interval between finishing the cleanup pause and clearing
        // the free_regions_coming flag) otherwise we might deadlock:
        // a GC worker could be blocked waiting for the notification
        // whereas this thread will be blocked for the pause to finish
        // while it's trying to join the STS, which is conditional on
        // the GC workers finishing.
        g1h->reset_free_regions_coming();

        double cleanup_end_sec = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]",
                                 cleanup_end_sec - cleanup_start_sec);
        }
      }
      guarantee(cm()->cleanup_list_is_empty(),
                "at this point there should be no regions on the cleanup list");

      // There is a tricky race between recording that the concurrent
      // cleanup has completed and a potential Full GC starting around
      // the same time. We want to make sure that the Full GC calls
      // abort() on concurrent mark after
      // record_concurrent_mark_cleanup_completed(), since abort() is
      // the method that will reset the concurrent mark state. If we
      // end up calling record_concurrent_mark_cleanup_completed()
      // after abort() then we might incorrectly undo some of the work
      // abort() did. Checking the has_aborted() flag after joining
      // the STS allows the correct ordering of the two methods. There
      // are two scenarios:
      //
      // a) If we reach here before the Full GC, the fact that we have
      // joined the STS means that the Full GC cannot start until we
      // leave the STS, so record_concurrent_mark_cleanup_completed()
      // will complete before abort() is called.
      //
      // b) If we reach here during the Full GC, we'll be held up from
      // joining the STS until the Full GC is done, which means that
      // abort() will have completed and has_aborted() will return
      // true to prevent us from calling
      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
      // not needed any more as the concurrent mark state has been
      // already reset).
      {
        SuspendibleThreadSetJoiner sts;
        if (!cm()->has_aborted()) {
          g1_policy->record_concurrent_mark_cleanup_completed();
        }
      }

      if (cm()->has_aborted()) {
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
        }
      }

      // We now want to allow clearing of the marking bitmap to be
      // suspended by a collection pause.
      // We may have aborted just before the remark. Do not bother clearing the
      // bitmap then, as it has been done during mark abort.
      if (!cm()->has_aborted()) {
        SuspendibleThreadSetJoiner sts;
        _cm->clearNextBitmap();
      } else {
        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
      }
    }

    // Update the number of full collections that have been
    // completed.
    // This will also notify the FullGCCount_lock in case a
    // Java thread is waiting for a full GC to happen (e.g., it
    // called System.gc() with +ExplicitGCInvokesConcurrent).
    {
      SuspendibleThreadSetJoiner sts;
      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
      g1h->register_concurrent_cycle_end();
    }
  }
  assert(_should_terminate, "just checking");

  terminate();
}

void ConcurrentMarkThread::stop() {
  {
    MutexLockerEx ml(Terminator_lock);
    _should_terminate = true;
  }

  {
    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
    CGC_lock->notify_all();
  }

  {
    MutexLockerEx ml(Terminator_lock);
    while (!_has_terminated) {
      Terminator_lock->wait();
    }
  }
}

void ConcurrentMarkThread::sleepBeforeNextCycle() {
  // We join here because we don't want to do the "shouldConcurrentMark()"
  // below while the world is otherwise stopped.
  assert(!in_progress(), "should have been cleared");

  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  while (!started() && !_should_terminate) {
    CGC_lock->wait(Mutex::_no_safepoint_check_flag);
  }

  if (started()) {
    set_in_progress();
    clear_started();
  }
}

// Note: As is the case with CMS - this method, although exported
// by the ConcurrentMarkThread, which is a non-JavaThread, can only
// be called by a JavaThread. Currently this is done at vm creation
// time (post-vm-init) by the main/Primordial (Java)Thread.
// XXX Consider changing this in the future to allow the CM thread
// itself to create this thread?
void ConcurrentMarkThread::makeSurrogateLockerThread(TRAPS) {
  assert(UseG1GC, "SLT thread needed only for concurrent GC");
  assert(THREAD->is_Java_thread(), "must be a Java thread");
  assert(_slt == NULL, "SLT already created");
  _slt = SurrogateLockerThread::make(THREAD);
}