rev 4008 : 8001985: G1: Backport fix for 7200261 to hsx24
Summary: The automatic backport of the fix for 7200261 did not apply cleanly to hsx24 - there were two rejected hunks that had to be fixed up by hand.
Reviewed-by:
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP

#include "gc_implementation/g1/heapRegionSets.hpp"
#include "utilities/taskqueue.hpp"

class G1CollectedHeap;
class CMTask;
typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  void do_object(oop obj) {
    ShouldNotCallThis();
  }
  bool do_object_b(oop obj);
};

// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1 << _shifter) HeapWords.

class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord* _bmStartWord;      // base address of range covered by map
  size_t    _bmWordSize;       // map size (in #HeapWords covered)
  const int _shifter;          // map to char or bit
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap    _bm;               // the bit map itself

 public:
  // constructor
  CMBitMapRO(ReservedSpace rs, int shifter);

  enum { do_yield = true };

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);
  inline bool iterate(BitMapClosure* cl);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
                                     HeapWord* limit = NULL) const;
  // Return the address corresponding to the next unmarked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
                                       HeapWord* limit = NULL) const;

  // conversion utilities
  // XXX Fix these so that offsets are size_t's...
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }
  int heapWordDiffToOffsetDiff(size_t diff) const;
  HeapWord* nextWord(HeapWord* addr) {
    return offsetToHeapWord(heapWordToOffset(addr) + 1);
  }

  // debugging
  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
};

class CMBitMap : public CMBitMapRO {

 public:
  // constructor
  CMBitMap(ReservedSpace rs, int shifter) :
    CMBitMapRO(rs, shifter) {}

  // write marks
  void mark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.set_bit(heapWordToOffset(addr));
  }
  void clear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.clear_bit(heapWordToOffset(addr));
  }
  bool parMark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_set_bit(heapWordToOffset(addr));
  }
  bool parClear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_clear_bit(heapWordToOffset(addr));
  }
  void markRange(MemRegion mr);
  void clearAll();
  void clearRange(MemRegion mr);

  // Starting at the bit corresponding to "addr" (inclusive), find the next
  // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
  // the end of this run (stopping at "end_addr"). Return the MemRegion
  // covering from the start of the region corresponding to the first bit
  // of the run to the end of the region corresponding to the last bit of
  // the run. If there is no "1" bit at or after "addr", return an empty
  // MemRegion.
  MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
};
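
// A minimal sketch of the address <-> bit-index mapping above, assuming a
// hypothetical shifter of 0 (i.e. one bit per HeapWord); the actual value
// is chosen by the code that constructs the mark bitmaps:
//
//   CMBitMap bm(rs, 0);
//   HeapWord* addr = bm.startWord() + 17;
//   size_t    bit  = bm.heapWordToOffset(addr);  // == 17 >> 0 == 17
//   bm.mark(addr);                               // sets bit 17
//   assert(bm.offsetToHeapWord(bit) == addr, "round trip");
//   assert(bm.isMarked(addr), "just marked");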

// Represents a marking stack used by the CM collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
  ConcurrentMark* _cm;
  oop*   _base;        // bottom of stack
  jint   _index;       // one more than last occupied index
  jint   _capacity;    // max #elements
  jint   _saved_index; // value of _index saved at start of GC
  NOT_PRODUCT(jint _max_depth;)  // max depth plumbed during run

  bool   _overflow;
  DEBUG_ONLY(bool _drain_in_progress;)
  DEBUG_ONLY(bool _drain_in_progress_yields;)

 public:
  CMMarkStack(ConcurrentMark* cm);
  ~CMMarkStack();

  void allocate(size_t size);

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  // If overflow happens, don't do the push, and record the overflow.
  // *Requires* that "ptr" is already marked.
  void push(oop ptr) {
    if (isFull()) {
      // Record overflow.
      _overflow = true;
      return;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
    }
  }
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_push" operations, not with "pop" or "drain". We would need
  // parallel versions of them if such concurrency was desired.
  void par_push(oop ptr);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
  void par_adjoin_arr(oop* ptr_arr, int n);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking impl: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // If returns false, the array was empty. Otherwise, removes up to "max"
  // elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order. The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.) Locking impl:
  // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
  // operations, which use the same locking strategy.
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  // Drain the mark stack, applying the given closure to all fields of
  // objects on the stack. (That is, continue until the stack is empty,
  // even if closure applications add entries to the stack.) The "bm"
  // argument, if non-null, may be used to verify that only marked objects
  // are on the mark stack. If "yield_after" is "true", then the
  // concurrent marker performing the drain offers to yield after
  // processing each object. If a yield occurs, stops the drain operation
  // and returns false. Otherwise, returns true.
  template<class OopClosureClass>
  bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);

  bool isEmpty()    { return _index == 0; }
  bool isFull()     { return _index == _capacity; }
  int  maxElems()   { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  int size() { return _index; }

  void setEmpty() { _index = 0; clear_overflow(); }

  // Record the current index.
  void note_start_of_gc();

  // Make sure that we have not added any entries to the stack during GC.
  void note_end_of_gc();

  // iterate over the oops in the mark stack, up to the bound recorded via
  // the call above.
  void oops_do(OopClosure* f);
};
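
// A minimal sketch of the intended usage of CMMarkStack (the driver
// function and its arguments are hypothetical, not part of this file):
//
//   void example_mark_loop(CMMarkStack* stack, G1CMOopClosure* cl,
//                          CMBitMap* bm, oop obj) {
//     stack->par_push(obj);                  // CAS-based, may overflow
//     if (stack->overflow()) {
//       // The push was dropped: the caller must record the overflow and
//       // restart marking once all tasks reach the sync barriers.
//     } else {
//       stack->drain(cl, bm, true /* yield_after */);
//     }
//   }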

class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
 private:
#ifndef PRODUCT
  uintx _num_remaining;
  bool  _force;
#endif // !defined(PRODUCT)

 public:
  void init() PRODUCT_RETURN;
  void update() PRODUCT_RETURN;
  bool should_force() PRODUCT_RETURN_( return false; );
};

// this will enable a variety of different statistics per GC task
#define _MARKING_STATS_ 0
// this will enable the higher verbose levels
#define _MARKING_VERBOSE_ 0

#if _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
  statement ;                 \
} while (0)
#else // _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
} while (0)
#endif // _MARKING_STATS_

typedef enum {
  no_verbose  = 0,   // verbose turned off
  stats_verbose,     // only prints stats at the end of marking
  low_verbose,       // low verbose, mostly per region and per major event
  medium_verbose,    // a bit more detailed than low
  high_verbose       // per object verbose
} CMVerboseLevel;

class YoungList;

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class CMRootRegions VALUE_OBJ_CLASS_SPEC {
 private:
  YoungList*           _young_list;
  ConcurrentMark*      _cm;

  volatile bool        _scan_in_progress;
  volatile bool        _should_abort;
  HeapRegion* volatile _next_survivor;

 public:
  CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(G1CollectedHeap* g1h, ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces get_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if the CM thread is actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
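
// A minimal sketch of the claiming protocol a scanning worker follows
// (the worker function is hypothetical, not part of this file):
//
//   void example_scan_root_regions(ConcurrentMark* cm, uint worker_id) {
//     CMRootRegions* root_regions = cm->root_regions();
//     HeapRegion* hr = root_regions->claim_next();
//     while (hr != NULL) {
//       cm->scanRootRegion(hr, worker_id);  // mark everything reachable
//       hr = root_regions->claim_next();    // atomic: each region once
//     }
//     // once every worker is done, scan_finished() notifies any waiters
//   }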

class ConcurrentMarkThread;

class ConcurrentMark: public CHeapObj<mtGC> {
  friend class ConcurrentMarkThread;
  friend class CMTask;
  friend class CMBitMapClosure;
  friend class CMGlobalObjectClosure;
  friend class CMRemarkTask;
  friend class CMConcurrentMarkingTask;
  friend class G1ParNoteEndTask;
  friend class CalcLiveObjectsClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMParKeepAliveAndDrainClosure;
  friend class G1CMParDrainMarkingStackClosure;

 protected:
  ConcurrentMarkThread* _cmThread;   // the thread doing the work
  G1CollectedHeap*      _g1h;        // the heap.
  uint                  _parallel_marking_threads; // the number of marking
                                                   // threads we're using
  uint                  _max_parallel_marking_threads; // max number of marking
                                                   // threads we'll ever use
  double                _sleep_factor; // how much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // marking target overhead for
                                                // a single task

  // same as the two above, but for the cleanup task
  double                _cleanup_sleep_factor;
  double                _cleanup_task_overhead;

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  CMBitMap                _markBitMap1;
  CMBitMap                _markBitMap2;
  CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
  CMBitMap*               _nextMarkBitMap; // under-construction mark bitmap

  BitMap                  _region_bm;
  BitMap                  _card_bm;

  // Heap bounds
  HeapWord*               _heap_start;
  HeapWord*               _heap_end;

  // Root region tracking and claiming.
  CMRootRegions           _root_regions;

  // For gray objects
  CMMarkStack             _markStack; // Grey objects behind global finger.
  HeapWord* volatile      _finger;    // the global finger, region aligned,
                                      // always points to the end of the
                                      // last claimed region

  // marking tasks
  uint                    _max_task_num; // maximum task number
  uint                    _active_tasks; // task num currently active
  CMTask**                _tasks;        // task queue array (max_task_num len)
  CMTaskQueueSet*         _task_queues;  // task queue set
  ParallelTaskTerminator  _terminator;   // for termination

  // Two sync barriers that are used to synchronise tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialise
  // their data structures and task 0 re-initialises the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialised. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;
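
  // A minimal sketch of the protocol described above, as a task would
  // execute it (the function name is hypothetical, not part of this file):
  //
  //   void example_handle_overflow(ConcurrentMark* cm, int task_num) {
  //     cm->enter_first_sync_barrier(task_num);   // everyone stops mutating
  //                                               // the global structures
  //     // ... each task resets its local structures; task 0 also resets
  //     // the global ones while the others wait ...
  //     cm->enter_second_sync_barrier(task_num);  // nobody resumes until all
  //                                               // structures are consistent
  //   }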

  // this is set by any task, when an overflow on the global data
  // structures is detected.
  volatile bool           _has_overflown;
  // true: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the points between the end of concurrent marking and
  // the time of remark.
  volatile bool           _concurrent_marking_in_progress;

  // verbose level
  CMVerboseLevel          _verbose_level;

  // All of these times are in ms.
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;   // accumulated task vtime

  FlexibleWorkGang* _parallel_workers;

  ForceOverflowSettings _force_overflow_conc;
  ForceOverflowSettings _force_overflow_stw;

  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();
  // It resets all the marking data structures.
  void clear_marking_state(bool clear_overflow = true);

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_phase(uint active_tasks, bool concurrent);
  // We do this after we're done with marking so that the marking data
  // structures are initialised to a sensible and predictable state.
  void set_non_marking_state();

  // prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // accessor methods
  uint parallel_marking_threads() { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
  double sleep_factor()             { return _sleep_factor; }
  double marking_task_overhead()    { return _marking_task_overhead;}
  double cleanup_sleep_factor()     { return _cleanup_sleep_factor; }
  double cleanup_task_overhead()    { return _cleanup_task_overhead;}

  HeapWord*               finger()        { return _finger; }
  bool                    concurrent()    { return _concurrent; }
  uint                    active_tasks()  { return _active_tasks; }
  ParallelTaskTerminator* terminator()    { return &_terminator; }
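
  // A minimal sketch of the control flow that _restart_for_overflow drives,
  // as seen from the marking thread (a friend of this class); the code
  // below is illustrative, not a quote from the thread's implementation:
  //
  //   // after the remark pause (checkpointRootsFinal):
  //   if (cm->restart_for_overflow()) {
  //     // remark could not complete because the global stack overflowed;
  //     // redo the concurrent phase with the freshly reset structures
  //     cm->markFromRoots();
  //   }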

  // It claims the next available region to be scanned by a marking
  // task. It might return NULL if the next region is empty or we have
  // run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(int task);

  // It determines whether we've run out of regions to scan.
  bool        out_of_regions() { return _finger == _heap_end; }

  // Returns the task with the given id
  CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  CMTaskQueueSet* task_queues()  { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()           { return _has_overflown; }
  void set_has_overflown()       { _has_overflown = true; }
  void clear_has_overflown()     { _has_overflown = false; }
  bool restart_for_overflow()    { return _restart_for_overflow; }

  bool has_aborted()             { return _has_aborted; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(int task_num);
  void enter_second_sync_barrier(int task_num);

  ForceOverflowSettings* force_overflow_conc() {
    return &_force_overflow_conc;
  }

  ForceOverflowSettings* force_overflow_stw() {
    return &_force_overflow_stw;
  }

  ForceOverflowSettings* force_overflow() {
    if (concurrent()) {
      return force_overflow_conc();
    } else {
      return force_overflow_stw();
    }
  }

  // Live Data Counting data structures...
  // These data structures are initialized at the start of
  // marking. They are written to while marking is active.
  // They are aggregated during remark; the aggregated values
  // are then used to populate the _region_bm, _card_bm, and
  // the total live bytes, which are then subsequently updated
  // during cleanup.

  // An array of bitmaps (one bit map per task). Each bitmap
  // is used to record the cards spanned by the live objects
  // marked by that task/worker.
  BitMap* _count_card_bitmaps;

  // Used to record the number of marked live bytes
  // (for each region, by worker thread).
  size_t** _count_marked_bytes;

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;
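
  // A minimal sketch of the lifetime of the counting data described above,
  // restated per phase (following the comments in this file):
  //
  //   // start of marking:  clear_all_count_data()
  //   // during marking:    each worker fills _count_card_bitmaps[worker_id]
  //   //                    and _count_marked_bytes[worker_id][region_idx]
  //   // remark:            aggregate_count_data() folds the per-worker data
  //   //                    into _region_bm, _card_bm and per-region live
  //   //                    bytes
  //   // cleanup:           the aggregated values are updated and consumed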

 public:
  // Manipulation of the global mark stack.
  // Notice that the first mark_stack_push is CAS-based, whereas the
  // two below are Mutex-based. This is OK since the first one is only
  // called during evacuation pauses and doesn't compete with the
  // other two (which are called by the marking tasks during
  // concurrent marking or remark).
  bool mark_stack_push(oop p) {
    _markStack.par_push(p);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }

  CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (int i = 0; i < (int)_max_task_num; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(int task_num, int* hash_seed, oop& obj) {
    return _task_queues->steal(task_num, hash_seed, obj);
  }

  ConcurrentMark(ReservedSpace rs, uint max_regions);
  ~ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interactions between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. word_size is the size of the object in
  // words. It is passed explicitly as sometimes we cannot calculate
  // it from the given object because it might be in an inconsistent
  // state (e.g., in to-space and being copied). So the caller is
  // responsible for dealing with this issue (e.g., get the size from
  // the from-space image when the to-space image might be
  // inconsistent) and always passing the size. hr is the region that
  // contains the object and it's passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj, size_t word_size,
                       uint worker_id, HeapRegion* hr = NULL);
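
  // A minimal sketch of the calling convention described above (the
  // copying routine is hypothetical, not part of this file):
  //
  //   // during an initial-mark evacuation pause, while copying `old`:
  //   size_t word_size = old->size();       // taken from the from-space
  //                                         // image, which is still stable
  //   oop new_obj = copy_to_survivor(old);  // to-space copy may be racy
  //   cm->grayRoot(new_obj, word_size, worker_id);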

  // It iterates over the heap and for each object it comes across it
  // will dump the contents of its reference fields, as well as
  // liveness information for the object and its referents. The dump
  // will be written to a file with the following name:
  // G1PrintReachableBaseFile + "." + str.
  // vo decides whether the prev (vo == UsePrevMarking), the next
  // (vo == UseNextMarking) marking information, or the mark word
  // (vo == UseMarkWord) will be used to determine the liveness of
  // each object / referent.
  // If all is true, all objects in the heap will be dumped, otherwise
  // only the live ones. In the dump the following symbols / abbreviations
  // are used:
  //   M : an explicitly live object (its bitmap bit is set)
  //   > : an implicitly live object (over tams)
  //   O : an object outside the G1 heap (typically: in the perm gen)
  //   NOT : a reference field whose referent is not live
  //   AND MARKED : indicates that an object is both explicitly and
  //   implicitly live (it should be one or the other, not both)
  void print_reachable(const char* str,
                       VerifyOption vo, bool all) PRODUCT_RETURN;

  // Clear the next marking bitmap (will be called concurrently).
  void clearNextBitmap();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), then it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scanRootRegions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void markFromRoots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void completeCleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range, for the prev,
  // next, or both bitmaps. NB: the previous bitmap is usually
  // read-only, so use this carefully!
  void clearRangePrevBitmap(MemRegion mr);
  void clearRangeNextBitmap(MemRegion mr);
  void clearRangeBothBitmaps(MemRegion mr);

  // Notify data structures that a GC has started.
  void note_start_of_gc() {
    _markStack.note_start_of_gc();
  }

  // Notify data structures that a GC is finished.
  void note_end_of_gc() {
    _markStack.note_end_of_gc();
  }

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack), enqueued SATB buffers, per-thread SATB
  // buffers, and fingers (global / per-task). The boolean parameters
  // decide which of the above data structures to verify. If marking
  // is not in progress, it's a no-op.
  void verify_no_cset_oops(bool verify_stacks,
                           bool verify_enqueued_buffers,
                           bool verify_thread_buffers,
                           bool verify_fingers) PRODUCT_RETURN;
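
  // A minimal sketch of the rough order in which the phase methods above
  // are invoked over one marking cycle (hypothetical driver code; the
  // actual sequencing lives in the marking thread and the heap):
  //
  //   checkpointRootsInitialPre();   // inside the initial-mark pause
  //   checkpointRootsInitialPost();
  //   scanRootRegions();             // concurrent, before the next pause
  //   markFromRoots();               // concurrent marking proper
  //   checkpointRootsFinal(false);   // the remark pause
  //   cleanup();                     // the cleanup pause
  //   completeCleanup();             // concurrent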

  // It is called at the end of an evacuation pause during marking so
  // that CM is notified of where the new end of the heap is. It
  // doesn't do anything if concurrent_marking_in_progress() is false,
  // unless the force parameter is true.
  void update_g1_committed(bool force = false);

  bool isMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _nextMarkBitMap->startWord() ||
           addr < _nextMarkBitMap->endWord(), "in a region");

    return _nextMarkBitMap->isMarked(addr);
  }

  inline bool not_yet_marked(oop p) const;

  // XXX Debug code
  bool containing_card_is_marked(void* p);
  bool containing_cards_are_marked(void* start, void* last);

  bool isPrevMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _prevMarkBitMap->startWord() ||
           addr < _prevMarkBitMap->endWord(), "in a region");

    return _prevMarkBitMap->isMarked(addr);
  }

  inline bool do_yield_check(uint worker_i = 0);
  inline bool should_yield();

  // Called to abort the marking cycle after a Full GC takes place.
  void abort();

  // This prints the global/local fingers. It is used for debugging.
  NOT_PRODUCT(void print_finger();)

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  // The following indicate whether a given verbose level has been
  // set. Notice that anything above stats is conditional to
  // _MARKING_VERBOSE_ having been set to 1
  bool verbose_stats() {
    return _verbose_level >= stats_verbose;
  }
  bool verbose_low() {
    return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
  }
  bool verbose_medium() {
    return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
  }
  bool verbose_high() {
    return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
  }

  // Liveness counting

  // Utility routine to set an exclusive range of cards on the given
  // card liveness bitmap
  inline void set_card_bitmap_range(BitMap* card_bm,
                                    BitMap::idx_t start_idx,
                                    BitMap::idx_t end_idx,
                                    bool is_par);

  // Returns the card number of the bottom of the G1 heap.
  // Used in biasing indices into accounting card bitmaps.
  intptr_t heap_bottom_card_num() const {
    return _heap_bottom_card_num;
  }

  // Returns the card bitmap for a given task or worker id.
  BitMap* count_card_bitmap_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
    assert(_count_card_bitmaps != NULL, "uninitialized");
    BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    return task_card_bm;
  }

  // Returns the array containing the marked bytes for each region,
  // for the given worker or task id.
  size_t* count_marked_bytes_array_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
    assert(_count_marked_bytes != NULL, "uninitialized");
    size_t* marked_bytes_array = _count_marked_bytes[worker_id];
    assert(marked_bytes_array != NULL, "uninitialized");
    return marked_bytes_array;
  }

  // Returns the index in the liveness accounting card table bitmap
  // for the given address
  inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
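
  // A minimal sketch of the biasing described above, assuming the usual
  // card table geometry (this restates the inline implementation rather
  // than quoting it):
  //
  //   // card_bitmap_index_for(addr) is, in effect:
  //   //   (uintptr_t(addr) >> CardTableModRefBS::card_shift)
  //   //       - heap_bottom_card_num()
  //   // so the first card of the heap maps to bit 0 of each per-worker
  //   // card bitmap, keeping the bitmaps heap-sized rather than
  //   // address-space-sized.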

  // Counts the size of the given memory region in the given
  // marked_bytes array slot for the given HeapRegion.
  // Sets the bits in the given card bitmap that are associated with the
  // cards that are spanned by the memory region.
  inline void count_region(MemRegion mr, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, uint worker_id);

  // Counts the given object in the given task/worker counting
  // data structures.
  inline void count_object(oop obj, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given object in the task/worker counting data
  // structures for the given worker id.
  inline void count_object(oop obj, HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the given task/worker counting structures.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr,
                                 size_t* marked_bytes_array,
                                 BitMap* task_card_bm);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, size_t word_size,
                                 HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  inline bool par_mark_and_count(oop obj, uint worker_id);

  // Similar to the above routine but there are times when we cannot
  // safely calculate the size of obj due to races and we, therefore,
  // pass the size in as a parameter. It is the caller's responsibility
  // to ensure that the size passed in for obj is valid.
  inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);

  // Unconditionally mark the given object, and unconditionally count
  // the object in the counting structures for worker id 0.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj, HeapRegion* hr);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj);

 protected:
  // Clear all the per-task bitmaps and arrays used to store the
  // counting data.
  void clear_all_count_data();

  // Aggregates the counting data for each worker/task
  // that was constructed while marking. Also sets
  // the amount of marked bytes for each region and
  // the top at concurrent mark count.
  void aggregate_count_data();

  // Verification routine
  void verify_count_data();
};
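
// A minimal sketch of the mark-and-count flow from a parallel marker's
// point of view (hypothetical caller, not part of this file):
//
//   void example_mark(ConcurrentMark* cm, oop obj, uint worker_id) {
//     if (cm->par_mark_and_count(obj, worker_id)) {
//       // We won the race to set the bit in the next mark bitmap, so this
//       // worker (and only this worker) accounts obj's live bytes and
//       // spanned cards in its private counting structures.
//     }
//   }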

// A class representing a marking task.
class CMTask : public TerminatorTerminator {
 private:
  enum PrivateConstants {
    // the regular clock call is called once the number of scanned
    // words reaches this limit
    words_scanned_period          = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period           = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed                = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size    = 16
  };

  int                         _task_id;
  G1CollectedHeap*            _g1h;
  ConcurrentMark*             _cm;
  CMBitMap*                   _nextMarkBitMap;
  // the task queue of this task
  CMTaskQueue*                _task_queue;
 private:
  // the task queue set---needed for stealing
  CMTaskQueueSet*             _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing stuff
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is during a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // Counting data structures. Embedding the task's marked_bytes_array
  // and card bitmap into the actual task saves having to go through
  // the ConcurrentMark object.
  size_t*                     _marked_bytes_array;
  BitMap*                     _card_bm;

  // LOTS of statistics related to this task
#if _MARKING_STATS_
  NumberSeq                   _all_clock_intervals_ms;
  double                      _interval_start_time_ms;

  int                         _aborted;
  int                         _aborted_overflow;
  int                         _aborted_cm_aborted;
  int                         _aborted_yield;
  int                         _aborted_timed_out;
  int                         _aborted_satb;
  int                         _aborted_termination;

  int                         _steal_attempts;
  int                         _steals;

  int                         _clock_due_to_marking;
  int                         _clock_due_to_scanning;

  int                         _local_pushes;
  int                         _local_pops;
  int                         _local_max_size;
  int                         _objs_scanned;

  int                         _global_pushes;
  int                         _global_pops;
  int                         _global_max_size;

  int                         _global_transfers_to;
  int                         _global_transfers_from;

  int                         _regions_claimed;
  int                         _objs_found_on_bitmap;

  int                         _satb_buffers_processed;
#endif // _MARKING_STATS_

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings up-to-date the limit of the region
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

 public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms, bool do_stealing, bool do_termination);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the task ID
  int task_id() { return _task_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger()            { return _finger; }

  bool has_aborted()            { return _has_aborted; }
  void set_has_aborted()        { _has_aborted = true; }
  void clear_has_aborted()      { _has_aborted = false; }
  bool has_timed_out()          { return _has_timed_out; }
  bool claimed()                { return _claimed; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // It grays the object by marking it and, if necessary, pushing it
  // on the local queue
  inline void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  void scan_object(oop obj);

  // It pushes an object on the local queue.
  inline void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops once the queue size has dropped to a given
  // limit. If partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  CMTask(int task_num, ConcurrentMark *cm,
         size_t* marked_bytes, BitMap* card_bm,
         CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();

#if _MARKING_STATS_
  void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
#endif // _MARKING_STATS_
};
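
// A minimal sketch of how a CMTask is typically driven (hypothetical
// driver, not part of this file):
//
//   void example_run_task(CMTask* task, double target_ms) {
//     task->record_start_time();
//     do {
//       task->clear_has_aborted();
//       task->do_marking_step(target_ms,
//                             true /* do_stealing */,
//                             true /* do_termination */);
//       // has_aborted() is set when the step hits its time quota, a
//       // yield request, an overflow, or available SATB buffers.
//     } while (task->has_aborted());
//     task->record_end_time();
//   }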

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
 private:
  outputStream* _out;

  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // These are set up when we come across a "starts humongous" region
  // (as this is where most of this information is stored, not in the
  // subsequent "continues humongous" regions). After that, for every
  // region in a given humongous region series we deduce the right
  // values for it by simply subtracting the appropriate amount from
  // these fields. All these values should reach 0 after we've visited
  // the last region in the series.
  size_t _hum_used_bytes;
  size_t _hum_capacity_bytes;
  size_t _hum_prev_live_bytes;
  size_t _hum_next_live_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

  // See the .cpp file.
  size_t get_hum_bytes(size_t* hum_bytes);
  void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
                     size_t* prev_live_bytes, size_t* next_live_bytes);

 public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP