/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP

#include "classfile/javaClasses.hpp"
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/taskqueue.hpp"

class G1CollectedHeap;
class G1CMBitMap;
class G1CMTask;
class G1ConcurrentMark;
class ConcurrentGCTimer;
class G1OldTracer;
class G1SurvivorRegions;

// This is a container class for either an oop or a continuation address for
// mark stack entries. Both are pushed onto the mark stack.
class G1TaskQueueEntry VALUE_OBJ_CLASS_SPEC {
private:
  void* _holder;

  static const uintptr_t ArraySliceBit = 1;

  G1TaskQueueEntry(oop obj) : _holder(obj) {
    assert(_holder != NULL, "Not allowed to set NULL task queue element");
  }
  G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
public:
  G1TaskQueueEntry(const G1TaskQueueEntry& other) { _holder = other._holder; }
  G1TaskQueueEntry() : _holder(NULL) { }

  static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
  static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }

  void assign(const G1TaskQueueEntry& t) {
    _holder = t._holder;
  }

  volatile G1TaskQueueEntry& operator=(const volatile G1TaskQueueEntry& t) volatile {
    _holder = t._holder;
    return *this;
  }

  oop obj() const {
    assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
    return (oop)_holder;
  }

  HeapWord* slice() const {
    assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
    return (HeapWord*)((uintptr_t)_holder & ~ArraySliceBit);
  }

  bool is_oop() const { return !is_array_slice(); }
  bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
  bool is_null() const { return _holder == NULL; }
};
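
// Example (illustrative only, not part of the VM sources; obj and
// slice_start are hypothetical locals): oops are at least HeapWord
// aligned, so bit 0 of _holder is free to act as the discriminator
// between the two entry kinds:
//
//   G1TaskQueueEntry e = G1TaskQueueEntry::from_slice(slice_start);
//   assert(e.is_array_slice(), "low bit was tagged on construction");
//   assert(e.slice() == slice_start, "tag is masked off again on read");
//
//   G1TaskQueueEntry f = G1TaskQueueEntry::from_oop(obj);
//   assert(f.is_oop() && f.obj() == obj, "no tag bit, stored as-is");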

typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1;
public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  bool do_object_b(oop obj);
};

// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1<<_shifter) HeapWords.

class G1CMBitMapRO VALUE_OBJ_CLASS_SPEC {
protected:
  HeapWord*  _bmStartWord; // base address of range covered by map
  size_t     _bmWordSize;  // map size (in #HeapWords covered)
  const int  _shifter;     // map to char or bit
  BitMapView _bm;          // the bit map itself

public:
  // constructor
  G1CMBitMapRO(int shifter);

  // inquiries
  HeapWord* startWord() const { return _bmStartWord; }
  // the following is one past the last word in space
  HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
                                     const HeapWord* limit = NULL) const;

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(const HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }

  // The argument addr should be the start address of a valid object
  inline HeapWord* nextObject(HeapWord* addr);

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  NOT_PRODUCT(bool covers(MemRegion rs) const;)
};
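
// Worked example (illustrative): G1CMBitMap below is constructed with
// _shifter == LogMinObjAlignment, which is 0 with the default 8-byte object
// alignment on a 64-bit VM, i.e. one bit per HeapWord. For an object
// starting 48 words into the covered range:
//
//   heapWordToOffset(_bmStartWord + 48) == 48 >> 0 == 48   // bit index
//   offsetToHeapWord(48)                == _bmStartWord + (48 << 0)
//
// A larger _shifter would make each bit cover 1 << _shifter words instead.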

class G1CMBitMapMappingChangedListener : public G1MappingChangedListener {
private:
  G1CMBitMap* _bm;
public:
  G1CMBitMapMappingChangedListener() : _bm(NULL) {}

  void set_bitmap(G1CMBitMap* bm) { _bm = bm; }

  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

class G1CMBitMap : public G1CMBitMapRO {
private:
  G1CMBitMapMappingChangedListener _listener;

public:
  static size_t compute_size(size_t heap_size);
  // Returns the number of bytes on the heap between two marks in the bitmap.
  static size_t mark_distance();
  // Returns how many bytes (or bits) of the heap a single byte (or bit) of the
  // mark bitmap corresponds to. This is the same as the mark distance above.
  static size_t heap_map_factor() {
    return mark_distance();
  }

  G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }

  // Initializes the underlying BitMap to cover the given area.
  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);

  // Write marks.
  inline void mark(HeapWord* addr);
  inline void clear(HeapWord* addr);
  inline bool parMark(HeapWord* addr);

  void clear_range(MemRegion mr);
};

// Represents the overflow mark stack used by concurrent marking.
//
// Stores oops in a huge buffer in virtual memory that is always fully committed.
// Resizing may only happen during a STW pause when the stack is empty.
//
// Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
// stack memory is split into evenly sized chunks of oops. Users can only
// add or remove entries on that basis.
// Chunks are filled in increasing address order. Not completely filled chunks
// have a NULL element as a terminating element.
//
// Every chunk has a header containing a single pointer element used for memory
// management. This wastes some space, but is negligible (< .1% with current sizing).
//
// Memory management is done using a mix of tracking a high water-mark indicating
// that all chunks at a lower address are valid chunks, and a singly linked free
// list connecting all empty chunks.
class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
public:
  // Number of oops that can fit in a single chunk.
  static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
private:
  struct TaskQueueEntryChunk {
    TaskQueueEntryChunk* next;
    G1TaskQueueEntry data[EntriesPerChunk];
  };
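
  // Size check (illustrative): a G1TaskQueueEntry holds a single pointer,
  // so on a 64-bit VM a chunk occupies exactly
  //   (1 /* next */ + EntriesPerChunk) * 8 bytes == 1024 * 8 bytes == 8K.
  // This is why EntriesPerChunk is 1024 - 1: the next-pointer header takes
  // the remaining slot of a power-of-two sized chunk.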

  size_t _max_chunk_capacity;    // Maximum number of TaskQueueEntryChunk elements on the stack.

  TaskQueueEntryChunk* _base;    // Bottom address of allocated memory area.
  size_t _chunk_capacity;        // Current maximum number of TaskQueueEntryChunk elements.

  char _pad0[DEFAULT_CACHE_LINE_SIZE];
  TaskQueueEntryChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
  char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*)];
  TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
  volatile size_t _chunks_in_chunk_list;
  char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];

  volatile size_t _hwm;          // High water mark within the reserved space.
  char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];

  // Allocate a new chunk from the reserved memory, using the high water mark. Returns
  // NULL if out of memory.
  TaskQueueEntryChunk* allocate_new_chunk();

  volatile bool _out_of_memory;

  // Atomically add the given chunk to the list.
  void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
  // Atomically remove and return a chunk from the given list. Returns NULL if the
  // list is empty.
  TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);

  void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
  void add_chunk_to_free_list(TaskQueueEntryChunk* elem);

  TaskQueueEntryChunk* remove_chunk_from_chunk_list();
  TaskQueueEntryChunk* remove_chunk_from_free_list();

  bool _should_expand;

  // Resizes the mark stack to the given new capacity. Releases any previous
  // memory if successful.
  bool resize(size_t new_capacity);

public:
  G1CMMarkStack();
  ~G1CMMarkStack();

  // Alignment and minimum capacity of this mark stack in number of oops.
  static size_t capacity_alignment();

  // Allocate and initialize the mark stack with the given number of oops.
  bool initialize(size_t initial_capacity, size_t max_capacity);

  // Pushes the given buffer containing at most EntriesPerChunk elements on the mark
  // stack. If fewer than EntriesPerChunk elements are to be pushed, the array must
  // be terminated with a NULL.
  // Returns whether the buffer contents were successfully pushed to the global mark
  // stack.
  bool par_push_chunk(G1TaskQueueEntry* buffer);

  // Pops a chunk from this mark stack, copying its entries into the given buffer. This
  // chunk may contain up to EntriesPerChunk elements. If there are fewer, the last
  // element in the array is a NULL pointer.
  bool par_pop_chunk(G1TaskQueueEntry* buffer);

  // Return whether the chunk list is empty. Racy due to unsynchronized access to
  // _chunk_list.
  bool is_empty() const { return _chunk_list == NULL; }

  size_t capacity() const { return _chunk_capacity; }

  bool is_out_of_memory() const { return _out_of_memory; }
  void clear_out_of_memory() { _out_of_memory = false; }

  bool should_expand() const { return _should_expand; }
  void set_should_expand(bool value) { _should_expand = value; }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Return the approximate number of oops on this mark stack. Racy due to
  // unsynchronized access to _chunks_in_chunk_list.
  size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }

  void set_empty();

  // Apply Fn to every oop on the mark stack. The mark stack must not
  // be modified while iterating.
  template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
};

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
private:
  const G1SurvivorRegions* _survivors;
  G1ConcurrentMark*        _cm;

  volatile bool            _scan_in_progress;
  volatile bool            _should_abort;
  volatile int             _claimed_survivor_index;

  void notify_scan_done();

public:
  G1CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces claim_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // The number of root regions to scan.
  uint num_root_regions() const;

  void cancel_scan();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
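
// Sketch of the intended claiming protocol (illustrative; the real driver is
// G1ConcurrentMark::scan_root_regions() and its worker task):
//
//   root_regions->prepare_for_scan();
//   // each marking worker then loops:
//   while (HeapRegion* hr = root_regions->claim_next()) {
//     ... mark everything reachable from hr ...
//   }
//   // the coordinating thread calls scan_finished() when scanning is done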

class ConcurrentMarkThread;

class G1ConcurrentMark: public CHeapObj<mtGC> {
  friend class ConcurrentMarkThread;
  friend class G1ParNoteEndTask;
  friend class G1VerifyLiveDataClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMKeepAliveAndDrainClosure;
  friend class G1CMDrainMarkingStackClosure;
  friend class G1CMBitMapClosure;
  friend class G1CMConcurrentMarkingTask;
  friend class G1CMRemarkTask;
  friend class G1CMTask;

protected:
  ConcurrentMarkThread* _cmThread;   // The thread doing the work
  G1CollectedHeap*      _g1h;        // The heap
  uint                  _parallel_marking_threads;     // The number of marking
                                                       // threads we're using
  uint                  _max_parallel_marking_threads; // Max number of marking
                                                       // threads we'll ever use
  double                _sleep_factor; // How much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // Marking target overhead for
                                                // a single task

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  G1CMBitMap            _markBitMap1;
  G1CMBitMap            _markBitMap2;
  G1CMBitMapRO*         _prevMarkBitMap; // Completed mark bitmap
  G1CMBitMap*           _nextMarkBitMap; // Under-construction mark bitmap

  // Heap bounds
  HeapWord*             _heap_start;
  HeapWord*             _heap_end;

  // Root region tracking and claiming
  G1CMRootRegions       _root_regions;

  // For grey objects
  G1CMMarkStack         _global_mark_stack; // Grey objects behind global finger
  HeapWord* volatile    _finger;            // The global finger, region aligned,
                                            // always points to the end of the
                                            // last claimed region

  // Marking tasks
  uint                   _max_worker_id; // Maximum worker id
  uint                   _active_tasks;  // Number of tasks currently active
  G1CMTask**             _tasks;         // Task queue array (max_worker_id length)
  G1CMTaskQueueSet*      _task_queues;   // Task queue set
  ParallelTaskTerminator _terminator;    // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialize
  // their data structures and task 0 re-initializes the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialized. When they
  // exit it, they are free to start working again.
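  //
  // Sketch of that protocol (illustrative):
  //
  //   enter_first_sync_barrier(worker_id);   // everyone quiesces
  //   // all tasks reset local structures; task 0 also resets global ones
  //   enter_second_sync_barrier(worker_id);  // nobody restarts early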
  WorkGangBarrierSync   _first_overflow_barrier_sync;
  WorkGangBarrierSync   _second_overflow_barrier_sync;

  // This is set by any task, when an overflow on the global data
  // structures is detected
  volatile bool         _has_overflown;
  // True: marking is concurrent, false: we're in remark
  volatile bool         _concurrent;
  // Set at the end of a Full GC so that marking aborts
  volatile bool         _has_aborted;

  // Used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool         _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the interval between the end of concurrent marking
  // and the remark pause.
  volatile bool         _concurrent_marking_in_progress;

  ConcurrentGCTimer*    _gc_timer_cm;

  G1OldTracer*          _gc_tracer_cm;

  // All of these times are in ms
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;   // Accumulated task vtime

  WorkGang* _parallel_workers;

  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // Resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state(bool clear_overflow = true);

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void set_non_marking_state();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // Should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // Accessor methods
  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads; }
  double sleep_factor()                     { return _sleep_factor; }
  double marking_task_overhead()            { return _marking_task_overhead; }

  HeapWord*               finger()          { return _finger; }
  bool                    concurrent()      { return _concurrent; }
  uint                    active_tasks()    { return _active_tasks; }
  ParallelTaskTerminator* terminator()      { return &_terminator; }

  // Claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);

  // Determines whether we've run out of regions to scan. Note that
  // the finger can point past the heap end in case the heap was expanded
  // to satisfy an allocation without doing a GC. This is fine, because all
  // objects in those regions will be considered live anyway because of
  // SATB guarantees (i.e. their TAMS will be equal to bottom).
  bool out_of_regions() { return _finger >= _heap_end; }

  // Returns the task with the given id
  G1CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  G1CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (G1CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  G1CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()        { return _has_overflown; }
  void set_has_overflown()    { _has_overflown = true; }
  void clear_has_overflown()  { _has_overflown = false; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

  // Set to true when initialization is complete
  bool _completed_initialization;

  // If end_timer is true, the GC timer is ended after ending the concurrent phase.
  void register_concurrent_phase_end_common(bool end_timer);

  // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
  // true, periodically insert checks to see if this method should exit prematurely.
  void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
public:
  // Manipulation of the global mark stack.
  // The push and pop operations are used by tasks for transfers
  // between task-local queues and the global mark stack.
  bool mark_stack_push(G1TaskQueueEntry* arr) {
    if (!_global_mark_stack.par_push_chunk(arr)) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_pop(G1TaskQueueEntry* arr) {
    return _global_mark_stack.par_pop_chunk(arr);
  }
  size_t mark_stack_size()                { return _global_mark_stack.size(); }
  size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity() / 3; }
  bool mark_stack_overflow()              { return _global_mark_stack.is_out_of_memory(); }
  bool mark_stack_empty()                 { return _global_mark_stack.is_empty(); }
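
  // Example (illustrative): how a task hands a chunk of work to the global
  // stack. The buffer holds up to EntriesPerChunk entries and must be
  // NULL-terminated when not full (see G1CMMarkStack::par_push_chunk()):
  //
  //   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
  //   // ... fill buffer from the task-local queue ...
  //   if (!mark_stack_push(buffer)) {
  //     // The global stack is out of memory; has_overflown() is now set
  //     // and marking will later be restarted with an expanded stack.
  //   }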

  G1CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void concurrent_cycle_start();
  void concurrent_cycle_end();

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (uint i = 0; i < _max_worker_id; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry);

  G1ConcurrentMark(G1CollectedHeap* g1h,
                   G1RegionToSpaceMapper* prev_bitmap_storage,
                   G1RegionToSpaceMapper* next_bitmap_storage);
  ~G1ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  G1CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  G1CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interactions between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. hr is the region that
  // contains the object and it's passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj,
                       HeapRegion* hr = NULL);

  // Prepare internal data structures for the next mark cycle. This includes clearing
  // the next mark bitmap and some internal data structures. This method is intended
  // to be called concurrently to the mutator. It will yield to safepoint requests.
  void cleanup_for_next_mark();

  // Clear the previous marking bitmap during safepoint.
  void clear_prev_bitmap(WorkGang* workers);

  // Return whether the next mark bitmap has no marks set. To be used for assertions
  // only. Will not yield to pause requests.
  bool nextMarkBitmapIsClear();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method.
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scan_root_regions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void mark_from_roots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void complete_cleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range in the previous
  // bitmap. NB: the previous bitmap is usually read-only, so use this
  // carefully!
  void clearRangePrevBitmap(MemRegion mr);

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack) and fingers (global / per-task).
  // If marking is not in progress, it's a no-op.
  void verify_no_cset_oops() PRODUCT_RETURN;

  inline bool isPrevMarked(oop p) const;

  inline bool do_yield_check();

  // Abandon current marking iteration due to a Full GC.
  void abort();

  bool has_aborted() { return _has_aborted; }

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;
  void threads_do(ThreadClosure* tc) const;

  void print_on_error(outputStream* st) const;

  // Attempts to mark the given object on the next mark bitmap.
  inline bool par_mark(oop obj);

  // Returns true if initialization was successfully completed.
  bool completed_initialization() const {
    return _completed_initialization;
  }

  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }

private:
  // Clear (Reset) all liveness count data.
  void clear_live_data(WorkGang* workers);

#ifdef ASSERT
  // Verify that all of the above data structures are in their initial state.
  void verify_live_data_clear();
#endif

  // Aggregates the per-card liveness data based on the current marking. Also sets
  // the amount of marked bytes for each region.
  void create_live_data();

  void finalize_live_data();

  void verify_live_data();
};
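
// Outline of a marking cycle as driven by ConcurrentMarkThread (an
// orientation summary of the methods above; see the .cpp files for the
// exact sequence):
//
//   checkpointRootsInitialPre() / Post()  // at the initial-mark pause
//   scan_root_regions()     // concurrent; must finish before the next pause
//   mark_from_roots()       // concurrent marking to a tentative closure
//   checkpointRootsFinal()  // the remark pause
//   cleanup()               // liveness accounting and region sorting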

// A class representing a marking task.
class G1CMTask : public TerminatorTerminator {
private:
  enum PrivateConstants {
    // The regular clock call is called once the number of scanned
    // words reaches this limit
    words_scanned_period = 12*1024,
    // The regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period = 1024,
    // Initial value for the hash seed, used in the work stealing code
    init_hash_seed = 17
  };

  G1CMObjArrayProcessor       _objArray_processor;

  uint                        _worker_id;
  G1CollectedHeap*            _g1h;
  G1ConcurrentMark*           _cm;
  G1CMBitMap*                 _nextMarkBitMap;
  // the task queue of this task
  G1CMTaskQueue*              _task_queue;
private:
  // the task queue set---needed for stealing
  G1CMTaskQueueSet*           _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;
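
  // Example of the clock scheme (illustrative): after recalculate_limits()
  // a task calls regular_clock_call() once it has scanned
  // words_scanned_period (12K) more words or visited refs_reached_period
  // (1024) more references, whichever limit trips first in check_limits();
  // decrease_limits() pulls both limits in so the clock fires sooner after
  // an expensive operation.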

  // used by the work stealing code
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is during a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // brings the limit of the region up to date
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

  // Test whether obj might have already been passed over by the
  // mark bitmap scan, and so needs to be pushed onto the mark stack.
  bool is_below_finger(oop obj, HeapWord* global_finger) const;

  template<bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry);
public:
  // Apply the closure on the given area of the objArray. Return the number of words
  // scanned.
  inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
  // Resets the task; should be called right at the beginning of
  // a marking phase.
  void reset(G1CMBitMap* _nextMarkBitMap);
  // Clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms,
                       bool do_termination,
                       bool is_serial);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }
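
  // Sketch of how an owner drives a task (illustrative; the real loops live
  // in G1CMConcurrentMarkingTask and G1CMRemarkTask):
  //
  //   task->record_start_time();
  //   do {
  //     task->do_marking_step(10.0 /* target_ms; a hypothetical value */,
  //                           true /* do_termination */,
  //                           false /* is_serial */);
  //     // ... yield to safepoint requests ...
  //   } while (task->has_aborted() /* e.g. timed out */ && !cm->has_aborted());
  //   task->record_end_time();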

  // Returns the worker ID associated with this task.
  uint worker_id() { return _worker_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger() { return _finger; }

  bool has_aborted()       { return _has_aborted; }
  void set_has_aborted()   { _has_aborted = true; }
  void clear_has_aborted() { _has_aborted = false; }
  bool has_timed_out()     { return _has_timed_out; }
  bool claimed()           { return _claimed; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // Increment the number of references this task has visited.
  void increment_refs_reached() { ++_refs_reached; }

  // Grey the object by marking it. If not already marked, push it on
  // the local queue if below the finger.
  // obj is below its region's NTAMS.
  inline void make_reference_grey(oop obj);

  // Grey the object (by calling make_reference_grey) if required,
  // e.g. obj is below its containing region's NTAMS.
  // Precondition: obj is a valid heap object.
  inline void deal_with_reference(oop obj);

  // Scans an object and visits its children.
  inline void scan_task_entry(G1TaskQueueEntry task_entry);

  // Pushes an object on the local queue.
  inline void push(G1TaskQueueEntry task_entry);

  // Move entries to the global stack.
  void move_entries_to_global_stack();
  // Move entries from the global stack; return true if we were successful in doing so.
  bool get_entries_from_global_stack();

  // Pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size falls to a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // Moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // Keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // Moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  G1CMTask(uint worker_id,
           G1ConcurrentMark* cm,
           G1CMTaskQueue* task_queue,
           G1CMTaskQueueSet* task_queues);

  // Prints statistics associated with this task
  void print_stats();
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
private:
  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // Accumulator for the remembered set size
  size_t _total_remset_bytes;

  // Accumulator for strong code roots memory size
  size_t _total_strong_code_roots_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP