/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP

#include "gc/g1/g1ConcurrentMarkBitMap.hpp"
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1RegionMarkStatsCache.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/allocation.hpp"

class ConcurrentGCTimer;
class G1ConcurrentMarkThread;
class G1CollectedHeap;
class G1CMTask;
class G1ConcurrentMark;
class G1OldTracer;
class G1RegionToSpaceMapper;
class G1SurvivorRegions;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop or a continuation address for
// mark stack entries. Both are pushed onto the mark stack.
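//
// Illustrative sketch (not part of the original comments): HeapWord* addresses
// are at least word aligned, so their least significant bit is free to serve
// as a tag distinguishing array-slice continuation addresses from plain oops:
//
//   G1TaskQueueEntry e1 = G1TaskQueueEntry::from_oop(obj);     // tag bit clear
//   G1TaskQueueEntry e2 = G1TaskQueueEntry::from_slice(addr);  // tag bit set
//   if (e2.is_array_slice()) {
//     HeapWord* s = e2.slice();   // addr with the tag bit masked off again
//   }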
class G1TaskQueueEntry {
private:
  void* _holder;

  static const uintptr_t ArraySliceBit = 1;

  G1TaskQueueEntry(oop obj) : _holder(obj) {
    assert(_holder != NULL, "Not allowed to set NULL task queue element");
  }
  G1TaskQueueEntry(HeapWord* addr) : _holder((void*)((uintptr_t)addr | ArraySliceBit)) { }
public:
  G1TaskQueueEntry(const G1TaskQueueEntry& other) { _holder = other._holder; }
  G1TaskQueueEntry() : _holder(NULL) { }

  static G1TaskQueueEntry from_slice(HeapWord* what) { return G1TaskQueueEntry(what); }
  static G1TaskQueueEntry from_oop(oop obj) { return G1TaskQueueEntry(obj); }

  G1TaskQueueEntry& operator=(const G1TaskQueueEntry& t) {
    _holder = t._holder;
    return *this;
  }

  volatile G1TaskQueueEntry& operator=(const volatile G1TaskQueueEntry& t) volatile {
    _holder = t._holder;
    return *this;
  }

  oop obj() const {
    assert(!is_array_slice(), "Trying to read array slice " PTR_FORMAT " as oop", p2i(_holder));
    return (oop)_holder;
  }

  HeapWord* slice() const {
    assert(is_array_slice(), "Trying to read oop " PTR_FORMAT " as array slice", p2i(_holder));
    return (HeapWord*)((uintptr_t)_holder & ~ArraySliceBit);
  }

  bool is_oop() const { return !is_array_slice(); }
  bool is_array_slice() const { return ((uintptr_t)_holder & ArraySliceBit) != 0; }
  bool is_null() const { return _holder == NULL; }
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1h(g1) { }

  bool do_object_b(oop obj);
};

// Represents the overflow mark stack used by concurrent marking.
//
// Stores oops in a huge buffer in virtual memory that is always fully committed.
// Resizing may only happen during a STW pause when the stack is empty.
//
// Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
// stack memory is split into evenly sized chunks of oops. Users can only
// add or remove entries on that basis.
// Chunks are filled in increasing address order. Not completely filled chunks
// have a NULL element as a terminating element.
//
// Every chunk has a header containing a single pointer element used for memory
// management. This wastes some space, but is negligible (< .1% with current sizing).
//
// Memory management is done using a mix of tracking a high water-mark indicating
// that all chunks at a lower address are valid chunks, and a singly linked free
// list connecting all empty chunks.
class G1CMMarkStack {
public:
  // Number of TaskQueueEntries that can fit in a single chunk.
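  // (1023 entries: together with the chunk header's next pointer, a
  // TaskQueueEntryChunk occupies exactly 1024 pointer-sized slots.)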
  static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
private:
  struct TaskQueueEntryChunk {
    TaskQueueEntryChunk* next;
    G1TaskQueueEntry data[EntriesPerChunk];
  };

  size_t _max_chunk_capacity;    // Maximum number of TaskQueueEntryChunk elements on the stack.

  TaskQueueEntryChunk* _base;    // Bottom address of allocated memory area.
  size_t _chunk_capacity;        // Current maximum number of TaskQueueEntryChunk elements.

  char _pad0[DEFAULT_CACHE_LINE_SIZE];
  TaskQueueEntryChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
  char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*)];
  TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
  volatile size_t _chunks_in_chunk_list;
  char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];

  volatile size_t _hwm;          // High water mark within the reserved space.
  char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];

  // Allocate a new chunk from the reserved memory, using the high water mark. Returns
  // NULL if out of memory.
  TaskQueueEntryChunk* allocate_new_chunk();

  // Atomically add the given chunk to the list.
  void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
  // Atomically remove and return a chunk from the given list. Returns NULL if the
  // list is empty.
  TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);

  void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
  void add_chunk_to_free_list(TaskQueueEntryChunk* elem);

  TaskQueueEntryChunk* remove_chunk_from_chunk_list();
  TaskQueueEntryChunk* remove_chunk_from_free_list();

  // Resizes the mark stack to the given new capacity. Releases any previous
  // memory if successful.
  bool resize(size_t new_capacity);

public:
  G1CMMarkStack();
  ~G1CMMarkStack();

  // Alignment and minimum capacity of this mark stack in number of oops.
  static size_t capacity_alignment();

  // Allocate and initialize the mark stack with the given number of oops.
  bool initialize(size_t initial_capacity, size_t max_capacity);

  // Pushes the given buffer containing at most EntriesPerChunk elements on the mark
  // stack. If fewer than EntriesPerChunk elements are to be pushed, the array must
  // be terminated with a NULL.
  // Returns whether the buffer contents were successfully pushed to the global mark
  // stack.
  bool par_push_chunk(G1TaskQueueEntry* buffer);

  // Pops a chunk from this mark stack, copying its contents into the given buffer. This
  // chunk may contain up to EntriesPerChunk elements. If there are fewer, the last
  // element in the array is a NULL pointer.
  bool par_pop_chunk(G1TaskQueueEntry* buffer);

  // Return whether the chunk list is empty. Racy due to unsynchronized access to
  // _chunk_list.
  bool is_empty() const { return _chunk_list == NULL; }

  size_t capacity() const { return _chunk_capacity; }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Return the approximate number of oops on this mark stack. Racy due to
  // unsynchronized access to _chunks_in_chunk_list.
  size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }

  void set_empty();
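
  // Illustrative sketch (not part of this header): a caller transfers a whole
  // buffer of EntriesPerChunk entries at a time; a partial buffer is terminated
  // by a default-constructed (null) entry as described above:
  //
  //   G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
  //   // ... fill buffer; if only n < EntriesPerChunk entries are used,
  //   // set buffer[n] = G1TaskQueueEntry() as the terminator ...
  //   if (!stack->par_push_chunk(buffer)) {
  //     // out of chunks: callers treat this as a mark stack overflow
  //   }
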
  // Apply Fn to every oop on the mark stack. The mark stack must not
  // be modified while iterating.
  template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
};

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class G1CMRootRegions {
private:
  const G1SurvivorRegions* _survivors;
  G1ConcurrentMark*        _cm;

  volatile bool _scan_in_progress;
  volatile bool _should_abort;
  volatile int  _claimed_survivor_index;

  void notify_scan_done();

public:
  G1CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces get_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if the CM threads are actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // The number of root regions to scan.
  uint num_root_regions() const;

  void cancel_scan();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
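
// Illustrative sketch (not part of this header): marking workers typically
// drive root region scanning by claiming regions until none remain; the
// controlling thread calls scan_finished() once all workers are done:
//
//   HeapRegion* hr;
//   while ((hr = root_regions->claim_next()) != NULL) {
//     cm->scan_root_region(hr, worker_id);
//   }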

// This class manages data structures and methods for doing liveness analysis in
// G1's concurrent cycle.
class G1ConcurrentMark : public CHeapObj<mtGC> {
  friend class G1ConcurrentMarkThread;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMKeepAliveAndDrainClosure;
  friend class G1CMDrainMarkingStackClosure;
  friend class G1CMBitMapClosure;
  friend class G1CMConcurrentMarkingTask;
  friend class G1CMRemarkTask;
  friend class G1CMTask;

  G1ConcurrentMarkThread* _cm_thread;     // The thread doing the work
  G1CollectedHeap*        _g1h;           // The heap
  bool                    _completed_initialization; // Set to true when initialization is complete

  // Concurrent marking support structures
  G1CMBitMap              _mark_bitmap_1;
  G1CMBitMap              _mark_bitmap_2;
  G1CMBitMap*             _prev_mark_bitmap; // Completed mark bitmap
  G1CMBitMap*             _next_mark_bitmap; // Under-construction mark bitmap

  // Heap bounds
  MemRegion const         _heap;

  // Root region tracking and claiming
  G1CMRootRegions         _root_regions;

  // For grey objects
  G1CMMarkStack           _global_mark_stack; // Grey objects behind global finger
  HeapWord* volatile      _finger;            // The global finger, region aligned,
                                              // always pointing to the end of the
                                              // last claimed region

  uint                    _worker_id_offset;
  uint                    _max_num_tasks;    // Maximum number of marking tasks
  uint                    _num_active_tasks; // Number of tasks currently active
  G1CMTask**              _tasks;            // Task queue array (max_worker_id length)

  G1CMTaskQueueSet*       _task_queues;      // Task queue set
  ParallelTaskTerminator  _terminator;       // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialize
  // their data structures and task 0 re-initializes the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialized. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;

  // This is set by any task, when an overflow on the global data
  // structures is detected
  volatile bool           _has_overflown;
  // True: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // Set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;

  // Used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  ConcurrentGCTimer*      _gc_timer_cm;

  G1OldTracer*            _gc_tracer_cm;
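
  // Illustrative sketch (not part of the original comments) of the overflow
  // restart protocol described above, as seen from a single task (the barrier
  // entry points are declared further below):
  //
  //   enter_first_sync_barrier(worker_id);   // all tasks stop touching global state
  //   // every task resets its local structures; task 0 resets the global ones
  //   enter_second_sync_barrier(worker_id);  // no task resumes before resets finish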

  // Timing statistics. All of them are in ms.
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_cleanup_time;

  double*   _accum_task_vtime;   // Accumulated task vtime

  WorkGang* _concurrent_workers;
  uint      _num_concurrent_workers; // The number of marking worker threads we're using
  uint      _max_concurrent_workers; // Maximum number of marking worker threads

  void verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller);

  void finalize_marking();

  void weak_refs_work_parallel_part(BoolObjectClosure* is_alive, bool purged_classes);
  void weak_refs_work(bool clear_all_soft_refs);

  void report_object_count(bool mark_completed);

  void swap_mark_bitmaps();

  void reclaim_empty_regions();

  // Clear statistics gathered during the concurrent cycle for the given region after
  // it has been reclaimed.
  void clear_statistics(HeapRegion* r);

  // Resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via reset_at_marking_complete() below).
  void reset_marking_for_restart();

  // We do this after we're done with marking so that the marking data
  // structures are initialized to a sensible and predictable state.
  void reset_at_marking_complete();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // Should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // Prints all gathered CM-related statistics.
  void print_stats();

  HeapWord*               finger()       { return _finger; }
  bool                    concurrent()   { return _concurrent; }
  uint                    active_tasks() { return _num_active_tasks; }
  ParallelTaskTerminator* terminator()   { return &_terminator; }

  // Claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);
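
  // Illustrative sketch (not part of this header) of the intended calling
  // pattern; each task stays responsive by re-checking between claims:
  //
  //   while (!out_of_regions()) {
  //     HeapRegion* hr = claim_region(worker_id);
  //     if (hr != NULL) {
  //       // ... scan hr ...
  //     }
  //     // call the task's regular clock method (see G1CMTask below)
  //   }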

  // Determines whether we've run out of regions to scan. Note that
  // the finger can point past the heap end in case the heap was expanded
  // to satisfy an allocation without doing a GC. This is fine, because all
  // objects in those regions will be considered live anyway because of
  // SATB guarantees (i.e. their TAMS will be equal to bottom).
  bool out_of_regions() { return _finger >= _heap.end(); }

  // Returns the task with the given id.
  G1CMTask* task(uint id) {
    // During initial mark we use the parallel gc threads to do some work, so
    // we can only compare against _max_num_tasks.
    assert(id < _max_num_tasks, "Task id %u not within bounds up to %u", id, _max_num_tasks);
    return _tasks[id];
  }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown.
  bool has_overflown()        { return _has_overflown; }
  void set_has_overflown()    { _has_overflown = true; }
  void clear_has_overflown()  { _has_overflown = false; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers.
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  // Clear the given bitmap in parallel using the given WorkGang. If may_yield is
  // true, periodically insert checks to see if this method should exit prematurely.
  void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);

  // Region statistics gathered during marking.
  G1RegionMarkStats* _region_mark_stats;
  // Top pointer for each region at the start of the rebuild remembered set process
  // for regions whose remembered sets need to be rebuilt. A NULL for a given region
  // means that this region is not scanned during the remembered set rebuilding
  // phase at all.
  HeapWord* volatile* _top_at_rebuild_starts;
public:
  void add_to_liveness(uint worker_id, oop const obj, size_t size);
  // Liveness of the given region as determined by concurrent marking, i.e. the amount of
  // live words between bottom and nTAMS.
  size_t liveness(uint region) { return _region_mark_stats[region]._live_words; }

  // Sets the internal top_at_rebuild_start for the given region to the current top of the region.
  inline void update_top_at_rebuild_start(HeapRegion* r);
  // TARS for the given region during remembered set rebuilding.
  inline HeapWord* top_at_rebuild_start(uint region) const;

  // Clear statistics gathered during the concurrent cycle for the given region after
  // it has been reclaimed.
  void clear_statistics_in_region(uint region_idx);
  // Notification for eagerly reclaimed regions to clean up.
  void humongous_object_eagerly_reclaimed(HeapRegion* r);
  // Manipulation of the global mark stack.
  // The push and pop operations are used by tasks for transfers
  // between task-local queues and the global mark stack.
  bool mark_stack_push(G1TaskQueueEntry* arr) {
    if (!_global_mark_stack.par_push_chunk(arr)) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_pop(G1TaskQueueEntry* arr) {
    return _global_mark_stack.par_pop_chunk(arr);
  }
  size_t mark_stack_size() const                { return _global_mark_stack.size(); }
  size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
  bool mark_stack_empty() const                 { return _global_mark_stack.is_empty(); }

  G1CMRootRegions* root_regions() { return &_root_regions; }

  void concurrent_cycle_start();
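
  // Illustrative note (not part of the original comments): a full concurrent
  // cycle is bracketed by concurrent_cycle_start() and concurrent_cycle_end()
  // below; concurrent_cycle_abort() abandons the cycle when a Full GC
  // invalidates the marking state.
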
  // Abandon current marking iteration due to a Full GC.
  void concurrent_cycle_abort();
  void concurrent_cycle_end();

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (uint i = 0; i < _max_num_tasks; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks.
  bool try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry);

  G1ConcurrentMark(G1CollectedHeap* g1h,
                   G1RegionToSpaceMapper* prev_bitmap_storage,
                   G1RegionToSpaceMapper* next_bitmap_storage);
  ~G1ConcurrentMark();

  G1ConcurrentMarkThread* cm_thread() { return _cm_thread; }

  const G1CMBitMap* const prev_mark_bitmap() const { return _prev_mark_bitmap; }
  G1CMBitMap* next_mark_bitmap() const { return _next_mark_bitmap; }

  // Calculates the number of concurrent GC threads to be used in the marking phase.
  uint calc_active_marking_workers();

  // Moves all per-task cached data into global state.
  void flush_all_task_caches();
  // Prepare internal data structures for the next mark cycle. This includes clearing
  // the next mark bitmap and some internal data structures. This method is intended
  // to be called concurrently with the mutator. It will yield to safepoint requests.
  void cleanup_for_next_mark();

  // Clear the previous marking bitmap during safepoint.
  void clear_prev_bitmap(WorkGang* workers);

  // Return whether the next mark bitmap has no marks set. To be used for assertions
  // only. Will not yield to pause requests.
  bool next_mark_bitmap_is_clear();

  // These two methods do the work that needs to be done at the start and end of the
  // initial mark pause.
  void pre_initial_mark();
  void post_initial_mark();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scan_root_regions();

  // Scan a single root region and mark everything reachable from it.
  void scan_root_region(HeapRegion* hr, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void mark_from_roots();

  void remark();

  void cleanup();
  // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
  // this carefully.
  inline void mark_in_prev_bitmap(oop p);

  // Clears marks for all objects in the given range, for the prev or
  // next bitmaps. Caution: the previous bitmap is usually
  // read-only, so use this carefully!
  void clear_range_in_prev_bitmap(MemRegion mr);

  inline bool is_marked_in_prev_bitmap(oop p) const;

  // Verify that there are no collection set oops on the stacks (taskqueues /
  // global mark stack) and fingers (global / per-task).
  // If marking is not in progress, it's a no-op.
  void verify_no_cset_oops() PRODUCT_RETURN;

  inline bool do_yield_check();

  bool has_aborted() { return _has_aborted; }

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;
  void threads_do(ThreadClosure* tc) const;

  void print_on_error(outputStream* st) const;
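
  // Illustrative note (not part of the original comments): in a regular cycle
  // the operations above run in this order: pre_initial_mark() and
  // post_initial_mark() within the initial-mark pause, then scan_root_regions()
  // and mark_from_roots() concurrently, then the remark() and cleanup() pauses.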

  // Mark the given object on the next bitmap if it is below nTAMS.
  // If the passed obj_size is zero, it is recalculated from the given object if
  // needed. This is to be as lazy as possible with accessing the object's size.
  inline bool mark_in_next_bitmap(uint worker_id, HeapRegion* const hr, oop const obj, size_t const obj_size = 0);
  inline bool mark_in_next_bitmap(uint worker_id, oop const obj, size_t const obj_size = 0);

  inline bool is_marked_in_next_bitmap(oop p) const;

  // Returns true if initialization was successfully completed.
  bool completed_initialization() const {
    return _completed_initialization;
  }

  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }

private:
  // Rebuilds the remembered sets for chosen regions in parallel and concurrently with the application.
  void rebuild_rem_set_concurrently();
};

// A class representing a marking task.
class G1CMTask : public TerminatorTerminator {
private:
  enum PrivateConstants {
    // The regular clock call is called once the number of scanned words reaches
    // this limit
    words_scanned_period = 12*1024,
    // The regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period = 1024,
    // Initial value for the hash seed, used in the work stealing code
    init_hash_seed = 17
  };

  // Number of entries in the per-task stats cache. This seems enough to have a very
  // low cache miss rate.
  static const uint RegionMarkStatsCacheSize = 1024;

  G1CMObjArrayProcessor _objArray_processor;

  uint                  _worker_id;
  G1CollectedHeap*      _g1h;
  G1ConcurrentMark*     _cm;
  G1CMBitMap*           _next_mark_bitmap;
  // The task queue of this task.
  G1CMTaskQueue*        _task_queue;

  G1RegionMarkStatsCache _mark_stats_cache;
  // Number of calls to this task.
  uint                  _calls;

  // When the virtual timer reaches this time, the marking step should exit.
  double                _time_target_ms;
  // Start time of the current marking step.
  double                _start_time_ms;

  // Oop closure used for iterations over oops.
  G1CMOopClosure*       _cm_oop_closure;

  // Region this task is scanning, NULL if we're not scanning any.
  HeapRegion*           _curr_region;
  // Local finger of this task, NULL if we're not scanning a region.
  HeapWord*             _finger;
  // Limit of the region this task is scanning, NULL if we're not scanning one.
  HeapWord*             _region_limit;

  // Number of words this task has scanned.
  size_t                _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                _words_scanned_limit;
  // Initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                _real_words_scanned_limit;

  // Number of references this task has visited.
  size_t                _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                _refs_reached_limit;
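
  // Illustrative note (not part of the original comments): with the constants
  // above, the regular clock call runs roughly every 12K words scanned or
  // every 1024 references visited, whichever limit is reached first; see
  // check_limits() below.
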
  // Initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                _real_refs_reached_limit;

  // Used by the work stealing code.
  int                   _hash_seed;
  // If true, then the task has aborted for some reason.
  bool                  _has_aborted;
  // Set when the task aborts because it has met its time quota.
  bool                  _has_timed_out;
  // True when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them).
  bool                  _draining_satb_buffers;

  // Number sequence of past step times.
  NumberSeq             _step_times_ms;
  // Elapsed time of this task.
  double                _elapsed_time_ms;
  // Termination time of this task.
  double                _termination_time_ms;
  // When this task got into the termination protocol.
  double                _termination_start_time_ms;

  TruncatedSeq          _marking_step_diffs_ms;

  // Updates the local fields after this task has claimed
  // a new region to scan.
  void setup_for_region(HeapRegion* hr);
  // Makes the limit of the region up-to-date.
  void update_region_limit();

  // Called when either the words scanned or the refs visited limit
  // has been reached.
  void reached_limit();
  // Recalculates the words scanned and refs visited limits.
  void recalculate_limits();
  // Decreases the words scanned and refs visited limits when we reach
  // an expensive operation.
  void decrease_limits();
  // Checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have.
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // Supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort.
  void regular_clock_call();

  // Test whether obj might have already been passed over by the
  // mark bitmap scan, and so needs to be pushed onto the mark stack.
  bool is_below_finger(oop obj, HeapWord* global_finger) const;

  template<bool scan> void process_grey_task_entry(G1TaskQueueEntry task_entry);
public:
  // Apply the closure on the given area of the objArray. Return the number of words
  // scanned.
  inline size_t scan_objArray(objArrayOop obj, MemRegion mr);
  // Resets the task; should be called right at the beginning of a marking phase.
  void reset(G1CMBitMap* next_mark_bitmap);
  // Clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms,
                       bool do_termination,
                       bool is_serial);

  // These two calls start and stop the timer.
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // Returns the worker ID associated with this task.
  uint worker_id() { return _worker_id; }
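
  // Illustrative sketch (not part of this header): a concurrent marking worker
  // typically re-invokes the step until marking finishes or is aborted (the
  // 10.0 ms time target is only an example value):
  //
  //   do {
  //     task->record_start_time();
  //     task->do_marking_step(10.0, true /* do_termination */, false /* is_serial */);
  //     task->record_end_time();
  //   } while (!cm->has_aborted() && task->has_aborted());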

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger()            { return _finger; }

  bool has_aborted()            { return _has_aborted; }
  void set_has_aborted()        { _has_aborted = true; }
  void clear_has_aborted()      { _has_aborted = false; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // Increment the number of references this task has visited.
  void increment_refs_reached() { ++_refs_reached; }

  // Grey the object by marking it. If not already marked, push it on
  // the local queue if below the finger.
  // Precondition: obj is below its region's NTAMS.
  inline void make_reference_grey(oop obj);

  // Grey the object (by calling make_reference_grey) if required,
  // e.g. obj is below its containing region's NTAMS.
  // Precondition: obj is a valid heap object.
  template <class T>
  inline void deal_with_reference(T* p);

  // Scans an object and visits its children.
  inline void scan_task_entry(G1TaskQueueEntry task_entry);

  // Pushes an object on the local queue.
  inline void push(G1TaskQueueEntry task_entry);

  // Move entries to the global stack.
  void move_entries_to_global_stack();
  // Move entries from the global stack, return true if we were successful in doing so.
  bool get_entries_from_global_stack();

  // Pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size reaches a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // Moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // Keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // Moves the local finger to a new location.
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  G1CMTask(uint worker_id,
           G1ConcurrentMark *cm,
           G1CMTaskQueue* task_queue,
           G1RegionMarkStats* mark_stats,
           uint max_regions);

  inline void update_liveness(oop const obj, size_t const obj_size);

  // Clear (without flushing) the mark cache entry for the given region.
  void clear_mark_stats_cache(uint region_idx);
  // Evict the whole statistics cache into the global statistics. Returns the
  // number of cache hits and misses so far.
  Pair<size_t, size_t> flush_mark_stats_cache();
  // Prints statistics associated with this task.
  void print_stats();
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure : public HeapRegionClosure {
private:
  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // Accumulator for the remembered set size.
  size_t _total_remset_bytes;

  // Accumulator for strong code roots memory size.
  size_t _total_strong_code_roots_bytes;

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(const char* phase_name);
  virtual bool do_heap_region(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP