1 /*
2 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/symbolTable.hpp"
28 #include "code/codeCache.hpp"
29 #include "gc/g1/concurrentMarkThread.inline.hpp"
30 #include "gc/g1/g1CollectedHeap.inline.hpp"
31 #include "gc/g1/g1CollectorState.hpp"
32 #include "gc/g1/g1ConcurrentMark.inline.hpp"
33 #include "gc/g1/g1HeapVerifier.hpp"
34 #include "gc/g1/g1OopClosures.inline.hpp"
35 #include "gc/g1/g1CardLiveData.inline.hpp"
36 #include "gc/g1/g1Policy.hpp"
37 #include "gc/g1/g1StringDedup.hpp"
38 #include "gc/g1/heapRegion.inline.hpp"
39 #include "gc/g1/heapRegionRemSet.hpp"
40 #include "gc/g1/heapRegionSet.inline.hpp"
41 #include "gc/g1/suspendibleThreadSet.hpp"
42 #include "gc/shared/gcId.hpp"
43 #include "gc/shared/gcTimer.hpp"
44 #include "gc/shared/gcTrace.hpp"
45 #include "gc/shared/gcTraceTime.inline.hpp"
46 #include "gc/shared/genOopClosures.inline.hpp"
47 #include "gc/shared/referencePolicy.hpp"
48 #include "gc/shared/strongRootsScope.hpp"
49 #include "gc/shared/taskqueue.inline.hpp"
50 #include "gc/shared/vmGCOperations.hpp"
51 #include "logging/log.hpp"
52 #include "memory/allocation.hpp"
53 #include "memory/resourceArea.hpp"
54 #include "oops/oop.inline.hpp"
55 #include "runtime/atomic.hpp"
56 #include "runtime/handles.inline.hpp"
57 #include "runtime/java.hpp"
58 #include "runtime/prefetch.inline.hpp"
59 #include "services/memTracker.hpp"
60 #include "utilities/growableArray.hpp"
61
62 // Concurrent marking bit map wrapper
63
64 G1CMBitMapRO::G1CMBitMapRO(int shifter) :
65 _bm(),
66 _shifter(shifter) {
67 _bmStartWord = 0;
68 _bmWordSize = 0;
69 }
70
71 HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
72 const HeapWord* limit) const {
73 // First we must round addr *up* to a possible object boundary.
74 addr = (HeapWord*)align_size_up((intptr_t)addr,
75 HeapWordSize << _shifter);
76 size_t addrOffset = heapWordToOffset(addr);
77 assert(limit != NULL, "limit must not be NULL");
78 size_t limitOffset = heapWordToOffset(limit);
79 size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
80 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
81 assert(nextAddr >= addr, "get_next_one postcondition");
82 assert(nextAddr == limit || isMarked(nextAddr),
83 "get_next_one postcondition");
84 return nextAddr;
85 }
86
87 #ifndef PRODUCT
88 bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
89 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
90 assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
91 "size inconsistency");
92 return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
93 _bmWordSize == heap_rs.word_size();
94 }
95 #endif
96
97 void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
98 _bm.print_on_error(st, prefix);
99 }
100
101 size_t G1CMBitMap::compute_size(size_t heap_size) {
102 return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
103 }
104
105 size_t G1CMBitMap::mark_distance() {
106 return MinObjAlignmentInBytes * BitsPerByte;
107 }
108
109 void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
110 _bmStartWord = heap.start();
111 _bmWordSize = heap.word_size();
112
113 _bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);
114
115 storage->set_mapping_changed_listener(&_listener);
116 }
117
118 void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
119 if (zero_filled) {
120 return;
121 }
122 // We need to clear the bitmap on commit, removing any existing information.
123 MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
124 _bm->clear_range(mr);
125 }
126
127 void G1CMBitMap::clear_range(MemRegion mr) {
  // Clamp the range to the part of the heap this bitmap actually covers.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
129 assert(!mr.is_empty(), "unexpected empty region");
130 // convert address range into offset range
131 _bm.at_put_range(heapWordToOffset(mr.start()),
132 heapWordToOffset(mr.end()), false);
133 }
134
135 G1CMMarkStack::G1CMMarkStack() :
136 _max_chunk_capacity(0),
137 _base(NULL),
138 _chunk_capacity(0),
139 _out_of_memory(false),
140 _should_expand(false) {
141 set_empty();
142 }
143
144 bool G1CMMarkStack::resize(size_t new_capacity) {
145 assert(is_empty(), "Only resize when stack is empty.");
146 assert(new_capacity <= _max_chunk_capacity,
147 "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
148
149 TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::allocate_or_null(new_capacity);
150
151 if (new_base == NULL) {
152 log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
153 return false;
154 }
155 // Release old mapping.
156 if (_base != NULL) {
157 MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
158 }
159
160 _base = new_base;
161 _chunk_capacity = new_capacity;
162 set_empty();
163 _should_expand = false;
164
165 return true;
166 }
167
168 size_t G1CMMarkStack::capacity_alignment() {
169 return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
170 }
171
172 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
173 guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
174
175 size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
176
177 _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
178 size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
179
180 guarantee(initial_chunk_capacity <= _max_chunk_capacity,
181 "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
182 _max_chunk_capacity,
183 initial_chunk_capacity);
184
185 log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
186 initial_chunk_capacity, _max_chunk_capacity);
187
188 return resize(initial_chunk_capacity);
189 }
190
191 void G1CMMarkStack::expand() {
192 // Clear expansion flag
193 _should_expand = false;
194
195 if (_chunk_capacity == _max_chunk_capacity) {
196 log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
197 return;
198 }
199 size_t old_capacity = _chunk_capacity;
200 // Double capacity if possible
201 size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
202
203 if (resize(new_capacity)) {
204 log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
205 old_capacity, new_capacity);
206 } else {
207 log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
208 old_capacity, new_capacity);
209 }
210 }
211
212 G1CMMarkStack::~G1CMMarkStack() {
213 if (_base != NULL) {
214 MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
215 }
216 }
217
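// The free list and the chunk list are simple LIFO singly-linked lists of
// chunks. The raw push/pop below is not lock-free: callers serialize on the
// dedicated MarkStackFreeList_lock / MarkStackChunkList_lock, which is why a
// plain pointer update on the volatile list head is sufficient.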
218 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
219 elem->next = *list;
220 *list = elem;
221 }
222
223 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
224 MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
225 add_chunk_to_list(&_chunk_list, elem);
226 _chunks_in_chunk_list++;
227 }
228
229 void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
230 MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
231 add_chunk_to_list(&_free_list, elem);
232 }
233
234 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
235 TaskQueueEntryChunk* result = *list;
236 if (result != NULL) {
237 *list = (*list)->next;
238 }
239 return result;
240 }
241
242 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
243 MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
244 TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
245 if (result != NULL) {
246 _chunks_in_chunk_list--;
247 }
248 return result;
249 }
250
251 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
252 MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
253 return remove_chunk_from_list(&_free_list);
254 }
255
256 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
257 // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Furthermore, this limits _hwm to a value of _chunk_capacity + #threads, avoiding
259 // wraparound of _hwm.
260 if (_hwm >= _chunk_capacity) {
261 return NULL;
262 }
263
264 size_t cur_idx = Atomic::add(1, &_hwm) - 1;
265 if (cur_idx >= _chunk_capacity) {
266 return NULL;
267 }
268
269 TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
270 result->next = NULL;
271 return result;
272 }
273
274 bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
275 // Get a new chunk.
276 TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();
277
278 if (new_chunk == NULL) {
279 // Did not get a chunk from the free list. Allocate from backing memory.
280 new_chunk = allocate_new_chunk();
281 }
282
283 if (new_chunk == NULL) {
284 _out_of_memory = true;
285 return false;
286 }
287
288 Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
289
290 add_chunk_to_chunk_list(new_chunk);
291
292 return true;
293 }
294
295 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
296 TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
297
298 if (cur == NULL) {
299 return false;
300 }
301
302 Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
303
304 add_chunk_to_free_list(cur);
305 return true;
306 }
307
308 void G1CMMarkStack::set_empty() {
309 _chunks_in_chunk_list = 0;
310 _hwm = 0;
311 clear_out_of_memory();
312 _chunk_list = NULL;
313 _free_list = NULL;
314 }
315
316 G1CMRootRegions::G1CMRootRegions() :
317 _cm(NULL), _scan_in_progress(false),
318 _should_abort(false), _claimed_survivor_index(0) { }
319
320 void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
321 _survivors = survivors;
322 _cm = cm;
323 }
324
325 void G1CMRootRegions::prepare_for_scan() {
326 assert(!scan_in_progress(), "pre-condition");
327
328 // Currently, only survivors can be root regions.
329 _claimed_survivor_index = 0;
330 _scan_in_progress = _survivors->regions()->is_nonempty();
331 _should_abort = false;
332 }
333
334 HeapRegion* G1CMRootRegions::claim_next() {
335 if (_should_abort) {
336 // If someone has set the should_abort flag, we return NULL to
337 // force the caller to bail out of their loop.
338 return NULL;
339 }
340
341 // Currently, only survivors can be root regions.
342 const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();
343
344 int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
345 if (claimed_index < survivor_regions->length()) {
346 return survivor_regions->at(claimed_index);
347 }
348 return NULL;
349 }
350
351 uint G1CMRootRegions::num_root_regions() const {
352 return (uint)_survivors->regions()->length();
353 }
354
355 void G1CMRootRegions::notify_scan_done() {
356 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
357 _scan_in_progress = false;
358 RootRegionScan_lock->notify_all();
359 }
360
361 void G1CMRootRegions::cancel_scan() {
362 notify_scan_done();
363 }
364
365 void G1CMRootRegions::scan_finished() {
366 assert(scan_in_progress(), "pre-condition");
367
368 // Currently, only survivors can be root regions.
369 if (!_should_abort) {
370 assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
371 assert((uint)_claimed_survivor_index >= _survivors->length(),
372 "we should have claimed all survivors, claimed index = %u, length = %u",
373 (uint)_claimed_survivor_index, _survivors->length());
374 }
375
376 notify_scan_done();
377 }
378
379 bool G1CMRootRegions::wait_until_scan_finished() {
380 if (!scan_in_progress()) return false;
381
382 {
383 MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
384 while (scan_in_progress()) {
385 RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
386 }
387 }
388 return true;
389 }
390
391 uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
392 return MAX2((n_par_threads + 2) / 4, 1U);
393 }
394
395 G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
396 _g1h(g1h),
397 _markBitMap1(),
398 _markBitMap2(),
399 _parallel_marking_threads(0),
400 _max_parallel_marking_threads(0),
401 _sleep_factor(0.0),
402 _marking_task_overhead(1.0),
403 _cleanup_list("Cleanup List"),
404
405 _prevMarkBitMap(&_markBitMap1),
406 _nextMarkBitMap(&_markBitMap2),
407
408 _global_mark_stack(),
409 // _finger set in set_non_marking_state
410
411 _max_worker_id(ParallelGCThreads),
412 // _active_tasks set in set_non_marking_state
413 // _tasks set inside the constructor
414 _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
415 _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
416
417 _has_overflown(false),
418 _concurrent(false),
419 _has_aborted(false),
420 _restart_for_overflow(false),
421 _concurrent_marking_in_progress(false),
422 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
423 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
424
425 // _verbose_level set below
426
427 _init_times(),
428 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
429 _cleanup_times(),
430 _total_counting_time(0.0),
431 _total_rs_scrub_time(0.0),
432
433 _parallel_workers(NULL),
434
435 _completed_initialization(false) {
436
437 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
438 _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
439
440 // Create & start a ConcurrentMark thread.
441 _cmThread = new ConcurrentMarkThread(this);
442 assert(cmThread() != NULL, "CM Thread should have been created");
443 assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
444 if (_cmThread->osthread() == NULL) {
445 vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
446 }
447
448 assert(CGC_lock != NULL, "Where's the CGC_lock?");
449 assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
450 assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
451
452 SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
453 satb_qs.set_buffer_size(G1SATBBufferSize);
454
455 _root_regions.init(_g1h->survivor(), this);
456
457 if (ConcGCThreads > ParallelGCThreads) {
458 log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
459 ConcGCThreads, ParallelGCThreads);
460 return;
461 }
462 if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
463 // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
464 // if both are set
465 _sleep_factor = 0.0;
466 _marking_task_overhead = 1.0;
467 } else if (G1MarkingOverheadPercent > 0) {
468 // We will calculate the number of parallel marking threads based
469 // on a target overhead with respect to the soft real-time goal
470 double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
471 double overall_cm_overhead =
472 (double) MaxGCPauseMillis * marking_overhead /
473 (double) GCPauseIntervalMillis;
474 double cpu_ratio = 1.0 / os::initial_active_processor_count();
475 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
476 double marking_task_overhead =
477 overall_cm_overhead / marking_thread_num * os::initial_active_processor_count();
478 double sleep_factor =
479 (1.0 - marking_task_overhead) / marking_task_overhead;
480
481 FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
482 _sleep_factor = sleep_factor;
483 _marking_task_overhead = marking_task_overhead;
484 } else {
485 // Calculate the number of parallel marking threads by scaling
486 // the number of parallel GC threads.
487 uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
488 FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
489 _sleep_factor = 0.0;
490 _marking_task_overhead = 1.0;
491 }
492
493 assert(ConcGCThreads > 0, "Should have been set");
494 log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
495 log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
496 _parallel_marking_threads = ConcGCThreads;
497 _max_parallel_marking_threads = _parallel_marking_threads;
498
499 _parallel_workers = new WorkGang("G1 Marker",
500 _max_parallel_marking_threads, false, true);
501 if (_parallel_workers == NULL) {
502 vm_exit_during_initialization("Failed necessary allocation.");
503 } else {
504 _parallel_workers->initialize_workers();
505 }
506
507 if (FLAG_IS_DEFAULT(MarkStackSize)) {
508 size_t mark_stack_size =
509 MIN2(MarkStackSizeMax,
510 MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
511 // Verify that the calculated value for MarkStackSize is in range.
512 // It would be nice to use the private utility routine from Arguments.
513 if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
514 log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
515 "must be between 1 and " SIZE_FORMAT,
516 mark_stack_size, MarkStackSizeMax);
517 return;
518 }
519 FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
520 } else {
521 // Verify MarkStackSize is in range.
522 if (FLAG_IS_CMDLINE(MarkStackSize)) {
523 if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
524 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
525 log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
526 "must be between 1 and " SIZE_FORMAT,
527 MarkStackSize, MarkStackSizeMax);
528 return;
529 }
530 } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
531 if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
532 log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
533 " or for MarkStackSizeMax (" SIZE_FORMAT ")",
534 MarkStackSize, MarkStackSizeMax);
535 return;
536 }
537 }
538 }
539 }
540
541 if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
542 vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
543 }
544
545 _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
546 _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
547
548 // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
549 _active_tasks = _max_worker_id;
550
551 for (uint i = 0; i < _max_worker_id; ++i) {
552 G1CMTaskQueue* task_queue = new G1CMTaskQueue();
553 task_queue->initialize();
554 _task_queues->register_queue(i, task_queue);
555
556 _tasks[i] = new G1CMTask(i, this, task_queue, _task_queues);
557
558 _accum_task_vtime[i] = 0.0;
559 }
560
561 // so that the call below can read a sensible value
562 _heap_start = g1h->reserved_region().start();
563 set_non_marking_state();
564 _completed_initialization = true;
565 }
566
567 void G1ConcurrentMark::reset() {
568 // Starting values for these two. This should be called in a STW
569 // phase.
570 MemRegion reserved = _g1h->g1_reserved();
571 _heap_start = reserved.start();
572 _heap_end = reserved.end();
573
574 // Separated the asserts so that we know which one fires.
575 assert(_heap_start != NULL, "heap bounds should look ok");
576 assert(_heap_end != NULL, "heap bounds should look ok");
577 assert(_heap_start < _heap_end, "heap bounds should look ok");
578
579 // Reset all the marking data structures and any necessary flags
580 reset_marking_state();
581
582 // We do reset all of them, since different phases will use
583 // different number of active threads. So, it's easiest to have all
584 // of them ready.
585 for (uint i = 0; i < _max_worker_id; ++i) {
586 _tasks[i]->reset(_nextMarkBitMap);
587 }
588
  // We need this to make sure that the flag is on during the evacuation
  // pause with the initial mark piggy-backed.
591 set_concurrent_marking_in_progress();
592 }
593
594
595 void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
596 _global_mark_stack.set_should_expand(has_overflown());
597 _global_mark_stack.set_empty(); // Also clears the overflow stack's overflow flag
598 if (clear_overflow) {
599 clear_has_overflown();
600 } else {
601 assert(has_overflown(), "pre-condition");
602 }
603 _finger = _heap_start;
604
605 for (uint i = 0; i < _max_worker_id; ++i) {
606 G1CMTaskQueue* queue = _task_queues->queue(i);
607 queue->set_empty();
608 }
609 }
610
611 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
612 assert(active_tasks <= _max_worker_id, "we should not have more");
613
614 _active_tasks = active_tasks;
615 // Need to update the three data structures below according to the
616 // number of active threads for this phase.
617 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
618 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
619 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
620 }
621
622 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
623 set_concurrency(active_tasks);
624
625 _concurrent = concurrent;
626 // We propagate this to all tasks, not just the active ones.
627 for (uint i = 0; i < _max_worker_id; ++i)
628 _tasks[i]->set_concurrent(concurrent);
629
630 if (concurrent) {
631 set_concurrent_marking_in_progress();
632 } else {
633 // We currently assume that the concurrent flag has been set to
634 // false before we start remark. At this point we should also be
635 // in a STW phase.
636 assert(!concurrent_marking_in_progress(), "invariant");
637 assert(out_of_regions(),
638 "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
639 p2i(_finger), p2i(_heap_end));
640 }
641 }
642
643 void G1ConcurrentMark::set_non_marking_state() {
644 // We set the global marking state to some default values when we're
645 // not doing marking.
646 reset_marking_state();
647 _active_tasks = 0;
648 clear_concurrent_marking_in_progress();
649 }
650
651 G1ConcurrentMark::~G1ConcurrentMark() {
652 // The G1ConcurrentMark instance is never freed.
653 ShouldNotReachHere();
654 }
655
656 class G1ClearBitMapTask : public AbstractGangTask {
657 public:
658 static size_t chunk_size() { return M; }
659
660 private:
661 // Heap region closure used for clearing the given mark bitmap.
662 class G1ClearBitmapHRClosure : public HeapRegionClosure {
663 private:
664 G1CMBitMap* _bitmap;
665 G1ConcurrentMark* _cm;
666 public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
668 }
669
670 virtual bool doHeapRegion(HeapRegion* r) {
671 size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
672
673 HeapWord* cur = r->bottom();
674 HeapWord* const end = r->end();
675
676 while (cur < end) {
677 MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
678 _bitmap->clear_range(mr);
679
680 cur += chunk_size_in_words;
681
682 // Abort iteration if after yielding the marking has been aborted.
683 if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
684 return true;
685 }
686 // Repeat the asserts from before the start of the closure. We will do them
687 // as asserts here to minimize their overhead on the product. However, we
688 // will have them as guarantees at the beginning / end of the bitmap
689 // clearing to get some checking in the product.
690 assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
691 assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
692 }
693 assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
694
695 return false;
696 }
697 };
698
699 G1ClearBitmapHRClosure _cl;
700 HeapRegionClaimer _hr_claimer;
701 bool _suspendible; // If the task is suspendible, workers must join the STS.
702
703 public:
704 G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
705 AbstractGangTask("G1 Clear Bitmap"),
706 _cl(bitmap, suspendible ? cm : NULL),
707 _hr_claimer(n_workers),
708 _suspendible(suspendible)
709 { }
710
711 void work(uint worker_id) {
712 SuspendibleThreadSetJoiner sts_join(_suspendible);
713 G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
714 }
715
716 bool is_complete() {
717 return _cl.complete();
718 }
719 };
720
721 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
722 assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
723
724 size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
725 size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
726
727 uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
728
729 G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
730
731 log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
732 workers->run_task(&cl, num_workers);
733 guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
734 }
735
736 void G1ConcurrentMark::cleanup_for_next_mark() {
737 // Make sure that the concurrent mark thread looks to still be in
738 // the current cycle.
739 guarantee(cmThread()->during_cycle(), "invariant");
740
741 // We are finishing up the current cycle by clearing the next
742 // marking bitmap and getting it ready for the next cycle. During
743 // this time no other cycle can start. So, let's make sure that this
744 // is the case.
745 guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
746
747 clear_bitmap(_nextMarkBitMap, _parallel_workers, true);
748
749 // Clear the live count data. If the marking has been aborted, the abort()
750 // call already did that.
751 if (!has_aborted()) {
752 clear_live_data(_parallel_workers);
753 DEBUG_ONLY(verify_live_data_clear());
754 }
755
756 // Repeat the asserts from above.
757 guarantee(cmThread()->during_cycle(), "invariant");
758 guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
759 }
760
761 void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
762 assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
763 clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
764 }
765
766 class CheckBitmapClearHRClosure : public HeapRegionClosure {
767 G1CMBitMap* _bitmap;
768 bool _error;
769 public:
770 CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
771 }
772
773 virtual bool doHeapRegion(HeapRegion* r) {
774 // This closure can be called concurrently to the mutator, so we must make sure
775 // that the result of the getNextMarkedWordAddress() call is compared to the
776 // value passed to it as limit to detect any found bits.
777 // end never changes in G1.
778 HeapWord* end = r->end();
779 return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
780 }
781 };
782
783 bool G1ConcurrentMark::nextMarkBitmapIsClear() {
784 CheckBitmapClearHRClosure cl(_nextMarkBitMap);
785 _g1h->heap_region_iterate(&cl);
786 return cl.complete();
787 }
788
789 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
790 public:
791 bool doHeapRegion(HeapRegion* r) {
792 r->note_start_of_marking();
793 return false;
794 }
795 };
796
797 void G1ConcurrentMark::checkpointRootsInitialPre() {
798 G1CollectedHeap* g1h = G1CollectedHeap::heap();
799 G1Policy* g1p = g1h->g1_policy();
800
801 _has_aborted = false;
802
803 // Initialize marking structures. This has to be done in a STW phase.
804 reset();
805
806 // For each region note start of marking.
807 NoteStartOfMarkHRClosure startcl;
808 g1h->heap_region_iterate(&startcl);
809 }
810
811
812 void G1ConcurrentMark::checkpointRootsInitialPost() {
813 G1CollectedHeap* g1h = G1CollectedHeap::heap();
814
815 // Start Concurrent Marking weak-reference discovery.
816 ReferenceProcessor* rp = g1h->ref_processor_cm();
817 // enable ("weak") refs discovery
818 rp->enable_discovery();
819 rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
820
821 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
824 satb_mq_set.set_active_all_threads(true, /* new active value */
825 false /* expected_active */);
826
827 _root_regions.prepare_for_scan();
828
829 // update_g1_committed() will be called at the end of an evac pause
830 // when marking is on. So, it's also called at the end of the
831 // initial-mark pause to update the heap end, if the heap expands
832 // during it. No need to call it here.
833 }
834
835 /*
836 * Notice that in the next two methods, we actually leave the STS
837 * during the barrier sync and join it immediately afterwards. If we
838 * do not do this, the following deadlock can occur: one thread could
839 * be in the barrier sync code, waiting for the other thread to also
840 * sync up, whereas another one could be trying to yield, while also
841 * waiting for the other threads to sync up too.
842 *
843 * Note, however, that this code is also used during remark and in
844 * this case we should not attempt to leave / enter the STS, otherwise
845 * we'll either hit an assert (debug / fastdebug) or deadlock
846 * (product). So we should only leave / enter the STS if we are
847 * operating concurrently.
848 *
849 * Because the thread that does the sync barrier has left the STS, it
850 * is possible to be suspended for a Full GC or an evacuation pause
851 * could occur. This is actually safe, since the entering the sync
852 * barrier is one of the last things do_marking_step() does, and it
853 * doesn't manipulate any data structures afterwards.
854 */
855
856 void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
857 bool barrier_aborted;
858 {
859 SuspendibleThreadSetLeaver sts_leave(concurrent());
860 barrier_aborted = !_first_overflow_barrier_sync.enter();
861 }
862
863 // at this point everyone should have synced up and not be doing any
864 // more work
865
866 if (barrier_aborted) {
867 // If the barrier aborted we ignore the overflow condition and
868 // just abort the whole marking phase as quickly as possible.
869 return;
870 }
871
872 // If we're executing the concurrent phase of marking, reset the marking
873 // state; otherwise the marking state is reset after reference processing,
874 // during the remark pause.
875 // If we reset here as a result of an overflow during the remark we will
876 // see assertion failures from any subsequent set_concurrency_and_phase()
877 // calls.
878 if (concurrent()) {
    // let the task associated with worker 0 do this
880 if (worker_id == 0) {
881 // task 0 is responsible for clearing the global data structures
882 // We should be here because of an overflow. During STW we should
883 // not clear the overflow flag since we rely on it being true when
884 // we exit this method to abort the pause and restart concurrent
885 // marking.
886 reset_marking_state(true /* clear_overflow */);
887
888 log_info(gc, marking)("Concurrent Mark reset for overflow");
889 }
890 }
891
  // after this, each task should reset its own data structures and
  // then go into the second barrier
894 }
895
896 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
897 SuspendibleThreadSetLeaver sts_leave(concurrent());
898 _second_overflow_barrier_sync.enter();
899
900 // at this point everything should be re-initialized and ready to go
901 }
902
903 class G1CMConcurrentMarkingTask: public AbstractGangTask {
904 private:
905 G1ConcurrentMark* _cm;
906 ConcurrentMarkThread* _cmt;
907
908 public:
909 void work(uint worker_id) {
910 assert(Thread::current()->is_ConcurrentGC_thread(),
911 "this should only be done by a conc GC thread");
912 ResourceMark rm;
913
914 double start_vtime = os::elapsedVTime();
915
916 {
917 SuspendibleThreadSetJoiner sts_join;
918
919 assert(worker_id < _cm->active_tasks(), "invariant");
920 G1CMTask* the_task = _cm->task(worker_id);
921 the_task->record_start_time();
922 if (!_cm->has_aborted()) {
923 do {
924 double start_vtime_sec = os::elapsedVTime();
925 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
926
927 the_task->do_marking_step(mark_step_duration_ms,
928 true /* do_termination */,
929 false /* is_serial*/);
930
931 double end_vtime_sec = os::elapsedVTime();
932 double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
933 _cm->clear_has_overflown();
934
935 _cm->do_yield_check();
936
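          // Throttle the marking thread if the ergonomics asked for it: if
          // only this task aborted (e.g. it hit its time target) while
          // marking as a whole did not, sleep for sleep_factor times the
          // virtual time just consumed. With the default settings
          // sleep_factor is 0.0, so this is a no-op.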
937 jlong sleep_time_ms;
938 if (!_cm->has_aborted() && the_task->has_aborted()) {
939 sleep_time_ms =
940 (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
941 {
942 SuspendibleThreadSetLeaver sts_leave;
943 os::sleep(Thread::current(), sleep_time_ms, false);
944 }
945 }
946 } while (!_cm->has_aborted() && the_task->has_aborted());
947 }
948 the_task->record_end_time();
949 guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
950 }
951
952 double end_vtime = os::elapsedVTime();
953 _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
954 }
955
956 G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
957 ConcurrentMarkThread* cmt) :
958 AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
959
960 ~G1CMConcurrentMarkingTask() { }
961 };
962
963 // Calculates the number of active workers for a concurrent
964 // phase.
965 uint G1ConcurrentMark::calc_parallel_marking_threads() {
966 uint n_conc_workers = 0;
967 if (!UseDynamicNumberOfGCThreads ||
968 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
969 !ForceDynamicNumberOfGCThreads)) {
970 n_conc_workers = max_parallel_marking_threads();
971 } else {
972 n_conc_workers =
973 AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
974 1, /* Minimum workers */
975 parallel_marking_threads(),
976 Threads::number_of_non_daemon_threads());
977 // Don't scale down "n_conc_workers" by scale_parallel_threads() because
978 // that scaling has already gone into "_max_parallel_marking_threads".
979 }
980 assert(n_conc_workers > 0 && n_conc_workers <= max_parallel_marking_threads(),
981 "Calculated number of workers must be larger than zero and at most the maximum %u, but is %u",
982 max_parallel_marking_threads(), n_conc_workers);
983 return n_conc_workers;
984 }
985
986 void G1ConcurrentMark::scanRootRegion(HeapRegion* hr) {
987 // Currently, only survivors can be root regions.
988 assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
989 G1RootRegionScanClosure cl(_g1h, this);
990
991 const uintx interval = PrefetchScanIntervalInBytes;
992 HeapWord* curr = hr->bottom();
993 const HeapWord* end = hr->top();
994 while (curr < end) {
995 Prefetch::read(curr, interval);
996 oop obj = oop(curr);
997 int size = obj->oop_iterate_size(&cl);
998 assert(size == obj->size(), "sanity");
999 curr += size;
1000 }
1001 }
1002
1003 class G1CMRootRegionScanTask : public AbstractGangTask {
1004 private:
1005 G1ConcurrentMark* _cm;
1006
1007 public:
1008 G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
1009 AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
1010
1011 void work(uint worker_id) {
1012 assert(Thread::current()->is_ConcurrentGC_thread(),
1013 "this should only be done by a conc GC thread");
1014
1015 G1CMRootRegions* root_regions = _cm->root_regions();
1016 HeapRegion* hr = root_regions->claim_next();
1017 while (hr != NULL) {
1018 _cm->scanRootRegion(hr);
1019 hr = root_regions->claim_next();
1020 }
1021 }
1022 };
1023
1024 void G1ConcurrentMark::scan_root_regions() {
1025 // scan_in_progress() will have been set to true only if there was
1026 // at least one root region to scan. So, if it's false, we
1027 // should not attempt to do any further work.
1028 if (root_regions()->scan_in_progress()) {
1029 assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
1030
1031 _parallel_marking_threads = MIN2(calc_parallel_marking_threads(),
1032 // We distribute work on a per-region basis, so starting
1033 // more threads than that is useless.
1034 root_regions()->num_root_regions());
1035 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1036 "Maximum number of marking threads exceeded");
1037
1038 G1CMRootRegionScanTask task(this);
1039 log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
1040 task.name(), _parallel_marking_threads, root_regions()->num_root_regions());
1041 _parallel_workers->run_task(&task, _parallel_marking_threads);
1042
1043 // It's possible that has_aborted() is true here without actually
1044 // aborting the survivor scan earlier. This is OK as it's
1045 // mainly used for sanity checking.
1046 root_regions()->scan_finished();
1047 }
1048 }
1049
1050 void G1ConcurrentMark::concurrent_cycle_start() {
1051 _gc_timer_cm->register_gc_start();
1052
1053 _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
1054
1055 _g1h->trace_heap_before_gc(_gc_tracer_cm);
1056 }
1057
1058 void G1ConcurrentMark::concurrent_cycle_end() {
1059 _g1h->trace_heap_after_gc(_gc_tracer_cm);
1060
1061 if (has_aborted()) {
1062 _gc_tracer_cm->report_concurrent_mode_failure();
1063 }
1064
1065 _gc_timer_cm->register_gc_end();
1066
1067 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1068 }
1069
1070 void G1ConcurrentMark::mark_from_roots() {
1071 // we might be tempted to assert that:
1072 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1073 // "inconsistent argument?");
1074 // However that wouldn't be right, because it's possible that
1075 // a safepoint is indeed in progress as a younger generation
1076 // stop-the-world GC happens even as we mark in this generation.
1077
1078 _restart_for_overflow = false;
1079
1080 // _g1h has _n_par_threads
1081 _parallel_marking_threads = calc_parallel_marking_threads();
1082 assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1083 "Maximum number of marking threads exceeded");
1084
1085 uint active_workers = MAX2(1U, parallel_marking_threads());
1086 assert(active_workers > 0, "Should have been set");
1087
1088 // Setting active workers is not guaranteed since fewer
1089 // worker threads may currently exist and more may not be
1090 // available.
1091 active_workers = _parallel_workers->update_active_workers(active_workers);
1092 log_info(gc, task)("Using %u workers of %u for marking", active_workers, _parallel_workers->total_workers());
1093
1094 // Parallel task terminator is set in "set_concurrency_and_phase()"
1095 set_concurrency_and_phase(active_workers, true /* concurrent */);
1096
1097 G1CMConcurrentMarkingTask markingTask(this, cmThread());
1098 _parallel_workers->run_task(&markingTask);
1099 print_stats();
1100 }
1101
1102 void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1103 // world is stopped at this checkpoint
1104 assert(SafepointSynchronize::is_at_safepoint(),
1105 "world should be stopped");
1106
1107 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1108
1109 // If a full collection has happened, we shouldn't do this.
1110 if (has_aborted()) {
1111 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1112 return;
1113 }
1114
1115 SvcGCMarker sgcm(SvcGCMarker::OTHER);
1116
1117 if (VerifyDuringGC) {
1118 HandleMark hm; // handle scope
1119 g1h->prepare_for_verify();
1120 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1121 }
1122 g1h->verifier()->check_bitmaps("Remark Start");
1123
1124 G1Policy* g1p = g1h->g1_policy();
1125 g1p->record_concurrent_mark_remark_start();
1126
1127 double start = os::elapsedTime();
1128
1129 checkpointRootsFinalWork();
1130
1131 double mark_work_end = os::elapsedTime();
1132
1133 weakRefsWork(clear_all_soft_refs);
1134
1135 if (has_overflown()) {
1136 // We overflowed. Restart concurrent marking.
1137 _restart_for_overflow = true;
1138
1139 // Verify the heap w.r.t. the previous marking bitmap.
1140 if (VerifyDuringGC) {
1141 HandleMark hm; // handle scope
1142 g1h->prepare_for_verify();
1143 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
1144 }
1145
1146 // Clear the marking state because we will be restarting
1147 // marking due to overflowing the global mark stack.
1148 reset_marking_state();
1149 } else {
1150 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1151 // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
1154 satb_mq_set.set_active_all_threads(false, /* new active value */
1155 true /* expected_active */);
1156
1157 if (VerifyDuringGC) {
1158 HandleMark hm; // handle scope
1159 g1h->prepare_for_verify();
1160 Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
1161 }
1162 g1h->verifier()->check_bitmaps("Remark End");
1163 assert(!restart_for_overflow(), "sanity");
1164 // Completely reset the marking state since marking completed
1165 set_non_marking_state();
1166 }
1167
1168 // Expand the marking stack, if we have to and if we can.
1169 if (_global_mark_stack.should_expand()) {
1170 _global_mark_stack.expand();
1171 }
1172
1173 // Statistics
1174 double now = os::elapsedTime();
1175 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1176 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1177 _remark_times.add((now - start) * 1000.0);
1178
1179 g1p->record_concurrent_mark_remark_end();
1180
1181 G1CMIsAliveClosure is_alive(g1h);
1182 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1183 }
1184
1185 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1186 G1CollectedHeap* _g1;
1187 size_t _freed_bytes;
1188 FreeRegionList* _local_cleanup_list;
1189 uint _old_regions_removed;
1190 uint _humongous_regions_removed;
1191 HRRSCleanupTask* _hrrs_cleanup_task;
1192
1193 public:
1194 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1195 FreeRegionList* local_cleanup_list,
1196 HRRSCleanupTask* hrrs_cleanup_task) :
1197 _g1(g1),
1198 _freed_bytes(0),
1199 _local_cleanup_list(local_cleanup_list),
1200 _old_regions_removed(0),
1201 _humongous_regions_removed(0),
1202 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1203
1204 size_t freed_bytes() { return _freed_bytes; }
  uint old_regions_removed() const { return _old_regions_removed; }
  uint humongous_regions_removed() const { return _humongous_regions_removed; }
1207
1208 bool doHeapRegion(HeapRegion *hr) {
1209 if (hr->is_archive()) {
1210 return false;
1211 }
1212 _g1->reset_gc_time_stamps(hr);
1213 hr->note_end_of_marking();
1214
1215 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1216 _freed_bytes += hr->used();
1217 hr->set_containing_set(NULL);
1218 if (hr->is_humongous()) {
1219 _humongous_regions_removed++;
1220 _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
1221 } else {
1222 _old_regions_removed++;
1223 _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
1224 }
1225 } else {
1226 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1227 }
1228
1229 return false;
1230 }
1231 };
1232
1233 class G1ParNoteEndTask: public AbstractGangTask {
1234 friend class G1NoteEndOfConcMarkClosure;
1235
1236 protected:
1237 G1CollectedHeap* _g1h;
1238 FreeRegionList* _cleanup_list;
1239 HeapRegionClaimer _hrclaimer;
1240
1241 public:
1242 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1243 AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1244 }
1245
1246 void work(uint worker_id) {
1247 FreeRegionList local_cleanup_list("Local Cleanup List");
1248 HRRSCleanupTask hrrs_cleanup_task;
1249 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1250 &hrrs_cleanup_task);
1251 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1252 assert(g1_note_end.complete(), "Shouldn't have yielded!");
1253
1254 // Now update the lists
1255 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1256 {
1257 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1258 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1259
1260 // If we iterate over the global cleanup list at the end of
1261 // cleanup to do this printing we will not guarantee to only
1262 // generate output for the newly-reclaimed regions (the list
1263 // might not be empty at the beginning of cleanup; we might
1264 // still be working on its previous contents). So we do the
1265 // printing here, before we append the new regions to the global
1266 // cleanup list.
1267
1268 G1HRPrinter* hr_printer = _g1h->hr_printer();
1269 if (hr_printer->is_active()) {
1270 FreeRegionListIterator iter(&local_cleanup_list);
1271 while (iter.more_available()) {
1272 HeapRegion* hr = iter.get_next();
1273 hr_printer->cleanup(hr);
1274 }
1275 }
1276
1277 _cleanup_list->add_ordered(&local_cleanup_list);
1278 assert(local_cleanup_list.is_empty(), "post-condition");
1279
1280 HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1281 }
1282 }
1283 };
1284
1285 void G1ConcurrentMark::cleanup() {
1286 // world is stopped at this checkpoint
1287 assert(SafepointSynchronize::is_at_safepoint(),
1288 "world should be stopped");
1289 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1290
1291 // If a full collection has happened, we shouldn't do this.
1292 if (has_aborted()) {
1293 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1294 return;
1295 }
1296
1297 g1h->verifier()->verify_region_sets_optional();
1298
1299 if (VerifyDuringGC) {
1300 HandleMark hm; // handle scope
1301 g1h->prepare_for_verify();
1302 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1303 }
1304 g1h->verifier()->check_bitmaps("Cleanup Start");
1305
1306 G1Policy* g1p = g1h->g1_policy();
1307 g1p->record_concurrent_mark_cleanup_start();
1308
1309 double start = os::elapsedTime();
1310
1311 HeapRegionRemSet::reset_for_cleanup_tasks();
1312
1313 {
1314 GCTraceTime(Debug, gc)("Finalize Live Data");
1315 finalize_live_data();
1316 }
1317
1318 if (VerifyDuringGC) {
1319 GCTraceTime(Debug, gc)("Verify Live Data");
1320 verify_live_data();
1321 }
1322
1323 g1h->collector_state()->set_mark_in_progress(false);
1324
1325 double count_end = os::elapsedTime();
1326 double this_final_counting_time = (count_end - start);
1327 _total_counting_time += this_final_counting_time;
1328
1329 if (log_is_enabled(Trace, gc, liveness)) {
1330 G1PrintRegionLivenessInfoClosure cl("Post-Marking");
1331 _g1h->heap_region_iterate(&cl);
1332 }
1333
  // Install the newly created mark bitmap as "prev".
1335 swapMarkBitMaps();
1336
1337 g1h->reset_gc_time_stamp();
1338
1339 uint n_workers = _g1h->workers()->active_workers();
1340
1341 // Note end of marking in all heap regions.
1342 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1343 g1h->workers()->run_task(&g1_par_note_end_task);
1344 g1h->check_gc_time_stamps();
1345
1346 if (!cleanup_list_is_empty()) {
1347 // The cleanup list is not empty, so we'll have to process it
1348 // concurrently. Notify anyone else that might be wanting free
1349 // regions that there will be more free regions coming soon.
1350 g1h->set_free_regions_coming();
1351 }
1352
  // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
  // call below, since it affects the metric by which we sort the heap
  // regions.
1355 if (G1ScrubRemSets) {
1356 double rs_scrub_start = os::elapsedTime();
1357 g1h->scrub_rem_set();
1358 _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
1359 }
1360
1361 // this will also free any regions totally full of garbage objects,
1362 // and sort the regions.
1363 g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1364
1365 // Statistics.
1366 double end = os::elapsedTime();
1367 _cleanup_times.add((end - start) * 1000.0);
1368
1369 // Clean up will have freed any regions completely full of garbage.
1370 // Update the soft reference policy with the new heap occupancy.
1371 Universe::update_heap_info_at_gc();
1372
1373 if (VerifyDuringGC) {
1374 HandleMark hm; // handle scope
1375 g1h->prepare_for_verify();
1376 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
1377 }
1378
1379 g1h->verifier()->check_bitmaps("Cleanup End");
1380
1381 g1h->verifier()->verify_region_sets_optional();
1382
1383 // We need to make this be a "collection" so any collection pause that
1384 // races with it goes around and waits for completeCleanup to finish.
1385 g1h->increment_total_collections();
1386
1387 // Clean out dead classes and update Metaspace sizes.
1388 if (ClassUnloadingWithConcurrentMark) {
1389 ClassLoaderDataGraph::purge();
1390 }
1391 MetaspaceGC::compute_new_size();
1392
1393 // We reclaimed old regions so we should calculate the sizes to make
1394 // sure we update the old gen/space data.
1395 g1h->g1mm()->update_sizes();
1396 g1h->allocation_context_stats().update_after_mark();
1397 }
1398
1399 void G1ConcurrentMark::complete_cleanup() {
1400 if (has_aborted()) return;
1401
1402 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1403
1404 _cleanup_list.verify_optional();
1405 FreeRegionList tmp_free_list("Tmp Free List");
1406
1407 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1408 "cleanup list has %u entries",
1409 _cleanup_list.length());
1410
1411 // No one else should be accessing the _cleanup_list at this point,
1412 // so it is not necessary to take any locks
1413 while (!_cleanup_list.is_empty()) {
1414 HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1415 assert(hr != NULL, "Got NULL from a non-empty list");
1416 hr->par_clear();
1417 tmp_free_list.add_ordered(hr);
1418
1419 // Instead of adding one region at a time to the secondary_free_list,
1420 // we accumulate them in the local list and move them a few at a
1421 // time. This also cuts down on the number of notify_all() calls
1422 // we do during this process. We'll also append the local list when
1423 // _cleanup_list is empty (which means we just removed the last
1424 // region from the _cleanup_list).
1425 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
1426 _cleanup_list.is_empty()) {
1427 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1428 "appending %u entries to the secondary_free_list, "
1429 "cleanup list still has %u entries",
1430 tmp_free_list.length(),
1431 _cleanup_list.length());
1432
1433 {
1434 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1435 g1h->secondary_free_list_add(&tmp_free_list);
1436 SecondaryFreeList_lock->notify_all();
1437 }
1438 #ifndef PRODUCT
1439 if (G1StressConcRegionFreeing) {
1440 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
1441 os::sleep(Thread::current(), (jlong) 1, false);
1442 }
1443 }
1444 #endif
1445 }
1446 }
1447 assert(tmp_free_list.is_empty(), "post-condition");
1448 }
1449
1450 // Supporting Object and Oop closures for reference discovery
// and processing during marking
1452
1453 bool G1CMIsAliveClosure::do_object_b(oop obj) {
1454 HeapWord* addr = (HeapWord*)obj;
1455 return addr != NULL &&
1456 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
1457 }
1458
// 'Keep Alive' oop closure used by both serial and parallel reference processing.
1460 // Uses the G1CMTask associated with a worker thread (for serial reference
1461 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1462 // trace referent objects.
1463 //
1464 // Using the G1CMTask and embedded local queues avoids having the worker
1465 // threads operating on the global mark stack. This reduces the risk
1466 // of overflowing the stack - which we would rather avoid at this late
// stage. Also, using the tasks' local queues removes the potential
// for the workers to interfere with each other, which could occur if
// they operated on the global stack.
1470
1471 class G1CMKeepAliveAndDrainClosure: public OopClosure {
1472 G1ConcurrentMark* _cm;
1473 G1CMTask* _task;
1474 int _ref_counter_limit;
1475 int _ref_counter;
1476 bool _is_serial;
1477 public:
1478 G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task),
    _ref_counter_limit(G1RefProcDrainInterval),
    _is_serial(is_serial) {
1481 assert(_ref_counter_limit > 0, "sanity");
1482 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1483 _ref_counter = _ref_counter_limit;
1484 }
1485
1486 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1487 virtual void do_oop( oop* p) { do_oop_work(p); }
1488
1489 template <class T> void do_oop_work(T* p) {
1490 if (!_cm->has_overflown()) {
1491 oop obj = oopDesc::load_decode_heap_oop(p);
1492 _task->deal_with_reference(obj);
1493 _ref_counter--;
1494
1495 if (_ref_counter == 0) {
1496 // We have dealt with _ref_counter_limit references, pushing them
1497 // and objects reachable from them on to the local stack (and
1498 // possibly the global stack). Call G1CMTask::do_marking_step() to
1499 // process these entries.
1500 //
1501 // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1502 // there's nothing more to do (i.e. we're done with the entries that
1503 // were pushed as a result of the G1CMTask::deal_with_reference() calls
1504 // above) or we overflow.
1505 //
1506 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1507 // flag while there may still be some work to do. (See the comment at
1508 // the beginning of G1CMTask::do_marking_step() for those conditions -
1509 // one of which is reaching the specified time target.) It is only
1510 // when G1CMTask::do_marking_step() returns without setting the
1511 // has_aborted() flag that the marking step has completed.
1512 do {
1513 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1514 _task->do_marking_step(mark_step_duration_ms,
1515 false /* do_termination */,
1516 _is_serial);
1517 } while (_task->has_aborted() && !_cm->has_overflown());
1518 _ref_counter = _ref_counter_limit;
1519 }
1520 }
1521 }
1522 };
1523
1524 // 'Drain' oop closure used by both serial and parallel reference processing.
1525 // Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
1527 // do_marking_step routine, with an unbelievably large timeout value,
1528 // to drain the marking data structures of the remaining entries
1529 // added by the 'keep alive' oop closure above.
1530
1531 class G1CMDrainMarkingStackClosure: public VoidClosure {
1532 G1ConcurrentMark* _cm;
1533 G1CMTask* _task;
1534 bool _is_serial;
1535 public:
1536 G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1537 _cm(cm), _task(task), _is_serial(is_serial) {
1538 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1539 }
1540
1541 void do_void() {
1542 do {
1543 // We call G1CMTask::do_marking_step() to completely drain the local
1544 // and global marking stacks of entries pushed by the 'keep alive'
1545 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1546 //
1547 // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1548 // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
1550 // closure to the entries on the discovered ref lists) or we overflow
1551 // the global marking stack.
1552 //
1553 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1554 // flag while there may still be some work to do. (See the comment at
1555 // the beginning of G1CMTask::do_marking_step() for those conditions -
1556 // one of which is reaching the specified time target.) It is only
1557 // when G1CMTask::do_marking_step() returns without setting the
1558 // has_aborted() flag that the marking step has completed.
1559
1560 _task->do_marking_step(1000000000.0 /* something very large */,
1561 true /* do_termination */,
1562 _is_serial);
1563 } while (_task->has_aborted() && !_cm->has_overflown());
1564 }
1565 };
1566
1567 // Implementation of AbstractRefProcTaskExecutor for parallel
1568 // reference processing at the end of G1 concurrent marking
1569
1570 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
1571 private:
1572 G1CollectedHeap* _g1h;
1573 G1ConcurrentMark* _cm;
1574 WorkGang* _workers;
1575 uint _active_workers;
1576
1577 public:
1578 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1579 G1ConcurrentMark* cm,
1580 WorkGang* workers,
1581 uint n_workers) :
1582 _g1h(g1h), _cm(cm),
1583 _workers(workers), _active_workers(n_workers) { }
1584
1585 // Executes the given task using concurrent marking worker threads.
1586 virtual void execute(ProcessTask& task);
1587 virtual void execute(EnqueueTask& task);
1588 };
1589
1590 class G1CMRefProcTaskProxy: public AbstractGangTask {
1591 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1592 ProcessTask& _proc_task;
1593 G1CollectedHeap* _g1h;
1594 G1ConcurrentMark* _cm;
1595
1596 public:
1597 G1CMRefProcTaskProxy(ProcessTask& proc_task,
1598 G1CollectedHeap* g1h,
1599 G1ConcurrentMark* cm) :
1600 AbstractGangTask("Process reference objects in parallel"),
1601 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1602 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1603 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1604 }
1605
1606 virtual void work(uint worker_id) {
1607 ResourceMark rm;
1608 HandleMark hm;
1609 G1CMTask* task = _cm->task(worker_id);
1610 G1CMIsAliveClosure g1_is_alive(_g1h);
1611 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1612 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1613
1614 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1615 }
1616 };
1617
1618 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
1619 assert(_workers != NULL, "Need parallel worker threads.");
1620 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1621
1622 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1623
1624 // We need to reset the concurrency level before each
1625 // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() know
1627 // how many workers to wait for.
1628 _cm->set_concurrency(_active_workers);
1629 _workers->run_task(&proc_task_proxy);
1630 }
1631
1632 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
1633 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
1634 EnqueueTask& _enq_task;
1635
1636 public:
1637 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
1638 AbstractGangTask("Enqueue reference objects in parallel"),
1639 _enq_task(enq_task) { }
1640
1641 virtual void work(uint worker_id) {
1642 _enq_task.work(worker_id);
1643 }
1644 };
1645
1646 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1647 assert(_workers != NULL, "Need parallel worker threads.");
1648 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1649
1650 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1651
1652 // Not strictly necessary but...
1653 //
1654 // We need to reset the concurrency level before each
1655 // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() know
1657 // how many workers to wait for.
1658 _cm->set_concurrency(_active_workers);
1659 _workers->run_task(&enq_task_proxy);
1660 }
1661
1662 void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
1663 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
1664 }
1665
1666 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
1667 if (has_overflown()) {
1668 // Skip processing the discovered references if we have
1669 // overflown the global marking stack. Reference objects
1670 // only get discovered once so it is OK to not
1671 // de-populate the discovered reference lists. We could have,
1672 // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
1674 return;
1675 }
1676
1677 ResourceMark rm;
1678 HandleMark hm;
1679
1680 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1681
1682 // Is alive closure.
1683 G1CMIsAliveClosure g1_is_alive(g1h);
1684
1685 // Inner scope to exclude the cleaning of the string and symbol
1686 // tables from the displayed time.
1687 {
1688 GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1689
1690 ReferenceProcessor* rp = g1h->ref_processor_cm();
1691
1692 // See the comment in G1CollectedHeap::ref_processing_init()
1693 // about how reference processing currently works in G1.
1694
1695 // Set the soft reference policy
1696 rp->setup_policy(clear_all_soft_refs);
1697 assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1698
1699 // Instances of the 'Keep Alive' and 'Complete GC' closures used
1700 // in serial reference processing. Note these closures are also
    // used for serially processing (by the current thread) the
1702 // JNI references during parallel reference processing.
1703 //
1704 // These closures do not need to synchronize with the worker
1705 // threads involved in parallel reference processing as these
    // instances are executed serially by the current thread (i.e.
1707 // reference processing is not multi-threaded and is thus
1708 // performed by the current thread instead of a gang worker).
1709 //
1710 // The gang tasks involved in parallel reference processing create
1711 // their own instances of these closures, which do their own
1712 // synchronization among themselves.
1713 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1714 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1715
1716 // We need at least one active thread. If reference processing
1717 // is not multi-threaded we use the current (VMThread) thread,
1718 // otherwise we use the work gang from the G1CollectedHeap and
1719 // we utilize all the worker threads we can.
1720 bool processing_is_mt = rp->processing_is_mt();
1721 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
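    // Clamp the number of active workers to the range [1, _max_worker_id].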
1722 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
1723
1724 // Parallel processing task executor.
1725 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1726 g1h->workers(), active_workers);
1727 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1728
1729 // Set the concurrency level. The phase was already set prior to
1730 // executing the remark task.
1731 set_concurrency(active_workers);
1732
1733 // Set the degree of MT processing here. If the discovery was done MT,
1734 // the number of threads involved during discovery could differ from
1735 // the number of active workers. This is OK as long as the discovered
1736 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1737 rp->set_active_mt_degree(active_workers);
1738
1739 // Process the weak references.
1740 const ReferenceProcessorStats& stats =
1741 rp->process_discovered_references(&g1_is_alive,
1742 &g1_keep_alive,
1743 &g1_drain_mark_stack,
1744 executor,
1745 _gc_timer_cm);
1746 _gc_tracer_cm->report_gc_reference_stats(stats);
1747
1748 // The do_oop work routines of the keep_alive and drain_marking_stack
1749 // oop closures will set the has_overflown flag if we overflow the
1750 // global marking stack.
1751
1752 assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
1753 "Mark stack should be empty (unless it is out of memory)");
1754
1755 if (_global_mark_stack.is_out_of_memory()) {
1756 // This should have been done already when we tried to push an
1757 // entry on to the global mark stack. But let's do it again.
1758 set_has_overflown();
1759 }
1760
    assert(rp->num_q() == active_workers, "Reference processing queue count and active worker count should match");
1762
1763 rp->enqueue_discovered_references(executor);
1764
1765 rp->verify_no_references_recorded();
1766 assert(!rp->discovery_enabled(), "Post condition");
1767 }
1768
1769 if (has_overflown()) {
1770 // We can not trust g1_is_alive if the marking stack overflowed
1771 return;
1772 }
1773
1774 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1775
1776 // Unload Klasses, String, Symbols, Code Cache, etc.
1777 if (ClassUnloadingWithConcurrentMark) {
1778 bool purged_classes;
1779
1780 {
1781 GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm);
1782 purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
1783 }
1784
1785 {
1786 GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm);
1787 weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
1788 }
1789 }
1790
1791 if (G1StringDedup::is_enabled()) {
1792 GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm);
1793 G1StringDedup::unlink(&g1_is_alive);
1794 }
1795 }
1796
1797 void G1ConcurrentMark::swapMarkBitMaps() {
1798 G1CMBitMapRO* temp = _prevMarkBitMap;
1799 _prevMarkBitMap = (G1CMBitMapRO*)_nextMarkBitMap;
1800 _nextMarkBitMap = (G1CMBitMap*) temp;
1801 }
1802
1803 // Closure for marking entries in SATB buffers.
1804 class G1CMSATBBufferClosure : public SATBBufferClosure {
1805 private:
1806 G1CMTask* _task;
1807 G1CollectedHeap* _g1h;
1808
1809 // This is very similar to G1CMTask::deal_with_reference, but with
1810 // more relaxed requirements for the argument, so this must be more
1811 // circumspect about treating the argument as an object.
1812 void do_entry(void* entry) const {
1813 _task->increment_refs_reached();
1814 HeapRegion* hr = _g1h->heap_region_containing(entry);
1815 if (entry < hr->next_top_at_mark_start()) {
1816 // Until we get here, we don't know whether entry refers to a valid
1817 // object; it could instead have been a stale reference.
1818 oop obj = static_cast<oop>(entry);
1819 assert(obj->is_oop(true /* ignore mark word */),
1820 "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
1821 _task->make_reference_grey(obj);
1822 }
1823 }
1824
1825 public:
1826 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1827 : _task(task), _g1h(g1h) { }
1828
1829 virtual void do_buffer(void** buffer, size_t size) {
1830 for (size_t i = 0; i < size; ++i) {
1831 do_entry(buffer[i]);
1832 }
1833 }
1834 };
1835
1836 class G1RemarkThreadsClosure : public ThreadClosure {
1837 G1CMSATBBufferClosure _cm_satb_cl;
1838 G1CMOopClosure _cm_cl;
1839 MarkingCodeBlobClosure _code_cl;
1840 int _thread_parity;
1841
1842 public:
1843 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1844 _cm_satb_cl(task, g1h),
1845 _cm_cl(g1h, g1h->concurrent_mark(), task),
1846 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1847 _thread_parity(Threads::thread_claim_parity()) {}
1848
1849 void do_thread(Thread* thread) {
1850 if (thread->is_Java_thread()) {
1851 if (thread->claim_oops_do(true, _thread_parity)) {
1852 JavaThread* jt = (JavaThread*)thread;
1853
        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
        // however, oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
        // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1860 jt->nmethods_do(&_code_cl);
1861
1862 jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1863 }
1864 } else if (thread->is_VM_thread()) {
1865 if (thread->claim_oops_do(true, _thread_parity)) {
1866 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1867 }
1868 }
1869 }
1870 };
1871
1872 class G1CMRemarkTask: public AbstractGangTask {
1873 private:
1874 G1ConcurrentMark* _cm;
1875 public:
1876 void work(uint worker_id) {
1877 // Since all available tasks are actually started, we should
1878 // only proceed if we're supposed to be active.
1879 if (worker_id < _cm->active_tasks()) {
1880 G1CMTask* task = _cm->task(worker_id);
1881 task->record_start_time();
1882 {
1883 ResourceMark rm;
1884 HandleMark hm;
1885
1886 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1887 Threads::threads_do(&threads_f);
1888 }
1889
1890 do {
1891 task->do_marking_step(1000000000.0 /* something very large */,
1892 true /* do_termination */,
1893 false /* is_serial */);
1894 } while (task->has_aborted() && !_cm->has_overflown());
1895 // If we overflow, then we do not want to restart. We instead
1896 // want to abort remark and do concurrent marking again.
1897 task->record_end_time();
1898 }
1899 }
1900
1901 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1902 AbstractGangTask("Par Remark"), _cm(cm) {
1903 _cm->terminator()->reset_for_reuse(active_workers);
1904 }
1905 };
1906
1907 void G1ConcurrentMark::checkpointRootsFinalWork() {
1908 ResourceMark rm;
1909 HandleMark hm;
1910 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1911
1912 GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1913
1914 g1h->ensure_parsability(false);
1915
1916 // this is remark, so we'll use up all active threads
1917 uint active_workers = g1h->workers()->active_workers();
1918 set_concurrency_and_phase(active_workers, false /* concurrent */);
  // Leave _parallel_marking_threads at its
1920 // value originally calculated in the G1ConcurrentMark
1921 // constructor and pass values of the active workers
1922 // through the gang in the task.
1923
1924 {
1925 StrongRootsScope srs(active_workers);
1926
1927 G1CMRemarkTask remarkTask(this, active_workers);
1928 // We will start all available threads, even if we decide that the
1929 // active_workers will be fewer. The extra ones will just bail out
1930 // immediately.
1931 g1h->workers()->run_task(&remarkTask);
1932 }
1933
1934 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1935 guarantee(has_overflown() ||
1936 satb_mq_set.completed_buffers_num() == 0,
1937 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1938 BOOL_TO_STR(has_overflown()),
1939 satb_mq_set.completed_buffers_num());
1940
1941 print_stats();
1942 }
1943
1944 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
1945 // Note we are overriding the read-only view of the prev map here, via
1946 // the cast.
1947 ((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
1948 }
1949
1950 HeapRegion*
1951 G1ConcurrentMark::claim_region(uint worker_id) {
1952 // "checkpoint" the finger
1953 HeapWord* finger = _finger;
1954
1955 // _heap_end will not change underneath our feet; it only changes at
1956 // yield points.
1957 while (finger < _heap_end) {
1958 assert(_g1h->is_in_g1_reserved(finger), "invariant");
1959
1960 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1961 // Make sure that the reads below do not float before loading curr_region.
1962 OrderAccess::loadload();
    // The heap_region_containing() call above may return NULL as we always
    // scan and claim up to the end of the heap. In this case, just jump to
    // the next region.
1965 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1966
1967 // Is the gap between reading the finger and doing the CAS too long?
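    // Atomically try to advance the global finger from 'finger' to 'end'.
    // Atomic::cmpxchg_ptr() returns the previous value of _finger, so
    // res == finger means we won the race and have claimed the words in
    // [finger, end).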
1968 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
1969 if (res == finger && curr_region != NULL) {
1970 // we succeeded
1971 HeapWord* bottom = curr_region->bottom();
1972 HeapWord* limit = curr_region->next_top_at_mark_start();
1973
      // Notice that _finger == end cannot be guaranteed here since
      // someone else might have moved the finger even further.
1976 assert(_finger >= end, "the finger should have moved forward");
1977
1978 if (limit > bottom) {
1979 return curr_region;
1980 } else {
1981 assert(limit == bottom,
1982 "the region limit should be at bottom");
1983 // we return NULL and the caller should try calling
1984 // claim_region() again.
1985 return NULL;
1986 }
1987 } else {
1988 assert(_finger > finger, "the finger should have moved forward");
1989 // read it again
1990 finger = _finger;
1991 }
1992 }
1993
1994 return NULL;
1995 }
1996
1997 #ifndef PRODUCT
1998 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
1999 private:
2000 G1CollectedHeap* _g1h;
2001 const char* _phase;
2002 int _info;
2003
2004 public:
2005 VerifyNoCSetOops(const char* phase, int info = -1) :
2006 _g1h(G1CollectedHeap::heap()),
2007 _phase(phase),
2008 _info(info)
2009 { }
2010
2011 void operator()(G1TaskQueueEntry task_entry) const {
2012 if (task_entry.is_array_slice()) {
2013 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice()));
2014 return;
2015 }
2016 guarantee(task_entry.obj()->is_oop(),
2017 "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
2018 p2i(task_entry.obj()), _phase, _info);
2019 guarantee(!_g1h->is_in_cset(task_entry.obj()),
2020 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
2021 p2i(task_entry.obj()), _phase, _info);
2022 }
2023 };
2024
2025 void G1ConcurrentMark::verify_no_cset_oops() {
2026 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2027 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) {
2028 return;
2029 }
2030
2031 // Verify entries on the global mark stack
2032 _global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
2033
2034 // Verify entries on the task queues
2035 for (uint i = 0; i < _max_worker_id; ++i) {
2036 G1CMTaskQueue* queue = _task_queues->queue(i);
2037 queue->iterate(VerifyNoCSetOops("Queue", i));
2038 }
2039
2040 // Verify the global finger
2041 HeapWord* global_finger = finger();
2042 if (global_finger != NULL && global_finger < _heap_end) {
2043 // Since we always iterate over all regions, we might get a NULL HeapRegion
2044 // here.
2045 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
2046 guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
2047 "global finger: " PTR_FORMAT " region: " HR_FORMAT,
2048 p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
2049 }
2050
2051 // Verify the task fingers
2052 assert(parallel_marking_threads() <= _max_worker_id, "sanity");
2053 for (uint i = 0; i < parallel_marking_threads(); ++i) {
2054 G1CMTask* task = _tasks[i];
2055 HeapWord* task_finger = task->finger();
2056 if (task_finger != NULL && task_finger < _heap_end) {
2057 // See above note on the global finger verification.
2058 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
2059 guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
2060 !task_hr->in_collection_set(),
2061 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
2062 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
2063 }
2064 }
2065 }
#endif // PRODUCT

void G1ConcurrentMark::create_live_data() {
2068 _g1h->g1_rem_set()->create_card_live_data(_parallel_workers, _nextMarkBitMap);
2069 }
2070
2071 void G1ConcurrentMark::finalize_live_data() {
2072 _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _nextMarkBitMap);
2073 }
2074
2075 void G1ConcurrentMark::verify_live_data() {
2076 _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _nextMarkBitMap);
2077 }
2078
2079 void G1ConcurrentMark::clear_live_data(WorkGang* workers) {
2080 _g1h->g1_rem_set()->clear_card_live_data(workers);
2081 }
2082
2083 #ifdef ASSERT
2084 void G1ConcurrentMark::verify_live_data_clear() {
2085 _g1h->g1_rem_set()->verify_card_live_data_is_clear();
2086 }
2087 #endif
2088
2089 void G1ConcurrentMark::print_stats() {
2090 if (!log_is_enabled(Debug, gc, stats)) {
2091 return;
2092 }
2093 log_debug(gc, stats)("---------------------------------------------------------------------");
2094 for (size_t i = 0; i < _active_tasks; ++i) {
2095 _tasks[i]->print_stats();
2096 log_debug(gc, stats)("---------------------------------------------------------------------");
2097 }
2098 }
2099
2100 void G1ConcurrentMark::abort() {
2101 if (!cmThread()->during_cycle() || _has_aborted) {
2102 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2103 return;
2104 }
2105
2106 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2107 // concurrent bitmap clearing.
2108 {
2109 GCTraceTime(Debug, gc)("Clear Next Bitmap");
2110 clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
2111 }
2112 // Note we cannot clear the previous marking bitmap here
2113 // since VerifyDuringGC verifies the objects marked during
2114 // a full GC against the previous bitmap.
2115
2116 {
2117 GCTraceTime(Debug, gc)("Clear Live Data");
2118 clear_live_data(_g1h->workers());
2119 }
2120 DEBUG_ONLY({
2121 GCTraceTime(Debug, gc)("Verify Live Data Clear");
2122 verify_live_data_clear();
2123 })
2124 // Empty mark stack
2125 reset_marking_state();
2126 for (uint i = 0; i < _max_worker_id; ++i) {
2127 _tasks[i]->clear_region_fields();
2128 }
2129 _first_overflow_barrier_sync.abort();
2130 _second_overflow_barrier_sync.abort();
2131 _has_aborted = true;
2132
2133 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2134 satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking; we'll read
  // the expected_active value from the SATB queue set.
2137 satb_mq_set.set_active_all_threads(
2138 false, /* new active value */
2139 satb_mq_set.is_active() /* expected_active */);
2140 }
2141
2142 static void print_ms_time_info(const char* prefix, const char* name,
2143 NumberSeq& ns) {
2144 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2145 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2146 if (ns.num() > 0) {
2147 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
2148 prefix, ns.sd(), ns.maximum());
2149 }
2150 }
2151
2152 void G1ConcurrentMark::print_summary_info() {
2153 Log(gc, marking) log;
2154 if (!log.is_trace()) {
2155 return;
2156 }
2157
2158 log.trace(" Concurrent marking:");
2159 print_ms_time_info(" ", "init marks", _init_times);
2160 print_ms_time_info(" ", "remarks", _remark_times);
2161 {
2162 print_ms_time_info(" ", "final marks", _remark_mark_times);
    print_ms_time_info(" ", "weak refs", _remark_weak_ref_times);
  }
2166 print_ms_time_info(" ", "cleanups", _cleanup_times);
2167 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).",
2168 _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2169 if (G1ScrubRemSets) {
2170 log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).",
2171 _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2172 }
2173 log.trace(" Total stop_world time = %8.2f s.",
2174 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2175 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).",
2176 cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
2177 }
2178
2179 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2180 _parallel_workers->print_worker_threads_on(st);
2181 }
2182
2183 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const {
2184 _parallel_workers->threads_do(tc);
2185 }
2186
2187 void G1ConcurrentMark::print_on_error(outputStream* st) const {
2188 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2189 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
2190 _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
2191 _nextMarkBitMap->print_on_error(st, " Next Bits: ");
2192 }
2193
2194 // Closure for iteration over bitmaps
2195 class G1CMBitMapClosure : public BitMapClosure {
2196 private:
2197 // the bitmap that is being iterated over
2198 G1CMBitMap* _nextMarkBitMap;
2199 G1ConcurrentMark* _cm;
2200 G1CMTask* _task;
2201
2202 public:
  G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) :
    _nextMarkBitMap(nextMarkBitMap), _cm(cm), _task(task) { }
2205
2206 bool do_bit(size_t offset) {
2207 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
2208 assert(_nextMarkBitMap->isMarked(addr), "invariant");
    assert(addr < _cm->finger(), "invariant");
2210 assert(addr >= _task->finger(), "invariant");
2211
2212 // We move that task's local finger along.
2213 _task->move_finger_to(addr);
2214
2215 _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
2216 // we only partially drain the local queue and global stack
2217 _task->drain_local_queue(true);
2218 _task->drain_global_stack(true);
2219
2220 // if the has_aborted flag has been raised, we need to bail out of
2221 // the iteration
2222 return !_task->has_aborted();
2223 }
2224 };
2225
2226 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
2227 ReferenceProcessor* result = g1h->ref_processor_cm();
2228 assert(result != NULL, "CM reference processor should not be NULL");
2229 return result;
2230 }
2231
2232 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
2233 G1ConcurrentMark* cm,
2234 G1CMTask* task)
2235 : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
2236 _g1h(g1h), _cm(cm), _task(task)
2237 { }
2238
2239 void G1CMTask::setup_for_region(HeapRegion* hr) {
2240 assert(hr != NULL,
2241 "claim_region() should have filtered out NULL regions");
2242 _curr_region = hr;
2243 _finger = hr->bottom();
2244 update_region_limit();
2245 }
2246
2247 void G1CMTask::update_region_limit() {
2248 HeapRegion* hr = _curr_region;
2249 HeapWord* bottom = hr->bottom();
2250 HeapWord* limit = hr->next_top_at_mark_start();
2251
2252 if (limit == bottom) {
2253 // The region was collected underneath our feet.
2254 // We set the finger to bottom to ensure that the bitmap
2255 // iteration that will follow this will not do anything.
2256 // (this is not a condition that holds when we set the region up,
2257 // as the region is not supposed to be empty in the first place)
2258 _finger = bottom;
2259 } else if (limit >= _region_limit) {
2260 assert(limit >= _finger, "peace of mind");
2261 } else {
2262 assert(limit < _region_limit, "only way to get here");
2263 // This can happen under some pretty unusual circumstances. An
2264 // evacuation pause empties the region underneath our feet (NTAMS
2265 // at bottom). We then do some allocation in the region (NTAMS
2266 // stays at bottom), followed by the region being used as a GC
2267 // alloc region (NTAMS will move to top() and the objects
2268 // originally below it will be grayed). All objects now marked in
2269 // the region are explicitly grayed, if below the global finger,
    // and in fact we do not need to scan anything else. So, we simply
2271 // set _finger to be limit to ensure that the bitmap iteration
2272 // doesn't do anything.
2273 _finger = limit;
2274 }
2275
2276 _region_limit = limit;
2277 }
2278
2279 void G1CMTask::giveup_current_region() {
2280 assert(_curr_region != NULL, "invariant");
2281 clear_region_fields();
2282 }
2283
2284 void G1CMTask::clear_region_fields() {
2285 // Values for these three fields that indicate that we're not
2286 // holding on to a region.
2287 _curr_region = NULL;
2288 _finger = NULL;
2289 _region_limit = NULL;
2290 }
2291
2292 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2293 if (cm_oop_closure == NULL) {
2294 assert(_cm_oop_closure != NULL, "invariant");
2295 } else {
2296 assert(_cm_oop_closure == NULL, "invariant");
2297 }
2298 _cm_oop_closure = cm_oop_closure;
2299 }
2300
2301 void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
2302 guarantee(nextMarkBitMap != NULL, "invariant");
2303 _nextMarkBitMap = nextMarkBitMap;
2304 clear_region_fields();
2305
2306 _calls = 0;
2307 _elapsed_time_ms = 0.0;
2308 _termination_time_ms = 0.0;
2309 _termination_start_time_ms = 0.0;
2310 }
2311
2312 bool G1CMTask::should_exit_termination() {
2313 regular_clock_call();
2314 // This is called when we are in the termination protocol. We should
2315 // quit if, for some reason, this task wants to abort or the global
2316 // stack is not empty (this means that we can get work from it).
2317 return !_cm->mark_stack_empty() || has_aborted();
2318 }
2319
2320 void G1CMTask::reached_limit() {
2321 assert(_words_scanned >= _words_scanned_limit ||
2322 _refs_reached >= _refs_reached_limit ,
2323 "shouldn't have been called otherwise");
2324 regular_clock_call();
2325 }
2326
2327 void G1CMTask::regular_clock_call() {
2328 if (has_aborted()) return;
2329
2330 // First, we need to recalculate the words scanned and refs reached
2331 // limits for the next clock call.
2332 recalculate_limits();
2333
  // During the regular clock call we do the following:
2335
2336 // (1) If an overflow has been flagged, then we abort.
2337 if (_cm->has_overflown()) {
2338 set_has_aborted();
2339 return;
2340 }
2341
2342 // If we are not concurrent (i.e. we're doing remark) we don't need
2343 // to check anything else. The other steps are only needed during
2344 // the concurrent marking phase.
2345 if (!concurrent()) return;
2346
2347 // (2) If marking has been aborted for Full GC, then we also abort.
2348 if (_cm->has_aborted()) {
2349 set_has_aborted();
2350 return;
2351 }
2352
2353 double curr_time_ms = os::elapsedVTime() * 1000.0;
2354
  // (3) We check whether we should yield. If we have to, then we abort.
2356 if (SuspendibleThreadSet::should_yield()) {
2357 // We should yield. To do this we abort the task. The caller is
2358 // responsible for yielding.
2359 set_has_aborted();
2360 return;
2361 }
2362
  // (4) We check whether we've reached our time quota. If we have,
2364 // then we abort.
2365 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2366 if (elapsed_time_ms > _time_target_ms) {
2367 set_has_aborted();
2368 _has_timed_out = true;
2369 return;
2370 }
2371
  // (5) Finally, we check whether there are enough completed SATB
  // buffers available for processing. If there are, we abort.
2374 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2375 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2376 // we do need to process SATB buffers, we'll abort and restart
2377 // the marking task to do so
2378 set_has_aborted();
2379 return;
2380 }
2381 }
2382
2383 void G1CMTask::recalculate_limits() {
2384 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2385 _words_scanned_limit = _real_words_scanned_limit;
2386
2387 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2388 _refs_reached_limit = _real_refs_reached_limit;
2389 }
2390
2391 void G1CMTask::decrease_limits() {
2392 // This is called when we believe that we're going to do an infrequent
  // operation which will increase the per-byte scanned cost (i.e. move
2394 // entries to/from the global stack). It basically tries to decrease the
2395 // scanning limit so that the clock is called earlier.
2396
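  // Since recalculate_limits() set _real_words_scanned_limit to
  // _words_scanned + words_scanned_period, subtracting three quarters of
  // a period here leaves only about a quarter of a full period before
  // the next clock call (and likewise for the refs-reached limit).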
2397 _words_scanned_limit = _real_words_scanned_limit -
2398 3 * words_scanned_period / 4;
2399 _refs_reached_limit = _real_refs_reached_limit -
2400 3 * refs_reached_period / 4;
2401 }
2402
2403 void G1CMTask::move_entries_to_global_stack() {
2404 // Local array where we'll store the entries that will be popped
2405 // from the local queue.
2406 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2407
2408 size_t n = 0;
2409 G1TaskQueueEntry task_entry;
2410 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2411 buffer[n].assign(task_entry);
2412 ++n;
2413 }
2414 if (n < G1CMMarkStack::EntriesPerChunk) {
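    // Null-terminate a partially filled chunk; get_entries_from_global_stack()
    // relies on this sentinel entry to find the end of a chunk.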
2415 buffer[n].assign(G1TaskQueueEntry());
2416 }
2417
2418 if (n > 0) {
2419 if (!_cm->mark_stack_push(buffer)) {
2420 set_has_aborted();
2421 }
2422 }
2423
2424 // This operation was quite expensive, so decrease the limits.
2425 decrease_limits();
2426 }
2427
2428 bool G1CMTask::get_entries_from_global_stack() {
2429 // Local array where we'll store the entries that will be popped
2430 // from the global stack.
2431 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2432
2433 if (!_cm->mark_stack_pop(buffer)) {
2434 return false;
2435 }
2436
2437 // We did actually pop at least one entry.
2438 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2439 G1TaskQueueEntry task_entry = buffer[i];
2440 if (task_entry.is_null()) {
2441 break;
2442 }
2443 assert(task_entry.is_array_slice() || task_entry.obj()->is_oop(), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2444 bool success = _task_queue->push(task_entry);
2445 // We only call this when the local queue is empty or under a
2446 // given target limit. So, we do not expect this push to fail.
2447 assert(success, "invariant");
2448 }
2449
2450 // This operation was quite expensive, so decrease the limits
2451 decrease_limits();
2452 return true;
2453 }
2454
2455 void G1CMTask::drain_local_queue(bool partially) {
2456 if (has_aborted()) {
2457 return;
2458 }
2459
  // Decide what the target size is, depending on whether we're going to
2461 // drain it partially (so that other tasks can steal if they run out
2462 // of things to do) or totally (at the very end).
2463 size_t target_size;
2464 if (partially) {
2465 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2466 } else {
2467 target_size = 0;
2468 }
2469
2470 if (_task_queue->size() > target_size) {
2471 G1TaskQueueEntry entry;
2472 bool ret = _task_queue->pop_local(entry);
2473 while (ret) {
2474 scan_task_entry(entry);
2475 if (_task_queue->size() <= target_size || has_aborted()) {
2476 ret = false;
2477 } else {
2478 ret = _task_queue->pop_local(entry);
2479 }
2480 }
2481 }
2482 }
2483
2484 void G1CMTask::drain_global_stack(bool partially) {
2485 if (has_aborted()) return;
2486
2487 // We have a policy to drain the local queue before we attempt to
2488 // drain the global stack.
2489 assert(partially || _task_queue->size() == 0, "invariant");
2490
  // Decide what the target size is, depending on whether we're going to
2492 // drain it partially (so that other tasks can steal if they run out
2493 // of things to do) or totally (at the very end).
  // Notice that when draining the global mark stack partially, due to the raciness
2495 // of the mark stack size update we might in fact drop below the target. But,
2496 // this is not a problem.
2497 // In case of total draining, we simply process until the global mark stack is
2498 // totally empty, disregarding the size counter.
2499 if (partially) {
2500 size_t const target_size = _cm->partial_mark_stack_size_target();
2501 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2502 if (get_entries_from_global_stack()) {
2503 drain_local_queue(partially);
2504 }
2505 }
2506 } else {
2507 while (!has_aborted() && get_entries_from_global_stack()) {
2508 drain_local_queue(partially);
2509 }
2510 }
2511 }
2512
// The SATB queue code makes several assumptions about whether to call the
// par or non-par versions of its methods. This is why some of the code is
2515 // replicated. We should really get rid of the single-threaded version
2516 // of the code to simplify things.
2517 void G1CMTask::drain_satb_buffers() {
2518 if (has_aborted()) return;
2519
2520 // We set this so that the regular clock knows that we're in the
2521 // middle of draining buffers and doesn't set the abort flag when it
2522 // notices that SATB buffers are available for draining. It'd be
  // very counterproductive if it did that. :-)
2524 _draining_satb_buffers = true;
2525
2526 G1CMSATBBufferClosure satb_cl(this, _g1h);
2527 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2528
2529 // This keeps claiming and applying the closure to completed buffers
2530 // until we run out of buffers or we need to abort.
2531 while (!has_aborted() &&
2532 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2533 regular_clock_call();
2534 }
2535
2536 _draining_satb_buffers = false;
2537
2538 assert(has_aborted() ||
2539 concurrent() ||
2540 satb_mq_set.completed_buffers_num() == 0, "invariant");
2541
  // Again, this was a potentially expensive operation; decrease the
  // limits to get the regular clock called early.
2544 decrease_limits();
2545 }
2546
2547 void G1CMTask::print_stats() {
2548 log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
2549 _worker_id, _calls);
2550 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2551 _elapsed_time_ms, _termination_time_ms);
2552 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
2553 _step_times_ms.num(), _step_times_ms.avg(),
2554 _step_times_ms.sd());
2555 log_debug(gc, stats)(" max = %1.2lfms, total = %1.2lfms",
2556 _step_times_ms.maximum(), _step_times_ms.sum());
2557 }
2558
2559 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) {
2560 return _task_queues->steal(worker_id, hash_seed, task_entry);
2561 }
2562
2563 /*****************************************************************************
2564
2565 The do_marking_step(time_target_ms, ...) method is the building
2566 block of the parallel marking framework. It can be called in parallel
2567 with other invocations of do_marking_step() on different tasks
2568 (but only one per task, obviously) and concurrently with the
2569 mutator threads, or during remark, hence it eliminates the need
2570 for two versions of the code. When called during remark, it will
2571 pick up from where the task left off during the concurrent marking
2572 phase. Interestingly, tasks are also claimable during evacuation
    pauses, since do_marking_step() ensures that it aborts before
2574 it needs to yield.
2575
2576 The data structures that it uses to do marking work are the
2577 following:
2578
2579 (1) Marking Bitmap. If there are gray objects that appear only
2580 on the bitmap (this happens either when dealing with an overflow
2581 or when the initial marking phase has simply marked the roots
2582 and didn't push them on the stack), then tasks claim heap
2583 regions whose bitmap they then scan to find gray objects. A
2584 global finger indicates where the end of the last claimed region
2585 is. A local finger indicates how far into the region a task has
2586 scanned. The two fingers are used to determine how to gray an
2587 object (i.e. whether simply marking it is OK, as it will be
    visited by a task in the future, or whether it needs to also be
2589 pushed on a stack).
2590
    (2) Local Queue. Each task has a local queue, which it accesses
    reasonably efficiently. Other tasks can steal from
2593 it when they run out of work. Throughout the marking phase, a
2594 task attempts to keep its local queue short but not totally
2595 empty, so that entries are available for stealing by other
2596 tasks. Only when there is no more work, a task will totally
2597 drain its local queue.
2598
2599 (3) Global Mark Stack. This handles local queue overflow. During
2600 marking only sets of entries are moved between it and the local
    queues, as access to it requires a mutex and more fine-grained
    interaction with it might cause contention. If it
2603 overflows, then the marking phase should restart and iterate
2604 over the bitmap to identify gray objects. Throughout the marking
2605 phase, tasks attempt to keep the global mark stack at a small
2606 length but not totally empty, so that entries are available for
2607 popping by other tasks. Only when there is no more work, tasks
2608 will totally drain the global mark stack.
2609
2610 (4) SATB Buffer Queue. This is where completed SATB buffers are
2611 made available. Buffers are regularly removed from this queue
2612 and scanned for roots, so that the queue doesn't get too
2613 long. During remark, all completed buffers are processed, as
2614 well as the filled in parts of any uncompleted buffers.
2615
2616 The do_marking_step() method tries to abort when the time target
2617 has been reached. There are a few other cases when the
2618 do_marking_step() method also aborts:
2619
2620 (1) When the marking phase has been aborted (after a Full GC).
2621
2622 (2) When a global overflow (on the global stack) has been
2623 triggered. Before the task aborts, it will actually sync up with
2624 the other tasks to ensure that all the marking data structures
2625 (local queues, stacks, fingers etc.) are re-initialized so that
2626 when do_marking_step() completes, the marking phase can
2627 immediately restart.
2628
2629 (3) When enough completed SATB buffers are available. The
2630 do_marking_step() method only tries to drain SATB buffers right
2631 at the beginning. So, if enough buffers are available, the
2632 marking step aborts and the SATB buffers are processed at
2633 the beginning of the next invocation.
2634
    (4) To yield. When we have to yield, we abort and do the yield
    right at the end of do_marking_step(). This saves us a lot of
    hassle as, by yielding, we might allow a Full GC. If that
    happens, objects will be compacted underneath our feet, the
    heap might shrink, etc. We avoid having to check for all this
    by simply aborting and doing the yield right at the end.
2641
2642 From the above it follows that the do_marking_step() method should
2643 be called in a loop (or, otherwise, regularly) until it completes.
2644
2645 If a marking step completes without its has_aborted() flag being
2646 true, it means it has completed the current marking phase (and
2647 also all other marking tasks have done so and have all synced up).
2648
2649 A method called regular_clock_call() is invoked "regularly" (in
2650 sub ms intervals) throughout marking. It is this clock method that
2651 checks all the abort conditions which were mentioned above and
2652 decides when the task should abort. A work-based scheme is used to
2653 trigger this clock method: when the number of object words the
2654 marking phase has scanned or the number of references the marking
    phase has visited reaches a given limit. Additional invocations of
    the clock method have been planted in a few other strategic places
2657 too. The initial reason for the clock method was to avoid calling
2658 vtime too regularly, as it is quite expensive. So, once it was in
2659 place, it was natural to piggy-back all the other conditions on it
2660 too and not constantly check them throughout the code.
2661
2662 If do_termination is true then do_marking_step will enter its
2663 termination protocol.
2664
2665 The value of is_serial must be true when do_marking_step is being
2666 called serially (i.e. by the VMThread) and do_marking_step should
2667 skip any synchronization in the termination and overflow code.
2668 Examples include the serial remark code and the serial reference
2669 processing closures.
2670
2671 The value of is_serial must be false when do_marking_step is
2672 being called by any of the worker threads in a work gang.
2673 Examples include the concurrent marking code (CMMarkingTask),
2674 the MT remark code, and the MT reference processing closures.
2675
2676 *****************************************************************************/
2677
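// Illustrative sketch only (not a prescription; see the actual callers,
// e.g. G1CMDrainMarkingStackClosure::do_void() and G1CMRemarkTask::work()
// above): do_marking_step() is typically driven by a retry loop of the form
//
//   do {
//     task->do_marking_step(target_ms,
//                           true /* do_termination */,
//                           is_serial);
//   } while (task->has_aborted() && !cm->has_overflown());
//
// where 'task', 'cm', 'target_ms' and 'is_serial' stand in for the caller's
// own locals. The step is retried until it either completes without
// aborting or the global mark stack has overflown, in which case marking
// has to restart.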
2678 void G1CMTask::do_marking_step(double time_target_ms,
2679 bool do_termination,
2680 bool is_serial) {
2681 assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2682 assert(concurrent() == _cm->concurrent(), "they should be the same");
2683
2684 G1Policy* g1_policy = _g1h->g1_policy();
2685 assert(_task_queues != NULL, "invariant");
2686 assert(_task_queue != NULL, "invariant");
2687 assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
2688
2689 assert(!_claimed,
2690 "only one thread should claim this task at any one time");
2691
  // OK, this doesn't safeguard against all possible scenarios, as it is
2693 // possible for two threads to set the _claimed flag at the same
2694 // time. But it is only for debugging purposes anyway and it will
2695 // catch most problems.
2696 _claimed = true;
2697
2698 _start_time_ms = os::elapsedVTime() * 1000.0;
2699
2700 // If do_stealing is true then do_marking_step will attempt to
2701 // steal work from the other G1CMTasks. It only makes sense to
2702 // enable stealing when the termination protocol is enabled
2703 // and do_marking_step() is not being called serially.
2704 bool do_stealing = do_termination && !is_serial;
2705
2706 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2707 _time_target_ms = time_target_ms - diff_prediction_ms;
2708
2709 // set up the variables that are used in the work-based scheme to
2710 // call the regular clock method
2711 _words_scanned = 0;
2712 _refs_reached = 0;
2713 recalculate_limits();
2714
2715 // clear all flags
2716 clear_has_aborted();
2717 _has_timed_out = false;
2718 _draining_satb_buffers = false;
2719
2720 ++_calls;
2721
2722 // Set up the bitmap and oop closures. Anything that uses them is
2723 // eventually called from this method, so it is OK to allocate these
  // on the stack.
2725 G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
2726 G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
2727 set_cm_oop_closure(&cm_oop_closure);
2728
2729 if (_cm->has_overflown()) {
2730 // This can happen if the mark stack overflows during a GC pause
2731 // and this task, after a yield point, restarts. We have to abort
2732 // as we need to get into the overflow protocol which happens
2733 // right at the end of this task.
2734 set_has_aborted();
2735 }
2736
2737 // First drain any available SATB buffers. After this, we will not
2738 // look at SATB buffers before the next invocation of this method.
2739 // If enough completed SATB buffers are queued up, the regular clock
2740 // will abort this task so that it restarts.
2741 drain_satb_buffers();
2742 // ...then partially drain the local queue and the global stack
2743 drain_local_queue(true);
2744 drain_global_stack(true);
2745
2746 do {
2747 if (!has_aborted() && _curr_region != NULL) {
2748 // This means that we're already holding on to a region.
2749 assert(_finger != NULL, "if region is not NULL, then the finger "
2750 "should not be NULL either");
2751
2752 // We might have restarted this task after an evacuation pause
2753 // which might have evacuated the region we're holding on to
2754 // underneath our feet. Let's read its limit again to make sure
2755 // that we do not iterate over a region of the heap that
2756 // contains garbage (update_region_limit() will also move
2757 // _finger to the start of the region if it is found empty).
2758 update_region_limit();
2759 // We will start from _finger not from the start of the region,
2760 // as we might be restarting this task after aborting half-way
2761 // through scanning this region. In this case, _finger points to
2762 // the address where we last found a marked object. If this is a
2763 // fresh region, _finger points to start().
2764 MemRegion mr = MemRegion(_finger, _region_limit);
2765
2766 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2767 "humongous regions should go around loop once only");
2768
2769 // Some special cases:
2770 // If the memory region is empty, we can just give up the region.
2771 // If the current region is humongous then we only need to check
2772 // the bitmap for the bit associated with the start of the object,
2773 // scan the object if it's live, and give up the region.
2774 // Otherwise, let's iterate over the bitmap of the part of the region
2775 // that is left.
2776 // If the iteration is successful, give up the region.
2777 if (mr.is_empty()) {
2778 giveup_current_region();
2779 regular_clock_call();
2780 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2781 if (_nextMarkBitMap->isMarked(mr.start())) {
2782 // The object is marked - apply the closure
2783 BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
2784 bitmap_closure.do_bit(offset);
2785 }
2786 // Even if this task aborted while scanning the humongous object
2787 // we can (and should) give up the current region.
2788 giveup_current_region();
2789 regular_clock_call();
2790 } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
2791 giveup_current_region();
2792 regular_clock_call();
2793 } else {
2794 assert(has_aborted(), "currently the only way to do so");
2795 // The only way to abort the bitmap iteration is to return
2796 // false from the do_bit() method. However, inside the
2797 // do_bit() method we move the _finger to point to the
2798 // object currently being looked at. So, if we bail out, we
2799 // have definitely set _finger to something non-null.
2800 assert(_finger != NULL, "invariant");
2801
2802 // Region iteration was actually aborted. So now _finger
2803 // points to the address of the object we last scanned. If we
2804 // leave it there, when we restart this task, we will rescan
2805 // the object. It is easy to avoid this. We move the finger by
2806 // enough to point to the next possible object header (the
2807 // bitmap knows by how much we need to move it as it knows its
2808 // granularity).
2809 assert(_finger < _region_limit, "invariant");
2810 HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
2811 // Check if bitmap iteration was aborted while scanning the last object
2812 if (new_finger >= _region_limit) {
2813 giveup_current_region();
2814 } else {
2815 move_finger_to(new_finger);
2816 }
2817 }
2818 }
2819 // At this point we have either completed iterating over the
2820 // region we were holding on to, or we have aborted.
2821
2822 // We then partially drain the local queue and the global stack.
2823 // (Do we really need this?)
2824 drain_local_queue(true);
2825 drain_global_stack(true);
2826
2827 // Read the note on the claim_region() method on why it might
2828 // return NULL with potentially more regions available for
2829 // claiming and why we have to check out_of_regions() to determine
2830 // whether we're done or not.
2831 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2832 // We are going to try to claim a new region. We should have
2833 // given up on the previous one.
2834 // Separated the asserts so that we know which one fires.
2835 assert(_curr_region == NULL, "invariant");
2836 assert(_finger == NULL, "invariant");
2837 assert(_region_limit == NULL, "invariant");
2838 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2839 if (claimed_region != NULL) {
2840 // Yes, we managed to claim one
2841 setup_for_region(claimed_region);
2842 assert(_curr_region == claimed_region, "invariant");
2843 }
2844 // It is important to call the regular clock here. It might take
2845 // a while to claim a region if, for example, we hit a large
2846 // block of empty regions. So we need to call the regular clock
2847 // method once round the loop to make sure it's called
2848 // frequently enough.
2849 regular_clock_call();
2850 }
2851
2852 if (!has_aborted() && _curr_region == NULL) {
2853 assert(_cm->out_of_regions(),
2854 "at this point we should be out of regions");
2855 }
  } while (_curr_region != NULL && !has_aborted());
2857
2858 if (!has_aborted()) {
2859 // We cannot check whether the global stack is empty, since other
2860 // tasks might be pushing objects to it concurrently.
2861 assert(_cm->out_of_regions(),
2862 "at this point we should be out of regions");
2863 // Try to reduce the number of available SATB buffers so that
2864 // remark has less work to do.
2865 drain_satb_buffers();
2866 }
2867
2868 // Since we've done everything else, we can now totally drain the
2869 // local queue and global stack.
2870 drain_local_queue(false);
2871 drain_global_stack(false);
2872
  // Attempt at work stealing from other tasks' queues.
2874 if (do_stealing && !has_aborted()) {
2875 // We have not aborted. This means that we have finished all that
2876 // we could. Let's try to do some stealing...
2877
2878 // We cannot check whether the global stack is empty, since other
2879 // tasks might be pushing objects to it concurrently.
2880 assert(_cm->out_of_regions() && _task_queue->size() == 0,
2881 "only way to reach here");
2882 while (!has_aborted()) {
2883 G1TaskQueueEntry entry;
2884 if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) {
2885 scan_task_entry(entry);
2886
2887 // And since we're towards the end, let's totally drain the
2888 // local queue and global stack.
2889 drain_local_queue(false);
2890 drain_global_stack(false);
2891 } else {
2892 break;
2893 }
2894 }
2895 }
2896
2897 // We still haven't aborted. Now, let's try to get into the
2898 // termination protocol.
2899 if (do_termination && !has_aborted()) {
2900 // We cannot check whether the global stack is empty, since other
2901 // tasks might be concurrently pushing objects on it.
2902 // Separated the asserts so that we know which one fires.
2903 assert(_cm->out_of_regions(), "only way to reach here");
2904 assert(_task_queue->size() == 0, "only way to reach here");
2905 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2906
2907 // The G1CMTask class also extends the TerminatorTerminator class,
2908 // hence its should_exit_termination() method will also decide
2909 // whether to exit the termination protocol or not.
2910 bool finished = (is_serial ||
2911 _cm->terminator()->offer_termination(this));
2912 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2913 _termination_time_ms +=
2914 termination_end_time_ms - _termination_start_time_ms;
2915
2916 if (finished) {
2917 // We're all done.
2918
2919 if (_worker_id == 0) {
2920 // let's allow task 0 to do this
2921 if (concurrent()) {
2922 assert(_cm->concurrent_marking_in_progress(), "invariant");
2923 // we need to set this to false before the next
2924 // safepoint. This way we ensure that the marking phase
2925 // doesn't observe any more heap expansions.
2926 _cm->clear_concurrent_marking_in_progress();
2927 }
2928 }
2929
2930 // We can now guarantee that the global stack is empty, since
2931 // all other tasks have finished. We separated the guarantees so
2932 // that, if a condition is false, we can immediately find out
2933 // which one.
2934 guarantee(_cm->out_of_regions(), "only way to reach here");
2935 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2936 guarantee(_task_queue->size() == 0, "only way to reach here");
2937 guarantee(!_cm->has_overflown(), "only way to reach here");
2938 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
2939 } else {
      // Apparently there's more work to do. Let's abort this task.
      // We will restart it and hopefully find more things to do.
2942 set_has_aborted();
2943 }
2944 }
2945
2946 // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was stack-allocated in this frame doesn't
2948 // escape it by accident.
2949 set_cm_oop_closure(NULL);
2950 double end_time_ms = os::elapsedVTime() * 1000.0;
2951 double elapsed_time_ms = end_time_ms - _start_time_ms;
2952 // Update the step history.
2953 _step_times_ms.add(elapsed_time_ms);
2954
2955 if (has_aborted()) {
2956 // The task was aborted for some reason.
2957 if (_has_timed_out) {
2958 double diff_ms = elapsed_time_ms - _time_target_ms;
2959 // Keep statistics of how well we did with respect to hitting
2960 // our target only if we actually timed out (if we aborted for
2961 // other reasons, then the results might get skewed).
2962 _marking_step_diffs_ms.add(diff_ms);
2963 }
2964

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.
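      //
      // Schematically (an illustration of the comments below, not code
      // that runs here; in the serial case the barriers are skipped):
      //
      //   every task:  enter_first_sync_barrier()   // marking has stopped
      //   every task:  clear_region_fields()        // reset local state
      //   every task:  enter_second_sync_barrier()  // ready to restart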

      if (!is_serial) {
        // We only need to enter the sync barrier if we are being
        // called from a parallel context.
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }
  }

  _claimed = false;
}

G1CMTask::G1CMTask(uint worker_id,
                   G1ConcurrentMark* cm,
                   G1CMTaskQueue* task_queue,
                   G1CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _objArray_processor(this),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

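  // Seed the diff statistics with a small value so that early
  // predictions are based on a non-empty sample; the specific seed of
  // 0.5 (ms) is presumably a heuristic choice.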
  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and should be kept in sync with the
// corresponding value macro. Also note that most of the macros add
// the necessary white space (as a prefix), which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string so that they can
// be identified easily in a large log file.
#define G1PPRL_LINE_PREFIX "###"

#define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT " %-4s"
#define G1PPRL_TYPE_H_FORMAT " %4s"
#define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT " %9s"
#define G1PPRL_DOUBLE_FORMAT " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
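
// As an illustration, composing G1PPRL_LINE_PREFIX with the per-region
// value formats above yields a single printf-style format string; on a
// 64-bit build the resulting per-region log line comes out roughly as
// (the values here are made up):
//
//   ### OLD  0x00000000f0000000-0x00000000f0100000   1048576    917504 ...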

G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(const char* phase_name)
  : _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
                          G1PPRL_SUM_ADDR_FORMAT("reserved")
                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                          HeapRegion::GrainBytes);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "remset", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "(bytes)");
}

bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type = r->get_type_str();
  HeapWord* bottom = r->bottom();
  HeapWord* end = r->end();
  size_t capacity_bytes = r->capacity();
  size_t used_bytes = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff = r->gc_efficiency();
  size_t remset_bytes = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  _total_used_bytes += used_bytes;
  _total_capacity_bytes += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, strong_code_roots_bytes);

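  // Returning false keeps the heap region iteration going; returning
  // true would terminate it early.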
  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // Add the static memory usages to the remembered set sizes.
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          " SUMMARY"
                          G1PPRL_SUM_MB_FORMAT("capacity")
                          G1PPRL_SUM_MB_PERC_FORMAT("used")
                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                          G1PPRL_SUM_MB_FORMAT("remset")
                          G1PPRL_SUM_MB_FORMAT("code-roots"),
                          bytes_to_mb(_total_capacity_bytes),
                          bytes_to_mb(_total_used_bytes),
                          perc(_total_used_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_prev_live_bytes),
                          perc(_total_prev_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_next_live_bytes),
                          perc(_total_next_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_remset_bytes),
                          bytes_to_mb(_total_strong_code_roots_bytes));
}
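
// For reference: bytes_to_mb() and perc() used above are helpers of
// G1PrintRegionLivenessInfoClosure declared in the header (not shown
// here). A sketch of the conversions they are assumed to perform:
//
//   static double bytes_to_mb(size_t val) {
//     return (double) val / (double) M;  // M = 1024 * 1024
//   }
//   static double perc(size_t val, size_t total) {
//     return (total == 0) ? 0.0 : 100.0 * ((double) val / (double) total);
//   }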
--- EOF ---