rev 55429 : 8226311: Shenandoah: Concurrent evacuation of OopStorage backed weak roots
1 /*
2 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "memory/allocation.hpp"
26 #include "memory/universe.hpp"
27
28 #include "gc/shared/gcArguments.hpp"
29 #include "gc/shared/gcTimer.hpp"
30 #include "gc/shared/gcTraceTime.inline.hpp"
31 #include "gc/shared/memAllocator.hpp"
32 #include "gc/shared/parallelCleaning.hpp"
33 #include "gc/shared/plab.hpp"
34
35 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
36 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
37 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
38 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
39 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
40 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
41 #include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
42 #include "gc/shenandoah/shenandoahControlThread.hpp"
43 #include "gc/shenandoah/shenandoahFreeSet.hpp"
44 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
45 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
46 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
47 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
48 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
49 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
50 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
51 #include "gc/shenandoah/shenandoahMetrics.hpp"
52 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
53 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
54 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
55 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
56 #include "gc/shenandoah/shenandoahStringDedup.hpp"
57 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
58 #include "gc/shenandoah/shenandoahUtils.hpp"
59 #include "gc/shenandoah/shenandoahVerifier.hpp"
60 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
61 #include "gc/shenandoah/shenandoahVMOperations.hpp"
62 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
63 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
64 #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
65 #include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
66 #include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
67 #include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
68 #include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
69 #include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"
70 #if INCLUDE_JFR
71 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
72 #endif
73
74 #include "memory/metaspace.hpp"
75 #include "oops/compressedOops.inline.hpp"
76 #include "runtime/globals.hpp"
77 #include "runtime/interfaceSupport.inline.hpp"
78 #include "runtime/safepointMechanism.hpp"
79 #include "runtime/vmThread.hpp"
80 #include "services/mallocTracker.hpp"
81
82 #ifdef ASSERT
83 template <class T>
84 void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
85 T o = RawAccess<>::oop_load(p);
86 if (! CompressedOops::is_null(o)) {
87 oop obj = CompressedOops::decode_not_null(o);
88 shenandoah_assert_not_forwarded(p, obj);
89 }
90 }
91
92 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
93 void ShenandoahAssertToSpaceClosure::do_oop(oop* p) { do_oop_work(p); }
94 #endif
95
96 class ShenandoahPretouchHeapTask : public AbstractGangTask {
97 private:
98 ShenandoahRegionIterator _regions;
99 const size_t _page_size;
100 public:
101 ShenandoahPretouchHeapTask(size_t page_size) :
102 AbstractGangTask("Shenandoah Pretouch Heap"),
103 _page_size(page_size) {}
104
105 virtual void work(uint worker_id) {
106 ShenandoahHeapRegion* r = _regions.next();
107 while (r != NULL) {
108 os::pretouch_memory(r->bottom(), r->end(), _page_size);
109 r = _regions.next();
110 }
111 }
112 };
113
114 class ShenandoahPretouchBitmapTask : public AbstractGangTask {
115 private:
116 ShenandoahRegionIterator _regions;
117 char* _bitmap_base;
118 const size_t _bitmap_size;
119 const size_t _page_size;
120 public:
121 ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
122 AbstractGangTask("Shenandoah Pretouch Bitmap"),
123 _bitmap_base(bitmap_base),
124 _bitmap_size(bitmap_size),
125 _page_size(page_size) {}
126
127 virtual void work(uint worker_id) {
128 ShenandoahHeapRegion* r = _regions.next();
129 while (r != NULL) {
130 size_t start = r->region_number() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
131 size_t end = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
132 assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
133
134 os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
135
136 r = _regions.next();
137 }
138 }
139 };
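// A worked example of the offset math above, with hypothetical sizes:
// assuming region_size_bytes = 32M and MarkBitMap::heap_map_factor() = 64
// (one bitmap bit per 8-byte heap word on 64-bit), each region maps to
// 32M / 64 = 512K of bitmap, and region i pretouches
// _bitmap_base[i * 512K, (i + 1) * 512K). Each worker therefore touches
// exactly the bitmap slice for the heap regions it claims, keeping NUMA
// placement of heap and bitmap consistent.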
140
141 jint ShenandoahHeap::initialize() {
142 initialize_heuristics();
143
144 //
145 // Figure out heap sizing
146 //
147
148 size_t init_byte_size = InitialHeapSize;
149 size_t min_byte_size = MinHeapSize;
150 size_t max_byte_size = MaxHeapSize;
151 size_t heap_alignment = HeapAlignment;
152
153 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
154
155 if (ShenandoahAlwaysPreTouch) {
156 // Enabled pre-touch means the entire heap is committed right away.
157 init_byte_size = max_byte_size;
158 }
159
160 Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
161 Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
162
163 _num_regions = ShenandoahHeapRegion::region_count();
164
165 size_t num_committed_regions = init_byte_size / reg_size_bytes;
166 num_committed_regions = MIN2(num_committed_regions, _num_regions);
167 assert(num_committed_regions <= _num_regions, "sanity");
168 _initial_size = num_committed_regions * reg_size_bytes;
169
170 size_t num_min_regions = min_byte_size / reg_size_bytes;
171 num_min_regions = MIN2(num_min_regions, _num_regions);
172 assert(num_min_regions <= _num_regions, "sanity");
173 _minimum_size = num_min_regions * reg_size_bytes;
174
175 _committed = _initial_size;
176
177 size_t heap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
178 size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
179
180 //
181 // Reserve and commit memory for heap
182 //
183
184 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
185 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
186 _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
187 _heap_region_special = heap_rs.special();
188
189 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
190 "Misaligned heap: " PTR_FORMAT, p2i(base()));
191
192 #if SHENANDOAH_OPTIMIZED_OBJTASK
193 // The optimized ObjArrayChunkedTask takes some bits away from the full object bits.
194 // Fail if we ever attempt to address more than we can.
195 if ((uintptr_t)heap_rs.end() >= ObjArrayChunkedTask::max_addressable()) {
196 FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
197 "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
198 "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
199 p2i(heap_rs.base()), p2i(heap_rs.end()), ObjArrayChunkedTask::max_addressable());
200 vm_exit_during_initialization("Fatal Error", buf);
201 }
202 #endif
203
204 ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
205 if (!_heap_region_special) {
206 os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
207 "Cannot commit heap memory");
208 }
209
210 //
211 // Reserve and commit memory for bitmap(s)
212 //
213
214 _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
215 _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
216
217 size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
218
219 guarantee(bitmap_bytes_per_region != 0,
220 "Bitmap bytes per region should not be zero");
221 guarantee(is_power_of_2(bitmap_bytes_per_region),
222 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
223
224 if (bitmap_page_size > bitmap_bytes_per_region) {
225 _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
226 _bitmap_bytes_per_slice = bitmap_page_size;
227 } else {
228 _bitmap_regions_per_slice = 1;
229 _bitmap_bytes_per_slice = bitmap_bytes_per_region;
230 }
231
232 guarantee(_bitmap_regions_per_slice >= 1,
233 "Should have at least one region per slice: " SIZE_FORMAT,
234 _bitmap_regions_per_slice);
235
236 guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
237 "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
238 _bitmap_bytes_per_slice, bitmap_page_size);
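// Worked example with hypothetical sizes: with 4K bitmap pages and 512K of
// bitmap per region, bitmap_page_size <= bitmap_bytes_per_region, so each
// slice covers exactly one region. With 2M large pages and 64K of bitmap per
// region, one slice spans 2M / 64K = 32 regions, and bitmap commit/uncommit
// then always happens in 32-region groups.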
239
240 ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
241 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
242 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
243 _bitmap_region_special = bitmap.special();
244
245 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
246 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
247 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
248 if (!_bitmap_region_special) {
249 os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
250 "Cannot commit bitmap memory");
251 }
252
253 _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
254
255 if (ShenandoahVerify) {
256 ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
257 if (!verify_bitmap.special()) {
258 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
259 "Cannot commit verification bitmap memory");
260 }
261 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
262 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
263 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
264 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
265 }
266
267 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
268 ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
269 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
270 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
271 _aux_bitmap_region_special = aux_bitmap.special();
272 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
273
274 //
275 // Create regions and region sets
276 //
277
278 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
279 _free_set = new ShenandoahFreeSet(this, _num_regions);
280 _collection_set = new ShenandoahCollectionSet(this, sh_rs.base(), sh_rs.size());
281
282 {
283 ShenandoahHeapLocker locker(lock());
284
285 size_t size_words = ShenandoahHeapRegion::region_size_words();
286
287 for (size_t i = 0; i < _num_regions; i++) {
288 HeapWord* start = (HeapWord*)sh_rs.base() + size_words * i;
289 bool is_committed = i < num_committed_regions;
290 ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this, start, size_words, i, is_committed);
291
292 _marking_context->initialize_top_at_mark_start(r);
293 _regions[i] = r;
294 assert(!collection_set()->is_in(i), "New region should not be in collection set");
295 }
296
297 // Initialize to complete
298 _marking_context->mark_complete();
299
300 _free_set->rebuild();
301 }
302
303 if (ShenandoahAlwaysPreTouch) {
304 assert(!AlwaysPreTouch, "Should have been overridden");
305
306 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
307 // before initialize() below zeroes it with initializing thread. For any given region,
308 // we touch the region and the corresponding bitmaps from the same thread.
309 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
310
311 size_t pretouch_heap_page_size = heap_page_size;
312 size_t pretouch_bitmap_page_size = bitmap_page_size;
313
314 #ifdef LINUX
315 // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
316 // pages. But the kernel needs to know that every small page is used, in order to coalesce
317 // them into a huge one. Therefore, we need to pretouch with smaller pages.
318 if (UseTransparentHugePages) {
319 pretouch_heap_page_size = (size_t)os::vm_page_size();
320 pretouch_bitmap_page_size = (size_t)os::vm_page_size();
321 }
322 #endif
323
324 // OS memory managers may want to coalesce back-to-back pages. Make their jobs
325 // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
326
327 log_info(gc, init)("Pretouch bitmap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
328 _num_regions, pretouch_bitmap_page_size);
329 ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, pretouch_bitmap_page_size);
330 _workers->run_task(&bcl);
331
332 log_info(gc, init)("Pretouch heap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
333 _num_regions, pretouch_heap_page_size);
334 ShenandoahPretouchHeapTask hcl(pretouch_heap_page_size);
335 _workers->run_task(&hcl);
336 }
337
338 //
339 // Initialize the rest of GC subsystems
340 //
341
342 _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC);
343 for (uint worker = 0; worker < _max_workers; worker++) {
344 _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC);
345 Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort));
346 }
347
348 // The call below uses machinery (the SATB* things) that lives in G1, but probably
349 // belongs in a shared location.
350 ShenandoahBarrierSet::satb_mark_queue_set().initialize(this,
351 SATB_Q_CBL_mon,
352 20 /* G1SATBProcessCompletedThreshold */,
353 60 /* G1SATBBufferEnqueueingThresholdPercent */);
354
355 _monitoring_support = new ShenandoahMonitoringSupport(this);
356 _phase_timings = new ShenandoahPhaseTimings();
357 ShenandoahStringDedup::initialize();
358 ShenandoahCodeRoots::initialize();
359
360 if (ShenandoahAllocationTrace) {
361 _alloc_tracker = new ShenandoahAllocTracker();
362 }
363
364 if (ShenandoahPacing) {
365 _pacer = new ShenandoahPacer(this);
366 _pacer->setup_for_idle();
367 } else {
368 _pacer = NULL;
369 }
370
371 _traversal_gc = heuristics()->can_do_traversal_gc() ?
372 new ShenandoahTraversalGC(this, _num_regions) :
373 NULL;
374
375 _control_thread = new ShenandoahControlThread();
376
377 log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
378 byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size),
379 byte_size_in_proper_unit(_minimum_size), proper_unit_for_byte_size(_minimum_size),
380 byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
381 );
382
383 log_info(gc, init)("Safepointing mechanism: %s",
384 SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
385 (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
386
387 return JNI_OK;
388 }
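// A hypothetical invocation exercising the sizing above (flag names are real,
// the sizes are only an example):
//
//   java -XX:+UseShenandoahGC -Xms4g -Xmx16g -XX:+AlwaysPreTouch ...
//
// As the assert in the pretouch block suggests, argument processing is
// expected to translate AlwaysPreTouch into ShenandoahAlwaysPreTouch; then
// init_byte_size is bumped to max_byte_size, so the whole heap and the
// matching bitmap slices are committed and pretouched up front.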
389
390 void ShenandoahHeap::initialize_heuristics() {
391 if (ShenandoahGCHeuristics != NULL) {
392 if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
393 _heuristics = new ShenandoahAggressiveHeuristics();
394 } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
395 _heuristics = new ShenandoahStaticHeuristics();
396 } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
397 _heuristics = new ShenandoahAdaptiveHeuristics();
398 } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
399 _heuristics = new ShenandoahPassiveHeuristics();
400 } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
401 _heuristics = new ShenandoahCompactHeuristics();
402 } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
403 _heuristics = new ShenandoahTraversalHeuristics();
404 } else {
405 vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
406 }
407
408 if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
409 vm_exit_during_initialization(
410 err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
411 _heuristics->name()));
412 }
413 if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
414 vm_exit_during_initialization(
415 err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
416 _heuristics->name()));
417 }
418 log_info(gc, init)("Shenandoah heuristics: %s",
419 _heuristics->name());
420 } else {
421 ShouldNotReachHere();
422 }
423
424 }
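// Hypothetical selections matching the branches above:
//
//   java -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive ...
//   java -XX:+UseShenandoahGC -XX:+UnlockExperimentalVMOptions \
//        -XX:ShenandoahGCHeuristics=traversal ...
//
// Whether a given heuristics needs the diagnostic or experimental unlock is
// reported by the heuristics object itself, via is_diagnostic() and
// is_experimental(), and enforced by the checks above.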
425
426 #ifdef _MSC_VER
427 #pragma warning( push )
428 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
429 #endif
430
431 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
432 CollectedHeap(),
433 _initial_size(0),
434 _used(0),
435 _committed(0),
436 _bytes_allocated_since_gc_start(0),
437 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
438 _workers(NULL),
439 _safepoint_workers(NULL),
440 _heap_region_special(false),
441 _num_regions(0),
442 _regions(NULL),
443 _update_refs_iterator(this),
444 _control_thread(NULL),
445 _shenandoah_policy(policy),
446 _heuristics(NULL),
447 _free_set(NULL),
448 _scm(new ShenandoahConcurrentMark()),
449 _traversal_gc(NULL),
450 _full_gc(new ShenandoahMarkCompact()),
451 _pacer(NULL),
452 _verifier(NULL),
453 _alloc_tracker(NULL),
454 _phase_timings(NULL),
455 _monitoring_support(NULL),
456 _memory_pool(NULL),
457 _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
458 _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
459 _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
460 _soft_ref_policy(),
461 _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
462 _ref_processor(NULL),
463 _marking_context(NULL),
464 _bitmap_size(0),
465 _bitmap_regions_per_slice(0),
466 _bitmap_bytes_per_slice(0),
467 _bitmap_region_special(false),
468 _aux_bitmap_region_special(false),
469 _liveness_cache(NULL),
470 _collection_set(NULL)
471 {
472 log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
473 log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");
474
475 BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
476
477 _max_workers = MAX2(_max_workers, 1U);
478 _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
479 /* are_GC_task_threads */ true,
480 /* are_ConcurrentGC_threads */ true);
481 if (_workers == NULL) {
482 vm_exit_during_initialization("Failed necessary allocation.");
483 } else {
484 _workers->initialize_workers();
485 }
486
487 if (ShenandoahParallelSafepointThreads > 1) {
488 _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
489 ShenandoahParallelSafepointThreads,
490 /* are_GC_task_threads */ false,
491 /* are_ConcurrentGC_threads */ false);
492 _safepoint_workers->initialize_workers();
493 }
494 }
495
496 #ifdef _MSC_VER
497 #pragma warning( pop )
498 #endif
499
500 class ShenandoahResetBitmapTask : public AbstractGangTask {
501 private:
502 ShenandoahRegionIterator _regions;
503
504 public:
505 ShenandoahResetBitmapTask() :
506 AbstractGangTask("Parallel Reset Bitmap Task") {}
507
508 void work(uint worker_id) {
509 ShenandoahHeapRegion* region = _regions.next();
510 ShenandoahHeap* heap = ShenandoahHeap::heap();
511 ShenandoahMarkingContext* const ctx = heap->marking_context();
512 while (region != NULL) {
513 if (heap->is_bitmap_slice_committed(region)) {
514 ctx->clear_bitmap(region);
515 }
516 region = _regions.next();
517 }
518 }
519 };
520
521 void ShenandoahHeap::reset_mark_bitmap() {
522 assert_gc_workers(_workers->active_workers());
523 mark_incomplete_marking_context();
524
525 ShenandoahResetBitmapTask task;
526 _workers->run_task(&task);
527 }
528
529 void ShenandoahHeap::print_on(outputStream* st) const {
530 st->print_cr("Shenandoah Heap");
531 st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
532 max_capacity() / K, committed() / K, used() / K);
533 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
534 num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
535
536 st->print("Status: ");
537 if (has_forwarded_objects()) st->print("has forwarded objects, ");
538 if (is_concurrent_mark_in_progress()) st->print("marking, ");
539 if (is_evacuation_in_progress()) st->print("evacuating, ");
540 if (is_update_refs_in_progress()) st->print("updating refs, ");
541 if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
542 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
543 if (is_full_gc_in_progress()) st->print("full gc, ");
544 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
545
546 if (cancelled_gc()) {
547 st->print("cancelled");
548 } else {
549 st->print("not cancelled");
550 }
551 st->cr();
552
553 st->print_cr("Reserved region:");
554 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
555 p2i(reserved_region().start()),
556 p2i(reserved_region().end()));
557
558 ShenandoahCollectionSet* cset = collection_set();
559 st->print_cr("Collection set:");
560 if (cset != NULL) {
561 st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
562 st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
563 } else {
564 st->print_cr(" (NULL)");
565 }
566
567 st->cr();
568 MetaspaceUtils::print_on(st);
569
570 if (Verbose) {
571 print_heap_regions_on(st);
572 }
573 }
574
575 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
576 public:
577 void do_thread(Thread* thread) {
578 assert(thread != NULL, "Sanity");
579 assert(thread->is_Worker_thread(), "Only worker thread expected");
580 ShenandoahThreadLocalData::initialize_gclab(thread);
581 }
582 };
583
584 void ShenandoahHeap::post_initialize() {
585 CollectedHeap::post_initialize();
586 MutexLocker ml(Threads_lock);
587
588 ShenandoahInitWorkerGCLABClosure init_gclabs;
589 _workers->threads_do(&init_gclabs);
590
591 // GCLABs cannot be initialized early during VM startup, because their max_size cannot be determined yet.
592 // Instead, we let the WorkGang initialize GCLABs as new workers are created.
593 _workers->set_initialize_gclab();
594
595 _scm->initialize(_max_workers);
596 _full_gc->initialize(_gc_timer);
597
598 ref_processing_init();
599
600 _heuristics->initialize();
601
602 JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
603 }
604
605 size_t ShenandoahHeap::used() const {
606 return OrderAccess::load_acquire(&_used);
607 }
608
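// Note on ordering (a reading of the code nearby): _used is read with a
// load_acquire and written with release_store_fence (set_used) or atomic adds,
// while _committed is only mutated under the heap lock or at a safepoint (see
// increase_committed/decrease_committed below), so a bare acquire fence is
// deemed sufficient for committed().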
609 size_t ShenandoahHeap::committed() const {
610 OrderAccess::acquire();
611 return _committed;
612 }
613
614 void ShenandoahHeap::increase_committed(size_t bytes) {
615 assert_heaplock_or_safepoint();
616 _committed += bytes;
617 }
618
619 void ShenandoahHeap::decrease_committed(size_t bytes) {
620 assert_heaplock_or_safepoint();
621 _committed -= bytes;
622 }
623
624 void ShenandoahHeap::increase_used(size_t bytes) {
625 Atomic::add(bytes, &_used);
626 }
627
628 void ShenandoahHeap::set_used(size_t bytes) {
629 OrderAccess::release_store_fence(&_used, bytes);
630 }
631
632 void ShenandoahHeap::decrease_used(size_t bytes) {
633 assert(used() >= bytes, "never decrease heap size by more than we've left");
634 Atomic::sub(bytes, &_used);
635 }
636
637 void ShenandoahHeap::increase_allocated(size_t bytes) {
638 Atomic::add(bytes, &_bytes_allocated_since_gc_start);
639 }
640
641 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
642 size_t bytes = words * HeapWordSize;
643 if (!waste) {
644 increase_used(bytes);
645 }
646 increase_allocated(bytes);
647 if (ShenandoahPacing) {
648 control_thread()->pacing_notify_alloc(words);
649 if (waste) {
650 pacer()->claim_for_alloc(words, true);
651 }
652 }
653 }
654
655 size_t ShenandoahHeap::capacity() const {
656 return committed();
657 }
658
659 size_t ShenandoahHeap::max_capacity() const {
660 return _num_regions * ShenandoahHeapRegion::region_size_bytes();
661 }
662
663 size_t ShenandoahHeap::min_capacity() const {
664 return _minimum_size;
665 }
666
667 size_t ShenandoahHeap::initial_capacity() const {
668 return _initial_size;
669 }
670
671 bool ShenandoahHeap::is_in(const void* p) const {
672 HeapWord* heap_base = (HeapWord*) base();
673 HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
674 return p >= heap_base && p < last_region_end;
675 }
676
677 void ShenandoahHeap::op_uncommit(double shrink_before) {
678 assert (ShenandoahUncommit, "should be enabled");
679
680 // The application allocates from the beginning of the heap, and GC allocates at
681 // the end of it. It is more efficient to uncommit from the end, so that the
682 // application can enjoy the nearby committed regions. GC allocations are much less
683 // frequent, and therefore can tolerate the re-commit costs.
684
685 size_t count = 0;
686 for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
687 ShenandoahHeapRegion* r = get_region(i - 1);
688 if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
689 ShenandoahHeapLocker locker(lock());
690 if (r->is_empty_committed()) {
691 // Do not uncommit below minimal capacity
692 if (committed() < min_capacity() + ShenandoahHeapRegion::region_size_bytes()) {
693 break;
694 }
695
696 r->make_uncommitted();
697 count++;
698 }
699 }
700 SpinPause(); // allow allocators to take the lock
701 }
702
703 if (count > 0) {
704 control_thread()->notify_heap_changed();
705 }
706 }
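// Worked example with hypothetical sizes: min_capacity() = 1G, 32M regions.
// The loop walks empty committed regions from the top of the heap; before
// uncommitting each one it checks committed() >= 1G + 32M, so committed()
// never drops below the 1G minimum. The SpinPause() between iterations lets
// allocators grab the heap lock in the meantime.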
707
708 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
709 // New object should fit the GCLAB size
710 size_t min_size = MAX2(size, PLAB::min_size());
711
712 // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
713 size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
714 new_size = MIN2(new_size, PLAB::max_size());
715 new_size = MAX2(new_size, PLAB::min_size());
716
717 // Record new heuristic value even if we take any shortcut. This captures
718 // the case when moderately-sized objects always take a shortcut. At some point,
719 // heuristics should catch up with them.
720 ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
721
722 if (new_size < size) {
723 // New size still does not fit the object. Fall back to shared allocation.
724 // This avoids retiring perfectly good GCLABs, when we encounter a large object.
725 return NULL;
726 }
727
728 // Retire current GCLAB, and allocate a new one.
729 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
730 gclab->retire();
731
732 size_t actual_size = 0;
733 HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
734 if (gclab_buf == NULL) {
735 return NULL;
736 }
737
738 assert (size <= actual_size, "allocation should fit");
739
740 if (ZeroTLAB) {
741 // ..and clear it.
742 Copy::zero_to_words(gclab_buf, actual_size);
743 } else {
744 // ...and zap just allocated object.
745 #ifdef ASSERT
746 // Skip mangling the space corresponding to the object header to
747 // ensure that the returned space is not considered parsable by
748 // any concurrent GC thread.
749 size_t hdr_size = oopDesc::header_size();
750 Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
751 #endif // ASSERT
752 }
753 gclab->set_buf(gclab_buf, actual_size);
754 return gclab->allocate(size);
755 }
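// A sketch of the sizing behavior above, with hypothetical numbers: a thread
// whose gclab_size is 64K words that fails a 1K-word GCLAB allocation gets
// new_size = 128K words (doubled, then clamped into [PLAB::min_size(),
// PLAB::max_size()]). If the request were instead 256K words > new_size, the
// new size is still recorded, but we return NULL and fall back to a shared
// allocation rather than retiring a perfectly good GCLAB.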
756
757 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
758 size_t requested_size,
759 size_t* actual_size) {
760 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
761 HeapWord* res = allocate_memory(req);
762 if (res != NULL) {
763 *actual_size = req.actual_size();
764 } else {
765 *actual_size = 0;
766 }
767 return res;
768 }
769
770 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
771 size_t word_size,
772 size_t* actual_size) {
773 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
774 HeapWord* res = allocate_memory(req);
775 if (res != NULL) {
776 *actual_size = req.actual_size();
777 } else {
778 *actual_size = 0;
779 }
780 return res;
781 }
782
783 ShenandoahHeap* ShenandoahHeap::heap() {
784 CollectedHeap* heap = Universe::heap();
785 assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
786 assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
787 return (ShenandoahHeap*) heap;
788 }
789
790 ShenandoahHeap* ShenandoahHeap::heap_no_check() {
791 CollectedHeap* heap = Universe::heap();
792 return (ShenandoahHeap*) heap;
793 }
794
795 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
796 ShenandoahAllocTrace trace_alloc(req.size(), req.type());
797
798 intptr_t pacer_epoch = 0;
799 bool in_new_region = false;
800 HeapWord* result = NULL;
801
802 if (req.is_mutator_alloc()) {
803 if (ShenandoahPacing) {
804 pacer()->pace_for_alloc(req.size());
805 pacer_epoch = pacer()->epoch();
806 }
807
808 if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
809 result = allocate_memory_under_lock(req, in_new_region);
810 }
811
812 // If allocation failed, block until the control thread has reacted, then retry the allocation.
813 //
814 // It might happen that one of the threads requesting allocation would unblock
815 // way later after GC happened, only to fail the second allocation, because
816 // other threads have already depleted the free storage. In this case, a better
817 // strategy is to try again, as long as GC makes progress.
818 //
819 // Then, we need to make sure the allocation was retried after at least one
820 // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
821
822 size_t tries = 0;
823
824 while (result == NULL && _progress_last_gc.is_set()) {
825 tries++;
826 control_thread()->handle_alloc_failure(req.size());
827 result = allocate_memory_under_lock(req, in_new_region);
828 }
829
830 while (result == NULL && tries <= ShenandoahFullGCThreshold) {
831 tries++;
832 control_thread()->handle_alloc_failure(req.size());
833 result = allocate_memory_under_lock(req, in_new_region);
834 }
835
836 } else {
837 assert(req.is_gc_alloc(), "Can only accept GC allocs here");
838 result = allocate_memory_under_lock(req, in_new_region);
839 // Do not call handle_alloc_failure() here, because we cannot block.
840 // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
841 }
842
843 if (in_new_region) {
844 control_thread()->notify_heap_changed();
845 }
846
847 if (result != NULL) {
848 size_t requested = req.size();
849 size_t actual = req.actual_size();
850
851 assert (req.is_lab_alloc() || (requested == actual),
852 "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
853 ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
854
855 if (req.is_mutator_alloc()) {
856 notify_mutator_alloc_words(actual, false);
857
858 // If we requested more than we were granted, give the rest back to pacer.
859 // This only matters if we are in the same pacing epoch: do not try to unpace
860 // over the budget for the other phase.
861 if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
862 pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
863 }
864 } else {
865 increase_used(actual*HeapWordSize);
866 }
867 }
868
869 return result;
870 }
871
872 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
873 ShenandoahHeapLocker locker(lock());
874 return _free_set->allocate(req, in_new_region);
875 }
876
877 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
878 bool* gc_overhead_limit_was_exceeded) {
879 ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
880 return allocate_memory(req);
881 }
882
883 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
884 size_t size,
885 Metaspace::MetadataType mdtype) {
886 MetaWord* result;
887
888 // Inform metaspace OOM to GC heuristics if class unloading is possible.
889 if (heuristics()->can_unload_classes()) {
890 ShenandoahHeuristics* h = heuristics();
891 h->record_metaspace_oom();
892 }
893
894 // Expand and retry allocation
895 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
896 if (result != NULL) {
897 return result;
898 }
899
900 // Start full GC
901 collect(GCCause::_metadata_GC_clear_soft_refs);
902
903 // Retry allocation
904 result = loader_data->metaspace_non_null()->allocate(size, mdtype);
905 if (result != NULL) {
906 return result;
907 }
908
909 // Expand and retry allocation
910 result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
911 if (result != NULL) {
912 return result;
913 }
914
915 // Out of memory
916 return NULL;
917 }
918
919 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
920 private:
921 ShenandoahHeap* const _heap;
922 Thread* const _thread;
923 public:
924 ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
925 _heap(heap), _thread(Thread::current()) {}
926
927 void do_object(oop p) {
928 shenandoah_assert_marked(NULL, p);
929 if (!p->is_forwarded()) {
930 _heap->evacuate_object(p, _thread);
931 }
932 }
933 };
934
935 class ShenandoahEvacuationTask : public AbstractGangTask {
936 private:
937 ShenandoahHeap* const _sh;
938 ShenandoahCollectionSet* const _cs;
939 bool _concurrent;
940 public:
941 ShenandoahEvacuationTask(ShenandoahHeap* sh,
942 ShenandoahCollectionSet* cs,
943 bool concurrent) :
944 AbstractGangTask("Parallel Evacuation Task"),
945 _sh(sh),
946 _cs(cs),
947 _concurrent(concurrent)
948 {}
949
950 void work(uint worker_id) {
951 if (_concurrent) {
952 ShenandoahConcurrentWorkerSession worker_session(worker_id);
953 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
954 ShenandoahEvacOOMScope oom_evac_scope;
955 do_work();
956 } else {
957 ShenandoahParallelWorkerSession worker_session(worker_id);
958 ShenandoahEvacOOMScope oom_evac_scope;
959 do_work();
960 }
961 }
962
963 private:
964 void do_work() {
965 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
966 ShenandoahHeapRegion* r;
967 while ((r = _cs->claim_next()) != NULL) {
968 assert(r->has_live(), "all-garbage regions are reclaimed early");
969 _sh->marked_object_iterate(r, &cl);
970
971 if (ShenandoahPacing) {
972 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
973 }
974
975 if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
976 break;
977 }
978 }
979 }
980 };
981
982 void ShenandoahHeap::trash_cset_regions() {
983 ShenandoahHeapLocker locker(lock());
984
985 ShenandoahCollectionSet* set = collection_set();
986 ShenandoahHeapRegion* r;
987 set->clear_current_index();
988 while ((r = set->next()) != NULL) {
989 r->make_trash();
990 }
991 collection_set()->clear();
992 }
993
994 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
995 st->print_cr("Heap Regions:");
996 st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
997 st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
998 st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
999 st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");
1000
1001 for (size_t i = 0; i < num_regions(); i++) {
1002 get_region(i)->print_on(st);
1003 }
1004 }
1005
1006 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1007 assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1008
1009 oop humongous_obj = oop(start->bottom());
1010 size_t size = humongous_obj->size();
1011 size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1012 size_t index = start->region_number() + required_regions - 1;
1013
1014 assert(!start->has_live(), "liveness must be zero");
1015
1016 for (size_t i = 0; i < required_regions; i++) {
1017 // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1018 // as it expects every region to belong to a humongous chain starting with a humongous start region.
1019 ShenandoahHeapRegion* region = get_region(index--);
1020
1021 assert(region->is_humongous(), "expect correct humongous start or continuation");
1022 assert(!region->is_cset(), "Humongous region should not be in collection set");
1023
1024 region->make_trash_immediate();
1025 }
1026 }
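// Worked example with hypothetical sizes: a 100M object in 32M regions needs
// required_regions = ceil(100M / 32M) = 4. If the start region index is 10,
// the loop above trashes regions 13, 12, 11 and finally 10, so the humongous
// start region is dismantled last, as the comment in the loop explains.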
1027
1028 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1029 public:
1030 void do_thread(Thread* thread) {
1031 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1032 assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1033 gclab->retire();
1034 }
1035 };
1036
1037 void ShenandoahHeap::make_parsable(bool retire_tlabs) {
1038 if (UseTLAB) {
1039 CollectedHeap::ensure_parsability(retire_tlabs);
1040 }
1041 ShenandoahRetireGCLABClosure cl;
1042 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1043 cl.do_thread(t);
1044 }
1045 workers()->threads_do(&cl);
1046 }
1047
1048 void ShenandoahHeap::resize_tlabs() {
1049 CollectedHeap::resize_all_tlabs();
1050 }
1051
1052 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1053 private:
1054 ShenandoahRootEvacuator* _rp;
1055
1056 public:
1057 ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1058 AbstractGangTask("Shenandoah evacuate and update roots"),
1059 _rp(rp) {}
1060
1061 void work(uint worker_id) {
1062 ShenandoahParallelWorkerSession worker_session(worker_id);
1063 ShenandoahEvacOOMScope oom_evac_scope;
1064 ShenandoahEvacuateUpdateRootsClosure cl;
1065 MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1066 _rp->roots_do(worker_id, &cl);
1067 }
1068 };
1069
1070 void ShenandoahHeap::evacuate_and_update_roots() {
1071 #if COMPILER2_OR_JVMCI
1072 DerivedPointerTable::clear();
1073 #endif
1074 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1075 {
1076 // Include concurrent roots if current cycle can not process those roots concurrently
1077 ShenandoahRootEvacuator rp(workers()->active_workers(),
1078 ShenandoahPhaseTimings::init_evac,
1079 !ShenandoahConcurrentRoots::should_do_concurrent_roots());
1080 ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1081 workers()->run_task(&roots_task);
1082 }
1083
1084 #if COMPILER2_OR_JVMCI
1085 DerivedPointerTable::update_pointers();
1086 #endif
1087 }
1088
1089 // Returns size in bytes
1090 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1091 if (ShenandoahElasticTLAB) {
1092 // With Elastic TLABs, return the max allowed size, and let the allocation path
1093 // figure out the safe size for current allocation.
1094 return ShenandoahHeapRegion::max_tlab_size_bytes();
1095 } else {
1096 return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1097 }
1098 }
1099
1100 size_t ShenandoahHeap::max_tlab_size() const {
1101 // Returns size in words
1102 return ShenandoahHeapRegion::max_tlab_size_words();
1103 }
1104
1105 class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
1106 public:
1107 void do_thread(Thread* thread) {
1108 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1109 gclab->retire();
1110 if (ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1111 ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1112 }
1113 }
1114 };
1115
1116 void ShenandoahHeap::retire_and_reset_gclabs() {
1117 ShenandoahRetireAndResetGCLABClosure cl;
1118 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1119 cl.do_thread(t);
1120 }
1121 workers()->threads_do(&cl);
1122 }
1123
1124 void ShenandoahHeap::collect(GCCause::Cause cause) {
1125 control_thread()->request_gc(cause);
1126 }
1127
1128 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1129 //assert(false, "Shouldn't need to do full collections");
1130 }
1131
1132 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1133 Space* sp = heap_region_containing(addr);
1134 if (sp != NULL) {
1135 return sp->block_start(addr);
1136 }
1137 return NULL;
1138 }
1139
1140 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1141 Space* sp = heap_region_containing(addr);
1142 return sp->block_is_obj(addr);
1143 }
1144
1145 jlong ShenandoahHeap::millis_since_last_gc() {
1146 double v = heuristics()->time_since_last_gc() * 1000;
1147 assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
1148 return (jlong)v;
1149 }
1150
1151 void ShenandoahHeap::prepare_for_verify() {
1152 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1153 make_parsable(false);
1154 }
1155 }
1156
1157 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1158 workers()->print_worker_threads_on(st);
1159 if (ShenandoahStringDedup::is_enabled()) {
1160 ShenandoahStringDedup::print_worker_threads_on(st);
1161 }
1162 }
1163
1164 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1165 workers()->threads_do(tcl);
1166 if (_safepoint_workers != NULL) {
1167 _safepoint_workers->threads_do(tcl);
1168 }
1169 if (ShenandoahStringDedup::is_enabled()) {
1170 ShenandoahStringDedup::threads_do(tcl);
1171 }
1172 }
1173
1174 void ShenandoahHeap::print_tracing_info() const {
1175 LogTarget(Info, gc, stats) lt;
1176 if (lt.is_enabled()) {
1177 ResourceMark rm;
1178 LogStream ls(lt);
1179
1180 phase_timings()->print_on(&ls);
1181
1182 ls.cr();
1183 ls.cr();
1184
1185 shenandoah_policy()->print_gc_stats(&ls);
1186
1187 ls.cr();
1188 ls.cr();
1189
1190 if (ShenandoahPacing) {
1191 pacer()->print_on(&ls);
1192 }
1193
1194 ls.cr();
1195 ls.cr();
1196
1197 if (ShenandoahAllocationTrace) {
1198 assert(alloc_tracker() != NULL, "Must be");
1199 alloc_tracker()->print_on(&ls);
1200 } else {
1201 ls.print_cr(" Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1202 }
1203 }
1204 }
1205
1206 void ShenandoahHeap::verify(VerifyOption vo) {
1207 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1208 if (ShenandoahVerify) {
1209 verifier()->verify_generic(vo);
1210 } else {
1211 // TODO: Consider allocating verification bitmaps on demand,
1212 // and turn this on unconditionally.
1213 }
1214 }
1215 }
1216 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1217 return _free_set->capacity();
1218 }
1219
1220 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1221 private:
1222 MarkBitMap* _bitmap;
1223 Stack<oop,mtGC>* _oop_stack;
1224
1225 template <class T>
1226 void do_oop_work(T* p) {
1227 T o = RawAccess<>::oop_load(p);
1228 if (!CompressedOops::is_null(o)) {
1229 oop obj = CompressedOops::decode_not_null(o);
1230 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1231 assert(oopDesc::is_oop(obj), "must be a valid oop");
1232 if (!_bitmap->is_marked((HeapWord*) obj)) {
1233 _bitmap->mark((HeapWord*) obj);
1234 _oop_stack->push(obj);
1235 }
1236 }
1237 }
1238 public:
1239 ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1240 _bitmap(bitmap), _oop_stack(oop_stack) {}
1241 void do_oop(oop* p) { do_oop_work(p); }
1242 void do_oop(narrowOop* p) { do_oop_work(p); }
1243 };
1244
1245 /*
1246 * This is public API, used in preparation of object_iterate().
1247 * Since we don't do a linear scan of the heap in object_iterate() (see the comment below), we don't
1248 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1249 * control, we call ShenandoahHeap::make_parsable().
1250 */
1251 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1252 // No-op.
1253 }
1254
1255 /*
1256 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1257 *
1258 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1259 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1260 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1261 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1262 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1263 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1264 * wiped the bitmap in preparation for next marking).
1265 *
1266 * For all those reasons, we implement object iteration as a single marking traversal, reporting
1267 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1268 * is allowed to report dead objects, but is not required to do so.
1269 */
1270 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1271 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1272 if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1273 log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1274 return;
1275 }
1276
1277 // Reset bitmap
1278 _aux_bit_map.clear();
1279
1280 Stack<oop,mtGC> oop_stack;
1281
1282 // First, we process all GC roots. This populates the work stack with initial objects.
1283 ShenandoahAllRootScanner rp(1, ShenandoahPhaseTimings::_num_phases);
1284 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1285
1286 if (unload_classes()) {
1287 rp.strong_roots_do_unchecked(&oops);
1288 } else {
1289 rp.roots_do_unchecked(&oops);
1290 }
1291
1292 // Work through the oop stack to traverse heap.
1293 while (! oop_stack.is_empty()) {
1294 oop obj = oop_stack.pop();
1295 assert(oopDesc::is_oop(obj), "must be a valid oop");
1296 cl->do_object(obj);
1297 obj->oop_iterate(&oops);
1298 }
1299
1300 assert(oop_stack.is_empty(), "should be empty");
1301
1302 if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1303 log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1304 }
1305 }
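// A minimal (hypothetical) client of the iteration above: counting objects at
// a safepoint. ExampleCountObjectsClosure is not part of this file.
//
//   class ExampleCountObjectsClosure : public ObjectClosure {
//   public:
//     size_t _count;
//     ExampleCountObjectsClosure() : _count(0) {}
//     void do_object(oop obj) { _count++; }
//   };
//
//   // Inside a VM operation, i.e. at a safepoint:
//   ExampleCountObjectsClosure cl;
//   ShenandoahHeap::heap()->object_iterate(&cl);
//   log_info(gc)("Counted " SIZE_FORMAT " objects", cl._count);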
1306
1307 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1308 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1309 object_iterate(cl);
1310 }
1311
1312 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1313 for (size_t i = 0; i < num_regions(); i++) {
1314 ShenandoahHeapRegion* current = get_region(i);
1315 blk->heap_region_do(current);
1316 }
1317 }
1318
1319 class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
1320 private:
1321 ShenandoahHeap* const _heap;
1322 ShenandoahHeapRegionClosure* const _blk;
1323
1324 DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
1325 volatile size_t _index;
1326 DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
1327
1328 public:
1329 ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1330 AbstractGangTask("Parallel Region Task"),
1331 _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1332
1333 void work(uint worker_id) {
1334 size_t stride = ShenandoahParallelRegionStride;
1335
1336 size_t max = _heap->num_regions();
1337 while (_index < max) {
1338 size_t cur = Atomic::add(stride, &_index) - stride;
1339 size_t start = cur;
1340 size_t end = MIN2(cur + stride, max);
1341 if (start >= max) break;
1342
1343 for (size_t i = cur; i < end; i++) {
1344 ShenandoahHeapRegion* current = _heap->get_region(i);
1345 _blk->heap_region_do(current);
1346 }
1347 }
1348 }
1349 };
1350
1351 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1352 assert(blk->is_thread_safe(), "Only thread-safe closures here");
1353 if (num_regions() > ShenandoahParallelRegionStride) {
1354 ShenandoahParallelHeapRegionTask task(blk);
1355 workers()->run_task(&task);
1356 } else {
1357 heap_region_iterate(blk);
1358 }
1359 }
1360
1361 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1362 private:
1363 ShenandoahMarkingContext* const _ctx;
1364 public:
1365 ShenandoahClearLivenessClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1366
1367 void heap_region_do(ShenandoahHeapRegion* r) {
1368 if (r->is_active()) {
1369 r->clear_live_data();
1370 _ctx->capture_top_at_mark_start(r);
1371 } else {
1372 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number());
1373 assert(_ctx->top_at_mark_start(r) == r->top(),
1374 "Region " SIZE_FORMAT " should already have correct TAMS", r->region_number());
1375 }
1376 }
1377
1378 bool is_thread_safe() { return true; }
1379 };
1380
1381 void ShenandoahHeap::op_init_mark() {
1382 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1383 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
1384
1385 assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
1386 assert(!marking_context()->is_complete(), "should not be complete");
1387
1388 if (ShenandoahVerify) {
1389 verifier()->verify_before_concmark();
1390 }
1391
1392 if (VerifyBeforeGC) {
1393 Universe::verify();
1394 }
1395
1396 set_concurrent_mark_in_progress(true);
1397 // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1398 {
1399 ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1400 make_parsable(true);
1401 }
1402
1403 {
1404 ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1405 ShenandoahClearLivenessClosure clc;
1406 parallel_heap_region_iterate(&clc);
1407 }
1408
1409 // Make above changes visible to worker threads
1410 OrderAccess::fence();
1411
1412 concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
1413
1414 if (UseTLAB) {
1415 ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1416 resize_tlabs();
1417 }
1418
1419 if (ShenandoahPacing) {
1420 pacer()->setup_for_mark();
1421 }
1422 }
1423
1424 void ShenandoahHeap::op_mark() {
1425 concurrent_mark()->mark_from_roots();
1426 }
1427
1428 class ShenandoahCompleteLivenessClosure : public ShenandoahHeapRegionClosure {
1429 private:
1430 ShenandoahMarkingContext* const _ctx;
1431 public:
1432 ShenandoahCompleteLivenessClosure() : _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
1433
1434 void heap_region_do(ShenandoahHeapRegion* r) {
1435 if (r->is_active()) {
1436 HeapWord *tams = _ctx->top_at_mark_start(r);
1437 HeapWord *top = r->top();
1438 if (top > tams) {
1439 r->increase_live_data_alloc_words(pointer_delta(top, tams));
1440 }
1441 } else {
1442 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number());
1443 assert(_ctx->top_at_mark_start(r) == r->top(),
1444 "Region " SIZE_FORMAT " should have correct TAMS", r->region_number());
1445 }
1446 }
1447
1448 bool is_thread_safe() { return true; }
1449 };
1450
1451 void ShenandoahHeap::op_final_mark() {
1452 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1453
1454 // It is critical that we evacuate the roots right after finishing
1455 // marking, so that we don't end up with unmarked objects in
1456 // the roots.
1457
1458 if (!cancelled_gc()) {
1459 concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
1460
1461 if (has_forwarded_objects()) {
1462 // Degen may be caused by failed evacuation of roots
1463 if (is_degenerated_gc_in_progress()) {
1464 concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
1465 } else {
1466 concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::update_roots);
1467 }
1468 }
1469
1470 if (ShenandoahVerify) {
1471 verifier()->verify_roots_no_forwarded();
1472 }
1473
1474 stop_concurrent_marking();
1475
1476 {
1477 ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);
1478
1479 // All allocations past TAMS are implicitly live, adjust the region data.
1480 // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1481 ShenandoahCompleteLivenessClosure cl;
1482 parallel_heap_region_iterate(&cl);
1483 }
1484
1485 {
1486 ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1487
1488 make_parsable(true);
1489
1490 trash_cset_regions();
1491
1492 {
1493 ShenandoahHeapLocker locker(lock());
1494 _collection_set->clear();
1495 _free_set->clear();
1496
1497 heuristics()->choose_collection_set(_collection_set);
1498
1499 _free_set->rebuild();
1500 }
1501 }
1502
1503 // If collection set has candidates, start evacuation.
1504 // Otherwise, bypass the rest of the cycle.
1505 if (!collection_set()->is_empty()) {
1506 ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1507
1508 if (ShenandoahVerify) {
1509 verifier()->verify_before_evacuation();
1510 }
1511
1512 set_evacuation_in_progress(true);
1513 // From here on, we need to update references.
1514 set_has_forwarded_objects(true);
1515
1516 evacuate_and_update_roots();
1517
1518 if (ShenandoahPacing) {
1519 pacer()->setup_for_evac();
1520 }
1521
1522 if (ShenandoahVerify) {
1523 if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
1524 verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::JNIHandleRoots);
1525 } else {
1526 verifier()->verify_roots_no_forwarded();
1527 }
1528 verifier()->verify_during_evacuation();
1529 }
1530 } else {
1531 if (ShenandoahVerify) {
1532 verifier()->verify_after_concmark();
1533 }
1534
1535 if (VerifyAfterGC) {
1536 Universe::verify();
1537 }
1538 }
1539
1540 } else {
1541 concurrent_mark()->cancel();
1542 stop_concurrent_marking();
1543
1544 if (process_references()) {
1545 // Abandon reference processing right away: pre-cleaning must have failed.
1546 ReferenceProcessor *rp = ref_processor();
1547 rp->disable_discovery();
1548 rp->abandon_partial_discovery();
1549 rp->verify_no_references_recorded();
1550 }
1551 }
1552 }
1553
1554 void ShenandoahHeap::op_final_evac() {
1555 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1556
1557 set_evacuation_in_progress(false);
1558
1559 retire_and_reset_gclabs();
1560
1561 if (ShenandoahVerify) {
1562 verifier()->verify_after_evacuation();
1563 }
1564
1565 if (VerifyAfterGC) {
1566 Universe::verify();
1567 }
1568 }
1569
1570 void ShenandoahHeap::op_conc_evac() {
1571 ShenandoahEvacuationTask task(this, _collection_set, true);
1572 workers()->run_task(&task);
1573 }
1574
1575 void ShenandoahHeap::op_stw_evac() {
1576 ShenandoahEvacuationTask task(this, _collection_set, false);
1577 workers()->run_task(&task);
1578 }
1579
1580 void ShenandoahHeap::op_updaterefs() {
1581 update_heap_references(true);
1582 }
1583
1584 void ShenandoahHeap::op_cleanup() {
1585 free_set()->recycle_trash();
1586 }
1587
1588 class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
1589 private:
1590 ShenandoahJNIHandleRoots<true /*concurrent*/> _jni_roots;
1591
1592 public:
1593 ShenandoahConcurrentRootsEvacUpdateTask() :
1594 AbstractGangTask("Shenandoah Evacuate/Update Concurrent Roots Task") {
1595 }
1596
1597 void work(uint worker_id) {
1598 ShenandoahEvacOOMScope oom;
1599 ShenandoahEvacuateUpdateRootsClosure cl;
1600 _jni_roots.oops_do<ShenandoahEvacuateUpdateRootsClosure>(&cl);
1601 }
1602 };
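// A minimal sketch of what the evacuate/update closure used above conceptually
// does per root slot. Illustrative only: the real ShenandoahEvacuateUpdateRootsClosure
// is defined elsewhere in the Shenandoah sources and also handles compressed
// oops and assertions; the oom-evac scope is entered in work() above.
//
//   void do_oop(oop* p) {
//     oop obj = RawAccess<>::oop_load(p);
//     if (obj != NULL && _heap->in_collection_set(obj)) {
//       oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
//       if (fwd == obj) {
//         fwd = _heap->evacuate_object(obj, _thread);  // copy to to-space
//       }
//       RawAccess<IS_NOT_NULL>::oop_store(p, fwd);     // publish new location
//     }
//   }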
1603
1604 void ShenandoahHeap::op_roots() {
1605 if (is_evacuation_in_progress() &&
1606 ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
1607 ShenandoahConcurrentRootsEvacUpdateTask task;
1608 workers()->run_task(&task);
1609 }
1610 }
1611
1612 void ShenandoahHeap::op_reset() {
1613 reset_mark_bitmap();
1614 }
1615
1616 void ShenandoahHeap::op_preclean() {
1617 concurrent_mark()->preclean_weak_refs();
1618 }
1619
1620 void ShenandoahHeap::op_init_traversal() {
1621 traversal_gc()->init_traversal_collection();
1622 }
1623
1624 void ShenandoahHeap::op_traversal() {
1625 traversal_gc()->concurrent_traversal_collection();
1626 }
1627
1628 void ShenandoahHeap::op_final_traversal() {
1629 traversal_gc()->final_traversal_collection();
1630 }
1631
1632 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1633 ShenandoahMetricsSnapshot metrics;
1634 metrics.snap_before();
1635
1636 full_gc()->do_it(cause);
1637 if (UseTLAB) {
1638 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1639 resize_all_tlabs();
1640 }
1641
1642 metrics.snap_after();
1643
1644 if (metrics.is_good_progress()) {
1645 _progress_last_gc.set();
1646 } else {
1647 // No good progress. Tell the allocation path that we have failed to make
1648 // progress, and it can finally fail.
1649 _progress_last_gc.unset();
1650 }
1651 }
1652
1653 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1654 // Degenerated GC is STW, but it can also fail. The current mechanics communicate
1655 // GC failure via the cancelled_gc() flag. So, if we detect the failure after
1656 // some phase, we have to upgrade the Degenerated GC to Full GC.
1657
1658 clear_cancelled_gc();
1659
1660 ShenandoahMetricsSnapshot metrics;
1661 metrics.snap_before();
1662
1663 switch (point) {
1664 case _degenerated_traversal:
1665 {
1666 // Drop the collection set. Note: this leaves some already forwarded objects
1667 // behind, which may be problematic, see comments for ShenandoahEvacAssist
1668 // workarounds in ShenandoahTraversalHeuristics.
1669
1670 ShenandoahHeapLocker locker(lock());
1671 collection_set()->clear_current_index();
1672 for (size_t i = 0; i < collection_set()->count(); i++) {
1673 ShenandoahHeapRegion* r = collection_set()->next();
1674 r->make_regular_bypass();
1675 }
1676 collection_set()->clear();
1677 }
1678 op_final_traversal();
1679 op_cleanup();
1680 return;
1681
1682 // The cases below form a Duff's-device-like fallthrough: they describe the actual
1683 // GC cycle, but enter it at different points, depending on which concurrent phase
1684 // degenerated.
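//
// A sketch of the entry points, read off the cases below (each case falls
// through into the next phase; _degenerated_traversal is handled separately
// above and returns early):
//
//   _degenerated_outside_cycle: reset -> init mark -> final mark -> evac -> update-refs
//   _degenerated_mark:                                final mark -> evac -> update-refs
//   _degenerated_evac:                                              evac -> update-refs
//   _degenerated_updaterefs:                                                update-refs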
1685
1686 case _degenerated_outside_cycle:
1687 // We have degenerated from outside the cycle, which means something is bad with
1688 // the heap, most probably heavy humongous fragmentation, or we are very low on free
1689 // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
1690 // we can do the most aggressive degen cycle, which includes processing references and
1691 // class unloading, unless those features are explicitly disabled.
1692 //
1693 // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
1694 // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
1695 set_process_references(heuristics()->can_process_references());
1696 set_unload_classes(heuristics()->can_unload_classes());
1697
1698 if (heuristics()->can_do_traversal_gc()) {
1699 // Traversal heuristics in use: not possible to degenerate from here, upgrade to Full GC right away.
1700 cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1701 op_degenerated_fail();
1702 return;
1703 }
1704
1705 op_reset();
1706
1707 op_init_mark();
1708 if (cancelled_gc()) {
1709 op_degenerated_fail();
1710 return;
1711 }
1712
1713 case _degenerated_mark:
1714 op_final_mark();
1715 if (cancelled_gc()) {
1716 op_degenerated_fail();
1717 return;
1718 }
1719
1720 op_cleanup();
1721
1722 case _degenerated_evac:
1723 // If the heuristics decided we should do the full cycle, this flag is set,
1724 // and we can do evacuation. Otherwise, this is the shortcut cycle.
1725 if (is_evacuation_in_progress()) {
1726
1727 // Degeneration under the oom-evac protocol might have left some objects in
1728 // the collection set un-evacuated. Restart evacuation from the beginning to
1729 // capture all objects. For objects that are already evacuated, this is
1730 // a simple check, which is supposed to be fast. This is also safe to do
1731 // even without degeneration, as the CSet iterator is at the beginning
1732 // in preparation for evacuation anyway.
1733 //
1734 // Before doing that, we need to make sure there are no cset-pinned regions.
1735 // These may appear if an allocation failure happened while evacuating an
1736 // about-to-be-pinned object: the oom-evac protocol left the object in the
1737 // collection set, and then the pin request reached the cset region. If we
1738 // continued the cycle here, we would trash the cset and the live objects in it.
1739 // To avoid that, we fail degeneration right away and slide into Full GC to recover.
1740
1741 {
1742 collection_set()->clear_current_index();
1743
1744 ShenandoahHeapRegion* r;
1745 while ((r = collection_set()->next()) != NULL) {
1746 if (r->is_pinned()) {
1747 cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1748 op_degenerated_fail();
1749 return;
1750 }
1751 }
1752
1753 collection_set()->clear_current_index();
1754 }
1755
1756 op_stw_evac();
1757 if (cancelled_gc()) {
1758 op_degenerated_fail();
1759 return;
1760 }
1761 }
1762
1763 // If the heuristics decided we should do the full cycle, this flag is set,
1764 // and we need to do update-refs. Otherwise, this is the shortcut cycle.
1765 if (has_forwarded_objects()) {
1766 op_init_updaterefs();
1767 if (cancelled_gc()) {
1768 op_degenerated_fail();
1769 return;
1770 }
1771 }
1772
1773 case _degenerated_updaterefs:
1774 if (has_forwarded_objects()) {
1775 op_final_updaterefs();
1776 if (cancelled_gc()) {
1777 op_degenerated_fail();
1778 return;
1779 }
1780 }
1781
1782 op_cleanup();
1783 break;
1784
1785 default:
1786 ShouldNotReachHere();
1787 }
1788
1789 if (ShenandoahVerify) {
1790 verifier()->verify_after_degenerated();
1791 }
1792
1793 if (VerifyAfterGC) {
1794 Universe::verify();
1795 }
1796
1797 metrics.snap_after();
1798
1799 // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1800 // because that probably means the heap is overloaded and/or fragmented.
1801 if (!metrics.is_good_progress()) {
1802 _progress_last_gc.unset();
1803 cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1804 op_degenerated_futile();
1805 } else {
1806 _progress_last_gc.set();
1807 }
1808 }
1809
1810 void ShenandoahHeap::op_degenerated_fail() {
1811 log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1812 shenandoah_policy()->record_degenerated_upgrade_to_full();
1813 op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1814 }
1815
1816 void ShenandoahHeap::op_degenerated_futile() {
1817 shenandoah_policy()->record_degenerated_upgrade_to_full();
1818 op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1819 }
1820
1821 void ShenandoahHeap::stop_concurrent_marking() {
1822 assert(is_concurrent_mark_in_progress(), "How else could we get here?");
1823 set_concurrent_mark_in_progress(false);
1824 if (!cancelled_gc()) {
1825 // Marking finished without cancellation: references to forwarded objects were
1826 // fixed up during marking, so drop the flag and publish the complete marking context.
1827 set_has_forwarded_objects(false);
1828 mark_complete_marking_context();
1829 }
1830 }
1831
1832 void ShenandoahHeap::force_satb_flush_all_threads() {
1833 if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
1834 // No need to flush SATBs
1835 return;
1836 }
1837
1838 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1839 ShenandoahThreadLocalData::set_force_satb_flush(t, true);
1840 }
1841 // The threads are not "acquiring" their thread-local data, but it does not
1842 // hurt to "release" the updates here anyway.
1843 OrderAccess::fence();
1844 }
1845
1846 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1847 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1848 ShenandoahThreadLocalData::set_gc_state(t, state);
1849 }
1850 }
1851
1852 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1853 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1854 _gc_state.set_cond(mask, value);
1855 set_gc_state_all_threads(_gc_state.raw_value());
1856 }
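// A sketch of how the broadcast state is consumed on the barrier side
// (accessor names assumed for illustration; the actual fast-path checks live
// in the barrier set code, not here):
//
//   char state = ShenandoahThreadLocalData::gc_state(thread);
//   if ((state & MARKING) != 0)    { /* SATB pre-write barrier is armed */ }
//   if ((state & EVACUATION) != 0) { /* load-reference barrier may copy */ }
//
// Keeping a per-thread copy lets barriers test a thread-local byte instead
// of loading the global _gc_state on every access.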
1857
1858 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1859 if (has_forwarded_objects()) {
1860 set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
1861 } else {
1862 set_gc_state_mask(MARKING, in_progress);
1863 }
1864 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1865 }
1866
1867 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1868 set_gc_state_mask(TRAVERSAL | HAS_FORWARDED | UPDATEREFS, in_progress);
1869 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1870 }
1871
1872 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1873 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1874 set_gc_state_mask(EVACUATION, in_progress);
1875 }
1876
1877 void ShenandoahHeap::ref_processing_init() {
1878 assert(_max_workers > 0, "Sanity");
1879
1880 _ref_processor =
1881 new ReferenceProcessor(&_subject_to_discovery, // is_subject_to_discovery
1882 ParallelRefProcEnabled, // MT processing
1883 _max_workers, // Degree of MT processing
1884 true, // MT discovery
1885 _max_workers, // Degree of MT discovery
1886 false, // Reference discovery is not atomic
1887 NULL, // No closure, should be installed before use
1888 true); // Scale worker threads
1889
1890 shenandoah_assert_rp_isalive_not_installed();
1891 }
1892
1893 GCTracer* ShenandoahHeap::tracer() {
1894 return shenandoah_policy()->tracer();
1895 }
1896
1897 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1898 return _free_set->used();
1899 }
1900
1901 bool ShenandoahHeap::try_cancel_gc() {
1902 while (true) {
1903 jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1904 if (prev == CANCELLABLE) return true;
1905 else if (prev == CANCELLED) return false;
1906 assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
1907 assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
1908 {
1909 // We need to provide a safepoint here, otherwise we might
1910 // spin forever if a SP is pending.
1911 ThreadBlockInVM sp(JavaThread::current());
1912 SpinPause();
1913 }
1914 }
1915 }
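// The cancellation flag above acts as a small state machine (a summary of the
// loop, with states defined elsewhere in this class):
//
//   CANCELLABLE   --cmpxchg--> CANCELLED : we won the race, GC is cancelled
//   CANCELLED                            : someone else cancelled first
//   NOT_CANCELLED                        : only with suspendible workers;
//                                          spin, yielding to pending safepoints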
1916
1917 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1918 if (try_cancel_gc()) {
1919 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1920 log_info(gc)("%s", msg.buffer());
1921 Events::log(Thread::current(), "%s", msg.buffer());
1922 }
1923 }
1924
1925 uint ShenandoahHeap::max_workers() {
1926 return _max_workers;
1927 }
1928
1929 void ShenandoahHeap::stop() {
1930 // The shutdown sequence should be able to terminate when GC is running.
1931
1932 // Step 0. Notify policy to disable event recording.
1933 _shenandoah_policy->record_shutdown();
1934
1935 // Step 1. Notify control thread that we are in shutdown.
1936 // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1937 // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1938 control_thread()->prepare_for_graceful_shutdown();
1939
1940 // Step 2. Notify GC workers that we are cancelling GC.
1941 cancel_gc(GCCause::_shenandoah_stop_vm);
1942
1943 // Step 3. Wait until GC worker exits normally.
1944 control_thread()->stop();
1945
1946 // Step 4. Stop String Dedup thread if it is active.
1947 if (ShenandoahStringDedup::is_enabled()) {
1948 ShenandoahStringDedup::stop();
1949 }
1950 }
1951
1952 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1953 assert(heuristics()->can_unload_classes(), "Class unloading should be enabled");
1954
1955 ShenandoahGCPhase root_phase(full_gc ?
1956 ShenandoahPhaseTimings::full_gc_purge :
1957 ShenandoahPhaseTimings::purge);
1958
1959 ShenandoahIsAliveSelector alive;
1960 BoolObjectClosure* is_alive = alive.is_alive_closure();
1961
1962 bool purged_class;
1963
1964 // Unload classes and purge SystemDictionary.
1965 {
1966 ShenandoahGCPhase phase(full_gc ?
1967 ShenandoahPhaseTimings::full_gc_purge_class_unload :
1968 ShenandoahPhaseTimings::purge_class_unload);
1969 purged_class = SystemDictionary::do_unloading(gc_timer());
1970 }
1971
1972 {
1973 ShenandoahGCPhase phase(full_gc ?
1974 ShenandoahPhaseTimings::full_gc_purge_par :
1975 ShenandoahPhaseTimings::purge_par);
1976 uint active = _workers->active_workers();
1977 ParallelCleaningTask unlink_task(is_alive, active, purged_class, true);
1978 _workers->run_task(&unlink_task);
1979 }
1980
1981 {
1982 ShenandoahGCPhase phase(full_gc ?
1983 ShenandoahPhaseTimings::full_gc_purge_cldg :
1984 ShenandoahPhaseTimings::purge_cldg);
1985 ClassLoaderDataGraph::purge();
1986 }
1987 }
1988
1989 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1990 set_gc_state_mask(HAS_FORWARDED, cond);
1991 }
1992
1993 void ShenandoahHeap::set_process_references(bool pr) {
1994 _process_references.set_cond(pr);
1995 }
1996
1997 void ShenandoahHeap::set_unload_classes(bool uc) {
1998 _unload_classes.set_cond(uc);
1999 }
2000
2001 bool ShenandoahHeap::process_references() const {
2002 return _process_references.is_set();
2003 }
2004
2005 bool ShenandoahHeap::unload_classes() const {
2006 return _unload_classes.is_set();
2007 }
2008
2009 address ShenandoahHeap::in_cset_fast_test_addr() {
2010 ShenandoahHeap* heap = ShenandoahHeap::heap();
2011 assert(heap->collection_set() != NULL, "Sanity");
2012 return (address) heap->collection_set()->biased_map_address();
2013 }
2014
2015 address ShenandoahHeap::cancelled_gc_addr() {
2016 return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
2017 }
2018
2019 address ShenandoahHeap::gc_state_addr() {
2020 return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2021 }
2022
2023 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2024 return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2025 }
2026
2027 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2028 OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2029 }
2030
2031 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2032 _degenerated_gc_in_progress.set_cond(in_progress);
2033 }
2034
2035 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2036 _full_gc_in_progress.set_cond(in_progress);
2037 }
2038
2039 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2040 assert (is_full_gc_in_progress(), "should be");
2041 _full_gc_move_in_progress.set_cond(in_progress);
2042 }
2043
2044 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2045 set_gc_state_mask(UPDATEREFS, in_progress);
2046 }
2047
2048 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2049 ShenandoahCodeRoots::add_nmethod(nm);
2050 }
2051
2052 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2053 ShenandoahCodeRoots::remove_nmethod(nm);
2054 }
2055
2056 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2057 ShenandoahHeapLocker locker(lock());
2058 heap_region_containing(o)->make_pinned();
2059 return o;
2060 }
2061
2062 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2063 ShenandoahHeapLocker locker(lock());
2064 heap_region_containing(o)->make_unpinned();
2065 }
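// Pinned regions must never end up in the collection set; see the cset-pinned
// discussion in op_degenerated() above for how a pin racing with evacuation
// forces an upgrade to Full GC.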
2066
2067 GCTimer* ShenandoahHeap::gc_timer() const {
2068 return _gc_timer;
2069 }
2070
2071 #ifdef ASSERT
2072 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2073 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2074
2075 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2076 if (UseDynamicNumberOfGCThreads ||
2077 (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2078 assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2079 } else {
2080 // Use ParallelGCThreads inside safepoints
2081 assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2082 }
2083 } else {
2084 if (UseDynamicNumberOfGCThreads ||
2085 (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2086 assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2087 } else {
2088 // Use ConcGCThreads outside safepoints
2089 assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2090 }
2091 }
2092 }
2093 #endif
2094
2095 ShenandoahVerifier* ShenandoahHeap::verifier() {
2096 guarantee(ShenandoahVerify, "Should be enabled");
2097 assert (_verifier != NULL, "sanity");
2098 return _verifier;
2099 }
2100
2101 template<class T>
2102 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2103 private:
2104 T cl;
2105 ShenandoahHeap* _heap;
2106 ShenandoahRegionIterator* _regions;
2107 bool _concurrent;
2108 public:
2109 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2110 AbstractGangTask("Concurrent Update References Task"),
2111 cl(T()),
2112 _heap(ShenandoahHeap::heap()),
2113 _regions(regions),
2114 _concurrent(concurrent) {
2115 }
2116
2117 void work(uint worker_id) {
2118 if (_concurrent) {
2119 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2120 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2121 do_work();
2122 } else {
2123 ShenandoahParallelWorkerSession worker_session(worker_id);
2124 do_work();
2125 }
2126 }
2127
2128 private:
2129 void do_work() {
2130 ShenandoahHeapRegion* r = _regions->next();
2131 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2132 while (r != NULL) {
2133 HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
2134 assert (top_at_start_ur >= r->bottom(), "sanity");
2135 if (r->is_active() && !r->is_cset()) {
2136 _heap->marked_object_oop_iterate(r, &cl, top_at_start_ur);
2137 }
2138 if (ShenandoahPacing) {
2139 _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
2140 }
2141 if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2142 return;
2143 }
2144 r = _regions->next();
2145 }
2146 }
2147 };
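// Work distribution note: each worker repeatedly claims the next region from
// the shared ShenandoahRegionIterator (an atomic cursor), so regions are
// handed out dynamically rather than pre-partitioned per worker.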
2148
2149 void ShenandoahHeap::update_heap_references(bool concurrent) {
2150 ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2151 workers()->run_task(&task);
2152 }
2153
2154 void ShenandoahHeap::op_init_updaterefs() {
2155 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2156
2157 set_evacuation_in_progress(false);
2158
2159 retire_and_reset_gclabs();
2160
2161 if (ShenandoahVerify) {
2162 if (!is_degenerated_gc_in_progress()) {
2163 verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
2164 }
2165 verifier()->verify_before_updaterefs();
2166 }
2167
2168 set_update_refs_in_progress(true);
2169 make_parsable(true);
2170 for (uint i = 0; i < num_regions(); i++) {
2171 ShenandoahHeapRegion* r = get_region(i);
2172 r->set_concurrent_iteration_safe_limit(r->top());
2173 }
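// Allocations from here on land above the recorded safe limit. Such objects
// are allocated in to-space with already-updated contents, so the update-refs
// iteration can safely skip them.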
2174
2175 // Reset iterator.
2176 _update_refs_iterator.reset();
2177
2178 if (ShenandoahPacing) {
2179 pacer()->setup_for_updaterefs();
2180 }
2181 }
2182
2183 void ShenandoahHeap::op_final_updaterefs() {
2184 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2185
2186 // Check if there is left-over work, and finish it
2187 if (_update_refs_iterator.has_next()) {
2188 ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2189
2190 // Finish updating references where we left off.
2191 clear_cancelled_gc();
2192 update_heap_references(false);
2193 }
2194
2195 // Clear cancelled GC, if set. On cancellation path, the block before would handle
2196 // everything. On degenerated paths, cancelled gc would not be set anyway.
2197 if (cancelled_gc()) {
2198 clear_cancelled_gc();
2199 }
2200 assert(!cancelled_gc(), "Should have been done right before");
2201
2202 if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
2203 verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
2204 }
2205
2206 if (is_degenerated_gc_in_progress()) {
2207 concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
2208 } else {
2209 concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2210 }
2211
2212 // Has to be done before the cset is cleared
2213 if (ShenandoahVerify) {
2214 verifier()->verify_roots_in_to_space();
2215 }
2216
2217 ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2218
2219 trash_cset_regions();
2220 set_has_forwarded_objects(false);
2221 set_update_refs_in_progress(false);
2222
2223 if (ShenandoahVerify) {
2224 verifier()->verify_after_updaterefs();
2225 }
2226
2227 if (VerifyAfterGC) {
2228 Universe::verify();
2229 }
2230
2231 {
2232 ShenandoahHeapLocker locker(lock());
2233 _free_set->rebuild();
2234 }
2235 }
2236
2237 #ifdef ASSERT
2238 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2239 _lock.assert_owned_by_current_thread();
2240 }
2241
2242 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2243 _lock.assert_not_owned_by_current_thread();
2244 }
2245
2246 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2247 _lock.assert_owned_by_current_thread_or_safepoint();
2248 }
2249 #endif
2250
2251 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2252 print_on(st);
2253 print_heap_regions_on(st);
2254 }
2255
2256 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2257 size_t slice = r->region_number() / _bitmap_regions_per_slice;
2258
2259 size_t regions_from = _bitmap_regions_per_slice * slice;
2260 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2261 for (size_t g = regions_from; g < regions_to; g++) {
2262 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2263 if (skip_self && g == r->region_number()) continue;
2264 if (get_region(g)->is_committed()) {
2265 return true;
2266 }
2267 }
2268 return false;
2269 }
2270
2271 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2272 assert_heaplock_owned_by_current_thread();
2273
2274 // Bitmaps in special regions do not need commits
2275 if (_bitmap_region_special) {
2276 return true;
2277 }
2278
2279 if (is_bitmap_slice_committed(r, true)) {
2280 // Some other region from the group is already committed, meaning the bitmap
2281 // slice is already committed; exit right away.
2282 return true;
2283 }
2284
2285 // Commit the bitmap slice:
2286 size_t slice = r->region_number() / _bitmap_regions_per_slice;
2287 size_t off = _bitmap_bytes_per_slice * slice;
2288 size_t len = _bitmap_bytes_per_slice;
2289 if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
2290 return false;
2291 }
2292 return true;
2293 }
2294
2295 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2296 assert_heaplock_owned_by_current_thread();
2297
2298 // Bitmaps in special regions do not need uncommits
2299 if (_bitmap_region_special) {
2300 return true;
2301 }
2302
2303 if (is_bitmap_slice_committed(r, true)) {
2304 // Some other region from the group is still committed, meaning the bitmap
2305 // slice should stay committed; exit right away.
2306 return true;
2307 }
2308
2309 // Uncommit the bitmap slice:
2310 size_t slice = r->region_number() / _bitmap_regions_per_slice;
2311 size_t off = _bitmap_bytes_per_slice * slice;
2312 size_t len = _bitmap_bytes_per_slice;
2313 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2314 return false;
2315 }
2316 return true;
2317 }
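// Worked example of the slice arithmetic above, with illustrative numbers:
// if _bitmap_regions_per_slice == 8, region 19 maps to slice 19 / 8 == 2,
// which covers regions 16..23 and occupies the bitmap byte range
// [2 * _bitmap_bytes_per_slice, 3 * _bitmap_bytes_per_slice). A slice may be
// committed or uncommitted only when no other region in 16..23 keeps it
// alive, which is what is_bitmap_slice_committed(r, true) checks.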
2318
2319 void ShenandoahHeap::safepoint_synchronize_begin() {
2320 if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2321 SuspendibleThreadSet::synchronize();
2322 }
2323 }
2324
2325 void ShenandoahHeap::safepoint_synchronize_end() {
2326 if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2327 SuspendibleThreadSet::desynchronize();
2328 }
2329 }
2330
2331 void ShenandoahHeap::vmop_entry_init_mark() {
2332 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2333 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2334 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2335
2336 try_inject_alloc_failure();
2337 VM_ShenandoahInitMark op;
2338 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2339 }
2340
2341 void ShenandoahHeap::vmop_entry_final_mark() {
2342 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2343 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2344 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2345
2346 try_inject_alloc_failure();
2347 VM_ShenandoahFinalMarkStartEvac op;
2348 VMThread::execute(&op); // jump to entry_final_mark under safepoint
2349 }
2350
2351 void ShenandoahHeap::vmop_entry_final_evac() {
2352 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2353 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2354 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2355
2356 VM_ShenandoahFinalEvac op;
2357 VMThread::execute(&op); // jump to entry_final_evac under safepoint
2358 }
2359
2360 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2361 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2362 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2363 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2364
2365 try_inject_alloc_failure();
2366 VM_ShenandoahInitUpdateRefs op;
2367 VMThread::execute(&op);
2368 }
2369
2370 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2371 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2372 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2373 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2374
2375 try_inject_alloc_failure();
2376 VM_ShenandoahFinalUpdateRefs op;
2377 VMThread::execute(&op);
2378 }
2379
2380 void ShenandoahHeap::vmop_entry_init_traversal() {
2381 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2382 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2383 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2384
2385 try_inject_alloc_failure();
2386 VM_ShenandoahInitTraversalGC op;
2387 VMThread::execute(&op);
2388 }
2389
2390 void ShenandoahHeap::vmop_entry_final_traversal() {
2391 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2392 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2393 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2394
2395 try_inject_alloc_failure();
2396 VM_ShenandoahFinalTraversalGC op;
2397 VMThread::execute(&op);
2398 }
2399
2400 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2401 TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2402 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2403 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2404
2405 try_inject_alloc_failure();
2406 VM_ShenandoahFullGC op(cause);
2407 VMThread::execute(&op);
2408 }
2409
2410 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2411 TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2412 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2413 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2414
2415 VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2416 VMThread::execute(&degenerated_gc);
2417 }
2418
2419 void ShenandoahHeap::entry_init_mark() {
2420 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2421 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2422 const char* msg = init_mark_event_message();
2423 GCTraceTime(Info, gc) time(msg, gc_timer());
2424 EventMark em("%s", msg);
2425
2426 ShenandoahWorkerScope scope(workers(),
2427 ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2428 "init marking");
2429
2430 op_init_mark();
2431 }
2432
2433 void ShenandoahHeap::entry_final_mark() {
2434 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2435 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2436 const char* msg = final_mark_event_message();
2437 GCTraceTime(Info, gc) time(msg, gc_timer());
2438 EventMark em("%s", msg);
2439
2440 ShenandoahWorkerScope scope(workers(),
2441 ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2442 "final marking");
2443
2444 op_final_mark();
2445 }
2446
2447 void ShenandoahHeap::entry_final_evac() {
2448 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2449 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2450 static const char* msg = "Pause Final Evac";
2451 GCTraceTime(Info, gc) time(msg, gc_timer());
2452 EventMark em("%s", msg);
2453
2454 op_final_evac();
2455 }
2456
2457 void ShenandoahHeap::entry_init_updaterefs() {
2458 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2459 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2460
2461 static const char* msg = "Pause Init Update Refs";
2462 GCTraceTime(Info, gc) time(msg, gc_timer());
2463 EventMark em("%s", msg);
2464
2465 // No workers used in this phase, no setup required
2466
2467 op_init_updaterefs();
2468 }
2469
2470 void ShenandoahHeap::entry_final_updaterefs() {
2471 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2472 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2473
2474 static const char* msg = "Pause Final Update Refs";
2475 GCTraceTime(Info, gc) time(msg, gc_timer());
2476 EventMark em("%s", msg);
2477
2478 ShenandoahWorkerScope scope(workers(),
2479 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2480 "final reference update");
2481
2482 op_final_updaterefs();
2483 }
2484
2485 void ShenandoahHeap::entry_init_traversal() {
2486 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2487 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2488
2489 static const char* msg = "Pause Init Traversal";
2490 GCTraceTime(Info, gc) time(msg, gc_timer());
2491 EventMark em("%s", msg);
2492
2493 ShenandoahWorkerScope scope(workers(),
2494 ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
2495 "init traversal");
2496
2497 op_init_traversal();
2498 }
2499
2500 void ShenandoahHeap::entry_final_traversal() {
2501 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2502 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2503
2504 static const char* msg = "Pause Final Traversal";
2505 GCTraceTime(Info, gc) time(msg, gc_timer());
2506 EventMark em("%s", msg);
2507
2508 ShenandoahWorkerScope scope(workers(),
2509 ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
2510 "final traversal");
2511
2512 op_final_traversal();
2513 }
2514
2515 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2516 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2517 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2518
2519 static const char* msg = "Pause Full";
2520 GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
2521 EventMark em("%s", msg);
2522
2523 ShenandoahWorkerScope scope(workers(),
2524 ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2525 "full gc");
2526
2527 op_full(cause);
2528 }
2529
2530 void ShenandoahHeap::entry_degenerated(int point) {
2531 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2532 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2533
2534 ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2535 const char* msg = degen_event_message(dpoint);
2536 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2537 EventMark em("%s", msg);
2538
2539 ShenandoahWorkerScope scope(workers(),
2540 ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
2541 "stw degenerated gc");
2542
2543 set_degenerated_gc_in_progress(true);
2544 op_degenerated(dpoint);
2545 set_degenerated_gc_in_progress(false);
2546 }
2547
2548 void ShenandoahHeap::entry_mark() {
2549 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2550
2551 const char* msg = conc_mark_event_message();
2552 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2553 EventMark em("%s", msg);
2554
2555 ShenandoahWorkerScope scope(workers(),
2556 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
2557 "concurrent marking");
2558
2559 try_inject_alloc_failure();
2560 op_mark();
2561 }
2562
2563 void ShenandoahHeap::entry_evac() {
2564 ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2565 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2566
2567 static const char* msg = "Concurrent evacuation";
2568 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2569 EventMark em("%s", msg);
2570
2571 ShenandoahWorkerScope scope(workers(),
2572 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
2573 "concurrent evacuation");
2574
2575 try_inject_alloc_failure();
2576 op_conc_evac();
2577 }
2578
2579 void ShenandoahHeap::entry_updaterefs() {
2580 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2581
2582 static const char* msg = "Concurrent update references";
2583 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2584 EventMark em("%s", msg);
2585
2586 ShenandoahWorkerScope scope(workers(),
2587 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
2588 "concurrent reference update");
2589
2590 try_inject_alloc_failure();
2591 op_updaterefs();
2592 }
2593
2594 void ShenandoahHeap::entry_roots() {
2595 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_roots);
2596
2597 static const char* msg = "Concurrent roots processing";
2598 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2599 EventMark em("%s", msg);
2600
2601 ShenandoahWorkerScope scope(workers(),
2602 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
2603 "concurrent root processing");
2604
2605 try_inject_alloc_failure();
2606 op_roots();
2607 }
2608
2609 void ShenandoahHeap::entry_cleanup() {
2610 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2611
2612 static const char* msg = "Concurrent cleanup";
2613 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2614 EventMark em("%s", msg);
2615
2616 // This phase does not use workers, no need for setup
2617
2618 try_inject_alloc_failure();
2619 op_cleanup();
2620 }
2621
2622 void ShenandoahHeap::entry_reset() {
2623 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);
2624
2625 static const char* msg = "Concurrent reset";
2626 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2627 EventMark em("%s", msg);
2628
2629 ShenandoahWorkerScope scope(workers(),
2630 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
2631 "concurrent reset");
2632
2633 try_inject_alloc_failure();
2634 op_reset();
2635 }
2636
2637 void ShenandoahHeap::entry_preclean() {
2638 if (ShenandoahPreclean && process_references()) {
2639 static const char* msg = "Concurrent precleaning";
2640 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2641 EventMark em("%s", msg);
2642
2643 ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2644
2645 ShenandoahWorkerScope scope(workers(),
2646 ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
2647 "concurrent preclean",
2648 /* check_workers = */ false);
2649
2650 try_inject_alloc_failure();
2651 op_preclean();
2652 }
2653 }
2654
2655 void ShenandoahHeap::entry_traversal() {
2656 static const char* msg = "Concurrent traversal";
2657 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2658 EventMark em("%s", msg);
2659
2660 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2661
2662 ShenandoahWorkerScope scope(workers(),
2663 ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(),
2664 "concurrent traversal");
2665
2666 try_inject_alloc_failure();
2667 op_traversal();
2668 }
2669
2670 void ShenandoahHeap::entry_uncommit(double shrink_before) {
2671 static const char *msg = "Concurrent uncommit";
2672 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2673 EventMark em("%s", msg);
2674
2675 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);
2676
2677 op_uncommit(shrink_before);
2678 }
2679
2680 void ShenandoahHeap::try_inject_alloc_failure() {
2681 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2682 _inject_alloc_failure.set();
2683 os::naked_short_sleep(1);
2684 if (cancelled_gc()) {
2685 log_info(gc)("Allocation failure was successfully injected");
2686 }
2687 }
2688 }
2689
2690 bool ShenandoahHeap::should_inject_alloc_failure() {
2691 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2692 }
2693
2694 void ShenandoahHeap::initialize_serviceability() {
2695 _memory_pool = new ShenandoahMemoryPool(this);
2696 _cycle_memory_manager.add_pool(_memory_pool);
2697 _stw_memory_manager.add_pool(_memory_pool);
2698 }
2699
2700 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2701 GrowableArray<GCMemoryManager*> memory_managers(2);
2702 memory_managers.append(&_cycle_memory_manager);
2703 memory_managers.append(&_stw_memory_manager);
2704 return memory_managers;
2705 }
2706
2707 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2708 GrowableArray<MemoryPool*> memory_pools(1);
2709 memory_pools.append(_memory_pool);
2710 return memory_pools;
2711 }
2712
2713 MemoryUsage ShenandoahHeap::memory_usage() {
2714 return _memory_pool->get_memory_usage();
2715 }
2716
2717 void ShenandoahHeap::enter_evacuation() {
2718 _oom_evac_handler.enter_evacuation();
2719 }
2720
2721 void ShenandoahHeap::leave_evacuation() {
2722 _oom_evac_handler.leave_evacuation();
2723 }
2724
2725 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2726 _heap(ShenandoahHeap::heap()),
2727 _index(0) {}
2728
2729 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2730 _heap(heap),
2731 _index(0) {}
2732
2733 void ShenandoahRegionIterator::reset() {
2734 _index = 0;
2735 }
2736
2737 bool ShenandoahRegionIterator::has_next() const {
2738 return _index < _heap->num_regions();
2739 }
2740
2741 char ShenandoahHeap::gc_state() const {
2742 return _gc_state.raw_value();
2743 }
2744
2745 void ShenandoahHeap::deduplicate_string(oop str) {
2746 assert(java_lang_String::is_instance(str), "invariant");
2747
2748 if (ShenandoahStringDedup::is_enabled()) {
2749 ShenandoahStringDedup::deduplicate(str);
2750 }
2751 }
2752
2753 const char* ShenandoahHeap::init_mark_event_message() const {
2754 bool update_refs = has_forwarded_objects();
2755 bool proc_refs = process_references();
2756 bool unload_cls = unload_classes();
2757
2758 if (update_refs && proc_refs && unload_cls) {
2759 return "Pause Init Mark (update refs) (process weakrefs) (unload classes)";
2760 } else if (update_refs && proc_refs) {
2761 return "Pause Init Mark (update refs) (process weakrefs)";
2762 } else if (update_refs && unload_cls) {
2763 return "Pause Init Mark (update refs) (unload classes)";
2764 } else if (proc_refs && unload_cls) {
2765 return "Pause Init Mark (process weakrefs) (unload classes)";
2766 } else if (update_refs) {
2767 return "Pause Init Mark (update refs)";
2768 } else if (proc_refs) {
2769 return "Pause Init Mark (process weakrefs)";
2770 } else if (unload_cls) {
2771 return "Pause Init Mark (unload classes)";
2772 } else {
2773 return "Pause Init Mark";
2774 }
2775 }
2776
2777 const char* ShenandoahHeap::final_mark_event_message() const {
2778 bool update_refs = has_forwarded_objects();
2779 bool proc_refs = process_references();
2780 bool unload_cls = unload_classes();
2781
2782 if (update_refs && proc_refs && unload_cls) {
2783 return "Pause Final Mark (update refs) (process weakrefs) (unload classes)";
2784 } else if (update_refs && proc_refs) {
2785 return "Pause Final Mark (update refs) (process weakrefs)";
2786 } else if (update_refs && unload_cls) {
2787 return "Pause Final Mark (update refs) (unload classes)";
2788 } else if (proc_refs && unload_cls) {
2789 return "Pause Final Mark (process weakrefs) (unload classes)";
2790 } else if (update_refs) {
2791 return "Pause Final Mark (update refs)";
2792 } else if (proc_refs) {
2793 return "Pause Final Mark (process weakrefs)";
2794 } else if (unload_cls) {
2795 return "Pause Final Mark (unload classes)";
2796 } else {
2797 return "Pause Final Mark";
2798 }
2799 }
2800
2801 const char* ShenandoahHeap::conc_mark_event_message() const {
2802 bool update_refs = has_forwarded_objects();
2803 bool proc_refs = process_references();
2804 bool unload_cls = unload_classes();
2805
2806 if (update_refs && proc_refs && unload_cls) {
2807 return "Concurrent marking (update refs) (process weakrefs) (unload classes)";
2808 } else if (update_refs && proc_refs) {
2809 return "Concurrent marking (update refs) (process weakrefs)";
2810 } else if (update_refs && unload_cls) {
2811 return "Concurrent marking (update refs) (unload classes)";
2812 } else if (proc_refs && unload_cls) {
2813 return "Concurrent marking (process weakrefs) (unload classes)";
2814 } else if (update_refs) {
2815 return "Concurrent marking (update refs)";
2816 } else if (proc_refs) {
2817 return "Concurrent marking (process weakrefs)";
2818 } else if (unload_cls) {
2819 return "Concurrent marking (unload classes)";
2820 } else {
2821 return "Concurrent marking";
2822 }
2823 }
2824
2825 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
2826 switch (point) {
2827 case _degenerated_unset:
2828 return "Pause Degenerated GC (<UNSET>)";
2829 case _degenerated_traversal:
2830 return "Pause Degenerated GC (Traversal)";
2831 case _degenerated_outside_cycle:
2832 return "Pause Degenerated GC (Outside of Cycle)";
2833 case _degenerated_mark:
2834 return "Pause Degenerated GC (Mark)";
2835 case _degenerated_evac:
2836 return "Pause Degenerated GC (Evacuation)";
2837 case _degenerated_updaterefs:
2838 return "Pause Degenerated GC (Update Refs)";
2839 default:
2840 ShouldNotReachHere();
2841 return "ERROR";
2842 }
2843 }
2844
2845 jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2846 #ifdef ASSERT
2847 assert(_liveness_cache != NULL, "sanity");
2848 assert(worker_id < _max_workers, "sanity");
2849 for (uint i = 0; i < num_regions(); i++) {
2850 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2851 }
2852 #endif
2853 return _liveness_cache[worker_id];
2854 }
2855
2856 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2857 assert(worker_id < _max_workers, "sanity");
2858 assert(_liveness_cache != NULL, "sanity");
2859 jushort* ld = _liveness_cache[worker_id];
2860 for (uint i = 0; i < num_regions(); i++) {
2861 ShenandoahHeapRegion* r = get_region(i);
2862 jushort live = ld[i];
2863 if (live > 0) {
2864 r->increase_live_data_gc_words(live);
2865 ld[i] = 0;
2866 }
2867 }
2868 }
--- EOF ---