/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "gc/shared/plab.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "memory/metaspace.hpp"
#include "oops/compressedOops.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
#include "utilities/powerOfTwo.hpp"

ShenandoahHeap* ShenandoahHeap::_heap = NULL;

#ifdef ASSERT
template <class T>
void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (! CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    shenandoah_assert_not_forwarded(p, obj);
  }
}

void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_work(p); }
#endif

class ShenandoahPretouchHeapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      os::pretouch_memory(r->bottom(), r->end(), _page_size);
      r = _regions.next();
    }
  }
};

class ShenandoahPretouchBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  char* _bitmap_base;
  const size_t _bitmap_size;
  const size_t _page_size;
public:
  ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Bitmap"),
    _bitmap_base(bitmap_base),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);

      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  if (AlwaysPreTouch) {
    // Enabled pre-touch means the entire heap is committed right away.
    init_byte_size = max_byte_size;
  }

  Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();

  // Now that we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
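  // Note (illustrative, platform-dependent): on Linux/x86_64, os::vm_page_size() is
  // typically 4K, and os::large_page_size() with -XX:+UseLargePages is typically 2M.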

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));

#if SHENANDOAH_OPTIMIZED_OBJTASK
  // The optimized ObjArrayChunkedTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ObjArrayChunkedTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                p2i(heap_rs.base()), p2i(heap_rs.end()), ObjArrayChunkedTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }
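
  // Worked example (illustrative, assuming the default 8-byte minimum object alignment,
  // which makes MarkBitMap::heap_map_factor() == 64): a 32M region needs 32M / 64 = 512K
  // of bitmap. With 4K pages, a slice is one region (512K per slice); with 2M large
  // pages, a slice is one page covering 2M / 512K = 4 regions.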

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
  region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());

  ReservedSpace region_storage(region_storage_size, region_page_size);
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
  {
    size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);

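    // Illustrative probe sequence (assuming 4K pages and allocation granularity): try to
    // reserve at 4K, 8K, 16K, ... up to 1G. Low addresses are usually unavailable, so the
    // first address the OS actually grants wins; the fallback below reserves anywhere.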
    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      ReservedSpace cset_rs(cset_size, os::vm_page_size(), false, req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == NULL) {
      ReservedSpace cset_rs(cset_size, os::vm_page_size(), false);
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before the initialize() call below zeroes it from the initializing thread. For any given
    // region, we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    size_t pretouch_heap_page_size = heap_page_size;
    size_t pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into huge ones. Therefore, we need to pretouch with smaller pages.
    if (UseTransparentHugePages) {
      pretouch_heap_page_size = (size_t)os::vm_page_size();
      pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.

    log_info(gc, init)("Pretouch bitmap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
                       _num_regions, pretouch_bitmap_page_size);
    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    log_info(gc, init)("Pretouch heap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
                       _num_regions, pretouch_heap_page_size);
    ShenandoahPretouchHeapTask hcl(pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahStringDedup::initialize();
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  _control_thread = new ShenandoahControlThread();

  log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
                     byte_size_in_proper_unit(_initial_size),  proper_unit_for_byte_size(_initial_size),
                     byte_size_in_proper_unit(_minimum_size),  proper_unit_for_byte_size(_minimum_size),
                     byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
  );

  log_info(gc, init)("Safepointing mechanism: thread-local poll");

  return JNI_OK;
}

void ShenandoahHeap::initialize_heuristics() {
  if (ShenandoahGCMode != NULL) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    ShouldNotReachHere();
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
  log_info(gc, init)("Shenandoah GC mode: %s",
                     _gc_mode->name());

  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
  log_info(gc, init)("Shenandoah heuristics: %s",
                     _heuristics->name());
}
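
// Example usage (illustrative): -XX:ShenandoahGCMode=satb selects the default SATB mode.
// Experimental modes (e.g. -XX:ShenandoahGCMode=iu) additionally require
// -XX:+UnlockExperimentalVMOptions, and diagnostic modes (e.g. -XX:ShenandoahGCMode=passive)
// require -XX:+UnlockDiagnosticVMOptions, per the checks above.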

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(NULL),
  _safepoint_workers(NULL),
  _heap_region_special(false),
  _num_regions(0),
  _regions(NULL),
  _update_refs_iterator(this),
  _control_thread(NULL),
  _shenandoah_policy(policy),
  _heuristics(NULL),
  _free_set(NULL),
  _scm(new ShenandoahConcurrentMark()),
  _full_gc(new ShenandoahMarkCompact()),
  _pacer(NULL),
  _verifier(NULL),
  _phase_timings(NULL),
  _monitoring_support(NULL),
  _memory_pool(NULL),
  _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
  _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _soft_ref_policy(),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(NULL),
  _marking_context(NULL),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(NULL),
  _collection_set(NULL)
{
  _heap = this;

  log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);

  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */ true,
                            /* are_ConcurrentGC_threads */ true);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelGCThreads,
                      /* are_GC_task_threads */ false,
                 /* are_ConcurrentGC_threads */ false);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s total, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects())                 st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())        st->print("marking, ");
  if (is_evacuation_in_progress())             st->print("evacuating, ");
  if (is_update_refs_in_progress())            st->print("updating refs, ");
  if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
  if (is_full_gc_in_progress())                st->print("full gc, ");
  if (is_full_gc_move_in_progress())           st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != NULL) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (NULL)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != NULL, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, because they cannot determine
  // their max_size. Instead, let the WorkGang initialize GCLABs as new workers are created.
  _workers->set_initialize_gclab();

  _scm->initialize(_max_workers);
  _full_gc->initialize(_gc_timer);

  ref_processing_init();

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  return Atomic::load_acquire(&_used);
}

size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::release_store_fence(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(&_used, bytes);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::op_uncommit(double shrink_before) {
  assert (ShenandoahUncommit, "should be enabled");

  // The application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that the application
  // keeps using the committed regions near the beginning. GC allocations are much less
  // frequent, and can therefore absorb the commit costs.

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        // Do not uncommit below minimal capacity
        if (committed() < min_capacity() + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record the new heuristic value even if we take a shortcut. This captures
  // the case when moderately-sized objects always take the shortcut. At some point,
  // the heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return NULL;
  }
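
  // Illustrative sizing (numbers are examples, not normative): with a current GCLAB of
  // 64K words, the doubled GCLAB is 128K words, clamped to [PLAB::min_size(), PLAB::max_size()].
  // A 200K-word object would still not fit, so the branch above returns NULL and the
  // object goes to shared allocation instead.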

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  assert (size <= actual_size, "allocation should fit");

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(gclab_buf, actual_size);
  } else {
    // ...and zap the just-allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = NULL;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // If allocation failed, block until the control thread reacts, then retry the allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress.
    //
    // Then, we need to make sure the allocation was retried after at least one
    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.

    size_t tries = 0;

    while (result == NULL && _progress_last_gc.is_set()) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }
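
    // To summarize the retry protocol above (descriptive note): the first loop retries for
    // as long as the previous GC made progress; the second bounds any further retries by
    // ShenandoahFullGCThreshold, which guarantees at least one Full GC had a chance to run
    // before we give up and return NULL.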

  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != NULL) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual*HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                        bool*  gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(NULL, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != NULL) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
  st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
  st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
  st->print_cr("SN=alloc sequence number");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;
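
  // Worked example (illustrative): with 32M regions, a 100M humongous object needs
  // required_regions = 4, so we walk indices [start->index() + 3 .. start->index()]
  // backwards, trashing each region.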

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the
    // trace log, because it expects every region to belong to a humongous range that starts
    // with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

class ShenandoahRetireGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
  }
};

void ShenandoahHeap::make_parsable(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
  }
  ShenandoahRetireGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
}

void ShenandoahHeap::resize_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootEvacuator* _rp;

public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahEvacuateUpdateRootsClosure<> cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
    _rp->roots_do(worker_id, &cl);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
  {
    // Include concurrent roots if the current cycle cannot process those roots concurrently
    ShenandoahRootEvacuator rp(workers()->active_workers(),
                               ShenandoahPhaseTimings::init_evac,
                               !ShenandoahConcurrentRoots::should_do_concurrent_roots(),
                               !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  if (ShenandoahElasticTLAB) {
    // With Elastic TLABs, return the max allowed size, and let the allocation path
    // figure out the safe size for current allocation.
    return ShenandoahHeapRegion::max_tlab_size_bytes();
  } else {
    return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    gclab->retire();
    if (ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::retire_and_reset_gclabs() {
  ShenandoahRetireAndResetGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  control_thread()->request_gc(cause);
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  if (r != NULL) {
    return r->block_start(addr);
  }
  return NULL;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
}

jlong ShenandoahHeap::millis_since_last_gc() {
  double v = heuristics()->time_since_last_gc() * 1000;
  assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
  return (jlong)v;
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    make_parsable(false);
  }
}

void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::print_worker_threads_on(st);
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  if (_safepoint_workers != NULL) {
    _safepoint_workers->threads_do(tcl);
  }
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();

    if (ShenandoahPacing) {
      pacer()->print_on(&ls);
    }

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  Stack<oop,mtGC>* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);

      assert(oopDesc::is_oop(obj), "must be a valid oop");
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);
        _oop_stack->push(obj);
      }
    }
  }
public:
  ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
    _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
 * control, we call ShenandoahHeap::make_parsable().
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}

/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
 * wiped the bitmap in preparation for next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return;
  }

  // Reset bitmap
  _aux_bit_map.clear();

  Stack<oop,mtGC> oop_stack;

  // First, we process GC roots according to current GC cycle. This populates the work stack with initial objects.
  ShenandoahHeapIterationRootScanner rp;
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);

  rp.roots_do(&oops);

  // Work through the oop stack to traverse heap.
  while (! oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");

  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}

// Keep alive an object that was loaded with AS_NO_KEEPALIVE.
void ShenandoahHeap::keep_alive(oop obj) {
  if (is_concurrent_mark_in_progress()) {
    ShenandoahBarrierSet::barrier_set()->enqueue(obj);
  }
}

void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* current = get_region(i);
    blk->heap_region_do(current);
  }
}

class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionClosure* const _blk;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

public:
  ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
          AbstractGangTask("Parallel Region Task"),
          _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    size_t stride = ShenandoahParallelRegionStride;
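    // Claim protocol (illustrative numbers): workers atomically claim chunks of
    // [cur, cur + stride) region indices via fetch_and_add on _index; e.g. with the
    // default stride of 1024, one worker may claim [0, 1024) while another claims
    // [1024, 2048), until the shared index passes num_regions().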

    size_t max = _heap->num_regions();
    while (_index < max) {
      size_t cur = Atomic::fetch_and_add(&_index, stride);
      size_t start = cur;
      size_t end = MIN2(cur + stride, max);
      if (start >= max) break;

      for (size_t i = cur; i < end; i++) {
        ShenandoahHeapRegion* current = _heap->get_region(i);
        _blk->heap_region_do(current);
      }
    }
  }
};

void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  assert(blk->is_thread_safe(), "Only thread-safe closures here");
  if (num_regions() > ShenandoahParallelRegionStride) {
    ShenandoahParallelHeapRegionTask task(blk);
    workers()->run_task(&task);
  } else {
    heap_region_iterate(blk);
  }
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have already updated it during
      // concurrent reset, so it is very likely we do not need another write here.
1409       if (_ctx->top_at_mark_start(r) != r->top()) {
1410         _ctx->capture_top_at_mark_start(r);
1411       }
1412     } else {
1413       assert(_ctx->top_at_mark_start(r) == r->top(),
1414              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1415     }
1416   }
1417 
1418   bool is_thread_safe() { return true; }
1419 };
1420 
1421 void ShenandoahHeap::op_init_mark() {
1422   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1423   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
1424 
1425   assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
1426   assert(!marking_context()->is_complete(), "should not be complete");
1427   assert(!has_forwarded_objects(), "No forwarded objects on this path");
1428 
1429   if (ShenandoahVerify) {
1430     verifier()->verify_before_concmark();
1431   }
1432 
1433   if (VerifyBeforeGC) {
1434     Universe::verify();
1435   }
1436 
1437   set_concurrent_mark_in_progress(true);
1438   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1439   {
1440     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1441     make_parsable(true);
1442   }
1443 
1444   {
1445     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
1446     ShenandoahInitMarkUpdateRegionStateClosure cl;
1447     parallel_heap_region_iterate(&cl);
1448   }
1449 
1450   // Make above changes visible to worker threads
1451   OrderAccess::fence();
1452 
1453   concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
1454 
1455   if (UseTLAB) {
1456     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1457     resize_tlabs();
1458   }
1459 
1460   if (ShenandoahPacing) {
1461     pacer()->setup_for_mark();
1462   }
1463 
  // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
  // we need to make sure that all its metadata are marked. The alternative is to remark
  // thread roots at the final mark pause, but that is a potential latency killer.
1467   if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1468     ShenandoahCodeRoots::arm_nmethods();
1469   }
1470 }
1471 
1472 void ShenandoahHeap::op_mark() {
1473   concurrent_mark()->mark_from_roots();
1474 }
1475 
1476 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1477 private:
1478   ShenandoahMarkingContext* const _ctx;
1479   ShenandoahHeapLock* const _lock;
1480 
1481 public:
1482   ShenandoahFinalMarkUpdateRegionStateClosure() :
1483     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1484 
1485   void heap_region_do(ShenandoahHeapRegion* r) {
1486     if (r->is_active()) {
1487       // All allocations past TAMS are implicitly live, adjust the region data.
1488       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1489       HeapWord *tams = _ctx->top_at_mark_start(r);
1490       HeapWord *top = r->top();
1491       if (top > tams) {
1492         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1493       }
1494 
      // We are about to select the collection set, so make sure it knows about the
      // current pinning status. This also allows trashing more regions whose pinning
      // status has now been dropped.
1498       if (r->is_pinned()) {
1499         if (r->pin_count() == 0) {
1500           ShenandoahHeapLocker locker(_lock);
1501           r->make_unpinned();
1502         }
1503       } else {
1504         if (r->pin_count() > 0) {
1505           ShenandoahHeapLocker locker(_lock);
1506           r->make_pinned();
1507         }
1508       }
1509 
      // Remember the limit for updating refs. It is guaranteed that no
      // from-space refs are written from here on.
1512       r->set_update_watermark_at_safepoint(r->top());
1513     } else {
1514       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1515       assert(_ctx->top_at_mark_start(r) == r->top(),
1516              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1517     }
1518   }
1519 
1520   bool is_thread_safe() { return true; }
1521 };
1522 
1523 void ShenandoahHeap::op_final_mark() {
1524   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1525   assert(!has_forwarded_objects(), "No forwarded objects on this path");
1526 
  // It is critical that we evacuate roots right after finishing marking,
  // so that we do not get unmarked objects in the roots.
1530 
1531   if (!cancelled_gc()) {
1532     concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
1533 
    // Marking is complete; deactivate the SATB barrier
1535     set_concurrent_mark_in_progress(false);
1536     mark_complete_marking_context();
1537 
1538     parallel_cleaning(false /* full gc*/);
1539 
1540     if (ShenandoahVerify) {
1541       verifier()->verify_roots_no_forwarded();
1542     }
1543 
1544     {
1545       ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_region_states);
1546       ShenandoahFinalMarkUpdateRegionStateClosure cl;
1547       parallel_heap_region_iterate(&cl);
1548 
1549       assert_pinned_region_status();
1550     }
1551 
    // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
    // This is needed for two reasons. The strong one: new allocations would come from the
    // new free set, which is outside the collection set, so no cset writes would happen
    // there. The weaker one: new allocations would happen past the update watermark, so
    // less work would be needed for reference updates (they would update the large filler
    // instead).
1557     {
1558       ShenandoahGCPhase phase(ShenandoahPhaseTimings::retire_tlabs);
1559       make_parsable(true);
1560     }
1561 
1562     {
1563       ShenandoahGCPhase phase(ShenandoahPhaseTimings::choose_cset);
1564       ShenandoahHeapLocker locker(lock());
1565       _collection_set->clear();
1566       heuristics()->choose_collection_set(_collection_set);
1567     }
1568 
1569     {
1570       ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);
1571       ShenandoahHeapLocker locker(lock());
1572       _free_set->rebuild();
1573     }
1574 
1575     if (!is_degenerated_gc_in_progress()) {
1576       prepare_concurrent_roots();
1577       prepare_concurrent_unloading();
1578     }
1579 
1580     // If collection set has candidates, start evacuation.
1581     // Otherwise, bypass the rest of the cycle.
1582     if (!collection_set()->is_empty()) {
1583       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1584 
1585       if (ShenandoahVerify) {
1586         verifier()->verify_before_evacuation();
1587       }
1588 
1589       set_evacuation_in_progress(true);
1590       // From here on, we need to update references.
1591       set_has_forwarded_objects(true);
1592 
1593       if (!is_degenerated_gc_in_progress()) {
1594         if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1595           ShenandoahCodeRoots::arm_nmethods();
1596         }
1597         evacuate_and_update_roots();
1598       }
1599 
1600       if (ShenandoahPacing) {
1601         pacer()->setup_for_evac();
1602       }
1603 
1604       if (ShenandoahVerify) {
1605         ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::None;
1606         if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
1607           types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots);
1608           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CLDGRoots);
1609           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::StringDedupRoots);
1610         }
1611 
1612         if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1613           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CodeRoots);
1614         }
1615         verifier()->verify_roots_no_forwarded_except(types);
1616         verifier()->verify_during_evacuation();
1617       }
1618     } else {
1619       if (ShenandoahVerify) {
1620         verifier()->verify_after_concmark();
1621       }
1622 
1623       if (VerifyAfterGC) {
1624         Universe::verify();
1625       }
1626     }
1627 
1628   } else {
1629     // If this cycle was updating references, we need to keep the has_forwarded_objects
1630     // flag on, for subsequent phases to deal with it.
1631     concurrent_mark()->cancel();
1632     set_concurrent_mark_in_progress(false);
1633 
1634     if (process_references()) {
1635       // Abandon reference processing right away: pre-cleaning must have failed.
1636       ReferenceProcessor *rp = ref_processor();
1637       rp->disable_discovery();
1638       rp->abandon_partial_discovery();
1639       rp->verify_no_references_recorded();
1640     }
1641   }
1642 }
1643 
1644 void ShenandoahHeap::op_conc_evac() {
1645   ShenandoahEvacuationTask task(this, _collection_set, true);
1646   workers()->run_task(&task);
1647 }
1648 
1649 void ShenandoahHeap::op_stw_evac() {
1650   ShenandoahEvacuationTask task(this, _collection_set, false);
1651   workers()->run_task(&task);
1652 }
1653 
1654 void ShenandoahHeap::op_updaterefs() {
1655   update_heap_references(true);
1656 }
1657 
1658 void ShenandoahHeap::op_cleanup_early() {
1659   free_set()->recycle_trash();
1660 }
1661 
1662 void ShenandoahHeap::op_cleanup_complete() {
1663   free_set()->recycle_trash();
1664 }
1665 
1666 class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
1667 private:
1668   ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
1669   ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
1670   ShenandoahConcurrentStringDedupRoots          _dedup_roots;
1671 
1672 public:
1673   ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1674     AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots Task"),
1675     _vm_roots(phase),
1676     _cld_roots(phase) {}
1677 
1678   void work(uint worker_id) {
1679     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1680     ShenandoahEvacOOMScope oom;
1681     {
      // vm_roots and weak_roots are OopStorage-backed roots; concurrent iteration
      // may race against OopStorage::release() calls.
1684       ShenandoahEvacUpdateOopStorageRootsClosure cl;
1685       _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl, worker_id);
1686     }
1687 
1688     {
1689       ShenandoahEvacuateUpdateRootsClosure<> cl;
1690       CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1691       _cld_roots.cld_do(&clds, worker_id);
1692     }
1693 
1694     {
1695       ShenandoahForwardedIsAliveClosure is_alive;
1696       ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
1697       _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
1698     }
1699   }
1700 };
1701 
1702 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
1703 private:
1704   ShenandoahHeap* const _heap;
1705   ShenandoahMarkingContext* const _mark_context;
1706   bool  _evac_in_progress;
1707   Thread* const _thread;
1708   size_t  _dead_counter;
1709 
1710 public:
1711   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
1712   void do_oop(oop* p);
1713   void do_oop(narrowOop* p);
1714 
1715   size_t dead_counter() const;
1716   void reset_dead_counter();
1717 };
1718 
1719 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
1720   _heap(ShenandoahHeap::heap()),
1721   _mark_context(ShenandoahHeap::heap()->marking_context()),
1722   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
1723   _thread(Thread::current()),
1724   _dead_counter(0) {
1725 }
1726 
1727 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
1728   const oop obj = RawAccess<>::oop_load(p);
1729   if (!CompressedOops::is_null(obj)) {
1730     if (!_mark_context->is_marked(obj)) {
1731       shenandoah_assert_correct(p, obj);
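      // CAS the dead root slot to NULL; only the winner of the race (the thread
      // that observes the unchanged old value) counts this death, so concurrent
      // workers and OopStorage::release() cannot double-count the same slot.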
1732       oop old = Atomic::cmpxchg(p, obj, oop(NULL));
1733       if (obj == old) {
1734         _dead_counter ++;
1735       }
1736     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
1737       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1738       if (resolved == obj) {
1739         resolved = _heap->evacuate_object(obj, _thread);
1740       }
1741       Atomic::cmpxchg(p, obj, resolved);
1742       assert(_heap->cancelled_gc() ||
1743              _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
1744              "Sanity");
1745     }
1746   }
1747 }
1748 
1749 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
1750   ShouldNotReachHere();
1751 }
1752 
1753 size_t ShenandoahEvacUpdateCleanupOopStorageRootsClosure::dead_counter() const {
1754   return _dead_counter;
1755 }
1756 
1757 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::reset_dead_counter() {
1758   _dead_counter = 0;
1759 }
1760 
1761 class ShenandoahIsCLDAliveClosure : public CLDClosure {
1762 public:
1763   void do_cld(ClassLoaderData* cld) {
1764     cld->is_alive();
1765   }
1766 };
1767 
1768 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
1769 public:
1770   void do_nmethod(nmethod* n) {
1771     n->is_unloading();
1772   }
1773 };
1774 
// This task not only evacuates/updates marked weak roots, but also "NULL"s out
// dead weak roots.
1777 class ShenandoahConcurrentWeakRootsEvacUpdateTask : public AbstractGangTask {
1778 private:
1779   ShenandoahWeakRoot<true /*concurrent*/>  _jni_roots;
1780   ShenandoahWeakRoot<true /*concurrent*/>  _string_table_roots;
1781   ShenandoahWeakRoot<true /*concurrent*/>  _resolved_method_table_roots;
1782   ShenandoahWeakRoot<true /*concurrent*/>  _vm_roots;
1783 
1784   // Roots related to concurrent class unloading
1785   ShenandoahClassLoaderDataRoots<true /* concurrent */, false /* single thread*/>
1786                                            _cld_roots;
1787   ShenandoahConcurrentNMethodIterator      _nmethod_itr;
1788   bool                                     _concurrent_class_unloading;
1789 
1790 public:
1791   ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1792     AbstractGangTask("Shenandoah Concurrent Weak Root Task"),
1793     _jni_roots(OopStorageSet::jni_weak(), phase, ShenandoahPhaseTimings::JNIWeakRoots),
1794     _string_table_roots(OopStorageSet::string_table_weak(), phase, ShenandoahPhaseTimings::StringTableRoots),
1795     _resolved_method_table_roots(OopStorageSet::resolved_method_table_weak(), phase, ShenandoahPhaseTimings::ResolvedMethodTableRoots),
1796     _vm_roots(OopStorageSet::vm_weak(), phase, ShenandoahPhaseTimings::VMWeakRoots),
1797     _cld_roots(phase),
1798     _nmethod_itr(ShenandoahCodeRoots::table()),
1799     _concurrent_class_unloading(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1800     StringTable::reset_dead_counter();
1801     ResolvedMethodTable::reset_dead_counter();
1802     if (_concurrent_class_unloading) {
1803       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1804       _nmethod_itr.nmethods_do_begin();
1805     }
1806   }
1807 
1808   ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
1809     StringTable::finish_dead_counter();
1810     ResolvedMethodTable::finish_dead_counter();
1811     if (_concurrent_class_unloading) {
1812       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1813       _nmethod_itr.nmethods_do_end();
1814     }
1815   }
1816 
1817   void work(uint worker_id) {
1818     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1819     {
1820       ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage-backed roots; concurrent iteration
      // may race against OopStorage::release() calls.
1823       ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
1824       _jni_roots.oops_do(&cl, worker_id);
1825       _vm_roots.oops_do(&cl, worker_id);
1826 
1827       cl.reset_dead_counter();
1828       _string_table_roots.oops_do(&cl, worker_id);
1829       StringTable::inc_dead_counter(cl.dead_counter());
1830 
1831       cl.reset_dead_counter();
1832       _resolved_method_table_roots.oops_do(&cl, worker_id);
1833       ResolvedMethodTable::inc_dead_counter(cl.dead_counter());
1834     }
1835 
    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in the CLDs and determine each nmethod's unloading
    // state, so that we can clean up immediate garbage sooner.
1839     if (_concurrent_class_unloading) {
      // Applies ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either
      // NULL out the CLD's holder or evacuate it.
1842       ShenandoahIsCLDAliveClosure is_cld_alive;
1843       _cld_roots.cld_do(&is_cld_alive, worker_id);
1844 
      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading state
      // is cached; therefore, during the concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods.
1849       ShenandoahIsNMethodAliveClosure is_nmethod_alive;
1850       _nmethod_itr.nmethods_do(&is_nmethod_alive);
1851     }
1852   }
1853 };
1854 
1855 void ShenandoahHeap::op_weak_roots() {
1856   if (is_concurrent_weak_root_in_progress()) {
1857     {
1858       // Concurrent weak root processing
1859       ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
1860       ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
1861       workers()->run_task(&task);
1862       if (!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1863         set_concurrent_weak_root_in_progress(false);
1864       }
1865     }
1866 
1867     // Perform handshake to flush out dead oops
1868     {
1869       ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
1870       ShenandoahRendezvousClosure cl;
1871       Handshake::execute(&cl);
1872     }
1873   }
1874 }
1875 
1876 void ShenandoahHeap::op_class_unloading() {
1877   assert (is_concurrent_weak_root_in_progress() &&
1878           ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
1879           "Checked by caller");
1880   _unloader.unload();
1881   set_concurrent_weak_root_in_progress(false);
1882 }
1883 
1884 void ShenandoahHeap::op_strong_roots() {
1885   assert(is_concurrent_strong_root_in_progress(), "Checked by caller");
1886   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1887   workers()->run_task(&task);
1888   set_concurrent_strong_root_in_progress(false);
1889 }
1890 
1891 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1892 private:
1893   ShenandoahMarkingContext* const _ctx;
1894 public:
1895   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1896 
1897   void heap_region_do(ShenandoahHeapRegion* r) {
1898     if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We recheck these under the pause
      // anyway, to capture any updates that happen between now and then.
1901       r->clear_live_data();
1902       _ctx->capture_top_at_mark_start(r);
1903     }
1904   }
1905 
1906   bool is_thread_safe() { return true; }
1907 };
1908 
1909 void ShenandoahHeap::op_reset() {
1910   if (ShenandoahPacing) {
1911     pacer()->setup_for_reset();
1912   }
1913   reset_mark_bitmap();
1914 
1915   ShenandoahResetUpdateRegionStateClosure cl;
1916   parallel_heap_region_iterate(&cl);
1917 }
1918 
1919 void ShenandoahHeap::op_preclean() {
1920   if (ShenandoahPacing) {
1921     pacer()->setup_for_preclean();
1922   }
1923   concurrent_mark()->preclean_weak_refs();
1924 }
1925 
1926 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1927   ShenandoahMetricsSnapshot metrics;
1928   metrics.snap_before();
1929 
1930   full_gc()->do_it(cause);
1931   if (UseTLAB) {
1932     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1933     resize_all_tlabs();
1934   }
1935 
1936   metrics.snap_after();
1937 
1938   if (metrics.is_good_progress()) {
1939     _progress_last_gc.set();
1940   } else {
1941     // Nothing to do. Tell the allocation path that we have failed to make
1942     // progress, and it can finally fail.
1943     _progress_last_gc.unset();
1944   }
1945 }
1946 
1947 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_gc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to a Full GC.
1951 
1952   clear_cancelled_gc();
1953 
1954   ShenandoahMetricsSnapshot metrics;
1955   metrics.snap_before();
1956 
1957   switch (point) {
    // The cases below form a Duff's-device-like structure: they describe the actual GC
    // cycle, but enter it at different points, depending on which concurrent phase had
    // degenerated.
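    //
    // Fall-through is intentional: entering at _degenerated_mark, for example, runs
    // final mark, cleanup, evacuation, and reference updates in order.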
1961 
1962     case _degenerated_outside_cycle:
1963       // We have degenerated from outside the cycle, which means something is bad with
1964       // the heap, most probably heavy humongous fragmentation, or we are very low on free
1965       // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
1966       // we can do the most aggressive degen cycle, which includes processing references and
1967       // class unloading, unless those features are explicitly disabled.
1968       //
1969       // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
1970       // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
1971       set_process_references(heuristics()->can_process_references());
1972       set_unload_classes(heuristics()->can_unload_classes());
1973 
1974       op_reset();
1975 
1976       op_init_mark();
1977       if (cancelled_gc()) {
1978         op_degenerated_fail();
1979         return;
1980       }
1981 
1982     case _degenerated_mark:
1983       op_final_mark();
1984       if (cancelled_gc()) {
1985         op_degenerated_fail();
1986         return;
1987       }
1988 
1989       if (!has_forwarded_objects() && ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
        // Disarm nmethods that were armed for concurrent mark. On a normal cycle,
        // they would be disarmed while the conc-roots phase is running.
1992         // TODO: Call op_conc_roots() here instead
1993         ShenandoahCodeRoots::disarm_nmethods();
1994       }
1995 
1996       op_cleanup_early();
1997 
1998     case _degenerated_evac:
      // If the heuristics think we should do the cycle, this flag would be set,
      // and we can do evacuation. Otherwise, it would be the shortcut cycle.
2001       if (is_evacuation_in_progress()) {
2002 
        // Degeneration under the oom-evac protocol might have left some objects in
        // the collection set un-evacuated. Restart evacuation from the beginning to
        // capture all objects. For the objects that are already evacuated, this is
        // a simple check, which is supposed to be fast. It is also safe to do even
        // without degeneration, as the CSet iterator is at the beginning in
        // preparation for evacuation anyway.
        //
        // Before doing that, we need to make sure we never had any cset-pinned
        // regions. This may happen if an allocation failure happened while evacuating
        // an about-to-be-pinned object, the oom-evac protocol left the object in
        // the collection set, and the pin then reached the cset region. If we continued
        // the cycle here, we would trash the cset and the live objects in it. To avoid
        // that, we fail degeneration right away and slide into Full GC to recover.
2016 
2017         {
2018           sync_pinned_region_status();
2019           collection_set()->clear_current_index();
2020 
2021           ShenandoahHeapRegion* r;
2022           while ((r = collection_set()->next()) != NULL) {
2023             if (r->is_pinned()) {
2024               cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
2025               op_degenerated_fail();
2026               return;
2027             }
2028           }
2029 
2030           collection_set()->clear_current_index();
2031         }
2032 
2033         op_stw_evac();
2034         if (cancelled_gc()) {
2035           op_degenerated_fail();
2036           return;
2037         }
2038       }
2039 
      // If the heuristics think we should do the cycle, this flag would be set,
      // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
2042       if (has_forwarded_objects()) {
2043         op_init_updaterefs();
2044         if (cancelled_gc()) {
2045           op_degenerated_fail();
2046           return;
2047         }
2048       }
2049 
2050     case _degenerated_updaterefs:
2051       if (has_forwarded_objects()) {
2052         op_final_updaterefs();
2053         if (cancelled_gc()) {
2054           op_degenerated_fail();
2055           return;
2056         }
2057       }
2058 
2059       op_cleanup_complete();
2060       break;
2061 
2062     default:
2063       ShouldNotReachHere();
2064   }
2065 
2066   if (ShenandoahVerify) {
2067     verifier()->verify_after_degenerated();
2068   }
2069 
2070   if (VerifyAfterGC) {
2071     Universe::verify();
2072   }
2073 
2074   metrics.snap_after();
2075 
2076   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
2077   // because that probably means the heap is overloaded and/or fragmented.
2078   if (!metrics.is_good_progress()) {
2079     _progress_last_gc.unset();
2080     cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
2081     op_degenerated_futile();
2082   } else {
2083     _progress_last_gc.set();
2084   }
2085 }
2086 
2087 void ShenandoahHeap::op_degenerated_fail() {
2088   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
2089   shenandoah_policy()->record_degenerated_upgrade_to_full();
2090   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
2091 }
2092 
2093 void ShenandoahHeap::op_degenerated_futile() {
2094   shenandoah_policy()->record_degenerated_upgrade_to_full();
2095   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
2096 }
2097 
2098 void ShenandoahHeap::force_satb_flush_all_threads() {
2099   if (!is_concurrent_mark_in_progress()) {
2100     // No need to flush SATBs
2101     return;
2102   }
2103 
2104   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2105     ShenandoahThreadLocalData::set_force_satb_flush(t, true);
2106   }
2107   // The threads are not "acquiring" their thread-local data, but it does not
2108   // hurt to "release" the updates here anyway.
2109   OrderAccess::fence();
2110 }
2111 
2112 void ShenandoahHeap::set_gc_state_all_threads(char state) {
2113   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2114     ShenandoahThreadLocalData::set_gc_state(t, state);
2115   }
2116 }
2117 
2118 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
2119   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
2120   _gc_state.set_cond(mask, value);
2121   set_gc_state_all_threads(_gc_state.raw_value());
2122 }
2123 
2124 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
2125   if (has_forwarded_objects()) {
2126     set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
2127   } else {
2128     set_gc_state_mask(MARKING, in_progress);
2129   }
2130   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
2131 }
2132 
2133 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2134   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2135   set_gc_state_mask(EVACUATION, in_progress);
2136 }
2137 
2138 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2139   assert(ShenandoahConcurrentRoots::can_do_concurrent_roots(), "Why set the flag?");
2140   if (in_progress) {
2141     _concurrent_strong_root_in_progress.set();
2142   } else {
2143     _concurrent_strong_root_in_progress.unset();
2144   }
2145 }
2146 
2147 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool in_progress) {
2148   assert(ShenandoahConcurrentRoots::can_do_concurrent_roots(), "Why set the flag?");
2149   if (in_progress) {
2150     _concurrent_weak_root_in_progress.set();
2151   } else {
2152     _concurrent_weak_root_in_progress.unset();
2153   }
2154 }
2155 
2156 void ShenandoahHeap::ref_processing_init() {
2157   assert(_max_workers > 0, "Sanity");
2158 
2159   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
2160   bool mt_discovery = _max_workers > 1;
2161 
2162   _ref_processor =
2163     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
2164                            mt_processing,           // MT processing
2165                            _max_workers,            // Degree of MT processing
2166                            mt_discovery,            // MT discovery
2167                            _max_workers,            // Degree of MT discovery
2168                            false,                   // Reference discovery is not atomic
2169                            NULL,                    // No closure, should be installed before use
2170                            true);                   // Scale worker threads
2171 
2172   log_info(gc, init)("Reference processing: %s discovery, %s processing",
2173           mt_discovery ? "parallel" : "serial",
2174           mt_processing ? "parallel" : "serial");
2175 
2176   shenandoah_assert_rp_isalive_not_installed();
2177 }
2178 
2179 GCTracer* ShenandoahHeap::tracer() {
2180   return shenandoah_policy()->tracer();
2181 }
2182 
2183 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2184   return _free_set->used();
2185 }
2186 
2187 bool ShenandoahHeap::try_cancel_gc() {
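  // Flip the flag CANCELLABLE -> CANCELLED with a CAS. With suspendible workers,
  // the flag may be parked at NOT_CANCELLED; in that case, spin until it becomes
  // decidable again, yielding to a pending safepoint when on a Java thread.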
2188   while (true) {
2189     jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2190     if (prev == CANCELLABLE) return true;
2191     else if (prev == CANCELLED) return false;
2192     assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
2193     assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
2194     if (Thread::current()->is_Java_thread()) {
2195       // We need to provide a safepoint here, otherwise we might
2196       // spin forever if a SP is pending.
2197       ThreadBlockInVM sp(JavaThread::current());
2198       SpinPause();
2199     }
2200   }
2201 }
2202 
2203 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2204   if (try_cancel_gc()) {
2205     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2206     log_info(gc)("%s", msg.buffer());
2207     Events::log(Thread::current(), "%s", msg.buffer());
2208   }
2209 }
2210 
2211 uint ShenandoahHeap::max_workers() {
2212   return _max_workers;
2213 }
2214 
2215 void ShenandoahHeap::stop() {
2216   // The shutdown sequence should be able to terminate when GC is running.
2217 
2218   // Step 0. Notify policy to disable event recording.
2219   _shenandoah_policy->record_shutdown();
2220 
2221   // Step 1. Notify control thread that we are in shutdown.
2222   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2223   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2224   control_thread()->prepare_for_graceful_shutdown();
2225 
2226   // Step 2. Notify GC workers that we are cancelling GC.
2227   cancel_gc(GCCause::_shenandoah_stop_vm);
2228 
2229   // Step 3. Wait until GC worker exits normally.
2230   control_thread()->stop();
2231 
2232   // Step 4. Stop String Dedup thread if it is active
2233   if (ShenandoahStringDedup::is_enabled()) {
2234     ShenandoahStringDedup::stop();
2235   }
2236 }
2237 
2238 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2239   if (!unload_classes()) return;
2240 
2241   // Unload classes and purge SystemDictionary.
2242   {
2243     ShenandoahGCPhase phase(full_gc ?
2244                             ShenandoahPhaseTimings::full_gc_purge_class_unload :
2245                             ShenandoahPhaseTimings::purge_class_unload);
2246     bool purged_class = SystemDictionary::do_unloading(gc_timer());
2247 
2248     ShenandoahIsAliveSelector is_alive;
2249     uint num_workers = _workers->active_workers();
2250     ShenandoahClassUnloadingTask unlink_task(is_alive.is_alive_closure(), num_workers, purged_class);
2251     _workers->run_task(&unlink_task);
2252   }
2253 
2254   {
2255     ShenandoahGCPhase phase(full_gc ?
2256                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2257                             ShenandoahPhaseTimings::purge_cldg);
2258     ClassLoaderDataGraph::purge();
2259   }
2260   // Resize and verify metaspace
2261   MetaspaceGC::compute_new_size();
2262   MetaspaceUtils::verify_metrics();
2263 }
2264 
// Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
// so they should not have forwarded oops.
// However, we do need to "null" out dead oops in the roots, if that cannot be done
// in concurrent cycles.
2269 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2270   ShenandoahGCPhase root_phase(full_gc ?
2271                                ShenandoahPhaseTimings::full_gc_purge :
2272                                ShenandoahPhaseTimings::purge);
2273   uint num_workers = _workers->active_workers();
2274   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2275                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2276                                                ShenandoahPhaseTimings::purge_weak_par;
2277   ShenandoahGCPhase phase(timing_phase);
2278   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2279 
2280   // Cleanup weak roots
2281   if (has_forwarded_objects()) {
2282     ShenandoahForwardedIsAliveClosure is_alive;
2283     ShenandoahUpdateRefsClosure keep_alive;
2284     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2285       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
2286     _workers->run_task(&cleaning_task);
2287   } else {
2288     ShenandoahIsAliveClosure is_alive;
2289 #ifdef ASSERT
2290     ShenandoahAssertNotForwardedClosure verify_cl;
2291     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2292       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
2293 #else
2294     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2295       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers, !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
2296 #endif
2297     _workers->run_task(&cleaning_task);
2298   }
2299 }
2300 
2301 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2302   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2303   stw_process_weak_roots(full_gc);
2304   if (!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
2305     stw_unload_classes(full_gc);
2306   }
2307 }
2308 
2309 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2310   set_gc_state_mask(HAS_FORWARDED, cond);
2311 }
2312 
2313 void ShenandoahHeap::set_process_references(bool pr) {
2314   _process_references.set_cond(pr);
2315 }
2316 
2317 void ShenandoahHeap::set_unload_classes(bool uc) {
2318   _unload_classes.set_cond(uc);
2319 }
2320 
2321 bool ShenandoahHeap::process_references() const {
2322   return _process_references.is_set();
2323 }
2324 
2325 bool ShenandoahHeap::unload_classes() const {
2326   return _unload_classes.is_set();
2327 }
2328 
2329 address ShenandoahHeap::in_cset_fast_test_addr() {
2330   ShenandoahHeap* heap = ShenandoahHeap::heap();
2331   assert(heap->collection_set() != NULL, "Sanity");
2332   return (address) heap->collection_set()->biased_map_address();
2333 }
2334 
2335 address ShenandoahHeap::cancelled_gc_addr() {
2336   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
2337 }
2338 
2339 address ShenandoahHeap::gc_state_addr() {
2340   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2341 }
2342 
2343 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2344   return Atomic::load_acquire(&_bytes_allocated_since_gc_start);
2345 }
2346 
2347 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2348   Atomic::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2349 }
2350 
2351 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2352   _degenerated_gc_in_progress.set_cond(in_progress);
2353 }
2354 
2355 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2356   _full_gc_in_progress.set_cond(in_progress);
2357 }
2358 
2359 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2360   assert (is_full_gc_in_progress(), "should be");
2361   _full_gc_move_in_progress.set_cond(in_progress);
2362 }
2363 
2364 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2365   set_gc_state_mask(UPDATEREFS, in_progress);
2366 }
2367 
2368 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2369   ShenandoahCodeRoots::register_nmethod(nm);
2370 }
2371 
2372 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2373   ShenandoahCodeRoots::unregister_nmethod(nm);
2374 }
2375 
2376 void ShenandoahHeap::flush_nmethod(nmethod* nm) {
2377   ShenandoahCodeRoots::flush_nmethod(nm);
2378 }
2379 
2380 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2381   heap_region_containing(o)->record_pin();
2382   return o;
2383 }
2384 
2385 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2386   heap_region_containing(o)->record_unpin();
2387 }
2388 
2389 void ShenandoahHeap::sync_pinned_region_status() {
2390   ShenandoahHeapLocker locker(lock());
2391 
2392   for (size_t i = 0; i < num_regions(); i++) {
2393     ShenandoahHeapRegion *r = get_region(i);
2394     if (r->is_active()) {
2395       if (r->is_pinned()) {
2396         if (r->pin_count() == 0) {
2397           r->make_unpinned();
2398         }
2399       } else {
2400         if (r->pin_count() > 0) {
2401           r->make_pinned();
2402         }
2403       }
2404     }
2405   }
2406 
2407   assert_pinned_region_status();
2408 }
2409 
2410 #ifdef ASSERT
2411 void ShenandoahHeap::assert_pinned_region_status() {
2412   for (size_t i = 0; i < num_regions(); i++) {
2413     ShenandoahHeapRegion* r = get_region(i);
2414     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2415            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2416   }
2417 }
2418 #endif
2419 
2420 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2421   return _gc_timer;
2422 }
2423 
2424 void ShenandoahHeap::prepare_concurrent_roots() {
2425   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2426   if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
2427     set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2428     set_concurrent_weak_root_in_progress(true);
2429   }
2430 }
2431 
2432 void ShenandoahHeap::prepare_concurrent_unloading() {
2433   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2434   if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
2435     _unloader.prepare();
2436   }
2437 }
2438 
2439 void ShenandoahHeap::finish_concurrent_unloading() {
2440   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2441   if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
2442     _unloader.finish();
2443   }
2444 }
2445 
2446 #ifdef ASSERT
2447 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2448   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2449 
2450   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2451     if (UseDynamicNumberOfGCThreads) {
2452       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2453     } else {
2454       // Use ParallelGCThreads inside safepoints
2455       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2456     }
2457   } else {
2458     if (UseDynamicNumberOfGCThreads) {
2459       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2460     } else {
2461       // Use ConcGCThreads outside safepoints
2462       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2463     }
2464   }
2465 }
2466 #endif
2467 
2468 ShenandoahVerifier* ShenandoahHeap::verifier() {
2469   guarantee(ShenandoahVerify, "Should be enabled");
2470   assert (_verifier != NULL, "sanity");
2471   return _verifier;
2472 }
2473 
2474 template<class T>
2475 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2476 private:
2477   T cl;
2478   ShenandoahHeap* _heap;
2479   ShenandoahRegionIterator* _regions;
2480   bool _concurrent;
2481 public:
2482   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2483     AbstractGangTask("Concurrent Update References Task"),
2484     cl(T()),
2485     _heap(ShenandoahHeap::heap()),
2486     _regions(regions),
2487     _concurrent(concurrent) {
2488   }
2489 
2490   void work(uint worker_id) {
2491     if (_concurrent) {
2492       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2493       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2494       do_work();
2495     } else {
2496       ShenandoahParallelWorkerSession worker_session(worker_id);
2497       do_work();
2498     }
2499   }
2500 
2501 private:
2502   void do_work() {
2503     ShenandoahHeapRegion* r = _regions->next();
2504     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2505     while (r != NULL) {
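      // Only scan up to the update watermark: allocations past it happened after
      // the collection set was fixed at final mark, so they cannot contain
      // from-space refs (see set_update_watermark_at_safepoint above).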
2506       HeapWord* update_watermark = r->get_update_watermark();
2507       assert (update_watermark >= r->bottom(), "sanity");
2508       if (r->is_active() && !r->is_cset()) {
2509         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2510       }
2511       if (ShenandoahPacing) {
2512         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2513       }
2514       if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2515         return;
2516       }
2517       r = _regions->next();
2518     }
2519   }
2520 };
2521 
2522 void ShenandoahHeap::update_heap_references(bool concurrent) {
2523   ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2524   workers()->run_task(&task);
2525 }
2526 
2527 void ShenandoahHeap::op_init_updaterefs() {
2528   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2529 
2530   set_evacuation_in_progress(false);
2531 
2532   {
2533     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_retire_gclabs);
2534     retire_and_reset_gclabs();
2535   }
2536 
2537   if (ShenandoahVerify) {
2538     if (!is_degenerated_gc_in_progress()) {
2539       verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
2540     }
2541     verifier()->verify_before_updaterefs();
2542   }
2543 
2544   set_update_refs_in_progress(true);
2545 
2546   _update_refs_iterator.reset();
2547 
2548   if (ShenandoahPacing) {
2549     pacer()->setup_for_updaterefs();
2550   }
2551 }
2552 
2553 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2554 private:
2555   ShenandoahHeapLock* const _lock;
2556 
2557 public:
2558   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2559 
2560   void heap_region_do(ShenandoahHeapRegion* r) {
2561     // Drop unnecessary "pinned" state from regions that does not have CP marks
2562     // anymore, as this would allow trashing them.
2563 
2564     if (r->is_active()) {
2565       if (r->is_pinned()) {
2566         if (r->pin_count() == 0) {
2567           ShenandoahHeapLocker locker(_lock);
2568           r->make_unpinned();
2569         }
2570       } else {
2571         if (r->pin_count() > 0) {
2572           ShenandoahHeapLocker locker(_lock);
2573           r->make_pinned();
2574         }
2575       }
2576     }
2577   }
2578 
2579   bool is_thread_safe() { return true; }
2580 };
2581 
2582 void ShenandoahHeap::op_final_updaterefs() {
2583   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2584 
2585   finish_concurrent_unloading();
2586 
2587   // Check if there is left-over work, and finish it
2588   if (_update_refs_iterator.has_next()) {
2589     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);
2590 
2591     // Finish updating references where we left off.
2592     clear_cancelled_gc();
2593     update_heap_references(false);
2594   }
2595 
  // Clear cancelled GC, if set. On the cancellation path, the block above would have
  // handled everything. On degenerated paths, cancelled GC would not be set anyway.
2598   if (cancelled_gc()) {
2599     clear_cancelled_gc();
2600   }
2601   assert(!cancelled_gc(), "Should have been done right before");
2602 
2603   if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
2604     verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
2605   }
2606 
2607   if (is_degenerated_gc_in_progress()) {
2608     concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
2609   } else {
2610     concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2611   }
2612 
  // Has to be done before the cset is cleared
2614   if (ShenandoahVerify) {
2615     verifier()->verify_roots_in_to_space();
2616   }
2617 
2618   {
2619     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states);
2620     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2621     parallel_heap_region_iterate(&cl);
2622 
2623     assert_pinned_region_status();
2624   }
2625 
2626   {
2627     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);
2628     trash_cset_regions();
2629   }
2630 
2631   set_has_forwarded_objects(false);
2632   set_update_refs_in_progress(false);
2633 
2634   if (ShenandoahVerify) {
2635     verifier()->verify_after_updaterefs();
2636   }
2637 
2638   if (VerifyAfterGC) {
2639     Universe::verify();
2640   }
2641 
2642   {
2643     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset);
2644     ShenandoahHeapLocker locker(lock());
2645     _free_set->rebuild();
2646   }
2647 }
2648 
2649 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2650   print_on(st);
2651   print_heap_regions_on(st);
2652 }
2653 
2654 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2655   size_t slice = r->index() / _bitmap_regions_per_slice;
2656 
2657   size_t regions_from = _bitmap_regions_per_slice * slice;
2658   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2659   for (size_t g = regions_from; g < regions_to; g++) {
2660     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2661     if (skip_self && g == r->index()) continue;
2662     if (get_region(g)->is_committed()) {
2663       return true;
2664     }
2665   }
2666   return false;
2667 }
2668 
2669 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2670   shenandoah_assert_heaplocked();
2671 
2672   // Bitmaps in special regions do not need commits
2673   if (_bitmap_region_special) {
2674     return true;
2675   }
2676 
2677   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed, so we exit right away.
2680     return true;
2681   }
2682 
2683   // Commit the bitmap slice:
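  // Worked example with illustrative numbers: with _bitmap_regions_per_slice == 8
  // and _bitmap_bytes_per_slice == 64K, region 19 belongs to slice 19 / 8 == 2,
  // and the commit below covers bitmap bytes [128K, 192K).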
2684   size_t slice = r->index() / _bitmap_regions_per_slice;
2685   size_t off = _bitmap_bytes_per_slice * slice;
2686   size_t len = _bitmap_bytes_per_slice;
2687   if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
2688     return false;
2689   }
2690   return true;
2691 }
2692 
2693 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2694   shenandoah_assert_heaplocked();
2695 
2696   // Bitmaps in special regions do not need uncommits
2697   if (_bitmap_region_special) {
2698     return true;
2699   }
2700 
2701   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed, so we exit right away.
2704     return true;
2705   }
2706 
2707   // Uncommit the bitmap slice:
2708   size_t slice = r->index() / _bitmap_regions_per_slice;
2709   size_t off = _bitmap_bytes_per_slice * slice;
2710   size_t len = _bitmap_bytes_per_slice;
2711   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2712     return false;
2713   }
2714   return true;
2715 }
2716 
2717 void ShenandoahHeap::safepoint_synchronize_begin() {
2718   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2719     SuspendibleThreadSet::synchronize();
2720   }
2721 }
2722 
2723 void ShenandoahHeap::safepoint_synchronize_end() {
2724   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2725     SuspendibleThreadSet::desynchronize();
2726   }
2727 }
2728 
2729 void ShenandoahHeap::vmop_entry_init_mark() {
2730   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2731   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
2732 
2733   try_inject_alloc_failure();
2734   VM_ShenandoahInitMark op;
2735   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2736 }
2737 
2738 void ShenandoahHeap::vmop_entry_final_mark() {
2739   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2740   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
2741 
2742   try_inject_alloc_failure();
2743   VM_ShenandoahFinalMarkStartEvac op;
2744   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2745 }
2746 
2747 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2748   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2749   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
2750 
2751   try_inject_alloc_failure();
2752   VM_ShenandoahInitUpdateRefs op;
2753   VMThread::execute(&op);
2754 }
2755 
2756 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2757   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2758   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
2759 
2760   try_inject_alloc_failure();
2761   VM_ShenandoahFinalUpdateRefs op;
2762   VMThread::execute(&op);
2763 }
2764 
2765 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2766   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2767   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
2768 
2769   try_inject_alloc_failure();
2770   VM_ShenandoahFullGC op(cause);
2771   VMThread::execute(&op);
2772 }
2773 
2774 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2775   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2776   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
2777 
2778   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2779   VMThread::execute(&degenerated_gc);
2780 }
2781 
2782 void ShenandoahHeap::entry_init_mark() {
2783   const char* msg = init_mark_event_message();
2784   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
2785   EventMark em("%s", msg);
2786 
2787   ShenandoahWorkerScope scope(workers(),
2788                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2789                               "init marking");
2790 
2791   op_init_mark();
2792 }
2793 
2794 void ShenandoahHeap::entry_final_mark() {
2795   const char* msg = final_mark_event_message();
2796   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
2797   EventMark em("%s", msg);
2798 
2799   ShenandoahWorkerScope scope(workers(),
2800                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2801                               "final marking");
2802 
2803   op_final_mark();
2804 }
2805 
2806 void ShenandoahHeap::entry_init_updaterefs() {
2807   static const char* msg = "Pause Init Update Refs";
2808   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
2809   EventMark em("%s", msg);
2810 
2811   // No workers used in this phase, no setup required
2812 
2813   op_init_updaterefs();
2814 }
2815 
2816 void ShenandoahHeap::entry_final_updaterefs() {
2817   static const char* msg = "Pause Final Update Refs";
2818   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
2819   EventMark em("%s", msg);
2820 
2821   ShenandoahWorkerScope scope(workers(),
2822                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2823                               "final reference update");
2824 
2825   op_final_updaterefs();
2826 }
2827 
2828 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2829   static const char* msg = "Pause Full";
2830   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
2831   EventMark em("%s", msg);
2832 
2833   ShenandoahWorkerScope scope(workers(),
2834                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2835                               "full gc");
2836 
2837   op_full(cause);
2838 }
2839 
2840 void ShenandoahHeap::entry_degenerated(int point) {
2841   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2842   const char* msg = degen_event_message(dpoint);
2843   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
2844   EventMark em("%s", msg);
2845 
2846   ShenandoahWorkerScope scope(workers(),
2847                               ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
2848                               "stw degenerated gc");
2849 
2850   set_degenerated_gc_in_progress(true);
2851   op_degenerated(dpoint);
2852   set_degenerated_gc_in_progress(false);
2853 }
2854 
void ShenandoahHeap::entry_mark() {
  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  try_inject_alloc_failure();
  op_mark();
}

void ShenandoahHeap::entry_evac() {
  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  try_inject_alloc_failure();
  op_conc_evac();
}

void ShenandoahHeap::entry_updaterefs() {
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahHeap::entry_weak_roots() {
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahHeap::entry_class_unloading() {
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahHeap::entry_strong_roots() {
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahHeap::entry_cleanup_early() {
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup

  try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahHeap::entry_cleanup_complete() {
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup

  try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahHeap::entry_reset() {
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  try_inject_alloc_failure();
  op_reset();
}

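// Precleaning runs only when reference processing is enabled for this
// cycle. Passing check_workers = false relaxes the scope's verification
// of the active worker count, since precleaning does not require the
// full calculated gang.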
void ShenandoahHeap::entry_preclean() {
  if (ShenandoahPreclean && process_references()) {
    static const char* msg = "Concurrent precleaning";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_preclean);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
                                "concurrent preclean",
                                /* check_workers = */ false);

    try_inject_alloc_failure();
    op_preclean();
  }
}

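// Uncommit is driven by the control thread and needs no worker scope and
// no allocation failure injection: it only returns committed-but-unused
// regions (empty since before the shrink_before cutoff) to the OS.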
void ShenandoahHeap::entry_uncommit(double shrink_before) {
  static const char* msg = "Concurrent uncommit";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
  EventMark em("%s", msg);

  op_uncommit(shrink_before);
}

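// Diagnostic support for ShenandoahAllocFailureALot: with just under 5%
// probability per call ((os::random() % 1000) > 950), arm the injection
// flag and sleep for 1 ms so a mutator has a window to observe it. The
// flag is consumed (test-and-clear) on the allocation path via
// should_inject_alloc_failure(); a hypothetical consumer looks roughly
// like this (illustrative names, not the actual call site):
//
//   if (heap->should_inject_alloc_failure()) {
//     // behave exactly as if this allocation had failed
//     handle_allocation_failure(req);
//   }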
void ShenandoahHeap::try_inject_alloc_failure() {
  if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
    _inject_alloc_failure.set();
    os::naked_short_sleep(1);
    if (cancelled_gc()) {
      log_info(gc)("Allocation failure was successfully injected");
    }
  }
}

bool ShenandoahHeap::should_inject_alloc_failure() {
  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
}

void ShenandoahHeap::initialize_serviceability() {
  _memory_pool = new ShenandoahMemoryPool(this);
  _cycle_memory_manager.add_pool(_memory_pool);
  _stw_memory_manager.add_pool(_memory_pool);
}

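// Serviceability: Shenandoah exposes one memory pool shared by two memory
// managers, one accounting concurrent cycles and one accounting STW
// collections. These surface to Java code through java.lang.management
// (MemoryPoolMXBean / GarbageCollectorMXBean).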
GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(&_cycle_memory_manager);
  memory_managers.append(&_stw_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_memory_pool);
  return memory_pools;
}

MemoryUsage ShenandoahHeap::memory_usage() {
  return _memory_pool->get_memory_usage();
}

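// Evacuation OOM protocol: threads that may evacuate objects bracket that
// work with enter_evacuation()/leave_evacuation(), allowing
// ShenandoahEvacOOMHandler to bring all evacuating threads to a safe,
// consistent state if evacuation runs out of memory.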
void ShenandoahHeap::enter_evacuation() {
  _oom_evac_handler.enter_evacuation();
}

void ShenandoahHeap::leave_evacuation() {
  _oom_evac_handler.leave_evacuation();
}

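// ShenandoahRegionIterator walks regions by index. The stateful part,
// next(), lives in shenandoahHeap.inline.hpp and claims indices with an
// atomic increment, which is what lets a single iterator instance be
// shared among parallel worker threads.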
ShenandoahRegionIterator::ShenandoahRegionIterator() :
  _heap(ShenandoahHeap::heap()),
  _index(0) {}

ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
  _heap(heap),
  _index(0) {}

void ShenandoahRegionIterator::reset() {
  _index = 0;
}

bool ShenandoahRegionIterator::has_next() const {
  return _index < _heap->num_regions();
}

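// gc_state() returns the raw gc-state byte: a bitmap of flags (marking,
// evacuation, update-refs, has-forwarded) that the GC barriers consult,
// normally via a thread-local copy, on their fast paths.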
char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}

void ShenandoahHeap::deduplicate_string(oop str) {
  assert(java_lang_String::is_instance(str), "invariant");

  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::deduplicate(str);
  }
}

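// The event message helpers below return string literals: the returned
// pointer is retained by the phase timing and JFR event machinery for the
// duration of the phase, so it must never point into temporary storage.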
const char* ShenandoahHeap::init_mark_event_message() const {
  assert(!has_forwarded_objects(), "Should not have forwarded objects here");

  bool proc_refs = process_references();
  bool unload_cls = unload_classes();

  if (proc_refs && unload_cls) {
    return "Pause Init Mark (process weakrefs) (unload classes)";
  } else if (proc_refs) {
    return "Pause Init Mark (process weakrefs)";
  } else if (unload_cls) {
    return "Pause Init Mark (unload classes)";
  } else {
    return "Pause Init Mark";
  }
}

const char* ShenandoahHeap::final_mark_event_message() const {
  assert(!has_forwarded_objects(), "Should not have forwarded objects here");

  bool proc_refs = process_references();
  bool unload_cls = unload_classes();

  if (proc_refs && unload_cls) {
    return "Pause Final Mark (process weakrefs) (unload classes)";
  } else if (proc_refs) {
    return "Pause Final Mark (process weakrefs)";
  } else if (unload_cls) {
    return "Pause Final Mark (unload classes)";
  } else {
    return "Pause Final Mark";
  }
}

const char* ShenandoahHeap::conc_mark_event_message() const {
  assert(!has_forwarded_objects(), "Should not have forwarded objects here");

  bool proc_refs = process_references();
  bool unload_cls = unload_classes();

  if (proc_refs && unload_cls) {
    return "Concurrent marking (process weakrefs) (unload classes)";
  } else if (proc_refs) {
    return "Concurrent marking (process weakrefs)";
  } else if (unload_cls) {
    return "Concurrent marking (unload classes)";
  } else {
    return "Concurrent marking";
  }
}

const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      return "Pause Degenerated GC (<UNSET>)";
    case _degenerated_outside_cycle:
      return "Pause Degenerated GC (Outside of Cycle)";
    case _degenerated_mark:
      return "Pause Degenerated GC (Mark)";
    case _degenerated_evac:
      return "Pause Degenerated GC (Evacuation)";
    case _degenerated_updaterefs:
      return "Pause Degenerated GC (Update Refs)";
    default:
      ShouldNotReachHere();
      return "ERROR";
  }
}

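// Per-worker liveness caches. During marking, each worker accumulates
// per-region live data into its own cache instead of atomically updating
// the shared region counters on every object; flush_liveness_cache()
// publishes the per-worker totals to the regions once, which keeps the
// hot marking path free of cross-worker contention.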
ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
#ifdef ASSERT
  assert(_liveness_cache != NULL, "sanity");
  assert(worker_id < _max_workers, "sanity");
  for (uint i = 0; i < num_regions(); i++) {
    assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
  }
#endif
  return _liveness_cache[worker_id];
}

void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
  assert(worker_id < _max_workers, "sanity");
  assert(_liveness_cache != NULL, "sanity");
  ShenandoahLiveData* ld = _liveness_cache[worker_id];
  for (uint i = 0; i < num_regions(); i++) {
    ShenandoahLiveData live = ld[i];
    if (live > 0) {
      ShenandoahHeapRegion* r = get_region(i);
      r->increase_live_data_gc_words(live);
      ld[i] = 0;
    }
  }
}