/*
 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/filemap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_ALL_GCS
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS

NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
  GCH_PS_Universe_oops_do,
  GCH_PS_JNIHandles_oops_do,
  GCH_PS_ObjectSynchronizer_oops_do,
  GCH_PS_FlatProfiler_oops_do,
  GCH_PS_Management_oops_do,
  GCH_PS_SystemDictionary_oops_do,
  GCH_PS_ClassLoaderDataGraph_oops_do,
  GCH_PS_jvmti_oops_do,
  GCH_PS_CodeCache_oops_do,
  GCH_PS_aot_oops_do,
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};
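
// Each task above is claimed at most once per root-scanning pass, via the
// SubTasksDone instance created in the constructor below. The claiming
// idiom, as used throughout process_roots() in this file:
//
//   if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
//     Universe::oops_do(strong_roots);  // runs in exactly one worker
//   }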

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy* policy) :
  CollectedHeap(),
  _rem_set(NULL),
  _gen_policy(policy),
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
  if (UseConcMarkSweepGC) {
    _workers = new WorkGang("GC Thread", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    _workers->initialize_workers();
  } else {
    // Serial GC does not use workers.
    _workers = NULL;
  }
}

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  char* heap_address;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  _rem_set = collector_policy()->create_rem_set(reserved_region());
  set_barrier_set(rem_set()->bs());

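  // The single contiguous reservation is carved up low-to-high: the young
  // generation takes the first part and the old generation the remainder.
  // The resulting address layout, as a sketch (low addresses on the left):
  //
  //   [ young gen (max_size) | old gen (max_size) ]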
  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
  _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
  _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generation.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}

char* GenCollectedHeap::allocate(size_t alignment,
                                 ReservedSpace* heap_rs) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  GenerationSpec* young_spec = gen_policy()->young_gen_spec();
  GenerationSpec* old_spec = gen_policy()->old_gen_spec();

  // Check for overflow.
  size_t total_reserved = young_spec->max_size() + old_spec->max_size();
  if (total_reserved < young_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  *heap_rs = Universe::reserve_heap(total_reserved, alignment);

  os::trace_page_sizes("Heap",
                       collector_policy()->min_heap_byte_size(),
                       total_reserved,
                       alignment,
                       heap_rs->base(),
                       heap_rs->size());

  return heap_rs->base();
}

void GenCollectedHeap::post_initialize() {
  ref_processing_init();
  assert((_young_gen->kind() == Generation::DefNew) ||
         (_young_gen->kind() == Generation::ParNew),
         "Wrong youngest generation type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
         _old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                      _old_gen->capacity(),
                                      def_new_gen->from()->capacity());
  _gen_policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}
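
// Callers that need to wait for a full collection to finish can block on
// FullGCCount_lock and re-check the counter after each notify_all() above.
// A sketch of such a waiter ("target_count" is a hypothetical local, not
// something defined in this file):
//
//   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
//   while (_full_collections_completed < target_count) {
//     ml.wait(Mutex::_no_safepoint_check_flag);
//   }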

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}

#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that this many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = NULL;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != NULL || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return gen_policy()->mem_allocate_work(size,
                                         false /* is_tlab */,
                                         gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  if (!UseConcMarkSweepGC) {
    return false;
  }

  switch (cause) {
    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
    case GCCause::_java_lang_system_gc:
    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
    default:                            return false;
  }
}

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->kind(), gc_cause());

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  if (restore_marks_for_biased_locking) {
    // We perform this mark word preservation work lazily
    // because it's only at this point that we know whether we
    // absolutely have to do it; we want to avoid doing it for
    // scavenge-only collections where it's unnecessary.
    BiasedLocking::preserve_marks();
  }

  // Do collection work
  {
    // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disables (by enqueueing) ref discovery.
    // In the future this should be moved into the generation's
    // collect method so that ref discovery and enqueueing concerns
    // are local to a generation. The collect method could return
    // an appropriate indication in the case that notification on
    // the ref lock was needed. This will make the treatment of
    // weak refs more uniform (and indeed remove such concerns
    // from GCH). XXX

    HandleMark hm;  // Discard invalid handles created during gc
    save_marks();   // save marks for all gens
    // We want to discover references, but not process them yet.
    // This mode is disabled in process_discovered_references if the
    // generation does some collection work, or in
    // enqueue_discovered_references if the generation returns
    // without doing any work.
    ReferenceProcessor* rp = gen->ref_processor();
    // If the discovery of ("weak") refs in this generation is
    // atomic wrt other collectors in this configuration, we
    // are guaranteed to have empty discovered ref lists.
    if (rp->discovery_is_atomic()) {
      rp->enable_discovery();
      rp->setup_policy(clear_soft_refs);
    } else {
      // collect() below will enable discovery as appropriate
    }
    gen->collect(full, clear_soft_refs, size, is_tlab);
    if (!rp->enqueuing_is_done()) {
      ReferenceProcessorPhaseTimes pt(NULL, rp->num_q());
      rp->enqueue_discovered_references(NULL, &pt);
      pt.print_enqueue_phase();
    } else {
      rp->set_enqueuing_is_done(false);
    }
    rp->verify_no_references_recorded();
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }
}

void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  GCIdMarkAndRestore gc_id_mark;

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                                      collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

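    // "complete" means the whole heap is collected. In that case the old
    // generation collection subsumes the young generation, unless
    // ScavengeBeforeFullGC requests a separate young collection first.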
    bool complete = full && (max_generation == OldGen);
    bool old_collects_young = complete && !ScavengeBeforeFullGC;
    bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

    FormatBuffer<> gc_string("%s", "Pause ");
    if (do_young_collection) {
      gc_string.append("Young");
    } else {
      gc_string.append("Full");
    }

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t(gc_string, NULL, gc_cause(), true);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t young_prev_used = _young_gen->used();
    size_t old_prev_used = _old_gen->used();

    bool run_verification = total_collections() >= VerifyGCStartAt;

    bool prepared_for_verification = false;
    bool collected_old = false;

    if (do_young_collection) {
      if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
        prepare_for_verify();
        prepared_for_verification = true;
      }

      collect_generation(_young_gen,
                         full,
                         size,
                         is_tlab,
                         run_verification && VerifyGCLevel <= 0,
                         do_clear_all_soft_refs,
                         false);

      if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
          size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
        // Allocation request was met by young GC.
        size = 0;
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    if (max_generation == OldGen && _old_gen->should_collect(full, size, is_tlab)) {
      if (!complete) {
        // The full_collections increment was missed above.
        increment_total_full_collections();
      }

      if (!prepared_for_verification && run_verification &&
          VerifyGCLevel <= 1 && VerifyBeforeGC) {
        prepare_for_verify();
      }

      if (do_young_collection) {
        // We did a young GC. Need a new GC id for the old GC.
        GCIdMarkAndRestore gc_id_mark;
        GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause(), true);
        collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs, true);
      } else {
        // No young GC done. Use the same GC id as was set up earlier in this method.
        collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs, true);
      }

      must_restore_marks_for_biased_locking = true;
      collected_old = true;
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || collected_old;

    print_heap_change(young_prev_used, old_prev_used);
    MetaspaceAux::print_metaspace_change(metadata_prev_used);

    // Adjust generation sizes.
    if (collected_old) {
      _old_gen->compute_new_size();
    }
    _young_gen->compute_new_size();

    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
      // Resize the metaspace capacity after full collections
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return gen_policy()->satisfy_failed_allocation(size, is_tlab);
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

void GenCollectedHeap::process_roots(StrongRootsScope* scope,
                                     ScanningOption so,
                                     OopClosure* strong_roots,
                                     OopClosure* weak_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(Threads::thread_claim_parity() != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set up
  // upstream, in a method not running in a GC worker. Otherwise
  // a GC worker could be trying to change the termination condition
  // while the task is executing in another GC worker.

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  bool is_par = scope->n_threads() > 1;
  Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p);

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
    JNIHandles::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
    ObjectSynchronizer::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_FlatProfiler_oops_do)) {
    FlatProfiler::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
    Management::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) {
    JvmtiExport::oops_do(strong_roots);
  }
  if (UseAOT && !_process_strong_tasks->is_task_claimed(GCH_PS_aot_oops_do)) {
    AOTLoader::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }
}

void GenCollectedHeap::process_string_table_roots(StrongRootsScope* scope,
                                                  OopClosure* root_closure) {
  assert(root_closure != NULL, "Must be set");
  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (scope->n_threads() > 1) {
    StringTable::possibly_parallel_oops_do(root_closure);
  } else {
    StringTable::oops_do(root_closure);
  }
}

void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
                                           OopsInGenClosure* root_closure,
                                           OopsInGenClosure* old_gen_closure,
                                           CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);

  process_roots(scope, SO_ScavengeCodeCache, root_closure, root_closure,
                cld_closure, cld_closure, &mark_code_closure);
  process_string_table_roots(scope, root_closure);

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
    root_closure->reset_generation();
  }

  // When collection is parallel, all threads get to cooperate to do
  // old generation scanning.
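  // The remembered set records old-to-young pointers, so iterating it
  // finds the young-generation roots that live in the old generation
  // without scanning the entire old generation.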
  old_gen_closure->set_generation(_old_gen);
  rem_set()->younger_refs_iterate(_old_gen, old_gen_closure, scope->n_threads());
  old_gen_closure->reset_generation();

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}

void GenCollectedHeap::cms_process_roots(StrongRootsScope* scope,
                                         bool young_gen_as_roots,
                                         ScanningOption so,
                                         bool only_strong_roots,
                                         OopsInGenClosure* root_closure,
                                         CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
  if (!only_strong_roots) {
    process_string_table_roots(scope, root_closure);
  }

  if (young_gen_as_roots &&
      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
    root_closure->set_generation(_young_gen);
    _young_gen->oop_iterate(root_closure);
    root_closure->reset_generation();
  }

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}

void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
                                          bool is_adjust_phase,
                                          ScanningOption so,
                                          bool only_strong_roots,
                                          OopsInGenClosure* root_closure,
                                          CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
  if (is_adjust_phase) {
    // We never treat the string table as roots during marking
    // for the full gc, so we only need to process it during
    // the adjust phase.
    process_string_table_roots(scope, root_closure);
  }

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  JNIHandles::weak_oops_do(root_closure);
  _young_gen->ref_processor()->weak_oops_do(root_closure);
  _old_gen->ref_processor()->weak_oops_do(root_closure);
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(GenerationType gen,                        \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  if (gen == YoungGen) {                                                \
    _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
  } else {                                                              \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
  }                                                                     \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
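
// For illustration only: assuming (ScanClosure, _nv) is one of the pairs
// supplied by ALL_SINCE_SAVE_MARKS_CLOSURES, the macro above expands to
// roughly
//
//   void GenCollectedHeap::oop_since_save_marks_iterate(
//       GenerationType gen, ScanClosure* cur, ScanClosure* older) {
//     if (gen == YoungGen) {
//       _young_gen->oop_since_save_marks_iterate_nv(cur);
//       _old_gen->oop_since_save_marks_iterate_nv(older);
//     } else {
//       _old_gen->oop_since_save_marks_iterate_nv(cur);
//     }
//   }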

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}

HeapWord* volatile* GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // Mostly concurrent full collection.
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // Young collection for the WhiteBox API.
    collect(cause, YoungGen);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // Young collection only.
      collect(cause, YoungGen);
    } else {
      // Stop-the-world full collection.
      collect(cause, OldGen);
    }
#else
    // Stop-the-world full collection.
    collect(cause, OldGen);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_generation);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, OldGen);
}

// This is the private collection interface.
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {
  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
  CMSCollector* collector =
    new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
                     _rem_set,
                     _gen_policy->as_concurrent_mark_sweep_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  GenerationType local_last_generation;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_last_generation = YoungGen;
  } else {
    local_last_generation = last_generation;
  }

  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                local_last_generation); // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full.
  if (local_last_generation == YoungGen && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well.
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

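// The young generation occupies the low end of the single reserved region
// (see the carving in initialize()), so a plain address comparison against
// the start of the old generation suffices here.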
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate_no_header(OopClosure* cl) {
  NoHeaderExtendedOopClosure no_header_cl(cl);
  oop_iterate(&no_header_cl);
}

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _young_gen->safe_object_iterate(cl);
  _old_gen->safe_object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != NULL) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != NULL, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_size(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_size(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  return _young_gen->supports_tlab_allocation();
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->tlab_capacity();
  }
  return 0;
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->tlab_used();
  }
  return 0;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->unsafe_max_tlab_alloc();
  }
  return 0;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return gen_policy()->mem_allocate_work(size /* size */,
                                         true /* is_tlab */,
                                         &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL. Removes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}
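
// Taken together, removeSmallestScratch() and sort_scratch_list() form a
// simple selection sort. For example, blocks with num_words [5, 2, 9] are
// drained smallest-first (2, then 5, then 9) and each is pushed onto the
// front of "sorted", yielding [9, 5, 2], i.e. decreasing size order.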

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  _young_gen->reset_scratch();
  _old_gen->reset_scratch();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
  return (GenCollectedHeap*)heap;
}

void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}

void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  _young_gen->print_on(st);
  _old_gen->print_on(st);
  MetaspaceAux::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    workers()->print_worker_threads_on(st);
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_tracing_info() const {
  _young_gen->print_summary_info();
  _old_gen->print_summary_info();
}

void GenCollectedHeap::print_heap_change(size_t young_prev_used, size_t old_prev_used) const {
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                     _young_gen->short_name(), young_prev_used / K, _young_gen->used() / K, _young_gen->capacity() / K);
  log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                     _old_gen->short_name(), old_prev_used / K, _old_gen->used() / K, _old_gen->capacity() / K);
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLABs and such.
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Walk generations.
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if defined(COMPILER2) || INCLUDE_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
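  // Our reading of the guarantee below: inline (compiled-code) allocation
  // bumps "top" and compares it against "end" without an overflow check,
  // so there must be enough headroom between "end" and the top of the
  // address space that top + FastAllocateSizeLimit cannot wrap around.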
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(is_client_compilation_mode_vm() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 || INCLUDE_JVMCI */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();

  always_do_update_barrier = UseConcMarkSweepGC;
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen == _old_gen, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // Iterate over generations getting the oldest
  // time that a generation was collected.
  generation_iterate(&tolgc_cl, false);

  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    log_warning(gc)("millis_since_last_gc() would return: " JLONG_FORMAT
                    ". Returning zero instead.", retVal);
    return 0;
  }
  return retVal;
}

void GenCollectedHeap::stop() {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::cmst()->stop();
  }
#endif
}