/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
uint                       PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer               PSScavenge::_accumulated_time;
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
CollectorCounters*         PSScavenge::_counters = NULL;
bool                       PSScavenge::_promotion_failed = false;

// Liveness closure used while processing discovered references and cleaning
// the string table: objects outside the young generation are treated as live,
// and young objects are live iff they were forwarded (copied) during this
// scavenge. Defined here, before its first use below.
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

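// Keep-alive closure handed to the reference processor: a referent that must
// stay live is scavenged through the same copy_and_push_safe_barrier path
// used for strong roots (without immediate promotion), so anything still
// sitting in from-space gets copied to to-space or promoted.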
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

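// Drains the promotion manager's stacks, transitively copying everything
// reachable from the objects kept alive so far; used as the "complete"
// closure during reference processing.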
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

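// After a promotion failure, every young-gen object is revisited and any
// forwarded header is reset to the default mark; marks that needed special
// preservation are restored separately from the preserved stacks (see
// clean_up_failed_promotion() below).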
class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

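// GCTask wrapper that lets the GC task manager run one worker's share of a
// reference-processing ProcessTask; each proxy carries the work id used to
// select the per-thread promotion manager and the slice of discovered
// references to process.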
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

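// The same wrapping for the enqueue phase: each worker runs its share of the
// EnqueueTask, identified by work id.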
class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

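// Executor given to the reference processor when processing is multi-threaded:
// it fans a ProcessTask or EnqueueTask out to one proxy per active GC worker.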
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}

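// Enqueueing is comparatively simple work per worker, so unlike
// execute(ProcessTask&) above no steal tasks or terminator are set up here.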
void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special
// behavior. All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

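    // Clear the table that records base/derived pointer pairs in
    // C2-compiled frames, so it can be repopulated during this scavenge and
    // the derived pointers adjusted afterwards via update_pointers() below.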
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

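    // Reset the per-thread promotion managers before any copying starts.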
    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for (uint i = 0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

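      // A worker that drains its own depth-first queue tries to steal from
      // the other workers' queues; the terminator detects when all queues
      // are empty. With a single worker there is nothing to steal, so the
      // StealTasks are skipped.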
      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Unlink any dead interned Strings and process the remaining live ones.
    PSScavengeRootsClosure root_closure(promotion_manager);
    StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
              " young_gen_capacity: " SIZE_FORMAT,
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc */);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc */,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc */);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging
      // or offlining can change the heap layout, so make sure eden is
      // reshaped if that's the case. Also, update() will cause adaptive
      // NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    CodeCache::prune_scavenge_root_nmethods();

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC, so
        // it would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks",
                             _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
    _promotion_failed = false;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  _promotion_failed = true;
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // We should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

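// Heuristic gate run before any scavenge work begins: skip the scavenge when
// to_space is not empty (unless ScavengeWithObjectsInToSpace is set), or when
// the padded average promoted volume suggests the old gen lacks the free
// space to absorb this collection's promotions. A skipped scavenge makes
// invoke() fall through to a full GC instead.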
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

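// One-time setup, run once arguments have been parsed: fixes the initial
// tenuring threshold, records the young/old boundary (eden bottom) used by
// the fast is_obj_in_young() test, and creates the reference processor, the
// cached card table pointer and the collector counters.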
void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads,    // mt processing degree
                           true,                       // mt discovery
                           (int) ParallelGCThreads,    // mt discovery degree
                           true,                       // atomic_discovery
                           NULL,                       // header provides liveness info
                           false);                     // next field updates do not need write barrier

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}