1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/icBuffer.hpp"
  27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  35 #include "gc_implementation/g1/g1EvacFailure.hpp"
  36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  37 #include "gc_implementation/g1/g1Log.hpp"
  38 #include "gc_implementation/g1/g1MarkSweep.hpp"
  39 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  40 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  41 #include "gc_implementation/g1/heapRegion.inline.hpp"
  42 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  43 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  44 #include "gc_implementation/g1/vm_operations_g1.hpp"
  45 #include "gc_implementation/shared/isGCActiveMark.hpp"
  46 #include "memory/gcLocker.inline.hpp"
  47 #include "memory/genOopClosures.inline.hpp"
  48 #include "memory/generationSpec.hpp"
  49 #include "memory/referenceProcessor.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "oops/oop.pcgc.inline.hpp"
  52 #include "runtime/aprofiler.hpp"
  53 #include "runtime/vmThread.hpp"
  54 
  55 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  56 
// Turn this on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before, during,
// and after the collection -- this is useful for debugging.
  60 #define YOUNG_LIST_VERBOSE 0
  61 // CURRENT STATUS
  62 // This file is under construction.  Search for "FIXME".
  63 
  64 // INVARIANTS/NOTES
  65 //
  66 // All allocation activity covered by the G1CollectedHeap interface is
  67 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  68 // and allocate_new_tlab, which are the "entry" points to the
  69 // allocation code from the rest of the JVM.  (Note that this does not
  70 // apply to TLAB allocation, which is not part of this interface: it
  71 // is done by clients of this interface.)
  72 
  73 // Notes on implementation of parallelism in different tasks.
  74 //
// G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
// The number of GC workers is passed to heap_region_par_iterate_chunked().
// It is invoked via run_task(), which sets _n_workers in the task.
// G1ParTask executes g1_process_strong_roots() ->
// SharedHeap::process_strong_roots(), which eventually calls into
// CardTableModRefBS::par_non_clean_card_iterate_work(), which uses
// SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
// directly uses SubTasksDone (the _process_strong_tasks field in SharedHeap).
  83 //
  84 
  85 // Local to this file.
  86 
  87 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  88   SuspendibleThreadSet* _sts;
  89   G1RemSet* _g1rs;
  90   ConcurrentG1Refine* _cg1r;
  91   bool _concurrent;
  92 public:
  93   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
  94                               G1RemSet* g1rs,
  95                               ConcurrentG1Refine* cg1r) :
  96     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  97   {}
  98   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
  99     bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
    // This path is executed concurrently by the concurrent refinement
    // threads or by mutator threads, and so we do not care if card_ptr
    // contains references that point into the collection set.
 103     assert(!oops_into_cset, "should be");
 104 
 105     if (_concurrent && _sts->should_yield()) {
 106       // Caller will actually yield.
 107       return false;
 108     }
 109     // Otherwise, we finished successfully; return true.
 110     return true;
 111   }
 112   void set_concurrent(bool b) { _concurrent = b; }
 113 };
 114 
 115 
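// Used by check_ct_logs_at_safepoint() below: clears every logged card
// that falls within the heap (*card_ptr is set to -1), counting the calls
// and recording a histogram of the card values it encounters.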
 116 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
 117   int _calls;
 118   G1CollectedHeap* _g1h;
 119   CardTableModRefBS* _ctbs;
 120   int _histo[256];
 121 public:
 122   ClearLoggedCardTableEntryClosure() :
 123     _calls(0)
 124   {
 125     _g1h = G1CollectedHeap::heap();
 126     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 127     for (int i = 0; i < 256; i++) _histo[i] = 0;
 128   }
 129   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 130     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 131       _calls++;
 132       unsigned char* ujb = (unsigned char*)card_ptr;
 133       int ind = (int)(*ujb);
 134       _histo[ind]++;
 135       *card_ptr = -1;
 136     }
 137     return true;
 138   }
 139   int calls() { return _calls; }
 140   void print_histo() {
 141     gclog_or_tty->print_cr("Card table value histogram:");
 142     for (int i = 0; i < 256; i++) {
 143       if (_histo[i] != 0) {
 144         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
 145       }
 146     }
 147   }
 148 };
 149 
 150 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
 151   int _calls;
 152   G1CollectedHeap* _g1h;
 153   CardTableModRefBS* _ctbs;
 154 public:
 155   RedirtyLoggedCardTableEntryClosure() :
 156     _calls(0)
 157   {
 158     _g1h = G1CollectedHeap::heap();
 159     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 160   }
 161   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 162     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 163       _calls++;
 164       *card_ptr = 0;
 165     }
 166     return true;
 167   }
 168   int calls() { return _calls; }
 169 };
 170 
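// A cheaper variant of the closure above: it unconditionally re-dirties
// every card it is handed, with no range check and no call counting.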
 171 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
 172 public:
 173   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 174     *card_ptr = CardTableModRefBS::dirty_card_val();
 175     return true;
 176   }
 177 };
 178 
 179 YoungList::YoungList(G1CollectedHeap* g1h) :
 180     _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
 181     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
 182   guarantee(check_list_empty(false), "just making sure...");
 183 }
 184 
 185 void YoungList::push_region(HeapRegion *hr) {
 186   assert(!hr->is_young(), "should not already be young");
 187   assert(hr->get_next_young_region() == NULL, "cause it should!");
 188 
 189   hr->set_next_young_region(_head);
 190   _head = hr;
 191 
 192   _g1h->g1_policy()->set_region_eden(hr, (int) _length);
 193   ++_length;
 194 }
 195 
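// Survivor regions are kept on their own list with an explicit tail
// pointer, so that the whole list can later be spliced into the young
// list (and NULL-terminated at the tail) by reset_auxilary_lists().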
 196 void YoungList::add_survivor_region(HeapRegion* hr) {
 197   assert(hr->is_survivor(), "should be flagged as survivor region");
 198   assert(hr->get_next_young_region() == NULL, "cause it should!");
 199 
 200   hr->set_next_young_region(_survivor_head);
 201   if (_survivor_head == NULL) {
 202     _survivor_tail = hr;
 203   }
 204   _survivor_head = hr;
 205   ++_survivor_length;
 206 }
 207 
 208 void YoungList::empty_list(HeapRegion* list) {
 209   while (list != NULL) {
 210     HeapRegion* next = list->get_next_young_region();
 211     list->set_next_young_region(NULL);
 212     list->uninstall_surv_rate_group();
 213     list->set_not_young();
 214     list = next;
 215   }
 216 }
 217 
 218 void YoungList::empty_list() {
 219   assert(check_list_well_formed(), "young list should be well formed");
 220 
 221   empty_list(_head);
 222   _head = NULL;
 223   _length = 0;
 224 
 225   empty_list(_survivor_head);
 226   _survivor_head = NULL;
 227   _survivor_tail = NULL;
 228   _survivor_length = 0;
 229 
 230   _last_sampled_rs_lengths = 0;
 231 
 232   assert(check_list_empty(false), "just making sure...");
 233 }
 234 
 235 bool YoungList::check_list_well_formed() {
 236   bool ret = true;
 237 
 238   uint length = 0;
 239   HeapRegion* curr = _head;
 240   HeapRegion* last = NULL;
 241   while (curr != NULL) {
 242     if (!curr->is_young()) {
 243       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
 244                              "incorrectly tagged (y: %d, surv: %d)",
 245                              curr->bottom(), curr->end(),
 246                              curr->is_young(), curr->is_survivor());
 247       ret = false;
 248     }
 249     ++length;
 250     last = curr;
 251     curr = curr->get_next_young_region();
 252   }
 253   ret = ret && (length == _length);
 254 
 255   if (!ret) {
 256     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
 257     gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
 258                            length, _length);
 259   }
 260 
 261   return ret;
 262 }
 263 
 264 bool YoungList::check_list_empty(bool check_sample) {
 265   bool ret = true;
 266 
 267   if (_length != 0) {
 268     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
 269                   _length);
 270     ret = false;
 271   }
 272   if (check_sample && _last_sampled_rs_lengths != 0) {
 273     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
 274     ret = false;
 275   }
 276   if (_head != NULL) {
 277     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
 278     ret = false;
 279   }
 280   if (!ret) {
 281     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
 282   }
 283 
 284   return ret;
 285 }
 286 
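// Remembered-set length sampling: callers walk the young list one region
// at a time (init / more / next), accumulating the occupied() size of each
// region's remembered set. The total is published in
// _last_sampled_rs_lengths once the walk reaches the end of the list.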
 287 void
 288 YoungList::rs_length_sampling_init() {
 289   _sampled_rs_lengths = 0;
 290   _curr               = _head;
 291 }
 292 
 293 bool
 294 YoungList::rs_length_sampling_more() {
 295   return _curr != NULL;
 296 }
 297 
 298 void
 299 YoungList::rs_length_sampling_next() {
 300   assert( _curr != NULL, "invariant" );
 301   size_t rs_length = _curr->rem_set()->occupied();
 302 
 303   _sampled_rs_lengths += rs_length;
 304 
 305   // The current region may not yet have been added to the
 306   // incremental collection set (it gets added when it is
 307   // retired as the current allocation region).
 308   if (_curr->in_collection_set()) {
 309     // Update the collection set policy information for this region
 310     _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
 311   }
 312 
 313   _curr = _curr->get_next_young_region();
 314   if (_curr == NULL) {
 315     _last_sampled_rs_lengths = _sampled_rs_lengths;
 316     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
 317   }
 318 }
 319 
 320 void
 321 YoungList::reset_auxilary_lists() {
 322   guarantee( is_empty(), "young list should be empty" );
 323   assert(check_list_well_formed(), "young list should be well formed");
 324 
 325   // Add survivor regions to SurvRateGroup.
 326   _g1h->g1_policy()->note_start_adding_survivor_regions();
 327   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
 328 
 329   int young_index_in_cset = 0;
 330   for (HeapRegion* curr = _survivor_head;
 331        curr != NULL;
 332        curr = curr->get_next_young_region()) {
 333     _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
 334 
 335     // The region is a non-empty survivor so let's add it to
 336     // the incremental collection set for the next evacuation
 337     // pause.
 338     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
 339     young_index_in_cset += 1;
 340   }
 341   assert((uint) young_index_in_cset == _survivor_length, "post-condition");
 342   _g1h->g1_policy()->note_stop_adding_survivor_regions();
 343 
 344   _head   = _survivor_head;
 345   _length = _survivor_length;
 346   if (_survivor_head != NULL) {
 347     assert(_survivor_tail != NULL, "cause it shouldn't be");
 348     assert(_survivor_length > 0, "invariant");
 349     _survivor_tail->set_next_young_region(NULL);
 350   }
 351 
 352   // Don't clear the survivor list handles until the start of
 353   // the next evacuation pause - we need it in order to re-tag
 354   // the survivor regions from this evacuation pause as 'young'
 355   // at the start of the next.
 356 
 357   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
 358 
 359   assert(check_list_well_formed(), "young list should be well formed");
 360 }
 361 
 362 void YoungList::print() {
 363   HeapRegion* lists[] = {_head,   _survivor_head};
 364   const char* names[] = {"YOUNG", "SURVIVOR"};
 365 
 366   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
 367     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
 368     HeapRegion *curr = lists[list];
 369     if (curr == NULL)
 370       gclog_or_tty->print_cr("  empty");
 371     while (curr != NULL) {
      gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
 373                              HR_FORMAT_PARAMS(curr),
 374                              curr->prev_top_at_mark_start(),
 375                              curr->next_top_at_mark_start(),
 376                              curr->age_in_surv_rate_group_cond());
 377       curr = curr->get_next_young_region();
 378     }
 379   }
 380 
 381   gclog_or_tty->print_cr("");
 382 }
 383 
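// The dirty cards region list is a lock-free, singly-linked stack built
// with compare-and-swap. A region's next_dirty_cards_region field also
// acts as the claim marker: it is NULL when the region is not on the
// list, and the last element of the list points to itself rather than
// to NULL.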
 384 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 385 {
 386   // Claim the right to put the region on the dirty cards region list
 387   // by installing a self pointer.
 388   HeapRegion* next = hr->get_next_dirty_cards_region();
 389   if (next == NULL) {
 390     HeapRegion* res = (HeapRegion*)
 391       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
 392                           NULL);
 393     if (res == NULL) {
 394       HeapRegion* head;
 395       do {
 396         // Put the region to the dirty cards region list.
 397         head = _dirty_cards_region_list;
 398         next = (HeapRegion*)
 399           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
 400         if (next == head) {
 401           assert(hr->get_next_dirty_cards_region() == hr,
 402                  "hr->get_next_dirty_cards_region() != hr");
 403           if (next == NULL) {
 404             // The last region in the list points to itself.
 405             hr->set_next_dirty_cards_region(hr);
 406           } else {
 407             hr->set_next_dirty_cards_region(next);
 408           }
 409         }
 410       } while (next != head);
 411     }
 412   }
 413 }
 414 
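// Pops the head of the dirty cards region list. Since the last element
// points to itself (see above), a self-pointer is translated back to
// NULL before the CAS that removes the head.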
 415 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
 416 {
 417   HeapRegion* head;
 418   HeapRegion* hr;
 419   do {
 420     head = _dirty_cards_region_list;
 421     if (head == NULL) {
 422       return NULL;
 423     }
 424     HeapRegion* new_head = head->get_next_dirty_cards_region();
 425     if (head == new_head) {
 426       // The last region.
 427       new_head = NULL;
 428     }
 429     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
 430                                           head);
 431   } while (hr != head);
 432   assert(hr != NULL, "invariant");
 433   hr->set_next_dirty_cards_region(NULL);
 434   return hr;
 435 }
 436 
 437 void G1CollectedHeap::stop_conc_gc_threads() {
 438   _cg1r->stop();
 439   _cmThread->stop();
 440 }
 441 
 442 #ifdef ASSERT
 443 // A region is added to the collection set as it is retired
 444 // so an address p can point to a region which will be in the
 445 // collection set but has not yet been retired.  This method
 446 // therefore is only accurate during a GC pause after all
 447 // regions have been retired.  It is used for debugging
 448 // to check if an nmethod has references to objects that can
// be moved during a partial collection.  Though it can be
 450 // inaccurate, it is sufficient for G1 because the conservative
 451 // implementation of is_scavengable() for G1 will indicate that
 452 // all nmethods must be scanned during a partial collection.
 453 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
 454   HeapRegion* hr = heap_region_containing(p);
 455   return hr != NULL && hr->in_collection_set();
 456 }
 457 #endif
 458 
 459 // Returns true if the reference points to an object that
// can move in an incremental collection.
 461 bool G1CollectedHeap::is_scavengable(const void* p) {
 462   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 463   G1CollectorPolicy* g1p = g1h->g1_policy();
 464   HeapRegion* hr = heap_region_containing(p);
 465   if (hr == NULL) {
 466      // perm gen (or null)
 467      return false;
 468   } else {
 469     return !hr->isHumongous();
 470   }
 471 }
 472 
 473 void G1CollectedHeap::check_ct_logs_at_safepoint() {
 474   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 475   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
 476 
 477   // Count the dirty cards at the start.
 478   CountNonCleanMemRegionClosure count1(this);
 479   ct_bs->mod_card_iterate(&count1);
 480   int orig_count = count1.n();
 481 
 482   // First clear the logged cards.
 483   ClearLoggedCardTableEntryClosure clear;
 484   dcqs.set_closure(&clear);
 485   dcqs.apply_closure_to_all_completed_buffers();
 486   dcqs.iterate_closure_all_threads(false);
 487   clear.print_histo();
 488 
  // Now ensure that there are no dirty cards.
 490   CountNonCleanMemRegionClosure count2(this);
 491   ct_bs->mod_card_iterate(&count2);
 492   if (count2.n() != 0) {
 493     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
 494                            count2.n(), orig_count);
 495   }
 496   guarantee(count2.n() == 0, "Card table should be clean.");
 497 
 498   RedirtyLoggedCardTableEntryClosure redirty;
 499   JavaThread::dirty_card_queue_set().set_closure(&redirty);
 500   dcqs.apply_closure_to_all_completed_buffers();
 501   dcqs.iterate_closure_all_threads(false);
 502   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
 503                          clear.calls(), orig_count);
 504   guarantee(redirty.calls() == clear.calls(),
 505             "Or else mechanism is broken.");
 506 
 507   CountNonCleanMemRegionClosure count3(this);
 508   ct_bs->mod_card_iterate(&count3);
 509   if (count3.n() != orig_count) {
 510     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
 511                            orig_count, count3.n());
 512     guarantee(count3.n() >= orig_count, "Should have restored them all.");
 513   }
 514 
 515   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
 516 }
 517 
 518 // Private class members.
 519 
 520 G1CollectedHeap* G1CollectedHeap::_g1h;
 521 
 522 // Private methods.
 523 
 524 HeapRegion*
 525 G1CollectedHeap::new_region_try_secondary_free_list() {
 526   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 527   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 528     if (!_secondary_free_list.is_empty()) {
 529       if (G1ConcRegionFreeingVerbose) {
 530         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 531                                "secondary_free_list has %u entries",
 532                                _secondary_free_list.length());
 533       }
 534       // It looks as if there are free regions available on the
 535       // secondary_free_list. Let's move them to the free_list and try
 536       // again to allocate from it.
 537       append_secondary_free_list();
 538 
 539       assert(!_free_list.is_empty(), "if the secondary_free_list was not "
 540              "empty we should have moved at least one entry to the free_list");
 541       HeapRegion* res = _free_list.remove_head();
 542       if (G1ConcRegionFreeingVerbose) {
 543         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 544                                "allocated "HR_FORMAT" from secondary_free_list",
 545                                HR_FORMAT_PARAMS(res));
 546       }
 547       return res;
 548     }
 549 
    // Wait here until we get notified either when (a) there are no
    // more free regions coming or (b) some regions have been moved onto
    // the secondary_free_list.
 553     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 554   }
 555 
 556   if (G1ConcRegionFreeingVerbose) {
 557     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 558                            "could not allocate from secondary_free_list");
 559   }
 560   return NULL;
 561 }
 562 
 563 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
 564   assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
 565          "the only time we use this to allocate a humongous region is "
 566          "when we are allocating a single humongous region");
 567 
 568   HeapRegion* res;
 569   if (G1StressConcRegionFreeing) {
 570     if (!_secondary_free_list.is_empty()) {
 571       if (G1ConcRegionFreeingVerbose) {
 572         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 573                                "forced to look at the secondary_free_list");
 574       }
 575       res = new_region_try_secondary_free_list();
 576       if (res != NULL) {
 577         return res;
 578       }
 579     }
 580   }
 581   res = _free_list.remove_head_or_null();
 582   if (res == NULL) {
 583     if (G1ConcRegionFreeingVerbose) {
 584       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 585                              "res == NULL, trying the secondary_free_list");
 586     }
 587     res = new_region_try_secondary_free_list();
 588   }
 589   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 590     // Currently, only attempts to allocate GC alloc regions set
 591     // do_expand to true. So, we should only reach here during a
 592     // safepoint. If this assumption changes we might have to
 593     // reconsider the use of _expand_heap_after_alloc_failure.
 594     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 595 
 596     ergo_verbose1(ErgoHeapSizing,
 597                   "attempt heap expansion",
 598                   ergo_format_reason("region allocation request failed")
 599                   ergo_format_byte("allocation request"),
 600                   word_size * HeapWordSize);
 601     if (expand(word_size * HeapWordSize)) {
 602       // Given that expand() succeeded in expanding the heap, and we
 603       // always expand the heap by an amount aligned to the heap
 604       // region size, the free list should in theory not be empty. So
 605       // it would probably be OK to use remove_head(). But the extra
 606       // check for NULL is unlikely to be a performance issue here (we
 607       // just expanded the heap!) so let's just be conservative and
 608       // use remove_head_or_null().
 609       res = _free_list.remove_head_or_null();
 610     } else {
 611       _expand_heap_after_alloc_failure = false;
 612     }
 613   }
 614   return res;
 615 }
 616 
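// Tries to find num_regions contiguous free regions for a humongous
// allocation of word_size words and removes them from the free list,
// returning the index of the first region in the series, or
// G1_NULL_HRS_INDEX on failure. A single-region request goes through the
// regular new_region() path; no heap expansion is attempted here.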
 617 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
 618                                                         size_t word_size) {
 619   assert(isHumongous(word_size), "word_size should be humongous");
 620   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 621 
 622   uint first = G1_NULL_HRS_INDEX;
 623   if (num_regions == 1) {
 624     // Only one region to allocate, no need to go through the slower
    // path. The caller will attempt the expansion if this fails, so
 626     // let's not try to expand here too.
 627     HeapRegion* hr = new_region(word_size, false /* do_expand */);
 628     if (hr != NULL) {
 629       first = hr->hrs_index();
 630     } else {
 631       first = G1_NULL_HRS_INDEX;
 632     }
 633   } else {
 634     // We can't allocate humongous regions while cleanupComplete() is
 635     // running, since some of the regions we find to be empty might not
 636     // yet be added to the free list and it is not straightforward to
 637     // know which list they are on so that we can remove them. Note
 638     // that we only need to do this if we need to allocate more than
 639     // one region to satisfy the current humongous allocation
 640     // request. If we are only allocating one region we use the common
 641     // region allocation code (see above).
 642     wait_while_free_regions_coming();
 643     append_secondary_free_list_if_not_empty_with_lock();
 644 
 645     if (free_regions() >= num_regions) {
 646       first = _hrs.find_contiguous(num_regions);
 647       if (first != G1_NULL_HRS_INDEX) {
 648         for (uint i = first; i < first + num_regions; ++i) {
 649           HeapRegion* hr = region_at(i);
 650           assert(hr->is_empty(), "sanity");
 651           assert(is_on_master_free_list(hr), "sanity");
 652           hr->set_pending_removal(true);
 653         }
 654         _free_list.remove_all_pending(num_regions);
 655       }
 656     }
 657   }
 658   return first;
 659 }
 660 
 661 HeapWord*
 662 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 663                                                            uint num_regions,
 664                                                            size_t word_size) {
 665   assert(first != G1_NULL_HRS_INDEX, "pre-condition");
 666   assert(isHumongous(word_size), "word_size should be humongous");
 667   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 668 
 669   // Index of last region in the series + 1.
 670   uint last = first + num_regions;
 671 
 672   // We need to initialize the region(s) we just discovered. This is
 673   // a bit tricky given that it can happen concurrently with
 674   // refinement threads refining cards on these regions and
 675   // potentially wanting to refine the BOT as they are scanning
 676   // those cards (this can happen shortly after a cleanup; see CR
 677   // 6991377). So we have to set up the region(s) carefully and in
 678   // a specific order.
 679 
 680   // The word size sum of all the regions we will allocate.
 681   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 682   assert(word_size <= word_size_sum, "sanity");
 683 
 684   // This will be the "starts humongous" region.
 685   HeapRegion* first_hr = region_at(first);
 686   // The header of the new object will be placed at the bottom of
 687   // the first region.
 688   HeapWord* new_obj = first_hr->bottom();
  // This will be the new end of the first region in the series, which
  // should also match the end of the last region in the series.
 691   HeapWord* new_end = new_obj + word_size_sum;
 692   // This will be the new top of the first region that will reflect
 693   // this allocation.
 694   HeapWord* new_top = new_obj + word_size;
 695 
 696   // First, we need to zero the header of the space that we will be
 697   // allocating. When we update top further down, some refinement
 698   // threads might try to scan the region. By zeroing the header we
 699   // ensure that any thread that will try to scan the region will
 700   // come across the zero klass word and bail out.
 701   //
 702   // NOTE: It would not have been correct to have used
 703   // CollectedHeap::fill_with_object() and make the space look like
 704   // an int array. The thread that is doing the allocation will
 705   // later update the object header to a potentially different array
 706   // type and, for a very short period of time, the klass and length
 707   // fields will be inconsistent. This could cause a refinement
 708   // thread to calculate the object size incorrectly.
 709   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 710 
 711   // We will set up the first region as "starts humongous". This
 712   // will also update the BOT covering all the regions to reflect
 713   // that there is a single object that starts at the bottom of the
 714   // first region.
 715   first_hr->set_startsHumongous(new_top, new_end);
 716 
 717   // Then, if there are any, we will set up the "continues
 718   // humongous" regions.
 719   HeapRegion* hr = NULL;
 720   for (uint i = first + 1; i < last; ++i) {
 721     hr = region_at(i);
 722     hr->set_continuesHumongous(first_hr);
 723   }
 724   // If we have "continues humongous" regions (hr != NULL), then the
 725   // end of the last one should match new_end.
 726   assert(hr == NULL || hr->end() == new_end, "sanity");
 727 
 728   // Up to this point no concurrent thread would have been able to
 729   // do any scanning on any region in this series. All the top
 730   // fields still point to bottom, so the intersection between
 731   // [bottom,top] and [card_start,card_end] will be empty. Before we
 732   // update the top fields, we'll do a storestore to make sure that
 733   // no thread sees the update to top before the zeroing of the
 734   // object header and the BOT initialization.
 735   OrderAccess::storestore();
 736 
 737   // Now that the BOT and the object header have been initialized,
 738   // we can update top of the "starts humongous" region.
 739   assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
 740          "new_top should be in this region");
 741   first_hr->set_top(new_top);
 742   if (_hr_printer.is_active()) {
 743     HeapWord* bottom = first_hr->bottom();
 744     HeapWord* end = first_hr->orig_end();
 745     if ((first + 1) == last) {
 746       // the series has a single humongous region
 747       _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
 748     } else {
      // the series has more than one humongous region
 750       _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
 751     }
 752   }
 753 
 754   // Now, we will update the top fields of the "continues humongous"
 755   // regions. The reason we need to do this is that, otherwise,
 756   // these regions would look empty and this will confuse parts of
 757   // G1. For example, the code that looks for a consecutive number
 758   // of empty regions will consider them empty and try to
 759   // re-allocate them. We can extend is_empty() to also include
 760   // !continuesHumongous(), but it is easier to just update the top
 761   // fields here. The way we set top for all regions (i.e., top ==
 762   // end for all regions but the last one, top == new_top for the
 763   // last one) is actually used when we will free up the humongous
 764   // region in free_humongous_region().
 765   hr = NULL;
 766   for (uint i = first + 1; i < last; ++i) {
 767     hr = region_at(i);
 768     if ((i + 1) == last) {
 769       // last continues humongous region
 770       assert(hr->bottom() < new_top && new_top <= hr->end(),
 771              "new_top should fall on this region");
 772       hr->set_top(new_top);
 773       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
 774     } else {
 775       // not last one
 776       assert(new_top > hr->end(), "new_top should be above this region");
 777       hr->set_top(hr->end());
 778       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
 779     }
 780   }
 781   // If we have continues humongous regions (hr != NULL), then the
 782   // end of the last one should match new_end and its top should
 783   // match new_top.
 784   assert(hr == NULL ||
 785          (hr->end() == new_end && hr->top() == new_top), "sanity");
 786 
 787   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
 788   _summary_bytes_used += first_hr->used();
 789   _humongous_set.add(first_hr);
 790 
 791   return new_obj;
 792 }
 793 
// If the allocation could fit into the free regions without expansion, try that.
// Otherwise, if the heap can expand, do so.
// Otherwise, if using ex regions might help, try with ex regions given back.
 797 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
 798   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 799 
 800   verify_region_sets_optional();
 801 
 802   size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
 803   uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
 804   uint x_num = expansion_regions();
 805   uint fs = _hrs.free_suffix();
 806   uint first = humongous_obj_allocate_find_first(num_regions, word_size);
 807   if (first == G1_NULL_HRS_INDEX) {
 808     // The only thing we can do now is attempt expansion.
 809     if (fs + x_num >= num_regions) {
 810       // If the number of regions we're trying to allocate for this
 811       // object is at most the number of regions in the free suffix,
 812       // then the call to humongous_obj_allocate_find_first() above
 813       // should have succeeded and we wouldn't be here.
 814       //
 815       // We should only be trying to expand when the free suffix is
 816       // not sufficient for the object _and_ we have some expansion
 817       // room available.
 818       assert(num_regions > fs, "earlier allocation should have succeeded");
 819 
 820       ergo_verbose1(ErgoHeapSizing,
 821                     "attempt heap expansion",
 822                     ergo_format_reason("humongous allocation request failed")
 823                     ergo_format_byte("allocation request"),
 824                     word_size * HeapWordSize);
 825       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
 826         // Even though the heap was expanded, it might not have
 827         // reached the desired size. So, we cannot assume that the
 828         // allocation will succeed.
 829         first = humongous_obj_allocate_find_first(num_regions, word_size);
 830       }
 831     }
 832   }
 833 
 834   HeapWord* result = NULL;
 835   if (first != G1_NULL_HRS_INDEX) {
 836     result =
 837       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
 838     assert(result != NULL, "it should always return a valid result");
 839 
 840     // A successful humongous object allocation changes the used space
 841     // information of the old generation so we need to recalculate the
 842     // sizes and update the jstat counters here.
 843     g1mm()->update_sizes();
 844   }
 845 
 846   verify_region_sets_optional();
 847 
 848   return result;
 849 }
 850 
 851 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 852   assert_heap_not_locked_and_not_at_safepoint();
 853   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 854 
 855   unsigned int dummy_gc_count_before;
 856   return attempt_allocation(word_size, &dummy_gc_count_before);
 857 }
 858 
 859 HeapWord*
 860 G1CollectedHeap::mem_allocate(size_t word_size,
 861                               bool*  gc_overhead_limit_was_exceeded) {
 862   assert_heap_not_locked_and_not_at_safepoint();
 863 
  // Loop until the allocation is satisfied, or unsatisfied after GC.
 865   for (int try_count = 1; /* we'll return */; try_count += 1) {
 866     unsigned int gc_count_before;
 867 
 868     HeapWord* result = NULL;
 869     if (!isHumongous(word_size)) {
 870       result = attempt_allocation(word_size, &gc_count_before);
 871     } else {
 872       result = attempt_allocation_humongous(word_size, &gc_count_before);
 873     }
 874     if (result != NULL) {
 875       return result;
 876     }
 877 
 878     // Create the garbage collection operation...
 879     VM_G1CollectForAllocation op(gc_count_before, word_size);
 880     // ...and get the VM thread to execute it.
 881     VMThread::execute(&op);
 882 
 883     if (op.prologue_succeeded() && op.pause_succeeded()) {
 884       // If the operation was successful we'll return the result even
 885       // if it is NULL. If the allocation attempt failed immediately
 886       // after a Full GC, it's unlikely we'll be able to allocate now.
 887       HeapWord* result = op.result();
 888       if (result != NULL && !isHumongous(word_size)) {
 889         // Allocations that take place on VM operations do not do any
 890         // card dirtying and we have to do it here. We only have to do
 891         // this for non-humongous allocations, though.
 892         dirty_young_block(result, word_size);
 893       }
 894       return result;
 895     } else {
 896       assert(op.result() == NULL,
 897              "the result should be NULL if the VM op did not succeed");
 898     }
 899 
 900     // Give a warning if we seem to be looping forever.
 901     if ((QueuedAllocationWarningCount > 0) &&
 902         (try_count % QueuedAllocationWarningCount == 0)) {
 903       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
 904     }
 905   }
 906 
 907   ShouldNotReachHere();
 908   return NULL;
 909 }
 910 
 911 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 912                                            unsigned int *gc_count_before_ret) {
 913   // Make sure you read the note in attempt_allocation_humongous().
 914 
 915   assert_heap_not_locked_and_not_at_safepoint();
 916   assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
 917          "be called for humongous allocation requests");
 918 
 919   // We should only get here after the first-level allocation attempt
 920   // (attempt_allocation()) failed to allocate.
 921 
 922   // We will loop until a) we manage to successfully perform the
 923   // allocation or b) we successfully schedule a collection which
 924   // fails to perform the allocation. b) is the only case when we'll
 925   // return NULL.
 926   HeapWord* result = NULL;
 927   for (int try_count = 1; /* we'll return */; try_count += 1) {
 928     bool should_try_gc;
 929     unsigned int gc_count_before;
 930 
 931     {
 932       MutexLockerEx x(Heap_lock);
 933 
 934       result = _mutator_alloc_region.attempt_allocation_locked(word_size,
 935                                                       false /* bot_updates */);
 936       if (result != NULL) {
 937         return result;
 938       }
 939 
 940       // If we reach here, attempt_allocation_locked() above failed to
 941       // allocate a new region. So the mutator alloc region should be NULL.
 942       assert(_mutator_alloc_region.get() == NULL, "only way to get here");
 943 
 944       if (GC_locker::is_active_and_needs_gc()) {
 945         if (g1_policy()->can_expand_young_list()) {
 946           // No need for an ergo verbose message here,
 947           // can_expand_young_list() does this when it returns true.
 948           result = _mutator_alloc_region.attempt_allocation_force(word_size,
 949                                                       false /* bot_updates */);
 950           if (result != NULL) {
 951             return result;
 952           }
 953         }
 954         should_try_gc = false;
 955       } else {
 956         // The GCLocker may not be active but the GCLocker initiated
 957         // GC may not yet have been performed (GCLocker::needs_gc()
 958         // returns true). In this case we do not try this GC and
 959         // wait until the GCLocker initiated GC is performed, and
 960         // then retry the allocation.
 961         if (GC_locker::needs_gc()) {
 962           should_try_gc = false;
 963         } else {
 964           // Read the GC count while still holding the Heap_lock.
 965           gc_count_before = total_collections();
 966           should_try_gc = true;
 967         }
 968       }
 969     }
 970 
 971     if (should_try_gc) {
 972       bool succeeded;
 973       result = do_collection_pause(word_size, gc_count_before, &succeeded);
 974       if (result != NULL) {
 975         assert(succeeded, "only way to get back a non-NULL result");
 976         return result;
 977       }
 978 
 979       if (succeeded) {
 980         // If we get here we successfully scheduled a collection which
 981         // failed to allocate. No point in trying to allocate
 982         // further. We'll just return NULL.
 983         MutexLockerEx x(Heap_lock);
 984         *gc_count_before_ret = total_collections();
 985         return NULL;
 986       }
 987     } else {
 988       // The GCLocker is either active or the GCLocker initiated
 989       // GC has not yet been performed. Stall until it is and
 990       // then retry the allocation.
 991       GC_locker::stall_until_clear();
 992     }
 993 
    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
 997     // allocation attempt in case another thread successfully
 998     // performed a collection and reclaimed enough space. We do the
 999     // first attempt (without holding the Heap_lock) here and the
1000     // follow-on attempt will be at the start of the next loop
1001     // iteration (after taking the Heap_lock).
1002     result = _mutator_alloc_region.attempt_allocation(word_size,
1003                                                       false /* bot_updates */);
1004     if (result != NULL) {
1005       return result;
1006     }
1007 
1008     // Give a warning if we seem to be looping forever.
1009     if ((QueuedAllocationWarningCount > 0) &&
1010         (try_count % QueuedAllocationWarningCount == 0)) {
1011       warning("G1CollectedHeap::attempt_allocation_slow() "
1012               "retries %d times", try_count);
1013     }
1014   }
1015 
1016   ShouldNotReachHere();
1017   return NULL;
1018 }
1019 
1020 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1021                                           unsigned int * gc_count_before_ret) {
1022   // The structure of this method has a lot of similarities to
1023   // attempt_allocation_slow(). The reason these two were not merged
1024   // into a single one is that such a method would require several "if
1025   // allocation is not humongous do this, otherwise do that"
1026   // conditional paths which would obscure its flow. In fact, an early
1027   // version of this code did use a unified method which was harder to
1028   // follow and, as a result, it had subtle bugs that were hard to
1029   // track down. So keeping these two methods separate allows each to
1030   // be more readable. It will be good to keep these two in sync as
1031   // much as possible.
1032 
1033   assert_heap_not_locked_and_not_at_safepoint();
1034   assert(isHumongous(word_size), "attempt_allocation_humongous() "
1035          "should only be called for humongous allocations");
1036 
1037   // Humongous objects can exhaust the heap quickly, so we should check if we
1038   // need to start a marking cycle at each humongous object allocation. We do
1039   // the check before we do the actual allocation. The reason for doing it
1040   // before the allocation is that we avoid having to keep track of the newly
1041   // allocated memory while we do a GC.
1042   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
1043                                            word_size)) {
1044     collect(GCCause::_g1_humongous_allocation);
1045   }
1046 
1047   // We will loop until a) we manage to successfully perform the
1048   // allocation or b) we successfully schedule a collection which
1049   // fails to perform the allocation. b) is the only case when we'll
1050   // return NULL.
1051   HeapWord* result = NULL;
1052   for (int try_count = 1; /* we'll return */; try_count += 1) {
1053     bool should_try_gc;
1054     unsigned int gc_count_before;
1055 
1056     {
1057       MutexLockerEx x(Heap_lock);
1058 
1059       // Given that humongous objects are not allocated in young
1060       // regions, we'll first try to do the allocation without doing a
1061       // collection hoping that there's enough space in the heap.
1062       result = humongous_obj_allocate(word_size);
1063       if (result != NULL) {
1064         return result;
1065       }
1066 
1067       if (GC_locker::is_active_and_needs_gc()) {
1068         should_try_gc = false;
1069       } else {
        // The GCLocker may not be active but the GCLocker initiated
1071         // GC may not yet have been performed (GCLocker::needs_gc()
1072         // returns true). In this case we do not try this GC and
1073         // wait until the GCLocker initiated GC is performed, and
1074         // then retry the allocation.
1075         if (GC_locker::needs_gc()) {
1076           should_try_gc = false;
1077         } else {
1078           // Read the GC count while still holding the Heap_lock.
1079           gc_count_before = total_collections();
1080           should_try_gc = true;
1081         }
1082       }
1083     }
1084 
1085     if (should_try_gc) {
1086       // If we failed to allocate the humongous object, we should try to
1087       // do a collection pause (if we're allowed) in case it reclaims
1088       // enough space for the allocation to succeed after the pause.
1089 
1090       bool succeeded;
1091       result = do_collection_pause(word_size, gc_count_before, &succeeded);
1092       if (result != NULL) {
1093         assert(succeeded, "only way to get back a non-NULL result");
1094         return result;
1095       }
1096 
1097       if (succeeded) {
1098         // If we get here we successfully scheduled a collection which
1099         // failed to allocate. No point in trying to allocate
1100         // further. We'll just return NULL.
1101         MutexLockerEx x(Heap_lock);
1102         *gc_count_before_ret = total_collections();
1103         return NULL;
1104       }
1105     } else {
1106       // The GCLocker is either active or the GCLocker initiated
1107       // GC has not yet been performed. Stall until it is and
1108       // then retry the allocation.
1109       GC_locker::stall_until_clear();
1110     }
1111 
    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
1115     // allocation attempt in case another thread successfully
1116     // performed a collection and reclaimed enough space.  Give a
1117     // warning if we seem to be looping forever.
1118 
1119     if ((QueuedAllocationWarningCount > 0) &&
1120         (try_count % QueuedAllocationWarningCount == 0)) {
1121       warning("G1CollectedHeap::attempt_allocation_humongous() "
1122               "retries %d times", try_count);
1123     }
1124   }
1125 
1126   ShouldNotReachHere();
1127   return NULL;
1128 }
1129 
1130 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1131                                        bool expect_null_mutator_alloc_region) {
1132   assert_at_safepoint(true /* should_be_vm_thread */);
1133   assert(_mutator_alloc_region.get() == NULL ||
1134                                              !expect_null_mutator_alloc_region,
1135          "the current alloc region was unexpectedly found to be non-NULL");
1136 
1137   if (!isHumongous(word_size)) {
1138     return _mutator_alloc_region.attempt_allocation_locked(word_size,
1139                                                       false /* bot_updates */);
1140   } else {
1141     HeapWord* result = humongous_obj_allocate(word_size);
1142     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1143       g1_policy()->set_initiate_conc_mark_if_possible();
1144     }
1145     return result;
1146   }
1147 
1148   ShouldNotReachHere();
1149 }
1150 
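// Applied to every region after a full collection: resets the region's
// GC time stamp and clears both its remembered set and the card table
// entries covering it (see the comment in doHeapRegion() for why the
// whole region is cleared rather than just the used part).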
1151 class PostMCRemSetClearClosure: public HeapRegionClosure {
1152   ModRefBarrierSet* _mr_bs;
1153 public:
1154   PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
1155   bool doHeapRegion(HeapRegion* r) {
1156     r->reset_gc_time_stamp();
1157     if (r->continuesHumongous())
1158       return false;
1159     HeapRegionRemSet* hrrs = r->rem_set();
1160     if (hrrs != NULL) hrrs->clear();
1161     // You might think here that we could clear just the cards
1162     // corresponding to the used region.  But no: if we leave a dirty card
1163     // in a region we might allocate into, then it would prevent that card
1164     // from being enqueued, and cause it to be missed.
1165     // Re: the performance cost: we shouldn't be doing full GC anyway!
1166     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1167     return false;
1168   }
1169 };
1170 
1171 
1172 class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
1173   ModRefBarrierSet* _mr_bs;
1174 public:
1175   PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
1176   bool doHeapRegion(HeapRegion* r) {
1177     if (r->continuesHumongous()) return false;
1178     if (r->used_region().word_size() != 0) {
1179       _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
1180     }
1181     return false;
1182   }
1183 };
1184 
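// Rebuilds remembered-set information by scanning every object in a
// (non-continues-humongous) region with an UpdateRSOopClosure.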
1185 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1186   G1CollectedHeap*   _g1h;
1187   UpdateRSOopClosure _cl;
1188   int                _worker_i;
1189 public:
1190   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1191     _cl(g1->g1_rem_set(), worker_i),
1192     _worker_i(worker_i),
1193     _g1h(g1)
1194   { }
1195 
1196   bool doHeapRegion(HeapRegion* r) {
1197     if (!r->continuesHumongous()) {
1198       _cl.set_from(r);
1199       r->oop_iterate(&_cl);
1200     }
1201     return false;
1202   }
1203 };
1204 
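// Parallel version of the remembered-set rebuild: each worker claims
// chunks of regions via heap_region_par_iterate_chunked() (using
// RebuildRSClaimValue) and applies a RebuildRSOutOfRegionClosure to them.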
1205 class ParRebuildRSTask: public AbstractGangTask {
1206   G1CollectedHeap* _g1;
1207 public:
1208   ParRebuildRSTask(G1CollectedHeap* g1)
1209     : AbstractGangTask("ParRebuildRSTask"),
1210       _g1(g1)
1211   { }
1212 
1213   void work(uint worker_id) {
1214     RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1215     _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
1216                                           _g1->workers()->active_workers(),
1217                                          HeapRegion::RebuildRSClaimValue);
1218   }
1219 };
1220 
1221 class PostCompactionPrinterClosure: public HeapRegionClosure {
1222 private:
1223   G1HRPrinter* _hr_printer;
1224 public:
1225   bool doHeapRegion(HeapRegion* hr) {
1226     assert(!hr->is_young(), "not expecting to find young regions");
1227     // We only generate output for non-empty regions.
1228     if (!hr->is_empty()) {
1229       if (!hr->isHumongous()) {
1230         _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1231       } else if (hr->startsHumongous()) {
1232         if (hr->capacity() == HeapRegion::GrainBytes) {
1233           // single humongous region
1234           _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1235         } else {
1236           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1237         }
1238       } else {
1239         assert(hr->continuesHumongous(), "only way to get here");
1240         _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1241       }
1242     }
1243     return false;
1244   }
1245 
1246   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1247     : _hr_printer(hr_printer) { }
1248 };
1249 
1250 bool G1CollectedHeap::do_collection(bool explicit_gc,
1251                                     bool clear_all_soft_refs,
1252                                     size_t word_size) {
1253   assert_at_safepoint(true /* should_be_vm_thread */);
1254 
1255   if (GC_locker::check_active_before_gc()) {
1256     return false;
1257   }
1258 
1259   SvcGCMarker sgcm(SvcGCMarker::FULL);
1260   ResourceMark rm;
1261 
1262   print_heap_before_gc();
1263 
1264   HRSPhaseSetter x(HRSPhaseFullGC);
1265   verify_region_sets_optional();
1266 
1267   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1268                            collector_policy()->should_clear_all_soft_refs();
1269 
1270   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1271 
1272   {
1273     IsGCActiveMark x;
1274 
1275     // Timing
1276     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1277     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
1278     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1279 
1280     TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
1281     TraceCollectorStats tcs(g1mm()->full_collection_counters());
1282     TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1283 
1284     double start = os::elapsedTime();
1285     g1_policy()->record_full_collection_start();
1286 
1287     // Note: When we have a more flexible GC logging framework that
1288     // allows us to add optional attributes to a GC log record we
1289     // could consider timing and reporting how long we wait in the
1290     // following two methods.
1291     wait_while_free_regions_coming();
1292     // If we start the compaction before the CM threads finish
1293     // scanning the root regions we might trip them over as we'll
1294     // be moving objects / updating references. So let's wait until
1295     // they are done. By telling them to abort, they should complete
1296     // early.
1297     _cm->root_regions()->abort();
1298     _cm->root_regions()->wait_until_scan_finished();
1299     append_secondary_free_list_if_not_empty_with_lock();
1300 
1301     gc_prologue(true);
1302     increment_total_collections(true /* full gc */);
1303     increment_old_marking_cycles_started();
1304 
1305     size_t g1h_prev_used = used();
1306     assert(used() == recalculate_used(), "Should be equal");
1307 
1308     if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
1309       HandleMark hm;  // Discard invalid handles created during verification
1310       gclog_or_tty->print(" VerifyBeforeGC:");
1311       prepare_for_verify();
1312       Universe::verify(/* silent      */ false,
1313                        /* option      */ VerifyOption_G1UsePrevMarking);
1314 
1315     }
1316     pre_full_gc_dump();
1317 
1318     COMPILER2_PRESENT(DerivedPointerTable::clear());
1319 
1320     // Disable discovery and empty the discovered lists
1321     // for the CM ref processor.
1322     ref_processor_cm()->disable_discovery();
1323     ref_processor_cm()->abandon_partial_discovery();
1324     ref_processor_cm()->verify_no_references_recorded();
1325 
1326     // Abandon current iterations of concurrent marking and concurrent
1327     // refinement, if any are in progress. We have to do this before
1328     // wait_until_scan_finished() below.
1329     concurrent_mark()->abort();
1330 
1331     // Make sure we'll choose a new allocation region afterwards.
1332     release_mutator_alloc_region();
1333     abandon_gc_alloc_regions();
1334     g1_rem_set()->cleanupHRRS();
1335 
1336     // We should call this after we retire any currently active alloc
1337     // regions so that all the ALLOC / RETIRE events are generated
1338     // before the start GC event.
1339     _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1340 
1341     // We may have added regions to the current incremental collection
1342     // set between the last GC or pause and now. We need to clear the
1343     // incremental collection set and then start rebuilding it afresh
1344     // after this full GC.
1345     abandon_collection_set(g1_policy()->inc_cset_head());
1346     g1_policy()->clear_incremental_cset();
1347     g1_policy()->stop_incremental_cset_building();
1348 
1349     tear_down_region_sets(false /* free_list_only */);
1350     g1_policy()->set_gcs_are_young(true);
1351 
1352     // See the comments in g1CollectedHeap.hpp and
1353     // G1CollectedHeap::ref_processing_init() about
1354     // how reference processing currently works in G1.
1355 
1356     // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1357     ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1358 
1359     // Temporarily clear the STW ref processor's _is_alive_non_header field.
1360     ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1361 
1362     ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
1363     ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1364 
1365     // Do collection work
1366     {
1367       HandleMark hm;  // Discard invalid handles created during gc
1368       G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1369     }
1370 
1371     assert(free_regions() == 0, "we should not have added any free regions");
1372     rebuild_region_sets(false /* free_list_only */);
1373 
1374     // Enqueue any discovered reference objects that have
1375     // not been removed from the discovered lists.
1376     ref_processor_stw()->enqueue_discovered_references();
1377 
1378     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1379 
1380     MemoryService::track_memory_usage();
1381 
1382     if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
1383       HandleMark hm;  // Discard invalid handles created during verification
1384       gclog_or_tty->print(" VerifyAfterGC:");
1385       prepare_for_verify();
1386       Universe::verify(/* silent      */ false,
1387                        /* option      */ VerifyOption_G1UsePrevMarking);
1388 
1389     }
1390 
1391     assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1392     ref_processor_stw()->verify_no_references_recorded();
1393 
1394     // Note: since we've just done a full GC, concurrent
1395     // marking is no longer active. Therefore we need not
1396     // re-enable reference discovery for the CM ref processor.
1397     // That will be done at the start of the next marking cycle.
1398     assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1399     ref_processor_cm()->verify_no_references_recorded();
1400 
1401     reset_gc_time_stamp();
1402     // Since everything potentially moved, we will clear all remembered
1403     // sets, and clear all cards.  Later we will rebuild remembered
1404     // sets. We will also reset the GC time stamps of the regions.
1405     PostMCRemSetClearClosure rs_clear(mr_bs());
1406     heap_region_iterate(&rs_clear);
1407 
1408     // Resize the heap if necessary.
1409     resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1410 
1411     if (_hr_printer.is_active()) {
1412       // We should do this after we potentially resize the heap so
1413       // that all the COMMIT / UNCOMMIT events are generated before
1414       // the end GC event.
1415 
1416       PostCompactionPrinterClosure cl(hr_printer());
1417       heap_region_iterate(&cl);
1418 
1419       _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1420     }
1421 
1422     if (_cg1r->use_cache()) {
1423       _cg1r->clear_and_record_card_counts();
1424       _cg1r->clear_hot_cache();
1425     }
1426 
1427     // Rebuild remembered sets of all regions.
1428     if (G1CollectedHeap::use_parallel_gc_threads()) {
1429       uint n_workers =
1430         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1431                                        workers()->active_workers(),
1432                                        Threads::number_of_non_daemon_threads());
1433       assert(UseDynamicNumberOfGCThreads ||
1434              n_workers == workers()->total_workers(),
1435              "If not dynamic should be using all the workers");
1436       workers()->set_active_workers(n_workers);
1437       // Set parallel threads in the heap (_n_par_threads) only
1438       // before a parallel phase and always reset it to 0 after
1439       // the phase so that the number of parallel threads does
1440       // not get carried forward to a serial phase where there
1441       // may be code that is "possibly_parallel".
1442       set_par_threads(n_workers);
1443 
1444       ParRebuildRSTask rebuild_rs_task(this);
1445       assert(check_heap_region_claim_values(
1446              HeapRegion::InitialClaimValue), "sanity check");
1447       assert(UseDynamicNumberOfGCThreads ||
1448              workers()->active_workers() == workers()->total_workers(),
1449         "Unless dynamic should use total workers");
1450       // Use the most recent number of active workers
1451       assert(workers()->active_workers() > 0,
1452         "Active workers not properly set");
1453       set_par_threads(workers()->active_workers());
1454       workers()->run_task(&rebuild_rs_task);
1455       set_par_threads(0);
1456       assert(check_heap_region_claim_values(
1457              HeapRegion::RebuildRSClaimValue), "sanity check");
1458       reset_heap_region_claim_values();
1459     } else {
1460       RebuildRSOutOfRegionClosure rebuild_rs(this);
1461       heap_region_iterate(&rebuild_rs);
1462     }
1463 
1464     if (G1Log::fine()) {
1465       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
1466     }
1467 
1468     if (true) { // FIXME
1469       // Ask the permanent generation to adjust size for full collections
1470       perm()->compute_new_size();
1471     }
1472 
1473     // Start a new incremental collection set for the next pause
1474     assert(g1_policy()->collection_set() == NULL, "must be");
1475     g1_policy()->start_incremental_cset_building();
1476 
1477     // Clear the _cset_fast_test bitmap in anticipation of adding
1478     // regions to the incremental collection set for the next
1479     // evacuation pause.
1480     clear_cset_fast_test();
1481 
1482     init_mutator_alloc_region();
1483 
1484     double end = os::elapsedTime();
1485     g1_policy()->record_full_collection_end();
1486 
1487 #ifdef TRACESPINNING
1488     ParallelTaskTerminator::print_termination_counts();
1489 #endif
1490 
1491     gc_epilogue(true);
1492 
1493     // Discard all rset updates
1494     JavaThread::dirty_card_queue_set().abandon_logs();
1495     assert(!G1DeferredRSUpdate
1496            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1497 
1498     _young_list->reset_sampled_info();
1499     // At this point there should be no regions in the
1500     // entire heap tagged as young.
1501     assert( check_young_list_empty(true /* check_heap */),
1502       "young list should be empty at this point");
1503 
1504     // Update the number of full collections that have been completed.
1505     increment_old_marking_cycles_completed(false /* concurrent */);
1506 
1507     _hrs.verify_optional();
1508     verify_region_sets_optional();
1509 
1510     print_heap_after_gc();
1511 
1512     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1513     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1514     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1515     // before any GC notifications are raised.
1516     g1mm()->update_sizes();
1517   }
1518 
1519   post_full_gc_dump();
1520 
1521   return true;
1522 }
1523 
1524 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1525   // do_collection() will return whether it succeeded in performing
1526   // the GC. Currently, there is no facility on the
1527   // do_full_collection() API to notify the caller that the collection
1528   // did not succeed (e.g., because it was locked out by the GC
1529   // locker). So, right now, we'll ignore the return value.
1530   bool dummy = do_collection(true,                /* explicit_gc */
1531                              clear_all_soft_refs,
1532                              0                    /* word_size */);
1533 }
1534 
1535 // This code is mostly copied from TenuredGeneration.
1536 void
1537 G1CollectedHeap::
1538 resize_if_necessary_after_full_collection(size_t word_size) {
1539   assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
1540 
1541   // Include the current allocation, if any, and bytes that will be
1542   // pre-allocated to support collections, as "used".
1543   const size_t used_after_gc = used();
1544   const size_t capacity_after_gc = capacity();
1545   const size_t free_after_gc = capacity_after_gc - used_after_gc;
1546 
1547   // This is enforced in arguments.cpp.
1548   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1549          "otherwise the code below doesn't make sense");
1550 
1551   // We don't have floating point command-line arguments
1552   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1553   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1554   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1555   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1556 
1557   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1558   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1559 
1560   // We have to be careful here as these two calculations can overflow
1561   // 32-bit size_t's.
1562   double used_after_gc_d = (double) used_after_gc;
1563   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1564   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1565 
1566   // Let's make sure that they are both under the max heap size, which
1567   // by default will make them fit into a size_t.
1568   double desired_capacity_upper_bound = (double) max_heap_size;
1569   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1570                                     desired_capacity_upper_bound);
1571   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1572                                     desired_capacity_upper_bound);
1573 
1574   // We can now safely turn them into size_t's.
1575   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1576   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
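       // For example, assuming MinHeapFreeRatio=40 and MaxHeapFreeRatio=70,
       // the factors above become maximum_used_percentage = 0.6 and
       // minimum_used_percentage = 0.3. With used_after_gc at 600 MB this
       // gives:
       //   minimum_desired_capacity = 600 MB / 0.6 = 1000 MB
       //   maximum_desired_capacity = 600 MB / 0.3 = 2000 MB
       // i.e. we aim to leave the committed size somewhere in that range,
       // subject to the min / max heap size clamping below.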
1577 
1578   // This assert only makes sense here, before we adjust them
1579   // with respect to the min and max heap size.
1580   assert(minimum_desired_capacity <= maximum_desired_capacity,
1581          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
1582                  "maximum_desired_capacity = "SIZE_FORMAT,
1583                  minimum_desired_capacity, maximum_desired_capacity));
1584 
1585   // Should not be greater than the heap max size. No need to adjust
1586   // it with respect to the heap min size as it's a lower bound (i.e.,
1587   // we'll try to make the capacity larger than it, not smaller).
1588   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1589   // Should not be less than the heap min size. No need to adjust it
1590   // with respect to the heap max size as it's an upper bound (i.e.,
1591   // we'll try to make the capacity smaller than it, not greater).
1592   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1593 
1594   if (capacity_after_gc < minimum_desired_capacity) {
1595     // Don't expand unless it's significant
1596     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1597     ergo_verbose4(ErgoHeapSizing,
1598                   "attempt heap expansion",
1599                   ergo_format_reason("capacity lower than "
1600                                      "min desired capacity after Full GC")
1601                   ergo_format_byte("capacity")
1602                   ergo_format_byte("occupancy")
1603                   ergo_format_byte_perc("min desired capacity"),
1604                   capacity_after_gc, used_after_gc,
1605                   minimum_desired_capacity, (double) MinHeapFreeRatio);
1606     expand(expand_bytes);
1607 
1608     // No expansion, now see if we want to shrink
1609   } else if (capacity_after_gc > maximum_desired_capacity) {
1610     // Capacity too large, compute shrinking size
1611     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1612     ergo_verbose4(ErgoHeapSizing,
1613                   "attempt heap shrinking",
1614                   ergo_format_reason("capacity higher than "
1615                                      "max desired capacity after Full GC")
1616                   ergo_format_byte("capacity")
1617                   ergo_format_byte("occupancy")
1618                   ergo_format_byte_perc("max desired capacity"),
1619                   capacity_after_gc, used_after_gc,
1620                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
1621     shrink(shrink_bytes);
1622   }
1623 }
1624 
1625 
1626 HeapWord*
1627 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1628                                            bool* succeeded) {
1629   assert_at_safepoint(true /* should_be_vm_thread */);
1630 
1631   *succeeded = true;
1632   // Let's attempt the allocation first.
1633   HeapWord* result =
1634     attempt_allocation_at_safepoint(word_size,
1635                                  false /* expect_null_mutator_alloc_region */);
1636   if (result != NULL) {
1637     assert(*succeeded, "sanity");
1638     return result;
1639   }
1640 
1641   // In a G1 heap, we're supposed to keep allocation from failing by
1642   // incremental pauses.  Therefore, at least for now, we'll favor
1643   // expansion over collection.  (This might change in the future if we can
1644   // do something smarter than full collection to satisfy a failed alloc.)
1645   result = expand_and_allocate(word_size);
1646   if (result != NULL) {
1647     assert(*succeeded, "sanity");
1648     return result;
1649   }
1650 
1651   // Expansion didn't work, we'll try to do a Full GC.
1652   bool gc_succeeded = do_collection(false, /* explicit_gc */
1653                                     false, /* clear_all_soft_refs */
1654                                     word_size);
1655   if (!gc_succeeded) {
1656     *succeeded = false;
1657     return NULL;
1658   }
1659 
1660   // Retry the allocation
1661   result = attempt_allocation_at_safepoint(word_size,
1662                                   true /* expect_null_mutator_alloc_region */);
1663   if (result != NULL) {
1664     assert(*succeeded, "sanity");
1665     return result;
1666   }
1667 
1668   // Then, try a Full GC that will collect all soft references.
1669   gc_succeeded = do_collection(false, /* explicit_gc */
1670                                true,  /* clear_all_soft_refs */
1671                                word_size);
1672   if (!gc_succeeded) {
1673     *succeeded = false;
1674     return NULL;
1675   }
1676 
1677   // Retry the allocation once more
1678   result = attempt_allocation_at_safepoint(word_size,
1679                                   true /* expect_null_mutator_alloc_region */);
1680   if (result != NULL) {
1681     assert(*succeeded, "sanity");
1682     return result;
1683   }
1684 
1685   assert(!collector_policy()->should_clear_all_soft_refs(),
1686          "Flag should have been handled and cleared prior to this point");
1687 
1688   // What else?  We might try synchronous finalization later.  If the total
1689   // space available is large enough for the allocation, then a more
1690   // complete compaction phase than we've tried so far might be
1691   // appropriate.
1692   assert(*succeeded, "sanity");
1693   return NULL;
1694 }
1695 
1696 // Attempt to expand the heap sufficiently
1697 // to support an allocation of the given "word_size".  If
1698 // successful, perform the allocation and return the address of the
1699 // allocated block, or else "NULL".
1700 
1701 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1702   assert_at_safepoint(true /* should_be_vm_thread */);
1703 
1704   verify_region_sets_optional();
1705 
1706   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1707   ergo_verbose1(ErgoHeapSizing,
1708                 "attempt heap expansion",
1709                 ergo_format_reason("allocation request failed")
1710                 ergo_format_byte("allocation request"),
1711                 word_size * HeapWordSize);
1712   if (expand(expand_bytes)) {
1713     _hrs.verify_optional();
1714     verify_region_sets_optional();
1715     return attempt_allocation_at_safepoint(word_size,
1716                                  false /* expect_null_mutator_alloc_region */);
1717   }
1718   return NULL;
1719 }
1720 
1721 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
1722                                              HeapWord* new_end) {
1723   assert(old_end != new_end, "don't call this otherwise");
1724   assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
1725 
1726   // Update the committed mem region.
1727   _g1_committed.set_end(new_end);
1728   // Tell the card table about the update.
1729   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1730   // Tell the BOT about the update.
1731   _bot_shared->resize(_g1_committed.word_size());
1732 }
1733 
1734 bool G1CollectedHeap::expand(size_t expand_bytes) {
1735   size_t old_mem_size = _g1_storage.committed_size();
1736   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1737   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1738                                        HeapRegion::GrainBytes);
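       // For example, assuming 4 KB pages and 1 MB regions, a request to
       // expand by 2500 KB is already page aligned and is then rounded up
       // to 3 MB, i.e. three whole heap regions.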
1739   ergo_verbose2(ErgoHeapSizing,
1740                 "expand the heap",
1741                 ergo_format_byte("requested expansion amount")
1742                 ergo_format_byte("attempted expansion amount"),
1743                 expand_bytes, aligned_expand_bytes);
1744 
1745   // First commit the memory.
1746   HeapWord* old_end = (HeapWord*) _g1_storage.high();
1747   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
1748   if (successful) {
1749     // Then propagate this update to the necessary data structures.
1750     HeapWord* new_end = (HeapWord*) _g1_storage.high();
1751     update_committed_space(old_end, new_end);
1752 
1753     FreeRegionList expansion_list("Local Expansion List");
1754     MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
1755     assert(mr.start() == old_end, "post-condition");
1756     // mr might be a smaller region than what was requested if
1757     // expand_by() was unable to allocate the HeapRegion instances
1758     assert(mr.end() <= new_end, "post-condition");
1759 
1760     size_t actual_expand_bytes = mr.byte_size();
1761     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1762     assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
1763            "post-condition");
1764     if (actual_expand_bytes < aligned_expand_bytes) {
1765       // We could not expand _hrs to the desired size. In this case we
1766       // need to shrink the committed space accordingly.
1767       assert(mr.end() < new_end, "invariant");
1768 
1769       size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
1770       // First uncommit the memory.
1771       _g1_storage.shrink_by(diff_bytes);
1772       // Then propagate this update to the necessary data structures.
1773       update_committed_space(new_end, mr.end());
1774     }
1775     _free_list.add_as_tail(&expansion_list);
1776 
1777     if (_hr_printer.is_active()) {
1778       HeapWord* curr = mr.start();
1779       while (curr < mr.end()) {
1780         HeapWord* curr_end = curr + HeapRegion::GrainWords;
1781         _hr_printer.commit(curr, curr_end);
1782         curr = curr_end;
1783       }
1784       assert(curr == mr.end(), "post-condition");
1785     }
1786     g1_policy()->record_new_heap_size(n_regions());
1787   } else {
1788     ergo_verbose0(ErgoHeapSizing,
1789                   "did not expand the heap",
1790                   ergo_format_reason("heap expansion operation failed"));
1791     // The expansion of the virtual storage space was unsuccessful.
1792     // Let's see if it was because we ran out of swap.
1793     if (G1ExitOnExpansionFailure &&
1794         _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
1795       // We had head room...
1796       vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
1797     }
1798   }
1799   return successful;
1800 }
1801 
1802 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1803   size_t old_mem_size = _g1_storage.committed_size();
1804   size_t aligned_shrink_bytes =
1805     ReservedSpace::page_align_size_down(shrink_bytes);
1806   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1807                                          HeapRegion::GrainBytes);
1808   uint num_regions_deleted = 0;
1809   MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
1810   HeapWord* old_end = (HeapWord*) _g1_storage.high();
1811   assert(mr.end() == old_end, "post-condition");
1812 
1813   ergo_verbose3(ErgoHeapSizing,
1814                 "shrink the heap",
1815                 ergo_format_byte("requested shrinking amount")
1816                 ergo_format_byte("aligned shrinking amount")
1817                 ergo_format_byte("attempted shrinking amount"),
1818                 shrink_bytes, aligned_shrink_bytes, mr.byte_size());
1819   if (mr.byte_size() > 0) {
1820     if (_hr_printer.is_active()) {
1821       HeapWord* curr = mr.end();
1822       while (curr > mr.start()) {
1823         HeapWord* curr_end = curr;
1824         curr -= HeapRegion::GrainWords;
1825         _hr_printer.uncommit(curr, curr_end);
1826       }
1827       assert(curr == mr.start(), "post-condition");
1828     }
1829 
1830     _g1_storage.shrink_by(mr.byte_size());
1831     HeapWord* new_end = (HeapWord*) _g1_storage.high();
1832     assert(mr.start() == new_end, "post-condition");
1833 
1834     _expansion_regions += num_regions_deleted;
1835     update_committed_space(old_end, new_end);
1836     HeapRegionRemSet::shrink_heap(n_regions());
1837     g1_policy()->record_new_heap_size(n_regions());
1838   } else {
1839     ergo_verbose0(ErgoHeapSizing,
1840                   "did not shrink the heap",
1841                   ergo_format_reason("heap shrinking operation failed"));
1842   }
1843 }
1844 
1845 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1846   verify_region_sets_optional();
1847 
1848   // We should only reach here at the end of a Full GC which means we
1849   // should not be holding on to any GC alloc regions. The method
1850   // below will make sure of that and do any remaining clean up.
1851   abandon_gc_alloc_regions();
1852 
1853   // Instead of tearing down / rebuilding the free lists here, we
1854   // could instead use the remove_all_pending() method on free_list to
1855   // remove only the ones that we need to remove.
1856   tear_down_region_sets(true /* free_list_only */);
1857   shrink_helper(shrink_bytes);
1858   rebuild_region_sets(true /* free_list_only */);
1859 
1860   _hrs.verify_optional();
1861   verify_region_sets_optional();
1862 }
1863 
1864 // Public methods.
1865 
1866 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1867 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1868 #endif // _MSC_VER
1869 
1870 
1871 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1872   SharedHeap(policy_),
1873   _g1_policy(policy_),
1874   _dirty_card_queue_set(false),
1875   _into_cset_dirty_card_queue_set(false),
1876   _is_alive_closure_cm(this),
1877   _is_alive_closure_stw(this),
1878   _ref_processor_cm(NULL),
1879   _ref_processor_stw(NULL),
1880   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1881   _bot_shared(NULL),
1882   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
1883   _evac_failure_scan_stack(NULL) ,
1884   _mark_in_progress(false),
1885   _cg1r(NULL), _summary_bytes_used(0),
1886   _g1mm(NULL),
1887   _refine_cte_cl(NULL),
1888   _full_collection(false),
1889   _free_list("Master Free List"),
1890   _secondary_free_list("Secondary Free List"),
1891   _old_set("Old Set"),
1892   _humongous_set("Master Humongous Set"),
1893   _free_regions_coming(false),
1894   _young_list(new YoungList(this)),
1895   _gc_time_stamp(0),
1896   _retained_old_gc_alloc_region(NULL),
1897   _expand_heap_after_alloc_failure(true),
1898   _surviving_young_words(NULL),
1899   _old_marking_cycles_started(0),
1900   _old_marking_cycles_completed(0),
1901   _in_cset_fast_test(NULL),
1902   _in_cset_fast_test_base(NULL),
1903   _dirty_cards_region_list(NULL),
1904   _worker_cset_start_region(NULL),
1905   _worker_cset_start_region_time_stamp(NULL) {
1906   _g1h = this; // To catch bugs.
1907   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1908     vm_exit_during_initialization("Failed necessary allocation.");
1909   }
1910 
1911   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
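       // For example, assuming 1 MB regions on a 64-bit VM (8-byte
       // HeapWords), GrainWords is 131072, so any allocation larger than
       // 65536 words (512 KB) is treated as humongous and is allocated in
       // dedicated region(s) of its own.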
1912 
1913   int n_queues = MAX2((int)ParallelGCThreads, 1);
1914   _task_queues = new RefToScanQueueSet(n_queues);
1915 
1916   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1917   assert(n_rem_sets > 0, "Invariant.");
1918 
1919   HeapRegionRemSetIterator** iter_arr =
1920     NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC);
1921   for (int i = 0; i < n_queues; i++) {
1922     iter_arr[i] = new HeapRegionRemSetIterator();
1923   }
1924   _rem_set_iterator = iter_arr;
1925 
1926   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1927   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
1928 
1929   for (int i = 0; i < n_queues; i++) {
1930     RefToScanQueue* q = new RefToScanQueue();
1931     q->initialize();
1932     _task_queues->register_queue(i, q);
1933   }
1934 
1935   clear_cset_start_regions();
1936 
1937   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1938 }
1939 
1940 jint G1CollectedHeap::initialize() {
1941   CollectedHeap::pre_initialize();
1942   os::enable_vtime();
1943 
1944   G1Log::init();
1945 
1946   // Necessary to satisfy locking discipline assertions.
1947 
1948   MutexLocker x(Heap_lock);
1949 
1950   // We have to initialize the printer before committing the heap, as
1951   // it will be used then.
1952   _hr_printer.set_active(G1PrintHeapRegions);
1953 
1954   // While there are no constraints in the GC code that HeapWordSize
1955   // be any particular value, there are multiple other areas in the
1956   // system which believe this to be true (e.g. oop->object_size in some
1957   // cases incorrectly returns the size in wordSize units rather than
1958   // HeapWordSize).
1959   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1960 
1961   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1962   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1963 
1964   // Ensure that the sizes are properly aligned.
1965   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1966   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1967 
1968   _cg1r = new ConcurrentG1Refine();
1969 
1970   // Reserve the maximum.
1971   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
1972   // Includes the perm-gen.
1973 
1974   // When compressed oops are enabled, the preferred heap base
1975   // is calculated by subtracting the requested size from the
1976   // 32Gb boundary and using the result as the base address for
1977   // heap reservation. If the requested size is not aligned to
1978   // HeapRegion::GrainBytes (i.e. the alignment that is passed
1979   // into the ReservedHeapSpace constructor) then the actual
1980   // base of the reserved heap may end up differing from the
1981   // address that was requested (i.e. the preferred heap base).
1982   // If this happens then we could end up using a non-optimal
1983   // compressed oops mode.
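       // As a rough illustration, assuming total_reserved is 20 GB, the
       // preferred base would be 32 GB - 20 GB = 12 GB, so the heap ends at
       // the 32 GB boundary and a cheap, zero-based compressed oops
       // encoding (a shift with no heap base added) remains possible. If
       // that address cannot be mapped, the fallback code below retries
       // with progressively less optimal narrow oop modes.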
1984 
1985   // Since max_byte_size is aligned to the size of a heap region (checked
1986   // above), we also need to align the perm gen size as it might not be.
1987   const size_t total_reserved = max_byte_size +
1988                                 align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
1989   Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");
1990 
1991   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
1992 
1993   ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
1994                             UseLargePages, addr);
1995 
1996   if (UseCompressedOops) {
1997     if (addr != NULL && !heap_rs.is_reserved()) {
1998       // Failed to reserve at specified address - the requested memory
1999       // region is taken already, for example, by the 'java' launcher.
2000       // Try again to reserve the heap at a higher address.
2001       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
2002 
2003       ReservedHeapSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
2004                                  UseLargePages, addr);
2005 
2006       if (addr != NULL && !heap_rs0.is_reserved()) {
2007         // Failed to reserve at specified address again - give up.
2008         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
2009         assert(addr == NULL, "");
2010 
2011         ReservedHeapSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
2012                                    UseLargePages, addr);
2013         heap_rs = heap_rs1;
2014       } else {
2015         heap_rs = heap_rs0;
2016       }
2017     }
2018   }
2019 
2020   if (!heap_rs.is_reserved()) {
2021     vm_exit_during_initialization("Could not reserve enough space for object heap");
2022     return JNI_ENOMEM;
2023   }
2024 
2025   // It is important to do this in a way such that concurrent readers can't
2026   // temporarily think something is in the heap.  (I've actually seen this
2027   // happen in asserts: DLD.)
2028   _reserved.set_word_size(0);
2029   _reserved.set_start((HeapWord*)heap_rs.base());
2030   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
2031 
2032   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
2033 
2034   // Create the gen rem set (and barrier set) for the entire reserved region.
2035   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
2036   set_barrier_set(rem_set()->bs());
2037   if (barrier_set()->is_a(BarrierSet::ModRef)) {
2038     _mr_bs = (ModRefBarrierSet*)_barrier_set;
2039   } else {
2040     vm_exit_during_initialization("G1 requires a mod ref bs.");
2041     return JNI_ENOMEM;
2042   }
2043 
2044   // Also create a G1 rem set.
2045   if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
2046     _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
2047   } else {
2048     vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
2049     return JNI_ENOMEM;
2050   }
2051 
2052   // Carve out the G1 part of the heap.
2053 
2054   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
2055   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
2056                            g1_rs.size()/HeapWordSize);
2057   ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
2058 
2059   _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
2060 
2061   _g1_storage.initialize(g1_rs, 0);
2062   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
2063   _hrs.initialize((HeapWord*) _g1_reserved.start(),
2064                   (HeapWord*) _g1_reserved.end(),
2065                   _expansion_regions);
2066 
2067   // 6843694 - ensure that the maximum region index can fit
2068   // in the remembered set structures.
2069   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2070   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2071 
2072   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2073   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2074   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2075             "too many cards per region");
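       // For example, assuming RegionIdx_t and CardIdx_t are 16-bit types,
       // the guarantees above limit the heap to at most 32768 regions
       // (indices 0 .. 2^15 - 1 = 32767) and require fewer than 32767
       // cards per region, which is what the remembered set encoding can
       // address.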
2076 
2077   HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
2078 
2079   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2080                                              heap_word_size(init_byte_size));
2081 
2082   _g1h = this;
2083 
2084    _in_cset_fast_test_length = max_regions();
2085    _in_cset_fast_test_base =
2086                    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
2087 
2088    // We're biasing _in_cset_fast_test to avoid subtracting the
2089    // beginning of the heap every time we want to index; basically
2090    // it's the same as what we do with the card table.
2091    _in_cset_fast_test = _in_cset_fast_test_base -
2092                ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
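        // For example, assuming 1 MB regions (LogOfHRGrainBytes == 20) and
        // a heap starting at 0x80000000, looking up address 0x80300000
        // computes 0x80300000 >> 20 = 0x803; because the base pointer was
        // biased by (0x80000000 >> 20) = 0x800, _in_cset_fast_test[0x803]
        // is entry 3 of the underlying array, i.e. the entry for the
        // fourth region in the heap.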
2093 
2094    // Clear the _cset_fast_test bitmap in anticipation of adding
2095    // regions to the incremental collection set for the first
2096    // evacuation pause.
2097    clear_cset_fast_test();
2098 
2099   // Create the ConcurrentMark data structure and thread.
2100   // (Must do this late, so that "max_regions" is defined.)
2101   _cm       = new ConcurrentMark(heap_rs, max_regions());
2102   _cmThread = _cm->cmThread();
2103 
2104   // Initialize the from_card cache structure of HeapRegionRemSet.
2105   HeapRegionRemSet::init_heap(max_regions());
2106 
2107   // Now expand into the initial heap size.
2108   if (!expand(init_byte_size)) {
2109     vm_exit_during_initialization("Failed to allocate initial heap.");
2110     return JNI_ENOMEM;
2111   }
2112 
2113   // Perform any initialization actions delegated to the policy.
2114   g1_policy()->init();
2115 
2116   _refine_cte_cl =
2117     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
2118                                     g1_rem_set(),
2119                                     concurrent_g1_refine());
2120   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
2121 
2122   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2123                                                SATB_Q_FL_lock,
2124                                                G1SATBProcessCompletedThreshold,
2125                                                Shared_SATB_Q_lock);
2126 
2127   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2128                                                 DirtyCardQ_FL_lock,
2129                                                 concurrent_g1_refine()->yellow_zone(),
2130                                                 concurrent_g1_refine()->red_zone(),
2131                                                 Shared_DirtyCardQ_lock);
2132 
2133   if (G1DeferredRSUpdate) {
2134     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2135                                       DirtyCardQ_FL_lock,
2136                                       -1, // never trigger processing
2137                                       -1, // no limit on length
2138                                       Shared_DirtyCardQ_lock,
2139                                       &JavaThread::dirty_card_queue_set());
2140   }
2141 
2142   // Initialize the card queue set used to hold cards containing
2143   // references into the collection set.
2144   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
2145                                              DirtyCardQ_FL_lock,
2146                                              -1, // never trigger processing
2147                                              -1, // no limit on length
2148                                              Shared_DirtyCardQ_lock,
2149                                              &JavaThread::dirty_card_queue_set());
2150 
2151   // In case we're keeping closure specialization stats, initialize those
2152   // counts and that mechanism.
2153   SpecializationStats::clear();
2154 
2155   // Do later initialization work for concurrent refinement.
2156   _cg1r->init();
2157 
2158   // Here we allocate the dummy full region that is required by the
2159   // G1AllocRegion class. If we don't pass an address in the reserved
2160   // space here, lots of asserts fire.
2161 
2162   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2163                                              _g1_reserved.start());
2164   // We'll re-use the same region whether the alloc region will
2165   // require BOT updates or not and, if it doesn't, then a non-young
2166   // region will complain that it cannot support allocations without
2167   // BOT updates. So we'll tag the dummy region as young to avoid that.
2168   dummy_region->set_young();
2169   // Make sure it's full.
2170   dummy_region->set_top(dummy_region->end());
2171   G1AllocRegion::setup(this, dummy_region);
2172 
2173   init_mutator_alloc_region();
2174 
2175   // Create the monitoring and management support now that the values
2176   // in the heap have been properly initialized.
2177   _g1mm = new G1MonitoringSupport(this);
2178 
2179   return JNI_OK;
2180 }
2181 
2182 void G1CollectedHeap::ref_processing_init() {
2183   // Reference processing in G1 currently works as follows:
2184   //
2185   // * There are two reference processor instances. One is
2186   //   used to record and process discovered references
2187   //   during concurrent marking; the other is used to
2188   //   record and process references during STW pauses
2189   //   (both full and incremental).
2190   // * Both ref processors need to 'span' the entire heap as
2191   //   the regions in the collection set may be dotted around.
2192   //
2193   // * For the concurrent marking ref processor:
2194   //   * Reference discovery is enabled at initial marking.
2195   //   * Reference discovery is disabled and the discovered
2196   //     references processed etc during remarking.
2197   //   * Reference discovery is MT (see below).
2198   //   * Reference discovery requires a barrier (see below).
2199   //   * Reference processing may or may not be MT
2200   //     (depending on the value of ParallelRefProcEnabled
2201   //     and ParallelGCThreads).
2202   //   * A full GC disables reference discovery by the CM
2203   //     ref processor and abandons any entries on its
2204   //     discovered lists.
2205   //
2206   // * For the STW processor:
2207   //   * Non MT discovery is enabled at the start of a full GC.
2208   //   * Processing and enqueueing during a full GC is non-MT.
2209   //   * During a full GC, references are processed after marking.
2210   //
2211   //   * Discovery (may or may not be MT) is enabled at the start
2212   //     of an incremental evacuation pause.
2213   //   * References are processed near the end of a STW evacuation pause.
2214   //   * For both types of GC:
2215   //     * Discovery is atomic - i.e. not concurrent.
2216   //     * Reference discovery will not need a barrier.
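       // As a concrete illustration, assuming -XX:ParallelGCThreads=8,
       // -XX:ConcGCThreads=2 and -XX:+ParallelRefProcEnabled, the CM ref
       // processor created below uses MT discovery of degree
       // MAX2(8, 2) = 8 and MT processing of degree 8, while the STW ref
       // processor uses MT discovery of degree 8 during evacuation pauses
       // (a full GC temporarily switches it to single threaded discovery
       // via ReferenceProcessorMTDiscoveryMutator in do_collection()).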
2217 
2218   SharedHeap::ref_processing_init();
2219   MemRegion mr = reserved_region();
2220 
2221   // Concurrent Mark ref processor
2222   _ref_processor_cm =
2223     new ReferenceProcessor(mr,    // span
2224                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2225                                 // mt processing
2226                            (int) ParallelGCThreads,
2227                                 // degree of mt processing
2228                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2229                                 // mt discovery
2230                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
2231                                 // degree of mt discovery
2232                            false,
2233                                 // Reference discovery is not atomic
2234                            &_is_alive_closure_cm,
2235                                 // is alive closure
2236                                 // (for efficiency/performance)
2237                            true);
2238                                 // Setting next fields of discovered
2239                                 // lists requires a barrier.
2240 
2241   // STW ref processor
2242   _ref_processor_stw =
2243     new ReferenceProcessor(mr,    // span
2244                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2245                                 // mt processing
2246                            MAX2((int)ParallelGCThreads, 1),
2247                                 // degree of mt processing
2248                            (ParallelGCThreads > 1),
2249                                 // mt discovery
2250                            MAX2((int)ParallelGCThreads, 1),
2251                                 // degree of mt discovery
2252                            true,
2253                                 // Reference discovery is atomic
2254                            &_is_alive_closure_stw,
2255                                 // is alive closure
2256                                 // (for efficiency/performance)
2257                            false);
2258                                 // Setting next fields of discovered
2259                                 // lists requires a barrier.
2260 }
2261 
2262 size_t G1CollectedHeap::capacity() const {
2263   return _g1_committed.byte_size();
2264 }
2265 
2266 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2267                                                  DirtyCardQueue* into_cset_dcq,
2268                                                  bool concurrent,
2269                                                  int worker_i) {
2270   // Clean cards in the hot card cache
2271   concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
2272 
2273   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2274   int n_completed_buffers = 0;
2275   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2276     n_completed_buffers++;
2277   }
2278   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i,
2279                                                   (double) n_completed_buffers);
2280   dcqs.clear_n_completed_buffers();
2281   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2282 }
2283 
2284 
2285 // Computes the sum of the storage used by the various regions.
2286 
2287 size_t G1CollectedHeap::used() const {
2288   assert(Heap_lock->owner() != NULL,
2289          "Should be owned on this thread's behalf.");
2290   size_t result = _summary_bytes_used;
2291   // Read only once in case it is set to NULL concurrently
2292   HeapRegion* hr = _mutator_alloc_region.get();
2293   if (hr != NULL)
2294     result += hr->used();
2295   return result;
2296 }
2297 
2298 size_t G1CollectedHeap::used_unlocked() const {
2299   size_t result = _summary_bytes_used;
2300   return result;
2301 }
2302 
2303 class SumUsedClosure: public HeapRegionClosure {
2304   size_t _used;
2305 public:
2306   SumUsedClosure() : _used(0) {}
2307   bool doHeapRegion(HeapRegion* r) {
2308     if (!r->continuesHumongous()) {
2309       _used += r->used();
2310     }
2311     return false;
2312   }
2313   size_t result() { return _used; }
2314 };
2315 
2316 size_t G1CollectedHeap::recalculate_used() const {
2317   SumUsedClosure blk;
2318   heap_region_iterate(&blk);
2319   return blk.result();
2320 }
2321 
2322 size_t G1CollectedHeap::unsafe_max_alloc() {
2323   if (free_regions() > 0) return HeapRegion::GrainBytes;
2324   // otherwise, is there space in the current allocation region?
2325 
2326   // We need to store the current allocation region in a local variable
2327   // here. The problem is that this method doesn't take any locks and
2328   // there may be other threads which overwrite the current allocation
2329   // region field. attempt_allocation(), for example, sets it to NULL
2330   // and this can happen *after* the NULL check here but before the call
2331   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
2332   // to be a problem in the optimized build, since the two loads of the
2333   // current allocation region field are optimized away.
2334   HeapRegion* hr = _mutator_alloc_region.get();
2335   if (hr == NULL) {
2336     return 0;
2337   }
2338   return hr->free();
2339 }
2340 
2341 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2342   switch (cause) {
2343     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2344     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
2345     case GCCause::_g1_humongous_allocation: return true;
2346     default:                                return false;
2347   }
2348 }
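
     // For example, a System.gc() call maps to GCCause::_java_lang_system_gc:
     // with -XX:+ExplicitGCInvokesConcurrent the predicate above returns true
     // and collect() below schedules an initial-mark evacuation pause that
     // starts a concurrent cycle; without the flag it falls through to a
     // stop-the-world Full GC (VM_G1CollectFull).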
2349 
2350 #ifndef PRODUCT
2351 void G1CollectedHeap::allocate_dummy_regions() {
2352   // Let's fill up most of the region
2353   size_t word_size = HeapRegion::GrainWords - 1024;
2354   // And as a result the region we'll allocate will be humongous.
2355   guarantee(isHumongous(word_size), "sanity");
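       // For example, assuming 1 MB regions on a 64-bit VM, GrainWords is
       // 131072, so word_size is 130048 words (~1016 KB), well over half a
       // region, and the dummy object is therefore humongous and takes up
       // (most of) a region of its own.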
2356 
2357   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2358     // Let's use the existing mechanism for the allocation
2359     HeapWord* dummy_obj = humongous_obj_allocate(word_size);
2360     if (dummy_obj != NULL) {
2361       MemRegion mr(dummy_obj, word_size);
2362       CollectedHeap::fill_with_object(mr);
2363     } else {
2364       // If we can't allocate once, we probably cannot allocate
2365       // again. Let's get out of the loop.
2366       break;
2367     }
2368   }
2369 }
2370 #endif // !PRODUCT
2371 
2372 void G1CollectedHeap::increment_old_marking_cycles_started() {
2373   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2374     _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2375     err_msg("Wrong marking cycle count (started: %d, completed: %d)",
2376     _old_marking_cycles_started, _old_marking_cycles_completed));
2377 
2378   _old_marking_cycles_started++;
2379 }
2380 
2381 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2382   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2383 
2384   // We assume that if concurrent == true, then the caller is a
2385   // concurrent thread that has joined the Suspendible Thread
2386   // Set. If there's ever a cheap way to check this, we should add an
2387   // assert here.
2388 
2389   // Given that this method is called at the end of a Full GC or of a
2390   // concurrent cycle, and those can be nested (i.e., a Full GC can
2391   // interrupt a concurrent cycle), the number of full collections
2392   // completed should be either one (in the case where there was no
2393   // nesting) or two (when a Full GC interrupted a concurrent cycle)
2394   // behind the number of full collections started.
2395 
2396   // This is the case for the inner caller, i.e. a Full GC.
2397   assert(concurrent ||
2398          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2399          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2400          err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
2401                  "is inconsistent with _old_marking_cycles_completed = %u",
2402                  _old_marking_cycles_started, _old_marking_cycles_completed));
2403 
2404   // This is the case for the outer caller, i.e. the concurrent cycle.
2405   assert(!concurrent ||
2406          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2407          err_msg("for outer caller (concurrent cycle): "
2408                  "_old_marking_cycles_started = %u "
2409                  "is inconsistent with _old_marking_cycles_completed = %u",
2410                  _old_marking_cycles_started, _old_marking_cycles_completed));
2411 
2412   _old_marking_cycles_completed += 1;
2413 
2414   // We need to clear the "in_progress" flag in the CM thread before
2415   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2416   // is set) so that if a waiter requests another System.gc() it doesn't
2417   // incorrectly see that a marking cycle is still in progress.
2418   if (concurrent) {
2419     _cmThread->clear_in_progress();
2420   }
2421 
2422   // This notify_all() will ensure that a thread that called
2423   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2424   // and is waiting for a full GC to finish will be woken up. It is
2425   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2426   FullGCCount_lock->notify_all();
2427 }
2428 
2429 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
2430   assert_at_safepoint(true /* should_be_vm_thread */);
2431   GCCauseSetter gcs(this, cause);
2432   switch (cause) {
2433     case GCCause::_heap_inspection:
2434     case GCCause::_heap_dump: {
2435       HandleMark hm;
2436       do_full_collection(false);         // don't clear all soft refs
2437       break;
2438     }
2439     default: // XXX FIX ME
2440       ShouldNotReachHere(); // Unexpected use of this function
2441   }
2442 }
2443 
2444 void G1CollectedHeap::collect(GCCause::Cause cause) {
2445   assert_heap_not_locked();
2446 
2447   unsigned int gc_count_before;
2448   unsigned int old_marking_count_before;
2449   bool retry_gc;
2450 
2451   do {
2452     retry_gc = false;
2453 
2454     {
2455       MutexLocker ml(Heap_lock);
2456 
2457       // Read the GC count while holding the Heap_lock
2458       gc_count_before = total_collections();
2459       old_marking_count_before = _old_marking_cycles_started;
2460     }
2461 
2462     if (should_do_concurrent_full_gc(cause)) {
2463       // Schedule an initial-mark evacuation pause that will start a
2464       // concurrent cycle. We're setting word_size to 0 which means that
2465       // we are not requesting a post-GC allocation.
2466       VM_G1IncCollectionPause op(gc_count_before,
2467                                  0,     /* word_size */
2468                                  true,  /* should_initiate_conc_mark */
2469                                  g1_policy()->max_pause_time_ms(),
2470                                  cause);
2471 
2472       VMThread::execute(&op);
2473       if (!op.pause_succeeded()) {
2474         if (old_marking_count_before == _old_marking_cycles_started) {
2475           retry_gc = op.should_retry_gc();
2476         } else {
2477           // A Full GC happened while we were trying to schedule the
2478           // initial-mark GC. No point in starting a new cycle given
2479           // that the whole heap was collected anyway.
2480         }
2481 
2482         if (retry_gc) {
2483           if (GC_locker::is_active_and_needs_gc()) {
2484             GC_locker::stall_until_clear();
2485           }
2486         }
2487       }
2488     } else {
2489       if (cause == GCCause::_gc_locker
2490           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2491 
2492         // Schedule a standard evacuation pause. We're setting word_size
2493         // to 0 which means that we are not requesting a post-GC allocation.
2494         VM_G1IncCollectionPause op(gc_count_before,
2495                                    0,     /* word_size */
2496                                    false, /* should_initiate_conc_mark */
2497                                    g1_policy()->max_pause_time_ms(),
2498                                    cause);
2499         VMThread::execute(&op);
2500       } else {
2501         // Schedule a Full GC.
2502         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
2503         VMThread::execute(&op);
2504       }
2505     }
2506   } while (retry_gc);
2507 }
2508 
2509 bool G1CollectedHeap::is_in(const void* p) const {
2510   if (_g1_committed.contains(p)) {
2511     // Given that we know that p is in the committed space,
2512     // heap_region_containing_raw() should successfully
2513     // return the containing region.
2514     HeapRegion* hr = heap_region_containing_raw(p);
2515     return hr->is_in(p);
2516   } else {
2517     return _perm_gen->as_gen()->is_in(p);
2518   }
2519 }
2520 
2521 // Iteration functions.
2522 
2523 // Iterates an OopClosure over all ref-containing fields of objects
2524 // within a HeapRegion.
2525 
2526 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2527   MemRegion _mr;
2528   OopClosure* _cl;
2529 public:
2530   IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
2531     : _mr(mr), _cl(cl) {}
2532   bool doHeapRegion(HeapRegion* r) {
2533     if (! r->continuesHumongous()) {
2534       r->oop_iterate(_cl);
2535     }
2536     return false;
2537   }
2538 };
2539 
2540 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
2541   IterateOopClosureRegionClosure blk(_g1_committed, cl);
2542   heap_region_iterate(&blk);
2543   if (do_perm) {
2544     perm_gen()->oop_iterate(cl);
2545   }
2546 }
2547 
2548 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
2549   IterateOopClosureRegionClosure blk(mr, cl);
2550   heap_region_iterate(&blk);
2551   if (do_perm) {
2552     perm_gen()->oop_iterate(cl);
2553   }
2554 }
2555 
2556 // Iterates an ObjectClosure over all objects within a HeapRegion.
2557 
2558 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2559   ObjectClosure* _cl;
2560 public:
2561   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2562   bool doHeapRegion(HeapRegion* r) {
2563     if (! r->continuesHumongous()) {
2564       r->object_iterate(_cl);
2565     }
2566     return false;
2567   }
2568 };
2569 
2570 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
2571   IterateObjectClosureRegionClosure blk(cl);
2572   heap_region_iterate(&blk);
2573   if (do_perm) {
2574     perm_gen()->object_iterate(cl);
2575   }
2576 }
2577 
2578 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
2579   // FIXME: is this right?
2580   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
2581 }
2582 
2583 // Calls a SpaceClosure on a HeapRegion.
2584 
2585 class SpaceClosureRegionClosure: public HeapRegionClosure {
2586   SpaceClosure* _cl;
2587 public:
2588   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
2589   bool doHeapRegion(HeapRegion* r) {
2590     _cl->do_space(r);
2591     return false;
2592   }
2593 };
2594 
2595 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
2596   SpaceClosureRegionClosure blk(cl);
2597   heap_region_iterate(&blk);
2598 }
2599 
2600 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2601   _hrs.iterate(cl);
2602 }
2603 
2604 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
2605                                                HeapRegionClosure* cl) const {
2606   _hrs.iterate_from(r, cl);
2607 }
2608 
2609 void
2610 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2611                                                  uint worker,
2612                                                  uint no_of_par_workers,
2613                                                  jint claim_value) {
2614   const uint regions = n_regions();
2615   const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
2616                              no_of_par_workers :
2617                              1);
2618   assert(UseDynamicNumberOfGCThreads ||
2619          no_of_par_workers == workers()->total_workers(),
2620          "Non dynamic should use fixed number of workers");
2621   // try to spread out the starting points of the workers
2622   const uint start_index = regions / max_workers * worker;
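       // For example, with 256 regions and 4 parallel workers the starting
       // indices are 0, 64, 128 and 192; each worker still visits all 256
       // regions (wrapping modulo the region count), but the claims are
       // spread out so the workers do not all contend on the same regions
       // initially.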
2623 
2624   // each worker will actually look at all regions
2625   for (uint count = 0; count < regions; ++count) {
2626     const uint index = (start_index + count) % regions;
2627     assert(0 <= index && index < regions, "sanity");
2628     HeapRegion* r = region_at(index);
2629     // we'll ignore "continues humongous" regions (we'll process them
2630     // when we come across their corresponding "start humongous"
2631     // region) and regions already claimed
2632     if (r->claim_value() == claim_value || r->continuesHumongous()) {
2633       continue;
2634     }
2635     // OK, try to claim it
2636     if (r->claimHeapRegion(claim_value)) {
2637       // success!
2638       assert(!r->continuesHumongous(), "sanity");
2639       if (r->startsHumongous()) {
2640         // If the region is "starts humongous" we'll iterate over its
2641         // "continues humongous" regions before the region itself.
2642         // The order is important. In one case, calling the
2643         // closure on the "starts humongous" region might de-allocate
2644         // and clear all its "continues humongous" regions and, as a
2645         // result, we might end up processing them twice. So, we'll do
2646         // them first (notice: most closures will ignore them anyway) and
2647         // then we'll do the "starts humongous" region.
2648         for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
2649           HeapRegion* chr = region_at(ch_index);
2650 
2651           // if the region has already been claimed or it's not
2652           // "continues humongous" we're done
2653           if (chr->claim_value() == claim_value ||
2654               !chr->continuesHumongous()) {
2655             break;
2656           }
2657 
2658           // No one should have claimed it directly; we can assert this
2659           // given that we claimed its "starts humongous" region.
2660           assert(chr->claim_value() != claim_value, "sanity");
2661           assert(chr->humongous_start_region() == r, "sanity");
2662 
2663           if (chr->claimHeapRegion(claim_value)) {
2664             // we should always be able to claim it; no one else should
2665             // be trying to claim this region
2666 
2667             bool res2 = cl->doHeapRegion(chr);
2668             assert(!res2, "Should not abort");
2669 
2670             // Right now, this holds (i.e., no closure that actually
2671             // does something with "continues humongous" regions
2672             // clears them). We might have to weaken it in the future,
2673             // but let's leave these two asserts here for extra safety.
2674             assert(chr->continuesHumongous(), "should still be the case");
2675             assert(chr->humongous_start_region() == r, "sanity");
2676           } else {
2677             guarantee(false, "we should not reach here");
2678           }
2679         }
2680       }
2681 
2682       assert(!r->continuesHumongous(), "sanity");
2683       bool res = cl->doHeapRegion(r);
2684       assert(!res, "Should not abort");
2685     }
2686   }
2687 }
2688 
2689 class ResetClaimValuesClosure: public HeapRegionClosure {
2690 public:
2691   bool doHeapRegion(HeapRegion* r) {
2692     r->set_claim_value(HeapRegion::InitialClaimValue);
2693     return false;
2694   }
2695 };
2696 
2697 void G1CollectedHeap::reset_heap_region_claim_values() {
2698   ResetClaimValuesClosure blk;
2699   heap_region_iterate(&blk);
2700 }
2701 
2702 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
2703   ResetClaimValuesClosure blk;
2704   collection_set_iterate(&blk);
2705 }
2706 
2707 #ifdef ASSERT
2708 // This checks whether all regions in the heap have the correct claim
2709 // value. I also piggy-backed a check on this to ensure that the
2710 // humongous_start_region() information on "continues humongous"
2711 // regions is correct.
2712 
2713 class CheckClaimValuesClosure : public HeapRegionClosure {
2714 private:
2715   jint _claim_value;
2716   uint _failures;
2717   HeapRegion* _sh_region;
2718 
2719 public:
2720   CheckClaimValuesClosure(jint claim_value) :
2721     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
2722   bool doHeapRegion(HeapRegion* r) {
2723     if (r->claim_value() != _claim_value) {
2724       gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2725                              "claim value = %d, should be %d",
2726                              HR_FORMAT_PARAMS(r),
2727                              r->claim_value(), _claim_value);
2728       ++_failures;
2729     }
2730     if (!r->isHumongous()) {
2731       _sh_region = NULL;
2732     } else if (r->startsHumongous()) {
2733       _sh_region = r;
2734     } else if (r->continuesHumongous()) {
2735       if (r->humongous_start_region() != _sh_region) {
2736         gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2737                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
2738                                HR_FORMAT_PARAMS(r),
2739                                r->humongous_start_region(),
2740                                _sh_region);
2741         ++_failures;
2742       }
2743     }
2744     return false;
2745   }
2746   uint failures() { return _failures; }
2747 };
2748 
2749 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
2750   CheckClaimValuesClosure cl(claim_value);
2751   heap_region_iterate(&cl);
2752   return cl.failures() == 0;
2753 }
2754 
2755 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
2756 private:
2757   jint _claim_value;
2758   uint _failures;
2759 
2760 public:
2761   CheckClaimValuesInCSetHRClosure(jint claim_value) :
2762     _claim_value(claim_value), _failures(0) { }
2763 
2764   uint failures() { return _failures; }
2765 
2766   bool doHeapRegion(HeapRegion* hr) {
2767     assert(hr->in_collection_set(), "how?");
2768     assert(!hr->isHumongous(), "H-region in CSet");
2769     if (hr->claim_value() != _claim_value) {
2770       gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
2771                              "claim value = %d, should be %d",
2772                              HR_FORMAT_PARAMS(hr),
2773                              hr->claim_value(), _claim_value);
2774       _failures += 1;
2775     }
2776     return false;
2777   }
2778 };
2779 
2780 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
2781   CheckClaimValuesInCSetHRClosure cl(claim_value);
2782   collection_set_iterate(&cl);
2783   return cl.failures() == 0;
2784 }
2785 #endif // ASSERT
2786 
2787 // Clear the cached CSet starting regions and (more importantly)
2788 // the time stamps. Called when we reset the GC time stamp.
2789 void G1CollectedHeap::clear_cset_start_regions() {
2790   assert(_worker_cset_start_region != NULL, "sanity");
2791   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
2792 
2793   int n_queues = MAX2((int)ParallelGCThreads, 1);
2794   for (int i = 0; i < n_queues; i++) {
2795     _worker_cset_start_region[i] = NULL;
2796     _worker_cset_start_region_time_stamp[i] = 0;
2797   }
2798 }
2799 
2800 // Given the id of a worker, obtain or calculate a suitable
2801 // starting region for iterating over the current collection set.
2802 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
2803   assert(get_gc_time_stamp() > 0, "should have been updated by now");
2804 
2805   HeapRegion* result = NULL;
2806   unsigned gc_time_stamp = get_gc_time_stamp();
2807 
2808   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
2809     // Cached starting region for current worker was set
2810     // during the current pause - so it's valid.
2811     // Note: the cached starting heap region may be NULL
2812     // (when the collection set is empty).
2813     result = _worker_cset_start_region[worker_i];
2814     assert(result == NULL || result->in_collection_set(), "sanity");
2815     return result;
2816   }
2817 
2818   // The cached entry was not valid so let's calculate
2819   // a suitable starting heap region for this worker.
2820 
2821   // We want the parallel threads to start their collection
2822   // set iteration at different collection set regions to
2823   // avoid contention.
2824   // If we have:
2825   //          n collection set regions
2826   //          p threads
2827   // Then thread t will start at region floor ((t * n) / p)
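  //
  // For example (hypothetical numbers): with n = 8 CSet regions and
  // p = 3 threads, workers 0, 1 and 2 would start at regions
  // floor(0/3) = 0, floor(8/3) = 2 and floor(16/3) = 5 respectively.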
2828 
2829   result = g1_policy()->collection_set();
2830   if (G1CollectedHeap::use_parallel_gc_threads()) {
2831     uint cs_size = g1_policy()->cset_region_length();
2832     uint active_workers = workers()->active_workers();
2833     assert(UseDynamicNumberOfGCThreads ||
2834              active_workers == workers()->total_workers(),
2835              "Unless dynamic should use total workers");
2836 
2837     uint end_ind   = (cs_size * worker_i) / active_workers;
2838     uint start_ind = 0;
2839 
2840     if (worker_i > 0 &&
2841         _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2842       // The previous worker's starting region is valid,
2843       // so let's iterate from there.
2844       start_ind = (cs_size * (worker_i - 1)) / active_workers;
2845       result = _worker_cset_start_region[worker_i - 1];
2846     }
2847 
2848     for (uint i = start_ind; i < end_ind; i++) {
2849       result = result->next_in_collection_set();
2850     }
2851   }
2852 
2853   // Note: the calculated starting heap region may be NULL
2854   // (when the collection set is empty).
2855   assert(result == NULL || result->in_collection_set(), "sanity");
2856   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2857          "should be updated only once per pause");
2858   _worker_cset_start_region[worker_i] = result;
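  // Publish the cached starting region before the time stamp that
  // validates it, so a reader that sees a current time stamp also
  // sees the corresponding region.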
2859   OrderAccess::storestore();
2860   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2861   return result;
2862 }
2863 
2864 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2865   HeapRegion* r = g1_policy()->collection_set();
2866   while (r != NULL) {
2867     HeapRegion* next = r->next_in_collection_set();
2868     if (cl->doHeapRegion(r)) {
2869       cl->incomplete();
2870       return;
2871     }
2872     r = next;
2873   }
2874 }
2875 
2876 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
2877                                                   HeapRegionClosure *cl) {
2878   if (r == NULL) {
2879     // The CSet is empty so there's nothing to do.
2880     return;
2881   }
2882 
2883   assert(r->in_collection_set(),
2884          "Start region must be a member of the collection set.");
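  // Note that the "&& false" in the two loops below makes the
  // incomplete() / early-return path unreachable, so the closure's
  // request to abort the iteration is effectively ignored here.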
2885   HeapRegion* cur = r;
2886   while (cur != NULL) {
2887     HeapRegion* next = cur->next_in_collection_set();
2888     if (cl->doHeapRegion(cur) && false) {
2889       cl->incomplete();
2890       return;
2891     }
2892     cur = next;
2893   }
2894   cur = g1_policy()->collection_set();
2895   while (cur != r) {
2896     HeapRegion* next = cur->next_in_collection_set();
2897     if (cl->doHeapRegion(cur) && false) {
2898       cl->incomplete();
2899       return;
2900     }
2901     cur = next;
2902   }
2903 }
2904 
2905 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
2906   return n_regions() > 0 ? region_at(0) : NULL;
2907 }
2908 
2909 
2910 Space* G1CollectedHeap::space_containing(const void* addr) const {
2911   Space* res = heap_region_containing(addr);
2912   if (res == NULL)
2913     res = perm_gen()->space_containing(addr);
2914   return res;
2915 }
2916 
2917 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2918   Space* sp = space_containing(addr);
2919   if (sp != NULL) {
2920     return sp->block_start(addr);
2921   }
2922   return NULL;
2923 }
2924 
2925 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2926   Space* sp = space_containing(addr);
2927   assert(sp != NULL, "block_size of address outside of heap");
2928   return sp->block_size(addr);
2929 }
2930 
2931 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2932   Space* sp = space_containing(addr);
2933   return sp->block_is_obj(addr);
2934 }
2935 
2936 bool G1CollectedHeap::supports_tlab_allocation() const {
2937   return true;
2938 }
2939 
2940 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2941   return HeapRegion::GrainBytes;
2942 }
2943 
2944 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2945   // Return the remaining space in the cur alloc region, but not less than
2946   // the min TLAB size.
2947 
2948   // Also, this value can be at most the humongous object threshold,
2949   // since we can't allow tlabs to grow big enough to accommodate
2950   // humongous objects.
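  // For example (illustrative figures only): with 1M regions the humongous
  // threshold is roughly half a region, so even if the current alloc region
  // had, say, 800K free, the reported TLAB limit would stay at about 512K.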
2951 
2952   HeapRegion* hr = _mutator_alloc_region.get();
2953   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
2954   if (hr == NULL) {
2955     return max_tlab_size;
2956   } else {
2957     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
2958   }
2959 }
2960 
2961 size_t G1CollectedHeap::max_capacity() const {
2962   return _g1_reserved.byte_size();
2963 }
2964 
2965 jlong G1CollectedHeap::millis_since_last_gc() {
2966   // assert(false, "NYI");
2967   return 0;
2968 }
2969 
2970 void G1CollectedHeap::prepare_for_verify() {
2971   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2972     ensure_parsability(false);
2973   }
2974   g1_rem_set()->prepare_for_verify();
2975 }
2976 
2977 class VerifyLivenessOopClosure: public OopClosure {
2978   G1CollectedHeap* _g1h;
2979   VerifyOption _vo;
2980 public:
2981   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
2982     _g1h(g1h), _vo(vo)
2983   { }
2984   void do_oop(narrowOop *p) { do_oop_work(p); }
2985   void do_oop(      oop *p) { do_oop_work(p); }
2986 
2987   template <class T> void do_oop_work(T *p) {
2988     oop obj = oopDesc::load_decode_heap_oop(p);
2989     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
2990               "Dead object referenced by a not dead object");
2991   }
2992 };
2993 
2994 class VerifyObjsInRegionClosure: public ObjectClosure {
2995 private:
2996   G1CollectedHeap* _g1h;
2997   size_t _live_bytes;
2998   HeapRegion *_hr;
2999   VerifyOption _vo;
3000 public:
3001   // _vo == UsePrevMarking -> use "prev" marking information,
3002   // _vo == UseNextMarking -> use "next" marking information,
3003   // _vo == UseMarkWord    -> use mark word from object header.
3004   VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
3005     : _live_bytes(0), _hr(hr), _vo(vo) {
3006     _g1h = G1CollectedHeap::heap();
3007   }
3008   void do_object(oop o) {
3009     VerifyLivenessOopClosure isLive(_g1h, _vo);
3010     assert(o != NULL, "Huh?");
3011     if (!_g1h->is_obj_dead_cond(o, _vo)) {
3012       // If the object is alive according to the mark word,
3013       // then verify that the marking information agrees.
3014       // Note we can't verify the contra-positive of the
3015       // above: if the object is dead (according to the mark
3016       // word), it may not be marked, or may have been marked
3017       // but has since become dead, or may have been allocated
3018       // since the last marking.
3019       if (_vo == VerifyOption_G1UseMarkWord) {
3020         guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
3021       }
3022 
3023       o->oop_iterate(&isLive);
3024       if (!_hr->obj_allocated_since_prev_marking(o)) {
3025         size_t obj_size = o->size();    // Make sure we don't overflow
3026         _live_bytes += (obj_size * HeapWordSize);
3027       }
3028     }
3029   }
3030   size_t live_bytes() { return _live_bytes; }
3031 };
3032 
3033 class PrintObjsInRegionClosure : public ObjectClosure {
3034   HeapRegion *_hr;
3035   G1CollectedHeap *_g1;
3036 public:
3037   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
3038     _g1 = G1CollectedHeap::heap();
3039   }
3040 
3041   void do_object(oop o) {
3042     if (o != NULL) {
3043       HeapWord *start = (HeapWord *) o;
3044       size_t word_sz = o->size();
3045       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
3046                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
3047                           (void*) o, word_sz,
3048                           _g1->isMarkedPrev(o),
3049                           _g1->isMarkedNext(o),
3050                           _hr->obj_allocated_since_prev_marking(o));
3051       HeapWord *end = start + word_sz;
3052       HeapWord *cur;
3053       int *val;
3054       for (cur = start; cur < end; cur++) {
3055         val = (int *) cur;
3056         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
3057       }
3058     }
3059   }
3060 };
3061 
3062 class VerifyRegionClosure: public HeapRegionClosure {
3063 private:
3064   bool         _par;
3065   VerifyOption _vo;
3066   bool         _failures;
3067 public:
3068   // _vo == UsePrevMarking -> use "prev" marking information,
3069   // _vo == UseNextMarking -> use "next" marking information,
3070   // _vo == UseMarkWord    -> use mark word from object header.
3071   VerifyRegionClosure(bool par, VerifyOption vo)
3072     : _par(par),
3073       _vo(vo),
3074       _failures(false) {}
3075 
3076   bool failures() {
3077     return _failures;
3078   }
3079 
3080   bool doHeapRegion(HeapRegion* r) {
3081     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
3082               "Should be unclaimed at verify points.");
3083     if (!r->continuesHumongous()) {
3084       bool failures = false;
3085       r->verify(_vo, &failures);
3086       if (failures) {
3087         _failures = true;
3088       } else {
3089         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3090         r->object_iterate(&not_dead_yet_cl);
3091         if (_vo != VerifyOption_G1UseNextMarking) {
3092           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3093             gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
3094                                    "max_live_bytes "SIZE_FORMAT" "
3095                                    "< calculated "SIZE_FORMAT,
3096                                    r->bottom(), r->end(),
3097                                    r->max_live_bytes(),
3098                                  not_dead_yet_cl.live_bytes());
3099             _failures = true;
3100           }
3101         } else {
3102           // When vo == UseNextMarking we cannot currently do a sanity
3103           // check on the live bytes as the calculation has not been
3104           // finalized yet.
3105         }
3106       }
3107     }
3108     return false; // keep iterating even after a failure so every failing region is reported
3109   }
3110 };
3111 
3112 class VerifyRootsClosure: public OopsInGenClosure {
3113 private:
3114   G1CollectedHeap* _g1h;
3115   VerifyOption     _vo;
3116   bool             _failures;
3117 public:
3118   // _vo == UsePrevMarking -> use "prev" marking information,
3119   // _vo == UseNextMarking -> use "next" marking information,
3120   // _vo == UseMarkWord    -> use mark word from object header.
3121   VerifyRootsClosure(VerifyOption vo) :
3122     _g1h(G1CollectedHeap::heap()),
3123     _vo(vo),
3124     _failures(false) { }
3125 
3126   bool failures() { return _failures; }
3127 
3128   template <class T> void do_oop_nv(T* p) {
3129     T heap_oop = oopDesc::load_heap_oop(p);
3130     if (!oopDesc::is_null(heap_oop)) {
3131       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3132       if (_g1h->is_obj_dead_cond(obj, _vo)) {
3133         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
3134                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
3135         if (_vo == VerifyOption_G1UseMarkWord) {
3136           gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
3137         }
3138         obj->print_on(gclog_or_tty);
3139         _failures = true;
3140       }
3141     }
3142   }
3143 
3144   void do_oop(oop* p)       { do_oop_nv(p); }
3145   void do_oop(narrowOop* p) { do_oop_nv(p); }
3146 };
3147 
3148 // This is the task used for parallel heap verification.
3149 
3150 class G1ParVerifyTask: public AbstractGangTask {
3151 private:
3152   G1CollectedHeap* _g1h;
3153   VerifyOption     _vo;
3154   bool             _failures;
3155 
3156 public:
3157   // _vo == UsePrevMarking -> use "prev" marking information,
3158   // _vo == UseNextMarking -> use "next" marking information,
3159   // _vo == UseMarkWord    -> use mark word from object header.
3160   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
3161     AbstractGangTask("Parallel verify task"),
3162     _g1h(g1h),
3163     _vo(vo),
3164     _failures(false) { }
3165 
3166   bool failures() {
3167     return _failures;
3168   }
3169 
3170   void work(uint worker_id) {
3171     HandleMark hm;
3172     VerifyRegionClosure blk(true, _vo);
3173     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3174                                           _g1h->workers()->active_workers(),
3175                                           HeapRegion::ParVerifyClaimValue);
3176     if (blk.failures()) {
3177       _failures = true;
3178     }
3179   }
3180 };
3181 
3182 void G1CollectedHeap::verify(bool silent) {
3183   verify(silent, VerifyOption_G1UsePrevMarking);
3184 }
3185 
3186 void G1CollectedHeap::verify(bool silent,
3187                              VerifyOption vo) {
3188   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
3189     if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
3190     VerifyRootsClosure rootsCl(vo);
3191 
3192     assert(Thread::current()->is_VM_thread(),
3193       "Expected to be executed serially by the VM thread at this point");
3194 
3195     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
3196 
3197     // We apply the relevant closures to all the oops in the
3198     // system dictionary, the string table and the code cache.
3199     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3200 
3201     process_strong_roots(true,      // activate StrongRootsScope
3202                          true,      // we set "collecting perm gen" to true,
3203                                     // so we don't reset the dirty cards in the perm gen.
3204                          ScanningOption(so),  // roots scanning options
3205                          &rootsCl,
3206                          &blobsCl,
3207                          &rootsCl);
3208 
3209     // If we're verifying after the marking phase of a Full GC then we can't
3210     // treat the perm gen as roots into the G1 heap. Some of the objects in
3211     // the perm gen may be dead and hence not marked. If one of these dead
3212     // objects is considered to be a root then we may end up with a false
3213 // "Root location <x> points to dead obj <y>" failure.
3214     if (vo != VerifyOption_G1UseMarkWord) {
3215       // Since we used "collecting_perm_gen" == true above, we will not have
3216       // checked the refs from perm into the G1-collected heap. We check those
3217       // references explicitly below. Whether the relevant cards are dirty
3218       // is checked further below in the rem set verification.
3219       if (!silent) { gclog_or_tty->print("Permgen roots "); }
3220       perm_gen()->oop_iterate(&rootsCl);
3221     }
3222     bool failures = rootsCl.failures();
3223 
3224     if (vo != VerifyOption_G1UseMarkWord) {
3225       // If we're verifying during a full GC then the region sets
3226       // will have been torn down at the start of the GC. Therefore
3227       // verifying the region sets will fail. So we only verify
3228       // the region sets when not in a full GC.
3229       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3230       verify_region_sets();
3231     }
3232 
3233     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3234     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3235       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3236              "sanity check");
3237 
3238       G1ParVerifyTask task(this, vo);
3239       assert(UseDynamicNumberOfGCThreads ||
3240         workers()->active_workers() == workers()->total_workers(),
3241         "If not dynamic should be using all the workers");
3242       int n_workers = workers()->active_workers();
3243       set_par_threads(n_workers);
3244       workers()->run_task(&task);
3245       set_par_threads(0);
3246       if (task.failures()) {
3247         failures = true;
3248       }
3249 
3250       // Checks that the expected amount of parallel work was done.
3251       // The implication is that n_workers is > 0.
3252       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
3253              "sanity check");
3254 
3255       reset_heap_region_claim_values();
3256 
3257       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3258              "sanity check");
3259     } else {
3260       VerifyRegionClosure blk(false, vo);
3261       heap_region_iterate(&blk);
3262       if (blk.failures()) {
3263         failures = true;
3264       }
3265     }
3266     if (!silent) gclog_or_tty->print("RemSet ");
3267     rem_set()->verify();
3268 
3269     if (failures) {
3270       gclog_or_tty->print_cr("Heap:");
3271       // It helps to have the per-region information in the output to
3272       // help us track down what went wrong. This is why we call
3273       // print_extended_on() instead of print_on().
3274       print_extended_on(gclog_or_tty);
3275       gclog_or_tty->print_cr("");
3276 #ifndef PRODUCT
3277       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
3278         concurrent_mark()->print_reachable("at-verification-failure",
3279                                            vo, false /* all */);
3280       }
3281 #endif
3282       gclog_or_tty->flush();
3283     }
3284     guarantee(!failures, "there should not have been any failures");
3285   } else {
3286     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
3287   }
3288 }
3289 
3290 class PrintRegionClosure: public HeapRegionClosure {
3291   outputStream* _st;
3292 public:
3293   PrintRegionClosure(outputStream* st) : _st(st) {}
3294   bool doHeapRegion(HeapRegion* r) {
3295     r->print_on(_st);
3296     return false;
3297   }
3298 };
3299 
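// The summary printed by print_on() below looks roughly like the following
// (illustrative values only):
//
//  garbage-first heap   total 262144K, used 81920K [0x..., 0x..., 0x...)
//   region size 1024K, 60 young (61440K), 5 survivors (5120K)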
3300 void G1CollectedHeap::print_on(outputStream* st) const {
3301   st->print(" %-20s", "garbage-first heap");
3302   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3303             capacity()/K, used_unlocked()/K);
3304   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3305             _g1_storage.low_boundary(),
3306             _g1_storage.high(),
3307             _g1_storage.high_boundary());
3308   st->cr();
3309   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3310   uint young_regions = _young_list->length();
3311   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3312             (size_t) young_regions * HeapRegion::GrainBytes / K);
3313   uint survivor_regions = g1_policy()->recorded_survivor_regions();
3314   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3315             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3316   st->cr();
3317   perm()->as_gen()->print_on(st);
3318 }
3319 
3320 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3321   print_on(st);
3322 
3323   // Print the per-region information.
3324   st->cr();
3325   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3326                "HS=humongous(starts), HC=humongous(continues), "
3327                "CS=collection set, F=free, TS=gc time stamp, "
3328                "PTAMS=previous top-at-mark-start, "
3329                "NTAMS=next top-at-mark-start)");
3330   PrintRegionClosure blk(st);
3331   heap_region_iterate(&blk);
3332 }
3333 
3334 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3335   if (G1CollectedHeap::use_parallel_gc_threads()) {
3336     workers()->print_worker_threads_on(st);
3337   }
3338   _cmThread->print_on(st);
3339   st->cr();
3340   _cm->print_worker_threads_on(st);
3341   _cg1r->print_worker_threads_on(st);
3342   st->cr();
3343 }
3344 
3345 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3346   if (G1CollectedHeap::use_parallel_gc_threads()) {
3347     workers()->threads_do(tc);
3348   }
3349   tc->do_thread(_cmThread);
3350   _cg1r->threads_do(tc);
3351 }
3352 
3353 void G1CollectedHeap::print_tracing_info() const {
3354   // We'll overload this to mean "trace GC pause statistics."
3355   if (TraceGen0Time || TraceGen1Time) {
3356     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3357     // to that.
3358     g1_policy()->print_tracing_info();
3359   }
3360   if (G1SummarizeRSetStats) {
3361     g1_rem_set()->print_summary_info();
3362   }
3363   if (G1SummarizeConcMark) {
3364     concurrent_mark()->print_summary_info();
3365   }
3366   g1_policy()->print_yg_surv_rate_info();
3367   SpecializationStats::print();
3368 }
3369 
3370 #ifndef PRODUCT
3371 // Helpful for debugging RSet issues.
3372 
3373 class PrintRSetsClosure : public HeapRegionClosure {
3374 private:
3375   const char* _msg;
3376   size_t _occupied_sum;
3377 
3378 public:
3379   bool doHeapRegion(HeapRegion* r) {
3380     HeapRegionRemSet* hrrs = r->rem_set();
3381     size_t occupied = hrrs->occupied();
3382     _occupied_sum += occupied;
3383 
3384     gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
3385                            HR_FORMAT_PARAMS(r));
3386     if (occupied == 0) {
3387       gclog_or_tty->print_cr("  RSet is empty");
3388     } else {
3389       hrrs->print();
3390     }
3391     gclog_or_tty->print_cr("----------");
3392     return false;
3393   }
3394 
3395   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3396     gclog_or_tty->cr();
3397     gclog_or_tty->print_cr("========================================");
3398     gclog_or_tty->print_cr(msg);
3399     gclog_or_tty->cr();
3400   }
3401 
3402   ~PrintRSetsClosure() {
3403     gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
3404     gclog_or_tty->print_cr("========================================");
3405     gclog_or_tty->cr();
3406   }
3407 };
3408 
3409 void G1CollectedHeap::print_cset_rsets() {
3410   PrintRSetsClosure cl("Printing CSet RSets");
3411   collection_set_iterate(&cl);
3412 }
3413 
3414 void G1CollectedHeap::print_all_rsets() {
3415   PrintRSetsClosure cl("Printing All RSets");
3416   heap_region_iterate(&cl);
3417 }
3418 #endif // PRODUCT
3419 
3420 G1CollectedHeap* G1CollectedHeap::heap() {
3421   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
3422          "not a garbage-first heap");
3423   return _g1h;
3424 }
3425 
3426 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3427   // always_do_update_barrier = false;
3428   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3429   // Call allocation profiler
3430   AllocationProfiler::iterate_since_last_gc();
3431   // Fill TLAB's and such
3432   ensure_parsability(true);
3433 }
3434 
3435 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
3436   // FIXME: what is this about?
3437   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3438   // is set.
3439   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3440                         "derived pointer present"));
3441   // always_do_update_barrier = true;
3442 
3443   // We have just completed a GC. Update the soft reference
3444   // policy with the new heap occupancy
3445   Universe::update_heap_info_at_gc();
3446 }
3447 
3448 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3449                                                unsigned int gc_count_before,
3450                                                bool* succeeded) {
3451   assert_heap_not_locked_and_not_at_safepoint();
3452   g1_policy()->record_stop_world_start();
3453   VM_G1IncCollectionPause op(gc_count_before,
3454                              word_size,
3455                              false, /* should_initiate_conc_mark */
3456                              g1_policy()->max_pause_time_ms(),
3457                              GCCause::_g1_inc_collection_pause);
3458   VMThread::execute(&op);
3459 
3460   HeapWord* result = op.result();
3461   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3462   assert(result == NULL || ret_succeeded,
3463          "the result should be NULL if the VM did not succeed");
3464   *succeeded = ret_succeeded;
3465 
3466   assert_heap_not_locked();
3467   return result;
3468 }
3469 
3470 void
3471 G1CollectedHeap::doConcurrentMark() {
3472   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
3473   if (!_cmThread->in_progress()) {
3474     _cmThread->set_started();
3475     CGC_lock->notify();
3476   }
3477 }
3478 
3479 size_t G1CollectedHeap::pending_card_num() {
3480   size_t extra_cards = 0;
3481   JavaThread *curr = Threads::first();
3482   while (curr != NULL) {
3483     DirtyCardQueue& dcq = curr->dirty_card_queue();
3484     extra_cards += dcq.size();
3485     curr = curr->next();
3486   }
3487   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3488   size_t buffer_size = dcqs.buffer_size();
3489   size_t buffer_num = dcqs.completed_buffers_num();
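  // For instance (hypothetical numbers): 5 completed buffers of 256 cards
  // each, plus 40 cards still sitting in per-thread queues, would yield
  // 5 * 256 + 40 = 1320 pending cards.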
3490   return buffer_size * buffer_num + extra_cards;
3491 }
3492 
3493 size_t G1CollectedHeap::max_pending_card_num() {
3494   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3495   size_t buffer_size = dcqs.buffer_size();
3496   size_t buffer_num  = dcqs.completed_buffers_num();
3497   int thread_num  = Threads::number_of_threads();
3498   return (buffer_num + thread_num) * buffer_size;
3499 }
3500 
3501 size_t G1CollectedHeap::cards_scanned() {
3502   return g1_rem_set()->cardsScanned();
3503 }
3504 
3505 void
3506 G1CollectedHeap::setup_surviving_young_words() {
3507   assert(_surviving_young_words == NULL, "pre-condition");
3508   uint array_length = g1_policy()->young_cset_region_length();
3509   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3510   if (_surviving_young_words == NULL) {
3511     vm_exit_out_of_memory(sizeof(size_t) * array_length,
3512                           "Not enough space for young surv words summary.");
3513   }
3514   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3515 #ifdef ASSERT
3516   for (uint i = 0;  i < array_length; ++i) {
3517     assert( _surviving_young_words[i] == 0, "memset above" );
3518   }
3519 #endif // ASSERT
3520 }
3521 
3522 void
3523 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3524   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3525   uint array_length = g1_policy()->young_cset_region_length();
3526   for (uint i = 0; i < array_length; ++i) {
3527     _surviving_young_words[i] += surv_young_words[i];
3528   }
3529 }
3530 
3531 void
3532 G1CollectedHeap::cleanup_surviving_young_words() {
3533   guarantee( _surviving_young_words != NULL, "pre-condition" );
3534   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
3535   _surviving_young_words = NULL;
3536 }
3537 
3538 #ifdef ASSERT
3539 class VerifyCSetClosure: public HeapRegionClosure {
3540 public:
3541   bool doHeapRegion(HeapRegion* hr) {
3542     // Here we check that the CSet region's RSet is ready for parallel
3543     // iteration. The fields that we'll verify are only manipulated
3544     // when the region is part of a CSet and is collected. Afterwards,
3545     // we reset these fields when we clear the region's RSet (when the
3546     // region is freed) so they are ready when the region is
3547     // re-allocated. The only exception to this is if there's an
3548     // evacuation failure and instead of freeing the region we leave
3549     // it in the heap. In that case, we reset these fields during
3550     // evacuation failure handling.
3551     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3552 
3553     // Here's a good place to add any other checks we'd like to
3554     // perform on CSet regions.
3555     return false;
3556   }
3557 };
3558 #endif // ASSERT
3559 
3560 #if TASKQUEUE_STATS
3561 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3562   st->print_raw_cr("GC Task Stats");
3563   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3564   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3565 }
3566 
3567 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3568   print_taskqueue_stats_hdr(st);
3569 
3570   TaskQueueStats totals;
3571   const int n = workers() != NULL ? workers()->total_workers() : 1;
3572   for (int i = 0; i < n; ++i) {
3573     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3574     totals += task_queue(i)->stats;
3575   }
3576   st->print_raw("tot "); totals.print(st); st->cr();
3577 
3578   DEBUG_ONLY(totals.verify());
3579 }
3580 
3581 void G1CollectedHeap::reset_taskqueue_stats() {
3582   const int n = workers() != NULL ? workers()->total_workers() : 1;
3583   for (int i = 0; i < n; ++i) {
3584     task_queue(i)->stats.reset();
3585   }
3586 }
3587 #endif // TASKQUEUE_STATS
3588 
3589 bool
3590 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3591   assert_at_safepoint(true /* should_be_vm_thread */);
3592   guarantee(!is_gc_active(), "collection is not reentrant");
3593 
3594   if (GC_locker::check_active_before_gc()) {
3595     return false;
3596   }
3597 
3598   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3599   ResourceMark rm;
3600 
3601   print_heap_before_gc();
3602 
3603   HRSPhaseSetter x(HRSPhaseEvacuation);
3604   verify_region_sets_optional();
3605   verify_dirty_young_regions();
3606 
3607   // This call will decide whether this pause is an initial-mark
3608   // pause. If it is, during_initial_mark_pause() will return true
3609   // for the duration of this pause.
3610   g1_policy()->decide_on_conc_mark_initiation();
3611 
3612   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3613   assert(!g1_policy()->during_initial_mark_pause() ||
3614           g1_policy()->gcs_are_young(), "sanity");
3615 
3616   // We also do not allow mixed GCs during marking.
3617   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3618 
3619   // Record whether this pause is an initial mark. By the time the current
3620   // thread has completed its logging output and it is safe to signal
3621   // the CM thread, the flag's value in the policy will have been reset.
3622   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3623 
3624   // Inner scope for scope based logging, timers, and stats collection
3625   {
3626     if (g1_policy()->during_initial_mark_pause()) {
3627       // We are about to start a marking cycle, so we increment the
3628       // full collection counter.
3629       increment_old_marking_cycles_started();
3630     }
3631     // If the "finer" log level is on, we'll print long statistics information
3632     // in the collector policy code, so let's not print this as the output
3633     // is messy if we do.
3634     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
3635     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3636 
3637     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3638                                 workers()->active_workers() : 1);
3639     g1_policy()->phase_times()->note_gc_start(os::elapsedTime(), active_workers,
3640       g1_policy()->gcs_are_young(), g1_policy()->during_initial_mark_pause(), gc_cause());
3641 
3642     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3643     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3644 
3645     // If the secondary_free_list is not empty, append it to the
3646     // free_list. No need to wait for the cleanup operation to finish;
3647     // the region allocation code will check the secondary_free_list
3648     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3649     // set, skip this step so that the region allocation code has to
3650     // get entries from the secondary_free_list.
3651     if (!G1StressConcRegionFreeing) {
3652       append_secondary_free_list_if_not_empty_with_lock();
3653     }
3654 
3655     assert(check_young_list_well_formed(),
3656       "young list should be well formed");
3657 
3658     // Don't dynamically change the number of GC threads this early.  A value of
3659     // 0 is used to indicate serial work.  When parallel work is done,
3660     // it will be set.
3661 
3662     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3663       IsGCActiveMark x;
3664 
3665       gc_prologue(false);
3666       increment_total_collections(false /* full gc */);
3667       increment_gc_time_stamp();
3668 
3669       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
3670         HandleMark hm;  // Discard invalid handles created during verification
3671         gclog_or_tty->print(" VerifyBeforeGC:");
3672         prepare_for_verify();
3673         Universe::verify(/* silent      */ false,
3674                          /* option      */ VerifyOption_G1UsePrevMarking);
3675       }
3676 
3677       COMPILER2_PRESENT(DerivedPointerTable::clear());
3678 
3679       // Please see comment in g1CollectedHeap.hpp and
3680       // G1CollectedHeap::ref_processing_init() to see how
3681       // reference processing currently works in G1.
3682 
3683       // Enable discovery in the STW reference processor
3684       ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3685                                             true /*verify_no_refs*/);
3686 
3687       {
3688         // We want to temporarily turn off discovery by the
3689         // CM ref processor, if necessary, and turn it back
3690         // on again later if we do. Using a scoped
3691         // NoRefDiscovery object will do this.
3692         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3693 
3694         // Forget the current alloc region (we might even choose it to be part
3695         // of the collection set!).
3696         release_mutator_alloc_region();
3697 
3698         // We should call this after we retire the mutator alloc
3699         // region(s) so that all the ALLOC / RETIRE events are generated
3700         // before the start GC event.
3701         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3702 
3703         // This timing is only used by the ergonomics to handle our pause target.
3704         // It is unclear why this should not include the full pause. We will
3705         // investigate this in CR 7178365.
3706         //
3707         // Preserving the old comment here if that helps the investigation:
3708         //
3709         // The elapsed time induced by the start time below deliberately elides
3710         // the possible verification above.
3711         double sample_start_time_sec = os::elapsedTime();
3712         size_t start_used_bytes = used();
3713 
3714 #if YOUNG_LIST_VERBOSE
3715         gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
3716         _young_list->print();
3717         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3718 #endif // YOUNG_LIST_VERBOSE
3719 
3720         g1_policy()->record_collection_pause_start(sample_start_time_sec,
3721                                                    start_used_bytes);
3722 
3723         double scan_wait_start = os::elapsedTime();
3724         // We have to wait until the CM threads finish scanning the
3725         // root regions as it's the only way to ensure that all the
3726         // objects on them have been correctly scanned before we start
3727         // moving them during the GC.
3728         bool waited = _cm->root_regions()->wait_until_scan_finished();
3729         double wait_time_ms = 0.0;
3730         if (waited) {
3731           double scan_wait_end = os::elapsedTime();
3732           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3733         }
3734         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3735 
3736 #if YOUNG_LIST_VERBOSE
3737         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3738         _young_list->print();
3739 #endif // YOUNG_LIST_VERBOSE
3740 
3741         if (g1_policy()->during_initial_mark_pause()) {
3742           concurrent_mark()->checkpointRootsInitialPre();
3743         }
3744         perm_gen()->save_marks();
3745 
3746 #if YOUNG_LIST_VERBOSE
3747         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3748         _young_list->print();
3749         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3750 #endif // YOUNG_LIST_VERBOSE
3751 
3752         g1_policy()->finalize_cset(target_pause_time_ms);
3753 
3754         _cm->note_start_of_gc();
3755         // We should not verify the per-thread SATB buffers given that
3756         // we have not filtered them yet (we'll do so during the
3757         // GC). We also call this after finalize_cset() to
3758         // ensure that the CSet has been finalized.
3759         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3760                                  true  /* verify_enqueued_buffers */,
3761                                  false /* verify_thread_buffers */,
3762                                  true  /* verify_fingers */);
3763 
3764         if (_hr_printer.is_active()) {
3765           HeapRegion* hr = g1_policy()->collection_set();
3766           while (hr != NULL) {
3767             G1HRPrinter::RegionType type;
3768             if (!hr->is_young()) {
3769               type = G1HRPrinter::Old;
3770             } else if (hr->is_survivor()) {
3771               type = G1HRPrinter::Survivor;
3772             } else {
3773               type = G1HRPrinter::Eden;
3774             }
3775             _hr_printer.cset(hr);
3776             hr = hr->next_in_collection_set();
3777           }
3778         }
3779 
3780 #ifdef ASSERT
3781         VerifyCSetClosure cl;
3782         collection_set_iterate(&cl);
3783 #endif // ASSERT
3784 
3785         setup_surviving_young_words();
3786 
3787         // Initialize the GC alloc regions.
3788         init_gc_alloc_regions();
3789 
3790         // Actually do the work...
3791         evacuate_collection_set();
3792 
3793         // We do this to mainly verify the per-thread SATB buffers
3794         // (which have been filtered by now) since we didn't verify
3795         // them earlier. No point in re-checking the stacks / enqueued
3796         // buffers given that the CSet has not changed since last time
3797         // we checked.
3798         _cm->verify_no_cset_oops(false /* verify_stacks */,
3799                                  false /* verify_enqueued_buffers */,
3800                                  true  /* verify_thread_buffers */,
3801                                  true  /* verify_fingers */);
3802 
3803         free_collection_set(g1_policy()->collection_set());
3804         g1_policy()->clear_collection_set();
3805 
3806         cleanup_surviving_young_words();
3807 
3808         // Start a new incremental collection set for the next pause.
3809         g1_policy()->start_incremental_cset_building();
3810 
3811         // Clear the _cset_fast_test bitmap in anticipation of adding
3812         // regions to the incremental collection set for the next
3813         // evacuation pause.
3814         clear_cset_fast_test();
3815 
3816         _young_list->reset_sampled_info();
3817 
3818         // Don't check the whole heap at this point as the
3819         // GC alloc regions from this pause have been tagged
3820         // as survivors and moved on to the survivor list.
3821         // Survivor regions will fail the !is_young() check.
3822         assert(check_young_list_empty(false /* check_heap */),
3823           "young list should be empty");
3824 
3825 #if YOUNG_LIST_VERBOSE
3826         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
3827         _young_list->print();
3828 #endif // YOUNG_LIST_VERBOSE
3829 
3830         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3831                                             _young_list->first_survivor_region(),
3832                                             _young_list->last_survivor_region());
3833 
3834         _young_list->reset_auxilary_lists();
3835 
3836         if (evacuation_failed()) {
3837           _summary_bytes_used = recalculate_used();
3838         } else {
3839           // The "used" of the collection set regions has already been
3840           // subtracted when they were freed.  Add in the bytes evacuated.
3841           _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
3842         }
3843 
3844         if (g1_policy()->during_initial_mark_pause()) {
3845           // We have to do this before we notify the CM threads that
3846           // they can start working to make sure that all the
3847           // appropriate initialization is done on the CM object.
3848           concurrent_mark()->checkpointRootsInitialPost();
3849           set_marking_started();
3850           // Note that we don't actually trigger the CM thread at
3851           // this point. We do that later when we're sure that
3852           // the current thread has completed its logging output.
3853         }
3854 
3855         allocate_dummy_regions();
3856 
3857 #if YOUNG_LIST_VERBOSE
3858         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3859         _young_list->print();
3860         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3861 #endif // YOUNG_LIST_VERBOSE
3862 
3863         init_mutator_alloc_region();
3864 
3865         {
3866           size_t expand_bytes = g1_policy()->expansion_amount();
3867           if (expand_bytes > 0) {
3868             size_t bytes_before = capacity();
3869             // No need for an ergo verbose message here,
3870             // expansion_amount() does this when it returns a value > 0.
3871             if (!expand(expand_bytes)) {
3872               // We failed to expand the heap so let's verify that
3873               // committed/uncommitted amount match the backing store
3874               assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
3875               assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
3876             }
3877           }
3878         }
3879 
3880         // We redo the verification, but now with respect to the new CSet
3881         // which has just been initialized after the previous CSet was freed.
3882         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3883                                  true  /* verify_enqueued_buffers */,
3884                                  true  /* verify_thread_buffers */,
3885                                  true  /* verify_fingers */);
3886         _cm->note_end_of_gc();
3887 
3888         // Collect thread local data to allow the ergonomics to use
3889         // the collected information
3890         g1_policy()->phase_times()->collapse_par_times();
3891 
3892         // This timing is only used by the ergonomics to handle our pause target.
3893         // It is unclear why this should not include the full pause. We will
3894         // investigate this in CR 7178365.
3895         double sample_end_time_sec = os::elapsedTime();
3896         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3897         g1_policy()->record_collection_pause_end(pause_time_ms);
3898 
3899         MemoryService::track_memory_usage();
3900 
3901         // In prepare_for_verify() below we'll need to scan the deferred
3902         // update buffers to bring the RSets up-to-date if
3903         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3904         // the update buffers we'll probably need to scan cards on the
3905         // regions we just allocated to (i.e., the GC alloc
3906         // regions). However, during the last GC we called
3907         // set_saved_mark() on all the GC alloc regions, so card
3908         // scanning might skip the [saved_mark_word()...top()] area of
3909         // those regions (i.e., the area we allocated objects into
3910         // during the last GC). But it shouldn't. Given that
3911         // saved_mark_word() is conditional on whether the GC time stamp
3912         // on the region is current or not, by incrementing the GC time
3913         // stamp here we invalidate all the GC time stamps on all the
3914         // regions and saved_mark_word() will simply return top() for
3915         // all the regions. This is a nicer way of ensuring this rather
3916         // than iterating over the regions and fixing them. In fact, the
3917         // GC time stamp increment here also ensures that
3918         // saved_mark_word() will return top() between pauses, i.e.,
3919         // during concurrent refinement. So we don't need the
3920         // is_gc_active() check to decide which top to use when
3921         // scanning cards (see CR 7039627).
3922         increment_gc_time_stamp();
3923 
3924         if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
3925           HandleMark hm;  // Discard invalid handles created during verification
3926           gclog_or_tty->print(" VerifyAfterGC:");
3927           prepare_for_verify();
3928           Universe::verify(/* silent      */ false,
3929                            /* option      */ VerifyOption_G1UsePrevMarking);
3930         }
3931 
3932         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3933         ref_processor_stw()->verify_no_references_recorded();
3934 
3935         // CM reference discovery will be re-enabled if necessary.
3936       }
3937 
3938       // We should do this after we potentially expand the heap so
3939       // that all the COMMIT events are generated before the end GC
3940       // event, and after we retire the GC alloc regions so that all
3941       // RETIRE events are generated before the end GC event.
3942       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
3943 
3944       if (mark_in_progress()) {
3945         concurrent_mark()->update_g1_committed();
3946       }
3947 
3948 #ifdef TRACESPINNING
3949       ParallelTaskTerminator::print_termination_counts();
3950 #endif
3951 
3952       gc_epilogue(false);
3953 
3954       g1_policy()->phase_times()->note_gc_end(os::elapsedTime());
3955 
3956       // We have to do this after we decide whether to expand the heap or not.
3957       g1_policy()->print_heap_transition();
3958     }
3959 
3960     // It is not yet safe to tell the concurrent mark to
3961     // start as we have some optional output below. We don't want the
3962     // output from the concurrent mark thread interfering with this
3963     // logging output either.
3964 
3965     _hrs.verify_optional();
3966     verify_region_sets_optional();
3967 
3968     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
3969     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3970 
3971     print_heap_after_gc();
3972 
3973     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3974     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3975     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3976     // before any GC notifications are raised.
3977     g1mm()->update_sizes();
3978   }
3979 
3980   if (G1SummarizeRSetStats &&
3981       (G1SummarizeRSetStatsPeriod > 0) &&
3982       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3983     g1_rem_set()->print_summary_info();
3984   }
3985 
3986   // It should now be safe to tell the concurrent mark thread to start
3987   // without its logging output interfering with the logging output
3988   // that came from the pause.
3989 
3990   if (should_start_conc_mark) {
3991     // CAUTION: after the doConcurrentMark() call below,
3992     // the concurrent marking thread(s) could be running
3993     // concurrently with us. Make sure that anything after
3994     // this point does not assume that we are the only GC thread
3995     // running. Note: of course, the actual marking work will
3996     // not start until the safepoint itself is released in
3997     // ConcurrentGCThread::safepoint_desynchronize().
3998     doConcurrentMark();
3999   }
4000 
4001   return true;
4002 }
4003 
4004 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
4005 {
4006   size_t gclab_word_size;
4007   switch (purpose) {
4008     case GCAllocForSurvived:
4009       gclab_word_size = YoungPLABSize;
4010       break;
4011     case GCAllocForTenured:
4012       gclab_word_size = OldPLABSize;
4013       break;
4014     default:
4015       assert(false, "unknown GCAllocPurpose");
4016       gclab_word_size = OldPLABSize;
4017       break;
4018   }
4019   return gclab_word_size;
4020 }
4021 
4022 void G1CollectedHeap::init_mutator_alloc_region() {
4023   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
4024   _mutator_alloc_region.init();
4025 }
4026 
4027 void G1CollectedHeap::release_mutator_alloc_region() {
4028   _mutator_alloc_region.release();
4029   assert(_mutator_alloc_region.get() == NULL, "post-condition");
4030 }
4031 
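// Initializes the survivor and old GC alloc regions at the start of an
// evacuation pause and, when possible, re-installs the old GC alloc
// region retained from the previous pause (see the conditions listed
// below).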
4032 void G1CollectedHeap::init_gc_alloc_regions() {
4033   assert_at_safepoint(true /* should_be_vm_thread */);
4034 
4035   _survivor_gc_alloc_region.init();
4036   _old_gc_alloc_region.init();
4037   HeapRegion* retained_region = _retained_old_gc_alloc_region;
4038   _retained_old_gc_alloc_region = NULL;
4039 
4040   // We will discard the current GC alloc region if:
4041   // a) it's in the collection set (it can happen!),
4042   // b) it's already full (no point in using it),
4043   // c) it's empty (this means that it was emptied during
4044   // a cleanup and it should be on the free list now), or
4045   // d) it's humongous (this means that it was emptied
4046   // during a cleanup and was added to the free list, but
4047   // has been subsequently used to allocate a humongous
4048   // object that may be less than the region size).
4049   if (retained_region != NULL &&
4050       !retained_region->in_collection_set() &&
4051       !(retained_region->top() == retained_region->end()) &&
4052       !retained_region->is_empty() &&
4053       !retained_region->isHumongous()) {
4054     retained_region->set_saved_mark();
4055     // The retained region was added to the old region set when it was
4056     // retired. We have to remove it now, since we don't allow regions
4057     // we allocate to in the region sets. We'll re-add it later, when
4058     // it's retired again.
4059     _old_set.remove(retained_region);
4060     bool during_im = g1_policy()->during_initial_mark_pause();
4061     retained_region->note_start_of_copying(during_im);
4062     _old_gc_alloc_region.set(retained_region);
4063     _hr_printer.reuse(retained_region);
4064   }
4065 }
4066 
4067 void G1CollectedHeap::release_gc_alloc_regions() {
4068   _survivor_gc_alloc_region.release();
4069   // If we have an old GC alloc region to release, we'll save it in
4070   // _retained_old_gc_alloc_region. If we don't,
4071   // _retained_old_gc_alloc_region will become NULL. This is what we
4072   // want either way so no reason to check explicitly for either
4073   // condition.
4074   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
4075 }
4076 
4077 void G1CollectedHeap::abandon_gc_alloc_regions() {
4078   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
4079   assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
4080   _retained_old_gc_alloc_region = NULL;
4081 }
4082 
4083 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
4084   _drain_in_progress = false;
4085   set_evac_failure_closure(cl);
4086   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
4087 }
4088 
4089 void G1CollectedHeap::finalize_for_evac_failure() {
4090   assert(_evac_failure_scan_stack != NULL &&
4091          _evac_failure_scan_stack->length() == 0,
4092          "Postcondition");
4093   assert(!_drain_in_progress, "Postcondition");
4094   delete _evac_failure_scan_stack;
4095   _evac_failure_scan_stack = NULL;
4096 }
4097 
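// Removes the self-forwarding pointers installed in regions that failed
// evacuation. The work is done by G1ParRemoveSelfForwardPtrsTask (run by
// the worker gang when parallel GC threads are in use), after which the
// collection set claim values are reset and any preserved mark words are
// restored to their objects.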
4098 void G1CollectedHeap::remove_self_forwarding_pointers() {
4099   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4100 
4101   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
4102 
4103   if (G1CollectedHeap::use_parallel_gc_threads()) {
4104     set_par_threads();
4105     workers()->run_task(&rsfp_task);
4106     set_par_threads(0);
4107   } else {
4108     rsfp_task.work(0);
4109   }
4110 
4111   assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
4112 
4113   // Reset the claim values in the regions in the collection set.
4114   reset_cset_heap_region_claim_values();
4115 
4116   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4117 
4118   // Now restore saved marks, if any.
4119   if (_objs_with_preserved_marks != NULL) {
4120     assert(_preserved_marks_of_objs != NULL, "Both or none.");
4121     guarantee(_objs_with_preserved_marks->length() ==
4122               _preserved_marks_of_objs->length(), "Both or none.");
4123     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
4124       oop obj   = _objs_with_preserved_marks->at(i);
4125       markOop m = _preserved_marks_of_objs->at(i);
4126       obj->set_mark(m);
4127     }
4128 
4129     // Delete the preserved marks growable arrays (allocated on the C heap).
4130     delete _objs_with_preserved_marks;
4131     delete _preserved_marks_of_objs;
4132     _objs_with_preserved_marks = NULL;
4133     _preserved_marks_of_objs = NULL;
4134   }
4135 }
4136 
4137 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4138   _evac_failure_scan_stack->push(obj);
4139 }
4140 
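// Pops objects off the evacuation failure scan stack and applies the
// registered evacuation failure closure to each object's fields.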
4141 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4142   assert(_evac_failure_scan_stack != NULL, "precondition");
4143 
4144   while (_evac_failure_scan_stack->length() > 0) {
4145      oop obj = _evac_failure_scan_stack->pop();
4146      _evac_failure_closure->set_region(heap_region_containing(obj));
4147      obj->oop_iterate_backwards(_evac_failure_closure);
4148   }
4149 }
4150 
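// Called when a worker fails to copy 'old' out of the collection set.
// The object is forwarded to itself; if this thread wins the race it
// handles the failure (taking EvacFailureStack_lock unless this is a
// recursive call from draining the scan stack), otherwise the forwarding
// pointer installed by the winning thread is returned.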
4151 oop
4152 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
4153                                                oop old) {
4154   assert(obj_in_cs(old),
4155          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
4156                  (HeapWord*) old));
4157   markOop m = old->mark();
4158   oop forward_ptr = old->forward_to_atomic(old);
4159   if (forward_ptr == NULL) {
4160     // Forward-to-self succeeded.
4161 
4162     if (_evac_failure_closure != cl) {
4163       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4164       assert(!_drain_in_progress,
4165              "Should only be true while someone holds the lock.");
4166       // Set the global evac-failure closure to the current thread's.
4167       assert(_evac_failure_closure == NULL, "Or locking has failed.");
4168       set_evac_failure_closure(cl);
4169       // Now do the common part.
4170       handle_evacuation_failure_common(old, m);
4171       // Reset to NULL.
4172       set_evac_failure_closure(NULL);
4173     } else {
4174       // The lock is already held, and this is recursive.
4175       assert(_drain_in_progress, "This should only be the recursive case.");
4176       handle_evacuation_failure_common(old, m);
4177     }
4178     return old;
4179   } else {
4180     // Forward-to-self failed. Either someone else managed to allocate
4181     // space for this object (old != forward_ptr) or they beat us in
4182     // self-forwarding it (old == forward_ptr).
4183     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
4184            err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
4185                    "should not be in the CSet",
4186                    (HeapWord*) old, (HeapWord*) forward_ptr));
4187     return forward_ptr;
4188   }
4189 }
4190 
4191 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
4192   set_evacuation_failed(true);
4193 
4194   preserve_mark_if_necessary(old, m);
4195 
4196   HeapRegion* r = heap_region_containing(old);
4197   if (!r->evacuation_failed()) {
4198     r->set_evacuation_failed(true);
4199     _hr_printer.evac_failure(r);
4200   }
4201 
4202   push_on_evac_failure_scan_stack(old);
4203 
4204   if (!_drain_in_progress) {
4205     // prevent recursion in copy_to_survivor_space()
4206     _drain_in_progress = true;
4207     drain_evac_failure_scan_stack();
4208     _drain_in_progress = false;
4209   }
4210 }
4211 
4212 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4213   assert(evacuation_failed(), "Oversaving!");
4214   // We want to call the "for_promotion_failure" version only in the
4215   // case of a promotion failure.
4216   if (m->must_be_preserved_for_promotion_failure(obj)) {
4217     if (_objs_with_preserved_marks == NULL) {
4218       assert(_preserved_marks_of_objs == NULL, "Both or none.");
4219       _objs_with_preserved_marks =
4220         new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
4221       _preserved_marks_of_objs =
4222         new (ResourceObj::C_HEAP, mtGC) GrowableArray<markOop>(40, true);
4223     }
4224     _objs_with_preserved_marks->push(obj);
4225     _preserved_marks_of_objs->push(m);
4226   }
4227 }
4228 
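// GC-time allocation in the GC alloc regions: for GCAllocForSurvived we
// try the survivor regions first and fall back to old, for
// GCAllocForTenured we try old first and fall back to the survivors.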
4229 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
4230                                                   size_t word_size) {
4231   if (purpose == GCAllocForSurvived) {
4232     HeapWord* result = survivor_attempt_allocation(word_size);
4233     if (result != NULL) {
4234       return result;
4235     } else {
4236       // Let's try to allocate in the old gen in case we can fit the
4237       // object there.
4238       return old_attempt_allocation(word_size);
4239     }
4240   } else {
4241     assert(purpose ==  GCAllocForTenured, "sanity");
4242     HeapWord* result = old_attempt_allocation(word_size);
4243     if (result != NULL) {
4244       return result;
4245     } else {
4246       // Let's try to allocate in the survivors in case we can fit the
4247       // object there.
4248       return survivor_attempt_allocation(word_size);
4249     }
4250   }
4251 
4252   ShouldNotReachHere();
4253   // Trying to keep some compilers happy.
4254   return NULL;
4255 }
4256 
4257 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4258   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
4259 
4260 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
4261   : _g1h(g1h),
4262     _refs(g1h->task_queue(queue_num)),
4263     _dcq(&g1h->dirty_card_queue_set()),
4264     _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
4265     _g1_rem(g1h->g1_rem_set()),
4266     _hash_seed(17), _queue_num(queue_num),
4267     _term_attempts(0),
4268     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4269     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4270     _age_table(false),
4271     _strong_roots_time(0), _term_time(0),
4272     _alloc_buffer_waste(0), _undo_waste(0) {
4273   // We allocate young_cset_region_length() plus one entries, since
4274   // we "sacrifice" entry 0 to keep track of surviving bytes for
4275   // non-young regions (where the age is -1).
4276   // We also add a few elements at the beginning and at the end in
4277   // an attempt to eliminate cache contention.
4278   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
4279   uint array_length = PADDING_ELEM_NUM +
4280                       real_length +
4281                       PADDING_ELEM_NUM;
4282   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
4283   if (_surviving_young_words_base == NULL)
4284     vm_exit_out_of_memory(array_length * sizeof(size_t),
4285                           "Not enough space for young surv histo.");
4286   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
4287   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
4288 
4289   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
4290   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
4291 
4292   _start = os::elapsedTime();
4293 }
4294 
4295 void
4296 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
4297 {
4298   st->print_raw_cr("GC Termination Stats");
4299   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
4300                    " ------waste (KiB)------");
4301   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
4302                    "  total   alloc    undo");
4303   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
4304                    " ------- ------- -------");
4305 }
4306 
4307 void
4308 G1ParScanThreadState::print_termination_stats(int i,
4309                                               outputStream* const st) const
4310 {
4311   const double elapsed_ms = elapsed_time() * 1000.0;
4312   const double s_roots_ms = strong_roots_time() * 1000.0;
4313   const double term_ms    = term_time() * 1000.0;
4314   st->print_cr("%3d %9.2f %9.2f %6.2f "
4315                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4316                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4317                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
4318                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
4319                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
4320                alloc_buffer_waste() * HeapWordSize / K,
4321                undo_waste() * HeapWordSize / K);
4322 }
4323 
4324 #ifdef ASSERT
4325 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
4326   assert(ref != NULL, "invariant");
4327   assert(UseCompressedOops, "sanity");
4328   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
4329   oop p = oopDesc::load_decode_heap_oop(ref);
4330   assert(_g1h->is_in_g1_reserved(p),
4331          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4332   return true;
4333 }
4334 
4335 bool G1ParScanThreadState::verify_ref(oop* ref) const {
4336   assert(ref != NULL, "invariant");
4337   if (has_partial_array_mask(ref)) {
4338     // Must be in the collection set--it's already been copied.
4339     oop p = clear_partial_array_mask(ref);
4340     assert(_g1h->obj_in_cs(p),
4341            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4342   } else {
4343     oop p = oopDesc::load_decode_heap_oop(ref);
4344     assert(_g1h->is_in_g1_reserved(p),
4345            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4346   }
4347   return true;
4348 }
4349 
4350 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4351   if (ref.is_narrow()) {
4352     return verify_ref((narrowOop*) ref);
4353   } else {
4354     return verify_ref((oop*) ref);
4355   }
4356 }
4357 #endif // ASSERT
4358 
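// Drains this worker's task queue: entries on the overflow stack are
// processed first (so that other workers can steal from the local
// queue), then the local queue, repeating until the queue is empty.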
4359 void G1ParScanThreadState::trim_queue() {
4360   assert(_evac_cl != NULL, "not set");
4361   assert(_evac_failure_cl != NULL, "not set");
4362   assert(_partial_scan_cl != NULL, "not set");
4363 
4364   StarTask ref;
4365   do {
4366     // Drain the overflow stack first, so other threads can steal.
4367     while (refs()->pop_overflow(ref)) {
4368       deal_with_reference(ref);
4369     }
4370 
4371     while (refs()->pop_local(ref)) {
4372       deal_with_reference(ref);
4373     }
4374   } while (!refs()->is_empty());
4375 }
4376 
4377 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4378                                      G1ParScanThreadState* par_scan_state) :
4379   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4380   _par_scan_state(par_scan_state),
4381   _worker_id(par_scan_state->queue_num()),
4382   _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
4383   _mark_in_progress(_g1->mark_in_progress()) { }
4384 
4385 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4386 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
4387 #ifdef ASSERT
4388   HeapRegion* hr = _g1->heap_region_containing(obj);
4389   assert(hr != NULL, "sanity");
4390   assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4391 #endif // ASSERT
4392 
4393   // We know that the object is not moving so it's safe to read its size.
4394   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4395 }
4396 
4397 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4398 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4399   ::mark_forwarded_object(oop from_obj, oop to_obj) {
4400 #ifdef ASSERT
4401   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4402   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4403   assert(from_obj != to_obj, "should not be self-forwarded");
4404 
4405   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
4406   assert(from_hr != NULL, "sanity");
4407   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4408 
4409   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4410   assert(to_hr != NULL, "sanity");
4411   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4412 #endif // ASSERT
4413 
4414   // The object might be in the process of being copied by another
4415   // worker so we cannot trust that its to-space image is
4416   // well-formed. So we have to read its size from its from-space
4417   // image which we know should not be changing.
4418   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4419 }
4420 
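// Copies 'old' out of the collection set into a survivor or old region
// (the destination is chosen by the policy from the object's age and
// size) and installs the forwarding pointer. When the destination tracks
// object ages, the age is incremented and recorded in the age table.
// Large object arrays are chunked via partial-array tasks; other objects
// have their fields scanned immediately. If no space can be allocated,
// the evacuation failure protocol is invoked instead.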
4421 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4422 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4423   ::copy_to_survivor_space(oop old) {
4424   size_t word_sz = old->size();
4425   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4426   // +1 to make the -1 indexes valid...
4427   int       young_index = from_region->young_index_in_cset()+1;
4428   assert( (from_region->is_young() && young_index >  0) ||
4429          (!from_region->is_young() && young_index == 0), "invariant" );
4430   G1CollectorPolicy* g1p = _g1->g1_policy();
4431   markOop m = old->mark();
4432   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4433                                            : m->age();
4434   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4435                                                              word_sz);
4436   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4437   oop       obj     = oop(obj_ptr);
4438 
4439   if (obj_ptr == NULL) {
4440     // This will either forward-to-self, or detect that someone else has
4441     // installed a forwarding pointer.
4442     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4443     return _g1->handle_evacuation_failure_par(cl, old);
4444   }
4445 
4446   // We're going to allocate linearly, so might as well prefetch ahead.
4447   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4448 
4449   oop forward_ptr = old->forward_to_atomic(obj);
4450   if (forward_ptr == NULL) {
4451     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4452     if (g1p->track_object_age(alloc_purpose)) {
4453       // We could simply do obj->incr_age(). However, this causes a
4454       // performance issue. obj->incr_age() will first check whether
4455       // the object has a displaced mark by checking its mark word;
4456       // getting the mark word from the new location of the object
4457       // stalls. So, given that we already have the mark word and we
4458       // are about to install it anyway, it's better to increase the
4459       // age on the mark word, when the object does not have a
4460       // displaced mark word. We're not expecting many objects to have
4461       // a displaced mark word, so that case is not optimized
4462       // further (it could be...) and we simply call obj->incr_age().
4463 
4464       if (m->has_displaced_mark_helper()) {
4465         // in this case, we have to install the mark word first,
4466         // otherwise obj looks to be forwarded (the old mark word,
4467         // which contains the forward pointer, was copied)
4468         obj->set_mark(m);
4469         obj->incr_age();
4470       } else {
4471         m = m->incr_age();
4472         obj->set_mark(m);
4473       }
4474       _par_scan_state->age_table()->add(obj, word_sz);
4475     } else {
4476       obj->set_mark(m);
4477     }
4478 
4479     size_t* surv_young_words = _par_scan_state->surviving_young_words();
4480     surv_young_words[young_index] += word_sz;
4481 
4482     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4483       // We keep track of the next start index in the length field of
4484       // the to-space object. The actual length can be found in the
4485       // length field of the from-space object.
4486       arrayOop(obj)->set_length(0);
4487       oop* old_p = set_partial_array_mask(old);
4488       _par_scan_state->push_on_queue(old_p);
4489     } else {
4490       // No point in using the slower heap_region_containing() method,
4491       // given that we know obj is in the heap.
4492       _scanner.set_region(_g1->heap_region_containing_raw(obj));
4493       obj->oop_iterate_backwards(&_scanner);
4494     }
4495   } else {
4496     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4497     obj = forward_ptr;
4498   }
4499   return obj;
4500 }
4501 
4502 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4503 template <class T>
4504 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4505 ::do_oop_work(T* p) {
4506   oop obj = oopDesc::load_decode_heap_oop(p);
4507   assert(barrier != G1BarrierRS || obj != NULL,
4508          "Precondition: G1BarrierRS implies obj is non-NULL");
4509 
4510   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4511 
4512   // here the null check is implicit in the in_cset_fast_test() call
4513   if (_g1->in_cset_fast_test(obj)) {
4514     oop forwardee;
4515     if (obj->is_forwarded()) {
4516       forwardee = obj->forwardee();
4517     } else {
4518       forwardee = copy_to_survivor_space(obj);
4519     }
4520     assert(forwardee != NULL, "forwardee should not be NULL");
4521     oopDesc::encode_store_heap_oop(p, forwardee);
4522     if (do_mark_object && forwardee != obj) {
4523       // If the object is self-forwarded we don't need to explicitly
4524       // mark it, the evacuation failure protocol will do so.
4525       mark_forwarded_object(obj, forwardee);
4526     }
4527 
4528     // When scanning the RS, we only care about objs in CS.
4529     if (barrier == G1BarrierRS) {
4530       _par_scan_state->update_rs(_from, p, _worker_id);
4531     }
4532   } else {
4533     // The object is not in the collection set. If we're a root scanning
4534     // closure during an initial mark pause (i.e. do_mark_object will
4535     // be true) then attempt to mark the object.
4536     if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
4537       mark_object(obj);
4538     }
4539   }
4540 
4541   if (barrier == G1BarrierEvac && obj != NULL) {
4542     _par_scan_state->update_rs(_from, p, _worker_id);
4543   }
4544 
4545   if (do_gen_barrier && obj != NULL) {
4546     par_do_barrier(p);
4547   }
4548 }
4549 
4550 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
4551 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4552 
4553 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4554   assert(has_partial_array_mask(p), "invariant");
4555   oop from_obj = clear_partial_array_mask(p);
4556 
4557   assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
4558   assert(from_obj->is_objArray(), "must be obj array");
4559   objArrayOop from_obj_array = objArrayOop(from_obj);
4560   // The from-space object contains the real length.
4561   int length                 = from_obj_array->length();
4562 
4563   assert(from_obj->is_forwarded(), "must be forwarded");
4564   oop to_obj                 = from_obj->forwardee();
4565   assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
4566   objArrayOop to_obj_array   = objArrayOop(to_obj);
4567   // We keep track of the next start index in the length field of the
4568   // to-space object.
4569   int next_index             = to_obj_array->length();
4570   assert(0 <= next_index && next_index < length,
4571          err_msg("invariant, next index: %d, length: %d", next_index, length));
4572 
4573   int start                  = next_index;
4574   int end                    = length;
4575   int remainder              = end - start;
4576   // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
4577   if (remainder > 2 * ParGCArrayScanChunk) {
4578     end = start + ParGCArrayScanChunk;
4579     to_obj_array->set_length(end);
4580     // Push the remainder before we process the range in case another
4581     // worker has run out of things to do and can steal it.
4582     oop* from_obj_p = set_partial_array_mask(from_obj);
4583     _par_scan_state->push_on_queue(from_obj_p);
4584   } else {
4585     assert(length == end, "sanity");
4586     // We'll process the final range for this object. Restore the length
4587     // so that the heap remains parsable in case of evacuation failure.
4588     to_obj_array->set_length(end);
4589   }
4590   _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
4591   // Process indexes [start,end). It will also process the header
4592   // along with the first chunk (i.e., the chunk with start == 0).
4593   // Note that at this point the length field of to_obj_array is not
4594   // correct given that we are using it to keep track of the next
4595   // start index. oop_iterate_range() (thankfully!) ignores the length
4596   // field and only relies on the start / end parameters.  It does
4597   // however return the size of the object which will be incorrect. So
4598   // we have to ignore it even if we wanted to use it.
4599   to_obj_array->oop_iterate_range(&_scanner, start, end);
4600 }
4601 
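// Closure that drains the worker's own queue and then repeatedly steals
// work from the other workers' queues until the termination protocol
// agrees that all queues are empty.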
4602 class G1ParEvacuateFollowersClosure : public VoidClosure {
4603 protected:
4604   G1CollectedHeap*              _g1h;
4605   G1ParScanThreadState*         _par_scan_state;
4606   RefToScanQueueSet*            _queues;
4607   ParallelTaskTerminator*       _terminator;
4608 
4609   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
4610   RefToScanQueueSet*      queues()         { return _queues; }
4611   ParallelTaskTerminator* terminator()     { return _terminator; }
4612 
4613 public:
4614   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4615                                 G1ParScanThreadState* par_scan_state,
4616                                 RefToScanQueueSet* queues,
4617                                 ParallelTaskTerminator* terminator)
4618     : _g1h(g1h), _par_scan_state(par_scan_state),
4619       _queues(queues), _terminator(terminator) {}
4620 
4621   void do_void();
4622 
4623 private:
4624   inline bool offer_termination();
4625 };
4626 
4627 bool G1ParEvacuateFollowersClosure::offer_termination() {
4628   G1ParScanThreadState* const pss = par_scan_state();
4629   pss->start_term_time();
4630   const bool res = terminator()->offer_termination();
4631   pss->end_term_time();
4632   return res;
4633 }
4634 
4635 void G1ParEvacuateFollowersClosure::do_void() {
4636   StarTask stolen_task;
4637   G1ParScanThreadState* const pss = par_scan_state();
4638   pss->trim_queue();
4639 
4640   do {
4641     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
4642       assert(pss->verify_task(stolen_task), "sanity");
4643       if (stolen_task.is_narrow()) {
4644         pss->deal_with_reference((narrowOop*) stolen_task);
4645       } else {
4646         pss->deal_with_reference((oop*) stolen_task);
4647       }
4648 
4649       // We've just processed a reference and we might have made
4650       // available new entries on the queues. So we have to make sure
4651       // we drain the queues as necessary.
4652       pss->trim_queue();
4653     }
4654   } while (!offer_termination());
4655 
4656   pss->retire_alloc_buffers();
4657 }
4658 
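// The gang task that performs the parallel part of an evacuation pause:
// each worker scans the strong roots with the copying closures and then
// drains and steals from the task queues until termination, recording
// per-worker timings along the way.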
4659 class G1ParTask : public AbstractGangTask {
4660 protected:
4661   G1CollectedHeap*       _g1h;
4662   RefToScanQueueSet      *_queues;
4663   ParallelTaskTerminator _terminator;
4664   uint _n_workers;
4665 
4666   Mutex _stats_lock;
4667   Mutex* stats_lock() { return &_stats_lock; }
4668 
4669   size_t getNCards() {
4670     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
4671       / G1BlockOffsetSharedArray::N_bytes;
4672   }
4673 
4674 public:
4675   G1ParTask(G1CollectedHeap* g1h,
4676             RefToScanQueueSet *task_queues)
4677     : AbstractGangTask("G1 collection"),
4678       _g1h(g1h),
4679       _queues(task_queues),
4680       _terminator(0, _queues),
4681       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4682   {}
4683 
4684   RefToScanQueueSet* queues() { return _queues; }
4685 
4686   RefToScanQueue *work_queue(int i) {
4687     return queues()->queue(i);
4688   }
4689 
4690   ParallelTaskTerminator* terminator() { return &_terminator; }
4691 
4692   virtual void set_for_termination(int active_workers) {
4693     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4694     // in the young space (_par_seq_tasks) in the G1 heap
4695     // for SequentialSubTasksDone.
4696     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
4697     // both of which need setting by set_n_termination().
4698     _g1h->SharedHeap::set_n_termination(active_workers);
4699     _g1h->set_n_termination(active_workers);
4700     terminator()->reset_for_reuse(active_workers);
4701     _n_workers = active_workers;
4702   }
4703 
4704   void work(uint worker_id) {
4705     if (worker_id >= _n_workers) return;  // no work needed this round
4706 
4707     double start_time_ms = os::elapsedTime() * 1000.0;
4708     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4709 
4710     {
4711       ResourceMark rm;
4712       HandleMark   hm;
4713 
4714       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
4715 
4716       G1ParScanThreadState            pss(_g1h, worker_id);
4717       G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
4718       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4719       G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
4720 
4721       pss.set_evac_closure(&scan_evac_cl);
4722       pss.set_evac_failure_closure(&evac_failure_cl);
4723       pss.set_partial_scan_closure(&partial_scan_cl);
4724 
4725       G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
4726       G1ParScanPermClosure           only_scan_perm_cl(_g1h, &pss, rp);
4727 
4728       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4729       G1ParScanAndMarkPermClosure    scan_mark_perm_cl(_g1h, &pss, rp);
4730 
4731       OopClosure*                    scan_root_cl = &only_scan_root_cl;
4732       OopsInHeapRegionClosure*       scan_perm_cl = &only_scan_perm_cl;
4733 
4734       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4735         // We also need to mark copied objects.
4736         scan_root_cl = &scan_mark_root_cl;
4737         scan_perm_cl = &scan_mark_perm_cl;
4738       }
4739 
4740       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
4741 
4742       pss.start_strong_roots();
4743       _g1h->g1_process_strong_roots(/* not collecting perm */ false,
4744                                     SharedHeap::SO_AllClasses,
4745                                     scan_root_cl,
4746                                     &push_heap_rs_cl,
4747                                     scan_perm_cl,
4748                                     worker_id);
4749       pss.end_strong_roots();
4750 
4751       {
4752         double start = os::elapsedTime();
4753         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4754         evac.do_void();
4755         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4756         double term_ms = pss.term_time()*1000.0;
4757         _g1h->g1_policy()->phase_times()->record_obj_copy_time(worker_id, elapsed_ms-term_ms);
4758         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4759       }
4760       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4761       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4762 
4763       if (ParallelGCVerbose) {
4764         MutexLocker x(stats_lock());
4765         pss.print_termination_stats(worker_id);
4766       }
4767 
4768       assert(pss.refs()->is_empty(), "should be empty");
4769 
4770       // Close the inner scope so that the ResourceMark and HandleMark
4771       // destructors are executed here and are included as part of the
4772       // "GC Worker Time".
4773     }
4774 
4775     double end_time_ms = os::elapsedTime() * 1000.0;
4776     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4777   }
4778 };
4779 
4780 // *** Common G1 Evacuation Stuff
4781 
4782 // Closures that support the filtering of CodeBlobs scanned during
4783 // external root scanning.
4784 
4785 // Closure applied to reference fields in code blobs (specifically nmethods)
4786 // to determine whether an nmethod contains references that point into
4787 // the collection set. Used as a predicate when walking code roots so
4788 // that only nmethods that point into the collection set are added to the
4789 // 'marked' list.
4790 
4791 class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
4792 
4793   class G1PointsIntoCSOopClosure : public OopClosure {
4794     G1CollectedHeap* _g1;
4795     bool _points_into_cs;
4796   public:
4797     G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
4798       _g1(g1), _points_into_cs(false) { }
4799 
4800     bool points_into_cs() const { return _points_into_cs; }
4801 
4802     template <class T>
4803     void do_oop_nv(T* p) {
4804       if (!_points_into_cs) {
4805         T heap_oop = oopDesc::load_heap_oop(p);
4806         if (!oopDesc::is_null(heap_oop) &&
4807             _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
4808           _points_into_cs = true;
4809         }
4810       }
4811     }
4812 
4813     virtual void do_oop(oop* p)        { do_oop_nv(p); }
4814     virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
4815   };
4816 
4817   G1CollectedHeap* _g1;
4818 
4819 public:
4820   G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
4821     CodeBlobToOopClosure(cl, true), _g1(g1) { }
4822 
4823   virtual void do_code_blob(CodeBlob* cb) {
4824     nmethod* nm = cb->as_nmethod_or_null();
4825     if (nm != NULL && !(nm->test_oops_do_mark())) {
4826       G1PointsIntoCSOopClosure predicate_cl(_g1);
4827       nm->oops_do(&predicate_cl);
4828 
4829       if (predicate_cl.points_into_cs()) {
4830         // At least one of the reference fields or the oop relocations
4831         // in the nmethod points into the collection set. We have to
4832         // 'mark' this nmethod.
4833         // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
4834         // or MarkingCodeBlobClosure::do_code_blob() change.
4835         if (!nm->test_set_oops_do_mark()) {
4836           do_newly_marked_nmethod(nm);
4837         }
4838       }
4839     }
4840   }
4841 };
4842 
4843 // This method is run in a GC worker.
4844 
4845 void
4846 G1CollectedHeap::
4847 g1_process_strong_roots(bool collecting_perm_gen,
4848                         ScanningOption so,
4849                         OopClosure* scan_non_heap_roots,
4850                         OopsInHeapRegionClosure* scan_rs,
4851                         OopsInGenClosure* scan_perm,
4852                         int worker_i) {
4853 
4854   // First scan the strong roots, including the perm gen.
4855   double ext_roots_start = os::elapsedTime();
4856   double closure_app_time_sec = 0.0;
4857 
4858   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4859   BufferingOopsInGenClosure buf_scan_perm(scan_perm);
4860   buf_scan_perm.set_generation(perm_gen());
4861 
4862   // Walk the code cache w/o buffering, because StarTask cannot handle
4863   // unaligned oop locations.
4864   G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
4865 
4866   process_strong_roots(false, // no scoping; this is parallel code
4867                        collecting_perm_gen, so,
4868                        &buf_scan_non_heap_roots,
4869                        &eager_scan_code_roots,
4870                        &buf_scan_perm);
4871 
4872   // Now the CM ref_processor roots.
4873   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4874     // We need to treat the discovered reference lists of the
4875     // concurrent mark ref processor as roots and keep entries
4876     // (which are added by the marking threads) on them live
4877     // until they can be processed at the end of marking.
4878     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4879   }
4880 
4881   // Finish up any enqueued closure apps (attributed as object copy time).
4882   buf_scan_non_heap_roots.done();
4883   buf_scan_perm.done();
4884 
4885   double ext_roots_end = os::elapsedTime();
4886 
4887   g1_policy()->phase_times()->reset_obj_copy_time(worker_i);
4888   double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
4889                                 buf_scan_non_heap_roots.closure_app_seconds();
4890   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4891 
4892   double ext_root_time_ms =
4893     ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4894 
4895   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4896 
4897   // During conc marking we have to filter the per-thread SATB buffers
4898   // to make sure we remove any oops into the CSet (which will show up
4899   // as implicitly live).
4900   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
4901     if (mark_in_progress()) {
4902       JavaThread::satb_mark_queue_set().filter_thread_buffers();
4903     }
4904   }
4905   double satb_filtering_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
4906   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
4907 
4908   // Now scan the complement of the collection set.
4909   if (scan_rs != NULL) {
4910     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
4911   }
4912 
4913   _process_strong_tasks->all_tasks_completed();
4914 }
4915 
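// Applies the given closures to the VM's weak roots via
// SharedHeap::process_weak_roots, wrapping root_closure in a non-marking
// CodeBlobToOopClosure for the code-blob roots.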
4916 void
4917 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
4918                                        OopClosure* non_root_closure) {
4919   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
4920   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
4921 }
4922 
4923 // Weak Reference Processing support
4924 
4925 // An always "is_alive" closure that is used to preserve referents.
4926 // If the object is non-null then it's alive.  Used in the preservation
4927 // of referent objects that are pointed to by reference objects
4928 // discovered by the CM ref processor.
4929 class G1AlwaysAliveClosure: public BoolObjectClosure {
4930   G1CollectedHeap* _g1;
4931 public:
4932   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
4933   void do_object(oop p) { assert(false, "Do not call."); }
4934   bool do_object_b(oop p) {
4935     if (p != NULL) {
4936       return true;
4937     }
4938     return false;
4939   }
4940 };
4941 
4942 bool G1STWIsAliveClosure::do_object_b(oop p) {
4943   // An object is reachable if it is outside the collection set,
4944   // or is inside and copied.
4945   return !_g1->obj_in_cs(p) || p->is_forwarded();
4946 }
4947 
4948 // Non Copying Keep Alive closure
4949 class G1KeepAliveClosure: public OopClosure {
4950   G1CollectedHeap* _g1;
4951 public:
4952   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
4953   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
4954   void do_oop(      oop* p) {
4955     oop obj = *p;
4956 
4957     if (_g1->obj_in_cs(obj)) {
4958       assert( obj->is_forwarded(), "invariant" );
4959       *p = obj->forwardee();
4960     }
4961   }
4962 };
4963 
4964 // Copying Keep Alive closure - can be called from both
4965 // serial and parallel code as long as different worker
4966 // threads utilize different G1ParScanThreadState instances
4967 // and different queues.
4968 
4969 class G1CopyingKeepAliveClosure: public OopClosure {
4970   G1CollectedHeap*         _g1h;
4971   OopClosure*              _copy_non_heap_obj_cl;
4972   OopsInHeapRegionClosure* _copy_perm_obj_cl;
4973   G1ParScanThreadState*    _par_scan_state;
4974 
4975 public:
4976   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
4977                             OopClosure* non_heap_obj_cl,
4978                             OopsInHeapRegionClosure* perm_obj_cl,
4979                             G1ParScanThreadState* pss):
4980     _g1h(g1h),
4981     _copy_non_heap_obj_cl(non_heap_obj_cl),
4982     _copy_perm_obj_cl(perm_obj_cl),
4983     _par_scan_state(pss)
4984   {}
4985 
4986   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
4987   virtual void do_oop(      oop* p) { do_oop_work(p); }
4988 
4989   template <class T> void do_oop_work(T* p) {
4990     oop obj = oopDesc::load_decode_heap_oop(p);
4991 
4992     if (_g1h->obj_in_cs(obj)) {
4993       // If the referent object has been forwarded (either copied
4994       // to a new location or to itself in the event of an
4995       // evacuation failure) then we need to update the reference
4996       // field and, if both reference and referent are in the G1
4997       // heap, update the RSet for the referent.
4998       //
4999       // If the referent has not been forwarded then we have to keep
5000       // it alive by policy. Therefore we have to copy the referent.
5001       //
5002       // If the reference field is in the G1 heap then we can push
5003       // on the PSS queue. When the queue is drained (after each
5004       // phase of reference processing) the object and its followers
5005       // will be copied, the reference field set to point to the
5006       // new location, and the RSet updated. Otherwise we need to
5007       // use the non-heap or perm closures directly to copy
5008       // the referent object and update the pointer, while avoiding
5009       // updating the RSet.
5010 
5011       if (_g1h->is_in_g1_reserved(p)) {
5012         _par_scan_state->push_on_queue(p);
5013       } else {
5014         // The reference field is not in the G1 heap.
5015         if (_g1h->perm_gen()->is_in(p)) {
5016           _copy_perm_obj_cl->do_oop(p);
5017         } else {
5018           _copy_non_heap_obj_cl->do_oop(p);
5019         }
5020       }
5021     }
5022   }
5023 };
5024 
5025 // Serial drain queue closure. Called as the 'complete_gc'
5026 // closure for each discovered list in some of the
5027 // reference processing phases.
5028 
5029 class G1STWDrainQueueClosure: public VoidClosure {
5030 protected:
5031   G1CollectedHeap* _g1h;
5032   G1ParScanThreadState* _par_scan_state;
5033 
5034   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
5035 
5036 public:
5037   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
5038     _g1h(g1h),
5039     _par_scan_state(pss)
5040   { }
5041 
5042   void do_void() {
5043     G1ParScanThreadState* const pss = par_scan_state();
5044     pss->trim_queue();
5045   }
5046 };
5047 
5048 // Parallel Reference Processing closures
5049 
5050 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5051 // processing during G1 evacuation pauses.
5052 
5053 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
5054 private:
5055   G1CollectedHeap*   _g1h;
5056   RefToScanQueueSet* _queues;
5057   FlexibleWorkGang*  _workers;
5058   int                _active_workers;
5059 
5060 public:
5061   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5062                         FlexibleWorkGang* workers,
5063                         RefToScanQueueSet *task_queues,
5064                         int n_workers) :
5065     _g1h(g1h),
5066     _queues(task_queues),
5067     _workers(workers),
5068     _active_workers(n_workers)
5069   {
5070     assert(n_workers > 0, "shouldn't call this otherwise");
5071   }
5072 
5073   // Executes the given task using the parallel GC worker threads.
5074   virtual void execute(ProcessTask& task);
5075   virtual void execute(EnqueueTask& task);
5076 };
5077 
5078 // Gang task for possibly parallel reference processing
5079 
5080 class G1STWRefProcTaskProxy: public AbstractGangTask {
5081   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5082   ProcessTask&     _proc_task;
5083   G1CollectedHeap* _g1h;
5084   RefToScanQueueSet *_task_queues;
5085   ParallelTaskTerminator* _terminator;
5086 
5087 public:
5088   G1STWRefProcTaskProxy(ProcessTask& proc_task,
5089                      G1CollectedHeap* g1h,
5090                      RefToScanQueueSet *task_queues,
5091                      ParallelTaskTerminator* terminator) :
5092     AbstractGangTask("Process reference objects in parallel"),
5093     _proc_task(proc_task),
5094     _g1h(g1h),
5095     _task_queues(task_queues),
5096     _terminator(terminator)
5097   {}
5098 
5099   virtual void work(uint worker_id) {
5100     // The reference processing task executed by a single worker.
5101     ResourceMark rm;
5102     HandleMark   hm;
5103 
5104     G1STWIsAliveClosure is_alive(_g1h);
5105 
5106     G1ParScanThreadState pss(_g1h, worker_id);
5107 
5108     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5109     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5110     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5111 
5112     pss.set_evac_closure(&scan_evac_cl);
5113     pss.set_evac_failure_closure(&evac_failure_cl);
5114     pss.set_partial_scan_closure(&partial_scan_cl);
5115 
5116     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5117     G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
5118 
5119     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5120     G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
5121 
5122     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5123     OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
5124 
5125     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5126       // We also need to mark copied objects.
5127       copy_non_heap_cl = &copy_mark_non_heap_cl;
5128       copy_perm_cl = &copy_mark_perm_cl;
5129     }
5130 
5131     // Keep alive closure.
5132     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
5133 
5134     // Complete GC closure
5135     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5136 
5137     // Call the reference processing task's work routine.
5138     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5139 
5140     // Note we cannot assert that the refs array is empty here as not all
5141     // of the processing tasks (specifically phase2 - pp2_work) execute
5142     // the complete_gc closure (which ordinarily would drain the queue) so
5143     // the queue may not be empty.
5144   }
5145 };
5146 
5147 // Driver routine for parallel reference processing.
5148 // Creates an instance of the ref processing gang
5149 // task and has the worker threads execute it.
5150 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
5151   assert(_workers != NULL, "Need parallel worker threads.");
5152 
5153   ParallelTaskTerminator terminator(_active_workers, _queues);
5154   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
5155 
5156   _g1h->set_par_threads(_active_workers);
5157   _workers->run_task(&proc_task_proxy);
5158   _g1h->set_par_threads(0);
5159 }
5160 
5161 // Gang task for parallel reference enqueueing.
5162 
5163 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
5164   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5165   EnqueueTask& _enq_task;
5166 
5167 public:
5168   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
5169     AbstractGangTask("Enqueue reference objects in parallel"),
5170     _enq_task(enq_task)
5171   { }
5172 
5173   virtual void work(uint worker_id) {
5174     _enq_task.work(worker_id);
5175   }
5176 };
5177 
5178 // Driver routine for parallel reference enqueueing.
5179 // Creates an instance of the ref enqueueing gang
5180 // task and has the worker threads execute it.
5181 
5182 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
5183   assert(_workers != NULL, "Need parallel worker threads.");
5184 
5185   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5186 
5187   _g1h->set_par_threads(_active_workers);
5188   _workers->run_task(&enq_task_proxy);
5189   _g1h->set_par_threads(0);
5190 }
5191 
5192 // End of weak reference support closures
5193 
5194 // Abstract task used to preserve (i.e. copy) any referent objects
5195 // that are in the collection set and are pointed to by reference
5196 // objects discovered by the CM ref processor.
5197 
5198 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5199 protected:
5200   G1CollectedHeap* _g1h;
5201   RefToScanQueueSet      *_queues;
5202   ParallelTaskTerminator _terminator;
5203   uint _n_workers;
5204 
5205 public:
5206   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
5207     AbstractGangTask("ParPreserveCMReferents"),
5208     _g1h(g1h),
5209     _queues(task_queues),
5210     _terminator(workers, _queues),
5211     _n_workers(workers)
5212   { }
5213 
5214   void work(uint worker_id) {
5215     ResourceMark rm;
5216     HandleMark   hm;
5217 
5218     G1ParScanThreadState            pss(_g1h, worker_id);
5219     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5220     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5221     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5222 
5223     pss.set_evac_closure(&scan_evac_cl);
5224     pss.set_evac_failure_closure(&evac_failure_cl);
5225     pss.set_partial_scan_closure(&partial_scan_cl);
5226 
5227     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5228 
5229 
5230     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5231     G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
5232 
5233     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5234     G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
5235 
5236     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5237     OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
5238 
5239     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5240       // We also need to mark copied objects.
5241       copy_non_heap_cl = &copy_mark_non_heap_cl;
5242       copy_perm_cl = &copy_mark_perm_cl;
5243     }
5244 
5245     // Is alive closure
5246     G1AlwaysAliveClosure always_alive(_g1h);
5247 
5248     // Copying keep alive closure. Applied to referent objects that need
5249     // to be copied.
5250     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
5251 
5252     ReferenceProcessor* rp = _g1h->ref_processor_cm();
5253 
5254     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5255     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5256 
5257     // limit is set using max_num_q() - which was set using ParallelGCThreads.
5258     // So this must be true - but assert just in case someone decides to
5259     // change the worker ids.
5260     assert(0 <= worker_id && worker_id < limit, "sanity");
5261     assert(!rp->discovery_is_atomic(), "check this code");
5262 
5263     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5264     for (uint idx = worker_id; idx < limit; idx += stride) {
5265       DiscoveredList& ref_list = rp->discovered_refs()[idx];
5266 
5267       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5268       while (iter.has_next()) {
5269         // Since discovery is not atomic for the CM ref processor, we
5270         // can see some null referent objects.
5271         iter.load_ptrs(DEBUG_ONLY(true));
5272         oop ref = iter.obj();
5273 
5274         // This will filter nulls.
5275         if (iter.is_referent_alive()) {
5276           iter.make_referent_alive();
5277         }
5278         iter.move_to_next();
5279       }
5280     }
5281 
5282     // Drain the queue - which may cause stealing
5283     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
5284     drain_queue.do_void();
5285     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5286     assert(pss.refs()->is_empty(), "should be");
5287   }
5288 };
5289 
5290 // Weak Reference processing during an evacuation pause (part 1).
5291 void G1CollectedHeap::process_discovered_references() {
5292   double ref_proc_start = os::elapsedTime();
5293 
5294   ReferenceProcessor* rp = _ref_processor_stw;
5295   assert(rp->discovery_enabled(), "should have been enabled");
5296 
5297   // Any reference objects, in the collection set, that were 'discovered'
5298   // by the CM ref processor should have already been copied (either by
5299   // applying the external root copy closure to the discovered lists, or
5300   // by following an RSet entry).
5301   //
5302   // But some of the referents, that are in the collection set, that these
5303   // reference objects point to may not have been copied: the STW ref
5304   // processor would have seen that the reference object had already
5305   // been 'discovered' and would have skipped discovering the reference,
5306   // but would not have treated the reference object as a regular oop.
5307   // As a result the copy closure would not have been applied to the
5308   // referent object.
5309   //
5310   // We need to explicitly copy these referent objects - the references
5311   // will be processed at the end of remarking.
5312   //
5313   // We also need to do this copying before we process the reference
5314   // objects discovered by the STW ref processor in case one of these
5315   // referents points to another object which is also referenced by an
5316   // object discovered by the STW ref processor.
5317 
5318   uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5319                         workers()->active_workers() : 1);
5320 
5321   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
5322            active_workers == workers()->active_workers(),
5323            "Need to reset active_workers");
5324 
5325   set_par_threads(active_workers);
5326   G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
5327 
5328   if (G1CollectedHeap::use_parallel_gc_threads()) {
5329     workers()->run_task(&keep_cm_referents);
5330   } else {
5331     keep_cm_referents.work(0);
5332   }
5333 
5334   set_par_threads(0);
5335 
5336   // Closure to test whether a referent is alive.
5337   G1STWIsAliveClosure is_alive(this);
5338 
5339   // Even when parallel reference processing is enabled, the processing
5340   // of JNI refs is performed serially by the current thread
5341   // rather than by a worker. The following PSS will be used for processing
5342   // JNI refs.
5343 
5344   // Use only a single queue for this PSS.
5345   G1ParScanThreadState pss(this, 0);
5346 
5347   // We do not embed a reference processor in the copying/scanning
5348   // closures while we're actually processing the discovered
5349   // reference objects.
5350   G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
5351   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5352   G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
5353 
5354   pss.set_evac_closure(&scan_evac_cl);
5355   pss.set_evac_failure_closure(&evac_failure_cl);
5356   pss.set_partial_scan_closure(&partial_scan_cl);
5357 
5358   assert(pss.refs()->is_empty(), "pre-condition");
5359 
5360   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5361   G1ParScanPermClosure           only_copy_perm_cl(this, &pss, NULL);
5362 
5363   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5364   G1ParScanAndMarkPermClosure    copy_mark_perm_cl(this, &pss, NULL);
5365 
5366   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5367   OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
5368 
5369   if (_g1h->g1_policy()->during_initial_mark_pause()) {
5370     // We also need to mark copied objects.
5371     copy_non_heap_cl = &copy_mark_non_heap_cl;
5372     copy_perm_cl = &copy_mark_perm_cl;
5373   }
5374 
5375   // Keep alive closure.
5376   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss);
5377 
5378   // Serial Complete GC closure
5379   G1STWDrainQueueClosure drain_queue(this, &pss);
5380 
5381   // Setup the soft refs policy...
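       // (passing false means soft references are kept or cleared according
       // to the current clearing policy, not cleared unconditionally)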
5382   rp->setup_policy(false);
5383 
5384   if (!rp->processing_is_mt()) {
5385     // Serial reference processing...
5386     rp->process_discovered_references(&is_alive,
5387                                       &keep_alive,
5388                                       &drain_queue,
5389                                       NULL);
5390   } else {
5391     // Parallel reference processing
5392     assert(rp->num_q() == active_workers, "sanity");
5393     assert(active_workers <= rp->max_num_q(), "sanity");
5394 
5395     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
5396     rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
5397   }
5398 
5399   // We have completed copying any necessary live referent objects
5400   // (that were not copied during the actual pause) so we can
5401   // retire any active alloc buffers
5402   pss.retire_alloc_buffers();
5403   assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5404 
5405   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5406   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5407 }
5408 
5409 // Weak Reference processing during an evacuation pause (part 2).
5410 void G1CollectedHeap::enqueue_discovered_references() {
5411   double ref_enq_start = os::elapsedTime();
5412 
5413   ReferenceProcessor* rp = _ref_processor_stw;
5414   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5415 
5416   // Now enqueue any references remaining on the discovered lists
5417   // onto the pending list.
5418   if (!rp->processing_is_mt()) {
5419     // Serial reference processing...
5420     rp->enqueue_discovered_references();
5421   } else {
5422     // Parallel reference enqueuing
5423 
5424     uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
5425     assert(active_workers == workers()->active_workers(),
5426            "Need to reset active_workers");
5427     assert(rp->num_q() == active_workers, "sanity");
5428     assert(active_workers <= rp->max_num_q(), "sanity");
5429 
5430     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
5431     rp->enqueue_discovered_references(&par_task_executor);
5432   }
5433 
5434   rp->verify_no_references_recorded();
5435   assert(!rp->discovery_enabled(), "should have been disabled");
5436 
5437   // FIXME
5438   // CM's reference processing also cleans up the string and symbol tables.
5439   // Should we do that here also? We could, but it is a serial operation
5440   // and could significantly increase the pause time.
5441 
5442   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5443   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5444 }
5445 
5446 void G1CollectedHeap::evacuate_collection_set() {
5447   _expand_heap_after_alloc_failure = true;
5448   set_evacuation_failed(false);
5449 
5450   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5451   concurrent_g1_refine()->set_use_cache(false);
5452   concurrent_g1_refine()->clear_hot_cache_claimed_index();
5453 
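       // Work out how many parallel workers to use for this pause, taking
       // the number of non-daemon Java threads into account when dynamic
       // GC thread counts are enabled.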
5454   uint n_workers;
5455   if (G1CollectedHeap::use_parallel_gc_threads()) {
5456     n_workers =
5457       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5458                                      workers()->active_workers(),
5459                                      Threads::number_of_non_daemon_threads());
5460     assert(UseDynamicNumberOfGCThreads ||
5461            n_workers == workers()->total_workers(),
5462            "If not dynamic should be using all the workers");
5463     workers()->set_active_workers(n_workers);
5464     set_par_threads(n_workers);
5465   } else {
5466     assert(n_par_threads() == 0,
5467            "Should be the original non-parallel value");
5468     n_workers = 1;
5469   }
5470 
5471   G1ParTask g1_par_task(this, _task_queues);
5472 
5473   init_for_evac_failure(NULL);
5474 
5475   rem_set()->prepare_for_younger_refs_iterate(true);
5476 
5477   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5478   double start_par_time_sec = os::elapsedTime();
5479   double end_par_time_sec;
5480 
5481   {
5482     StrongRootsScope srs(this);
5483 
5484     if (G1CollectedHeap::use_parallel_gc_threads()) {
5485       // The individual threads will set their evac-failure closures.
5486       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5487       // These tasks use SharedHeap::_process_strong_tasks
5488       assert(UseDynamicNumberOfGCThreads ||
5489              workers()->active_workers() == workers()->total_workers(),
5490              "If not dynamic should be using all the workers");
5491       workers()->run_task(&g1_par_task);
5492     } else {
5493       g1_par_task.set_for_termination(n_workers);
5494       g1_par_task.work(0);
5495     }
5496     end_par_time_sec = os::elapsedTime();
5497 
5498     // Closing the inner scope will execute the destructor
5499     // for the StrongRootsScope object. We record the current
5500     // elapsed time before closing the scope so that time
5501     // taken for the SRS destructor is NOT included in the
5502     // reported parallel time.
5503   }
5504 
5505   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5506   g1_policy()->phase_times()->record_par_time(par_time_ms);
5507 
5508   double code_root_fixup_time_ms =
5509         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5510   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5511 
5512   set_par_threads(0);
5513 
5514   // Process any discovered reference objects - we have
5515   // to do this _before_ we retire the GC alloc regions
5516   // as we may have to copy some 'reachable' referent
5517   // objects (and their reachable sub-graphs) that were
5518   // not copied during the pause.
5519   process_discovered_references();
5520 
5521   // Weak root processing.
5522   // Note: when JSR 292 is enabled and code blobs can contain
5523   // non-perm oops then we will need to process the code blobs
5524   // here too.
5525   {
5526     G1STWIsAliveClosure is_alive(this);
5527     G1KeepAliveClosure keep_alive(this);
5528     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5529   }
5530 
5531   release_gc_alloc_regions();
5532   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5533 
5534   concurrent_g1_refine()->clear_hot_cache();
5535   concurrent_g1_refine()->set_use_cache(true);
5536 
5537   finalize_for_evac_failure();
5538 
5539   if (evacuation_failed()) {
5540     remove_self_forwarding_pointers();
5541     if (G1Log::finer()) {
5542       gclog_or_tty->print(" (to-space exhausted)");
5543     } else if (G1Log::fine()) {
5544       gclog_or_tty->print("--");
5545     }
5546   }
5547 
5548   // Enqueue any references remaining on the STW
5549   // reference processor's discovered lists. We need to do
5550   // this after the card table is cleaned (and verified) as
5551   // the act of enqueuing entries on to the pending list
5552   // will log these updates (and dirty their associated
5553   // cards). We need these updates logged to update any
5554   // RSets.
5555   enqueue_discovered_references();
5556 
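       // Redirty the cards of any entries that were logged during the pause
       // and merge those buffers back into the JavaThread dirty card queue
       // set so that they will be picked up for refinement.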
5557   if (G1DeferredRSUpdate) {
5558     RedirtyLoggedCardTableEntryFastClosure redirty;
5559     dirty_card_queue_set().set_closure(&redirty);
5560     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5561 
5562     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5563     dcq.merge_bufferlists(&dirty_card_queue_set());
5564     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5565   }
5566   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5567 }
5568 
5569 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
5570                                      size_t* pre_used,
5571                                      FreeRegionList* free_list,
5572                                      OldRegionSet* old_proxy_set,
5573                                      HumongousRegionSet* humongous_proxy_set,
5574                                      HRRSCleanupTask* hrrs_cleanup_task,
5575                                      bool par) {
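       // A non-young region that contains data but has no live bytes after
       // marking can be reclaimed; otherwise we only do the remembered set
       // cleanup work for it.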
5576   if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
5577     if (hr->isHumongous()) {
5578       assert(hr->startsHumongous(), "we should only see starts humongous");
5579       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
5580     } else {
5581       _old_set.remove_with_proxy(hr, old_proxy_set);
5582       free_region(hr, pre_used, free_list, par);
5583     }
5584   } else {
5585     hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
5586   }
5587 }
5588 
5589 void G1CollectedHeap::free_region(HeapRegion* hr,
5590                                   size_t* pre_used,
5591                                   FreeRegionList* free_list,
5592                                   bool par) {
5593   assert(!hr->isHumongous(), "this is only for non-humongous regions");
5594   assert(!hr->is_empty(), "the region should not be empty");
5595   assert(free_list != NULL, "pre-condition");
5596 
5597   *pre_used += hr->used();
5598   hr->hr_clear(par, true /* clear_space */);
5599   free_list->add_as_head(hr);
5600 }
5601 
5602 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5603                                      size_t* pre_used,
5604                                      FreeRegionList* free_list,
5605                                      HumongousRegionSet* humongous_proxy_set,
5606                                      bool par) {
5607   assert(hr->startsHumongous(), "this is only for starts humongous regions");
5608   assert(free_list != NULL, "pre-condition");
5609   assert(humongous_proxy_set != NULL, "pre-condition");
5610 
5611   size_t hr_used = hr->used();
5612   size_t hr_capacity = hr->capacity();
5613   size_t hr_pre_used = 0;
5614   _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
5615   hr->set_notHumongous();
5616   free_region(hr, &hr_pre_used, free_list, par);
5617 
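       // Free the "continues humongous" regions that immediately follow the
       // "starts humongous" region, stopping at the first region that is not
       // part of this humongous object.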
5618   uint i = hr->hrs_index() + 1;
5619   uint num = 1;
5620   while (i < n_regions()) {
5621     HeapRegion* curr_hr = region_at(i);
5622     if (!curr_hr->continuesHumongous()) {
5623       break;
5624     }
5625     curr_hr->set_notHumongous();
5626     free_region(curr_hr, &hr_pre_used, free_list, par);
5627     num += 1;
5628     i += 1;
5629   }
5630   assert(hr_pre_used == hr_used,
5631          err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
5632                  "should be the same", hr_pre_used, hr_used));
5633   *pre_used += hr_pre_used;
5634 }
5635 
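     // Transfer the effects of freeing regions (accumulated used bytes, the
     // local free list, and the proxy sets) back to the global counters and
     // region sets under the appropriate locks.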
5636 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
5637                                        FreeRegionList* free_list,
5638                                        OldRegionSet* old_proxy_set,
5639                                        HumongousRegionSet* humongous_proxy_set,
5640                                        bool par) {
5641   if (pre_used > 0) {
5642     Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
5643     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
5644     assert(_summary_bytes_used >= pre_used,
5645            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
5646                    "should be >= pre_used: "SIZE_FORMAT,
5647                    _summary_bytes_used, pre_used));
5648     _summary_bytes_used -= pre_used;
5649   }
5650   if (free_list != NULL && !free_list->is_empty()) {
5651     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5652     _free_list.add_as_head(free_list);
5653   }
5654   if (old_proxy_set != NULL && !old_proxy_set->is_empty()) {
5655     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5656     _old_set.update_from_proxy(old_proxy_set);
5657   }
5658   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
5659     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5660     _humongous_set.update_from_proxy(humongous_proxy_set);
5661   }
5662 }
5663 
5664 class G1ParCleanupCTTask : public AbstractGangTask {
5665   CardTableModRefBS* _ct_bs;
5666   G1CollectedHeap* _g1h;
5667   HeapRegion* volatile _su_head;
5668 public:
5669   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
5670                      G1CollectedHeap* g1h) :
5671     AbstractGangTask("G1 Par Cleanup CT Task"),
5672     _ct_bs(ct_bs), _g1h(g1h) { }
5673 
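       // Each worker repeatedly claims a region from the global list of
       // regions with dirty cards and clears its card table entries.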
5674   void work(uint worker_id) {
5675     HeapRegion* r;
5676     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
5677       clear_cards(r);
5678     }
5679   }
5680 
5681   void clear_cards(HeapRegion* r) {
5682     // Cards of the survivors should have already been dirtied.
5683     if (!r->is_survivor()) {
5684       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
5685     }
5686   }
5687 };
5688 
5689 #ifndef PRODUCT
5690 class G1VerifyCardTableCleanup: public HeapRegionClosure {
5691   G1CollectedHeap* _g1h;
5692   CardTableModRefBS* _ct_bs;
5693 public:
5694   G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
5695     : _g1h(g1h), _ct_bs(ct_bs) { }
5696   virtual bool doHeapRegion(HeapRegion* r) {
5697     if (r->is_survivor()) {
5698       _g1h->verify_dirty_region(r);
5699     } else {
5700       _g1h->verify_not_dirty_region(r);
5701     }
5702     return false;
5703   }
5704 };
5705 
5706 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
5707   // All of the region should be clean.
5708   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
5709   MemRegion mr(hr->bottom(), hr->end());
5710   ct_bs->verify_not_dirty_region(mr);
5711 }
5712 
5713 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
5714   // We cannot guarantee that [bottom(),end()] is dirty.  Threads
5715   // dirty allocated blocks as they allocate them. The thread that
5716   // retires each region and replaces it with a new one will do a
5717   // maximal allocation to fill in [pre_dummy_top(),end()] but will
5718   // not dirty that area (one less thing to have to do while holding
5719   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
5720   // is dirty.
5721   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5722   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
5723   ct_bs->verify_dirty_region(mr);
5724 }
5725 
5726 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5727   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5728   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5729     verify_dirty_region(hr);
5730   }
5731 }
5732 
5733 void G1CollectedHeap::verify_dirty_young_regions() {
5734   verify_dirty_young_list(_young_list->first_region());
5735   verify_dirty_young_list(_young_list->first_survivor_region());
5736 }
5737 #endif
5738 
5739 void G1CollectedHeap::cleanUpCardTable() {
5740   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
5741   double start = os::elapsedTime();
5742 
5743   {
5744     // Iterate over the dirty cards region list.
5745     G1ParCleanupCTTask cleanup_task(ct_bs, this);
5746 
5747     if (G1CollectedHeap::use_parallel_gc_threads()) {
5748       set_par_threads();
5749       workers()->run_task(&cleanup_task);
5750       set_par_threads(0);
5751     } else {
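           // Walk the dirty cards region list directly; the last region on
           // the list points to itself, which is how the end is detected below.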
5752       while (_dirty_cards_region_list) {
5753         HeapRegion* r = _dirty_cards_region_list;
5754         cleanup_task.clear_cards(r);
5755         _dirty_cards_region_list = r->get_next_dirty_cards_region();
5756         if (_dirty_cards_region_list == r) {
5757           // The last region.
5758           _dirty_cards_region_list = NULL;
5759         }
5760         r->set_next_dirty_cards_region(NULL);
5761       }
5762     }
5763 #ifndef PRODUCT
5764     if (G1VerifyCTCleanup || VerifyAfterGC) {
5765       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
5766       heap_region_iterate(&cleanup_verifier);
5767     }
5768 #endif
5769   }
5770 
5771   double elapsed = os::elapsedTime() - start;
5772   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
5773 }
5774 
5775 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
5776   size_t pre_used = 0;
5777   FreeRegionList local_free_list("Local List for CSet Freeing");
5778 
5779   double young_time_ms     = 0.0;
5780   double non_young_time_ms = 0.0;
5781 
5782   // Since the collection set is a superset of the young list,
5783   // all we need to do to clear the young list is clear its
5784   // head and length, and unlink any young regions in the code below.
5785   _young_list->clear();
5786 
5787   G1CollectorPolicy* policy = g1_policy();
5788 
5789   double start_sec = os::elapsedTime();
5790   bool non_young = true;
5791 
5792   HeapRegion* cur = cs_head;
5793   int age_bound = -1;
5794   size_t rs_lengths = 0;
5795 
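       // Walk the collection set, accumulating the time spent freeing young
       // and non-young regions in separate buckets; the bucket is switched
       // whenever the type of the current region changes.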
5796   while (cur != NULL) {
5797     assert(!is_on_master_free_list(cur), "sanity");
5798     if (non_young) {
5799       if (cur->is_young()) {
5800         double end_sec = os::elapsedTime();
5801         double elapsed_ms = (end_sec - start_sec) * 1000.0;
5802         non_young_time_ms += elapsed_ms;
5803 
5804         start_sec = os::elapsedTime();
5805         non_young = false;
5806       }
5807     } else {
5808       if (!cur->is_young()) {
5809         double end_sec = os::elapsedTime();
5810         double elapsed_ms = (end_sec - start_sec) * 1000.0;
5811         young_time_ms += elapsed_ms;
5812 
5813         start_sec = os::elapsedTime();
5814         non_young = true;
5815       }
5816     }
5817 
5818     rs_lengths += cur->rem_set()->occupied();
5819 
5820     HeapRegion* next = cur->next_in_collection_set();
5821     assert(cur->in_collection_set(), "bad CS");
5822     cur->set_next_in_collection_set(NULL);
5823     cur->set_in_collection_set(false);
5824 
5825     if (cur->is_young()) {
5826       int index = cur->young_index_in_cset();
5827       assert(index != -1, "invariant");
5828       assert((uint) index < policy->young_cset_region_length(), "invariant");
5829       size_t words_survived = _surviving_young_words[index];
5830       cur->record_surv_words_in_group(words_survived);
5831 
5832       // At this point we have 'popped' cur from the collection set
5833       // (linked via next_in_collection_set()) but it is still in the
5834       // young list (linked via next_young_region()). Clear the
5835       // _next_young_region field.
5836       cur->set_next_young_region(NULL);
5837     } else {
5838       int index = cur->young_index_in_cset();
5839       assert(index == -1, "invariant");
5840     }
5841 
5842     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
5843             (!cur->is_young() && cur->young_index_in_cset() == -1),
5844             "invariant" );
5845 
5846     if (!cur->evacuation_failed()) {
5847       MemRegion used_mr = cur->used_region();
5848 
5849       // The region should never be empty at this point.
5850       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
5851       free_region(cur, &pre_used, &local_free_list, false /* par */);
5852     } else {
5853       cur->uninstall_surv_rate_group();
5854       if (cur->is_young()) {
5855         cur->set_young_index_in_cset(-1);
5856       }
5857       cur->set_not_young();
5858       cur->set_evacuation_failed(false);
5859       // The region is now considered to be old.
5860       _old_set.add(cur);
5861     }
5862     cur = next;
5863   }
5864 
5865   policy->record_max_rs_lengths(rs_lengths);
5866   policy->cset_regions_freed();
5867 
5868   double end_sec = os::elapsedTime();
5869   double elapsed_ms = (end_sec - start_sec) * 1000.0;
5870 
5871   if (non_young) {
5872     non_young_time_ms += elapsed_ms;
5873   } else {
5874     young_time_ms += elapsed_ms;
5875   }
5876 
5877   update_sets_after_freeing_regions(pre_used, &local_free_list,
5878                                     NULL /* old_proxy_set */,
5879                                     NULL /* humongous_proxy_set */,
5880                                     false /* par */);
5881   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
5882   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
5883 }
5884 
5885 // This routine is similar to the above but does not record
5886 // any policy statistics or update free lists; we are abandoning
5887 // the current incremental collection set in preparation for a
5888 // full collection. After the full GC we will start to build up
5889 // the incremental collection set again.
5890 // This is only called when we're doing a full collection
5891 // and is immediately followed by the tearing down of the young list.
5892 
5893 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5894   HeapRegion* cur = cs_head;
5895 
5896   while (cur != NULL) {
5897     HeapRegion* next = cur->next_in_collection_set();
5898     assert(cur->in_collection_set(), "bad CS");
5899     cur->set_next_in_collection_set(NULL);
5900     cur->set_in_collection_set(false);
5901     cur->set_young_index_in_cset(-1);
5902     cur = next;
5903   }
5904 }
5905 
5906 void G1CollectedHeap::set_free_regions_coming() {
5907   if (G1ConcRegionFreeingVerbose) {
5908     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5909                            "setting free regions coming");
5910   }
5911 
5912   assert(!free_regions_coming(), "pre-condition");
5913   _free_regions_coming = true;
5914 }
5915 
5916 void G1CollectedHeap::reset_free_regions_coming() {
5917   assert(free_regions_coming(), "pre-condition");
5918 
5919   {
5920     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5921     _free_regions_coming = false;
5922     SecondaryFreeList_lock->notify_all();
5923   }
5924 
5925   if (G1ConcRegionFreeingVerbose) {
5926     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5927                            "reset free regions coming");
5928   }
5929 }
5930 
5931 void G1CollectedHeap::wait_while_free_regions_coming() {
5932   // Most of the time we won't have to wait, so let's do a quick test
5933   // first before we take the lock.
5934   if (!free_regions_coming()) {
5935     return;
5936   }
5937 
5938   if (G1ConcRegionFreeingVerbose) {
5939     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5940                            "waiting for free regions");
5941   }
5942 
5943   {
5944     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5945     while (free_regions_coming()) {
5946       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5947     }
5948   }
5949 
5950   if (G1ConcRegionFreeingVerbose) {
5951     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5952                            "done waiting for free regions");
5953   }
5954 }
5955 
5956 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5957   assert(heap_lock_held_for_gc(),
5958               "the heap lock should already be held by or for this thread");
5959   _young_list->push_region(hr);
5960 }
5961 
5962 class NoYoungRegionsClosure: public HeapRegionClosure {
5963 private:
5964   bool _success;
5965 public:
5966   NoYoungRegionsClosure() : _success(true) { }
5967   bool doHeapRegion(HeapRegion* r) {
5968     if (r->is_young()) {
5969       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
5970                              r->bottom(), r->end());
5971       _success = false;
5972     }
5973     return false;
5974   }
5975   bool success() { return _success; }
5976 };
5977 
5978 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5979   bool ret = _young_list->check_list_empty(check_sample);
5980 
5981   if (check_heap) {
5982     NoYoungRegionsClosure closure;
5983     heap_region_iterate(&closure);
5984     ret = ret && closure.success();
5985   }
5986 
5987   return ret;
5988 }
5989 
5990 class TearDownRegionSetsClosure : public HeapRegionClosure {
5991 private:
5992   OldRegionSet *_old_set;
5993 
5994 public:
5995   TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { }
5996 
5997   bool doHeapRegion(HeapRegion* r) {
5998     if (r->is_empty()) {
5999       // We ignore empty regions, we'll empty the free list afterwards
6000     } else if (r->is_young()) {
6001       // We ignore young regions, we'll empty the young list afterwards
6002     } else if (r->isHumongous()) {
6003       // We ignore humongous regions, we're not tearing down the
6004       // humongous region set
6005     } else {
6006       // The rest should be old
6007       _old_set->remove(r);
6008     }
6009     return false;
6010   }
6011 
6012   ~TearDownRegionSetsClosure() {
6013     assert(_old_set->is_empty(), "post-condition");
6014   }
6015 };
6016 
6017 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6018   assert_at_safepoint(true /* should_be_vm_thread */);
6019 
6020   if (!free_list_only) {
6021     TearDownRegionSetsClosure cl(&_old_set);
6022     heap_region_iterate(&cl);
6023 
6024     // We empty the young list after the heap iteration so that, during
6025     // the iteration, young regions can still be recognized and ignored.
6026     _young_list->empty_list();
6027   }
6028   _free_list.remove_all();
6029 }
6030 
6031 class RebuildRegionSetsClosure : public HeapRegionClosure {
6032 private:
6033   bool            _free_list_only;
6034   OldRegionSet*   _old_set;
6035   FreeRegionList* _free_list;
6036   size_t          _total_used;
6037 
6038 public:
6039   RebuildRegionSetsClosure(bool free_list_only,
6040                            OldRegionSet* old_set, FreeRegionList* free_list) :
6041     _free_list_only(free_list_only),
6042     _old_set(old_set), _free_list(free_list), _total_used(0) {
6043     assert(_free_list->is_empty(), "pre-condition");
6044     if (!free_list_only) {
6045       assert(_old_set->is_empty(), "pre-condition");
6046     }
6047   }
6048 
6049   bool doHeapRegion(HeapRegion* r) {
6050     if (r->continuesHumongous()) {
6051       return false;
6052     }
6053 
6054     if (r->is_empty()) {
6055       // Add free regions to the free list
6056       _free_list->add_as_tail(r);
6057     } else if (!_free_list_only) {
6058       assert(!r->is_young(), "we should not come across young regions");
6059 
6060       if (r->isHumongous()) {
6061         // We ignore humongous regions, we left the humongous set unchanged
6062       } else {
6063         // The rest should be old, add them to the old set
6064         _old_set->add(r);
6065       }
6066       _total_used += r->used();
6067     }
6068 
6069     return false;
6070   }
6071 
6072   size_t total_used() {
6073     return _total_used;
6074   }
6075 };
6076 
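     // Rebuild the free list (and, unless free_list_only is true, the old
     // region set and _summary_bytes_used) from the current contents of the heap.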
6077 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6078   assert_at_safepoint(true /* should_be_vm_thread */);
6079 
6080   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
6081   heap_region_iterate(&cl);
6082 
6083   if (!free_list_only) {
6084     _summary_bytes_used = cl.total_used();
6085   }
6086   assert(_summary_bytes_used == recalculate_used(),
6087          err_msg("inconsistent _summary_bytes_used, "
6088                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6089                  _summary_bytes_used, recalculate_used()));
6090 }
6091 
6092 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6093   _refine_cte_cl->set_concurrent(concurrent);
6094 }
6095 
6096 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6097   HeapRegion* hr = heap_region_containing(p);
6098   if (hr == NULL) {
6099     return is_in_permanent(p);
6100   } else {
6101     return hr->is_in(p);
6102   }
6103 }
6104 
6105 // Methods for the mutator alloc region
6106 
6107 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6108                                                       bool force) {
6109   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6110   assert(!force || g1_policy()->can_expand_young_list(),
6111          "if force is true we should be able to expand the young list");
6112   bool young_list_full = g1_policy()->is_young_list_full();
6113   if (force || !young_list_full) {
6114     HeapRegion* new_alloc_region = new_region(word_size,
6115                                               false /* do_expand */);
6116     if (new_alloc_region != NULL) {
6117       set_region_short_lived_locked(new_alloc_region);
6118       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6119       return new_alloc_region;
6120     }
6121   }
6122   return NULL;
6123 }
6124 
6125 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6126                                                   size_t allocated_bytes) {
6127   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6128   assert(alloc_region->is_young(), "all mutator alloc regions should be young");
6129 
6130   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6131   _summary_bytes_used += allocated_bytes;
6132   _hr_printer.retire(alloc_region);
6133   // We update the eden sizes here, when the region is retired,
6134   // instead of when it's allocated, since this is the point at which its
6135   // used space has been recorded in _summary_bytes_used.
6136   g1mm()->update_eden_size();
6137 }
6138 
6139 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
6140                                                     bool force) {
6141   return _g1h->new_mutator_alloc_region(word_size, force);
6142 }
6143 
6144 void G1CollectedHeap::set_par_threads() {
6145   // Don't change the number of workers.  Use the value previously set
6146   // in the workgroup.
6147   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
6148   uint n_workers = workers()->active_workers();
6149   assert(UseDynamicNumberOfGCThreads ||
6150            n_workers == workers()->total_workers(),
6151       "Otherwise should be using the total number of workers");
6152   if (n_workers == 0) {
6153     assert(false, "Should have been set in prior evacuation pause.");
6154     n_workers = ParallelGCThreads;
6155     workers()->set_active_workers(n_workers);
6156   }
6157   set_par_threads(n_workers);
6158 }
6159 
6160 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
6161                                        size_t allocated_bytes) {
6162   _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
6163 }
6164 
6165 // Methods for the GC alloc regions
6166 
6167 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6168                                                  uint count,
6169                                                  GCAllocPurpose ap) {
6170   assert(FreeList_lock->owned_by_self(), "pre-condition");
6171 
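       // Only allocate a new GC alloc region if we are still below the limit
       // for this allocation purpose (survivor or old).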
6172   if (count < g1_policy()->max_regions(ap)) {
6173     HeapRegion* new_alloc_region = new_region(word_size,
6174                                               true /* do_expand */);
6175     if (new_alloc_region != NULL) {
6176       // We really only need to do this for old regions given that we
6177       // should never scan survivors. But it doesn't hurt to do it
6178       // for survivors too.
6179       new_alloc_region->set_saved_mark();
6180       if (ap == GCAllocForSurvived) {
6181         new_alloc_region->set_survivor();
6182         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6183       } else {
6184         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6185       }
6186       bool during_im = g1_policy()->during_initial_mark_pause();
6187       new_alloc_region->note_start_of_copying(during_im);
6188       return new_alloc_region;
6189     } else {
6190       g1_policy()->note_alloc_region_limit_reached(ap);
6191     }
6192   }
6193   return NULL;
6194 }
6195 
6196 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6197                                              size_t allocated_bytes,
6198                                              GCAllocPurpose ap) {
6199   bool during_im = g1_policy()->during_initial_mark_pause();
6200   alloc_region->note_end_of_copying(during_im);
6201   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6202   if (ap == GCAllocForSurvived) {
6203     young_list()->add_survivor_region(alloc_region);
6204   } else {
6205     _old_set.add(alloc_region);
6206   }
6207   _hr_printer.retire(alloc_region);
6208 }
6209 
6210 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
6211                                                        bool force) {
6212   assert(!force, "not supported for GC alloc regions");
6213   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
6214 }
6215 
6216 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
6217                                           size_t allocated_bytes) {
6218   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6219                                GCAllocForSurvived);
6220 }
6221 
6222 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
6223                                                   bool force) {
6224   assert(!force, "not supported for GC alloc regions");
6225   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
6226 }
6227 
6228 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
6229                                      size_t allocated_bytes) {
6230   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6231                                GCAllocForTenured);
6232 }
6233 // Heap region set verification
6234 
6235 class VerifyRegionListsClosure : public HeapRegionClosure {
6236 private:
6237   FreeRegionList*     _free_list;
6238   OldRegionSet*       _old_set;
6239   HumongousRegionSet* _humongous_set;
6240   uint                _region_count;
6241 
6242 public:
6243   VerifyRegionListsClosure(OldRegionSet* old_set,
6244                            HumongousRegionSet* humongous_set,
6245                            FreeRegionList* free_list) :
6246     _free_list(free_list), _old_set(old_set),
6247     _humongous_set(humongous_set), _region_count(0) { }
6248 
6249   uint region_count() { return _region_count; }
6250 
6251   bool doHeapRegion(HeapRegion* hr) {
6252     _region_count += 1;
6253 
6254     if (hr->continuesHumongous()) {
6255       return false;
6256     }
6257 
6258     if (hr->is_young()) {
6259       // TODO
6260     } else if (hr->startsHumongous()) {
6261       _humongous_set->verify_next_region(hr);
6262     } else if (hr->is_empty()) {
6263       _free_list->verify_next_region(hr);
6264     } else {
6265       _old_set->verify_next_region(hr);
6266     }
6267     return false;
6268   }
6269 };
6270 
6271 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
6272                                              HeapWord* bottom) {
6273   HeapWord* end = bottom + HeapRegion::GrainWords;
6274   MemRegion mr(bottom, end);
6275   assert(_g1_reserved.contains(mr), "invariant");
6276   // This might return NULL if the allocation fails
6277   return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
6278 }
6279 
6280 void G1CollectedHeap::verify_region_sets() {
6281   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6282 
6283   // First, check the explicit lists.
6284   _free_list.verify();
6285   {
6286     // Given that a concurrent operation might be adding regions to
6287     // the secondary free list we have to take the lock before
6288     // verifying it.
6289     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6290     _secondary_free_list.verify();
6291   }
6292   _old_set.verify();
6293   _humongous_set.verify();
6294 
6295   // If a concurrent region freeing operation is in progress it will
6296   // be difficult to correctly attribute any free regions we come
6297   // across to the correct free list given that they might belong to
6298   // one of several (free_list, secondary_free_list, any local lists,
6299   // etc.). So, if that's the case we will skip the rest of the
6300   // verification operation. Alternatively, waiting for the concurrent
6301   // operation to complete will have a non-trivial effect on the GC's
6302   // operation (no concurrent operation will last longer than the
6303   // interval between two calls to verification) and it might hide
6304   // any issues that we would like to catch during testing.
6305   if (free_regions_coming()) {
6306     return;
6307   }
6308 
6309   // Make sure we append the secondary_free_list on the free_list so
6310   // that all free regions we will come across can be safely
6311   // attributed to the free_list.
6312   append_secondary_free_list_if_not_empty_with_lock();
6313 
6314   // Finally, make sure that the region accounting in the lists is
6315   // consistent with what we see in the heap.
6316   _old_set.verify_start();
6317   _humongous_set.verify_start();
6318   _free_list.verify_start();
6319 
6320   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
6321   heap_region_iterate(&cl);
6322 
6323   _old_set.verify_end();
6324   _humongous_set.verify_end();
6325   _free_list.verify_end();
6326 }