1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/icBuffer.hpp"
  27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  35 #include "gc_implementation/g1/g1EvacFailure.hpp"
  36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  37 #include "gc_implementation/g1/g1Log.hpp"
  38 #include "gc_implementation/g1/g1MarkSweep.hpp"
  39 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  40 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  41 #include "gc_implementation/g1/heapRegion.inline.hpp"
  42 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  43 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  44 #include "gc_implementation/g1/vm_operations_g1.hpp"
  45 #include "gc_implementation/shared/isGCActiveMark.hpp"
  46 #include "memory/gcLocker.inline.hpp"
  47 #include "memory/genOopClosures.inline.hpp"
  48 #include "memory/generationSpec.hpp"
  49 #include "memory/referenceProcessor.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "oops/oop.pcgc.inline.hpp"
  52 #include "runtime/aprofiler.hpp"
  53 #include "runtime/vmThread.hpp"
  54 
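     // The humongous object size threshold, in words. It is set during heap
     // initialization (typically to half of HeapRegion::GrainWords); objects
     // at or above this size are allocated as humongous objects in their own
     // dedicated region(s) rather than in the young generation.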
  55 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  56 
  57 // Turn this on so that the contents of the young list (scan-only /
  58 // to-be-collected) are printed at "strategic" points before, during,
  59 // and after the collection --- this is useful for debugging.
  60 #define YOUNG_LIST_VERBOSE 0
  61 // CURRENT STATUS
  62 // This file is under construction.  Search for "FIXME".
  63 
  64 // INVARIANTS/NOTES
  65 //
  66 // All allocation activity covered by the G1CollectedHeap interface is
  67 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  68 // and allocate_new_tlab, which are the "entry" points to the
  69 // allocation code from the rest of the JVM.  (Note that this does not
  70 // apply to TLAB allocation, which is not part of this interface: it
  71 // is done by clients of this interface.)
  72 
  73 // Notes on implementation of parallelism in different tasks.
  74 //
  75 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
  76 // The number of GC workers is passed to heap_region_par_iterate_chunked().
  77 // It does use run_task() which sets _n_workers in the task.
  78 // G1ParTask executes g1_process_strong_roots() ->
   79 // SharedHeap::process_strong_roots() which eventually calls
  80 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
  81 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
  82 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  83 //
  84 
  85 // Local to this file.
  86 
  87 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  88   SuspendibleThreadSet* _sts;
  89   G1RemSet* _g1rs;
  90   ConcurrentG1Refine* _cg1r;
  91   bool _concurrent;
  92 public:
  93   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
  94                               G1RemSet* g1rs,
  95                               ConcurrentG1Refine* cg1r) :
  96     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  97   {}
  98   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
  99     bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
 100     // This path is executed by the concurrent refine or mutator threads,
 101     // concurrently, and so we do not care if card_ptr contains references
 102     // that point into the collection set.
 103     assert(!oops_into_cset, "should be");
 104 
 105     if (_concurrent && _sts->should_yield()) {
 106       // Caller will actually yield.
 107       return false;
 108     }
 109     // Otherwise, we finished successfully; return true.
 110     return true;
 111   }
 112   void set_concurrent(bool b) { _concurrent = b; }
 113 };
 114 
 115 
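     // Debugging closure used by check_ct_logs_at_safepoint() below: it clears
     // every logged card and records a histogram of the card values it saw.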
 116 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
 117   int _calls;
 118   G1CollectedHeap* _g1h;
 119   CardTableModRefBS* _ctbs;
 120   int _histo[256];
 121 public:
 122   ClearLoggedCardTableEntryClosure() :
 123     _calls(0)
 124   {
 125     _g1h = G1CollectedHeap::heap();
 126     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 127     for (int i = 0; i < 256; i++) _histo[i] = 0;
 128   }
 129   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 130     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 131       _calls++;
 132       unsigned char* ujb = (unsigned char*)card_ptr;
 133       int ind = (int)(*ujb);
 134       _histo[ind]++;
 135       *card_ptr = -1;
 136     }
 137     return true;
 138   }
 139   int calls() { return _calls; }
 140   void print_histo() {
 141     gclog_or_tty->print_cr("Card table value histogram:");
 142     for (int i = 0; i < 256; i++) {
 143       if (_histo[i] != 0) {
 144         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
 145       }
 146     }
 147   }
 148 };
 149 
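     // Companion to the closure above: it resets each logged card to the dirty
     // value so that check_ct_logs_at_safepoint() can verify that the card
     // table has been restored.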
 150 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
 151   int _calls;
 152   G1CollectedHeap* _g1h;
 153   CardTableModRefBS* _ctbs;
 154 public:
 155   RedirtyLoggedCardTableEntryClosure() :
 156     _calls(0)
 157   {
 158     _g1h = G1CollectedHeap::heap();
 159     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 160   }
 161   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 162     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 163       _calls++;
 164       *card_ptr = 0;
 165     }
 166     return true;
 167   }
 168   int calls() { return _calls; }
 169 };
 170 
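     // Faster variant of the closure above: it unconditionally re-dirties each
     // card without the is_in_reserved() check or call counting.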
 171 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
 172 public:
 173   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 174     *card_ptr = CardTableModRefBS::dirty_card_val();
 175     return true;
 176   }
 177 };
 178 
 179 YoungList::YoungList(G1CollectedHeap* g1h) :
 180     _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
 181     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
 182   guarantee(check_list_empty(false), "just making sure...");
 183 }
 184 
 185 void YoungList::push_region(HeapRegion *hr) {
 186   assert(!hr->is_young(), "should not already be young");
 187   assert(hr->get_next_young_region() == NULL, "cause it should!");
 188 
 189   hr->set_next_young_region(_head);
 190   _head = hr;
 191 
 192   _g1h->g1_policy()->set_region_eden(hr, (int) _length);
 193   ++_length;
 194 }
 195 
 196 void YoungList::add_survivor_region(HeapRegion* hr) {
 197   assert(hr->is_survivor(), "should be flagged as survivor region");
 198   assert(hr->get_next_young_region() == NULL, "cause it should!");
 199 
 200   hr->set_next_young_region(_survivor_head);
 201   if (_survivor_head == NULL) {
 202     _survivor_tail = hr;
 203   }
 204   _survivor_head = hr;
 205   ++_survivor_length;
 206 }
 207 
 208 void YoungList::empty_list(HeapRegion* list) {
 209   while (list != NULL) {
 210     HeapRegion* next = list->get_next_young_region();
 211     list->set_next_young_region(NULL);
 212     list->uninstall_surv_rate_group();
 213     list->set_not_young();
 214     list = next;
 215   }
 216 }
 217 
 218 void YoungList::empty_list() {
 219   assert(check_list_well_formed(), "young list should be well formed");
 220 
 221   empty_list(_head);
 222   _head = NULL;
 223   _length = 0;
 224 
 225   empty_list(_survivor_head);
 226   _survivor_head = NULL;
 227   _survivor_tail = NULL;
 228   _survivor_length = 0;
 229 
 230   _last_sampled_rs_lengths = 0;
 231 
 232   assert(check_list_empty(false), "just making sure...");
 233 }
 234 
 235 bool YoungList::check_list_well_formed() {
 236   bool ret = true;
 237 
 238   uint length = 0;
 239   HeapRegion* curr = _head;
 240   HeapRegion* last = NULL;
 241   while (curr != NULL) {
 242     if (!curr->is_young()) {
 243       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
 244                              "incorrectly tagged (y: %d, surv: %d)",
 245                              curr->bottom(), curr->end(),
 246                              curr->is_young(), curr->is_survivor());
 247       ret = false;
 248     }
 249     ++length;
 250     last = curr;
 251     curr = curr->get_next_young_region();
 252   }
 253   ret = ret && (length == _length);
 254 
 255   if (!ret) {
 256     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
 257     gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
 258                            length, _length);
 259   }
 260 
 261   return ret;
 262 }
 263 
 264 bool YoungList::check_list_empty(bool check_sample) {
 265   bool ret = true;
 266 
 267   if (_length != 0) {
 268     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
 269                   _length);
 270     ret = false;
 271   }
 272   if (check_sample && _last_sampled_rs_lengths != 0) {
 273     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
 274     ret = false;
 275   }
 276   if (_head != NULL) {
 277     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
 278     ret = false;
 279   }
 280   if (!ret) {
 281     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
 282   }
 283 
 284   return ret;
 285 }
 286 
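     // Remembered set length sampling. rs_length_sampling_init() / _more() /
     // _next() walk the young list, accumulate the occupied remembered set
     // sizes, and update the incremental collection set information that the
     // policy keeps for regions already in the collection set.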
 287 void
 288 YoungList::rs_length_sampling_init() {
 289   _sampled_rs_lengths = 0;
 290   _curr               = _head;
 291 }
 292 
 293 bool
 294 YoungList::rs_length_sampling_more() {
 295   return _curr != NULL;
 296 }
 297 
 298 void
 299 YoungList::rs_length_sampling_next() {
 300   assert( _curr != NULL, "invariant" );
 301   size_t rs_length = _curr->rem_set()->occupied();
 302 
 303   _sampled_rs_lengths += rs_length;
 304 
 305   // The current region may not yet have been added to the
 306   // incremental collection set (it gets added when it is
 307   // retired as the current allocation region).
 308   if (_curr->in_collection_set()) {
 309     // Update the collection set policy information for this region
 310     _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
 311   }
 312 
 313   _curr = _curr->get_next_young_region();
 314   if (_curr == NULL) {
 315     _last_sampled_rs_lengths = _sampled_rs_lengths;
 316     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
 317   }
 318 }
 319 
 320 void
 321 YoungList::reset_auxilary_lists() {
 322   guarantee( is_empty(), "young list should be empty" );
 323   assert(check_list_well_formed(), "young list should be well formed");
 324 
 325   // Add survivor regions to SurvRateGroup.
 326   _g1h->g1_policy()->note_start_adding_survivor_regions();
 327   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
 328 
 329   int young_index_in_cset = 0;
 330   for (HeapRegion* curr = _survivor_head;
 331        curr != NULL;
 332        curr = curr->get_next_young_region()) {
 333     _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
 334 
 335     // The region is a non-empty survivor so let's add it to
 336     // the incremental collection set for the next evacuation
 337     // pause.
 338     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
 339     young_index_in_cset += 1;
 340   }
 341   assert((uint) young_index_in_cset == _survivor_length, "post-condition");
 342   _g1h->g1_policy()->note_stop_adding_survivor_regions();
 343 
 344   _head   = _survivor_head;
 345   _length = _survivor_length;
 346   if (_survivor_head != NULL) {
 347     assert(_survivor_tail != NULL, "cause it shouldn't be");
 348     assert(_survivor_length > 0, "invariant");
 349     _survivor_tail->set_next_young_region(NULL);
 350   }
 351 
 352   // Don't clear the survivor list handles until the start of
 353   // the next evacuation pause - we need it in order to re-tag
 354   // the survivor regions from this evacuation pause as 'young'
 355   // at the start of the next.
 356 
 357   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
 358 
 359   assert(check_list_well_formed(), "young list should be well formed");
 360 }
 361 
 362 void YoungList::print() {
 363   HeapRegion* lists[] = {_head,   _survivor_head};
 364   const char* names[] = {"YOUNG", "SURVIVOR"};
 365 
 366   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
 367     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
 368     HeapRegion *curr = lists[list];
 369     if (curr == NULL)
 370       gclog_or_tty->print_cr("  empty");
 371     while (curr != NULL) {
 372       gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
 373                              HR_FORMAT_PARAMS(curr),
 374                              curr->prev_top_at_mark_start(),
 375                              curr->next_top_at_mark_start(),
 376                              curr->age_in_surv_rate_group_cond());
 377       curr = curr->get_next_young_region();
 378     }
 379   }
 380 
 381   gclog_or_tty->print_cr("");
 382 }
 383 
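     // Regions with dirty cards are kept on a lock-free, singly linked list
     // headed by _dirty_cards_region_list. The last region on the list points
     // to itself, and a NULL link means the region is not on the list.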
 384 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 385 {
 386   // Claim the right to put the region on the dirty cards region list
 387   // by installing a self pointer.
 388   HeapRegion* next = hr->get_next_dirty_cards_region();
 389   if (next == NULL) {
 390     HeapRegion* res = (HeapRegion*)
 391       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
 392                           NULL);
 393     if (res == NULL) {
 394       HeapRegion* head;
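           // Lock-free push: retry the CAS on the list head until this
           // region has been linked in.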
 395       do {
 396         // Put the region on the dirty cards region list.
 397         head = _dirty_cards_region_list;
 398         next = (HeapRegion*)
 399           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
 400         if (next == head) {
 401           assert(hr->get_next_dirty_cards_region() == hr,
 402                  "hr->get_next_dirty_cards_region() != hr");
 403           if (next == NULL) {
 404             // The last region in the list points to itself.
 405             hr->set_next_dirty_cards_region(hr);
 406           } else {
 407             hr->set_next_dirty_cards_region(next);
 408           }
 409         }
 410       } while (next != head);
 411     }
 412   }
 413 }
 414 
 415 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
 416 {
 417   HeapRegion* head;
 418   HeapRegion* hr;
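       // Lock-free pop: retry until the CAS successfully unlinks the current head.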
 419   do {
 420     head = _dirty_cards_region_list;
 421     if (head == NULL) {
 422       return NULL;
 423     }
 424     HeapRegion* new_head = head->get_next_dirty_cards_region();
 425     if (head == new_head) {
 426       // The last region.
 427       new_head = NULL;
 428     }
 429     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
 430                                           head);
 431   } while (hr != head);
 432   assert(hr != NULL, "invariant");
 433   hr->set_next_dirty_cards_region(NULL);
 434   return hr;
 435 }
 436 
 437 void G1CollectedHeap::stop_conc_gc_threads() {
 438   _cg1r->stop();
 439   _cmThread->stop();
 440 }
 441 
 442 #ifdef ASSERT
 443 // A region is added to the collection set as it is retired
 444 // so an address p can point to a region which will be in the
 445 // collection set but has not yet been retired.  This method
 446 // therefore is only accurate during a GC pause after all
 447 // regions have been retired.  It is used for debugging
 448 // to check if an nmethod has references to objects that can
  449 // be moved during a partial collection.  Though it can be
 450 // inaccurate, it is sufficient for G1 because the conservative
 451 // implementation of is_scavengable() for G1 will indicate that
 452 // all nmethods must be scanned during a partial collection.
 453 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
 454   HeapRegion* hr = heap_region_containing(p);
 455   return hr != NULL && hr->in_collection_set();
 456 }
 457 #endif
 458 
 459 // Returns true if the reference points to an object that
 460 // can move in an incremental collection.
 461 bool G1CollectedHeap::is_scavengable(const void* p) {
 462   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 463   G1CollectorPolicy* g1p = g1h->g1_policy();
 464   HeapRegion* hr = heap_region_containing(p);
 465   if (hr == NULL) {
 466      // null
 467      assert(p == NULL, err_msg("Not NULL " PTR_FORMAT, p));
 468      return false;
 469   } else {
 470     return !hr->isHumongous();
 471   }
 472 }
 473 
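     // Debugging check, run at a safepoint: clear all logged cards, verify that
     // the card table is clean, then re-dirty the logged cards and check that
     // the redirty count matches the clear count.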
 474 void G1CollectedHeap::check_ct_logs_at_safepoint() {
 475   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 476   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
 477 
 478   // Count the dirty cards at the start.
 479   CountNonCleanMemRegionClosure count1(this);
 480   ct_bs->mod_card_iterate(&count1);
 481   int orig_count = count1.n();
 482 
 483   // First clear the logged cards.
 484   ClearLoggedCardTableEntryClosure clear;
 485   dcqs.set_closure(&clear);
 486   dcqs.apply_closure_to_all_completed_buffers();
 487   dcqs.iterate_closure_all_threads(false);
 488   clear.print_histo();
 489 
 490   // Now ensure that there are no dirty cards.
 491   CountNonCleanMemRegionClosure count2(this);
 492   ct_bs->mod_card_iterate(&count2);
 493   if (count2.n() != 0) {
 494     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
 495                            count2.n(), orig_count);
 496   }
 497   guarantee(count2.n() == 0, "Card table should be clean.");
 498 
 499   RedirtyLoggedCardTableEntryClosure redirty;
 500   JavaThread::dirty_card_queue_set().set_closure(&redirty);
 501   dcqs.apply_closure_to_all_completed_buffers();
 502   dcqs.iterate_closure_all_threads(false);
 503   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
 504                          clear.calls(), orig_count);
 505   guarantee(redirty.calls() == clear.calls(),
 506             "Or else mechanism is broken.");
 507 
 508   CountNonCleanMemRegionClosure count3(this);
 509   ct_bs->mod_card_iterate(&count3);
 510   if (count3.n() != orig_count) {
 511     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
 512                            orig_count, count3.n());
 513     guarantee(count3.n() >= orig_count, "Should have restored them all.");
 514   }
 515 
 516   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
 517 }
 518 
 519 // Private class members.
 520 
 521 G1CollectedHeap* G1CollectedHeap::_g1h;
 522 
 523 // Private methods.
 524 
 525 HeapRegion*
 526 G1CollectedHeap::new_region_try_secondary_free_list() {
 527   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 528   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 529     if (!_secondary_free_list.is_empty()) {
 530       if (G1ConcRegionFreeingVerbose) {
 531         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 532                                "secondary_free_list has %u entries",
 533                                _secondary_free_list.length());
 534       }
 535       // It looks as if there are free regions available on the
 536       // secondary_free_list. Let's move them to the free_list and try
 537       // again to allocate from it.
 538       append_secondary_free_list();
 539 
 540       assert(!_free_list.is_empty(), "if the secondary_free_list was not "
 541              "empty we should have moved at least one entry to the free_list");
 542       HeapRegion* res = _free_list.remove_head();
 543       if (G1ConcRegionFreeingVerbose) {
 544         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 545                                "allocated "HR_FORMAT" from secondary_free_list",
 546                                HR_FORMAT_PARAMS(res));
 547       }
 548       return res;
 549     }
 550 
 551     // Wait here until we get notified either when (a) there are no
 552     // more free regions coming or (b) some regions have been moved onto
 553     // the secondary_free_list.
 554     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 555   }
 556 
 557   if (G1ConcRegionFreeingVerbose) {
 558     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 559                            "could not allocate from secondary_free_list");
 560   }
 561   return NULL;
 562 }
 563 
 564 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
 565   assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
 566          "the only time we use this to allocate a humongous region is "
 567          "when we are allocating a single humongous region");
 568 
 569   HeapRegion* res;
 570   if (G1StressConcRegionFreeing) {
 571     if (!_secondary_free_list.is_empty()) {
 572       if (G1ConcRegionFreeingVerbose) {
 573         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 574                                "forced to look at the secondary_free_list");
 575       }
 576       res = new_region_try_secondary_free_list();
 577       if (res != NULL) {
 578         return res;
 579       }
 580     }
 581   }
 582   res = _free_list.remove_head_or_null();
 583   if (res == NULL) {
 584     if (G1ConcRegionFreeingVerbose) {
 585       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 586                              "res == NULL, trying the secondary_free_list");
 587     }
 588     res = new_region_try_secondary_free_list();
 589   }
 590   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 591     // Currently, only attempts to allocate GC alloc regions set
 592     // do_expand to true. So, we should only reach here during a
 593     // safepoint. If this assumption changes we might have to
 594     // reconsider the use of _expand_heap_after_alloc_failure.
 595     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 596 
 597     ergo_verbose1(ErgoHeapSizing,
 598                   "attempt heap expansion",
 599                   ergo_format_reason("region allocation request failed")
 600                   ergo_format_byte("allocation request"),
 601                   word_size * HeapWordSize);
 602     if (expand(word_size * HeapWordSize)) {
 603       // Given that expand() succeeded in expanding the heap, and we
 604       // always expand the heap by an amount aligned to the heap
 605       // region size, the free list should in theory not be empty. So
 606       // it would probably be OK to use remove_head(). But the extra
 607       // check for NULL is unlikely to be a performance issue here (we
 608       // just expanded the heap!) so let's just be conservative and
 609       // use remove_head_or_null().
 610       res = _free_list.remove_head_or_null();
 611     } else {
 612       _expand_heap_after_alloc_failure = false;
 613     }
 614   }
 615   return res;
 616 }
 617 
 618 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
 619                                                         size_t word_size) {
 620   assert(isHumongous(word_size), "word_size should be humongous");
 621   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 622 
 623   uint first = G1_NULL_HRS_INDEX;
 624   if (num_regions == 1) {
 625     // Only one region to allocate, no need to go through the slower
 626     // path. The caller will attempt the expansion if this fails, so
 627     // let's not try to expand here too.
 628     HeapRegion* hr = new_region(word_size, false /* do_expand */);
 629     if (hr != NULL) {
 630       first = hr->hrs_index();
 631     } else {
 632       first = G1_NULL_HRS_INDEX;
 633     }
 634   } else {
 635     // We can't allocate humongous regions while cleanupComplete() is
 636     // running, since some of the regions we find to be empty might not
 637     // yet be added to the free list and it is not straightforward to
 638     // know which list they are on so that we can remove them. Note
 639     // that we only need to do this if we need to allocate more than
 640     // one region to satisfy the current humongous allocation
 641     // request. If we are only allocating one region we use the common
 642     // region allocation code (see above).
 643     wait_while_free_regions_coming();
 644     append_secondary_free_list_if_not_empty_with_lock();
 645 
 646     if (free_regions() >= num_regions) {
 647       first = _hrs.find_contiguous(num_regions);
 648       if (first != G1_NULL_HRS_INDEX) {
 649         for (uint i = first; i < first + num_regions; ++i) {
 650           HeapRegion* hr = region_at(i);
 651           assert(hr->is_empty(), "sanity");
 652           assert(is_on_master_free_list(hr), "sanity");
 653           hr->set_pending_removal(true);
 654         }
 655         _free_list.remove_all_pending(num_regions);
 656       }
 657     }
 658   }
 659   return first;
 660 }
 661 
 662 HeapWord*
 663 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 664                                                            uint num_regions,
 665                                                            size_t word_size) {
 666   assert(first != G1_NULL_HRS_INDEX, "pre-condition");
 667   assert(isHumongous(word_size), "word_size should be humongous");
 668   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 669 
 670   // Index of last region in the series + 1.
 671   uint last = first + num_regions;
 672 
 673   // We need to initialize the region(s) we just discovered. This is
 674   // a bit tricky given that it can happen concurrently with
 675   // refinement threads refining cards on these regions and
 676   // potentially wanting to refine the BOT as they are scanning
 677   // those cards (this can happen shortly after a cleanup; see CR
 678   // 6991377). So we have to set up the region(s) carefully and in
 679   // a specific order.
 680 
 681   // The word size sum of all the regions we will allocate.
 682   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 683   assert(word_size <= word_size_sum, "sanity");
 684 
 685   // This will be the "starts humongous" region.
 686   HeapRegion* first_hr = region_at(first);
 687   // The header of the new object will be placed at the bottom of
 688   // the first region.
 689   HeapWord* new_obj = first_hr->bottom();
 690   // This will be the new end of the first region in the series that
 691   // should also match the end of the last region in the series.
 692   HeapWord* new_end = new_obj + word_size_sum;
 693   // This will be the new top of the first region that will reflect
 694   // this allocation.
 695   HeapWord* new_top = new_obj + word_size;
 696 
 697   // First, we need to zero the header of the space that we will be
 698   // allocating. When we update top further down, some refinement
 699   // threads might try to scan the region. By zeroing the header we
 700   // ensure that any thread that will try to scan the region will
 701   // come across the zero klass word and bail out.
 702   //
 703   // NOTE: It would not have been correct to have used
 704   // CollectedHeap::fill_with_object() and make the space look like
 705   // an int array. The thread that is doing the allocation will
 706   // later update the object header to a potentially different array
 707   // type and, for a very short period of time, the klass and length
 708   // fields will be inconsistent. This could cause a refinement
 709   // thread to calculate the object size incorrectly.
 710   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 711 
 712   // We will set up the first region as "starts humongous". This
 713   // will also update the BOT covering all the regions to reflect
 714   // that there is a single object that starts at the bottom of the
 715   // first region.
 716   first_hr->set_startsHumongous(new_top, new_end);
 717 
 718   // Then, if there are any, we will set up the "continues
 719   // humongous" regions.
 720   HeapRegion* hr = NULL;
 721   for (uint i = first + 1; i < last; ++i) {
 722     hr = region_at(i);
 723     hr->set_continuesHumongous(first_hr);
 724   }
 725   // If we have "continues humongous" regions (hr != NULL), then the
 726   // end of the last one should match new_end.
 727   assert(hr == NULL || hr->end() == new_end, "sanity");
 728 
 729   // Up to this point no concurrent thread would have been able to
 730   // do any scanning on any region in this series. All the top
 731   // fields still point to bottom, so the intersection between
 732   // [bottom,top] and [card_start,card_end] will be empty. Before we
 733   // update the top fields, we'll do a storestore to make sure that
 734   // no thread sees the update to top before the zeroing of the
 735   // object header and the BOT initialization.
 736   OrderAccess::storestore();
 737 
 738   // Now that the BOT and the object header have been initialized,
 739   // we can update top of the "starts humongous" region.
 740   assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
 741          "new_top should be in this region");
 742   first_hr->set_top(new_top);
 743   if (_hr_printer.is_active()) {
 744     HeapWord* bottom = first_hr->bottom();
 745     HeapWord* end = first_hr->orig_end();
 746     if ((first + 1) == last) {
 747       // the series has a single humongous region
 748       _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
 749     } else {
 750       // the series has more than one humongous region
 751       _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
 752     }
 753   }
 754 
 755   // Now, we will update the top fields of the "continues humongous"
 756   // regions. The reason we need to do this is that, otherwise,
 757   // these regions would look empty and that would confuse parts of
 758   // G1. For example, the code that looks for a consecutive number
 759   // of empty regions will consider them empty and try to
 760   // re-allocate them. We can extend is_empty() to also include
 761   // !continuesHumongous(), but it is easier to just update the top
 762   // fields here. The way we set top for all regions (i.e., top ==
 763   // end for all regions but the last one, top == new_top for the
 764   // last one) is actually used when we will free up the humongous
 765   // region in free_humongous_region().
 766   hr = NULL;
 767   for (uint i = first + 1; i < last; ++i) {
 768     hr = region_at(i);
 769     if ((i + 1) == last) {
 770       // last continues humongous region
 771       assert(hr->bottom() < new_top && new_top <= hr->end(),
 772              "new_top should fall on this region");
 773       hr->set_top(new_top);
 774       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
 775     } else {
 776       // not last one
 777       assert(new_top > hr->end(), "new_top should be above this region");
 778       hr->set_top(hr->end());
 779       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
 780     }
 781   }
 782   // If we have continues humongous regions (hr != NULL), then the
 783   // end of the last one should match new_end and its top should
 784   // match new_top.
 785   assert(hr == NULL ||
 786          (hr->end() == new_end && hr->top() == new_top), "sanity");
 787 
 788   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
 789   _summary_bytes_used += first_hr->used();
 790   _humongous_set.add(first_hr);
 791 
 792   return new_obj;
 793 }
 794 
 795 // If the request can fit into the existing free regions without expansion, try that.
 796 // Otherwise, if the heap can be expanded by enough to make room, expand it
 797 // and retry the allocation.
 798 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
 799   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 800 
 801   verify_region_sets_optional();
 802 
 803   size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
 804   uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
 805   uint x_num = expansion_regions();
 806   uint fs = _hrs.free_suffix();
 807   uint first = humongous_obj_allocate_find_first(num_regions, word_size);
 808   if (first == G1_NULL_HRS_INDEX) {
 809     // The only thing we can do now is attempt expansion.
 810     if (fs + x_num >= num_regions) {
 811       // If the number of regions we're trying to allocate for this
 812       // object is at most the number of regions in the free suffix,
 813       // then the call to humongous_obj_allocate_find_first() above
 814       // should have succeeded and we wouldn't be here.
 815       //
 816       // We should only be trying to expand when the free suffix is
 817       // not sufficient for the object _and_ we have some expansion
 818       // room available.
 819       assert(num_regions > fs, "earlier allocation should have succeeded");
 820 
 821       ergo_verbose1(ErgoHeapSizing,
 822                     "attempt heap expansion",
 823                     ergo_format_reason("humongous allocation request failed")
 824                     ergo_format_byte("allocation request"),
 825                     word_size * HeapWordSize);
 826       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
 827         // Even though the heap was expanded, it might not have
 828         // reached the desired size. So, we cannot assume that the
 829         // allocation will succeed.
 830         first = humongous_obj_allocate_find_first(num_regions, word_size);
 831       }
 832     }
 833   }
 834 
 835   HeapWord* result = NULL;
 836   if (first != G1_NULL_HRS_INDEX) {
 837     result =
 838       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
 839     assert(result != NULL, "it should always return a valid result");
 840 
 841     // A successful humongous object allocation changes the used space
 842     // information of the old generation so we need to recalculate the
 843     // sizes and update the jstat counters here.
 844     g1mm()->update_sizes();
 845   }
 846 
 847   verify_region_sets_optional();
 848 
 849   return result;
 850 }
 851 
 852 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 853   assert_heap_not_locked_and_not_at_safepoint();
 854   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 855 
 856   unsigned int dummy_gc_count_before;
 857   return attempt_allocation(word_size, &dummy_gc_count_before);
 858 }
 859 
 860 HeapWord*
 861 G1CollectedHeap::mem_allocate(size_t word_size,
 862                               bool*  gc_overhead_limit_was_exceeded) {
 863   assert_heap_not_locked_and_not_at_safepoint();
 864 
 865   // Loop until the allocation is satisfied, or unsatisfied after GC.
 866   for (int try_count = 1; /* we'll return */; try_count += 1) {
 867     unsigned int gc_count_before;
 868 
 869     HeapWord* result = NULL;
 870     if (!isHumongous(word_size)) {
 871       result = attempt_allocation(word_size, &gc_count_before);
 872     } else {
 873       result = attempt_allocation_humongous(word_size, &gc_count_before);
 874     }
 875     if (result != NULL) {
 876       return result;
 877     }
 878 
 879     // Create the garbage collection operation...
 880     VM_G1CollectForAllocation op(gc_count_before, word_size);
 881     // ...and get the VM thread to execute it.
 882     VMThread::execute(&op);
 883 
 884     if (op.prologue_succeeded() && op.pause_succeeded()) {
 885       // If the operation was successful we'll return the result even
 886       // if it is NULL. If the allocation attempt failed immediately
 887       // after a Full GC, it's unlikely we'll be able to allocate now.
 888       HeapWord* result = op.result();
 889       if (result != NULL && !isHumongous(word_size)) {
 890         // Allocations that take place on VM operations do not do any
 891         // card dirtying and we have to do it here. We only have to do
 892         // this for non-humongous allocations, though.
 893         dirty_young_block(result, word_size);
 894       }
 895       return result;
 896     } else {
 897       assert(op.result() == NULL,
 898              "the result should be NULL if the VM op did not succeed");
 899     }
 900 
 901     // Give a warning if we seem to be looping forever.
 902     if ((QueuedAllocationWarningCount > 0) &&
 903         (try_count % QueuedAllocationWarningCount == 0)) {
 904       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
 905     }
 906   }
 907 
 908   ShouldNotReachHere();
 909   return NULL;
 910 }
 911 
 912 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 913                                            unsigned int *gc_count_before_ret) {
 914   // Make sure you read the note in attempt_allocation_humongous().
 915 
 916   assert_heap_not_locked_and_not_at_safepoint();
 917   assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
 918          "be called for humongous allocation requests");
 919 
 920   // We should only get here after the first-level allocation attempt
 921   // (attempt_allocation()) failed to allocate.
 922 
 923   // We will loop until a) we manage to successfully perform the
 924   // allocation or b) we successfully schedule a collection which
 925   // fails to perform the allocation. b) is the only case when we'll
 926   // return NULL.
 927   HeapWord* result = NULL;
 928   for (int try_count = 1; /* we'll return */; try_count += 1) {
 929     bool should_try_gc;
 930     unsigned int gc_count_before;
 931 
 932     {
 933       MutexLockerEx x(Heap_lock);
 934 
 935       result = _mutator_alloc_region.attempt_allocation_locked(word_size,
 936                                                       false /* bot_updates */);
 937       if (result != NULL) {
 938         return result;
 939       }
 940 
 941       // If we reach here, attempt_allocation_locked() above failed to
 942       // allocate a new region. So the mutator alloc region should be NULL.
 943       assert(_mutator_alloc_region.get() == NULL, "only way to get here");
 944 
 945       if (GC_locker::is_active_and_needs_gc()) {
 946         if (g1_policy()->can_expand_young_list()) {
 947           // No need for an ergo verbose message here,
 948           // can_expand_young_list() does this when it returns true.
 949           result = _mutator_alloc_region.attempt_allocation_force(word_size,
 950                                                       false /* bot_updates */);
 951           if (result != NULL) {
 952             return result;
 953           }
 954         }
 955         should_try_gc = false;
 956       } else {
 957         // The GCLocker may not be active but the GCLocker initiated
 958         // GC may not yet have been performed (GCLocker::needs_gc()
 959         // returns true). In this case we do not try this GC and
 960         // wait until the GCLocker initiated GC is performed, and
 961         // then retry the allocation.
 962         if (GC_locker::needs_gc()) {
 963           should_try_gc = false;
 964         } else {
 965           // Read the GC count while still holding the Heap_lock.
 966           gc_count_before = total_collections();
 967           should_try_gc = true;
 968         }
 969       }
 970     }
 971 
 972     if (should_try_gc) {
 973       bool succeeded;
 974       result = do_collection_pause(word_size, gc_count_before, &succeeded);
 975       if (result != NULL) {
 976         assert(succeeded, "only way to get back a non-NULL result");
 977         return result;
 978       }
 979 
 980       if (succeeded) {
 981         // If we get here we successfully scheduled a collection which
 982         // failed to allocate. No point in trying to allocate
 983         // further. We'll just return NULL.
 984         MutexLockerEx x(Heap_lock);
 985         *gc_count_before_ret = total_collections();
 986         return NULL;
 987       }
 988     } else {
 989       // The GCLocker is either active or the GCLocker initiated
 990       // GC has not yet been performed. Stall until it is and
 991       // then retry the allocation.
 992       GC_locker::stall_until_clear();
 993     }
 994 
 995     // We can reach here if we were unsuccessful in scheduling a
 996     // collection (because another thread beat us to it) or if we were
 997     // stalled due to the GC locker. In either case we should retry the
 998     // allocation attempt in case another thread successfully
 999     // performed a collection and reclaimed enough space. We do the
1000     // first attempt (without holding the Heap_lock) here and the
1001     // follow-on attempt will be at the start of the next loop
1002     // iteration (after taking the Heap_lock).
1003     result = _mutator_alloc_region.attempt_allocation(word_size,
1004                                                       false /* bot_updates */);
1005     if (result != NULL) {
1006       return result;
1007     }
1008 
1009     // Give a warning if we seem to be looping forever.
1010     if ((QueuedAllocationWarningCount > 0) &&
1011         (try_count % QueuedAllocationWarningCount == 0)) {
1012       warning("G1CollectedHeap::attempt_allocation_slow() "
1013               "retries %d times", try_count);
1014     }
1015   }
1016 
1017   ShouldNotReachHere();
1018   return NULL;
1019 }
1020 
1021 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1022                                           unsigned int * gc_count_before_ret) {
1023   // The structure of this method has a lot of similarities to
1024   // attempt_allocation_slow(). The reason these two were not merged
1025   // into a single one is that such a method would require several "if
1026   // allocation is not humongous do this, otherwise do that"
1027   // conditional paths which would obscure its flow. In fact, an early
1028   // version of this code did use a unified method which was harder to
1029   // follow and, as a result, it had subtle bugs that were hard to
1030   // track down. So keeping these two methods separate allows each to
1031   // be more readable. It will be good to keep these two in sync as
1032   // much as possible.
1033 
1034   assert_heap_not_locked_and_not_at_safepoint();
1035   assert(isHumongous(word_size), "attempt_allocation_humongous() "
1036          "should only be called for humongous allocations");
1037 
1038   // Humongous objects can exhaust the heap quickly, so we should check if we
1039   // need to start a marking cycle at each humongous object allocation. We do
1040   // the check before we do the actual allocation. The reason for doing it
1041   // before the allocation is that we avoid having to keep track of the newly
1042   // allocated memory while we do a GC.
1043   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
1044                                            word_size)) {
1045     collect(GCCause::_g1_humongous_allocation);
1046   }
1047 
1048   // We will loop until a) we manage to successfully perform the
1049   // allocation or b) we successfully schedule a collection which
1050   // fails to perform the allocation. b) is the only case when we'll
1051   // return NULL.
1052   HeapWord* result = NULL;
1053   for (int try_count = 1; /* we'll return */; try_count += 1) {
1054     bool should_try_gc;
1055     unsigned int gc_count_before;
1056 
1057     {
1058       MutexLockerEx x(Heap_lock);
1059 
1060       // Given that humongous objects are not allocated in young
1061       // regions, we'll first try to do the allocation without doing a
1062       // collection hoping that there's enough space in the heap.
1063       result = humongous_obj_allocate(word_size);
1064       if (result != NULL) {
1065         return result;
1066       }
1067 
1068       if (GC_locker::is_active_and_needs_gc()) {
1069         should_try_gc = false;
1070       } else {
1071         // The GCLocker may not be active but the GCLocker initiated
1072         // GC may not yet have been performed (GCLocker::needs_gc()
1073         // returns true). In this case we do not try this GC and
1074         // wait until the GCLocker initiated GC is performed, and
1075         // then retry the allocation.
1076         if (GC_locker::needs_gc()) {
1077           should_try_gc = false;
1078         } else {
1079           // Read the GC count while still holding the Heap_lock.
1080           gc_count_before = total_collections();
1081           should_try_gc = true;
1082         }
1083       }
1084     }
1085 
1086     if (should_try_gc) {
1087       // If we failed to allocate the humongous object, we should try to
1088       // do a collection pause (if we're allowed) in case it reclaims
1089       // enough space for the allocation to succeed after the pause.
1090 
1091       bool succeeded;
1092       result = do_collection_pause(word_size, gc_count_before, &succeeded);
1093       if (result != NULL) {
1094         assert(succeeded, "only way to get back a non-NULL result");
1095         return result;
1096       }
1097 
1098       if (succeeded) {
1099         // If we get here we successfully scheduled a collection which
1100         // failed to allocate. No point in trying to allocate
1101         // further. We'll just return NULL.
1102         MutexLockerEx x(Heap_lock);
1103         *gc_count_before_ret = total_collections();
1104         return NULL;
1105       }
1106     } else {
1107       // The GCLocker is either active or the GCLocker initiated
1108       // GC has not yet been performed. Stall until it is and
1109       // then retry the allocation.
1110       GC_locker::stall_until_clear();
1111     }
1112 
1113     // We can reach here if we were unsuccessful in scheduling a
1114     // collection (because another thread beat us to it) or if we were
1115     // stalled due to the GC locker. In either case we should retry the
1116     // allocation attempt in case another thread successfully
1117     // performed a collection and reclaimed enough space.  Give a
1118     // warning if we seem to be looping forever.
1119 
1120     if ((QueuedAllocationWarningCount > 0) &&
1121         (try_count % QueuedAllocationWarningCount == 0)) {
1122       warning("G1CollectedHeap::attempt_allocation_humongous() "
1123               "retries %d times", try_count);
1124     }
1125   }
1126 
1127   ShouldNotReachHere();
1128   return NULL;
1129 }
1130 
1131 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1132                                        bool expect_null_mutator_alloc_region) {
1133   assert_at_safepoint(true /* should_be_vm_thread */);
1134   assert(_mutator_alloc_region.get() == NULL ||
1135                                              !expect_null_mutator_alloc_region,
1136          "the current alloc region was unexpectedly found to be non-NULL");
1137 
1138   if (!isHumongous(word_size)) {
1139     return _mutator_alloc_region.attempt_allocation_locked(word_size,
1140                                                       false /* bot_updates */);
1141   } else {
1142     HeapWord* result = humongous_obj_allocate(word_size);
1143     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1144       g1_policy()->set_initiate_conc_mark_if_possible();
1145     }
1146     return result;
1147   }
1148 
1149   ShouldNotReachHere();
1150 }
1151 
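     // Closure that clears the remembered set and the card table entries of
     // every region; used by clear_rsets_post_compaction() below after a full GC.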
1152 class PostMCRemSetClearClosure: public HeapRegionClosure {
1153   G1CollectedHeap* _g1h;
1154   ModRefBarrierSet* _mr_bs;
1155 public:
1156   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1157     _g1h(g1h), _mr_bs(mr_bs) { }
1158   bool doHeapRegion(HeapRegion* r) {
1159     if (r->continuesHumongous()) {
1160       return false;
1161     }
1162     _g1h->reset_gc_time_stamps(r);
1163     HeapRegionRemSet* hrrs = r->rem_set();
1164     if (hrrs != NULL) hrrs->clear();
1165     // You might think here that we could clear just the cards
1166     // corresponding to the used region.  But no: if we leave a dirty card
1167     // in a region we might allocate into, then it would prevent that card
1168     // from being enqueued, and cause it to be missed.
1169     // Re: the performance cost: we shouldn't be doing full GC anyway!
1170     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1171     return false;
1172   }
1173 };
1174 
1175 void G1CollectedHeap::clear_rsets_post_compaction() {
1176   PostMCRemSetClearClosure rs_clear(this, mr_bs());
1177   heap_region_iterate(&rs_clear);
1178 }
1179 
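     // Rebuilds remembered set entries by iterating over all the oops in a
     // region and applying an UpdateRSOopClosure to them; driven in parallel
     // by ParRebuildRSTask below.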
1180 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1181   G1CollectedHeap*   _g1h;
1182   UpdateRSOopClosure _cl;
1183   int                _worker_i;
1184 public:
1185   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1186     _cl(g1->g1_rem_set(), worker_i),
1187     _worker_i(worker_i),
1188     _g1h(g1)
1189   { }
1190 
1191   bool doHeapRegion(HeapRegion* r) {
1192     if (!r->continuesHumongous()) {
1193       _cl.set_from(r);
1194       r->oop_iterate(&_cl);
1195     }
1196     return false;
1197   }
1198 };
1199 
1200 class ParRebuildRSTask: public AbstractGangTask {
1201   G1CollectedHeap* _g1;
1202 public:
1203   ParRebuildRSTask(G1CollectedHeap* g1)
1204     : AbstractGangTask("ParRebuildRSTask"),
1205       _g1(g1)
1206   { }
1207 
1208   void work(uint worker_id) {
1209     RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1210     _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
1211                                           _g1->workers()->active_workers(),
1212                                          HeapRegion::RebuildRSClaimValue);
1213   }
1214 };
1215 
1216 class PostCompactionPrinterClosure: public HeapRegionClosure {
1217 private:
1218   G1HRPrinter* _hr_printer;
1219 public:
1220   bool doHeapRegion(HeapRegion* hr) {
1221     assert(!hr->is_young(), "not expecting to find young regions");
1222     // We only generate output for non-empty regions.
1223     if (!hr->is_empty()) {
1224       if (!hr->isHumongous()) {
1225         _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1226       } else if (hr->startsHumongous()) {
1227         if (hr->region_num() == 1) {
1228           // single humongous region
1229           _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1230         } else {
1231           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1232         }
1233       } else {
1234         assert(hr->continuesHumongous(), "only way to get here");
1235         _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1236       }
1237     }
1238     return false;
1239   }
1240 
1241   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1242     : _hr_printer(hr_printer) { }
1243 };
1244 
1245 void G1CollectedHeap::print_hrs_post_compaction() {
1246   PostCompactionPrinterClosure cl(hr_printer());
1247   heap_region_iterate(&cl);
1248 }
1249 
1250 double G1CollectedHeap::verify(bool guard, const char* msg) {
1251   double verify_time_ms = 0.0;
1252 
1253   if (guard && total_collections() >= VerifyGCStartAt) {
1254     double verify_start = os::elapsedTime();
1255     HandleMark hm;  // Discard invalid handles created during verification
1256     gclog_or_tty->print(msg);
1257     prepare_for_verify();
1258     Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking);
1259     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
1260   }
1261 
1262   return verify_time_ms;
1263 }
1264 
1265 void G1CollectedHeap::verify_before_gc() {
1266   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
1267   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
1268 }
1269 
1270 void G1CollectedHeap::verify_after_gc() {
1271   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
1272   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
1273 }
1274 
1275 bool G1CollectedHeap::do_collection(bool explicit_gc,
1276                                     bool clear_all_soft_refs,
1277                                     size_t word_size) {
1278   assert_at_safepoint(true /* should_be_vm_thread */);
1279 
1280   if (GC_locker::check_active_before_gc()) {
1281     return false;
1282   }
1283 
1284   SvcGCMarker sgcm(SvcGCMarker::FULL);
1285   ResourceMark rm;
1286 
1287   print_heap_before_gc();
1288 
1289   size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
1290 
1291   HRSPhaseSetter x(HRSPhaseFullGC);
1292   verify_region_sets_optional();
1293 
1294   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1295                            collector_policy()->should_clear_all_soft_refs();
1296 
1297   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1298 
1299   {
1300     IsGCActiveMark x;
1301 
1302     // Timing
1303     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1304     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
1305     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1306 
1307     TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
1308     TraceCollectorStats tcs(g1mm()->full_collection_counters());
1309     TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1310 
1311     double start = os::elapsedTime();
1312     g1_policy()->record_full_collection_start();
1313 
1314     // Note: When we have a more flexible GC logging framework that
1315     // allows us to add optional attributes to a GC log record we
1316     // could consider timing and reporting how long we wait in the
1317     // following two methods.
1318     wait_while_free_regions_coming();
1319     // If we start the compaction before the CM threads finish
1320     // scanning the root regions we might trip them over as we'll
1321     // be moving objects / updating references. So let's wait until
1322     // they are done. By telling them to abort, they should complete
1323     // early.
1324     _cm->root_regions()->abort();
1325     _cm->root_regions()->wait_until_scan_finished();
1326     append_secondary_free_list_if_not_empty_with_lock();
1327 
1328     gc_prologue(true);
1329     increment_total_collections(true /* full gc */);
1330     increment_old_marking_cycles_started();
1331 
1332     size_t g1h_prev_used = used();
1333     assert(used() == recalculate_used(), "Should be equal");
1334 
1335     verify_before_gc();
1336 
1337     pre_full_gc_dump();
1338 
1339     COMPILER2_PRESENT(DerivedPointerTable::clear());
1340 
1341     // Disable discovery and empty the discovered lists
1342     // for the CM ref processor.
1343     ref_processor_cm()->disable_discovery();
1344     ref_processor_cm()->abandon_partial_discovery();
1345     ref_processor_cm()->verify_no_references_recorded();
1346 
1347     // Abandon current iterations of concurrent marking and concurrent
1348     // refinement, if any are in progress. We have to do this before
1349     // wait_until_scan_finished() below.
1350     concurrent_mark()->abort();
1351 
1352     // Make sure we'll choose a new allocation region afterwards.
1353     release_mutator_alloc_region();
1354     abandon_gc_alloc_regions();
1355     g1_rem_set()->cleanupHRRS();
1356 
1357     // We should call this after we retire any currently active alloc
1358     // regions so that all the ALLOC / RETIRE events are generated
1359     // before the start GC event.
1360     _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1361 
1362     // We may have added regions to the current incremental collection
1363     // set between the last GC or pause and now. We need to clear the
1364     // incremental collection set and then start rebuilding it afresh
1365     // after this full GC.
1366     abandon_collection_set(g1_policy()->inc_cset_head());
1367     g1_policy()->clear_incremental_cset();
1368     g1_policy()->stop_incremental_cset_building();
1369 
1370     tear_down_region_sets(false /* free_list_only */);
1371     g1_policy()->set_gcs_are_young(true);
1372 
1373     // See the comments in g1CollectedHeap.hpp and
1374     // G1CollectedHeap::ref_processing_init() about
1375     // how reference processing currently works in G1.
1376 
1377     // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1378     ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1379 
1380     // Temporarily clear the STW ref processor's _is_alive_non_header field.
1381     ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1382 
1383     ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
1384     ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1385 
1386     // Do collection work
1387     {
1388       HandleMark hm;  // Discard invalid handles created during gc
1389       G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1390     }
1391 
1392     assert(free_regions() == 0, "we should not have added any free regions");
1393     rebuild_region_sets(false /* free_list_only */);
1394 
1395     // Enqueue any discovered reference objects that have
1396     // not been removed from the discovered lists.
1397     ref_processor_stw()->enqueue_discovered_references();
1398 
1399     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1400 
1401     MemoryService::track_memory_usage();
1402 
1403     verify_after_gc();
1404 
1405     assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1406     ref_processor_stw()->verify_no_references_recorded();
1407 
1408     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1409     ClassLoaderDataGraph::purge();
1410 
1411     // Note: since we've just done a full GC, concurrent
1412     // marking is no longer active. Therefore we need not
1413     // re-enable reference discovery for the CM ref processor.
1414     // That will be done at the start of the next marking cycle.
1415     assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1416     ref_processor_cm()->verify_no_references_recorded();
1417 
1418     reset_gc_time_stamp();
1419     // Since everything potentially moved, we will clear all remembered
1420     // sets, and clear all cards.  Later we will rebuild remembered
1421     // sets. We will also reset the GC time stamps of the regions.
1422     clear_rsets_post_compaction();
1423     check_gc_time_stamps();
1424 
1425     // Resize the heap if necessary.
1426     resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1427 
1428     if (_hr_printer.is_active()) {
1429       // We should do this after we potentially resize the heap so
1430       // that all the COMMIT / UNCOMMIT events are generated before
1431       // the end GC event.
1432 
1433       print_hrs_post_compaction();
1434       _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1435     }
1436 
1437     if (_cg1r->use_cache()) {
1438       _cg1r->clear_and_record_card_counts();
1439       _cg1r->clear_hot_cache();
1440     }
1441 
1442     // Rebuild remembered sets of all regions.
1443     if (G1CollectedHeap::use_parallel_gc_threads()) {
1444       uint n_workers =
1445         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1446                                        workers()->active_workers(),
1447                                        Threads::number_of_non_daemon_threads());
1448       assert(UseDynamicNumberOfGCThreads ||
1449              n_workers == workers()->total_workers(),
1450              "If not dynamic should be using all the  workers");
1451       workers()->set_active_workers(n_workers);
1452       // Set parallel threads in the heap (_n_par_threads) only
1453       // before a parallel phase and always reset it to 0 after
1454       // the phase so that the number of parallel threads does
1455       // not get carried forward to a serial phase where there
1456       // may be code that is "possibly_parallel".
1457       set_par_threads(n_workers);
1458 
1459       ParRebuildRSTask rebuild_rs_task(this);
1460       assert(check_heap_region_claim_values(
1461              HeapRegion::InitialClaimValue), "sanity check");
1462       assert(UseDynamicNumberOfGCThreads ||
1463              workers()->active_workers() == workers()->total_workers(),
1464         "Unless dynamic should use total workers");
1465       // Use the most recent number of active workers
1466       assert(workers()->active_workers() > 0,
1467         "Active workers not properly set");
1468       set_par_threads(workers()->active_workers());
1469       workers()->run_task(&rebuild_rs_task);
1470       set_par_threads(0);
1471       assert(check_heap_region_claim_values(
1472              HeapRegion::RebuildRSClaimValue), "sanity check");
1473       reset_heap_region_claim_values();
1474     } else {
1475       RebuildRSOutOfRegionClosure rebuild_rs(this);
1476       heap_region_iterate(&rebuild_rs);
1477     }
1478 
1479     if (G1Log::fine()) {
1480       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
1481     }
1482 
1483     if (true) { // FIXME
1484       MetaspaceGC::compute_new_size();
1485     }
1486 
1487     // Start a new incremental collection set for the next pause
1488     assert(g1_policy()->collection_set() == NULL, "must be");
1489     g1_policy()->start_incremental_cset_building();
1490 
1491     // Clear the _cset_fast_test bitmap in anticipation of adding
1492     // regions to the incremental collection set for the next
1493     // evacuation pause.
1494     clear_cset_fast_test();
1495 
1496     init_mutator_alloc_region();
1497 
1498     double end = os::elapsedTime();
1499     g1_policy()->record_full_collection_end();
1500 
1501 #ifdef TRACESPINNING
1502     ParallelTaskTerminator::print_termination_counts();
1503 #endif
1504 
1505     gc_epilogue(true);
1506 
1507     // Discard all rset updates
1508     JavaThread::dirty_card_queue_set().abandon_logs();
1509     assert(!G1DeferredRSUpdate
1510            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1511 
1512     _young_list->reset_sampled_info();
1513     // At this point there should be no regions in the
1514     // entire heap tagged as young.
1515     assert( check_young_list_empty(true /* check_heap */),
1516       "young list should be empty at this point");
1517 
1518     // Update the number of full collections that have been completed.
1519     increment_old_marking_cycles_completed(false /* concurrent */);
1520 
1521     _hrs.verify_optional();
1522     verify_region_sets_optional();
1523 
1524     print_heap_after_gc();
1525 
1526     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1527     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1528     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1529     // before any GC notifications are raised.
1530     g1mm()->update_sizes();
1531   }
1532 
1533   post_full_gc_dump();
1534 
1535   return true;
1536 }
1537 
1538 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1539   // do_collection() will return whether it succeeded in performing
1540   // the GC. Currently, there is no facility on the
1541   // do_full_collection() API to notify the caller that the collection
1542   // did not succeed (e.g., because it was locked out by the GC
1543   // locker). So, right now, we'll ignore the return value.
1544   bool dummy = do_collection(true,                /* explicit_gc */
1545                              clear_all_soft_refs,
1546                              0                    /* word_size */);
1547 }
1548 
1549 // This code is mostly copied from TenuredGeneration.
1550 void
1551 G1CollectedHeap::
1552 resize_if_necessary_after_full_collection(size_t word_size) {
1553   assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
1554 
1555   // Include the current allocation, if any, and bytes that will be
1556   // pre-allocated to support collections, as "used".
1557   const size_t used_after_gc = used();
1558   const size_t capacity_after_gc = capacity();
1559   const size_t free_after_gc = capacity_after_gc - used_after_gc;
1560 
1561   // This is enforced in arguments.cpp.
1562   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1563          "otherwise the code below doesn't make sense");
1564 
1565   // We don't have floating point command-line arguments
1566   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1567   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1568   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1569   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1570 
1571   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1572   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1573 
1574   // We have to be careful here as these two calculations can overflow
1575   // 32-bit size_t's.
1576   double used_after_gc_d = (double) used_after_gc;
1577   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1578   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
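  // Illustrative example (these are not necessarily the default flag
  // values): with MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70,
  // maximum_used_percentage is 0.6 and minimum_used_percentage is 0.3;
  // if used_after_gc is 600 MB, the desired capacity range computed
  // below is [1000 MB, 2000 MB].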
1579 
1580   // Let's make sure that they are both under the max heap size, which
1581   // by default will make them fit into a size_t.
1582   double desired_capacity_upper_bound = (double) max_heap_size;
1583   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1584                                     desired_capacity_upper_bound);
1585   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1586                                     desired_capacity_upper_bound);
1587 
1588   // We can now safely turn them into size_t's.
1589   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1590   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1591 
1592   // This assert only makes sense here, before we adjust them
1593   // with respect to the min and max heap size.
1594   assert(minimum_desired_capacity <= maximum_desired_capacity,
1595          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
1596                  "maximum_desired_capacity = "SIZE_FORMAT,
1597                  minimum_desired_capacity, maximum_desired_capacity));
1598 
1599   // Should not be greater than the heap max size. No need to adjust
1600   // it with respect to the heap min size as it's a lower bound (i.e.,
1601   // we'll try to make the capacity larger than it, not smaller).
1602   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1603   // Should not be less than the heap min size. No need to adjust it
1604   // with respect to the heap max size as it's an upper bound (i.e.,
1605   // we'll try to make the capacity smaller than it, not greater).
1606   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1607 
1608   if (capacity_after_gc < minimum_desired_capacity) {
1609     // Don't expand unless it's significant
1610     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1611     ergo_verbose4(ErgoHeapSizing,
1612                   "attempt heap expansion",
1613                   ergo_format_reason("capacity lower than "
1614                                      "min desired capacity after Full GC")
1615                   ergo_format_byte("capacity")
1616                   ergo_format_byte("occupancy")
1617                   ergo_format_byte_perc("min desired capacity"),
1618                   capacity_after_gc, used_after_gc,
1619                   minimum_desired_capacity, (double) MinHeapFreeRatio);
1620     expand(expand_bytes);
1621 
1622     // No expansion, now see if we want to shrink
1623   } else if (capacity_after_gc > maximum_desired_capacity) {
1624     // Capacity too large, compute shrinking size
1625     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1626     ergo_verbose4(ErgoHeapSizing,
1627                   "attempt heap shrinking",
1628                   ergo_format_reason("capacity higher than "
1629                                      "max desired capacity after Full GC")
1630                   ergo_format_byte("capacity")
1631                   ergo_format_byte("occupancy")
1632                   ergo_format_byte_perc("max desired capacity"),
1633                   capacity_after_gc, used_after_gc,
1634                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
1635     shrink(shrink_bytes);
1636   }
1637 }
1638 
1639 
1640 HeapWord*
1641 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1642                                            bool* succeeded) {
1643   assert_at_safepoint(true /* should_be_vm_thread */);
1644 
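  // The escalation order below is: (1) try the allocation as-is,
  // (2) expand the heap and retry, (3) do a Full GC and retry,
  // (4) do a Full GC that also clears all soft references and retry,
  // and finally (5) give up and return NULL.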
1645   *succeeded = true;
1646   // Let's attempt the allocation first.
1647   HeapWord* result =
1648     attempt_allocation_at_safepoint(word_size,
1649                                  false /* expect_null_mutator_alloc_region */);
1650   if (result != NULL) {
1651     assert(*succeeded, "sanity");
1652     return result;
1653   }
1654 
1655   // In a G1 heap, we're supposed to keep allocation from failing by
1656   // incremental pauses.  Therefore, at least for now, we'll favor
1657   // expansion over collection.  (This might change in the future if we can
1658   // do something smarter than full collection to satisfy a failed alloc.)
1659   result = expand_and_allocate(word_size);
1660   if (result != NULL) {
1661     assert(*succeeded, "sanity");
1662     return result;
1663   }
1664 
1665   // Expansion didn't work, we'll try to do a Full GC.
1666   bool gc_succeeded = do_collection(false, /* explicit_gc */
1667                                     false, /* clear_all_soft_refs */
1668                                     word_size);
1669   if (!gc_succeeded) {
1670     *succeeded = false;
1671     return NULL;
1672   }
1673 
1674   // Retry the allocation
1675   result = attempt_allocation_at_safepoint(word_size,
1676                                   true /* expect_null_mutator_alloc_region */);
1677   if (result != NULL) {
1678     assert(*succeeded, "sanity");
1679     return result;
1680   }
1681 
1682   // Then, try a Full GC that will collect all soft references.
1683   gc_succeeded = do_collection(false, /* explicit_gc */
1684                                true,  /* clear_all_soft_refs */
1685                                word_size);
1686   if (!gc_succeeded) {
1687     *succeeded = false;
1688     return NULL;
1689   }
1690 
1691   // Retry the allocation once more
1692   result = attempt_allocation_at_safepoint(word_size,
1693                                   true /* expect_null_mutator_alloc_region */);
1694   if (result != NULL) {
1695     assert(*succeeded, "sanity");
1696     return result;
1697   }
1698 
1699   assert(!collector_policy()->should_clear_all_soft_refs(),
1700          "Flag should have been handled and cleared prior to this point");
1701 
1702   // What else?  We might try synchronous finalization later.  If the total
1703   // space available is large enough for the allocation, then a more
1704   // complete compaction phase than we've tried so far might be
1705   // appropriate.
1706   assert(*succeeded, "sanity");
1707   return NULL;
1708 }
1709 
1710 // Attempt to expand the heap sufficiently to support an allocation
1711 // of the given "word_size". If successful, perform the allocation
1712 // and return the address of the allocated block; otherwise return
1713 // "NULL".
1714 
1715 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1716   assert_at_safepoint(true /* should_be_vm_thread */);
1717 
1718   verify_region_sets_optional();
1719 
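  // Expand by at least enough to satisfy the failed allocation, but
  // never by less than MinHeapDeltaBytes.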
1720   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1721   ergo_verbose1(ErgoHeapSizing,
1722                 "attempt heap expansion",
1723                 ergo_format_reason("allocation request failed")
1724                 ergo_format_byte("allocation request"),
1725                 word_size * HeapWordSize);
1726   if (expand(expand_bytes)) {
1727     _hrs.verify_optional();
1728     verify_region_sets_optional();
1729     return attempt_allocation_at_safepoint(word_size,
1730                                  false /* expect_null_mutator_alloc_region */);
1731   }
1732   return NULL;
1733 }
1734 
1735 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
1736                                              HeapWord* new_end) {
1737   assert(old_end != new_end, "don't call this otherwise");
1738   assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
1739 
1740   // Update the committed mem region.
1741   _g1_committed.set_end(new_end);
1742   // Tell the card table about the update.
1743   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1744   // Tell the BOT about the update.
1745   _bot_shared->resize(_g1_committed.word_size());
1746 }
1747 
1748 bool G1CollectedHeap::expand(size_t expand_bytes) {
1749   size_t old_mem_size = _g1_storage.committed_size();
1750   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1751   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1752                                        HeapRegion::GrainBytes);
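  // The expansion request is rounded up to whole pages and then to whole
  // regions; e.g., assuming the page size does not exceed the region size,
  // any non-zero request smaller than HeapRegion::GrainBytes ends up
  // expanding by exactly one region.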
1753   ergo_verbose2(ErgoHeapSizing,
1754                 "expand the heap",
1755                 ergo_format_byte("requested expansion amount")
1756                 ergo_format_byte("attempted expansion amount"),
1757                 expand_bytes, aligned_expand_bytes);
1758 
1759   // First commit the memory.
1760   HeapWord* old_end = (HeapWord*) _g1_storage.high();
1761   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
1762   if (successful) {
1763     // Then propagate this update to the necessary data structures.
1764     HeapWord* new_end = (HeapWord*) _g1_storage.high();
1765     update_committed_space(old_end, new_end);
1766 
1767     FreeRegionList expansion_list("Local Expansion List");
1768     MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
1769     assert(mr.start() == old_end, "post-condition");
1770     // mr might be a smaller region than what was requested if
1771     // expand_by() was unable to allocate the HeapRegion instances
1772     assert(mr.end() <= new_end, "post-condition");
1773 
1774     size_t actual_expand_bytes = mr.byte_size();
1775     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1776     assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
1777            "post-condition");
1778     if (actual_expand_bytes < aligned_expand_bytes) {
1779       // We could not expand _hrs to the desired size. In this case we
1780       // need to shrink the committed space accordingly.
1781       assert(mr.end() < new_end, "invariant");
1782 
1783       size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
1784       // First uncommit the memory.
1785       _g1_storage.shrink_by(diff_bytes);
1786       // Then propagate this update to the necessary data structures.
1787       update_committed_space(new_end, mr.end());
1788     }
1789     _free_list.add_as_tail(&expansion_list);
1790 
1791     if (_hr_printer.is_active()) {
1792       HeapWord* curr = mr.start();
1793       while (curr < mr.end()) {
1794         HeapWord* curr_end = curr + HeapRegion::GrainWords;
1795         _hr_printer.commit(curr, curr_end);
1796         curr = curr_end;
1797       }
1798       assert(curr == mr.end(), "post-condition");
1799     }
1800     g1_policy()->record_new_heap_size(n_regions());
1801   } else {
1802     ergo_verbose0(ErgoHeapSizing,
1803                   "did not expand the heap",
1804                   ergo_format_reason("heap expansion operation failed"));
1805     // The expansion of the virtual storage space was unsuccessful.
1806     // Let's see if it was because we ran out of swap.
1807     if (G1ExitOnExpansionFailure &&
1808         _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
1809       // We had head room...
1810       vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
1811     }
1812   }
1813   return successful;
1814 }
1815 
1816 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1817   size_t old_mem_size = _g1_storage.committed_size();
1818   size_t aligned_shrink_bytes =
1819     ReservedSpace::page_align_size_down(shrink_bytes);
1820   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1821                                          HeapRegion::GrainBytes);
1822   uint num_regions_deleted = 0;
1823   MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
1824   HeapWord* old_end = (HeapWord*) _g1_storage.high();
1825   assert(mr.end() == old_end, "post-condition");
1826 
1827   ergo_verbose3(ErgoHeapSizing,
1828                 "shrink the heap",
1829                 ergo_format_byte("requested shrinking amount")
1830                 ergo_format_byte("aligned shrinking amount")
1831                 ergo_format_byte("attempted shrinking amount"),
1832                 shrink_bytes, aligned_shrink_bytes, mr.byte_size());
1833   if (mr.byte_size() > 0) {
1834     if (_hr_printer.is_active()) {
1835       HeapWord* curr = mr.end();
1836       while (curr > mr.start()) {
1837         HeapWord* curr_end = curr;
1838         curr -= HeapRegion::GrainWords;
1839         _hr_printer.uncommit(curr, curr_end);
1840       }
1841       assert(curr == mr.start(), "post-condition");
1842     }
1843 
1844     _g1_storage.shrink_by(mr.byte_size());
1845     HeapWord* new_end = (HeapWord*) _g1_storage.high();
1846     assert(mr.start() == new_end, "post-condition");
1847 
1848     _expansion_regions += num_regions_deleted;
1849     update_committed_space(old_end, new_end);
1850     HeapRegionRemSet::shrink_heap(n_regions());
1851     g1_policy()->record_new_heap_size(n_regions());
1852   } else {
1853     ergo_verbose0(ErgoHeapSizing,
1854                   "did not shrink the heap",
1855                   ergo_format_reason("heap shrinking operation failed"));
1856   }
1857 }
1858 
1859 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1860   verify_region_sets_optional();
1861 
1862   // We should only reach here at the end of a Full GC which means we
1863   // should not be holding on to any GC alloc regions. The method
1864   // below will make sure of that and do any remaining clean up.
1865   abandon_gc_alloc_regions();
1866 
1867   // Instead of tearing down / rebuilding the free lists here, we
1868   // could instead use the remove_all_pending() method on free_list to
1869   // remove only the ones that we need to remove.
1870   tear_down_region_sets(true /* free_list_only */);
1871   shrink_helper(shrink_bytes);
1872   rebuild_region_sets(true /* free_list_only */);
1873 
1874   _hrs.verify_optional();
1875   verify_region_sets_optional();
1876 }
1877 
1878 // Public methods.
1879 
1880 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1881 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1882 #endif // _MSC_VER
1883 
1884 
1885 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1886   SharedHeap(policy_),
1887   _g1_policy(policy_),
1888   _dirty_card_queue_set(false),
1889   _into_cset_dirty_card_queue_set(false),
1890   _is_alive_closure_cm(this),
1891   _is_alive_closure_stw(this),
1892   _ref_processor_cm(NULL),
1893   _ref_processor_stw(NULL),
1894   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1895   _bot_shared(NULL),
1896   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
1897   _evac_failure_scan_stack(NULL) ,
1898   _mark_in_progress(false),
1899   _cg1r(NULL), _summary_bytes_used(0),
1900   _g1mm(NULL),
1901   _refine_cte_cl(NULL),
1902   _full_collection(false),
1903   _free_list("Master Free List"),
1904   _secondary_free_list("Secondary Free List"),
1905   _old_set("Old Set"),
1906   _humongous_set("Master Humongous Set"),
1907   _free_regions_coming(false),
1908   _young_list(new YoungList(this)),
1909   _gc_time_stamp(0),
1910   _retained_old_gc_alloc_region(NULL),
1911   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1912   _old_plab_stats(OldPLABSize, PLABWeight),
1913   _expand_heap_after_alloc_failure(true),
1914   _surviving_young_words(NULL),
1915   _old_marking_cycles_started(0),
1916   _old_marking_cycles_completed(0),
1917   _in_cset_fast_test(NULL),
1918   _in_cset_fast_test_base(NULL),
1919   _dirty_cards_region_list(NULL),
1920   _worker_cset_start_region(NULL),
1921   _worker_cset_start_region_time_stamp(NULL) {
1922   _g1h = this; // To catch bugs.
1923   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1924     vm_exit_during_initialization("Failed necessary allocation.");
1925   }
1926 
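  // An object is considered humongous if it is at least half a region in
  // size; e.g., with 1 MB regions, any allocation of 512 KB or more.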
1927   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1928 
1929   int n_queues = MAX2((int)ParallelGCThreads, 1);
1930   _task_queues = new RefToScanQueueSet(n_queues);
1931 
1932   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1933   assert(n_rem_sets > 0, "Invariant.");
1934 
1935   HeapRegionRemSetIterator** iter_arr =
1936     NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC);
1937   for (int i = 0; i < n_queues; i++) {
1938     iter_arr[i] = new HeapRegionRemSetIterator();
1939   }
1940   _rem_set_iterator = iter_arr;
1941 
1942   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1943   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
1944 
1945   for (int i = 0; i < n_queues; i++) {
1946     RefToScanQueue* q = new RefToScanQueue();
1947     q->initialize();
1948     _task_queues->register_queue(i, q);
1949   }
1950 
1951   clear_cset_start_regions();
1952 
1953   // Initialize the G1EvacuationFailureALot counters and flags.
1954   NOT_PRODUCT(reset_evacuation_should_fail();)
1955 
1956   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1957 #ifdef SPARC
1958   // Issue a stern warning, but allow use for experimentation and debugging.
1959   if (VM_Version::is_sun4v() && UseMemSetInBOT) {
1960     assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
1961     warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
1962             " on sun4v; please understand that you are using at your own risk!");
1963   }
1964 #endif
1965 }
1966 
1967 jint G1CollectedHeap::initialize() {
1968   CollectedHeap::pre_initialize();
1969   os::enable_vtime();
1970 
1971   G1Log::init();
1972 
1973   // Necessary to satisfy locking discipline assertions.
1974 
1975   MutexLocker x(Heap_lock);
1976 
1977   // We have to initialize the printer before committing the heap, as
1978   // it will be used then.
1979   _hr_printer.set_active(G1PrintHeapRegions);
1980 
1981   // While there are no constraints in the GC code that HeapWordSize
1982   // be any particular value, there are multiple other areas in the
1983   // system which believe this to be true (e.g. oop->object_size in some
1984   // cases incorrectly returns the size in wordSize units rather than
1985   // HeapWordSize).
1986   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1987 
1988   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1989   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1990 
1991   // Ensure that the sizes are properly aligned.
1992   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1993   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1994 
1995   _cg1r = new ConcurrentG1Refine();
1996 
1997   // Reserve the maximum.
1998 
1999   // When compressed oops are enabled, the preferred heap base
2000   // is calculated by subtracting the requested size from the
2001   // 32Gb boundary and using the result as the base address for
2002   // heap reservation. If the requested size is not aligned to
2003   // HeapRegion::GrainBytes (i.e. the alignment that is passed
2004   // into the ReservedHeapSpace constructor) then the actual
2005   // base of the reserved heap may end up differing from the
2006   // address that was requested (i.e. the preferred heap base).
2007   // If this happens then we could end up using a non-optimal
2008   // compressed oops mode.
2009 
2010   // max_byte_size is aligned to the size of a heap region (checked
2011   // above); re-verify this here before reserving the heap.
2012   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2013 
2014   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2015                                                  HeapRegion::GrainBytes);
2016 
2017   // It is important to do this in a way such that concurrent readers can't
2018   // temporarily think something is in the heap.  (I've actually seen this
2019   // happen in asserts: DLD.)
2020   _reserved.set_word_size(0);
2021   _reserved.set_start((HeapWord*)heap_rs.base());
2022   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
2023 
2024   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
2025 
2026   // Create the gen rem set (and barrier set) for the entire reserved region.
2027   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
2028   set_barrier_set(rem_set()->bs());
2029   if (barrier_set()->is_a(BarrierSet::ModRef)) {
2030     _mr_bs = (ModRefBarrierSet*)_barrier_set;
2031   } else {
2032     vm_exit_during_initialization("G1 requires a mod ref bs.");
2033     return JNI_ENOMEM;
2034   }
2035 
2036   // Also create a G1 rem set.
2037   if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
2038     _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
2039   } else {
2040     vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
2041     return JNI_ENOMEM;
2042   }
2043 
2044   // Carve out the G1 part of the heap.
2045 
2046   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
2047   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
2048                            g1_rs.size()/HeapWordSize);
2049 
2050   _g1_storage.initialize(g1_rs, 0);
2051   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
2052   _hrs.initialize((HeapWord*) _g1_reserved.start(),
2053                   (HeapWord*) _g1_reserved.end(),
2054                   _expansion_regions);
2055 
2056   // 6843694 - ensure that the maximum region index can fit
2057   // in the remembered set structures.
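  // For example, if RegionIdx_t is a 16-bit type, max_region_idx below
  // is (1 << 15) - 1 = 32767, so the heap can have at most 32768 regions.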
2058   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2059   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2060 
2061   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2062   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2063   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2064             "too many cards per region");
2065 
2066   HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
2067 
2068   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2069                                              heap_word_size(init_byte_size));
2070 
2071   _g1h = this;
2072 
2073    _in_cset_fast_test_length = max_regions();
2074    _in_cset_fast_test_base =
2075                    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
2076 
2077    // We're biasing _in_cset_fast_test to avoid subtracting the
2078    // beginning of the heap every time we want to index; basically
2079    // it's the same as what we do with the card table.
2080    _in_cset_fast_test = _in_cset_fast_test_base -
2081                ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
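   // With this bias in place, an address "addr" in the heap can be tested
   // via _in_cset_fast_test[(uintx) addr >> HeapRegion::LogOfHRGrainBytes]
   // without first subtracting the start of the heap.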
2082 
2083    // Clear the _cset_fast_test bitmap in anticipation of adding
2084    // regions to the incremental collection set for the first
2085    // evacuation pause.
2086    clear_cset_fast_test();
2087 
2088   // Create the ConcurrentMark data structure and thread.
2089   // (Must do this late, so that "max_regions" is defined.)
2090   _cm       = new ConcurrentMark(heap_rs, max_regions());
2091   _cmThread = _cm->cmThread();
2092 
2093   // Initialize the from_card cache structure of HeapRegionRemSet.
2094   HeapRegionRemSet::init_heap(max_regions());
2095 
2096   // Now expand into the initial heap size.
2097   if (!expand(init_byte_size)) {
2098     vm_exit_during_initialization("Failed to allocate initial heap.");
2099     return JNI_ENOMEM;
2100   }
2101 
2102   // Perform any initialization actions delegated to the policy.
2103   g1_policy()->init();
2104 
2105   _refine_cte_cl =
2106     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
2107                                     g1_rem_set(),
2108                                     concurrent_g1_refine());
2109   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
2110 
2111   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2112                                                SATB_Q_FL_lock,
2113                                                G1SATBProcessCompletedThreshold,
2114                                                Shared_SATB_Q_lock);
2115 
2116   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2117                                                 DirtyCardQ_FL_lock,
2118                                                 concurrent_g1_refine()->yellow_zone(),
2119                                                 concurrent_g1_refine()->red_zone(),
2120                                                 Shared_DirtyCardQ_lock);
2121 
2122   if (G1DeferredRSUpdate) {
2123     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2124                                       DirtyCardQ_FL_lock,
2125                                       -1, // never trigger processing
2126                                       -1, // no limit on length
2127                                       Shared_DirtyCardQ_lock,
2128                                       &JavaThread::dirty_card_queue_set());
2129   }
2130 
2131   // Initialize the card queue set used to hold cards containing
2132   // references into the collection set.
2133   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
2134                                              DirtyCardQ_FL_lock,
2135                                              -1, // never trigger processing
2136                                              -1, // no limit on length
2137                                              Shared_DirtyCardQ_lock,
2138                                              &JavaThread::dirty_card_queue_set());
2139 
2140   // In case we're keeping closure specialization stats, initialize those
2141   // counts and that mechanism.
2142   SpecializationStats::clear();
2143 
2144   // Do later initialization work for concurrent refinement.
2145   _cg1r->init();
2146 
2147   // Here we allocate the dummy full region that is required by the
2148   // G1AllocRegion class. If we don't pass an address in the reserved
2149   // space here, lots of asserts fire.
2150 
2151   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2152                                              _g1_reserved.start());
2153   // We'll re-use the same region whether the alloc region will
2154   // require BOT updates or not and, if it doesn't, then a non-young
2155   // region will complain that it cannot support allocations without
2156   // BOT updates. So we'll tag the dummy region as young to avoid that.
2157   dummy_region->set_young();
2158   // Make sure it's full.
2159   dummy_region->set_top(dummy_region->end());
2160   G1AllocRegion::setup(this, dummy_region);
2161 
2162   init_mutator_alloc_region();
2163 
2164   // Create the monitoring and management support now that the values
2165   // in the heap have been properly initialized.
2166   _g1mm = new G1MonitoringSupport(this);
2167 
2168   return JNI_OK;
2169 }
2170 
2171 void G1CollectedHeap::ref_processing_init() {
2172   // Reference processing in G1 currently works as follows:
2173   //
2174   // * There are two reference processor instances. One is
2175   //   used to record and process discovered references
2176   //   during concurrent marking; the other is used to
2177   //   record and process references during STW pauses
2178   //   (both full and incremental).
2179   // * Both ref processors need to 'span' the entire heap as
2180   //   the regions in the collection set may be dotted around.
2181   //
2182   // * For the concurrent marking ref processor:
2183   //   * Reference discovery is enabled at initial marking.
2184   //   * Reference discovery is disabled and the discovered
2185   //     references processed etc during remarking.
2186   //   * Reference discovery is MT (see below).
2187   //   * Reference discovery requires a barrier (see below).
2188   //   * Reference processing may or may not be MT
2189   //     (depending on the value of ParallelRefProcEnabled
2190   //     and ParallelGCThreads).
2191   //   * A full GC disables reference discovery by the CM
2192   //     ref processor and abandons any entries on its
2193   //     discovered lists.
2194   //
2195   // * For the STW processor:
2196   //   * Non MT discovery is enabled at the start of a full GC.
2197   //   * Processing and enqueueing during a full GC is non-MT.
2198   //   * During a full GC, references are processed after marking.
2199   //
2200   //   * Discovery (may or may not be MT) is enabled at the start
2201   //     of an incremental evacuation pause.
2202   //   * References are processed near the end of a STW evacuation pause.
2203   //   * For both types of GC:
2204   //     * Discovery is atomic - i.e. not concurrent.
2205   //     * Reference discovery will not need a barrier.
2206 
2207   SharedHeap::ref_processing_init();
2208   MemRegion mr = reserved_region();
2209 
2210   // Concurrent Mark ref processor
2211   _ref_processor_cm =
2212     new ReferenceProcessor(mr,    // span
2213                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2214                                 // mt processing
2215                            (int) ParallelGCThreads,
2216                                 // degree of mt processing
2217                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2218                                 // mt discovery
2219                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
2220                                 // degree of mt discovery
2221                            false,
2222                                 // Reference discovery is not atomic
2223                            &_is_alive_closure_cm,
2224                                 // is alive closure
2225                                 // (for efficiency/performance)
2226                            true);
2227                                 // Setting next fields of discovered
2228                                 // lists requires a barrier.
2229 
2230   // STW ref processor
2231   _ref_processor_stw =
2232     new ReferenceProcessor(mr,    // span
2233                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2234                                 // mt processing
2235                            MAX2((int)ParallelGCThreads, 1),
2236                                 // degree of mt processing
2237                            (ParallelGCThreads > 1),
2238                                 // mt discovery
2239                            MAX2((int)ParallelGCThreads, 1),
2240                                 // degree of mt discovery
2241                            true,
2242                                 // Reference discovery is atomic
2243                            &_is_alive_closure_stw,
2244                                 // is alive closure
2245                                 // (for efficiency/performance)
2246                            false);
2247                                 // Setting next fields of discovered
2248                                 // lists does not require a barrier.
2249 }
2250 
2251 size_t G1CollectedHeap::capacity() const {
2252   return _g1_committed.byte_size();
2253 }
2254 
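// Resets the GC time stamp of "hr" and, if "hr" starts a humongous
// object, of all the "continues humongous" regions that follow it.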
2255 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2256   assert(!hr->continuesHumongous(), "pre-condition");
2257   hr->reset_gc_time_stamp();
2258   if (hr->startsHumongous()) {
2259     uint first_index = hr->hrs_index() + 1;
2260     uint last_index = hr->last_hc_index();
2261     for (uint i = first_index; i < last_index; i += 1) {
2262       HeapRegion* chr = region_at(i);
2263       assert(chr->continuesHumongous(), "sanity");
2264       chr->reset_gc_time_stamp();
2265     }
2266   }
2267 }
2268 
2269 #ifndef PRODUCT
2270 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2271 private:
2272   unsigned _gc_time_stamp;
2273   bool _failures;
2274 
2275 public:
2276   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2277     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2278 
2279   virtual bool doHeapRegion(HeapRegion* hr) {
2280     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2281     if (_gc_time_stamp != region_gc_time_stamp) {
2282       gclog_or_tty->print_cr("Region "HR_FORMAT" has GC time stamp = %d, "
2283                              "expected %d", HR_FORMAT_PARAMS(hr),
2284                              region_gc_time_stamp, _gc_time_stamp);
2285       _failures = true;
2286     }
2287     return false;
2288   }
2289 
2290   bool failures() { return _failures; }
2291 };
2292 
2293 void G1CollectedHeap::check_gc_time_stamps() {
2294   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2295   heap_region_iterate(&cl);
2296   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2297 }
2298 #endif // PRODUCT
2299 
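// Applies "cl" to the hot card cache and then to every completed dirty
// card buffer for worker "worker_i", recording the number of buffers
// processed in the phase times.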
2300 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2301                                                  DirtyCardQueue* into_cset_dcq,
2302                                                  bool concurrent,
2303                                                  int worker_i) {
2304   // Clean cards in the hot card cache
2305   concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
2306 
2307   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2308   int n_completed_buffers = 0;
2309   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2310     n_completed_buffers++;
2311   }
2312   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
2313   dcqs.clear_n_completed_buffers();
2314   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2315 }
2316 
2317 
2318 // Computes the sum of the storage used by the various regions.
2319 
2320 size_t G1CollectedHeap::used() const {
2321   assert(Heap_lock->owner() != NULL,
2322          "Should be owned on this thread's behalf.");
2323   size_t result = _summary_bytes_used;
2324   // Read only once in case it is set to NULL concurrently
2325   HeapRegion* hr = _mutator_alloc_region.get();
2326   if (hr != NULL)
2327     result += hr->used();
2328   return result;
2329 }
2330 
2331 size_t G1CollectedHeap::used_unlocked() const {
2332   size_t result = _summary_bytes_used;
2333   return result;
2334 }
2335 
2336 class SumUsedClosure: public HeapRegionClosure {
2337   size_t _used;
2338 public:
2339   SumUsedClosure() : _used(0) {}
2340   bool doHeapRegion(HeapRegion* r) {
2341     if (!r->continuesHumongous()) {
2342       _used += r->used();
2343     }
2344     return false;
2345   }
2346   size_t result() { return _used; }
2347 };
2348 
2349 size_t G1CollectedHeap::recalculate_used() const {
2350   SumUsedClosure blk;
2351   heap_region_iterate(&blk);
2352   return blk.result();
2353 }
2354 
2355 size_t G1CollectedHeap::unsafe_max_alloc() {
2356   if (free_regions() > 0) return HeapRegion::GrainBytes;
2357   // otherwise, is there space in the current allocation region?
2358 
2359   // We need to store the current allocation region in a local variable
2360   // here. The problem is that this method doesn't take any locks and
2361   // there may be other threads which overwrite the current allocation
2362   // region field. attempt_allocation(), for example, sets it to NULL
2363   // and this can happen *after* the NULL check here but before the call
2364   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
2365   // to be a problem in the optimized build, since the two loads of the
2366   // current allocation region field are optimized away.
2367   HeapRegion* hr = _mutator_alloc_region.get();
2368   if (hr == NULL) {
2369     return 0;
2370   }
2371   return hr->free();
2372 }
2373 
2374 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2375   switch (cause) {
2376     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2377     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
2378     case GCCause::_g1_humongous_allocation: return true;
2379     default:                                return false;
2380   }
2381 }
2382 
2383 #ifndef PRODUCT
2384 void G1CollectedHeap::allocate_dummy_regions() {
2385   // Let's fill up most of the region
2386   size_t word_size = HeapRegion::GrainWords - 1024;
2387   // And as a result the region we'll allocate will be humongous.
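  // (GrainWords - 1024 is well above the humongous threshold of
  // GrainWords / 2 for any realistic region size.)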
2388   guarantee(isHumongous(word_size), "sanity");
2389 
2390   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2391     // Let's use the existing mechanism for the allocation
2392     HeapWord* dummy_obj = humongous_obj_allocate(word_size);
2393     if (dummy_obj != NULL) {
2394       MemRegion mr(dummy_obj, word_size);
2395       CollectedHeap::fill_with_object(mr);
2396     } else {
2397       // If we can't allocate once, we probably cannot allocate
2398       // again. Let's get out of the loop.
2399       break;
2400     }
2401   }
2402 }
2403 #endif // !PRODUCT
2404 
2405 void G1CollectedHeap::increment_old_marking_cycles_started() {
2406   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2407     _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2408     err_msg("Wrong marking cycle count (started: %d, completed: %d)",
2409     _old_marking_cycles_started, _old_marking_cycles_completed));
2410 
2411   _old_marking_cycles_started++;
2412 }
2413 
2414 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2415   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2416 
2417   // We assume that if concurrent == true, then the caller is a
2418   // concurrent thread that has joined the Suspendible Thread
2419   // Set. If there's ever a cheap way to check this, we should add an
2420   // assert here.
2421 
2422   // Given that this method is called at the end of a Full GC or of a
2423   // concurrent cycle, and those can be nested (i.e., a Full GC can
2424   // interrupt a concurrent cycle), the number of full collections
2425   // completed should be either one (in the case where there was no
2426   // nesting) or two (when a Full GC interrupted a concurrent cycle)
2427   // behind the number of full collections started.
2428 
2429   // This is the case for the inner caller, i.e. a Full GC.
2430   assert(concurrent ||
2431          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2432          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2433          err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
2434                  "is inconsistent with _old_marking_cycles_completed = %u",
2435                  _old_marking_cycles_started, _old_marking_cycles_completed));
2436 
2437   // This is the case for the outer caller, i.e. the concurrent cycle.
2438   assert(!concurrent ||
2439          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2440          err_msg("for outer caller (concurrent cycle): "
2441                  "_old_marking_cycles_started = %u "
2442                  "is inconsistent with _old_marking_cycles_completed = %u",
2443                  _old_marking_cycles_started, _old_marking_cycles_completed));
2444 
2445   _old_marking_cycles_completed += 1;
2446 
2447   // We need to clear the "in_progress" flag in the CM thread before
2448   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2449   // is set) so that if a waiter requests another System.gc() it doesn't
2450   // incorrectly see that a marking cycle is still in progress.
2451   if (concurrent) {
2452     _cmThread->clear_in_progress();
2453   }
2454 
2455   // This notify_all() will ensure that a thread that called
2456   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2457   // and is waiting for a full GC to finish will be woken up. It is
2458   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2459   FullGCCount_lock->notify_all();
2460 }
2461 
2462 void G1CollectedHeap::collect(GCCause::Cause cause) {
2463   assert_heap_not_locked();
2464 
2465   unsigned int gc_count_before;
2466   unsigned int old_marking_count_before;
2467   bool retry_gc;
2468 
2469   do {
2470     retry_gc = false;
2471 
2472     {
2473       MutexLocker ml(Heap_lock);
2474 
2475       // Read the GC count while holding the Heap_lock
2476       gc_count_before = total_collections();
2477       old_marking_count_before = _old_marking_cycles_started;
2478     }
2479 
2480     if (should_do_concurrent_full_gc(cause)) {
2481       // Schedule an initial-mark evacuation pause that will start a
2482       // concurrent cycle. We're setting word_size to 0 which means that
2483       // we are not requesting a post-GC allocation.
2484       VM_G1IncCollectionPause op(gc_count_before,
2485                                  0,     /* word_size */
2486                                  true,  /* should_initiate_conc_mark */
2487                                  g1_policy()->max_pause_time_ms(),
2488                                  cause);
2489 
2490       VMThread::execute(&op);
2491       if (!op.pause_succeeded()) {
2492         if (old_marking_count_before == _old_marking_cycles_started) {
2493           retry_gc = op.should_retry_gc();
2494         } else {
2495           // A Full GC happened while we were trying to schedule the
2496           // initial-mark GC. No point in starting a new cycle given
2497           // that the whole heap was collected anyway.
2498         }
2499 
2500         if (retry_gc) {
2501           if (GC_locker::is_active_and_needs_gc()) {
2502             GC_locker::stall_until_clear();
2503           }
2504         }
2505       }
2506     } else {
2507       if (cause == GCCause::_gc_locker
2508           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2509 
2510         // Schedule a standard evacuation pause. We're setting word_size
2511         // to 0 which means that we are not requesting a post-GC allocation.
2512         VM_G1IncCollectionPause op(gc_count_before,
2513                                    0,     /* word_size */
2514                                    false, /* should_initiate_conc_mark */
2515                                    g1_policy()->max_pause_time_ms(),
2516                                    cause);
2517         VMThread::execute(&op);
2518       } else {
2519         // Schedule a Full GC.
2520         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
2521         VMThread::execute(&op);
2522       }
2523     }
2524   } while (retry_gc);
2525 }
2526 
2527 bool G1CollectedHeap::is_in(const void* p) const {
2528   if (_g1_committed.contains(p)) {
2529     // Given that we know that p is in the committed space,
2530     // heap_region_containing_raw() should successfully
2531     // return the containing region.
2532     HeapRegion* hr = heap_region_containing_raw(p);
2533     return hr->is_in(p);
2534   } else {
2535     return false;
2536   }
2537 }
2538 
2539 // Iteration functions.
2540 
2541 // Iterates an OopClosure over all ref-containing fields of objects
2542 // within a HeapRegion.
2543 
2544 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2545   MemRegion _mr;
2546   ExtendedOopClosure* _cl;
2547 public:
2548   IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
2549     : _mr(mr), _cl(cl) {}
2550   bool doHeapRegion(HeapRegion* r) {
2551     if (!r->continuesHumongous()) {
2552       r->oop_iterate(_cl);
2553     }
2554     return false;
2555   }
2556 };
2557 
2558 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
2559   IterateOopClosureRegionClosure blk(_g1_committed, cl);
2560   heap_region_iterate(&blk);
2561 }
2562 
2563 void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
2564   IterateOopClosureRegionClosure blk(mr, cl);
2565   heap_region_iterate(&blk);
2566 }
2567 
2568 // Iterates an ObjectClosure over all objects within a HeapRegion.
2569 
2570 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2571   ObjectClosure* _cl;
2572 public:
2573   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2574   bool doHeapRegion(HeapRegion* r) {
2575     if (! r->continuesHumongous()) {
2576       r->object_iterate(_cl);
2577     }
2578     return false;
2579   }
2580 };
2581 
2582 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2583   IterateObjectClosureRegionClosure blk(cl);
2584   heap_region_iterate(&blk);
2585 }
2586 
2587 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
2588   // FIXME: is this right?
2589   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
2590 }
2591 
2592 // Calls a SpaceClosure on a HeapRegion.
2593 
2594 class SpaceClosureRegionClosure: public HeapRegionClosure {
2595   SpaceClosure* _cl;
2596 public:
2597   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
2598   bool doHeapRegion(HeapRegion* r) {
2599     _cl->do_space(r);
2600     return false;
2601   }
2602 };
2603 
2604 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
2605   SpaceClosureRegionClosure blk(cl);
2606   heap_region_iterate(&blk);
2607 }
2608 
2609 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2610   _hrs.iterate(cl);
2611 }
2612 
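// Applies "cl" to every region in the heap, with the work divided among
// "no_of_par_workers" workers. Each worker picks a different starting
// region and then walks all regions, claiming each one with "claim_value"
// so that every region is processed by exactly one worker.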
2613 void
2614 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2615                                                  uint worker_id,
2616                                                  uint no_of_par_workers,
2617                                                  jint claim_value) {
2618   const uint regions = n_regions();
2619   const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
2620                              no_of_par_workers :
2621                              1);
2622   assert(UseDynamicNumberOfGCThreads ||
2623          no_of_par_workers == workers()->total_workers(),
2624          "Non dynamic should use fixed number of workers");
2625   // try to spread out the starting points of the workers
2626   const HeapRegion* start_hr =
2627                         start_region_for_worker(worker_id, no_of_par_workers);
2628   const uint start_index = start_hr->hrs_index();
2629 
2630   // each worker will actually look at all regions
2631   for (uint count = 0; count < regions; ++count) {
2632     const uint index = (start_index + count) % regions;
2633     assert(0 <= index && index < regions, "sanity");
2634     HeapRegion* r = region_at(index);
2635     // we'll ignore "continues humongous" regions (we'll process them
2636     // when we come across their corresponding "start humongous"
2637     // region) and regions already claimed
2638     if (r->claim_value() == claim_value || r->continuesHumongous()) {
2639       continue;
2640     }
2641     // OK, try to claim it
2642     if (r->claimHeapRegion(claim_value)) {
2643       // success!
2644       assert(!r->continuesHumongous(), "sanity");
2645       if (r->startsHumongous()) {
2646         // If the region is "starts humongous" we'll iterate over its
2647         // "continues humongous" first; in fact we'll do them
2648         // first. The order is important. In one case, calling the
2649         // closure on the "starts humongous" region might de-allocate
2650         // and clear all its "continues humongous" regions and, as a
2651         // result, we might end up processing them twice. So, we'll do
2652         // them first (notice: most closures will ignore them anyway) and
2653         // then we'll do the "starts humongous" region.
2654         for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
2655           HeapRegion* chr = region_at(ch_index);
2656 
2657           // if the region has already been claimed or it's not
2658           // "continues humongous" we're done
2659           if (chr->claim_value() == claim_value ||
2660               !chr->continuesHumongous()) {
2661             break;
2662           }
2663 
2664           // No one should have claimed it directly; we can assert
2665           // this given that we claimed its "starts humongous" region.
2666           assert(chr->claim_value() != claim_value, "sanity");
2667           assert(chr->humongous_start_region() == r, "sanity");
2668 
2669           if (chr->claimHeapRegion(claim_value)) {
2670             // We should always be able to claim it; no one else should
2671             // be trying to claim this region.
2672 
2673             bool res2 = cl->doHeapRegion(chr);
2674             assert(!res2, "Should not abort");
2675 
2676             // Right now, this holds (i.e., no closure that actually
2677             // does something with "continues humongous" regions
2678             // clears them). We might have to weaken it in the future,
2679             // but let's leave these two asserts here for extra safety.
2680             assert(chr->continuesHumongous(), "should still be the case");
2681             assert(chr->humongous_start_region() == r, "sanity");
2682           } else {
2683             guarantee(false, "we should not reach here");
2684           }
2685         }
2686       }
2687 
2688       assert(!r->continuesHumongous(), "sanity");
2689       bool res = cl->doHeapRegion(r);
2690       assert(!res, "Should not abort");
2691     }
2692   }
2693 }
2694 
2695 class ResetClaimValuesClosure: public HeapRegionClosure {
2696 public:
2697   bool doHeapRegion(HeapRegion* r) {
2698     r->set_claim_value(HeapRegion::InitialClaimValue);
2699     return false;
2700   }
2701 };
2702 
2703 void G1CollectedHeap::reset_heap_region_claim_values() {
2704   ResetClaimValuesClosure blk;
2705   heap_region_iterate(&blk);
2706 }
2707 
2708 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
2709   ResetClaimValuesClosure blk;
2710   collection_set_iterate(&blk);
2711 }
2712 
2713 #ifdef ASSERT
2714 // This checks whether all regions in the heap have the correct claim
2715 // value. I also piggy-backed a check on this to ensure that the
2716 // humongous_start_region() information on "continues humongous"
2717 // regions is correct.
2718 
2719 class CheckClaimValuesClosure : public HeapRegionClosure {
2720 private:
2721   jint _claim_value;
2722   uint _failures;
2723   HeapRegion* _sh_region;
2724 
2725 public:
2726   CheckClaimValuesClosure(jint claim_value) :
2727     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
2728   bool doHeapRegion(HeapRegion* r) {
2729     if (r->claim_value() != _claim_value) {
2730       gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2731                              "claim value = %d, should be %d",
2732                              HR_FORMAT_PARAMS(r),
2733                              r->claim_value(), _claim_value);
2734       ++_failures;
2735     }
2736     if (!r->isHumongous()) {
2737       _sh_region = NULL;
2738     } else if (r->startsHumongous()) {
2739       _sh_region = r;
2740     } else if (r->continuesHumongous()) {
2741       if (r->humongous_start_region() != _sh_region) {
2742         gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2743                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
2744                                HR_FORMAT_PARAMS(r),
2745                                r->humongous_start_region(),
2746                                _sh_region);
2747         ++_failures;
2748       }
2749     }
2750     return false;
2751   }
2752   uint failures() { return _failures; }
2753 };
2754 
2755 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
2756   CheckClaimValuesClosure cl(claim_value);
2757   heap_region_iterate(&cl);
2758   return cl.failures() == 0;
2759 }
2760 
2761 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
2762 private:
2763   jint _claim_value;
2764   uint _failures;
2765 
2766 public:
2767   CheckClaimValuesInCSetHRClosure(jint claim_value) :
2768     _claim_value(claim_value), _failures(0) { }
2769 
2770   uint failures() { return _failures; }
2771 
2772   bool doHeapRegion(HeapRegion* hr) {
2773     assert(hr->in_collection_set(), "how?");
2774     assert(!hr->isHumongous(), "H-region in CSet");
2775     if (hr->claim_value() != _claim_value) {
2776       gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
2777                              "claim value = %d, should be %d",
2778                              HR_FORMAT_PARAMS(hr),
2779                              hr->claim_value(), _claim_value);
2780       _failures += 1;
2781     }
2782     return false;
2783   }
2784 };
2785 
2786 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
2787   CheckClaimValuesInCSetHRClosure cl(claim_value);
2788   collection_set_iterate(&cl);
2789   return cl.failures() == 0;
2790 }
2791 #endif // ASSERT
2792 
2793 // Clear the cached CSet starting regions and (more importantly)
2794 // the time stamps. Called when we reset the GC time stamp.
2795 void G1CollectedHeap::clear_cset_start_regions() {
2796   assert(_worker_cset_start_region != NULL, "sanity");
2797   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
2798 
2799   int n_queues = MAX2((int)ParallelGCThreads, 1);
2800   for (int i = 0; i < n_queues; i++) {
2801     _worker_cset_start_region[i] = NULL;
2802     _worker_cset_start_region_time_stamp[i] = 0;
2803   }
2804 }
2805 
2806 // Given the id of a worker, obtain or calculate a suitable
2807 // starting region for iterating over the current collection set.
2808 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
2809   assert(get_gc_time_stamp() > 0, "should have been updated by now");
2810 
2811   HeapRegion* result = NULL;
2812   unsigned gc_time_stamp = get_gc_time_stamp();
2813 
2814   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
2815     // Cached starting region for current worker was set
2816     // during the current pause - so it's valid.
2817     // Note: the cached starting heap region may be NULL
2818     // (when the collection set is empty).
2819     result = _worker_cset_start_region[worker_i];
2820     assert(result == NULL || result->in_collection_set(), "sanity");
2821     return result;
2822   }
2823 
2824   // The cached entry was not valid so let's calculate
2825   // a suitable starting heap region for this worker.
2826 
2827   // We want the parallel threads to start their collection
2828   // set iteration at different collection set regions to
2829   // avoid contention.
2830   // If we have:
2831   //          n collection set regions
2832   //          p threads
2833   // Then thread t will start at region floor ((t * n) / p)
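       // For example (illustrative numbers only): with n = 10 regions and
       // p = 4 threads, the starting regions are floor(0/4) = 0,
       // floor(10/4) = 2, floor(20/4) = 5 and floor(30/4) = 7, so the
       // threads are spread roughly evenly across the collection set.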
2834 
2835   result = g1_policy()->collection_set();
2836   if (G1CollectedHeap::use_parallel_gc_threads()) {
2837     uint cs_size = g1_policy()->cset_region_length();
2838     uint active_workers = workers()->active_workers();
2839     assert(UseDynamicNumberOfGCThreads ||
2840              active_workers == workers()->total_workers(),
2841              "Unless dynamic should use total workers");
2842 
2843     uint end_ind   = (cs_size * worker_i) / active_workers;
2844     uint start_ind = 0;
2845 
2846     if (worker_i > 0 &&
2847         _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2848       // The previous worker's starting region is valid,
2849       // so let's iterate from there.
2850       start_ind = (cs_size * (worker_i - 1)) / active_workers;
2851       result = _worker_cset_start_region[worker_i - 1];
2852     }
2853 
2854     for (uint i = start_ind; i < end_ind; i++) {
2855       result = result->next_in_collection_set();
2856     }
2857   }
2858 
2859   // Note: the calculated starting heap region may be NULL
2860   // (when the collection set is empty).
2861   assert(result == NULL || result->in_collection_set(), "sanity");
2862   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2863          "should be updated only once per pause");
2864   _worker_cset_start_region[worker_i] = result;
2865   OrderAccess::storestore();
2866   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2867   return result;
2868 }
2869 
2870 HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
2871                                                      uint no_of_par_workers) {
2872   uint worker_num =
2873            G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
2874   assert(UseDynamicNumberOfGCThreads ||
2875          no_of_par_workers == workers()->total_workers(),
2876          "Non dynamic should use fixed number of workers");
2877   const uint start_index = n_regions() * worker_i / worker_num;
2878   return region_at(start_index);
2879 }
2880 
2881 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2882   HeapRegion* r = g1_policy()->collection_set();
2883   while (r != NULL) {
2884     HeapRegion* next = r->next_in_collection_set();
2885     if (cl->doHeapRegion(r)) {
2886       cl->incomplete();
2887       return;
2888     }
2889     r = next;
2890   }
2891 }
2892 
2893 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
2894                                                   HeapRegionClosure *cl) {
2895   if (r == NULL) {
2896     // The CSet is empty so there's nothing to do.
2897     return;
2898   }
2899 
2900   assert(r->in_collection_set(),
2901          "Start region must be a member of the collection set.");
2902   HeapRegion* cur = r;
2903   while (cur != NULL) {
2904     HeapRegion* next = cur->next_in_collection_set();
2905     if (cl->doHeapRegion(cur)) {
2906       cl->incomplete();
2907       return;
2908     }
2909     cur = next;
2910   }
2911   cur = g1_policy()->collection_set();
2912   while (cur != r) {
2913     HeapRegion* next = cur->next_in_collection_set();
2914     if (cl->doHeapRegion(cur)) {
2915       cl->incomplete();
2916       return;
2917     }
2918     cur = next;
2919   }
2920 }
2921 
2922 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
2923   return n_regions() > 0 ? region_at(0) : NULL;
2924 }
2925 
2926 
2927 Space* G1CollectedHeap::space_containing(const void* addr) const {
2928   Space* res = heap_region_containing(addr);
2929   return res;
2930 }
2931 
2932 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2933   Space* sp = space_containing(addr);
2934   if (sp != NULL) {
2935     return sp->block_start(addr);
2936   }
2937   return NULL;
2938 }
2939 
2940 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2941   Space* sp = space_containing(addr);
2942   assert(sp != NULL, "block_size of address outside of heap");
2943   return sp->block_size(addr);
2944 }
2945 
2946 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2947   Space* sp = space_containing(addr);
2948   return sp->block_is_obj(addr);
2949 }
2950 
2951 bool G1CollectedHeap::supports_tlab_allocation() const {
2952   return true;
2953 }
2954 
2955 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2956   return HeapRegion::GrainBytes;
2957 }
2958 
2959 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2960   // Return the remaining space in the cur alloc region, but not less than
2961   // the min TLAB size.
2962 
2963   // Also, this value can be at most the humongous object threshold,
2964   // since we can't allow TLABs to grow big enough to accommodate
2965   // humongous objects.
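       // In other words the result is clamped to [MinTLABSize, humongous
       // threshold]. For example (illustrative sizes only): if the current
       // alloc region has 40K free, and 40K lies between MinTLABSize and the
       // threshold, we return 40K; with no current alloc region we return
       // the threshold itself.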
2966 
2967   HeapRegion* hr = _mutator_alloc_region.get();
2968   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
2969   if (hr == NULL) {
2970     return max_tlab_size;
2971   } else {
2972     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
2973   }
2974 }
2975 
2976 size_t G1CollectedHeap::max_capacity() const {
2977   return _g1_reserved.byte_size();
2978 }
2979 
2980 jlong G1CollectedHeap::millis_since_last_gc() {
2981   // assert(false, "NYI");
2982   return 0;
2983 }
2984 
2985 void G1CollectedHeap::prepare_for_verify() {
2986   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2987     ensure_parsability(false);
2988   }
2989   g1_rem_set()->prepare_for_verify();
2990 }
2991 
2992 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
2993                                               VerifyOption vo) {
2994   switch (vo) {
2995   case VerifyOption_G1UsePrevMarking:
2996     return hr->obj_allocated_since_prev_marking(obj);
2997   case VerifyOption_G1UseNextMarking:
2998     return hr->obj_allocated_since_next_marking(obj);
2999   case VerifyOption_G1UseMarkWord:
3000     return false;
3001   default:
3002     ShouldNotReachHere();
3003   }
3004   return false; // keep some compilers happy
3005 }
3006 
3007 HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
3008   switch (vo) {
3009   case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
3010   case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
3011   case VerifyOption_G1UseMarkWord:    return NULL;
3012   default:                            ShouldNotReachHere();
3013   }
3014   return NULL; // keep some compilers happy
3015 }
3016 
3017 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
3018   switch (vo) {
3019   case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
3020   case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
3021   case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
3022   default:                            ShouldNotReachHere();
3023   }
3024   return false; // keep some compilers happy
3025 }
3026 
3027 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
3028   switch (vo) {
3029   case VerifyOption_G1UsePrevMarking: return "PTAMS";
3030   case VerifyOption_G1UseNextMarking: return "NTAMS";
3031   case VerifyOption_G1UseMarkWord:    return "NONE";
3032   default:                            ShouldNotReachHere();
3033   }
3034   return NULL; // keep some compilers happy
3035 }
3036 
3037 class VerifyLivenessOopClosure: public OopClosure {
3038   G1CollectedHeap* _g1h;
3039   VerifyOption _vo;
3040 public:
3041   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
3042     _g1h(g1h), _vo(vo)
3043   { }
3044   void do_oop(narrowOop *p) { do_oop_work(p); }
3045   void do_oop(      oop *p) { do_oop_work(p); }
3046 
3047   template <class T> void do_oop_work(T *p) {
3048     oop obj = oopDesc::load_decode_heap_oop(p);
3049     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
3050               "Dead object referenced by a not dead object");
3051   }
3052 };
3053 
3054 class VerifyObjsInRegionClosure: public ObjectClosure {
3055 private:
3056   G1CollectedHeap* _g1h;
3057   size_t _live_bytes;
3058   HeapRegion *_hr;
3059   VerifyOption _vo;
3060 public:
3061   // _vo == UsePrevMarking -> use "prev" marking information,
3062   // _vo == UseNextMarking -> use "next" marking information,
3063   // _vo == UseMarkWord    -> use mark word from object header.
3064   VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
3065     : _live_bytes(0), _hr(hr), _vo(vo) {
3066     _g1h = G1CollectedHeap::heap();
3067   }
3068   void do_object(oop o) {
3069     VerifyLivenessOopClosure isLive(_g1h, _vo);
3070     assert(o != NULL, "Huh?");
3071     if (!_g1h->is_obj_dead_cond(o, _vo)) {
3072       // If the object is alive according to the mark word,
3073       // then verify that the marking information agrees.
3074       // Note we can't verify the contra-positive of the
3075       // above: if the object is dead (according to the mark
3076       // word), it may not be marked, or may have been marked
3077       // but has since became dead, or may have been allocated
3078       // since the last marking.
3079       if (_vo == VerifyOption_G1UseMarkWord) {
3080         guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
3081       }
3082 
3083       o->oop_iterate_no_header(&isLive);
3084       if (!_hr->obj_allocated_since_prev_marking(o)) {
3085         size_t obj_size = o->size();    // Make sure we don't overflow
3086         _live_bytes += (obj_size * HeapWordSize);
3087       }
3088     }
3089   }
3090   size_t live_bytes() { return _live_bytes; }
3091 };
3092 
3093 class PrintObjsInRegionClosure : public ObjectClosure {
3094   HeapRegion *_hr;
3095   G1CollectedHeap *_g1;
3096 public:
3097   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
3098     _g1 = G1CollectedHeap::heap();
3099   }
3100 
3101   void do_object(oop o) {
3102     if (o != NULL) {
3103       HeapWord *start = (HeapWord *) o;
3104       size_t word_sz = o->size();
3105       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
3106                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
3107                           (void*) o, word_sz,
3108                           _g1->isMarkedPrev(o),
3109                           _g1->isMarkedNext(o),
3110                           _hr->obj_allocated_since_prev_marking(o));
3111       HeapWord *end = start + word_sz;
3112       HeapWord *cur;
3113       int *val;
3114       for (cur = start; cur < end; cur++) {
3115         val = (int *) cur;
3116         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
3117       }
3118     }
3119   }
3120 };
3121 
3122 class VerifyRegionClosure: public HeapRegionClosure {
3123 private:
3124   bool             _par;
3125   VerifyOption     _vo;
3126   bool             _failures;
3127 public:
3128   // _vo == UsePrevMarking -> use "prev" marking information,
3129   // _vo == UseNextMarking -> use "next" marking information,
3130   // _vo == UseMarkWord    -> use mark word from object header.
3131   VerifyRegionClosure(bool par, VerifyOption vo)
3132     : _par(par),
3133       _vo(vo),
3134       _failures(false) {}
3135 
3136   bool failures() {
3137     return _failures;
3138   }
3139 
3140   bool doHeapRegion(HeapRegion* r) {
3141     if (!r->continuesHumongous()) {
3142       bool failures = false;
3143       r->verify(_vo, &failures);
3144       if (failures) {
3145         _failures = true;
3146       } else {
3147         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3148         r->object_iterate(&not_dead_yet_cl);
3149         if (_vo != VerifyOption_G1UseNextMarking) {
3150           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3151             gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
3152                                    "max_live_bytes "SIZE_FORMAT" "
3153                                    "< calculated "SIZE_FORMAT,
3154                                    r->bottom(), r->end(),
3155                                    r->max_live_bytes(),
3156                                  not_dead_yet_cl.live_bytes());
3157             _failures = true;
3158           }
3159         } else {
3160           // When vo == UseNextMarking we cannot currently do a sanity
3161           // check on the live bytes as the calculation has not been
3162           // finalized yet.
3163         }
3164       }
3165     }
3166     return false; // don't abort the iteration: record the failure but keep verifying
3167   }
3168 };
3169 
3170 class YoungRefCounterClosure : public OopClosure {
3171   G1CollectedHeap* _g1h;
3172   int              _count;
3173  public:
3174   YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
3175   void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
3176   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3177 
3178   int count() { return _count; }
3179   void reset_count() { _count = 0; }
3180 };
3181 
3182 class VerifyKlassClosure: public KlassClosure {
3183   YoungRefCounterClosure _young_ref_counter_closure;
3184   OopClosure *_oop_closure;
3185  public:
3186   VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
3187   void do_klass(Klass* k) {
3188     k->oops_do(_oop_closure);
3189 
3190     _young_ref_counter_closure.reset_count();
3191     k->oops_do(&_young_ref_counter_closure);
3192     if (_young_ref_counter_closure.count() > 0) {
3193       guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
3194     }
3195   }
3196 };
3197 
3198 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
3199 //       pass it as the perm_blk to SharedHeap::process_strong_roots.
3200 //       When process_strong_roots stops calling perm_blk->younger_refs_iterate,
3201 //       we can change this closure to extend the simpler OopClosure.
3202 class VerifyRootsClosure: public OopsInGenClosure {
3203 private:
3204   G1CollectedHeap* _g1h;
3205   VerifyOption     _vo;
3206   bool             _failures;
3207 public:
3208   // _vo == UsePrevMarking -> use "prev" marking information,
3209   // _vo == UseNextMarking -> use "next" marking information,
3210   // _vo == UseMarkWord    -> use mark word from object header.
3211   VerifyRootsClosure(VerifyOption vo) :
3212     _g1h(G1CollectedHeap::heap()),
3213     _vo(vo),
3214     _failures(false) { }
3215 
3216   bool failures() { return _failures; }
3217 
3218   template <class T> void do_oop_nv(T* p) {
3219     T heap_oop = oopDesc::load_heap_oop(p);
3220     if (!oopDesc::is_null(heap_oop)) {
3221       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3222       if (_g1h->is_obj_dead_cond(obj, _vo)) {
3223         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
3224                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
3225         if (_vo == VerifyOption_G1UseMarkWord) {
3226           gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
3227         }
3228         obj->print_on(gclog_or_tty);
3229         _failures = true;
3230       }
3231     }
3232   }
3233 
3234   void do_oop(oop* p)       { do_oop_nv(p); }
3235   void do_oop(narrowOop* p) { do_oop_nv(p); }
3236 };
3237 
3238 // This is the task used for parallel heap verification.
3239 
3240 class G1ParVerifyTask: public AbstractGangTask {
3241 private:
3242   G1CollectedHeap* _g1h;
3243   VerifyOption     _vo;
3244   bool             _failures;
3245 
3246 public:
3247   // _vo == UsePrevMarking -> use "prev" marking information,
3248   // _vo == UseNextMarking -> use "next" marking information,
3249   // _vo == UseMarkWord    -> use mark word from object header.
3250   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
3251     AbstractGangTask("Parallel verify task"),
3252     _g1h(g1h),
3253     _vo(vo),
3254     _failures(false) { }
3255 
3256   bool failures() {
3257     return _failures;
3258   }
3259 
3260   void work(uint worker_id) {
3261     HandleMark hm;
3262     VerifyRegionClosure blk(true, _vo);
3263     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3264                                           _g1h->workers()->active_workers(),
3265                                           HeapRegion::ParVerifyClaimValue);
3266     if (blk.failures()) {
3267       _failures = true;
3268     }
3269   }
3270 };
3271 
3272 void G1CollectedHeap::verify(bool silent) {
3273   verify(silent, VerifyOption_G1UsePrevMarking);
3274 }
3275 
3276 void G1CollectedHeap::verify(bool silent,
3277                              VerifyOption vo) {
3278   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
3279     if (!silent) { gclog_or_tty->print("Roots "); }
3280     VerifyRootsClosure rootsCl(vo);
3281 
3282     assert(Thread::current()->is_VM_thread(),
3283       "Expected to be executed serially by the VM thread at this point");
3284 
3285     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
3286     VerifyKlassClosure klassCl(this, &rootsCl);
3287 
3288     // We apply the relevant closures to all the oops in the
3289     // system dictionary, the string table and the code cache.
3290     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3291 
3292     // Need cleared claim bits for the strong roots processing
3293     ClassLoaderDataGraph::clear_claimed_marks();
3294 
3295     process_strong_roots(true,      // activate StrongRootsScope
3296                          false,     // we set "is scavenging" to false,
3297                                     // so we don't reset the dirty cards.
3298                          ScanningOption(so),  // roots scanning options
3299                          &rootsCl,
3300                          &blobsCl,
3301                          &klassCl
3302                          );
3303 
3304     bool failures = rootsCl.failures();
3305 
3306     if (vo != VerifyOption_G1UseMarkWord) {
3307       // If we're verifying during a full GC then the region sets
3308       // will have been torn down at the start of the GC. Therefore
3309       // verifying the region sets will fail. So we only verify
3310       // the region sets when not in a full GC.
3311       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3312       verify_region_sets();
3313     }
3314 
3315     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3316     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3317       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3318              "sanity check");
3319 
3320       G1ParVerifyTask task(this, vo);
3321       assert(UseDynamicNumberOfGCThreads ||
3322         workers()->active_workers() == workers()->total_workers(),
3323         "If not dynamic should be using all the workers");
3324       int n_workers = workers()->active_workers();
3325       set_par_threads(n_workers);
3326       workers()->run_task(&task);
3327       set_par_threads(0);
3328       if (task.failures()) {
3329         failures = true;
3330       }
3331 
3332       // Checks that the expected amount of parallel work was done.
3333       // The implication is that n_workers is > 0.
3334       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
3335              "sanity check");
3336 
3337       reset_heap_region_claim_values();
3338 
3339       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3340              "sanity check");
3341     } else {
3342       VerifyRegionClosure blk(false, vo);
3343       heap_region_iterate(&blk);
3344       if (blk.failures()) {
3345         failures = true;
3346       }
3347     }
3348     if (!silent) gclog_or_tty->print("RemSet ");
3349     rem_set()->verify();
3350 
3351     if (failures) {
3352       gclog_or_tty->print_cr("Heap:");
3353       // It helps to have the per-region information in the output to
3354       // help us track down what went wrong. This is why we call
3355       // print_extended_on() instead of print_on().
3356       print_extended_on(gclog_or_tty);
3357       gclog_or_tty->print_cr("");
3358 #ifndef PRODUCT
3359       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
3360         concurrent_mark()->print_reachable("at-verification-failure",
3361                                            vo, false /* all */);
3362       }
3363 #endif
3364       gclog_or_tty->flush();
3365     }
3366     guarantee(!failures, "there should not have been any failures");
3367   } else {
3368     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
3369   }
3370 }
3371 
3372 class PrintRegionClosure: public HeapRegionClosure {
3373   outputStream* _st;
3374 public:
3375   PrintRegionClosure(outputStream* st) : _st(st) {}
3376   bool doHeapRegion(HeapRegion* r) {
3377     r->print_on(_st);
3378     return false;
3379   }
3380 };
3381 
3382 void G1CollectedHeap::print_on(outputStream* st) const {
3383   st->print(" %-20s", "garbage-first heap");
3384   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3385             capacity()/K, used_unlocked()/K);
3386   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3387             _g1_storage.low_boundary(),
3388             _g1_storage.high(),
3389             _g1_storage.high_boundary());
3390   st->cr();
3391   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3392   uint young_regions = _young_list->length();
3393   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3394             (size_t) young_regions * HeapRegion::GrainBytes / K);
3395   uint survivor_regions = g1_policy()->recorded_survivor_regions();
3396   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3397             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3398   st->cr();
3399 }
3400 
3401 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3402   print_on(st);
3403 
3404   // Print the per-region information.
3405   st->cr();
3406   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3407                "HS=humongous(starts), HC=humongous(continues), "
3408                "CS=collection set, F=free, TS=gc time stamp, "
3409                "PTAMS=previous top-at-mark-start, "
3410                "NTAMS=next top-at-mark-start)");
3411   PrintRegionClosure blk(st);
3412   heap_region_iterate(&blk);
3413 }
3414 
3415 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3416   if (G1CollectedHeap::use_parallel_gc_threads()) {
3417     workers()->print_worker_threads_on(st);
3418   }
3419   _cmThread->print_on(st);
3420   st->cr();
3421   _cm->print_worker_threads_on(st);
3422   _cg1r->print_worker_threads_on(st);
3423   st->cr();
3424 }
3425 
3426 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3427   if (G1CollectedHeap::use_parallel_gc_threads()) {
3428     workers()->threads_do(tc);
3429   }
3430   tc->do_thread(_cmThread);
3431   _cg1r->threads_do(tc);
3432 }
3433 
3434 void G1CollectedHeap::print_tracing_info() const {
3435   // We'll overload this to mean "trace GC pause statistics."
3436   if (TraceGen0Time || TraceGen1Time) {
3437     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3438     // to that.
3439     g1_policy()->print_tracing_info();
3440   }
3441   if (G1SummarizeRSetStats) {
3442     g1_rem_set()->print_summary_info();
3443   }
3444   if (G1SummarizeConcMark) {
3445     concurrent_mark()->print_summary_info();
3446   }
3447   g1_policy()->print_yg_surv_rate_info();
3448   SpecializationStats::print();
3449 }
3450 
3451 #ifndef PRODUCT
3452 // Helpful for debugging RSet issues.
3453 
3454 class PrintRSetsClosure : public HeapRegionClosure {
3455 private:
3456   const char* _msg;
3457   size_t _occupied_sum;
3458 
3459 public:
3460   bool doHeapRegion(HeapRegion* r) {
3461     HeapRegionRemSet* hrrs = r->rem_set();
3462     size_t occupied = hrrs->occupied();
3463     _occupied_sum += occupied;
3464 
3465     gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
3466                            HR_FORMAT_PARAMS(r));
3467     if (occupied == 0) {
3468       gclog_or_tty->print_cr("  RSet is empty");
3469     } else {
3470       hrrs->print();
3471     }
3472     gclog_or_tty->print_cr("----------");
3473     return false;
3474   }
3475 
3476   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3477     gclog_or_tty->cr();
3478     gclog_or_tty->print_cr("========================================");
3479     gclog_or_tty->print_cr(msg);
3480     gclog_or_tty->cr();
3481   }
3482 
3483   ~PrintRSetsClosure() {
3484     gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
3485     gclog_or_tty->print_cr("========================================");
3486     gclog_or_tty->cr();
3487   }
3488 };
3489 
3490 void G1CollectedHeap::print_cset_rsets() {
3491   PrintRSetsClosure cl("Printing CSet RSets");
3492   collection_set_iterate(&cl);
3493 }
3494 
3495 void G1CollectedHeap::print_all_rsets() {
3496   PrintRSetsClosure cl("Printing All RSets");
3497   heap_region_iterate(&cl);
3498 }
3499 #endif // PRODUCT
3500 
3501 G1CollectedHeap* G1CollectedHeap::heap() {
3502   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
3503          "not a garbage-first heap");
3504   return _g1h;
3505 }
3506 
3507 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3508   // always_do_update_barrier = false;
3509   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3510   // Call allocation profiler
3511   AllocationProfiler::iterate_since_last_gc();
3512   // Fill TLAB's and such
3513   ensure_parsability(true);
3514 }
3515 
3516 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
3517   // FIXME: what is this about?
3518   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3519   // is set.
3520   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3521                         "derived pointer present"));
3522   // always_do_update_barrier = true;
3523 
3524   // We have just completed a GC. Update the soft reference
3525   // policy with the new heap occupancy
3526   Universe::update_heap_info_at_gc();
3527 }
3528 
3529 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3530                                                unsigned int gc_count_before,
3531                                                bool* succeeded) {
3532   assert_heap_not_locked_and_not_at_safepoint();
3533   g1_policy()->record_stop_world_start();
3534   VM_G1IncCollectionPause op(gc_count_before,
3535                              word_size,
3536                              false, /* should_initiate_conc_mark */
3537                              g1_policy()->max_pause_time_ms(),
3538                              GCCause::_g1_inc_collection_pause);
3539   VMThread::execute(&op);
3540 
3541   HeapWord* result = op.result();
3542   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3543   assert(result == NULL || ret_succeeded,
3544          "the result should be NULL if the VM did not succeed");
3545   *succeeded = ret_succeeded;
3546 
3547   assert_heap_not_locked();
3548   return result;
3549 }
3550 
3551 void
3552 G1CollectedHeap::doConcurrentMark() {
3553   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
3554   if (!_cmThread->in_progress()) {
3555     _cmThread->set_started();
3556     CGC_lock->notify();
3557   }
3558 }
3559 
3560 size_t G1CollectedHeap::pending_card_num() {
3561   size_t extra_cards = 0;
3562   JavaThread *curr = Threads::first();
3563   while (curr != NULL) {
3564     DirtyCardQueue& dcq = curr->dirty_card_queue();
3565     extra_cards += dcq.size();
3566     curr = curr->next();
3567   }
3568   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3569   size_t buffer_size = dcqs.buffer_size();
3570   size_t buffer_num = dcqs.completed_buffers_num();
3571 
3572   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
3573   // in bytes - not the number of 'entries'. We need to convert
3574   // into a number of cards.
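       // For example (illustrative numbers only): on a 64-bit VM where each
       // entry is a pointer-sized card address (oopSize == 8), a single
       // completed 2048-byte buffer corresponds to 256 pending cards.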
3575   return (buffer_size * buffer_num + extra_cards) / oopSize;
3576 }
3577 
3578 size_t G1CollectedHeap::cards_scanned() {
3579   return g1_rem_set()->cardsScanned();
3580 }
3581 
3582 void
3583 G1CollectedHeap::setup_surviving_young_words() {
3584   assert(_surviving_young_words == NULL, "pre-condition");
3585   uint array_length = g1_policy()->young_cset_region_length();
3586   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3587   if (_surviving_young_words == NULL) {
3588     vm_exit_out_of_memory(sizeof(size_t) * array_length,
3589                           "Not enough space for young surv words summary.");
3590   }
3591   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3592 #ifdef ASSERT
3593   for (uint i = 0;  i < array_length; ++i) {
3594     assert( _surviving_young_words[i] == 0, "memset above" );
3595   }
3596 #endif // ASSERT
3597 }
3598 
3599 void
3600 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3601   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3602   uint array_length = g1_policy()->young_cset_region_length();
3603   for (uint i = 0; i < array_length; ++i) {
3604     _surviving_young_words[i] += surv_young_words[i];
3605   }
3606 }
3607 
3608 void
3609 G1CollectedHeap::cleanup_surviving_young_words() {
3610   guarantee( _surviving_young_words != NULL, "pre-condition" );
3611   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
3612   _surviving_young_words = NULL;
3613 }
3614 
3615 #ifdef ASSERT
3616 class VerifyCSetClosure: public HeapRegionClosure {
3617 public:
3618   bool doHeapRegion(HeapRegion* hr) {
3619     // Here we check that the CSet region's RSet is ready for parallel
3620     // iteration. The fields that we'll verify are only manipulated
3621     // when the region is part of a CSet and is collected. Afterwards,
3622     // we reset these fields when we clear the region's RSet (when the
3623     // region is freed) so they are ready when the region is
3624     // re-allocated. The only exception to this is if there's an
3625     // evacuation failure and instead of freeing the region we leave
3626     // it in the heap. In that case, we reset these fields during
3627     // evacuation failure handling.
3628     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3629 
3630     // Here's a good place to add any other checks we'd like to
3631     // perform on CSet regions.
3632     return false;
3633   }
3634 };
3635 #endif // ASSERT
3636 
3637 #if TASKQUEUE_STATS
3638 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3639   st->print_raw_cr("GC Task Stats");
3640   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3641   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3642 }
3643 
3644 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3645   print_taskqueue_stats_hdr(st);
3646 
3647   TaskQueueStats totals;
3648   const int n = workers() != NULL ? workers()->total_workers() : 1;
3649   for (int i = 0; i < n; ++i) {
3650     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3651     totals += task_queue(i)->stats;
3652   }
3653   st->print_raw("tot "); totals.print(st); st->cr();
3654 
3655   DEBUG_ONLY(totals.verify());
3656 }
3657 
3658 void G1CollectedHeap::reset_taskqueue_stats() {
3659   const int n = workers() != NULL ? workers()->total_workers() : 1;
3660   for (int i = 0; i < n; ++i) {
3661     task_queue(i)->stats.reset();
3662   }
3663 }
3664 #endif // TASKQUEUE_STATS
3665 
3666 bool
3667 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3668   assert_at_safepoint(true /* should_be_vm_thread */);
3669   guarantee(!is_gc_active(), "collection is not reentrant");
3670 
3671   if (GC_locker::check_active_before_gc()) {
3672     return false;
3673   }
3674 
3675   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3676   ResourceMark rm;
3677 
3678   print_heap_before_gc();
3679 
3680   HRSPhaseSetter x(HRSPhaseEvacuation);
3681   verify_region_sets_optional();
3682   verify_dirty_young_regions();
3683 
3684   // This call will decide whether this pause is an initial-mark
3685   // pause. If it is, during_initial_mark_pause() will return true
3686   // for the duration of this pause.
3687   g1_policy()->decide_on_conc_mark_initiation();
3688 
3689   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3690   assert(!g1_policy()->during_initial_mark_pause() ||
3691           g1_policy()->gcs_are_young(), "sanity");
3692 
3693   // We also do not allow mixed GCs during marking.
3694   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3695 
3696   // Record whether this pause is an initial mark. When the current
3697   // thread has completed its logging output and it's safe to signal
3698   // the CM thread, the flag's value in the policy has been reset.
3699   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3700 
3701   // Inner scope for scope based logging, timers, and stats collection
3702   {
3703     if (g1_policy()->during_initial_mark_pause()) {
3704       // We are about to start a marking cycle, so we increment the
3705       // full collection counter.
3706       increment_old_marking_cycles_started();
3707     }
3708     // if the "finer" log level is on, we'll print long statistics information
3709     // in the collector policy code, so let's not print this as the output
3710     // is messy if we do.
3711     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
3712     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3713 
3714     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3715                                 workers()->active_workers() : 1);
3716     double pause_start_sec = os::elapsedTime();
3717     g1_policy()->phase_times()->note_gc_start(active_workers);
3718     bool initial_mark_gc = g1_policy()->during_initial_mark_pause();
3719 
3720     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3721     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3722 
3723     // If the secondary_free_list is not empty, append it to the
3724     // free_list. No need to wait for the cleanup operation to finish;
3725     // the region allocation code will check the secondary_free_list
3726     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3727     // set, skip this step so that the region allocation code has to
3728     // get entries from the secondary_free_list.
3729     if (!G1StressConcRegionFreeing) {
3730       append_secondary_free_list_if_not_empty_with_lock();
3731     }
3732 
3733     assert(check_young_list_well_formed(),
3734       "young list should be well formed");
3735 
3736     // Don't dynamically change the number of GC threads this early.  A value of
3737     // 0 is used to indicate serial work.  When parallel work is done,
3738     // it will be set.
3739 
3740     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3741       IsGCActiveMark x;
3742 
3743       gc_prologue(false);
3744       increment_total_collections(false /* full gc */);
3745       increment_gc_time_stamp();
3746 
3747       verify_before_gc();
3748 
3749       COMPILER2_PRESENT(DerivedPointerTable::clear());
3750 
3751       // Please see comment in g1CollectedHeap.hpp and
3752       // G1CollectedHeap::ref_processing_init() to see how
3753       // reference processing currently works in G1.
3754 
3755       // Enable discovery in the STW reference processor
3756       ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3757                                             true /*verify_no_refs*/);
3758 
3759       {
3760         // We want to temporarily turn off discovery by the
3761         // CM ref processor, if necessary, and turn it back on
3762         // on again later if we do. Using a scoped
3763         // NoRefDiscovery object will do this.
3764         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3765 
3766         // Forget the current alloc region (we might even choose it to be part
3767         // of the collection set!).
3768         release_mutator_alloc_region();
3769 
3770         // We should call this after we retire the mutator alloc
3771         // region(s) so that all the ALLOC / RETIRE events are generated
3772         // before the start GC event.
3773         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3774 
3775         // This timing is only used by the ergonomics to handle our pause target.
3776         // It is unclear why this should not include the full pause. We will
3777         // investigate this in CR 7178365.
3778         //
3779         // Preserving the old comment here if that helps the investigation:
3780         //
3781         // The elapsed time induced by the start time below deliberately elides
3782         // the possible verification above.
3783         double sample_start_time_sec = os::elapsedTime();
3784         size_t start_used_bytes = used();
3785 
3786 #if YOUNG_LIST_VERBOSE
3787         gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
3788         _young_list->print();
3789         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3790 #endif // YOUNG_LIST_VERBOSE
3791 
3792         g1_policy()->record_collection_pause_start(sample_start_time_sec,
3793                                                    start_used_bytes);
3794 
3795         double scan_wait_start = os::elapsedTime();
3796         // We have to wait until the CM threads finish scanning the
3797         // root regions as it's the only way to ensure that all the
3798         // objects on them have been correctly scanned before we start
3799         // moving them during the GC.
3800         bool waited = _cm->root_regions()->wait_until_scan_finished();
3801         double wait_time_ms = 0.0;
3802         if (waited) {
3803           double scan_wait_end = os::elapsedTime();
3804           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3805         }
3806         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3807 
3808 #if YOUNG_LIST_VERBOSE
3809         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3810         _young_list->print();
3811 #endif // YOUNG_LIST_VERBOSE
3812 
3813         if (g1_policy()->during_initial_mark_pause()) {
3814           concurrent_mark()->checkpointRootsInitialPre();
3815         }
3816 
3817 #if YOUNG_LIST_VERBOSE
3818         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3819         _young_list->print();
3820         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3821 #endif // YOUNG_LIST_VERBOSE
3822 
3823         g1_policy()->finalize_cset(target_pause_time_ms);
3824 
3825         _cm->note_start_of_gc();
3826         // We should not verify the per-thread SATB buffers given that
3827         // we have not filtered them yet (we'll do so during the
3828         // GC). We also call this after finalize_cset() to
3829         // ensure that the CSet has been finalized.
3830         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3831                                  true  /* verify_enqueued_buffers */,
3832                                  false /* verify_thread_buffers */,
3833                                  true  /* verify_fingers */);
3834 
3835         if (_hr_printer.is_active()) {
3836           HeapRegion* hr = g1_policy()->collection_set();
3837           while (hr != NULL) {
3838             G1HRPrinter::RegionType type;
3839             if (!hr->is_young()) {
3840               type = G1HRPrinter::Old;
3841             } else if (hr->is_survivor()) {
3842               type = G1HRPrinter::Survivor;
3843             } else {
3844               type = G1HRPrinter::Eden;
3845             }
3846             _hr_printer.cset(hr);
3847             hr = hr->next_in_collection_set();
3848           }
3849         }
3850 
3851 #ifdef ASSERT
3852         VerifyCSetClosure cl;
3853         collection_set_iterate(&cl);
3854 #endif // ASSERT
3855 
3856         setup_surviving_young_words();
3857 
3858         // Initialize the GC alloc regions.
3859         init_gc_alloc_regions();
3860 
3861         // Actually do the work...
3862         evacuate_collection_set();
3863 
3864         // We do this to mainly verify the per-thread SATB buffers
3865         // (which have been filtered by now) since we didn't verify
3866         // them earlier. No point in re-checking the stacks / enqueued
3867         // buffers given that the CSet has not changed since last time
3868         // we checked.
3869         _cm->verify_no_cset_oops(false /* verify_stacks */,
3870                                  false /* verify_enqueued_buffers */,
3871                                  true  /* verify_thread_buffers */,
3872                                  true  /* verify_fingers */);
3873 
3874         free_collection_set(g1_policy()->collection_set());
3875         g1_policy()->clear_collection_set();
3876 
3877         cleanup_surviving_young_words();
3878 
3879         // Start a new incremental collection set for the next pause.
3880         g1_policy()->start_incremental_cset_building();
3881 
3882         // Clear the _cset_fast_test bitmap in anticipation of adding
3883         // regions to the incremental collection set for the next
3884         // evacuation pause.
3885         clear_cset_fast_test();
3886 
3887         _young_list->reset_sampled_info();
3888 
3889         // Don't check the whole heap at this point as the
3890         // GC alloc regions from this pause have been tagged
3891         // as survivors and moved on to the survivor list.
3892         // Survivor regions will fail the !is_young() check.
3893         assert(check_young_list_empty(false /* check_heap */),
3894           "young list should be empty");
3895 
3896 #if YOUNG_LIST_VERBOSE
3897         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
3898         _young_list->print();
3899 #endif // YOUNG_LIST_VERBOSE
3900 
3901         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3902                                             _young_list->first_survivor_region(),
3903                                             _young_list->last_survivor_region());
3904 
3905         _young_list->reset_auxilary_lists();
3906 
3907         if (evacuation_failed()) {
3908           _summary_bytes_used = recalculate_used();
3909         } else {
3910           // The "used" of the the collection set have already been subtracted
3911           // when they were freed.  Add in the bytes evacuated.
3912           _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
3913         }
3914 
3915         if (g1_policy()->during_initial_mark_pause()) {
3916           // We have to do this before we notify the CM threads that
3917           // they can start working to make sure that all the
3918           // appropriate initialization is done on the CM object.
3919           concurrent_mark()->checkpointRootsInitialPost();
3920           set_marking_started();
3921           // Note that we don't actually trigger the CM thread at
3922           // this point. We do that later when we're sure that
3923           // the current thread has completed its logging output.
3924         }
3925 
3926         allocate_dummy_regions();
3927 
3928 #if YOUNG_LIST_VERBOSE
3929         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3930         _young_list->print();
3931         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3932 #endif // YOUNG_LIST_VERBOSE
3933 
3934         init_mutator_alloc_region();
3935 
3936         {
3937           size_t expand_bytes = g1_policy()->expansion_amount();
3938           if (expand_bytes > 0) {
3939             size_t bytes_before = capacity();
3940             // No need for an ergo verbose message here,
3941             // expansion_amount() does this when it returns a value > 0.
3942             if (!expand(expand_bytes)) {
3943               // We failed to expand the heap so let's verify that
3944               // committed/uncommitted amount match the backing store
3945               assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
3946               assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
3947             }
3948           }
3949         }
3950 
3951         // We redo the verification, but now with respect to the new CSet
3952         // which has just been initialized after the previous CSet was freed.
3953         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3954                                  true  /* verify_enqueued_buffers */,
3955                                  true  /* verify_thread_buffers */,
3956                                  true  /* verify_fingers */);
3957         _cm->note_end_of_gc();
3958 
3959         // This timing is only used by the ergonomics to handle our pause target.
3960         // It is unclear why this should not include the full pause. We will
3961         // investigate this in CR 7178365.
3962         double sample_end_time_sec = os::elapsedTime();
3963         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3964         g1_policy()->record_collection_pause_end(pause_time_ms);
3965 
3966         MemoryService::track_memory_usage();
3967 
3968         // In prepare_for_verify() below we'll need to scan the deferred
3969         // update buffers to bring the RSets up-to-date if
3970         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3971         // the update buffers we'll probably need to scan cards on the
3972         // regions we just allocated to (i.e., the GC alloc
3973         // regions). However, during the last GC we called
3974         // set_saved_mark() on all the GC alloc regions, so card
3975         // scanning might skip the [saved_mark_word()...top()] area of
3976         // those regions (i.e., the area we allocated objects into
3977         // during the last GC). But it shouldn't. Given that
3978         // saved_mark_word() is conditional on whether the GC time stamp
3979         // on the region is current or not, by incrementing the GC time
3980         // stamp here we invalidate all the GC time stamps on all the
3981         // regions and saved_mark_word() will simply return top() for
3982         // all the regions. This is a nicer way of ensuring this rather
3983         // than iterating over the regions and fixing them. In fact, the
3984         // GC time stamp increment here also ensures that
3985         // saved_mark_word() will return top() between pauses, i.e.,
3986         // during concurrent refinement. So we don't need the
3987         // is_gc_active() check to decide which top to use when
3988         // scanning cards (see CR 7039627).
3989         increment_gc_time_stamp();
3990 
3991         verify_after_gc();
3992 
3993         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3994         ref_processor_stw()->verify_no_references_recorded();
3995 
3996         // CM reference discovery will be re-enabled if necessary.
3997       }
3998 
3999       // We should do this after we potentially expand the heap so
4000       // that all the COMMIT events are generated before the end GC
4001       // event, and after we retire the GC alloc regions so that all
4002       // RETIRE events are generated before the end GC event.
4003       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4004 
4005       if (mark_in_progress()) {
4006         concurrent_mark()->update_g1_committed();
4007       }
4008 
4009 #ifdef TRACESPINNING
4010       ParallelTaskTerminator::print_termination_counts();
4011 #endif
4012 
4013       gc_epilogue(false);
4014 
4015       if (G1Log::fine()) {
4016         if (PrintGCTimeStamps) {
4017           gclog_or_tty->stamp();
4018           gclog_or_tty->print(": ");
4019         }
4020 
4021         GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
4022           .append(g1_policy()->gcs_are_young() ? " (young)" : " (mixed)")
4023           .append(initial_mark_gc ? " (initial-mark)" : "");
4024 
4025         double pause_time_sec = os::elapsedTime() - pause_start_sec;
4026 
4027         if (G1Log::finer()) {
4028           if (evacuation_failed()) {
4029             gc_cause_str.append(" (to-space exhausted)");
4030           }
4031           gclog_or_tty->print_cr("[%s, %3.7f secs]", (const char*)gc_cause_str, pause_time_sec);
4032           g1_policy()->phase_times()->note_gc_end();
4033           g1_policy()->phase_times()->print(pause_time_sec);
4034           g1_policy()->print_detailed_heap_transition();
4035         } else {
4036           if (evacuation_failed()) {
4037             gc_cause_str.append("--");
4038           }
4039           gclog_or_tty->print("[%s", (const char*)gc_cause_str);
4040           g1_policy()->print_heap_transition();
4041           gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
4042         }
4043       }
4044     }
4045 
4046     // It is not yet safe to tell the concurrent mark thread to
4047     // start as we have some optional output below. We don't want the
4048     // output from the concurrent mark thread interfering with this
4049     // logging output either.
4050 
4051     _hrs.verify_optional();
4052     verify_region_sets_optional();
4053 
4054     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4055     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4056 
4057     print_heap_after_gc();
4058 
4059     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4060     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4061     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4062     // before any GC notifications are raised.
4063     g1mm()->update_sizes();
4064   }
4065 
4066   if (G1SummarizeRSetStats &&
4067       (G1SummarizeRSetStatsPeriod > 0) &&
4068       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
4069     g1_rem_set()->print_summary_info();
4070   }
4071 
4072   // It should now be safe to tell the concurrent mark thread to start
4073   // without its logging output interfering with the logging output
4074   // that came from the pause.
4075 
4076   if (should_start_conc_mark) {
4077     // CAUTION: after the doConcurrentMark() call below,
4078     // the concurrent marking thread(s) could be running
4079     // concurrently with us. Make sure that anything after
4080     // this point does not assume that we are the only GC thread
4081     // running. Note: of course, the actual marking work will
4082     // not start until the safepoint itself is released in
4083     // ConcurrentGCThread::safepoint_desynchronize().
4084     doConcurrentMark();
4085   }
4086 
4087   return true;
4088 }
4089 
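// Returns the desired PLAB size, in HeapWords, for the given allocation
// purpose, based on the corresponding PLAB statistics.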
4090 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
4091 {
4092   size_t gclab_word_size;
4093   switch (purpose) {
4094     case GCAllocForSurvived:
4095       gclab_word_size = _survivor_plab_stats.desired_plab_sz();
4096       break;
4097     case GCAllocForTenured:
4098       gclab_word_size = _old_plab_stats.desired_plab_sz();
4099       break;
4100     default:
4101       assert(false, "unknown GCAllocPurpose");
4102       gclab_word_size = _old_plab_stats.desired_plab_sz();
4103       break;
4104   }
4105 
4106   // Prevent humongous PLAB sizes for two reasons:
4107   // * PLABs are allocated using a similar path to oops, but should
4108   //   never be in a humongous region
4109   // * Allowing humongous PLABs needlessly churns the region free lists
4110   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
4111 }
4112 
4113 void G1CollectedHeap::init_mutator_alloc_region() {
4114   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
4115   _mutator_alloc_region.init();
4116 }
4117 
4118 void G1CollectedHeap::release_mutator_alloc_region() {
4119   _mutator_alloc_region.release();
4120   assert(_mutator_alloc_region.get() == NULL, "post-condition");
4121 }
4122 
4123 void G1CollectedHeap::init_gc_alloc_regions() {
4124   assert_at_safepoint(true /* should_be_vm_thread */);
4125 
4126   _survivor_gc_alloc_region.init();
4127   _old_gc_alloc_region.init();
4128   HeapRegion* retained_region = _retained_old_gc_alloc_region;
4129   _retained_old_gc_alloc_region = NULL;
4130 
4131   // We will discard the current GC alloc region if:
4132   // a) it's in the collection set (it can happen!),
4133   // b) it's already full (no point in using it),
4134   // c) it's empty (this means that it was emptied during
4135   // a cleanup and it should be on the free list now), or
4136   // d) it's humongous (this means that it was emptied
4137   // during a cleanup and was added to the free list, but
4138   // has been subsequently used to allocate a humongous
4139   // object that may be less than the region size).
4140   if (retained_region != NULL &&
4141       !retained_region->in_collection_set() &&
4142       retained_region->top() != retained_region->end() &&
4143       !retained_region->is_empty() &&
4144       !retained_region->isHumongous()) {
4145     retained_region->set_saved_mark();
4146     // The retained region was added to the old region set when it was
4147     // retired. We have to remove it now, since we don't allow regions
4148     // we allocate to in the region sets. We'll re-add it later, when
4149     // it's retired again.
4150     _old_set.remove(retained_region);
4151     bool during_im = g1_policy()->during_initial_mark_pause();
4152     retained_region->note_start_of_copying(during_im);
4153     _old_gc_alloc_region.set(retained_region);
4154     _hr_printer.reuse(retained_region);
4155   }
4156 }
4157 
4158 void G1CollectedHeap::release_gc_alloc_regions() {
4159   _survivor_gc_alloc_region.release();
4160   // If we have an old GC alloc region to release, we'll save it in
4161   // _retained_old_gc_alloc_region. If we don't,
4162   // _retained_old_gc_alloc_region will become NULL. This is what we
4163   // want either way, so there is no reason to check explicitly for
4164   // either condition.
4165   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
4166 
4167   if (ResizePLAB) {
4168     _survivor_plab_stats.adjust_desired_plab_sz();
4169     _old_plab_stats.adjust_desired_plab_sz();
4170   }
4171 }
4172 
4173 void G1CollectedHeap::abandon_gc_alloc_regions() {
4174   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
4175   assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
4176   _retained_old_gc_alloc_region = NULL;
4177 }
4178 
4179 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
4180   _drain_in_progress = false;
4181   set_evac_failure_closure(cl);
4182   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
4183 }
4184 
4185 void G1CollectedHeap::finalize_for_evac_failure() {
4186   assert(_evac_failure_scan_stack != NULL &&
4187          _evac_failure_scan_stack->length() == 0,
4188          "Postcondition");
4189   assert(!_drain_in_progress, "Postcondition");
4190   delete _evac_failure_scan_stack;
4191   _evac_failure_scan_stack = NULL;
4192 }
4193 
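// Called after an evacuation failure: runs G1ParRemoveSelfForwardPtrsTask
// (in parallel when possible) over the collection set to undo the
// self-forwarding pointers, resets the region claim values, and restores
// any object mark words that were preserved.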
4194 void G1CollectedHeap::remove_self_forwarding_pointers() {
4195   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4196 
4197   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
4198 
4199   if (G1CollectedHeap::use_parallel_gc_threads()) {
4200     set_par_threads();
4201     workers()->run_task(&rsfp_task);
4202     set_par_threads(0);
4203   } else {
4204     rsfp_task.work(0);
4205   }
4206 
4207   assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
4208 
4209   // Reset the claim values in the regions in the collection set.
4210   reset_cset_heap_region_claim_values();
4211 
4212   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4213 
4214   // Now restore saved marks, if any.
4215   if (_objs_with_preserved_marks != NULL) {
4216     assert(_preserved_marks_of_objs != NULL, "Both or none.");
4217     guarantee(_objs_with_preserved_marks->length() ==
4218               _preserved_marks_of_objs->length(), "Both or none.");
4219     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
4220       oop obj   = _objs_with_preserved_marks->at(i);
4221       markOop m = _preserved_marks_of_objs->at(i);
4222       obj->set_mark(m);
4223     }
4224 
4225     // Delete the preserved marks growable arrays (allocated on the C heap).
4226     delete _objs_with_preserved_marks;
4227     delete _preserved_marks_of_objs;
4228     _objs_with_preserved_marks = NULL;
4229     _preserved_marks_of_objs = NULL;
4230   }
4231 }
4232 
4233 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4234   _evac_failure_scan_stack->push(obj);
4235 }
4236 
4237 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4238   assert(_evac_failure_scan_stack != NULL, "precondition");
4239 
4240   while (_evac_failure_scan_stack->length() > 0) {
4241      oop obj = _evac_failure_scan_stack->pop();
4242      _evac_failure_closure->set_region(heap_region_containing(obj));
4243      obj->oop_iterate_backwards(_evac_failure_closure);
4244   }
4245 }
4246 
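// Attempt to forward 'old' to itself atomically. If this thread wins the
// race it records the evacuation failure (using the supplied closure to
// drain the evac-failure scan stack) and returns 'old'; otherwise it
// returns the forwarding pointer installed by the winning thread.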
4247 oop
4248 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
4249                                                oop old) {
4250   assert(obj_in_cs(old),
4251          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
4252                  (HeapWord*) old));
4253   markOop m = old->mark();
4254   oop forward_ptr = old->forward_to_atomic(old);
4255   if (forward_ptr == NULL) {
4256     // Forward-to-self succeeded.
4257 
4258     if (_evac_failure_closure != cl) {
4259       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4260       assert(!_drain_in_progress,
4261              "Should only be true while someone holds the lock.");
4262       // Set the global evac-failure closure to the current thread's.
4263       assert(_evac_failure_closure == NULL, "Or locking has failed.");
4264       set_evac_failure_closure(cl);
4265       // Now do the common part.
4266       handle_evacuation_failure_common(old, m);
4267       // Reset to NULL.
4268       set_evac_failure_closure(NULL);
4269     } else {
4270       // The lock is already held, and this is recursive.
4271       assert(_drain_in_progress, "This should only be the recursive case.");
4272       handle_evacuation_failure_common(old, m);
4273     }
4274     return old;
4275   } else {
4276     // Forward-to-self failed. Either someone else managed to allocate
4277     // space for this object (old != forward_ptr) or they beat us in
4278     // self-forwarding it (old == forward_ptr).
4279     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
4280            err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
4281                    "should not be in the CSet",
4282                    (HeapWord*) old, (HeapWord*) forward_ptr));
4283     return forward_ptr;
4284   }
4285 }
4286 
4287 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
4288   set_evacuation_failed(true);
4289 
4290   preserve_mark_if_necessary(old, m);
4291 
4292   HeapRegion* r = heap_region_containing(old);
4293   if (!r->evacuation_failed()) {
4294     r->set_evacuation_failed(true);
4295     _hr_printer.evac_failure(r);
4296   }
4297 
4298   push_on_evac_failure_scan_stack(old);
4299 
4300   if (!_drain_in_progress) {
4301     // prevent recursion in copy_to_survivor_space()
4302     _drain_in_progress = true;
4303     drain_evac_failure_scan_stack();
4304     _drain_in_progress = false;
4305   }
4306 }
4307 
4308 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4309   assert(evacuation_failed(), "Oversaving!");
4310   // We want to call the "for_promotion_failure" version only in the
4311   // case of a promotion failure.
4312   if (m->must_be_preserved_for_promotion_failure(obj)) {
4313     if (_objs_with_preserved_marks == NULL) {
4314       assert(_preserved_marks_of_objs == NULL, "Both or none.");
4315       _objs_with_preserved_marks =
4316         new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
4317       _preserved_marks_of_objs =
4318         new (ResourceObj::C_HEAP, mtGC) GrowableArray<markOop>(40, true);
4319     }
4320     _objs_with_preserved_marks->push(obj);
4321     _preserved_marks_of_objs->push(m);
4322   }
4323 }
4324 
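// GC-time allocation outside the PLABs: try the space implied by 'purpose'
// first and fall back to the other space if that attempt fails.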
4325 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
4326                                                   size_t word_size) {
4327   if (purpose == GCAllocForSurvived) {
4328     HeapWord* result = survivor_attempt_allocation(word_size);
4329     if (result != NULL) {
4330       return result;
4331     } else {
4332       // Let's try to allocate in the old gen in case we can fit the
4333       // object there.
4334       return old_attempt_allocation(word_size);
4335     }
4336   } else {
4337     assert(purpose == GCAllocForTenured, "sanity");
4338     HeapWord* result = old_attempt_allocation(word_size);
4339     if (result != NULL) {
4340       return result;
4341     } else {
4342       // Let's try to allocate in the survivors in case we can fit the
4343       // object there.
4344       return survivor_attempt_allocation(word_size);
4345     }
4346   }
4347 
4348   ShouldNotReachHere();
4349   // Trying to keep some compilers happy.
4350   return NULL;
4351 }
4352 
4353 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4354   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
4355 
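// Per-worker GC state: the worker's task queue, dirty card queue,
// survivor/tenured PLABs, and the (padded) surviving-young-words counts.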
4356 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
4357   : _g1h(g1h),
4358     _refs(g1h->task_queue(queue_num)),
4359     _dcq(&g1h->dirty_card_queue_set()),
4360     _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
4361     _g1_rem(g1h->g1_rem_set()),
4362     _hash_seed(17), _queue_num(queue_num),
4363     _term_attempts(0),
4364     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4365     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4366     _age_table(false),
4367     _strong_roots_time(0), _term_time(0),
4368     _alloc_buffer_waste(0), _undo_waste(0) {
4369   // We allocate G1YoungSurvRateNumRegions + 1 entries, since
4370   // we "sacrifice" entry 0 to keep track of surviving bytes for
4371   // non-young regions (where the age is -1)
4372   // We also add a few elements at the beginning and at the end in
4373   // an attempt to eliminate cache contention
4374   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
4375   uint array_length = PADDING_ELEM_NUM +
4376                       real_length +
4377                       PADDING_ELEM_NUM;
4378   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
4379   if (_surviving_young_words_base == NULL)
4380     vm_exit_out_of_memory(array_length * sizeof(size_t),
4381                           "Not enough space for young surv histo.");
4382   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
4383   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
4384 
4385   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
4386   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
4387 
4388   _start = os::elapsedTime();
4389 }
4390 
4391 void
4392 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
4393 {
4394   st->print_raw_cr("GC Termination Stats");
4395   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
4396                    " ------waste (KiB)------");
4397   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
4398                    "  total   alloc    undo");
4399   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
4400                    " ------- ------- -------");
4401 }
4402 
4403 void
4404 G1ParScanThreadState::print_termination_stats(int i,
4405                                               outputStream* const st) const
4406 {
4407   const double elapsed_ms = elapsed_time() * 1000.0;
4408   const double s_roots_ms = strong_roots_time() * 1000.0;
4409   const double term_ms    = term_time() * 1000.0;
4410   st->print_cr("%3d %9.2f %9.2f %6.2f "
4411                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4412                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4413                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
4414                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
4415                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
4416                alloc_buffer_waste() * HeapWordSize / K,
4417                undo_waste() * HeapWordSize / K);
4418 }
4419 
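// Debug-only sanity checks for queued references: partial-array-masked
// entries must point into the collection set, plain entries into the
// G1 reserved heap.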
4420 #ifdef ASSERT
4421 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
4422   assert(ref != NULL, "invariant");
4423   assert(UseCompressedOops, "sanity");
4424   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
4425   oop p = oopDesc::load_decode_heap_oop(ref);
4426   assert(_g1h->is_in_g1_reserved(p),
4427          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4428   return true;
4429 }
4430 
4431 bool G1ParScanThreadState::verify_ref(oop* ref) const {
4432   assert(ref != NULL, "invariant");
4433   if (has_partial_array_mask(ref)) {
4434     // Must be in the collection set--it's already been copied.
4435     oop p = clear_partial_array_mask(ref);
4436     assert(_g1h->obj_in_cs(p),
4437            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4438   } else {
4439     oop p = oopDesc::load_decode_heap_oop(ref);
4440     assert(_g1h->is_in_g1_reserved(p),
4441            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4442   }
4443   return true;
4444 }
4445 
4446 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4447   if (ref.is_narrow()) {
4448     return verify_ref((narrowOop*) ref);
4449   } else {
4450     return verify_ref((oop*) ref);
4451   }
4452 }
4453 #endif // ASSERT
4454 
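// Drain this worker's queues. The overflow stack is drained first so that
// other workers can steal from the bounded local queue; the loop repeats
// because processing an entry may push new ones.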
4455 void G1ParScanThreadState::trim_queue() {
4456   assert(_evac_cl != NULL, "not set");
4457   assert(_evac_failure_cl != NULL, "not set");
4458   assert(_partial_scan_cl != NULL, "not set");
4459 
4460   StarTask ref;
4461   do {
4462     // Drain the overflow stack first, so other threads can steal.
4463     while (refs()->pop_overflow(ref)) {
4464       deal_with_reference(ref);
4465     }
4466 
4467     while (refs()->pop_local(ref)) {
4468       deal_with_reference(ref);
4469     }
4470   } while (!refs()->is_empty());
4471 }
4472 
4473 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4474                                      G1ParScanThreadState* par_scan_state) :
4475   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4476   _par_scan_state(par_scan_state),
4477   _worker_id(par_scan_state->queue_num()),
4478   _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
4479   _mark_in_progress(_g1->mark_in_progress()) { }
4480 
4481 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4482 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
4483 #ifdef ASSERT
4484   HeapRegion* hr = _g1->heap_region_containing(obj);
4485   assert(hr != NULL, "sanity");
4486   assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4487 #endif // ASSERT
4488 
4489   // We know that the object is not moving so it's safe to read its size.
4490   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4491 }
4492 
4493 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4494 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4495   ::mark_forwarded_object(oop from_obj, oop to_obj) {
4496 #ifdef ASSERT
4497   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4498   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4499   assert(from_obj != to_obj, "should not be self-forwarded");
4500 
4501   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
4502   assert(from_hr != NULL, "sanity");
4503   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4504 
4505   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4506   assert(to_hr != NULL, "sanity");
4507   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4508 #endif // ASSERT
4509 
4510   // The object might be in the process of being copied by another
4511   // worker so we cannot trust that its to-space image is
4512   // well-formed. So we have to read its size from its from-space
4513   // image which we know should not be changing.
4514   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4515 }
4516 
4517 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4518 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4519   ::copy_to_survivor_space(oop old) {
4520   size_t word_sz = old->size();
4521   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4522   // +1 to make the -1 indexes valid...
4523   int       young_index = from_region->young_index_in_cset()+1;
4524   assert( (from_region->is_young() && young_index >  0) ||
4525          (!from_region->is_young() && young_index == 0), "invariant" );
4526   G1CollectorPolicy* g1p = _g1->g1_policy();
4527   markOop m = old->mark();
4528   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4529                                            : m->age();
4530   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4531                                                              word_sz);
4532   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4533 #ifndef PRODUCT
4534   // Should this evacuation fail?
4535   if (_g1->evacuation_should_fail()) {
4536     if (obj_ptr != NULL) {
4537       _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4538       obj_ptr = NULL;
4539     }
4540   }
4541 #endif // !PRODUCT
4542 
4543   if (obj_ptr == NULL) {
4544     // This will either forward-to-self, or detect that someone else has
4545     // installed a forwarding pointer.
4546     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4547     return _g1->handle_evacuation_failure_par(cl, old);
4548   }
4549 
4550   oop obj = oop(obj_ptr);
4551 
4552   // We're going to allocate linearly, so might as well prefetch ahead.
4553   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4554 
4555   oop forward_ptr = old->forward_to_atomic(obj);
4556   if (forward_ptr == NULL) {
4557     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4558     if (g1p->track_object_age(alloc_purpose)) {
4559       // We could simply do obj->incr_age(). However, this causes a
4560       // performance issue. obj->incr_age() will first check whether
4561       // the object has a displaced mark by checking its mark word;
4562       // getting the mark word from the new location of the object
4563       // stalls. So, given that we already have the mark word and we
4564       // are about to install it anyway, it's better to increase the
4565       // age on the mark word, when the object does not have a
4566       // displaced mark word. We're not expecting many objects to have
4567       // a displaced mark word, so that case is not optimized
4568       // further (it could be...) and we simply call obj->incr_age().
4569 
4570       if (m->has_displaced_mark_helper()) {
4571         // in this case, we have to install the mark word first,
4572         // otherwise obj looks to be forwarded (the old mark word,
4573         // which contains the forward pointer, was copied)
4574         obj->set_mark(m);
4575         obj->incr_age();
4576       } else {
4577         m = m->incr_age();
4578         obj->set_mark(m);
4579       }
4580       _par_scan_state->age_table()->add(obj, word_sz);
4581     } else {
4582       obj->set_mark(m);
4583     }
4584 
4585     size_t* surv_young_words = _par_scan_state->surviving_young_words();
4586     surv_young_words[young_index] += word_sz;
4587 
4588     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4589       // We keep track of the next start index in the length field of
4590       // the to-space object. The actual length can be found in the
4591       // length field of the from-space object.
4592       arrayOop(obj)->set_length(0);
4593       oop* old_p = set_partial_array_mask(old);
4594       _par_scan_state->push_on_queue(old_p);
4595     } else {
4596       // No point in using the slower heap_region_containing() method,
4597       // given that we know obj is in the heap.
4598       _scanner.set_region(_g1->heap_region_containing_raw(obj));
4599       obj->oop_iterate_backwards(&_scanner);
4600     }
4601   } else {
4602     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4603     obj = forward_ptr;
4604   }
4605   return obj;
4606 }
4607 
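// Klass barrier: if the copied object ended up in a young region, record
// that the klass currently being scanned has modified oops so that it is
// revisited.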
4608 template <class T>
4609 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4610   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4611     _scanned_klass->record_modified_oops();
4612   }
4613 }
4614 
4615 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4616 template <class T>
4617 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4618 ::do_oop_work(T* p) {
4619   oop obj = oopDesc::load_decode_heap_oop(p);
4620   assert(barrier != G1BarrierRS || obj != NULL,
4621          "Precondition: G1BarrierRS implies obj is non-NULL");
4622 
4623   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4624 
4625   // Here the NULL check is implicit in the in_cset_fast_test() call below.
4626   if (_g1->in_cset_fast_test(obj)) {
4627     oop forwardee;
4628     if (obj->is_forwarded()) {
4629       forwardee = obj->forwardee();
4630     } else {
4631       forwardee = copy_to_survivor_space(obj);
4632     }
4633     assert(forwardee != NULL, "forwardee should not be NULL");
4634     oopDesc::encode_store_heap_oop(p, forwardee);
4635     if (do_mark_object && forwardee != obj) {
4636       // If the object is self-forwarded we don't need to explicitly
4637       // mark it, the evacuation failure protocol will do so.
4638       mark_forwarded_object(obj, forwardee);
4639     }
4640 
4641     // When scanning the RS, we only care about objs in CS.
4642     if (barrier == G1BarrierRS) {
4643       _par_scan_state->update_rs(_from, p, _worker_id);
4644     } else if (barrier == G1BarrierKlass) {
4645       do_klass_barrier(p, forwardee);
4646     }
4647   } else {
4648     // The object is not in collection set. If we're a root scanning
4649     // closure during an initial mark pause (i.e. do_mark_object will
4650     // be true) then attempt to mark the object.
4651     if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
4652       mark_object(obj);
4653     }
4654   }
4655 
4656   if (barrier == G1BarrierEvac && obj != NULL) {
4657     _par_scan_state->update_rs(_from, p, _worker_id);
4658   }
4659 
4660   if (do_gen_barrier && obj != NULL) {
4661     par_do_barrier(p);
4662   }
4663 }
4664 
4665 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
4666 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4667 
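// Process one chunk of a large object array. The length field of the
// to-space copy records the next start index; if a large remainder is
// left, it is pushed back on the queue before the chunk is scanned.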
4668 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4669   assert(has_partial_array_mask(p), "invariant");
4670   oop from_obj = clear_partial_array_mask(p);
4671 
4672   assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
4673   assert(from_obj->is_objArray(), "must be obj array");
4674   objArrayOop from_obj_array = objArrayOop(from_obj);
4675   // The from-space object contains the real length.
4676   int length                 = from_obj_array->length();
4677 
4678   assert(from_obj->is_forwarded(), "must be forwarded");
4679   oop to_obj                 = from_obj->forwardee();
4680   assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
4681   objArrayOop to_obj_array   = objArrayOop(to_obj);
4682   // We keep track of the next start index in the length field of the
4683   // to-space object.
4684   int next_index             = to_obj_array->length();
4685   assert(0 <= next_index && next_index < length,
4686          err_msg("invariant, next index: %d, length: %d", next_index, length));
4687 
4688   int start                  = next_index;
4689   int end                    = length;
4690   int remainder              = end - start;
4691   // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
4692   if (remainder > 2 * ParGCArrayScanChunk) {
4693     end = start + ParGCArrayScanChunk;
4694     to_obj_array->set_length(end);
4695     // Push the remainder before we process the range in case another
4696     // worker has run out of things to do and can steal it.
4697     oop* from_obj_p = set_partial_array_mask(from_obj);
4698     _par_scan_state->push_on_queue(from_obj_p);
4699   } else {
4700     assert(length == end, "sanity");
4701     // We'll process the final range for this object. Restore the length
4702     // so that the heap remains parsable in case of evacuation failure.
4703     to_obj_array->set_length(end);
4704   }
4705   _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
4706   // Process indexes [start,end). It will also process the header
4707   // along with the first chunk (i.e., the chunk with start == 0).
4708   // Note that at this point the length field of to_obj_array is not
4709   // correct given that we are using it to keep track of the next
4710   // start index. oop_iterate_range() (thankfully!) ignores the length
4711   // field and only relies on the start / end parameters.  It does
4712   // however return the size of the object which will be incorrect. So
4713   // we have to ignore it even if we wanted to use it.
4714   to_obj_array->oop_iterate_range(&_scanner, start, end);
4715 }
4716 
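// Drains the per-worker task queues, stealing from other workers' queues
// when the local ones are empty, until the termination protocol agrees
// that all copying work is done.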
4717 class G1ParEvacuateFollowersClosure : public VoidClosure {
4718 protected:
4719   G1CollectedHeap*              _g1h;
4720   G1ParScanThreadState*         _par_scan_state;
4721   RefToScanQueueSet*            _queues;
4722   ParallelTaskTerminator*       _terminator;
4723 
4724   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
4725   RefToScanQueueSet*      queues()         { return _queues; }
4726   ParallelTaskTerminator* terminator()     { return _terminator; }
4727 
4728 public:
4729   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4730                                 G1ParScanThreadState* par_scan_state,
4731                                 RefToScanQueueSet* queues,
4732                                 ParallelTaskTerminator* terminator)
4733     : _g1h(g1h), _par_scan_state(par_scan_state),
4734       _queues(queues), _terminator(terminator) {}
4735 
4736   void do_void();
4737 
4738 private:
4739   inline bool offer_termination();
4740 };
4741 
4742 bool G1ParEvacuateFollowersClosure::offer_termination() {
4743   G1ParScanThreadState* const pss = par_scan_state();
4744   pss->start_term_time();
4745   const bool res = terminator()->offer_termination();
4746   pss->end_term_time();
4747   return res;
4748 }
4749 
4750 void G1ParEvacuateFollowersClosure::do_void() {
4751   StarTask stolen_task;
4752   G1ParScanThreadState* const pss = par_scan_state();
4753   pss->trim_queue();
4754 
4755   do {
4756     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
4757       assert(pss->verify_task(stolen_task), "sanity");
4758       if (stolen_task.is_narrow()) {
4759         pss->deal_with_reference((narrowOop*) stolen_task);
4760       } else {
4761         pss->deal_with_reference((oop*) stolen_task);
4762       }
4763 
4764       // We've just processed a reference and we might have made
4765       // available new entries on the queues. So we have to make sure
4766       // we drain the queues as necessary.
4767       pss->trim_queue();
4768     }
4769   } while (!offer_termination());
4770 
4771   pss->retire_alloc_buffers();
4772 }
4773 
4774 class G1KlassScanClosure : public KlassClosure {
4775  G1ParCopyHelper* _closure;
4776  bool             _process_only_dirty;
4777  int              _count;
4778  public:
4779   G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
4780       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4781   void do_klass(Klass* klass) {
4782     // If the klass has not been dirtied we know that there are
4783     // no references into the young gen and we can skip it.
4784     if (!_process_only_dirty || klass->has_modified_oops()) {
4785       // Clean the klass since we're going to scavenge all the metadata.
4786       klass->clear_modified_oops();
4787 
4788       // Tell the closure that this klass is the Klass to scavenge
4789       // and is the one to dirty if oops are left pointing into the young gen.
4790       _closure->set_scanned_klass(klass);
4791 
4792       klass->oops_do(_closure);
4793 
4794       _closure->set_scanned_klass(NULL);
4795     }
4796     _count++;
4797   }
4798 };
4799 
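// The gang task for the parallel part of an evacuation pause: each worker
// sets up its scan state and closures, scans the strong roots (marking
// them as well during an initial-mark pause), and then evacuates
// followers until termination.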
4800 class G1ParTask : public AbstractGangTask {
4801 protected:
4802   G1CollectedHeap*       _g1h;
4803   RefToScanQueueSet      *_queues;
4804   ParallelTaskTerminator _terminator;
4805   uint _n_workers;
4806 
4807   Mutex _stats_lock;
4808   Mutex* stats_lock() { return &_stats_lock; }
4809 
4810   size_t getNCards() {
4811     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
4812       / G1BlockOffsetSharedArray::N_bytes;
4813   }
4814 
4815 public:
4816   G1ParTask(G1CollectedHeap* g1h,
4817             RefToScanQueueSet *task_queues)
4818     : AbstractGangTask("G1 collection"),
4819       _g1h(g1h),
4820       _queues(task_queues),
4821       _terminator(0, _queues),
4822       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4823   {}
4824 
4825   RefToScanQueueSet* queues() { return _queues; }
4826 
4827   RefToScanQueue *work_queue(int i) {
4828     return queues()->queue(i);
4829   }
4830 
4831   ParallelTaskTerminator* terminator() { return &_terminator; }
4832 
4833   virtual void set_for_termination(int active_workers) {
4834     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4835     // in the young space (_par_seq_tasks) in the G1 heap
4836     // for SequentialSubTasksDone.
4837     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap,
4838     // both of which need to be set by set_n_termination().
4839     _g1h->SharedHeap::set_n_termination(active_workers);
4840     _g1h->set_n_termination(active_workers);
4841     terminator()->reset_for_reuse(active_workers);
4842     _n_workers = active_workers;
4843   }
4844 
4845   void work(uint worker_id) {
4846     if (worker_id >= _n_workers) return;  // no work needed this round
4847 
4848     double start_time_ms = os::elapsedTime() * 1000.0;
4849     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4850 
4851     {
4852       ResourceMark rm;
4853       HandleMark   hm;
4854 
4855       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
4856 
4857       G1ParScanThreadState            pss(_g1h, worker_id);
4858       G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
4859       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4860       G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
4861 
4862       pss.set_evac_closure(&scan_evac_cl);
4863       pss.set_evac_failure_closure(&evac_failure_cl);
4864       pss.set_partial_scan_closure(&partial_scan_cl);
4865 
4866       G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
4867       G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
4868 
4869       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4870       G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
4871 
4872       bool only_young                 = _g1h->g1_policy()->gcs_are_young();
4873       G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
4874       G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
4875 
4876       OopClosure*                    scan_root_cl = &only_scan_root_cl;
4877       G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
4878 
4879       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4880         // We also need to mark copied objects.
4881         scan_root_cl = &scan_mark_root_cl;
4882         scan_klasses_cl = &scan_mark_klasses_cl_s;
4883       }
4884 
4885       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
4886 
4887       int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
4888 
4889       pss.start_strong_roots();
4890       _g1h->g1_process_strong_roots(/* is scavenging */ true,
4891                                     SharedHeap::ScanningOption(so),
4892                                     scan_root_cl,
4893                                     &push_heap_rs_cl,
4894                                     scan_klasses_cl,
4895                                     worker_id);
4896       pss.end_strong_roots();
4897 
4898       {
4899         double start = os::elapsedTime();
4900         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4901         evac.do_void();
4902         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4903         double term_ms = pss.term_time()*1000.0;
4904         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4905         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4906       }
4907       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4908       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4909 
4910       if (ParallelGCVerbose) {
4911         MutexLocker x(stats_lock());
4912         pss.print_termination_stats(worker_id);
4913       }
4914 
4915       assert(pss.refs()->is_empty(), "should be empty");
4916 
4917       // Close the inner scope so that the ResourceMark and HandleMark
4918       // destructors are executed here and are included as part of the
4919       // "GC Worker Time".
4920     }
4921 
4922     double end_time_ms = os::elapsedTime() * 1000.0;
4923     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4924   }
4925 };
4926 
4927 // *** Common G1 Evacuation Stuff
4928 
4929 // Closures that support the filtering of CodeBlobs scanned during
4930 // external root scanning.
4931 
4932 // Closure applied to reference fields in code blobs (specifically nmethods)
4933 // to determine whether an nmethod contains references that point into
4934 // the collection set. Used as a predicate when walking code roots so
4935 // that only nmethods that point into the collection set are added to the
4936 // 'marked' list.
4937 
4938 class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
4939 
4940   class G1PointsIntoCSOopClosure : public OopClosure {
4941     G1CollectedHeap* _g1;
4942     bool _points_into_cs;
4943   public:
4944     G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
4945       _g1(g1), _points_into_cs(false) { }
4946 
4947     bool points_into_cs() const { return _points_into_cs; }
4948 
4949     template <class T>
4950     void do_oop_nv(T* p) {
4951       if (!_points_into_cs) {
4952         T heap_oop = oopDesc::load_heap_oop(p);
4953         if (!oopDesc::is_null(heap_oop) &&
4954             _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
4955           _points_into_cs = true;
4956         }
4957       }
4958     }
4959 
4960     virtual void do_oop(oop* p)        { do_oop_nv(p); }
4961     virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
4962   };
4963 
4964   G1CollectedHeap* _g1;
4965 
4966 public:
4967   G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
4968     CodeBlobToOopClosure(cl, true), _g1(g1) { }
4969 
4970   virtual void do_code_blob(CodeBlob* cb) {
4971     nmethod* nm = cb->as_nmethod_or_null();
4972     if (nm != NULL && !(nm->test_oops_do_mark())) {
4973       G1PointsIntoCSOopClosure predicate_cl(_g1);
4974       nm->oops_do(&predicate_cl);
4975 
4976       if (predicate_cl.points_into_cs()) {
4977         // At least one of the reference fields or the oop relocations
4978         // in the nmethod points into the collection set. We have to
4979         // 'mark' this nmethod.
4980         // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
4981         // or MarkingCodeBlobClosure::do_code_blob() change.
4982         if (!nm->test_set_oops_do_mark()) {
4983           do_newly_marked_nmethod(nm);
4984         }
4985       }
4986     }
4987   }
4988 };
4989 
4990 // This method is run in a GC worker.
4991 
4992 void
4993 G1CollectedHeap::
4994 g1_process_strong_roots(bool is_scavenging,
4995                         ScanningOption so,
4996                         OopClosure* scan_non_heap_roots,
4997                         OopsInHeapRegionClosure* scan_rs,
4998                         G1KlassScanClosure* scan_klasses,
4999                         int worker_i) {
5000 
5001   // First scan the strong roots
5002   double ext_roots_start = os::elapsedTime();
5003   double closure_app_time_sec = 0.0;
5004 
5005   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
5006 
5007   // Walk the code cache w/o buffering, because StarTask cannot handle
5008   // unaligned oop locations.
5009   G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
5010 
5011   process_strong_roots(false, // no scoping; this is parallel code
5012                        is_scavenging, so,
5013                        &buf_scan_non_heap_roots,
5014                        &eager_scan_code_roots,
5015                        scan_klasses);
5017 
5018   // Now the CM ref_processor roots.
5019   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
5020     // We need to treat the discovered reference lists of the
5021     // concurrent mark ref processor as roots and keep entries
5022     // (which are added by the marking threads) on them live
5023     // until they can be processed at the end of marking.
5024     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
5025   }
5026 
5027   // Finish up any enqueued closure apps (attributed as object copy time).
5028   buf_scan_non_heap_roots.done();
5029 
5030   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
5031 
5032   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
5033 
5034   double ext_root_time_ms =
5035     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
5036 
5037   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
5038 
5039   // During conc marking we have to filter the per-thread SATB buffers
5040   // to make sure we remove any oops into the CSet (which will show up
5041   // as implicitly live).
5042   double satb_filtering_ms = 0.0;
5043   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
5044     if (mark_in_progress()) {
5045       double satb_filter_start = os::elapsedTime();
5046 
5047       JavaThread::satb_mark_queue_set().filter_thread_buffers();
5048 
5049       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
5050     }
5051   }
5052   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
5053 
5054   // Now scan the complement of the collection set.
5055   if (scan_rs != NULL) {
5056     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
5057   }
5058   _process_strong_tasks->all_tasks_completed();
5059 }
5060 
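// Apply the given closures to the weak roots, wrapping the root closure
// in a non-marking CodeBlobToOopClosure for the code cache.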
5061 void
5062 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
5063                                        OopClosure* non_root_closure) {
5064   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
5065   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
5066 }
5067 
5068 // Weak Reference Processing support
5069 
5070 // An always "is_alive" closure that is used to preserve referents.
5071 // If the object is non-null then it's alive.  Used in the preservation
5072 // of referent objects that are pointed to by reference objects
5073 // discovered by the CM ref processor.
5074 class G1AlwaysAliveClosure: public BoolObjectClosure {
5075   G1CollectedHeap* _g1;
5076 public:
5077   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5078   void do_object(oop p) { assert(false, "Do not call."); }
5079   bool do_object_b(oop p) {
5080     if (p != NULL) {
5081       return true;
5082     }
5083     return false;
5084   }
5085 };
5086 
5087 bool G1STWIsAliveClosure::do_object_b(oop p) {
5088   // An object is reachable if it is outside the collection set,
5089   // or is inside and copied.
5090   return !_g1->obj_in_cs(p) || p->is_forwarded();
5091 }
5092 
5093 // Non Copying Keep Alive closure
5094 class G1KeepAliveClosure: public OopClosure {
5095   G1CollectedHeap* _g1;
5096 public:
5097   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5098   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5099   void do_oop(      oop* p) {
5100     oop obj = *p;
5101 
5102     if (_g1->obj_in_cs(obj)) {
5103       assert( obj->is_forwarded(), "invariant" );
5104       *p = obj->forwardee();
5105     }
5106   }
5107 };
5108 
5109 // Copying Keep Alive closure - can be called from both
5110 // serial and parallel code as long as different worker
5111 // threads utilize different G1ParScanThreadState instances
5112 // and different queues.
5113 
5114 class G1CopyingKeepAliveClosure: public OopClosure {
5115   G1CollectedHeap*         _g1h;
5116   OopClosure*              _copy_non_heap_obj_cl;
5117   OopsInHeapRegionClosure* _copy_metadata_obj_cl;
5118   G1ParScanThreadState*    _par_scan_state;
5119 
5120 public:
5121   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5122                             OopClosure* non_heap_obj_cl,
5123                             OopsInHeapRegionClosure* metadata_obj_cl,
5124                             G1ParScanThreadState* pss):
5125     _g1h(g1h),
5126     _copy_non_heap_obj_cl(non_heap_obj_cl),
5127     _copy_metadata_obj_cl(metadata_obj_cl),
5128     _par_scan_state(pss)
5129   {}
5130 
5131   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
5132   virtual void do_oop(      oop* p) { do_oop_work(p); }
5133 
5134   template <class T> void do_oop_work(T* p) {
5135     oop obj = oopDesc::load_decode_heap_oop(p);
5136 
5137     if (_g1h->obj_in_cs(obj)) {
5138       // If the referent object has been forwarded (either copied
5139       // to a new location or to itself in the event of an
5140       // evacuation failure) then we need to update the reference
5141       // field and, if both reference and referent are in the G1
5142       // heap, update the RSet for the referent.
5143       //
5144       // If the referent has not been forwarded then we have to keep
5145       // it alive by policy. Therefore we have to copy the referent.
5146       //
5147       // If the reference field is in the G1 heap then we can push
5148       // on the PSS queue. When the queue is drained (after each
5149       // phase of reference processing) the object and its followers
5150       // will be copied, the reference field set to point to the
5151       // new location, and the RSet updated. Otherwise we need to
5152       // use the non-heap or metadata closures directly to copy
5153       // the referent object and update the pointer, while avoiding
5154       // updating the RSet.
5155 
5156       if (_g1h->is_in_g1_reserved(p)) {
5157         _par_scan_state->push_on_queue(p);
5158       } else {
5159         assert(!ClassLoaderDataGraph::contains((address)p),
5160                err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
5161                        PTR_FORMAT, p));
5162         _copy_non_heap_obj_cl->do_oop(p);
5163       }
5164     }
5165   }
5166 };
5167 
5168 // Serial drain queue closure. Called as the 'complete_gc'
5169 // closure for each discovered list in some of the
5170 // reference processing phases.
5171 
5172 class G1STWDrainQueueClosure: public VoidClosure {
5173 protected:
5174   G1CollectedHeap* _g1h;
5175   G1ParScanThreadState* _par_scan_state;
5176 
5177   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
5178 
5179 public:
5180   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
5181     _g1h(g1h),
5182     _par_scan_state(pss)
5183   { }
5184 
5185   void do_void() {
5186     G1ParScanThreadState* const pss = par_scan_state();
5187     pss->trim_queue();
5188   }
5189 };
5190 
5191 // Parallel Reference Processing closures
5192 
5193 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5194 // processing during G1 evacuation pauses.
5195 
5196 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
5197 private:
5198   G1CollectedHeap*   _g1h;
5199   RefToScanQueueSet* _queues;
5200   FlexibleWorkGang*  _workers;
5201   int                _active_workers;
5202 
5203 public:
5204   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5205                         FlexibleWorkGang* workers,
5206                         RefToScanQueueSet *task_queues,
5207                         int n_workers) :
5208     _g1h(g1h),
5209     _queues(task_queues),
5210     _workers(workers),
5211     _active_workers(n_workers)
5212   {
5213     assert(n_workers > 0, "shouldn't call this otherwise");
5214   }
5215 
5216   // Executes the given task using concurrent marking worker threads.
5217   virtual void execute(ProcessTask& task);
5218   virtual void execute(EnqueueTask& task);
5219 };
5220 
5221 // Gang task for possibly parallel reference processing
5222 
5223 class G1STWRefProcTaskProxy: public AbstractGangTask {
5224   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5225   ProcessTask&     _proc_task;
5226   G1CollectedHeap* _g1h;
5227   RefToScanQueueSet *_task_queues;
5228   ParallelTaskTerminator* _terminator;
5229 
5230 public:
5231   G1STWRefProcTaskProxy(ProcessTask& proc_task,
5232                      G1CollectedHeap* g1h,
5233                      RefToScanQueueSet *task_queues,
5234                      ParallelTaskTerminator* terminator) :
5235     AbstractGangTask("Process reference objects in parallel"),
5236     _proc_task(proc_task),
5237     _g1h(g1h),
5238     _task_queues(task_queues),
5239     _terminator(terminator)
5240   {}
5241 
5242   virtual void work(uint worker_id) {
5243     // The reference processing task executed by a single worker.
5244     ResourceMark rm;
5245     HandleMark   hm;
5246 
5247     G1STWIsAliveClosure is_alive(_g1h);
5248 
5249     G1ParScanThreadState pss(_g1h, worker_id);
5250 
5251     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5252     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5253     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5254 
5255     pss.set_evac_closure(&scan_evac_cl);
5256     pss.set_evac_failure_closure(&evac_failure_cl);
5257     pss.set_partial_scan_closure(&partial_scan_cl);
5258 
5259     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5260     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5261 
5262     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5263     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5264 
5265     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5266     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5267 
5268     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5269       // We also need to mark copied objects.
5270       copy_non_heap_cl = &copy_mark_non_heap_cl;
5271       copy_metadata_cl = &copy_mark_metadata_cl;
5272     }
5273 
5274     // Keep alive closure.
5275     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5276 
5277     // Complete GC closure
5278     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5279 
5280     // Call the reference processing task's work routine.
5281     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5282 
5283     // Note we cannot assert that the refs array is empty here as not all
5284     // of the processing tasks (specifically phase2 - pp2_work) execute
5285     // the complete_gc closure (which ordinarily would drain the queue) so
5286     // the queue may not be empty.
5287   }
5288 };
5289 
5290 // Driver routine for parallel reference processing.
5291 // Creates an instance of the ref processing gang
5292 // task and has the worker threads execute it.
5293 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
5294   assert(_workers != NULL, "Need parallel worker threads.");
5295 
5296   ParallelTaskTerminator terminator(_active_workers, _queues);
5297   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
5298 
5299   _g1h->set_par_threads(_active_workers);
5300   _workers->run_task(&proc_task_proxy);
5301   _g1h->set_par_threads(0);
5302 }
5303 
5304 // Gang task for parallel reference enqueueing.
5305 
5306 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
5307   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5308   EnqueueTask& _enq_task;
5309 
5310 public:
5311   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
5312     AbstractGangTask("Enqueue reference objects in parallel"),
5313     _enq_task(enq_task)
5314   { }
5315 
5316   virtual void work(uint worker_id) {
5317     _enq_task.work(worker_id);
5318   }
5319 };
5320 
5321 // Driver routine for parallel reference enqueueing.
5322 // Creates an instance of the ref enqueueing gang
5323 // task and has the worker threads execute it.
5324 
5325 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
5326   assert(_workers != NULL, "Need parallel worker threads.");
5327 
5328   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5329 
5330   _g1h->set_par_threads(_active_workers);
5331   _workers->run_task(&enq_task_proxy);
5332   _g1h->set_par_threads(0);
5333 }
5334 
5335 // End of weak reference support closures
5336 
5337 // Abstract task used to preserve (i.e. copy) any referent objects
5338 // that are in the collection set and are pointed to by reference
5339 // objects discovered by the CM ref processor.
5340 
5341 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5342 protected:
5343   G1CollectedHeap* _g1h;
5344   RefToScanQueueSet      *_queues;
5345   ParallelTaskTerminator _terminator;
5346   uint _n_workers;
5347 
5348 public:
5349   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
5350     AbstractGangTask("ParPreserveCMReferents"),
5351     _g1h(g1h),
5352     _queues(task_queues),
5353     _terminator(workers, _queues),
5354     _n_workers(workers)
5355   { }
5356 
5357   void work(uint worker_id) {
5358     ResourceMark rm;
5359     HandleMark   hm;
5360 
5361     G1ParScanThreadState            pss(_g1h, worker_id);
5362     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5363     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5364     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5365 
5366     pss.set_evac_closure(&scan_evac_cl);
5367     pss.set_evac_failure_closure(&evac_failure_cl);
5368     pss.set_partial_scan_closure(&partial_scan_cl);
5369 
5370     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5371 
5372 
5373     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5374     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5375 
5376     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5377     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5378 
5379     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5380     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5381 
5382     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5383       // We also need to mark copied objects.
5384       copy_non_heap_cl = &copy_mark_non_heap_cl;
5385       copy_metadata_cl = &copy_mark_metadata_cl;
5386     }
5387 
5388     // Is alive closure
5389     G1AlwaysAliveClosure always_alive(_g1h);
5390 
5391     // Copying keep alive closure. Applied to referent objects that need
5392     // to be copied.
5393     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5394 
5395     ReferenceProcessor* rp = _g1h->ref_processor_cm();
5396 
5397     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5398     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
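    // For example (illustrative numbers only): with 4 workers and a limit of
    // 12 discovered lists, worker 0 visits lists 0, 4 and 8, worker 1 visits
    // lists 1, 5 and 9, and so on, so each list is claimed by exactly one
    // worker.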
5399 
    // limit is set using max_num_q() - which was set using ParallelGCThreads.
    // So worker_id should always be below limit - but assert just in case
    // someone decides to change the worker ids.
    assert(0 <= worker_id && worker_id < limit, "sanity");
    assert(!rp->discovery_is_atomic(), "this code assumes non-atomic (concurrent) discovery");
5405 
    // Select discovered lists [worker_id, worker_id+stride, worker_id+2*stride, ..., limit)
5407     for (uint idx = worker_id; idx < limit; idx += stride) {
5408       DiscoveredList& ref_list = rp->discovered_refs()[idx];
5409 
5410       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5411       while (iter.has_next()) {
5412         // Since discovery is not atomic for the CM ref processor, we
5413         // can see some null referent objects.
5414         iter.load_ptrs(DEBUG_ONLY(true));
5415         oop ref = iter.obj();
5416 
5417         // This will filter nulls.
5418         if (iter.is_referent_alive()) {
5419           iter.make_referent_alive();
5420         }
5421         iter.move_to_next();
5422       }
5423     }
5424 
5425     // Drain the queue - which may cause stealing
5426     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
5427     drain_queue.do_void();
5428     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5429     assert(pss.refs()->is_empty(), "should be");
5430   }
5431 };
5432 
5433 // Weak Reference processing during an evacuation pause (part 1).
5434 void G1CollectedHeap::process_discovered_references() {
5435   double ref_proc_start = os::elapsedTime();
5436 
5437   ReferenceProcessor* rp = _ref_processor_stw;
5438   assert(rp->discovery_enabled(), "should have been enabled");
5439 
5440   // Any reference objects, in the collection set, that were 'discovered'
5441   // by the CM ref processor should have already been copied (either by
5442   // applying the external root copy closure to the discovered lists, or
5443   // by following an RSet entry).
5444   //
5445   // But some of the referents, that are in the collection set, that these
5446   // reference objects point to may not have been copied: the STW ref
5447   // processor would have seen that the reference object had already
5448   // been 'discovered' and would have skipped discovering the reference,
5449   // but would not have treated the reference object as a regular oop.
  // As a result the copy closure would not have been applied to the
5451   // referent object.
5452   //
5453   // We need to explicitly copy these referent objects - the references
5454   // will be processed at the end of remarking.
5455   //
5456   // We also need to do this copying before we process the reference
5457   // objects discovered by the STW ref processor in case one of these
5458   // referents points to another object which is also referenced by an
5459   // object discovered by the STW ref processor.
5460 
5461   uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5462                         workers()->active_workers() : 1);
5463 
5464   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
5465            active_workers == workers()->active_workers(),
5466            "Need to reset active_workers");
5467 
5468   set_par_threads(active_workers);
5469   G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
5470 
5471   if (G1CollectedHeap::use_parallel_gc_threads()) {
5472     workers()->run_task(&keep_cm_referents);
5473   } else {
5474     keep_cm_referents.work(0);
5475   }
5476 
5477   set_par_threads(0);
5478 
5479   // Closure to test whether a referent is alive.
5480   G1STWIsAliveClosure is_alive(this);
5481 
  // Even when parallel reference processing is enabled, the processing
  // of JNI refs is serial and performed by the current thread rather
  // than by a worker. The following PSS will be used for processing
  // JNI refs.
5486 
5487   // Use only a single queue for this PSS.
5488   G1ParScanThreadState pss(this, 0);
5489 
5490   // We do not embed a reference processor in the copying/scanning
5491   // closures while we're actually processing the discovered
5492   // reference objects.
5493   G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
5494   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5495   G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
5496 
5497   pss.set_evac_closure(&scan_evac_cl);
5498   pss.set_evac_failure_closure(&evac_failure_cl);
5499   pss.set_partial_scan_closure(&partial_scan_cl);
5500 
5501   assert(pss.refs()->is_empty(), "pre-condition");
5502 
5503   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5504   G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
5505 
5506   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5507   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5508 
5509   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5510   OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5511 
  if (g1_policy()->during_initial_mark_pause()) {
5513     // We also need to mark copied objects.
5514     copy_non_heap_cl = &copy_mark_non_heap_cl;
5515     copy_metadata_cl = &copy_mark_metadata_cl;
5516   }
5517 
5518   // Keep alive closure.
5519   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
5520 
5521   // Serial Complete GC closure
5522   G1STWDrainQueueClosure drain_queue(this, &pss);
5523 
  // Set up the soft refs policy...
5525   rp->setup_policy(false);
5526 
5527   if (!rp->processing_is_mt()) {
5528     // Serial reference processing...
5529     rp->process_discovered_references(&is_alive,
5530                                       &keep_alive,
5531                                       &drain_queue,
5532                                       NULL);
5533   } else {
5534     // Parallel reference processing
5535     assert(rp->num_q() == active_workers, "sanity");
5536     assert(active_workers <= rp->max_num_q(), "sanity");
5537 
5538     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
5539     rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
5540   }
5541 
5542   // We have completed copying any necessary live referent objects
5543   // (that were not copied during the actual pause) so we can
5544   // retire any active alloc buffers
5545   pss.retire_alloc_buffers();
5546   assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5547 
5548   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5549   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5550 }
5551 
5552 // Weak Reference processing during an evacuation pause (part 2).
5553 void G1CollectedHeap::enqueue_discovered_references() {
5554   double ref_enq_start = os::elapsedTime();
5555 
5556   ReferenceProcessor* rp = _ref_processor_stw;
5557   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5558 
5559   // Now enqueue any remaining on the discovered lists on to
5560   // the pending list.
5561   if (!rp->processing_is_mt()) {
5562     // Serial reference processing...
5563     rp->enqueue_discovered_references();
5564   } else {
5565     // Parallel reference enqueuing
5566 
5567     uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
5568     assert(active_workers == workers()->active_workers(),
5569            "Need to reset active_workers");
5570     assert(rp->num_q() == active_workers, "sanity");
5571     assert(active_workers <= rp->max_num_q(), "sanity");
5572 
5573     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
5574     rp->enqueue_discovered_references(&par_task_executor);
5575   }
5576 
5577   rp->verify_no_references_recorded();
5578   assert(!rp->discovery_enabled(), "should have been disabled");
5579 
5580   // FIXME
5581   // CM's reference processing also cleans up the string and symbol tables.
5582   // Should we do that here also? We could, but it is a serial operation
  // and could significantly increase the pause time.
5584 
5585   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5586   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5587 }
5588 
5589 void G1CollectedHeap::evacuate_collection_set() {
5590   _expand_heap_after_alloc_failure = true;
5591   set_evacuation_failed(false);
5592 
5593   // Should G1EvacuationFailureALot be in effect for this GC?
5594   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5595 
5596   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5597   concurrent_g1_refine()->set_use_cache(false);
5598   concurrent_g1_refine()->clear_hot_cache_claimed_index();
5599 
5600   uint n_workers;
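  // With parallel GC threads, ask the adaptive size policy how many workers
  // to activate for this pause; otherwise fall back to a single worker.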
5601   if (G1CollectedHeap::use_parallel_gc_threads()) {
5602     n_workers =
5603       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5604                                      workers()->active_workers(),
5605                                      Threads::number_of_non_daemon_threads());
    assert(UseDynamicNumberOfGCThreads ||
           n_workers == workers()->total_workers(),
           "If not dynamic should be using all the workers");
5609     workers()->set_active_workers(n_workers);
5610     set_par_threads(n_workers);
5611   } else {
5612     assert(n_par_threads() == 0,
5613            "Should be the original non-parallel value");
5614     n_workers = 1;
5615   }
5616 
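  // G1ParTask does the root scanning and object copying (evacuation) for
  // this pause; the per-worker task queues are used for work stealing.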
5617   G1ParTask g1_par_task(this, _task_queues);
5618 
5619   init_for_evac_failure(NULL);
5620 
5621   rem_set()->prepare_for_younger_refs_iterate(true);
5622 
5623   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5624   double start_par_time_sec = os::elapsedTime();
5625   double end_par_time_sec;
5626 
5627   {
5628     StrongRootsScope srs(this);
5629 
5630     if (G1CollectedHeap::use_parallel_gc_threads()) {
5631       // The individual threads will set their evac-failure closures.
5632       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5633       // These tasks use ShareHeap::_process_strong_tasks
      assert(UseDynamicNumberOfGCThreads ||
             workers()->active_workers() == workers()->total_workers(),
             "If not dynamic should be using all the workers");
5637       workers()->run_task(&g1_par_task);
5638     } else {
5639       g1_par_task.set_for_termination(n_workers);
5640       g1_par_task.work(0);
5641     }
5642     end_par_time_sec = os::elapsedTime();
5643 
5644     // Closing the inner scope will execute the destructor
5645     // for the StrongRootsScope object. We record the current
5646     // elapsed time before closing the scope so that time
5647     // taken for the SRS destructor is NOT included in the
5648     // reported parallel time.
5649   }
5650 
5651   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5652   g1_policy()->phase_times()->record_par_time(par_time_ms);
5653 
5654   double code_root_fixup_time_ms =
5655         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5656   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5657 
5658   set_par_threads(0);
5659 
5660   // Process any discovered reference objects - we have
5661   // to do this _before_ we retire the GC alloc regions
5662   // as we may have to copy some 'reachable' referent
5663   // objects (and their reachable sub-graphs) that were
5664   // not copied during the pause.
5665   process_discovered_references();
5666 
5667   // Weak root processing.
  // Note: when JSR 292 is enabled and code blobs can contain
  // non-perm oops, we will need to process the code blobs
  // here too.
5671   {
5672     G1STWIsAliveClosure is_alive(this);
5673     G1KeepAliveClosure keep_alive(this);
5674     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5675   }
5676 
5677   release_gc_alloc_regions();
5678   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5679 
5680   concurrent_g1_refine()->clear_hot_cache();
5681   concurrent_g1_refine()->set_use_cache(true);
5682 
5683   finalize_for_evac_failure();
5684 
5685   if (evacuation_failed()) {
5686     remove_self_forwarding_pointers();
5687 
5688     // Reset the G1EvacuationFailureALot counters and flags
5689     // Note: the values are reset only when an actual
5690     // evacuation failure occurs.
5691     NOT_PRODUCT(reset_evacuation_should_fail();)
5692   }
5693 
  // Enqueue any references remaining on the STW
5695   // reference processor's discovered lists. We need to do
5696   // this after the card table is cleaned (and verified) as
5697   // the act of enqueuing entries on to the pending list
5698   // will log these updates (and dirty their associated
5699   // cards). We need these updates logged to update any
5700   // RSets.
5701   enqueue_discovered_references();
5702 
5703   if (G1DeferredRSUpdate) {
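    // Re-dirty the cards that were logged during the pause and then hand
    // the completed buffers over to the JavaThread dirty card queue set so
    // that they are eventually processed to update the remembered sets.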
5704     RedirtyLoggedCardTableEntryFastClosure redirty;
5705     dirty_card_queue_set().set_closure(&redirty);
5706     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5707 
5708     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5709     dcq.merge_bufferlists(&dirty_card_queue_set());
5710     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5711   }
5712   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5713 }
5714 
5715 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
5716                                      size_t* pre_used,
5717                                      FreeRegionList* free_list,
5718                                      OldRegionSet* old_proxy_set,
5719                                      HumongousRegionSet* humongous_proxy_set,
5720                                      HRRSCleanupTask* hrrs_cleanup_task,
5721                                      bool par) {
5722   if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
5723     if (hr->isHumongous()) {
5724       assert(hr->startsHumongous(), "we should only see starts humongous");
5725       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
5726     } else {
5727       _old_set.remove_with_proxy(hr, old_proxy_set);
5728       free_region(hr, pre_used, free_list, par);
5729     }
5730   } else {
5731     hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
5732   }
5733 }
5734 
5735 void G1CollectedHeap::free_region(HeapRegion* hr,
5736                                   size_t* pre_used,
5737                                   FreeRegionList* free_list,
5738                                   bool par) {
5739   assert(!hr->isHumongous(), "this is only for non-humongous regions");
5740   assert(!hr->is_empty(), "the region should not be empty");
5741   assert(free_list != NULL, "pre-condition");
5742 
5743   *pre_used += hr->used();
5744   hr->hr_clear(par, true /* clear_space */);
5745   free_list->add_as_head(hr);
5746 }
5747 
5748 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5749                                      size_t* pre_used,
5750                                      FreeRegionList* free_list,
5751                                      HumongousRegionSet* humongous_proxy_set,
5752                                      bool par) {
5753   assert(hr->startsHumongous(), "this is only for starts humongous regions");
5754   assert(free_list != NULL, "pre-condition");
5755   assert(humongous_proxy_set != NULL, "pre-condition");
5756 
5757   size_t hr_used = hr->used();
5758   size_t hr_capacity = hr->capacity();
5759   size_t hr_pre_used = 0;
5760   _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
5761   // We need to read this before we make the region non-humongous,
5762   // otherwise the information will be gone.
5763   uint last_index = hr->last_hc_index();
5764   hr->set_notHumongous();
5765   free_region(hr, &hr_pre_used, free_list, par);
5766 
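  // Now free the "continues humongous" regions that trail the "starts
  // humongous" one; the indices covered are [hrs_index() + 1, last_index).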
5767   uint i = hr->hrs_index() + 1;
5768   while (i < last_index) {
5769     HeapRegion* curr_hr = region_at(i);
5770     assert(curr_hr->continuesHumongous(), "invariant");
5771     curr_hr->set_notHumongous();
5772     free_region(curr_hr, &hr_pre_used, free_list, par);
5773     i += 1;
5774   }
5775   assert(hr_pre_used == hr_used,
5776          err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
5777                  "should be the same", hr_pre_used, hr_used));
5778   *pre_used += hr_pre_used;
5779 }
5780 
5781 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
5782                                        FreeRegionList* free_list,
5783                                        OldRegionSet* old_proxy_set,
5784                                        HumongousRegionSet* humongous_proxy_set,
5785                                        bool par) {
5786   if (pre_used > 0) {
5787     Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
5788     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
5789     assert(_summary_bytes_used >= pre_used,
5790            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
5791                    "should be >= pre_used: "SIZE_FORMAT,
5792                    _summary_bytes_used, pre_used));
5793     _summary_bytes_used -= pre_used;
5794   }
5795   if (free_list != NULL && !free_list->is_empty()) {
5796     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5797     _free_list.add_as_head(free_list);
5798   }
5799   if (old_proxy_set != NULL && !old_proxy_set->is_empty()) {
5800     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5801     _old_set.update_from_proxy(old_proxy_set);
5802   }
5803   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
5804     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5805     _humongous_set.update_from_proxy(humongous_proxy_set);
5806   }
5807 }
5808 
5809 class G1ParCleanupCTTask : public AbstractGangTask {
5810   CardTableModRefBS* _ct_bs;
5811   G1CollectedHeap* _g1h;
5812   HeapRegion* volatile _su_head;
5813 public:
5814   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
5815                      G1CollectedHeap* g1h) :
5816     AbstractGangTask("G1 Par Cleanup CT Task"),
5817     _ct_bs(ct_bs), _g1h(g1h) { }
5818 
5819   void work(uint worker_id) {
5820     HeapRegion* r;
    while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
5822       clear_cards(r);
5823     }
5824   }
5825 
5826   void clear_cards(HeapRegion* r) {
5827     // Cards of the survivors should have already been dirtied.
5828     if (!r->is_survivor()) {
5829       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
5830     }
5831   }
5832 };
5833 
5834 #ifndef PRODUCT
5835 class G1VerifyCardTableCleanup: public HeapRegionClosure {
5836   G1CollectedHeap* _g1h;
5837   CardTableModRefBS* _ct_bs;
5838 public:
5839   G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
5840     : _g1h(g1h), _ct_bs(ct_bs) { }
5841   virtual bool doHeapRegion(HeapRegion* r) {
5842     if (r->is_survivor()) {
5843       _g1h->verify_dirty_region(r);
5844     } else {
5845       _g1h->verify_not_dirty_region(r);
5846     }
5847     return false;
5848   }
5849 };
5850 
5851 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
5852   // All of the region should be clean.
5853   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
5854   MemRegion mr(hr->bottom(), hr->end());
5855   ct_bs->verify_not_dirty_region(mr);
5856 }
5857 
5858 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
5859   // We cannot guarantee that [bottom(),end()] is dirty.  Threads
5860   // dirty allocated blocks as they allocate them. The thread that
5861   // retires each region and replaces it with a new one will do a
5862   // maximal allocation to fill in [pre_dummy_top(),end()] but will
5863   // not dirty that area (one less thing to have to do while holding
5864   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
5865   // is dirty.
5866   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5867   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
5868   ct_bs->verify_dirty_region(mr);
5869 }
5870 
5871 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5872   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5873   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5874     verify_dirty_region(hr);
5875   }
5876 }
5877 
5878 void G1CollectedHeap::verify_dirty_young_regions() {
5879   verify_dirty_young_list(_young_list->first_region());
5880 }
5881 #endif
5882 
5883 void G1CollectedHeap::cleanUpCardTable() {
5884   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
5885   double start = os::elapsedTime();
5886 
5887   {
5888     // Iterate over the dirty cards region list.
5889     G1ParCleanupCTTask cleanup_task(ct_bs, this);
5890 
5891     if (G1CollectedHeap::use_parallel_gc_threads()) {
5892       set_par_threads();
5893       workers()->run_task(&cleanup_task);
5894       set_par_threads(0);
5895     } else {
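      // Without parallel workers, walk the dirty cards region list directly.
      // The last region on the list points to itself, which is how the end
      // of the list is detected below.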
5896       while (_dirty_cards_region_list) {
5897         HeapRegion* r = _dirty_cards_region_list;
5898         cleanup_task.clear_cards(r);
5899         _dirty_cards_region_list = r->get_next_dirty_cards_region();
5900         if (_dirty_cards_region_list == r) {
5901           // The last region.
5902           _dirty_cards_region_list = NULL;
5903         }
5904         r->set_next_dirty_cards_region(NULL);
5905       }
5906     }
5907 #ifndef PRODUCT
5908     if (G1VerifyCTCleanup || VerifyAfterGC) {
5909       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
5910       heap_region_iterate(&cleanup_verifier);
5911     }
5912 #endif
5913   }
5914 
5915   double elapsed = os::elapsedTime() - start;
5916   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
5917 }
5918 
5919 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
5920   size_t pre_used = 0;
5921   FreeRegionList local_free_list("Local List for CSet Freeing");
5922 
5923   double young_time_ms     = 0.0;
5924   double non_young_time_ms = 0.0;
5925 
  // Since the collection set is a superset of the young list,
5927   // all we need to do to clear the young list is clear its
5928   // head and length, and unlink any young regions in the code below
5929   _young_list->clear();
5930 
5931   G1CollectorPolicy* policy = g1_policy();
5932 
5933   double start_sec = os::elapsedTime();
5934   bool non_young = true;
5935 
5936   HeapRegion* cur = cs_head;
5937   int age_bound = -1;
5938   size_t rs_lengths = 0;
5939 
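  // Walk the collection set list, splitting the elapsed time into young and
  // non-young buckets: each time the region type flips, the current timing
  // segment is closed off and accumulated into the matching bucket (the tail
  // segment is added after the loop).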
5940   while (cur != NULL) {
5941     assert(!is_on_master_free_list(cur), "sanity");
5942     if (non_young) {
5943       if (cur->is_young()) {
5944         double end_sec = os::elapsedTime();
5945         double elapsed_ms = (end_sec - start_sec) * 1000.0;
5946         non_young_time_ms += elapsed_ms;
5947 
5948         start_sec = os::elapsedTime();
5949         non_young = false;
5950       }
5951     } else {
5952       if (!cur->is_young()) {
5953         double end_sec = os::elapsedTime();
5954         double elapsed_ms = (end_sec - start_sec) * 1000.0;
5955         young_time_ms += elapsed_ms;
5956 
5957         start_sec = os::elapsedTime();
5958         non_young = true;
5959       }
5960     }
5961 
5962     rs_lengths += cur->rem_set()->occupied();
5963 
5964     HeapRegion* next = cur->next_in_collection_set();
5965     assert(cur->in_collection_set(), "bad CS");
5966     cur->set_next_in_collection_set(NULL);
5967     cur->set_in_collection_set(false);
5968 
5969     if (cur->is_young()) {
5970       int index = cur->young_index_in_cset();
5971       assert(index != -1, "invariant");
5972       assert((uint) index < policy->young_cset_region_length(), "invariant");
5973       size_t words_survived = _surviving_young_words[index];
5974       cur->record_surv_words_in_group(words_survived);
5975 
      // At this point we have 'popped' cur from the collection set
5977       // (linked via next_in_collection_set()) but it is still in the
5978       // young list (linked via next_young_region()). Clear the
5979       // _next_young_region field.
5980       cur->set_next_young_region(NULL);
5981     } else {
5982       int index = cur->young_index_in_cset();
5983       assert(index == -1, "invariant");
5984     }
5985 
5986     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
5987             (!cur->is_young() && cur->young_index_in_cset() == -1),
5988             "invariant" );
5989 
5990     if (!cur->evacuation_failed()) {
5991       MemRegion used_mr = cur->used_region();
5992 
      // Regions in the collection set should never be completely empty.
      assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
5995       free_region(cur, &pre_used, &local_free_list, false /* par */);
5996     } else {
5997       cur->uninstall_surv_rate_group();
5998       if (cur->is_young()) {
5999         cur->set_young_index_in_cset(-1);
6000       }
6001       cur->set_not_young();
6002       cur->set_evacuation_failed(false);
6003       // The region is now considered to be old.
6004       _old_set.add(cur);
6005     }
6006     cur = next;
6007   }
6008 
6009   policy->record_max_rs_lengths(rs_lengths);
6010   policy->cset_regions_freed();
6011 
6012   double end_sec = os::elapsedTime();
6013   double elapsed_ms = (end_sec - start_sec) * 1000.0;
6014 
6015   if (non_young) {
6016     non_young_time_ms += elapsed_ms;
6017   } else {
6018     young_time_ms += elapsed_ms;
6019   }
6020 
6021   update_sets_after_freeing_regions(pre_used, &local_free_list,
6022                                     NULL /* old_proxy_set */,
6023                                     NULL /* humongous_proxy_set */,
6024                                     false /* par */);
6025   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
6026   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
6027 }
6028 
6029 // This routine is similar to the above but does not record
6030 // any policy statistics or update free lists; we are abandoning
// the current incremental collection set in preparation for a
6032 // full collection. After the full GC we will start to build up
6033 // the incremental collection set again.
6034 // This is only called when we're doing a full collection
6035 // and is immediately followed by the tearing down of the young list.
6036 
6037 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
6038   HeapRegion* cur = cs_head;
6039 
6040   while (cur != NULL) {
6041     HeapRegion* next = cur->next_in_collection_set();
6042     assert(cur->in_collection_set(), "bad CS");
6043     cur->set_next_in_collection_set(NULL);
6044     cur->set_in_collection_set(false);
6045     cur->set_young_index_in_cset(-1);
6046     cur = next;
6047   }
6048 }
6049 
6050 void G1CollectedHeap::set_free_regions_coming() {
6051   if (G1ConcRegionFreeingVerbose) {
6052     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
6053                            "setting free regions coming");
6054   }
6055 
6056   assert(!free_regions_coming(), "pre-condition");
6057   _free_regions_coming = true;
6058 }
6059 
6060 void G1CollectedHeap::reset_free_regions_coming() {
6061   assert(free_regions_coming(), "pre-condition");
6062 
6063   {
6064     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6065     _free_regions_coming = false;
6066     SecondaryFreeList_lock->notify_all();
6067   }
6068 
6069   if (G1ConcRegionFreeingVerbose) {
6070     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
6071                            "reset free regions coming");
6072   }
6073 }
6074 
6075 void G1CollectedHeap::wait_while_free_regions_coming() {
6076   // Most of the time we won't have to wait, so let's do a quick test
6077   // first before we take the lock.
6078   if (!free_regions_coming()) {
6079     return;
6080   }
6081 
6082   if (G1ConcRegionFreeingVerbose) {
6083     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6084                            "waiting for free regions");
6085   }
6086 
6087   {
6088     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6089     while (free_regions_coming()) {
6090       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
6091     }
6092   }
6093 
6094   if (G1ConcRegionFreeingVerbose) {
6095     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6096                            "done waiting for free regions");
6097   }
6098 }
6099 
6100 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
6101   assert(heap_lock_held_for_gc(),
6102               "the heap lock should already be held by or for this thread");
6103   _young_list->push_region(hr);
6104 }
6105 
6106 class NoYoungRegionsClosure: public HeapRegionClosure {
6107 private:
6108   bool _success;
6109 public:
6110   NoYoungRegionsClosure() : _success(true) { }
6111   bool doHeapRegion(HeapRegion* r) {
6112     if (r->is_young()) {
6113       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
6114                              r->bottom(), r->end());
6115       _success = false;
6116     }
6117     return false;
6118   }
6119   bool success() { return _success; }
6120 };
6121 
6122 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
6123   bool ret = _young_list->check_list_empty(check_sample);
6124 
6125   if (check_heap) {
6126     NoYoungRegionsClosure closure;
6127     heap_region_iterate(&closure);
6128     ret = ret && closure.success();
6129   }
6130 
6131   return ret;
6132 }
6133 
6134 class TearDownRegionSetsClosure : public HeapRegionClosure {
6135 private:
6136   OldRegionSet *_old_set;
6137 
6138 public:
6139   TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { }
6140 
6141   bool doHeapRegion(HeapRegion* r) {
6142     if (r->is_empty()) {
6143       // We ignore empty regions, we'll empty the free list afterwards
6144     } else if (r->is_young()) {
6145       // We ignore young regions, we'll empty the young list afterwards
6146     } else if (r->isHumongous()) {
6147       // We ignore humongous regions, we're not tearing down the
6148       // humongous region set
6149     } else {
6150       // The rest should be old
6151       _old_set->remove(r);
6152     }
6153     return false;
6154   }
6155 
6156   ~TearDownRegionSetsClosure() {
6157     assert(_old_set->is_empty(), "post-condition");
6158   }
6159 };
6160 
6161 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6162   assert_at_safepoint(true /* should_be_vm_thread */);
6163 
6164   if (!free_list_only) {
6165     TearDownRegionSetsClosure cl(&_old_set);
6166     heap_region_iterate(&cl);
6167 
6168     // Need to do this after the heap iteration to be able to
6169     // recognize the young regions and ignore them during the iteration.
6170     _young_list->empty_list();
6171   }
6172   _free_list.remove_all();
6173 }
6174 
6175 class RebuildRegionSetsClosure : public HeapRegionClosure {
6176 private:
6177   bool            _free_list_only;
6178   OldRegionSet*   _old_set;
6179   FreeRegionList* _free_list;
6180   size_t          _total_used;
6181 
6182 public:
6183   RebuildRegionSetsClosure(bool free_list_only,
6184                            OldRegionSet* old_set, FreeRegionList* free_list) :
6185     _free_list_only(free_list_only),
6186     _old_set(old_set), _free_list(free_list), _total_used(0) {
6187     assert(_free_list->is_empty(), "pre-condition");
6188     if (!free_list_only) {
6189       assert(_old_set->is_empty(), "pre-condition");
6190     }
6191   }
6192 
6193   bool doHeapRegion(HeapRegion* r) {
6194     if (r->continuesHumongous()) {
6195       return false;
6196     }
6197 
6198     if (r->is_empty()) {
6199       // Add free regions to the free list
6200       _free_list->add_as_tail(r);
6201     } else if (!_free_list_only) {
6202       assert(!r->is_young(), "we should not come across young regions");
6203 
6204       if (r->isHumongous()) {
6205         // We ignore humongous regions, we left the humongous set unchanged
6206       } else {
6207         // The rest should be old, add them to the old set
6208         _old_set->add(r);
6209       }
6210       _total_used += r->used();
6211     }
6212 
6213     return false;
6214   }
6215 
6216   size_t total_used() {
6217     return _total_used;
6218   }
6219 };
6220 
6221 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6222   assert_at_safepoint(true /* should_be_vm_thread */);
6223 
6224   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
6225   heap_region_iterate(&cl);
6226 
6227   if (!free_list_only) {
6228     _summary_bytes_used = cl.total_used();
6229   }
6230   assert(_summary_bytes_used == recalculate_used(),
6231          err_msg("inconsistent _summary_bytes_used, "
6232                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6233                  _summary_bytes_used, recalculate_used()));
6234 }
6235 
6236 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6237   _refine_cte_cl->set_concurrent(concurrent);
6238 }
6239 
6240 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6241   HeapRegion* hr = heap_region_containing(p);
6242   if (hr == NULL) {
6243     return false;
6244   } else {
6245     return hr->is_in(p);
6246   }
6247 }
6248 
6249 // Methods for the mutator alloc region
6250 
6251 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6252                                                       bool force) {
6253   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6254   assert(!force || g1_policy()->can_expand_young_list(),
6255          "if force is true we should be able to expand the young list");
6256   bool young_list_full = g1_policy()->is_young_list_full();
6257   if (force || !young_list_full) {
6258     HeapRegion* new_alloc_region = new_region(word_size,
6259                                               false /* do_expand */);
6260     if (new_alloc_region != NULL) {
6261       set_region_short_lived_locked(new_alloc_region);
6262       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6263       return new_alloc_region;
6264     }
6265   }
6266   return NULL;
6267 }
6268 
6269 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6270                                                   size_t allocated_bytes) {
6271   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6272   assert(alloc_region->is_young(), "all mutator alloc regions should be young");
6273 
6274   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6275   _summary_bytes_used += allocated_bytes;
6276   _hr_printer.retire(alloc_region);
6277   // We update the eden sizes here, when the region is retired,
6278   // instead of when it's allocated, since this is the point that its
  // used space has been recorded in _summary_bytes_used.
6280   g1mm()->update_eden_size();
6281 }
6282 
6283 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
6284                                                     bool force) {
6285   return _g1h->new_mutator_alloc_region(word_size, force);
6286 }
6287 
6288 void G1CollectedHeap::set_par_threads() {
6289   // Don't change the number of workers.  Use the value previously set
6290   // in the workgroup.
6291   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
6292   uint n_workers = workers()->active_workers();
6293   assert(UseDynamicNumberOfGCThreads ||
6294            n_workers == workers()->total_workers(),
6295       "Otherwise should be using the total number of workers");
6296   if (n_workers == 0) {
6297     assert(false, "Should have been set in prior evacuation pause.");
6298     n_workers = ParallelGCThreads;
6299     workers()->set_active_workers(n_workers);
6300   }
6301   set_par_threads(n_workers);
6302 }
6303 
6304 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
6305                                        size_t allocated_bytes) {
6306   _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
6307 }
6308 
6309 // Methods for the GC alloc regions
6310 
6311 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6312                                                  uint count,
6313                                                  GCAllocPurpose ap) {
6314   assert(FreeList_lock->owned_by_self(), "pre-condition");
6315 
6316   if (count < g1_policy()->max_regions(ap)) {
6317     HeapRegion* new_alloc_region = new_region(word_size,
6318                                               true /* do_expand */);
6319     if (new_alloc_region != NULL) {
6320       // We really only need to do this for old regions given that we
6321       // should never scan survivors. But it doesn't hurt to do it
6322       // for survivors too.
6323       new_alloc_region->set_saved_mark();
6324       if (ap == GCAllocForSurvived) {
6325         new_alloc_region->set_survivor();
6326         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6327       } else {
6328         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6329       }
6330       bool during_im = g1_policy()->during_initial_mark_pause();
6331       new_alloc_region->note_start_of_copying(during_im);
6332       return new_alloc_region;
6333     } else {
6334       g1_policy()->note_alloc_region_limit_reached(ap);
6335     }
6336   }
6337   return NULL;
6338 }
6339 
6340 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6341                                              size_t allocated_bytes,
6342                                              GCAllocPurpose ap) {
6343   bool during_im = g1_policy()->during_initial_mark_pause();
6344   alloc_region->note_end_of_copying(during_im);
6345   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6346   if (ap == GCAllocForSurvived) {
6347     young_list()->add_survivor_region(alloc_region);
6348   } else {
6349     _old_set.add(alloc_region);
6350   }
6351   _hr_printer.retire(alloc_region);
6352 }
6353 
6354 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
6355                                                        bool force) {
6356   assert(!force, "not supported for GC alloc regions");
6357   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
6358 }
6359 
6360 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
6361                                           size_t allocated_bytes) {
6362   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6363                                GCAllocForSurvived);
6364 }
6365 
6366 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
6367                                                   bool force) {
6368   assert(!force, "not supported for GC alloc regions");
6369   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
6370 }
6371 
6372 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
6373                                      size_t allocated_bytes) {
6374   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6375                                GCAllocForTenured);
6376 }

// Heap region set verification

6379 class VerifyRegionListsClosure : public HeapRegionClosure {
6380 private:
6381   FreeRegionList*     _free_list;
6382   OldRegionSet*       _old_set;
6383   HumongousRegionSet* _humongous_set;
6384   uint                _region_count;
6385 
6386 public:
6387   VerifyRegionListsClosure(OldRegionSet* old_set,
6388                            HumongousRegionSet* humongous_set,
6389                            FreeRegionList* free_list) :
    _free_list(free_list), _old_set(old_set),
    _humongous_set(humongous_set), _region_count(0) { }
6392 
6393   uint region_count() { return _region_count; }
6394 
6395   bool doHeapRegion(HeapRegion* hr) {
6396     _region_count += 1;
6397 
6398     if (hr->continuesHumongous()) {
6399       return false;
6400     }
6401 
6402     if (hr->is_young()) {
6403       // TODO
6404     } else if (hr->startsHumongous()) {
6405       _humongous_set->verify_next_region(hr);
6406     } else if (hr->is_empty()) {
6407       _free_list->verify_next_region(hr);
6408     } else {
6409       _old_set->verify_next_region(hr);
6410     }
6411     return false;
6412   }
6413 };
6414 
6415 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
6416                                              HeapWord* bottom) {
6417   HeapWord* end = bottom + HeapRegion::GrainWords;
6418   MemRegion mr(bottom, end);
6419   assert(_g1_reserved.contains(mr), "invariant");
6420   // This might return NULL if the allocation fails
6421   return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
6422 }
6423 
6424 void G1CollectedHeap::verify_region_sets() {
6425   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6426 
6427   // First, check the explicit lists.
6428   _free_list.verify();
6429   {
6430     // Given that a concurrent operation might be adding regions to
6431     // the secondary free list we have to take the lock before
6432     // verifying it.
6433     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6434     _secondary_free_list.verify();
6435   }
6436   _old_set.verify();
6437   _humongous_set.verify();
6438 
6439   // If a concurrent region freeing operation is in progress it will
  // be difficult to correctly attribute any free regions we come
  // across to the correct free list given that they might belong to
6442   // one of several (free_list, secondary_free_list, any local lists,
6443   // etc.). So, if that's the case we will skip the rest of the
6444   // verification operation. Alternatively, waiting for the concurrent
6445   // operation to complete will have a non-trivial effect on the GC's
6446   // operation (no concurrent operation will last longer than the
6447   // interval between two calls to verification) and it might hide
6448   // any issues that we would like to catch during testing.
6449   if (free_regions_coming()) {
6450     return;
6451   }
6452 
6453   // Make sure we append the secondary_free_list on the free_list so
6454   // that all free regions we will come across can be safely
6455   // attributed to the free_list.
6456   append_secondary_free_list_if_not_empty_with_lock();
6457 
6458   // Finally, make sure that the region accounting in the lists is
6459   // consistent with what we see in the heap.
6460   _old_set.verify_start();
6461   _humongous_set.verify_start();
6462   _free_list.verify_start();
6463 
6464   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
6465   heap_region_iterate(&cl);
6466 
6467   _old_set.verify_end();
6468   _humongous_set.verify_end();
6469   _free_list.verify_end();
6470 }