1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/icBuffer.hpp"
  27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  35 #include "gc_implementation/g1/g1EvacFailure.hpp"
  36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  37 #include "gc_implementation/g1/g1Log.hpp"
  38 #include "gc_implementation/g1/g1MarkSweep.hpp"
  39 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  40 #include "gc_implementation/g1/g1PreserveMarkQueue.hpp"
  41 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  42 #include "gc_implementation/g1/heapRegion.inline.hpp"
  43 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  44 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  45 #include "gc_implementation/g1/vm_operations_g1.hpp"
  46 #include "gc_implementation/shared/isGCActiveMark.hpp"
  47 #include "memory/gcLocker.inline.hpp"
  48 #include "memory/genOopClosures.inline.hpp"
  49 #include "memory/generationSpec.hpp"
  50 #include "memory/referenceProcessor.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "oops/oop.pcgc.inline.hpp"
  53 #include "runtime/aprofiler.hpp"
  54 #include "runtime/vmThread.hpp"
  55 
  56 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  57 
  58 // turn it on so that the contents of the young list (scan-only /
  59 // to-be-collected) are printed at "strategic" points before / during
  60 // / after the collection --- this is useful for debugging
  61 #define YOUNG_LIST_VERBOSE 0
  62 // CURRENT STATUS
  63 // This file is under construction.  Search for "FIXME".
  64 
  65 // INVARIANTS/NOTES
  66 //
  67 // All allocation activity covered by the G1CollectedHeap interface is
  68 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  69 // and allocate_new_tlab, which are the "entry" points to the
  70 // allocation code from the rest of the JVM.  (Note that this does not
  71 // apply to TLAB allocation, which is not part of this interface: it
  72 // is done by clients of this interface.)
  73 
  74 // Notes on implementation of parallelism in different tasks.
  75 //
  76 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
  77 // The number of GC workers is passed to heap_region_par_iterate_chunked().
  78 // It does use run_task() which sets _n_workers in the task.
  79 // G1ParTask executes g1_process_strong_roots() ->
  80 // SharedHeap::process_strong_roots() which calls eventuall to
  81 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
  82 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
  83 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  84 //
  85 
  86 // Local to this file.
  87 
  88 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  89   SuspendibleThreadSet* _sts;
  90   G1RemSet* _g1rs;
  91   ConcurrentG1Refine* _cg1r;
  92   bool _concurrent;
  93 public:
  94   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
  95                               G1RemSet* g1rs,
  96                               ConcurrentG1Refine* cg1r) :
  97     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  98   {}
  99   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 100     bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
 101     // This path is executed by the concurrent refine or mutator threads,
 102     // concurrently, and so we do not care if card_ptr contains references
 103     // that point into the collection set.
 104     assert(!oops_into_cset, "should be");
 105 
 106     if (_concurrent && _sts->should_yield()) {
 107       // Caller will actually yield.
 108       return false;
 109     }
 110     // Otherwise, we finished successfully; return true.
 111     return true;
 112   }
 113   void set_concurrent(bool b) { _concurrent = b; }
 114 };
 115 
 116 
 117 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
 118   int _calls;
 119   G1CollectedHeap* _g1h;
 120   CardTableModRefBS* _ctbs;
 121   int _histo[256];
 122 public:
 123   ClearLoggedCardTableEntryClosure() :
 124     _calls(0)
 125   {
 126     _g1h = G1CollectedHeap::heap();
 127     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 128     for (int i = 0; i < 256; i++) _histo[i] = 0;
 129   }
 130   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 131     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 132       _calls++;
 133       unsigned char* ujb = (unsigned char*)card_ptr;
 134       int ind = (int)(*ujb);
 135       _histo[ind]++;
 136       *card_ptr = -1;
 137     }
 138     return true;
 139   }
 140   int calls() { return _calls; }
 141   void print_histo() {
 142     gclog_or_tty->print_cr("Card table value histogram:");
 143     for (int i = 0; i < 256; i++) {
 144       if (_histo[i] != 0) {
 145         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
 146       }
 147     }
 148   }
 149 };
 150 
 151 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
 152   int _calls;
 153   G1CollectedHeap* _g1h;
 154   CardTableModRefBS* _ctbs;
 155 public:
 156   RedirtyLoggedCardTableEntryClosure() :
 157     _calls(0)
 158   {
 159     _g1h = G1CollectedHeap::heap();
 160     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 161   }
 162   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 163     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 164       _calls++;
 165       *card_ptr = 0;
 166     }
 167     return true;
 168   }
 169   int calls() { return _calls; }
 170 };
 171 
 172 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
 173 public:
 174   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 175     *card_ptr = CardTableModRefBS::dirty_card_val();
 176     return true;
 177   }
 178 };
 179 
 180 YoungList::YoungList(G1CollectedHeap* g1h) :
 181     _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
 182     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
 183   guarantee(check_list_empty(false), "just making sure...");
 184 }
 185 
 186 void YoungList::push_region(HeapRegion *hr) {
 187   assert(!hr->is_young(), "should not already be young");
 188   assert(hr->get_next_young_region() == NULL, "cause it should!");
 189 
 190   hr->set_next_young_region(_head);
 191   _head = hr;
 192 
 193   _g1h->g1_policy()->set_region_eden(hr, (int) _length);
 194   ++_length;
 195 }
 196 
 197 void YoungList::add_survivor_region(HeapRegion* hr) {
 198   assert(hr->is_survivor(), "should be flagged as survivor region");
 199   assert(hr->get_next_young_region() == NULL, "cause it should!");
 200 
 201   hr->set_next_young_region(_survivor_head);
 202   if (_survivor_head == NULL) {
 203     _survivor_tail = hr;
 204   }
 205   _survivor_head = hr;
 206   ++_survivor_length;
 207 }
 208 
 209 void YoungList::empty_list(HeapRegion* list) {
 210   while (list != NULL) {
 211     HeapRegion* next = list->get_next_young_region();
 212     list->set_next_young_region(NULL);
 213     list->uninstall_surv_rate_group();
 214     list->set_not_young();
 215     list = next;
 216   }
 217 }
 218 
 219 void YoungList::empty_list() {
 220   assert(check_list_well_formed(), "young list should be well formed");
 221 
 222   empty_list(_head);
 223   _head = NULL;
 224   _length = 0;
 225 
 226   empty_list(_survivor_head);
 227   _survivor_head = NULL;
 228   _survivor_tail = NULL;
 229   _survivor_length = 0;
 230 
 231   _last_sampled_rs_lengths = 0;
 232 
 233   assert(check_list_empty(false), "just making sure...");
 234 }
 235 
 236 bool YoungList::check_list_well_formed() {
 237   bool ret = true;
 238 
 239   uint length = 0;
 240   HeapRegion* curr = _head;
 241   HeapRegion* last = NULL;
 242   while (curr != NULL) {
 243     if (!curr->is_young()) {
 244       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
 245                              "incorrectly tagged (y: %d, surv: %d)",
 246                              curr->bottom(), curr->end(),
 247                              curr->is_young(), curr->is_survivor());
 248       ret = false;
 249     }
 250     ++length;
 251     last = curr;
 252     curr = curr->get_next_young_region();
 253   }
 254   ret = ret && (length == _length);
 255 
 256   if (!ret) {
 257     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
 258     gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
 259                            length, _length);
 260   }
 261 
 262   return ret;
 263 }
 264 
 265 bool YoungList::check_list_empty(bool check_sample) {
 266   bool ret = true;
 267 
 268   if (_length != 0) {
 269     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
 270                   _length);
 271     ret = false;
 272   }
 273   if (check_sample && _last_sampled_rs_lengths != 0) {
 274     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
 275     ret = false;
 276   }
 277   if (_head != NULL) {
 278     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
 279     ret = false;
 280   }
 281   if (!ret) {
 282     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
 283   }
 284 
 285   return ret;
 286 }
 287 
 288 void
 289 YoungList::rs_length_sampling_init() {
 290   _sampled_rs_lengths = 0;
 291   _curr               = _head;
 292 }
 293 
 294 bool
 295 YoungList::rs_length_sampling_more() {
 296   return _curr != NULL;
 297 }
 298 
 299 void
 300 YoungList::rs_length_sampling_next() {
 301   assert( _curr != NULL, "invariant" );
 302   size_t rs_length = _curr->rem_set()->occupied();
 303 
 304   _sampled_rs_lengths += rs_length;
 305 
 306   // The current region may not yet have been added to the
 307   // incremental collection set (it gets added when it is
 308   // retired as the current allocation region).
 309   if (_curr->in_collection_set()) {
 310     // Update the collection set policy information for this region
 311     _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
 312   }
 313 
 314   _curr = _curr->get_next_young_region();
 315   if (_curr == NULL) {
 316     _last_sampled_rs_lengths = _sampled_rs_lengths;
 317     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
 318   }
 319 }
 320 
 321 void
 322 YoungList::reset_auxilary_lists() {
 323   guarantee( is_empty(), "young list should be empty" );
 324   assert(check_list_well_formed(), "young list should be well formed");
 325 
 326   // Add survivor regions to SurvRateGroup.
 327   _g1h->g1_policy()->note_start_adding_survivor_regions();
 328   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
 329 
 330   int young_index_in_cset = 0;
 331   for (HeapRegion* curr = _survivor_head;
 332        curr != NULL;
 333        curr = curr->get_next_young_region()) {
 334     _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
 335 
 336     // The region is a non-empty survivor so let's add it to
 337     // the incremental collection set for the next evacuation
 338     // pause.
 339     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
 340     young_index_in_cset += 1;
 341   }
 342   assert((uint) young_index_in_cset == _survivor_length, "post-condition");
 343   _g1h->g1_policy()->note_stop_adding_survivor_regions();
 344 
 345   _head   = _survivor_head;
 346   _length = _survivor_length;
 347   if (_survivor_head != NULL) {
 348     assert(_survivor_tail != NULL, "cause it shouldn't be");
 349     assert(_survivor_length > 0, "invariant");
 350     _survivor_tail->set_next_young_region(NULL);
 351   }
 352 
 353   // Don't clear the survivor list handles until the start of
 354   // the next evacuation pause - we need it in order to re-tag
 355   // the survivor regions from this evacuation pause as 'young'
 356   // at the start of the next.
 357 
 358   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
 359 
 360   assert(check_list_well_formed(), "young list should be well formed");
 361 }
 362 
 363 void YoungList::print() {
 364   HeapRegion* lists[] = {_head,   _survivor_head};
 365   const char* names[] = {"YOUNG", "SURVIVOR"};
 366 
 367   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
 368     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
 369     HeapRegion *curr = lists[list];
 370     if (curr == NULL)
 371       gclog_or_tty->print_cr("  empty");
 372     while (curr != NULL) {
 373       gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
 374                              HR_FORMAT_PARAMS(curr),
 375                              curr->prev_top_at_mark_start(),
 376                              curr->next_top_at_mark_start(),
 377                              curr->age_in_surv_rate_group_cond());
 378       curr = curr->get_next_young_region();
 379     }
 380   }
 381 
 382   gclog_or_tty->print_cr("");
 383 }
 384 
 385 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 386 {
 387   // Claim the right to put the region on the dirty cards region list
 388   // by installing a self pointer.
 389   HeapRegion* next = hr->get_next_dirty_cards_region();
 390   if (next == NULL) {
 391     HeapRegion* res = (HeapRegion*)
 392       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
 393                           NULL);
 394     if (res == NULL) {
 395       HeapRegion* head;
 396       do {
 397         // Put the region to the dirty cards region list.
 398         head = _dirty_cards_region_list;
 399         next = (HeapRegion*)
 400           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
 401         if (next == head) {
 402           assert(hr->get_next_dirty_cards_region() == hr,
 403                  "hr->get_next_dirty_cards_region() != hr");
 404           if (next == NULL) {
 405             // The last region in the list points to itself.
 406             hr->set_next_dirty_cards_region(hr);
 407           } else {
 408             hr->set_next_dirty_cards_region(next);
 409           }
 410         }
 411       } while (next != head);
 412     }
 413   }
 414 }
 415 
 416 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
 417 {
 418   HeapRegion* head;
 419   HeapRegion* hr;
 420   do {
 421     head = _dirty_cards_region_list;
 422     if (head == NULL) {
 423       return NULL;
 424     }
 425     HeapRegion* new_head = head->get_next_dirty_cards_region();
 426     if (head == new_head) {
 427       // The last region.
 428       new_head = NULL;
 429     }
 430     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
 431                                           head);
 432   } while (hr != head);
 433   assert(hr != NULL, "invariant");
 434   hr->set_next_dirty_cards_region(NULL);
 435   return hr;
 436 }
 437 
 438 void G1CollectedHeap::stop_conc_gc_threads() {
 439   _cg1r->stop();
 440   _cmThread->stop();
 441 }
 442 
 443 #ifdef ASSERT
 444 // A region is added to the collection set as it is retired
 445 // so an address p can point to a region which will be in the
 446 // collection set but has not yet been retired.  This method
 447 // therefore is only accurate during a GC pause after all
 448 // regions have been retired.  It is used for debugging
 449 // to check if an nmethod has references to objects that can
 450 // be move during a partial collection.  Though it can be
 451 // inaccurate, it is sufficient for G1 because the conservative
 452 // implementation of is_scavengable() for G1 will indicate that
 453 // all nmethods must be scanned during a partial collection.
 454 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
 455   HeapRegion* hr = heap_region_containing(p);
 456   return hr != NULL && hr->in_collection_set();
 457 }
 458 #endif
 459 
 460 // Returns true if the reference points to an object that
 461 // can move in an incremental collecction.
 462 bool G1CollectedHeap::is_scavengable(const void* p) {
 463   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 464   G1CollectorPolicy* g1p = g1h->g1_policy();
 465   HeapRegion* hr = heap_region_containing(p);
 466   if (hr == NULL) {
 467      // null
 468      assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
 469      return false;
 470   } else {
 471     return !hr->isHumongous();
 472   }
 473 }
 474 
 475 void G1CollectedHeap::check_ct_logs_at_safepoint() {
 476   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 477   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
 478 
 479   // Count the dirty cards at the start.
 480   CountNonCleanMemRegionClosure count1(this);
 481   ct_bs->mod_card_iterate(&count1);
 482   int orig_count = count1.n();
 483 
 484   // First clear the logged cards.
 485   ClearLoggedCardTableEntryClosure clear;
 486   dcqs.set_closure(&clear);
 487   dcqs.apply_closure_to_all_completed_buffers();
 488   dcqs.iterate_closure_all_threads(false);
 489   clear.print_histo();
 490 
 491   // Now ensure that there's no dirty cards.
 492   CountNonCleanMemRegionClosure count2(this);
 493   ct_bs->mod_card_iterate(&count2);
 494   if (count2.n() != 0) {
 495     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
 496                            count2.n(), orig_count);
 497   }
 498   guarantee(count2.n() == 0, "Card table should be clean.");
 499 
 500   RedirtyLoggedCardTableEntryClosure redirty;
 501   JavaThread::dirty_card_queue_set().set_closure(&redirty);
 502   dcqs.apply_closure_to_all_completed_buffers();
 503   dcqs.iterate_closure_all_threads(false);
 504   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
 505                          clear.calls(), orig_count);
 506   guarantee(redirty.calls() == clear.calls(),
 507             "Or else mechanism is broken.");
 508 
 509   CountNonCleanMemRegionClosure count3(this);
 510   ct_bs->mod_card_iterate(&count3);
 511   if (count3.n() != orig_count) {
 512     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
 513                            orig_count, count3.n());
 514     guarantee(count3.n() >= orig_count, "Should have restored them all.");
 515   }
 516 
 517   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
 518 }
 519 
 520 // Private class members.
 521 
 522 G1CollectedHeap* G1CollectedHeap::_g1h;
 523 
 524 // Private methods.
 525 
 526 HeapRegion*
 527 G1CollectedHeap::new_region_try_secondary_free_list() {
 528   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 529   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 530     if (!_secondary_free_list.is_empty()) {
 531       if (G1ConcRegionFreeingVerbose) {
 532         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 533                                "secondary_free_list has %u entries",
 534                                _secondary_free_list.length());
 535       }
 536       // It looks as if there are free regions available on the
 537       // secondary_free_list. Let's move them to the free_list and try
 538       // again to allocate from it.
 539       append_secondary_free_list();
 540 
 541       assert(!_free_list.is_empty(), "if the secondary_free_list was not "
 542              "empty we should have moved at least one entry to the free_list");
 543       HeapRegion* res = _free_list.remove_head();
 544       if (G1ConcRegionFreeingVerbose) {
 545         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 546                                "allocated "HR_FORMAT" from secondary_free_list",
 547                                HR_FORMAT_PARAMS(res));
 548       }
 549       return res;
 550     }
 551 
 552     // Wait here until we get notifed either when (a) there are no
 553     // more free regions coming or (b) some regions have been moved on
 554     // the secondary_free_list.
 555     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 556   }
 557 
 558   if (G1ConcRegionFreeingVerbose) {
 559     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 560                            "could not allocate from secondary_free_list");
 561   }
 562   return NULL;
 563 }
 564 
 565 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
 566   assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
 567          "the only time we use this to allocate a humongous region is "
 568          "when we are allocating a single humongous region");
 569 
 570   HeapRegion* res;
 571   if (G1StressConcRegionFreeing) {
 572     if (!_secondary_free_list.is_empty()) {
 573       if (G1ConcRegionFreeingVerbose) {
 574         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 575                                "forced to look at the secondary_free_list");
 576       }
 577       res = new_region_try_secondary_free_list();
 578       if (res != NULL) {
 579         return res;
 580       }
 581     }
 582   }
 583   res = _free_list.remove_head_or_null();
 584   if (res == NULL) {
 585     if (G1ConcRegionFreeingVerbose) {
 586       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 587                              "res == NULL, trying the secondary_free_list");
 588     }
 589     res = new_region_try_secondary_free_list();
 590   }
 591   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 592     // Currently, only attempts to allocate GC alloc regions set
 593     // do_expand to true. So, we should only reach here during a
 594     // safepoint. If this assumption changes we might have to
 595     // reconsider the use of _expand_heap_after_alloc_failure.
 596     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 597 
 598     ergo_verbose1(ErgoHeapSizing,
 599                   "attempt heap expansion",
 600                   ergo_format_reason("region allocation request failed")
 601                   ergo_format_byte("allocation request"),
 602                   word_size * HeapWordSize);
 603     if (expand(word_size * HeapWordSize)) {
 604       // Given that expand() succeeded in expanding the heap, and we
 605       // always expand the heap by an amount aligned to the heap
 606       // region size, the free list should in theory not be empty. So
 607       // it would probably be OK to use remove_head(). But the extra
 608       // check for NULL is unlikely to be a performance issue here (we
 609       // just expanded the heap!) so let's just be conservative and
 610       // use remove_head_or_null().
 611       res = _free_list.remove_head_or_null();
 612     } else {
 613       _expand_heap_after_alloc_failure = false;
 614     }
 615   }
 616   return res;
 617 }
 618 
 619 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
 620                                                         size_t word_size) {
 621   assert(isHumongous(word_size), "word_size should be humongous");
 622   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 623 
 624   uint first = G1_NULL_HRS_INDEX;
 625   if (num_regions == 1) {
 626     // Only one region to allocate, no need to go through the slower
 627     // path. The caller will attempt the expasion if this fails, so
 628     // let's not try to expand here too.
 629     HeapRegion* hr = new_region(word_size, false /* do_expand */);
 630     if (hr != NULL) {
 631       first = hr->hrs_index();
 632     } else {
 633       first = G1_NULL_HRS_INDEX;
 634     }
 635   } else {
 636     // We can't allocate humongous regions while cleanupComplete() is
 637     // running, since some of the regions we find to be empty might not
 638     // yet be added to the free list and it is not straightforward to
 639     // know which list they are on so that we can remove them. Note
 640     // that we only need to do this if we need to allocate more than
 641     // one region to satisfy the current humongous allocation
 642     // request. If we are only allocating one region we use the common
 643     // region allocation code (see above).
 644     wait_while_free_regions_coming();
 645     append_secondary_free_list_if_not_empty_with_lock();
 646 
 647     if (free_regions() >= num_regions) {
 648       first = _hrs.find_contiguous(num_regions);
 649       if (first != G1_NULL_HRS_INDEX) {
 650         for (uint i = first; i < first + num_regions; ++i) {
 651           HeapRegion* hr = region_at(i);
 652           assert(hr->is_empty(), "sanity");
 653           assert(is_on_master_free_list(hr), "sanity");
 654           hr->set_pending_removal(true);
 655         }
 656         _free_list.remove_all_pending(num_regions);
 657       }
 658     }
 659   }
 660   return first;
 661 }
 662 
 663 HeapWord*
 664 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 665                                                            uint num_regions,
 666                                                            size_t word_size) {
 667   assert(first != G1_NULL_HRS_INDEX, "pre-condition");
 668   assert(isHumongous(word_size), "word_size should be humongous");
 669   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 670 
 671   // Index of last region in the series + 1.
 672   uint last = first + num_regions;
 673 
 674   // We need to initialize the region(s) we just discovered. This is
 675   // a bit tricky given that it can happen concurrently with
 676   // refinement threads refining cards on these regions and
 677   // potentially wanting to refine the BOT as they are scanning
 678   // those cards (this can happen shortly after a cleanup; see CR
 679   // 6991377). So we have to set up the region(s) carefully and in
 680   // a specific order.
 681 
 682   // The word size sum of all the regions we will allocate.
 683   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 684   assert(word_size <= word_size_sum, "sanity");
 685 
 686   // This will be the "starts humongous" region.
 687   HeapRegion* first_hr = region_at(first);
 688   // The header of the new object will be placed at the bottom of
 689   // the first region.
 690   HeapWord* new_obj = first_hr->bottom();
 691   // This will be the new end of the first region in the series that
 692   // should also match the end of the last region in the seriers.
 693   HeapWord* new_end = new_obj + word_size_sum;
 694   // This will be the new top of the first region that will reflect
 695   // this allocation.
 696   HeapWord* new_top = new_obj + word_size;
 697 
 698   // First, we need to zero the header of the space that we will be
 699   // allocating. When we update top further down, some refinement
 700   // threads might try to scan the region. By zeroing the header we
 701   // ensure that any thread that will try to scan the region will
 702   // come across the zero klass word and bail out.
 703   //
 704   // NOTE: It would not have been correct to have used
 705   // CollectedHeap::fill_with_object() and make the space look like
 706   // an int array. The thread that is doing the allocation will
 707   // later update the object header to a potentially different array
 708   // type and, for a very short period of time, the klass and length
 709   // fields will be inconsistent. This could cause a refinement
 710   // thread to calculate the object size incorrectly.
 711   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 712 
 713   // We will set up the first region as "starts humongous". This
 714   // will also update the BOT covering all the regions to reflect
 715   // that there is a single object that starts at the bottom of the
 716   // first region.
 717   first_hr->set_startsHumongous(new_top, new_end);
 718 
 719   // Then, if there are any, we will set up the "continues
 720   // humongous" regions.
 721   HeapRegion* hr = NULL;
 722   for (uint i = first + 1; i < last; ++i) {
 723     hr = region_at(i);
 724     hr->set_continuesHumongous(first_hr);
 725   }
 726   // If we have "continues humongous" regions (hr != NULL), then the
 727   // end of the last one should match new_end.
 728   assert(hr == NULL || hr->end() == new_end, "sanity");
 729 
 730   // Up to this point no concurrent thread would have been able to
 731   // do any scanning on any region in this series. All the top
 732   // fields still point to bottom, so the intersection between
 733   // [bottom,top] and [card_start,card_end] will be empty. Before we
 734   // update the top fields, we'll do a storestore to make sure that
 735   // no thread sees the update to top before the zeroing of the
 736   // object header and the BOT initialization.
 737   OrderAccess::storestore();
 738 
 739   // Now that the BOT and the object header have been initialized,
 740   // we can update top of the "starts humongous" region.
 741   assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
 742          "new_top should be in this region");
 743   first_hr->set_top(new_top);
 744   if (_hr_printer.is_active()) {
 745     HeapWord* bottom = first_hr->bottom();
 746     HeapWord* end = first_hr->orig_end();
 747     if ((first + 1) == last) {
 748       // the series has a single humongous region
 749       _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
 750     } else {
 751       // the series has more than one humongous regions
 752       _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
 753     }
 754   }
 755 
 756   // Now, we will update the top fields of the "continues humongous"
 757   // regions. The reason we need to do this is that, otherwise,
 758   // these regions would look empty and this will confuse parts of
 759   // G1. For example, the code that looks for a consecutive number
 760   // of empty regions will consider them empty and try to
 761   // re-allocate them. We can extend is_empty() to also include
 762   // !continuesHumongous(), but it is easier to just update the top
 763   // fields here. The way we set top for all regions (i.e., top ==
 764   // end for all regions but the last one, top == new_top for the
 765   // last one) is actually used when we will free up the humongous
 766   // region in free_humongous_region().
 767   hr = NULL;
 768   for (uint i = first + 1; i < last; ++i) {
 769     hr = region_at(i);
 770     if ((i + 1) == last) {
 771       // last continues humongous region
 772       assert(hr->bottom() < new_top && new_top <= hr->end(),
 773              "new_top should fall on this region");
 774       hr->set_top(new_top);
 775       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
 776     } else {
 777       // not last one
 778       assert(new_top > hr->end(), "new_top should be above this region");
 779       hr->set_top(hr->end());
 780       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
 781     }
 782   }
 783   // If we have continues humongous regions (hr != NULL), then the
 784   // end of the last one should match new_end and its top should
 785   // match new_top.
 786   assert(hr == NULL ||
 787          (hr->end() == new_end && hr->top() == new_top), "sanity");
 788 
 789   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
 790   _summary_bytes_used += first_hr->used();
 791   _humongous_set.add(first_hr);
 792 
 793   return new_obj;
 794 }
 795 
 796 // If could fit into free regions w/o expansion, try.
 797 // Otherwise, if can expand, do so.
 798 // Otherwise, if using ex regions might help, try with ex given back.
 799 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
 800   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 801 
 802   verify_region_sets_optional();
 803 
 804   size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
 805   uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
 806   uint x_num = expansion_regions();
 807   uint fs = _hrs.free_suffix();
 808   uint first = humongous_obj_allocate_find_first(num_regions, word_size);
 809   if (first == G1_NULL_HRS_INDEX) {
 810     // The only thing we can do now is attempt expansion.
 811     if (fs + x_num >= num_regions) {
 812       // If the number of regions we're trying to allocate for this
 813       // object is at most the number of regions in the free suffix,
 814       // then the call to humongous_obj_allocate_find_first() above
 815       // should have succeeded and we wouldn't be here.
 816       //
 817       // We should only be trying to expand when the free suffix is
 818       // not sufficient for the object _and_ we have some expansion
 819       // room available.
 820       assert(num_regions > fs, "earlier allocation should have succeeded");
 821 
 822       ergo_verbose1(ErgoHeapSizing,
 823                     "attempt heap expansion",
 824                     ergo_format_reason("humongous allocation request failed")
 825                     ergo_format_byte("allocation request"),
 826                     word_size * HeapWordSize);
 827       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
 828         // Even though the heap was expanded, it might not have
 829         // reached the desired size. So, we cannot assume that the
 830         // allocation will succeed.
 831         first = humongous_obj_allocate_find_first(num_regions, word_size);
 832       }
 833     }
 834   }
 835 
 836   HeapWord* result = NULL;
 837   if (first != G1_NULL_HRS_INDEX) {
 838     result =
 839       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
 840     assert(result != NULL, "it should always return a valid result");
 841 
 842     // A successful humongous object allocation changes the used space
 843     // information of the old generation so we need to recalculate the
 844     // sizes and update the jstat counters here.
 845     g1mm()->update_sizes();
 846   }
 847 
 848   verify_region_sets_optional();
 849 
 850   return result;
 851 }
 852 
 853 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 854   assert_heap_not_locked_and_not_at_safepoint();
 855   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 856 
 857   unsigned int dummy_gc_count_before;
 858   return attempt_allocation(word_size, &dummy_gc_count_before);
 859 }
 860 
 861 HeapWord*
 862 G1CollectedHeap::mem_allocate(size_t word_size,
 863                               bool*  gc_overhead_limit_was_exceeded) {
 864   assert_heap_not_locked_and_not_at_safepoint();
 865 
 866   // Loop until the allocation is satisified, or unsatisfied after GC.
 867   for (int try_count = 1; /* we'll return */; try_count += 1) {
 868     unsigned int gc_count_before;
 869 
 870     HeapWord* result = NULL;
 871     if (!isHumongous(word_size)) {
 872       result = attempt_allocation(word_size, &gc_count_before);
 873     } else {
 874       result = attempt_allocation_humongous(word_size, &gc_count_before);
 875     }
 876     if (result != NULL) {
 877       return result;
 878     }
 879 
 880     // Create the garbage collection operation...
 881     VM_G1CollectForAllocation op(gc_count_before, word_size);
 882     // ...and get the VM thread to execute it.
 883     VMThread::execute(&op);
 884 
 885     if (op.prologue_succeeded() && op.pause_succeeded()) {
 886       // If the operation was successful we'll return the result even
 887       // if it is NULL. If the allocation attempt failed immediately
 888       // after a Full GC, it's unlikely we'll be able to allocate now.
 889       HeapWord* result = op.result();
 890       if (result != NULL && !isHumongous(word_size)) {
 891         // Allocations that take place on VM operations do not do any
 892         // card dirtying and we have to do it here. We only have to do
 893         // this for non-humongous allocations, though.
 894         dirty_young_block(result, word_size);
 895       }
 896       return result;
 897     } else {
 898       assert(op.result() == NULL,
 899              "the result should be NULL if the VM op did not succeed");
 900     }
 901 
 902     // Give a warning if we seem to be looping forever.
 903     if ((QueuedAllocationWarningCount > 0) &&
 904         (try_count % QueuedAllocationWarningCount == 0)) {
 905       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
 906     }
 907   }
 908 
 909   ShouldNotReachHere();
 910   return NULL;
 911 }
 912 
 913 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 914                                            unsigned int *gc_count_before_ret) {
 915   // Make sure you read the note in attempt_allocation_humongous().
 916 
 917   assert_heap_not_locked_and_not_at_safepoint();
 918   assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
 919          "be called for humongous allocation requests");
 920 
 921   // We should only get here after the first-level allocation attempt
 922   // (attempt_allocation()) failed to allocate.
 923 
 924   // We will loop until a) we manage to successfully perform the
 925   // allocation or b) we successfully schedule a collection which
 926   // fails to perform the allocation. b) is the only case when we'll
 927   // return NULL.
 928   HeapWord* result = NULL;
 929   for (int try_count = 1; /* we'll return */; try_count += 1) {
 930     bool should_try_gc;
 931     unsigned int gc_count_before;
 932 
 933     {
 934       MutexLockerEx x(Heap_lock);
 935 
 936       result = _mutator_alloc_region.attempt_allocation_locked(word_size,
 937                                                       false /* bot_updates */);
 938       if (result != NULL) {
 939         return result;
 940       }
 941 
 942       // If we reach here, attempt_allocation_locked() above failed to
 943       // allocate a new region. So the mutator alloc region should be NULL.
 944       assert(_mutator_alloc_region.get() == NULL, "only way to get here");
 945 
 946       if (GC_locker::is_active_and_needs_gc()) {
 947         if (g1_policy()->can_expand_young_list()) {
 948           // No need for an ergo verbose message here,
 949           // can_expand_young_list() does this when it returns true.
 950           result = _mutator_alloc_region.attempt_allocation_force(word_size,
 951                                                       false /* bot_updates */);
 952           if (result != NULL) {
 953             return result;
 954           }
 955         }
 956         should_try_gc = false;
 957       } else {
 958         // The GCLocker may not be active but the GCLocker initiated
 959         // GC may not yet have been performed (GCLocker::needs_gc()
 960         // returns true). In this case we do not try this GC and
 961         // wait until the GCLocker initiated GC is performed, and
 962         // then retry the allocation.
 963         if (GC_locker::needs_gc()) {
 964           should_try_gc = false;
 965         } else {
 966           // Read the GC count while still holding the Heap_lock.
 967           gc_count_before = total_collections();
 968           should_try_gc = true;
 969         }
 970       }
 971     }
 972 
 973     if (should_try_gc) {
 974       bool succeeded;
 975       result = do_collection_pause(word_size, gc_count_before, &succeeded);
 976       if (result != NULL) {
 977         assert(succeeded, "only way to get back a non-NULL result");
 978         return result;
 979       }
 980 
 981       if (succeeded) {
 982         // If we get here we successfully scheduled a collection which
 983         // failed to allocate. No point in trying to allocate
 984         // further. We'll just return NULL.
 985         MutexLockerEx x(Heap_lock);
 986         *gc_count_before_ret = total_collections();
 987         return NULL;
 988       }
 989     } else {
 990       // The GCLocker is either active or the GCLocker initiated
 991       // GC has not yet been performed. Stall until it is and
 992       // then retry the allocation.
 993       GC_locker::stall_until_clear();
 994     }
 995 
 996     // We can reach here if we were unsuccessul in scheduling a
 997     // collection (because another thread beat us to it) or if we were
 998     // stalled due to the GC locker. In either can we should retry the
 999     // allocation attempt in case another thread successfully
1000     // performed a collection and reclaimed enough space. We do the
1001     // first attempt (without holding the Heap_lock) here and the
1002     // follow-on attempt will be at the start of the next loop
1003     // iteration (after taking the Heap_lock).
1004     result = _mutator_alloc_region.attempt_allocation(word_size,
1005                                                       false /* bot_updates */);
1006     if (result != NULL) {
1007       return result;
1008     }
1009 
1010     // Give a warning if we seem to be looping forever.
1011     if ((QueuedAllocationWarningCount > 0) &&
1012         (try_count % QueuedAllocationWarningCount == 0)) {
1013       warning("G1CollectedHeap::attempt_allocation_slow() "
1014               "retries %d times", try_count);
1015     }
1016   }
1017 
1018   ShouldNotReachHere();
1019   return NULL;
1020 }
1021 
1022 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1023                                           unsigned int * gc_count_before_ret) {
1024   // The structure of this method has a lot of similarities to
1025   // attempt_allocation_slow(). The reason these two were not merged
1026   // into a single one is that such a method would require several "if
1027   // allocation is not humongous do this, otherwise do that"
1028   // conditional paths which would obscure its flow. In fact, an early
1029   // version of this code did use a unified method which was harder to
1030   // follow and, as a result, it had subtle bugs that were hard to
1031   // track down. So keeping these two methods separate allows each to
1032   // be more readable. It will be good to keep these two in sync as
1033   // much as possible.
1034 
1035   assert_heap_not_locked_and_not_at_safepoint();
1036   assert(isHumongous(word_size), "attempt_allocation_humongous() "
1037          "should only be called for humongous allocations");
1038 
1039   // Humongous objects can exhaust the heap quickly, so we should check if we
1040   // need to start a marking cycle at each humongous object allocation. We do
1041   // the check before we do the actual allocation. The reason for doing it
1042   // before the allocation is that we avoid having to keep track of the newly
1043   // allocated memory while we do a GC.
1044   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
1045                                            word_size)) {
1046     collect(GCCause::_g1_humongous_allocation);
1047   }
1048 
1049   // We will loop until a) we manage to successfully perform the
1050   // allocation or b) we successfully schedule a collection which
1051   // fails to perform the allocation. b) is the only case when we'll
1052   // return NULL.
1053   HeapWord* result = NULL;
1054   for (int try_count = 1; /* we'll return */; try_count += 1) {
1055     bool should_try_gc;
1056     unsigned int gc_count_before;
1057 
1058     {
1059       MutexLockerEx x(Heap_lock);
1060 
1061       // Given that humongous objects are not allocated in young
1062       // regions, we'll first try to do the allocation without doing a
1063       // collection hoping that there's enough space in the heap.
1064       result = humongous_obj_allocate(word_size);
1065       if (result != NULL) {
1066         return result;
1067       }
1068 
1069       if (GC_locker::is_active_and_needs_gc()) {
1070         should_try_gc = false;
1071       } else {
1072          // The GCLocker may not be active but the GCLocker initiated
1073         // GC may not yet have been performed (GCLocker::needs_gc()
1074         // returns true). In this case we do not try this GC and
1075         // wait until the GCLocker initiated GC is performed, and
1076         // then retry the allocation.
1077         if (GC_locker::needs_gc()) {
1078           should_try_gc = false;
1079         } else {
1080           // Read the GC count while still holding the Heap_lock.
1081           gc_count_before = total_collections();
1082           should_try_gc = true;
1083         }
1084       }
1085     }
1086 
1087     if (should_try_gc) {
1088       // If we failed to allocate the humongous object, we should try to
1089       // do a collection pause (if we're allowed) in case it reclaims
1090       // enough space for the allocation to succeed after the pause.
1091 
1092       bool succeeded;
1093       result = do_collection_pause(word_size, gc_count_before, &succeeded);
1094       if (result != NULL) {
1095         assert(succeeded, "only way to get back a non-NULL result");
1096         return result;
1097       }
1098 
1099       if (succeeded) {
1100         // If we get here we successfully scheduled a collection which
1101         // failed to allocate. No point in trying to allocate
1102         // further. We'll just return NULL.
1103         MutexLockerEx x(Heap_lock);
1104         *gc_count_before_ret = total_collections();
1105         return NULL;
1106       }
1107     } else {
1108       // The GCLocker is either active or the GCLocker initiated
1109       // GC has not yet been performed. Stall until it is and
1110       // then retry the allocation.
1111       GC_locker::stall_until_clear();
1112     }
1113 
1114     // We can reach here if we were unsuccessul in scheduling a
1115     // collection (because another thread beat us to it) or if we were
1116     // stalled due to the GC locker. In either can we should retry the
1117     // allocation attempt in case another thread successfully
1118     // performed a collection and reclaimed enough space.  Give a
1119     // warning if we seem to be looping forever.
1120 
1121     if ((QueuedAllocationWarningCount > 0) &&
1122         (try_count % QueuedAllocationWarningCount == 0)) {
1123       warning("G1CollectedHeap::attempt_allocation_humongous() "
1124               "retries %d times", try_count);
1125     }
1126   }
1127 
1128   ShouldNotReachHere();
1129   return NULL;
1130 }
1131 
1132 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1133                                        bool expect_null_mutator_alloc_region) {
1134   assert_at_safepoint(true /* should_be_vm_thread */);
1135   assert(_mutator_alloc_region.get() == NULL ||
1136                                              !expect_null_mutator_alloc_region,
1137          "the current alloc region was unexpectedly found to be non-NULL");
1138 
1139   if (!isHumongous(word_size)) {
1140     return _mutator_alloc_region.attempt_allocation_locked(word_size,
1141                                                       false /* bot_updates */);
1142   } else {
1143     HeapWord* result = humongous_obj_allocate(word_size);
1144     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1145       g1_policy()->set_initiate_conc_mark_if_possible();
1146     }
1147     return result;
1148   }
1149 
1150   ShouldNotReachHere();
1151 }
1152 
1153 class PostMCRemSetClearClosure: public HeapRegionClosure {
1154   G1CollectedHeap* _g1h;
1155   ModRefBarrierSet* _mr_bs;
1156 public:
1157   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1158     _g1h(g1h), _mr_bs(mr_bs) { }
1159   bool doHeapRegion(HeapRegion* r) {
1160     if (r->continuesHumongous()) {
1161       return false;
1162     }
1163     _g1h->reset_gc_time_stamps(r);
1164     HeapRegionRemSet* hrrs = r->rem_set();
1165     if (hrrs != NULL) hrrs->clear();
1166     // You might think here that we could clear just the cards
1167     // corresponding to the used region.  But no: if we leave a dirty card
1168     // in a region we might allocate into, then it would prevent that card
1169     // from being enqueued, and cause it to be missed.
1170     // Re: the performance cost: we shouldn't be doing full GC anyway!
1171     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1172     return false;
1173   }
1174 };
1175 
1176 void G1CollectedHeap::clear_rsets_post_compaction() {
1177   PostMCRemSetClearClosure rs_clear(this, mr_bs());
1178   heap_region_iterate(&rs_clear);
1179 }
1180 
1181 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1182   G1CollectedHeap*   _g1h;
1183   UpdateRSOopClosure _cl;
1184   int                _worker_i;
1185 public:
1186   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1187     _cl(g1->g1_rem_set(), worker_i),
1188     _worker_i(worker_i),
1189     _g1h(g1)
1190   { }
1191 
1192   bool doHeapRegion(HeapRegion* r) {
1193     if (!r->continuesHumongous()) {
1194       _cl.set_from(r);
1195       r->oop_iterate(&_cl);
1196     }
1197     return false;
1198   }
1199 };
1200 
1201 class ParRebuildRSTask: public AbstractGangTask {
1202   G1CollectedHeap* _g1;
1203 public:
1204   ParRebuildRSTask(G1CollectedHeap* g1)
1205     : AbstractGangTask("ParRebuildRSTask"),
1206       _g1(g1)
1207   { }
1208 
1209   void work(uint worker_id) {
1210     RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1211     _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
1212                                           _g1->workers()->active_workers(),
1213                                          HeapRegion::RebuildRSClaimValue);
1214   }
1215 };
1216 
1217 class PostCompactionPrinterClosure: public HeapRegionClosure {
1218 private:
1219   G1HRPrinter* _hr_printer;
1220 public:
1221   bool doHeapRegion(HeapRegion* hr) {
1222     assert(!hr->is_young(), "not expecting to find young regions");
1223     // We only generate output for non-empty regions.
1224     if (!hr->is_empty()) {
1225       if (!hr->isHumongous()) {
1226         _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1227       } else if (hr->startsHumongous()) {
1228         if (hr->region_num() == 1) {
1229           // single humongous region
1230           _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1231         } else {
1232           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1233         }
1234       } else {
1235         assert(hr->continuesHumongous(), "only way to get here");
1236         _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1237       }
1238     }
1239     return false;
1240   }
1241 
1242   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1243     : _hr_printer(hr_printer) { }
1244 };
1245 
1246 void G1CollectedHeap::print_hrs_post_compaction() {
1247   PostCompactionPrinterClosure cl(hr_printer());
1248   heap_region_iterate(&cl);
1249 }
1250 
1251 double G1CollectedHeap::verify(bool guard, const char* msg) {
1252   double verify_time_ms = 0.0;
1253 
1254   if (guard && total_collections() >= VerifyGCStartAt) {
1255     double verify_start = os::elapsedTime();
1256     HandleMark hm;  // Discard invalid handles created during verification
1257     gclog_or_tty->print(msg);
1258     prepare_for_verify();
1259     Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking);
1260     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
1261   }
1262 
1263   return verify_time_ms;
1264 }
1265 
1266 void G1CollectedHeap::verify_before_gc() {
1267   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
1268   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
1269 }
1270 
1271 void G1CollectedHeap::verify_after_gc() {
1272   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
1273   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
1274 }
1275 
1276 bool G1CollectedHeap::do_collection(bool explicit_gc,
1277                                     bool clear_all_soft_refs,
1278                                     size_t word_size) {
1279   assert_at_safepoint(true /* should_be_vm_thread */);
1280 
1281   if (GC_locker::check_active_before_gc()) {
1282     return false;
1283   }
1284 
1285   SvcGCMarker sgcm(SvcGCMarker::FULL);
1286   ResourceMark rm;
1287 
1288   print_heap_before_gc();
1289 
1290   size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
1291 
1292   HRSPhaseSetter x(HRSPhaseFullGC);
1293   verify_region_sets_optional();
1294 
1295   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1296                            collector_policy()->should_clear_all_soft_refs();
1297 
1298   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1299 
1300   {
1301     IsGCActiveMark x;
1302 
1303     // Timing
1304     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1305     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
1306     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1307 
1308     TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
1309     TraceCollectorStats tcs(g1mm()->full_collection_counters());
1310     TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1311 
1312     double start = os::elapsedTime();
1313     g1_policy()->record_full_collection_start();
1314 
1315     // Note: When we have a more flexible GC logging framework that
1316     // allows us to add optional attributes to a GC log record we
1317     // could consider timing and reporting how long we wait in the
1318     // following two methods.
1319     wait_while_free_regions_coming();
1320     // If we start the compaction before the CM threads finish
1321     // scanning the root regions we might trip them over as we'll
1322     // be moving objects / updating references. So let's wait until
1323     // they are done. By telling them to abort, they should complete
1324     // early.
1325     _cm->root_regions()->abort();
1326     _cm->root_regions()->wait_until_scan_finished();
1327     append_secondary_free_list_if_not_empty_with_lock();
1328 
1329     gc_prologue(true);
1330     increment_total_collections(true /* full gc */);
1331     increment_old_marking_cycles_started();
1332 
1333     size_t g1h_prev_used = used();
1334     assert(used() == recalculate_used(), "Should be equal");
1335 
1336     verify_before_gc();
1337 
1338     pre_full_gc_dump();
1339 
1340     COMPILER2_PRESENT(DerivedPointerTable::clear());
1341 
1342     // Disable discovery and empty the discovered lists
1343     // for the CM ref processor.
1344     ref_processor_cm()->disable_discovery();
1345     ref_processor_cm()->abandon_partial_discovery();
1346     ref_processor_cm()->verify_no_references_recorded();
1347 
1348     // Abandon current iterations of concurrent marking and concurrent
1349     // refinement, if any are in progress. We have to do this before
1350     // wait_until_scan_finished() below.
1351     concurrent_mark()->abort();
1352 
1353     // Make sure we'll choose a new allocation region afterwards.
1354     release_mutator_alloc_region();
1355     abandon_gc_alloc_regions();
1356     g1_rem_set()->cleanupHRRS();
1357 
1358     // We should call this after we retire any currently active alloc
1359     // regions so that all the ALLOC / RETIRE events are generated
1360     // before the start GC event.
1361     _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1362 
1363     // We may have added regions to the current incremental collection
1364     // set between the last GC or pause and now. We need to clear the
1365     // incremental collection set and then start rebuilding it afresh
1366     // after this full GC.
1367     abandon_collection_set(g1_policy()->inc_cset_head());
1368     g1_policy()->clear_incremental_cset();
1369     g1_policy()->stop_incremental_cset_building();
1370 
1371     tear_down_region_sets(false /* free_list_only */);
1372     g1_policy()->set_gcs_are_young(true);
1373 
1374     // See the comments in g1CollectedHeap.hpp and
1375     // G1CollectedHeap::ref_processing_init() about
1376     // how reference processing currently works in G1.
1377 
1378     // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1379     ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1380 
1381     // Temporarily clear the STW ref processor's _is_alive_non_header field.
1382     ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1383 
1384     ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
1385     ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1386 
1387     // Do collection work
1388     {
1389       HandleMark hm;  // Discard invalid handles created during gc
1390       G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1391     }
1392 
1393     assert(free_regions() == 0, "we should not have added any free regions");
1394     rebuild_region_sets(false /* free_list_only */);
1395 
1396     // Enqueue any discovered reference objects that have
1397     // not been removed from the discovered lists.
1398     ref_processor_stw()->enqueue_discovered_references();
1399 
1400     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1401 
1402     MemoryService::track_memory_usage();
1403 
1404     verify_after_gc();
1405 
1406     assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1407     ref_processor_stw()->verify_no_references_recorded();
1408 
1409     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1410     ClassLoaderDataGraph::purge();
1411 
1412     // Note: since we've just done a full GC, concurrent
1413     // marking is no longer active. Therefore we need not
1414     // re-enable reference discovery for the CM ref processor.
1415     // That will be done at the start of the next marking cycle.
1416     assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1417     ref_processor_cm()->verify_no_references_recorded();
1418 
1419     reset_gc_time_stamp();
1420     // Since everything potentially moved, we will clear all remembered
1421     // sets, and clear all cards.  Later we will rebuild remebered
1422     // sets. We will also reset the GC time stamps of the regions.
1423     clear_rsets_post_compaction();
1424     check_gc_time_stamps();
1425 
1426     // Resize the heap if necessary.
1427     resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1428 
1429     if (_hr_printer.is_active()) {
1430       // We should do this after we potentially resize the heap so
1431       // that all the COMMIT / UNCOMMIT events are generated before
1432       // the end GC event.
1433 
1434       print_hrs_post_compaction();
1435       _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1436     }
1437 
1438     if (_cg1r->use_cache()) {
1439       _cg1r->clear_and_record_card_counts();
1440       _cg1r->clear_hot_cache();
1441     }
1442 
1443     // Rebuild remembered sets of all regions.
1444     if (G1CollectedHeap::use_parallel_gc_threads()) {
1445       uint n_workers =
1446         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1447                                        workers()->active_workers(),
1448                                        Threads::number_of_non_daemon_threads());
1449       assert(UseDynamicNumberOfGCThreads ||
1450              n_workers == workers()->total_workers(),
1451              "If not dynamic should be using all the  workers");
1452       workers()->set_active_workers(n_workers);
1453       // Set parallel threads in the heap (_n_par_threads) only
1454       // before a parallel phase and always reset it to 0 after
1455       // the phase so that the number of parallel threads does
1456       // no get carried forward to a serial phase where there
1457       // may be code that is "possibly_parallel".
1458       set_par_threads(n_workers);
1459 
1460       ParRebuildRSTask rebuild_rs_task(this);
1461       assert(check_heap_region_claim_values(
1462              HeapRegion::InitialClaimValue), "sanity check");
1463       assert(UseDynamicNumberOfGCThreads ||
1464              workers()->active_workers() == workers()->total_workers(),
1465         "Unless dynamic should use total workers");
1466       // Use the most recent number of  active workers
1467       assert(workers()->active_workers() > 0,
1468         "Active workers not properly set");
1469       set_par_threads(workers()->active_workers());
1470       workers()->run_task(&rebuild_rs_task);
1471       set_par_threads(0);
1472       assert(check_heap_region_claim_values(
1473              HeapRegion::RebuildRSClaimValue), "sanity check");
1474       reset_heap_region_claim_values();
1475     } else {
1476       RebuildRSOutOfRegionClosure rebuild_rs(this);
1477       heap_region_iterate(&rebuild_rs);
1478     }
1479 
1480     if (G1Log::fine()) {
1481       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
1482     }
1483 
1484     if (true) { // FIXME
1485       MetaspaceGC::compute_new_size();
1486     }
1487 
1488     // Start a new incremental collection set for the next pause
1489     assert(g1_policy()->collection_set() == NULL, "must be");
1490     g1_policy()->start_incremental_cset_building();
1491 
1492     // Clear the _cset_fast_test bitmap in anticipation of adding
1493     // regions to the incremental collection set for the next
1494     // evacuation pause.
1495     clear_cset_fast_test();
1496 
1497     init_mutator_alloc_region();
1498 
1499     double end = os::elapsedTime();
1500     g1_policy()->record_full_collection_end();
1501 
1502 #ifdef TRACESPINNING
1503     ParallelTaskTerminator::print_termination_counts();
1504 #endif
1505 
1506     gc_epilogue(true);
1507 
1508     // Discard all rset updates
1509     JavaThread::dirty_card_queue_set().abandon_logs();
1510     assert(!G1DeferredRSUpdate
1511            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1512 
1513     _young_list->reset_sampled_info();
1514     // At this point there should be no regions in the
1515     // entire heap tagged as young.
1516     assert( check_young_list_empty(true /* check_heap */),
1517       "young list should be empty at this point");
1518 
1519     // Update the number of full collections that have been completed.
1520     increment_old_marking_cycles_completed(false /* concurrent */);
1521 
1522     _hrs.verify_optional();
1523     verify_region_sets_optional();
1524 
1525     print_heap_after_gc();
1526 
1527     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1528     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1529     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1530     // before any GC notifications are raised.
1531     g1mm()->update_sizes();
1532   }
1533 
1534   post_full_gc_dump();
1535 
1536   return true;
1537 }
1538 
1539 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1540   // do_collection() will return whether it succeeded in performing
1541   // the GC. Currently, there is no facility on the
1542   // do_full_collection() API to notify the caller than the collection
1543   // did not succeed (e.g., because it was locked out by the GC
1544   // locker). So, right now, we'll ignore the return value.
1545   bool dummy = do_collection(true,                /* explicit_gc */
1546                              clear_all_soft_refs,
1547                              0                    /* word_size */);
1548 }
1549 
1550 // This code is mostly copied from TenuredGeneration.
1551 void
1552 G1CollectedHeap::
1553 resize_if_necessary_after_full_collection(size_t word_size) {
1554   assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
1555 
1556   // Include the current allocation, if any, and bytes that will be
1557   // pre-allocated to support collections, as "used".
1558   const size_t used_after_gc = used();
1559   const size_t capacity_after_gc = capacity();
1560   const size_t free_after_gc = capacity_after_gc - used_after_gc;
1561 
1562   // This is enforced in arguments.cpp.
1563   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1564          "otherwise the code below doesn't make sense");
1565 
1566   // We don't have floating point command-line arguments
1567   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1568   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1569   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1570   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1571 
1572   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1573   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1574 
1575   // We have to be careful here as these two calculations can overflow
1576   // 32-bit size_t's.
1577   double used_after_gc_d = (double) used_after_gc;
1578   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1579   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1580 
1581   // Let's make sure that they are both under the max heap size, which
1582   // by default will make them fit into a size_t.
1583   double desired_capacity_upper_bound = (double) max_heap_size;
1584   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1585                                     desired_capacity_upper_bound);
1586   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1587                                     desired_capacity_upper_bound);
1588 
1589   // We can now safely turn them into size_t's.
1590   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1591   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1592 
1593   // This assert only makes sense here, before we adjust them
1594   // with respect to the min and max heap size.
1595   assert(minimum_desired_capacity <= maximum_desired_capacity,
1596          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
1597                  "maximum_desired_capacity = "SIZE_FORMAT,
1598                  minimum_desired_capacity, maximum_desired_capacity));
1599 
1600   // Should not be greater than the heap max size. No need to adjust
1601   // it with respect to the heap min size as it's a lower bound (i.e.,
1602   // we'll try to make the capacity larger than it, not smaller).
1603   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1604   // Should not be less than the heap min size. No need to adjust it
1605   // with respect to the heap max size as it's an upper bound (i.e.,
1606   // we'll try to make the capacity smaller than it, not greater).
1607   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1608 
1609   if (capacity_after_gc < minimum_desired_capacity) {
1610     // Don't expand unless it's significant
1611     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1612     ergo_verbose4(ErgoHeapSizing,
1613                   "attempt heap expansion",
1614                   ergo_format_reason("capacity lower than "
1615                                      "min desired capacity after Full GC")
1616                   ergo_format_byte("capacity")
1617                   ergo_format_byte("occupancy")
1618                   ergo_format_byte_perc("min desired capacity"),
1619                   capacity_after_gc, used_after_gc,
1620                   minimum_desired_capacity, (double) MinHeapFreeRatio);
1621     expand(expand_bytes);
1622 
1623     // No expansion, now see if we want to shrink
1624   } else if (capacity_after_gc > maximum_desired_capacity) {
1625     // Capacity too large, compute shrinking size
1626     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1627     ergo_verbose4(ErgoHeapSizing,
1628                   "attempt heap shrinking",
1629                   ergo_format_reason("capacity higher than "
1630                                      "max desired capacity after Full GC")
1631                   ergo_format_byte("capacity")
1632                   ergo_format_byte("occupancy")
1633                   ergo_format_byte_perc("max desired capacity"),
1634                   capacity_after_gc, used_after_gc,
1635                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
1636     shrink(shrink_bytes);
1637   }
1638 }
1639 
1640 
1641 HeapWord*
1642 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1643                                            bool* succeeded) {
1644   assert_at_safepoint(true /* should_be_vm_thread */);
1645 
1646   *succeeded = true;
1647   // Let's attempt the allocation first.
1648   HeapWord* result =
1649     attempt_allocation_at_safepoint(word_size,
1650                                  false /* expect_null_mutator_alloc_region */);
1651   if (result != NULL) {
1652     assert(*succeeded, "sanity");
1653     return result;
1654   }
1655 
1656   // In a G1 heap, we're supposed to keep allocation from failing by
1657   // incremental pauses.  Therefore, at least for now, we'll favor
1658   // expansion over collection.  (This might change in the future if we can
1659   // do something smarter than full collection to satisfy a failed alloc.)
1660   result = expand_and_allocate(word_size);
1661   if (result != NULL) {
1662     assert(*succeeded, "sanity");
1663     return result;
1664   }
1665 
1666   // Expansion didn't work, we'll try to do a Full GC.
1667   bool gc_succeeded = do_collection(false, /* explicit_gc */
1668                                     false, /* clear_all_soft_refs */
1669                                     word_size);
1670   if (!gc_succeeded) {
1671     *succeeded = false;
1672     return NULL;
1673   }
1674 
1675   // Retry the allocation
1676   result = attempt_allocation_at_safepoint(word_size,
1677                                   true /* expect_null_mutator_alloc_region */);
1678   if (result != NULL) {
1679     assert(*succeeded, "sanity");
1680     return result;
1681   }
1682 
1683   // Then, try a Full GC that will collect all soft references.
1684   gc_succeeded = do_collection(false, /* explicit_gc */
1685                                true,  /* clear_all_soft_refs */
1686                                word_size);
1687   if (!gc_succeeded) {
1688     *succeeded = false;
1689     return NULL;
1690   }
1691 
1692   // Retry the allocation once more
1693   result = attempt_allocation_at_safepoint(word_size,
1694                                   true /* expect_null_mutator_alloc_region */);
1695   if (result != NULL) {
1696     assert(*succeeded, "sanity");
1697     return result;
1698   }
1699 
1700   assert(!collector_policy()->should_clear_all_soft_refs(),
1701          "Flag should have been handled and cleared prior to this point");
1702 
1703   // What else?  We might try synchronous finalization later.  If the total
1704   // space available is large enough for the allocation, then a more
1705   // complete compaction phase than we've tried so far might be
1706   // appropriate.
1707   assert(*succeeded, "sanity");
1708   return NULL;
1709 }
1710 
1711 // Attempting to expand the heap sufficiently
1712 // to support an allocation of the given "word_size".  If
1713 // successful, perform the allocation and return the address of the
1714 // allocated block, or else "NULL".
1715 
1716 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1717   assert_at_safepoint(true /* should_be_vm_thread */);
1718 
1719   verify_region_sets_optional();
1720 
1721   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1722   ergo_verbose1(ErgoHeapSizing,
1723                 "attempt heap expansion",
1724                 ergo_format_reason("allocation request failed")
1725                 ergo_format_byte("allocation request"),
1726                 word_size * HeapWordSize);
1727   if (expand(expand_bytes)) {
1728     _hrs.verify_optional();
1729     verify_region_sets_optional();
1730     return attempt_allocation_at_safepoint(word_size,
1731                                  false /* expect_null_mutator_alloc_region */);
1732   }
1733   return NULL;
1734 }
1735 
1736 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
1737                                              HeapWord* new_end) {
1738   assert(old_end != new_end, "don't call this otherwise");
1739   assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
1740 
1741   // Update the committed mem region.
1742   _g1_committed.set_end(new_end);
1743   // Tell the card table about the update.
1744   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1745   // Tell the BOT about the update.
1746   _bot_shared->resize(_g1_committed.word_size());
1747 }
1748 
1749 bool G1CollectedHeap::expand(size_t expand_bytes) {
1750   size_t old_mem_size = _g1_storage.committed_size();
1751   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1752   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1753                                        HeapRegion::GrainBytes);
1754   ergo_verbose2(ErgoHeapSizing,
1755                 "expand the heap",
1756                 ergo_format_byte("requested expansion amount")
1757                 ergo_format_byte("attempted expansion amount"),
1758                 expand_bytes, aligned_expand_bytes);
1759 
1760   // First commit the memory.
1761   HeapWord* old_end = (HeapWord*) _g1_storage.high();
1762   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
1763   if (successful) {
1764     // Then propagate this update to the necessary data structures.
1765     HeapWord* new_end = (HeapWord*) _g1_storage.high();
1766     update_committed_space(old_end, new_end);
1767 
1768     FreeRegionList expansion_list("Local Expansion List");
1769     MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
1770     assert(mr.start() == old_end, "post-condition");
1771     // mr might be a smaller region than what was requested if
1772     // expand_by() was unable to allocate the HeapRegion instances
1773     assert(mr.end() <= new_end, "post-condition");
1774 
1775     size_t actual_expand_bytes = mr.byte_size();
1776     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1777     assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
1778            "post-condition");
1779     if (actual_expand_bytes < aligned_expand_bytes) {
1780       // We could not expand _hrs to the desired size. In this case we
1781       // need to shrink the committed space accordingly.
1782       assert(mr.end() < new_end, "invariant");
1783 
1784       size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
1785       // First uncommit the memory.
1786       _g1_storage.shrink_by(diff_bytes);
1787       // Then propagate this update to the necessary data structures.
1788       update_committed_space(new_end, mr.end());
1789     }
1790     _free_list.add_as_tail(&expansion_list);
1791 
1792     if (_hr_printer.is_active()) {
1793       HeapWord* curr = mr.start();
1794       while (curr < mr.end()) {
1795         HeapWord* curr_end = curr + HeapRegion::GrainWords;
1796         _hr_printer.commit(curr, curr_end);
1797         curr = curr_end;
1798       }
1799       assert(curr == mr.end(), "post-condition");
1800     }
1801     g1_policy()->record_new_heap_size(n_regions());
1802   } else {
1803     ergo_verbose0(ErgoHeapSizing,
1804                   "did not expand the heap",
1805                   ergo_format_reason("heap expansion operation failed"));
1806     // The expansion of the virtual storage space was unsuccessful.
1807     // Let's see if it was because we ran out of swap.
1808     if (G1ExitOnExpansionFailure &&
1809         _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
1810       // We had head room...
1811       vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
1812     }
1813   }
1814   return successful;
1815 }
1816 
1817 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1818   size_t old_mem_size = _g1_storage.committed_size();
1819   size_t aligned_shrink_bytes =
1820     ReservedSpace::page_align_size_down(shrink_bytes);
1821   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1822                                          HeapRegion::GrainBytes);
1823   uint num_regions_deleted = 0;
1824   MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
1825   HeapWord* old_end = (HeapWord*) _g1_storage.high();
1826   assert(mr.end() == old_end, "post-condition");
1827 
1828   ergo_verbose3(ErgoHeapSizing,
1829                 "shrink the heap",
1830                 ergo_format_byte("requested shrinking amount")
1831                 ergo_format_byte("aligned shrinking amount")
1832                 ergo_format_byte("attempted shrinking amount"),
1833                 shrink_bytes, aligned_shrink_bytes, mr.byte_size());
1834   if (mr.byte_size() > 0) {
1835     if (_hr_printer.is_active()) {
1836       HeapWord* curr = mr.end();
1837       while (curr > mr.start()) {
1838         HeapWord* curr_end = curr;
1839         curr -= HeapRegion::GrainWords;
1840         _hr_printer.uncommit(curr, curr_end);
1841       }
1842       assert(curr == mr.start(), "post-condition");
1843     }
1844 
1845     _g1_storage.shrink_by(mr.byte_size());
1846     HeapWord* new_end = (HeapWord*) _g1_storage.high();
1847     assert(mr.start() == new_end, "post-condition");
1848 
1849     _expansion_regions += num_regions_deleted;
1850     update_committed_space(old_end, new_end);
1851     HeapRegionRemSet::shrink_heap(n_regions());
1852     g1_policy()->record_new_heap_size(n_regions());
1853   } else {
1854     ergo_verbose0(ErgoHeapSizing,
1855                   "did not shrink the heap",
1856                   ergo_format_reason("heap shrinking operation failed"));
1857   }
1858 }
1859 
1860 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1861   verify_region_sets_optional();
1862 
1863   // We should only reach here at the end of a Full GC which means we
1864   // should not not be holding to any GC alloc regions. The method
1865   // below will make sure of that and do any remaining clean up.
1866   abandon_gc_alloc_regions();
1867 
1868   // Instead of tearing down / rebuilding the free lists here, we
1869   // could instead use the remove_all_pending() method on free_list to
1870   // remove only the ones that we need to remove.
1871   tear_down_region_sets(true /* free_list_only */);
1872   shrink_helper(shrink_bytes);
1873   rebuild_region_sets(true /* free_list_only */);
1874 
1875   _hrs.verify_optional();
1876   verify_region_sets_optional();
1877 }
1878 
1879 // Public methods.
1880 
1881 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1882 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1883 #endif // _MSC_VER
1884 
1885 
1886 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1887   SharedHeap(policy_),
1888   _g1_policy(policy_),
1889   _dirty_card_queue_set(false),
1890   _into_cset_dirty_card_queue_set(false),
1891   _is_alive_closure_cm(this),
1892   _is_alive_closure_stw(this),
1893   _ref_processor_cm(NULL),
1894   _ref_processor_stw(NULL),
1895   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1896   _bot_shared(NULL),
1897   _preserved_marks(40, 10000),
1898   _evac_failure_scan_stack(NULL) ,
1899   _mark_in_progress(false),
1900   _cg1r(NULL), _summary_bytes_used(0),
1901   _g1mm(NULL),
1902   _refine_cte_cl(NULL),
1903   _full_collection(false),
1904   _free_list("Master Free List"),
1905   _secondary_free_list("Secondary Free List"),
1906   _old_set("Old Set"),
1907   _humongous_set("Master Humongous Set"),
1908   _free_regions_coming(false),
1909   _young_list(new YoungList(this)),
1910   _gc_time_stamp(0),
1911   _retained_old_gc_alloc_region(NULL),
1912   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1913   _old_plab_stats(OldPLABSize, PLABWeight),
1914   _expand_heap_after_alloc_failure(true),
1915   _surviving_young_words(NULL),
1916   _old_marking_cycles_started(0),
1917   _old_marking_cycles_completed(0),
1918   _in_cset_fast_test(NULL),
1919   _in_cset_fast_test_base(NULL),
1920   _dirty_cards_region_list(NULL),
1921   _worker_cset_start_region(NULL),
1922   _worker_cset_start_region_time_stamp(NULL) {
1923   _g1h = this; // To catch bugs.
1924   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1925     vm_exit_during_initialization("Failed necessary allocation.");
1926   }
1927 
1928   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1929 
1930   int n_queues = MAX2((int)ParallelGCThreads, 1);
1931   _task_queues = new RefToScanQueueSet(n_queues);
1932 
1933   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1934   assert(n_rem_sets > 0, "Invariant.");
1935 
1936   HeapRegionRemSetIterator** iter_arr =
1937     NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC);
1938   for (int i = 0; i < n_queues; i++) {
1939     iter_arr[i] = new HeapRegionRemSetIterator();
1940   }
1941   _rem_set_iterator = iter_arr;
1942 
1943   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1944   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
1945 
1946   for (int i = 0; i < n_queues; i++) {
1947     RefToScanQueue* q = new RefToScanQueue();
1948     q->initialize();
1949     _task_queues->register_queue(i, q);
1950   }
1951 
1952   clear_cset_start_regions();
1953 
1954   // Initialize the G1EvacuationFailureALot counters and flags.
1955   NOT_PRODUCT(reset_evacuation_should_fail();)
1956 
1957   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1958 }
1959 
1960 jint G1CollectedHeap::initialize() {
1961   CollectedHeap::pre_initialize();
1962   os::enable_vtime();
1963 
1964   G1Log::init();
1965 
1966   // Necessary to satisfy locking discipline assertions.
1967 
1968   MutexLocker x(Heap_lock);
1969 
1970   // We have to initialize the printer before committing the heap, as
1971   // it will be used then.
1972   _hr_printer.set_active(G1PrintHeapRegions);
1973 
1974   // While there are no constraints in the GC code that HeapWordSize
1975   // be any particular value, there are multiple other areas in the
1976   // system which believe this to be true (e.g. oop->object_size in some
1977   // cases incorrectly returns the size in wordSize units rather than
1978   // HeapWordSize).
1979   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1980 
1981   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1982   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1983 
1984   // Ensure that the sizes are properly aligned.
1985   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1986   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1987 
1988   _cg1r = new ConcurrentG1Refine();
1989 
1990   // Reserve the maximum.
1991 
1992   // When compressed oops are enabled, the preferred heap base
1993   // is calculated by subtracting the requested size from the
1994   // 32Gb boundary and using the result as the base address for
1995   // heap reservation. If the requested size is not aligned to
1996   // HeapRegion::GrainBytes (i.e. the alignment that is passed
1997   // into the ReservedHeapSpace constructor) then the actual
1998   // base of the reserved heap may end up differing from the
1999   // address that was requested (i.e. the preferred heap base).
2000   // If this happens then we could end up using a non-optimal
2001   // compressed oops mode.
2002 
2003   // Since max_byte_size is aligned to the size of a heap region (checked
2004   // above).
2005   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2006 
2007   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2008                                                  HeapRegion::GrainBytes);
2009 
2010   // It is important to do this in a way such that concurrent readers can't
2011   // temporarily think somethings in the heap.  (I've actually seen this
2012   // happen in asserts: DLD.)
2013   _reserved.set_word_size(0);
2014   _reserved.set_start((HeapWord*)heap_rs.base());
2015   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
2016 
2017   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
2018 
2019   // Create the gen rem set (and barrier set) for the entire reserved region.
2020   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
2021   set_barrier_set(rem_set()->bs());
2022   if (barrier_set()->is_a(BarrierSet::ModRef)) {
2023     _mr_bs = (ModRefBarrierSet*)_barrier_set;
2024   } else {
2025     vm_exit_during_initialization("G1 requires a mod ref bs.");
2026     return JNI_ENOMEM;
2027   }
2028 
2029   // Also create a G1 rem set.
2030   if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
2031     _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
2032   } else {
2033     vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
2034     return JNI_ENOMEM;
2035   }
2036 
2037   // Carve out the G1 part of the heap.
2038 
2039   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
2040   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
2041                            g1_rs.size()/HeapWordSize);
2042 
2043   _g1_storage.initialize(g1_rs, 0);
2044   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
2045   _hrs.initialize((HeapWord*) _g1_reserved.start(),
2046                   (HeapWord*) _g1_reserved.end(),
2047                   _expansion_regions);
2048 
2049   // 6843694 - ensure that the maximum region index can fit
2050   // in the remembered set structures.
2051   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2052   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2053 
2054   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2055   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2056   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2057             "too many cards per region");
2058 
2059   HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
2060 
2061   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2062                                              heap_word_size(init_byte_size));
2063 
2064   _g1h = this;
2065 
2066    _in_cset_fast_test_length = max_regions();
2067    _in_cset_fast_test_base =
2068                    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
2069 
2070    // We're biasing _in_cset_fast_test to avoid subtracting the
2071    // beginning of the heap every time we want to index; basically
2072    // it's the same with what we do with the card table.
2073    _in_cset_fast_test = _in_cset_fast_test_base -
2074                ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
2075 
2076    // Clear the _cset_fast_test bitmap in anticipation of adding
2077    // regions to the incremental collection set for the first
2078    // evacuation pause.
2079    clear_cset_fast_test();
2080 
2081   // Create the ConcurrentMark data structure and thread.
2082   // (Must do this late, so that "max_regions" is defined.)
2083   _cm = new ConcurrentMark(this, heap_rs);
2084   if (_cm == NULL || !_cm->completed_initialization()) {
2085     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2086     return JNI_ENOMEM;
2087   }
2088   _cmThread = _cm->cmThread();
2089 
2090   // Initialize the from_card cache structure of HeapRegionRemSet.
2091   HeapRegionRemSet::init_heap(max_regions());
2092 
2093   // Now expand into the initial heap size.
2094   if (!expand(init_byte_size)) {
2095     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2096     return JNI_ENOMEM;
2097   }
2098 
2099   // Perform any initialization actions delegated to the policy.
2100   g1_policy()->init();
2101 
2102   _refine_cte_cl =
2103     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
2104                                     g1_rem_set(),
2105                                     concurrent_g1_refine());
2106   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
2107 
2108   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2109                                                SATB_Q_FL_lock,
2110                                                G1SATBProcessCompletedThreshold,
2111                                                Shared_SATB_Q_lock);
2112 
2113   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2114                                                 DirtyCardQ_FL_lock,
2115                                                 concurrent_g1_refine()->yellow_zone(),
2116                                                 concurrent_g1_refine()->red_zone(),
2117                                                 Shared_DirtyCardQ_lock);
2118 
2119   if (G1DeferredRSUpdate) {
2120     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2121                                       DirtyCardQ_FL_lock,
2122                                       -1, // never trigger processing
2123                                       -1, // no limit on length
2124                                       Shared_DirtyCardQ_lock,
2125                                       &JavaThread::dirty_card_queue_set());
2126   }
2127 
2128   // Initialize the card queue set used to hold cards containing
2129   // references into the collection set.
2130   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
2131                                              DirtyCardQ_FL_lock,
2132                                              -1, // never trigger processing
2133                                              -1, // no limit on length
2134                                              Shared_DirtyCardQ_lock,
2135                                              &JavaThread::dirty_card_queue_set());
2136 
2137   // In case we're keeping closure specialization stats, initialize those
2138   // counts and that mechanism.
2139   SpecializationStats::clear();
2140 
2141   // Do later initialization work for concurrent refinement.
2142   _cg1r->init();
2143 
2144   // Here we allocate the dummy full region that is required by the
2145   // G1AllocRegion class. If we don't pass an address in the reserved
2146   // space here, lots of asserts fire.
2147 
2148   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2149                                              _g1_reserved.start());
2150   // We'll re-use the same region whether the alloc region will
2151   // require BOT updates or not and, if it doesn't, then a non-young
2152   // region will complain that it cannot support allocations without
2153   // BOT updates. So we'll tag the dummy region as young to avoid that.
2154   dummy_region->set_young();
2155   // Make sure it's full.
2156   dummy_region->set_top(dummy_region->end());
2157   G1AllocRegion::setup(this, dummy_region);
2158 
2159   init_mutator_alloc_region();
2160 
2161   // Do create of the monitoring and management support so that
2162   // values in the heap have been properly initialized.
2163   _g1mm = new G1MonitoringSupport(this);
2164 
2165   return JNI_OK;
2166 }
2167 
2168 void G1CollectedHeap::ref_processing_init() {
2169   // Reference processing in G1 currently works as follows:
2170   //
2171   // * There are two reference processor instances. One is
2172   //   used to record and process discovered references
2173   //   during concurrent marking; the other is used to
2174   //   record and process references during STW pauses
2175   //   (both full and incremental).
2176   // * Both ref processors need to 'span' the entire heap as
2177   //   the regions in the collection set may be dotted around.
2178   //
2179   // * For the concurrent marking ref processor:
2180   //   * Reference discovery is enabled at initial marking.
2181   //   * Reference discovery is disabled and the discovered
2182   //     references processed etc during remarking.
2183   //   * Reference discovery is MT (see below).
2184   //   * Reference discovery requires a barrier (see below).
2185   //   * Reference processing may or may not be MT
2186   //     (depending on the value of ParallelRefProcEnabled
2187   //     and ParallelGCThreads).
2188   //   * A full GC disables reference discovery by the CM
2189   //     ref processor and abandons any entries on it's
2190   //     discovered lists.
2191   //
2192   // * For the STW processor:
2193   //   * Non MT discovery is enabled at the start of a full GC.
2194   //   * Processing and enqueueing during a full GC is non-MT.
2195   //   * During a full GC, references are processed after marking.
2196   //
2197   //   * Discovery (may or may not be MT) is enabled at the start
2198   //     of an incremental evacuation pause.
2199   //   * References are processed near the end of a STW evacuation pause.
2200   //   * For both types of GC:
2201   //     * Discovery is atomic - i.e. not concurrent.
2202   //     * Reference discovery will not need a barrier.
2203 
2204   SharedHeap::ref_processing_init();
2205   MemRegion mr = reserved_region();
2206 
2207   // Concurrent Mark ref processor
2208   _ref_processor_cm =
2209     new ReferenceProcessor(mr,    // span
2210                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2211                                 // mt processing
2212                            (int) ParallelGCThreads,
2213                                 // degree of mt processing
2214                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2215                                 // mt discovery
2216                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
2217                                 // degree of mt discovery
2218                            false,
2219                                 // Reference discovery is not atomic
2220                            &_is_alive_closure_cm,
2221                                 // is alive closure
2222                                 // (for efficiency/performance)
2223                            true);
2224                                 // Setting next fields of discovered
2225                                 // lists requires a barrier.
2226 
2227   // STW ref processor
2228   _ref_processor_stw =
2229     new ReferenceProcessor(mr,    // span
2230                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2231                                 // mt processing
2232                            MAX2((int)ParallelGCThreads, 1),
2233                                 // degree of mt processing
2234                            (ParallelGCThreads > 1),
2235                                 // mt discovery
2236                            MAX2((int)ParallelGCThreads, 1),
2237                                 // degree of mt discovery
2238                            true,
2239                                 // Reference discovery is atomic
2240                            &_is_alive_closure_stw,
2241                                 // is alive closure
2242                                 // (for efficiency/performance)
2243                            false);
2244                                 // Setting next fields of discovered
2245                                 // lists requires a barrier.
2246 }
2247 
2248 size_t G1CollectedHeap::capacity() const {
2249   return _g1_committed.byte_size();
2250 }
2251 
2252 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2253   assert(!hr->continuesHumongous(), "pre-condition");
2254   hr->reset_gc_time_stamp();
2255   if (hr->startsHumongous()) {
2256     uint first_index = hr->hrs_index() + 1;
2257     uint last_index = hr->last_hc_index();
2258     for (uint i = first_index; i < last_index; i += 1) {
2259       HeapRegion* chr = region_at(i);
2260       assert(chr->continuesHumongous(), "sanity");
2261       chr->reset_gc_time_stamp();
2262     }
2263   }
2264 }
2265 
2266 #ifndef PRODUCT
2267 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2268 private:
2269   unsigned _gc_time_stamp;
2270   bool _failures;
2271 
2272 public:
2273   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2274     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2275 
2276   virtual bool doHeapRegion(HeapRegion* hr) {
2277     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2278     if (_gc_time_stamp != region_gc_time_stamp) {
2279       gclog_or_tty->print_cr("Region "HR_FORMAT" has GC time stamp = %d, "
2280                              "expected %d", HR_FORMAT_PARAMS(hr),
2281                              region_gc_time_stamp, _gc_time_stamp);
2282       _failures = true;
2283     }
2284     return false;
2285   }
2286 
2287   bool failures() { return _failures; }
2288 };
2289 
2290 void G1CollectedHeap::check_gc_time_stamps() {
2291   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2292   heap_region_iterate(&cl);
2293   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2294 }
2295 #endif // PRODUCT
2296 
2297 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2298                                                  DirtyCardQueue* into_cset_dcq,
2299                                                  bool concurrent,
2300                                                  int worker_i) {
2301   // Clean cards in the hot card cache
2302   concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
2303 
2304   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2305   int n_completed_buffers = 0;
2306   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2307     n_completed_buffers++;
2308   }
2309   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
2310   dcqs.clear_n_completed_buffers();
2311   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2312 }
2313 
2314 
2315 // Computes the sum of the storage used by the various regions.
2316 
2317 size_t G1CollectedHeap::used() const {
2318   assert(Heap_lock->owner() != NULL,
2319          "Should be owned on this thread's behalf.");
2320   size_t result = _summary_bytes_used;
2321   // Read only once in case it is set to NULL concurrently
2322   HeapRegion* hr = _mutator_alloc_region.get();
2323   if (hr != NULL)
2324     result += hr->used();
2325   return result;
2326 }
2327 
2328 size_t G1CollectedHeap::used_unlocked() const {
2329   size_t result = _summary_bytes_used;
2330   return result;
2331 }
2332 
2333 class SumUsedClosure: public HeapRegionClosure {
2334   size_t _used;
2335 public:
2336   SumUsedClosure() : _used(0) {}
2337   bool doHeapRegion(HeapRegion* r) {
2338     if (!r->continuesHumongous()) {
2339       _used += r->used();
2340     }
2341     return false;
2342   }
2343   size_t result() { return _used; }
2344 };
2345 
2346 size_t G1CollectedHeap::recalculate_used() const {
2347   SumUsedClosure blk;
2348   heap_region_iterate(&blk);
2349   return blk.result();
2350 }
2351 
2352 size_t G1CollectedHeap::unsafe_max_alloc() {
2353   if (free_regions() > 0) return HeapRegion::GrainBytes;
2354   // otherwise, is there space in the current allocation region?
2355 
2356   // We need to store the current allocation region in a local variable
2357   // here. The problem is that this method doesn't take any locks and
2358   // there may be other threads which overwrite the current allocation
2359   // region field. attempt_allocation(), for example, sets it to NULL
2360   // and this can happen *after* the NULL check here but before the call
2361   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
2362   // to be a problem in the optimized build, since the two loads of the
2363   // current allocation region field are optimized away.
2364   HeapRegion* hr = _mutator_alloc_region.get();
2365   if (hr == NULL) {
2366     return 0;
2367   }
2368   return hr->free();
2369 }
2370 
2371 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2372   switch (cause) {
2373     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2374     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
2375     case GCCause::_g1_humongous_allocation: return true;
2376     default:                                return false;
2377   }
2378 }
2379 
2380 #ifndef PRODUCT
2381 void G1CollectedHeap::allocate_dummy_regions() {
2382   // Let's fill up most of the region
2383   size_t word_size = HeapRegion::GrainWords - 1024;
2384   // And as a result the region we'll allocate will be humongous.
2385   guarantee(isHumongous(word_size), "sanity");
2386 
2387   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2388     // Let's use the existing mechanism for the allocation
2389     HeapWord* dummy_obj = humongous_obj_allocate(word_size);
2390     if (dummy_obj != NULL) {
2391       MemRegion mr(dummy_obj, word_size);
2392       CollectedHeap::fill_with_object(mr);
2393     } else {
2394       // If we can't allocate once, we probably cannot allocate
2395       // again. Let's get out of the loop.
2396       break;
2397     }
2398   }
2399 }
2400 #endif // !PRODUCT
2401 
2402 void G1CollectedHeap::increment_old_marking_cycles_started() {
2403   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2404     _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2405     err_msg("Wrong marking cycle count (started: %d, completed: %d)",
2406     _old_marking_cycles_started, _old_marking_cycles_completed));
2407 
2408   _old_marking_cycles_started++;
2409 }
2410 
2411 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2412   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2413 
2414   // We assume that if concurrent == true, then the caller is a
2415   // concurrent thread that was joined the Suspendible Thread
2416   // Set. If there's ever a cheap way to check this, we should add an
2417   // assert here.
2418 
2419   // Given that this method is called at the end of a Full GC or of a
2420   // concurrent cycle, and those can be nested (i.e., a Full GC can
2421   // interrupt a concurrent cycle), the number of full collections
2422   // completed should be either one (in the case where there was no
2423   // nesting) or two (when a Full GC interrupted a concurrent cycle)
2424   // behind the number of full collections started.
2425 
2426   // This is the case for the inner caller, i.e. a Full GC.
2427   assert(concurrent ||
2428          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2429          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2430          err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
2431                  "is inconsistent with _old_marking_cycles_completed = %u",
2432                  _old_marking_cycles_started, _old_marking_cycles_completed));
2433 
2434   // This is the case for the outer caller, i.e. the concurrent cycle.
2435   assert(!concurrent ||
2436          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2437          err_msg("for outer caller (concurrent cycle): "
2438                  "_old_marking_cycles_started = %u "
2439                  "is inconsistent with _old_marking_cycles_completed = %u",
2440                  _old_marking_cycles_started, _old_marking_cycles_completed));
2441 
2442   _old_marking_cycles_completed += 1;
2443 
2444   // We need to clear the "in_progress" flag in the CM thread before
2445   // we wake up any waiters (especially when ExplicitInvokesConcurrent
2446   // is set) so that if a waiter requests another System.gc() it doesn't
2447   // incorrectly see that a marking cyle is still in progress.
2448   if (concurrent) {
2449     _cmThread->clear_in_progress();
2450   }
2451 
2452   // This notify_all() will ensure that a thread that called
2453   // System.gc() with (with ExplicitGCInvokesConcurrent set or not)
2454   // and it's waiting for a full GC to finish will be woken up. It is
2455   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2456   FullGCCount_lock->notify_all();
2457 }
2458 
2459 void G1CollectedHeap::collect(GCCause::Cause cause) {
2460   assert_heap_not_locked();
2461 
2462   unsigned int gc_count_before;
2463   unsigned int old_marking_count_before;
2464   bool retry_gc;
2465 
2466   do {
2467     retry_gc = false;
2468 
2469     {
2470       MutexLocker ml(Heap_lock);
2471 
2472       // Read the GC count while holding the Heap_lock
2473       gc_count_before = total_collections();
2474       old_marking_count_before = _old_marking_cycles_started;
2475     }
2476 
2477     if (should_do_concurrent_full_gc(cause)) {
2478       // Schedule an initial-mark evacuation pause that will start a
2479       // concurrent cycle. We're setting word_size to 0 which means that
2480       // we are not requesting a post-GC allocation.
2481       VM_G1IncCollectionPause op(gc_count_before,
2482                                  0,     /* word_size */
2483                                  true,  /* should_initiate_conc_mark */
2484                                  g1_policy()->max_pause_time_ms(),
2485                                  cause);
2486 
2487       VMThread::execute(&op);
2488       if (!op.pause_succeeded()) {
2489         if (old_marking_count_before == _old_marking_cycles_started) {
2490           retry_gc = op.should_retry_gc();
2491         } else {
2492           // A Full GC happened while we were trying to schedule the
2493           // initial-mark GC. No point in starting a new cycle given
2494           // that the whole heap was collected anyway.
2495         }
2496 
2497         if (retry_gc) {
2498           if (GC_locker::is_active_and_needs_gc()) {
2499             GC_locker::stall_until_clear();
2500           }
2501         }
2502       }
2503     } else {
2504       if (cause == GCCause::_gc_locker
2505           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2506 
2507         // Schedule a standard evacuation pause. We're setting word_size
2508         // to 0 which means that we are not requesting a post-GC allocation.
2509         VM_G1IncCollectionPause op(gc_count_before,
2510                                    0,     /* word_size */
2511                                    false, /* should_initiate_conc_mark */
2512                                    g1_policy()->max_pause_time_ms(),
2513                                    cause);
2514         VMThread::execute(&op);
2515       } else {
2516         // Schedule a Full GC.
2517         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
2518         VMThread::execute(&op);
2519       }
2520     }
2521   } while (retry_gc);
2522 }
2523 
2524 bool G1CollectedHeap::is_in(const void* p) const {
2525   if (_g1_committed.contains(p)) {
2526     // Given that we know that p is in the committed space,
2527     // heap_region_containing_raw() should successfully
2528     // return the containing region.
2529     HeapRegion* hr = heap_region_containing_raw(p);
2530     return hr->is_in(p);
2531   } else {
2532     return false;
2533   }
2534 }
2535 
2536 // Iteration functions.
2537 
2538 // Iterates an OopClosure over all ref-containing fields of objects
2539 // within a HeapRegion.
2540 
2541 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2542   MemRegion _mr;
2543   ExtendedOopClosure* _cl;
2544 public:
2545   IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
2546     : _mr(mr), _cl(cl) {}
2547   bool doHeapRegion(HeapRegion* r) {
2548     if (!r->continuesHumongous()) {
2549       r->oop_iterate(_cl);
2550     }
2551     return false;
2552   }
2553 };
2554 
2555 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
2556   IterateOopClosureRegionClosure blk(_g1_committed, cl);
2557   heap_region_iterate(&blk);
2558 }
2559 
2560 void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
2561   IterateOopClosureRegionClosure blk(mr, cl);
2562   heap_region_iterate(&blk);
2563 }
2564 
2565 // Iterates an ObjectClosure over all objects within a HeapRegion.
2566 
2567 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2568   ObjectClosure* _cl;
2569 public:
2570   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2571   bool doHeapRegion(HeapRegion* r) {
2572     if (! r->continuesHumongous()) {
2573       r->object_iterate(_cl);
2574     }
2575     return false;
2576   }
2577 };
2578 
2579 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2580   IterateObjectClosureRegionClosure blk(cl);
2581   heap_region_iterate(&blk);
2582 }
2583 
2584 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
2585   // FIXME: is this right?
2586   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
2587 }
2588 
2589 // Calls a SpaceClosure on a HeapRegion.
2590 
2591 class SpaceClosureRegionClosure: public HeapRegionClosure {
2592   SpaceClosure* _cl;
2593 public:
2594   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
2595   bool doHeapRegion(HeapRegion* r) {
2596     _cl->do_space(r);
2597     return false;
2598   }
2599 };
2600 
2601 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
2602   SpaceClosureRegionClosure blk(cl);
2603   heap_region_iterate(&blk);
2604 }
2605 
2606 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2607   _hrs.iterate(cl);
2608 }
2609 
2610 void
2611 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2612                                                  uint worker_id,
2613                                                  uint no_of_par_workers,
2614                                                  jint claim_value) {
2615   const uint regions = n_regions();
2616   const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
2617                              no_of_par_workers :
2618                              1);
2619   assert(UseDynamicNumberOfGCThreads ||
2620          no_of_par_workers == workers()->total_workers(),
2621          "Non dynamic should use fixed number of workers");
2622   // try to spread out the starting points of the workers
2623   const HeapRegion* start_hr =
2624                         start_region_for_worker(worker_id, no_of_par_workers);
2625   const uint start_index = start_hr->hrs_index();
2626 
2627   // each worker will actually look at all regions
2628   for (uint count = 0; count < regions; ++count) {
2629     const uint index = (start_index + count) % regions;
2630     assert(0 <= index && index < regions, "sanity");
2631     HeapRegion* r = region_at(index);
2632     // we'll ignore "continues humongous" regions (we'll process them
2633     // when we come across their corresponding "start humongous"
2634     // region) and regions already claimed
2635     if (r->claim_value() == claim_value || r->continuesHumongous()) {
2636       continue;
2637     }
2638     // OK, try to claim it
2639     if (r->claimHeapRegion(claim_value)) {
2640       // success!
2641       assert(!r->continuesHumongous(), "sanity");
2642       if (r->startsHumongous()) {
2643         // If the region is "starts humongous" we'll iterate over its
2644         // "continues humongous" first; in fact we'll do them
2645         // first. The order is important. In on case, calling the
2646         // closure on the "starts humongous" region might de-allocate
2647         // and clear all its "continues humongous" regions and, as a
2648         // result, we might end up processing them twice. So, we'll do
2649         // them first (notice: most closures will ignore them anyway) and
2650         // then we'll do the "starts humongous" region.
2651         for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
2652           HeapRegion* chr = region_at(ch_index);
2653 
2654           // if the region has already been claimed or it's not
2655           // "continues humongous" we're done
2656           if (chr->claim_value() == claim_value ||
2657               !chr->continuesHumongous()) {
2658             break;
2659           }
2660 
2661           // Noone should have claimed it directly. We can given
2662           // that we claimed its "starts humongous" region.
2663           assert(chr->claim_value() != claim_value, "sanity");
2664           assert(chr->humongous_start_region() == r, "sanity");
2665 
2666           if (chr->claimHeapRegion(claim_value)) {
2667             // we should always be able to claim it; noone else should
2668             // be trying to claim this region
2669 
2670             bool res2 = cl->doHeapRegion(chr);
2671             assert(!res2, "Should not abort");
2672 
2673             // Right now, this holds (i.e., no closure that actually
2674             // does something with "continues humongous" regions
2675             // clears them). We might have to weaken it in the future,
2676             // but let's leave these two asserts here for extra safety.
2677             assert(chr->continuesHumongous(), "should still be the case");
2678             assert(chr->humongous_start_region() == r, "sanity");
2679           } else {
2680             guarantee(false, "we should not reach here");
2681           }
2682         }
2683       }
2684 
2685       assert(!r->continuesHumongous(), "sanity");
2686       bool res = cl->doHeapRegion(r);
2687       assert(!res, "Should not abort");
2688     }
2689   }
2690 }
2691 
2692 class ResetClaimValuesClosure: public HeapRegionClosure {
2693 public:
2694   bool doHeapRegion(HeapRegion* r) {
2695     r->set_claim_value(HeapRegion::InitialClaimValue);
2696     return false;
2697   }
2698 };
2699 
2700 void G1CollectedHeap::reset_heap_region_claim_values() {
2701   ResetClaimValuesClosure blk;
2702   heap_region_iterate(&blk);
2703 }
2704 
2705 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
2706   ResetClaimValuesClosure blk;
2707   collection_set_iterate(&blk);
2708 }
2709 
2710 #ifdef ASSERT
2711 // This checks whether all regions in the heap have the correct claim
2712 // value. I also piggy-backed on this a check to ensure that the
2713 // humongous_start_region() information on "continues humongous"
2714 // regions is correct.
2715 
2716 class CheckClaimValuesClosure : public HeapRegionClosure {
2717 private:
2718   jint _claim_value;
2719   uint _failures;
2720   HeapRegion* _sh_region;
2721 
2722 public:
2723   CheckClaimValuesClosure(jint claim_value) :
2724     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
2725   bool doHeapRegion(HeapRegion* r) {
2726     if (r->claim_value() != _claim_value) {
2727       gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2728                              "claim value = %d, should be %d",
2729                              HR_FORMAT_PARAMS(r),
2730                              r->claim_value(), _claim_value);
2731       ++_failures;
2732     }
2733     if (!r->isHumongous()) {
2734       _sh_region = NULL;
2735     } else if (r->startsHumongous()) {
2736       _sh_region = r;
2737     } else if (r->continuesHumongous()) {
2738       if (r->humongous_start_region() != _sh_region) {
2739         gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2740                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
2741                                HR_FORMAT_PARAMS(r),
2742                                r->humongous_start_region(),
2743                                _sh_region);
2744         ++_failures;
2745       }
2746     }
2747     return false;
2748   }
2749   uint failures() { return _failures; }
2750 };
2751 
2752 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
2753   CheckClaimValuesClosure cl(claim_value);
2754   heap_region_iterate(&cl);
2755   return cl.failures() == 0;
2756 }
2757 
2758 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
2759 private:
2760   jint _claim_value;
2761   uint _failures;
2762 
2763 public:
2764   CheckClaimValuesInCSetHRClosure(jint claim_value) :
2765     _claim_value(claim_value), _failures(0) { }
2766 
2767   uint failures() { return _failures; }
2768 
2769   bool doHeapRegion(HeapRegion* hr) {
2770     assert(hr->in_collection_set(), "how?");
2771     assert(!hr->isHumongous(), "H-region in CSet");
2772     if (hr->claim_value() != _claim_value) {
2773       gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
2774                              "claim value = %d, should be %d",
2775                              HR_FORMAT_PARAMS(hr),
2776                              hr->claim_value(), _claim_value);
2777       _failures += 1;
2778     }
2779     return false;
2780   }
2781 };
2782 
2783 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
2784   CheckClaimValuesInCSetHRClosure cl(claim_value);
2785   collection_set_iterate(&cl);
2786   return cl.failures() == 0;
2787 }
2788 #endif // ASSERT
2789 
2790 // Clear the cached CSet starting regions and (more importantly)
2791 // the time stamps. Called when we reset the GC time stamp.
2792 void G1CollectedHeap::clear_cset_start_regions() {
2793   assert(_worker_cset_start_region != NULL, "sanity");
2794   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
2795 
2796   int n_queues = MAX2((int)ParallelGCThreads, 1);
2797   for (int i = 0; i < n_queues; i++) {
2798     _worker_cset_start_region[i] = NULL;
2799     _worker_cset_start_region_time_stamp[i] = 0;
2800   }
2801 }
2802 
2803 // Given the id of a worker, obtain or calculate a suitable
2804 // starting region for iterating over the current collection set.
2805 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
2806   assert(get_gc_time_stamp() > 0, "should have been updated by now");
2807 
2808   HeapRegion* result = NULL;
2809   unsigned gc_time_stamp = get_gc_time_stamp();
2810 
2811   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
2812     // Cached starting region for current worker was set
2813     // during the current pause - so it's valid.
2814     // Note: the cached starting heap region may be NULL
2815     // (when the collection set is empty).
2816     result = _worker_cset_start_region[worker_i];
2817     assert(result == NULL || result->in_collection_set(), "sanity");
2818     return result;
2819   }
2820 
2821   // The cached entry was not valid so let's calculate
2822   // a suitable starting heap region for this worker.
2823 
2824   // We want the parallel threads to start their collection
2825   // set iteration at different collection set regions to
2826   // avoid contention.
2827   // If we have:
2828   //          n collection set regions
2829   //          p threads
2830   // Then thread t will start at region floor ((t * n) / p)
2831 
2832   result = g1_policy()->collection_set();
2833   if (G1CollectedHeap::use_parallel_gc_threads()) {
2834     uint cs_size = g1_policy()->cset_region_length();
2835     uint active_workers = workers()->active_workers();
2836     assert(UseDynamicNumberOfGCThreads ||
2837              active_workers == workers()->total_workers(),
2838              "Unless dynamic should use total workers");
2839 
2840     uint end_ind   = (cs_size * worker_i) / active_workers;
2841     uint start_ind = 0;
2842 
2843     if (worker_i > 0 &&
2844         _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2845       // Previous workers starting region is valid
2846       // so let's iterate from there
2847       start_ind = (cs_size * (worker_i - 1)) / active_workers;
2848       result = _worker_cset_start_region[worker_i - 1];
2849     }
2850 
2851     for (uint i = start_ind; i < end_ind; i++) {
2852       result = result->next_in_collection_set();
2853     }
2854   }
2855 
2856   // Note: the calculated starting heap region may be NULL
2857   // (when the collection set is empty).
2858   assert(result == NULL || result->in_collection_set(), "sanity");
2859   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2860          "should be updated only once per pause");
2861   _worker_cset_start_region[worker_i] = result;
2862   OrderAccess::storestore();
2863   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2864   return result;
2865 }
2866 
2867 HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
2868                                                      uint no_of_par_workers) {
2869   uint worker_num =
2870            G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
2871   assert(UseDynamicNumberOfGCThreads ||
2872          no_of_par_workers == workers()->total_workers(),
2873          "Non dynamic should use fixed number of workers");
2874   const uint start_index = n_regions() * worker_i / worker_num;
2875   return region_at(start_index);
2876 }
2877 
2878 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2879   HeapRegion* r = g1_policy()->collection_set();
2880   while (r != NULL) {
2881     HeapRegion* next = r->next_in_collection_set();
2882     if (cl->doHeapRegion(r)) {
2883       cl->incomplete();
2884       return;
2885     }
2886     r = next;
2887   }
2888 }
2889 
2890 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
2891                                                   HeapRegionClosure *cl) {
2892   if (r == NULL) {
2893     // The CSet is empty so there's nothing to do.
2894     return;
2895   }
2896 
2897   assert(r->in_collection_set(),
2898          "Start region must be a member of the collection set.");
2899   HeapRegion* cur = r;
2900   while (cur != NULL) {
2901     HeapRegion* next = cur->next_in_collection_set();
2902     if (cl->doHeapRegion(cur) && false) {
2903       cl->incomplete();
2904       return;
2905     }
2906     cur = next;
2907   }
2908   cur = g1_policy()->collection_set();
2909   while (cur != r) {
2910     HeapRegion* next = cur->next_in_collection_set();
2911     if (cl->doHeapRegion(cur) && false) {
2912       cl->incomplete();
2913       return;
2914     }
2915     cur = next;
2916   }
2917 }
2918 
2919 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
2920   return n_regions() > 0 ? region_at(0) : NULL;
2921 }
2922 
2923 
2924 Space* G1CollectedHeap::space_containing(const void* addr) const {
2925   Space* res = heap_region_containing(addr);
2926   return res;
2927 }
2928 
2929 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2930   Space* sp = space_containing(addr);
2931   if (sp != NULL) {
2932     return sp->block_start(addr);
2933   }
2934   return NULL;
2935 }
2936 
2937 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2938   Space* sp = space_containing(addr);
2939   assert(sp != NULL, "block_size of address outside of heap");
2940   return sp->block_size(addr);
2941 }
2942 
2943 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2944   Space* sp = space_containing(addr);
2945   return sp->block_is_obj(addr);
2946 }
2947 
2948 bool G1CollectedHeap::supports_tlab_allocation() const {
2949   return true;
2950 }
2951 
2952 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2953   return HeapRegion::GrainBytes;
2954 }
2955 
2956 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2957   // Return the remaining space in the cur alloc region, but not less than
2958   // the min TLAB size.
2959 
2960   // Also, this value can be at most the humongous object threshold,
2961   // since we can't allow tlabs to grow big enough to accomodate
2962   // humongous objects.
2963 
2964   HeapRegion* hr = _mutator_alloc_region.get();
2965   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
2966   if (hr == NULL) {
2967     return max_tlab_size;
2968   } else {
2969     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
2970   }
2971 }
2972 
2973 size_t G1CollectedHeap::max_capacity() const {
2974   return _g1_reserved.byte_size();
2975 }
2976 
2977 jlong G1CollectedHeap::millis_since_last_gc() {
2978   // assert(false, "NYI");
2979   return 0;
2980 }
2981 
2982 void G1CollectedHeap::prepare_for_verify() {
2983   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2984     ensure_parsability(false);
2985   }
2986   g1_rem_set()->prepare_for_verify();
2987 }
2988 
2989 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
2990                                               VerifyOption vo) {
2991   switch (vo) {
2992   case VerifyOption_G1UsePrevMarking:
2993     return hr->obj_allocated_since_prev_marking(obj);
2994   case VerifyOption_G1UseNextMarking:
2995     return hr->obj_allocated_since_next_marking(obj);
2996   case VerifyOption_G1UseMarkWord:
2997     return false;
2998   default:
2999     ShouldNotReachHere();
3000   }
3001   return false; // keep some compilers happy
3002 }
3003 
3004 HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
3005   switch (vo) {
3006   case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
3007   case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
3008   case VerifyOption_G1UseMarkWord:    return NULL;
3009   default:                            ShouldNotReachHere();
3010   }
3011   return NULL; // keep some compilers happy
3012 }
3013 
3014 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
3015   switch (vo) {
3016   case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
3017   case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
3018   case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
3019   default:                            ShouldNotReachHere();
3020   }
3021   return false; // keep some compilers happy
3022 }
3023 
3024 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
3025   switch (vo) {
3026   case VerifyOption_G1UsePrevMarking: return "PTAMS";
3027   case VerifyOption_G1UseNextMarking: return "NTAMS";
3028   case VerifyOption_G1UseMarkWord:    return "NONE";
3029   default:                            ShouldNotReachHere();
3030   }
3031   return NULL; // keep some compilers happy
3032 }
3033 
3034 class VerifyLivenessOopClosure: public OopClosure {
3035   G1CollectedHeap* _g1h;
3036   VerifyOption _vo;
3037 public:
3038   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
3039     _g1h(g1h), _vo(vo)
3040   { }
3041   void do_oop(narrowOop *p) { do_oop_work(p); }
3042   void do_oop(      oop *p) { do_oop_work(p); }
3043 
3044   template <class T> void do_oop_work(T *p) {
3045     oop obj = oopDesc::load_decode_heap_oop(p);
3046     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
3047               "Dead object referenced by a not dead object");
3048   }
3049 };
3050 
3051 class VerifyObjsInRegionClosure: public ObjectClosure {
3052 private:
3053   G1CollectedHeap* _g1h;
3054   size_t _live_bytes;
3055   HeapRegion *_hr;
3056   VerifyOption _vo;
3057 public:
3058   // _vo == UsePrevMarking -> use "prev" marking information,
3059   // _vo == UseNextMarking -> use "next" marking information,
3060   // _vo == UseMarkWord    -> use mark word from object header.
3061   VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
3062     : _live_bytes(0), _hr(hr), _vo(vo) {
3063     _g1h = G1CollectedHeap::heap();
3064   }
3065   void do_object(oop o) {
3066     VerifyLivenessOopClosure isLive(_g1h, _vo);
3067     assert(o != NULL, "Huh?");
3068     if (!_g1h->is_obj_dead_cond(o, _vo)) {
3069       // If the object is alive according to the mark word,
3070       // then verify that the marking information agrees.
3071       // Note we can't verify the contra-positive of the
3072       // above: if the object is dead (according to the mark
3073       // word), it may not be marked, or may have been marked
3074       // but has since became dead, or may have been allocated
3075       // since the last marking.
3076       if (_vo == VerifyOption_G1UseMarkWord) {
3077         guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
3078       }
3079 
3080       o->oop_iterate_no_header(&isLive);
3081       if (!_hr->obj_allocated_since_prev_marking(o)) {
3082         size_t obj_size = o->size();    // Make sure we don't overflow
3083         _live_bytes += (obj_size * HeapWordSize);
3084       }
3085     }
3086   }
3087   size_t live_bytes() { return _live_bytes; }
3088 };
3089 
3090 class PrintObjsInRegionClosure : public ObjectClosure {
3091   HeapRegion *_hr;
3092   G1CollectedHeap *_g1;
3093 public:
3094   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
3095     _g1 = G1CollectedHeap::heap();
3096   };
3097 
3098   void do_object(oop o) {
3099     if (o != NULL) {
3100       HeapWord *start = (HeapWord *) o;
3101       size_t word_sz = o->size();
3102       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
3103                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
3104                           (void*) o, word_sz,
3105                           _g1->isMarkedPrev(o),
3106                           _g1->isMarkedNext(o),
3107                           _hr->obj_allocated_since_prev_marking(o));
3108       HeapWord *end = start + word_sz;
3109       HeapWord *cur;
3110       int *val;
3111       for (cur = start; cur < end; cur++) {
3112         val = (int *) cur;
3113         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
3114       }
3115     }
3116   }
3117 };
3118 
3119 class VerifyRegionClosure: public HeapRegionClosure {
3120 private:
3121   bool             _par;
3122   VerifyOption     _vo;
3123   bool             _failures;
3124 public:
3125   // _vo == UsePrevMarking -> use "prev" marking information,
3126   // _vo == UseNextMarking -> use "next" marking information,
3127   // _vo == UseMarkWord    -> use mark word from object header.
3128   VerifyRegionClosure(bool par, VerifyOption vo)
3129     : _par(par),
3130       _vo(vo),
3131       _failures(false) {}
3132 
3133   bool failures() {
3134     return _failures;
3135   }
3136 
3137   bool doHeapRegion(HeapRegion* r) {
3138     if (!r->continuesHumongous()) {
3139       bool failures = false;
3140       r->verify(_vo, &failures);
3141       if (failures) {
3142         _failures = true;
3143       } else {
3144         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3145         r->object_iterate(&not_dead_yet_cl);
3146         if (_vo != VerifyOption_G1UseNextMarking) {
3147           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3148             gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
3149                                    "max_live_bytes "SIZE_FORMAT" "
3150                                    "< calculated "SIZE_FORMAT,
3151                                    r->bottom(), r->end(),
3152                                    r->max_live_bytes(),
3153                                  not_dead_yet_cl.live_bytes());
3154             _failures = true;
3155           }
3156         } else {
3157           // When vo == UseNextMarking we cannot currently do a sanity
3158           // check on the live bytes as the calculation has not been
3159           // finalized yet.
3160         }
3161       }
3162     }
3163     return false; // stop the region iteration if we hit a failure
3164   }
3165 };
3166 
3167 class YoungRefCounterClosure : public OopClosure {
3168   G1CollectedHeap* _g1h;
3169   int              _count;
3170  public:
3171   YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
3172   void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
3173   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3174 
3175   int count() { return _count; }
3176   void reset_count() { _count = 0; };
3177 };
3178 
3179 class VerifyKlassClosure: public KlassClosure {
3180   YoungRefCounterClosure _young_ref_counter_closure;
3181   OopClosure *_oop_closure;
3182  public:
3183   VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
3184   void do_klass(Klass* k) {
3185     k->oops_do(_oop_closure);
3186 
3187     _young_ref_counter_closure.reset_count();
3188     k->oops_do(&_young_ref_counter_closure);
3189     if (_young_ref_counter_closure.count() > 0) {
3190       guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
3191     }
3192   }
3193 };
3194 
3195 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
3196 //       pass it as the perm_blk to SharedHeap::process_strong_roots.
3197 //       When process_strong_roots stop calling perm_blk->younger_refs_iterate
3198 //       we can change this closure to extend the simpler OopClosure.
3199 class VerifyRootsClosure: public OopsInGenClosure {
3200 private:
3201   G1CollectedHeap* _g1h;
3202   VerifyOption     _vo;
3203   bool             _failures;
3204 public:
3205   // _vo == UsePrevMarking -> use "prev" marking information,
3206   // _vo == UseNextMarking -> use "next" marking information,
3207   // _vo == UseMarkWord    -> use mark word from object header.
3208   VerifyRootsClosure(VerifyOption vo) :
3209     _g1h(G1CollectedHeap::heap()),
3210     _vo(vo),
3211     _failures(false) { }
3212 
3213   bool failures() { return _failures; }
3214 
3215   template <class T> void do_oop_nv(T* p) {
3216     T heap_oop = oopDesc::load_heap_oop(p);
3217     if (!oopDesc::is_null(heap_oop)) {
3218       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3219       if (_g1h->is_obj_dead_cond(obj, _vo)) {
3220         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
3221                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
3222         if (_vo == VerifyOption_G1UseMarkWord) {
3223           gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
3224         }
3225         obj->print_on(gclog_or_tty);
3226         _failures = true;
3227       }
3228     }
3229   }
3230 
3231   void do_oop(oop* p)       { do_oop_nv(p); }
3232   void do_oop(narrowOop* p) { do_oop_nv(p); }
3233 };
3234 
3235 // This is the task used for parallel heap verification.
3236 
3237 class G1ParVerifyTask: public AbstractGangTask {
3238 private:
3239   G1CollectedHeap* _g1h;
3240   VerifyOption     _vo;
3241   bool             _failures;
3242 
3243 public:
3244   // _vo == UsePrevMarking -> use "prev" marking information,
3245   // _vo == UseNextMarking -> use "next" marking information,
3246   // _vo == UseMarkWord    -> use mark word from object header.
3247   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
3248     AbstractGangTask("Parallel verify task"),
3249     _g1h(g1h),
3250     _vo(vo),
3251     _failures(false) { }
3252 
3253   bool failures() {
3254     return _failures;
3255   }
3256 
3257   void work(uint worker_id) {
3258     HandleMark hm;
3259     VerifyRegionClosure blk(true, _vo);
3260     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3261                                           _g1h->workers()->active_workers(),
3262                                           HeapRegion::ParVerifyClaimValue);
3263     if (blk.failures()) {
3264       _failures = true;
3265     }
3266   }
3267 };
3268 
3269 void G1CollectedHeap::verify(bool silent) {
3270   verify(silent, VerifyOption_G1UsePrevMarking);
3271 }
3272 
3273 void G1CollectedHeap::verify(bool silent,
3274                              VerifyOption vo) {
3275   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
3276     if (!silent) { gclog_or_tty->print("Roots "); }
3277     VerifyRootsClosure rootsCl(vo);
3278 
3279     assert(Thread::current()->is_VM_thread(),
3280       "Expected to be executed serially by the VM thread at this point");
3281 
3282     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
3283     VerifyKlassClosure klassCl(this, &rootsCl);
3284 
3285     // We apply the relevant closures to all the oops in the
3286     // system dictionary, the string table and the code cache.
3287     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3288 
3289     // Need cleared claim bits for the strong roots processing
3290     ClassLoaderDataGraph::clear_claimed_marks();
3291 
3292     process_strong_roots(true,      // activate StrongRootsScope
3293                          false,     // we set "is scavenging" to false,
3294                                     // so we don't reset the dirty cards.
3295                          ScanningOption(so),  // roots scanning options
3296                          &rootsCl,
3297                          &blobsCl,
3298                          &klassCl
3299                          );
3300 
3301     bool failures = rootsCl.failures();
3302 
3303     if (vo != VerifyOption_G1UseMarkWord) {
3304       // If we're verifying during a full GC then the region sets
3305       // will have been torn down at the start of the GC. Therefore
3306       // verifying the region sets will fail. So we only verify
3307       // the region sets when not in a full GC.
3308       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3309       verify_region_sets();
3310     }
3311 
3312     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3313     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3314       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3315              "sanity check");
3316 
3317       G1ParVerifyTask task(this, vo);
3318       assert(UseDynamicNumberOfGCThreads ||
3319         workers()->active_workers() == workers()->total_workers(),
3320         "If not dynamic should be using all the workers");
3321       int n_workers = workers()->active_workers();
3322       set_par_threads(n_workers);
3323       workers()->run_task(&task);
3324       set_par_threads(0);
3325       if (task.failures()) {
3326         failures = true;
3327       }
3328 
3329       // Checks that the expected amount of parallel work was done.
3330       // The implication is that n_workers is > 0.
3331       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
3332              "sanity check");
3333 
3334       reset_heap_region_claim_values();
3335 
3336       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3337              "sanity check");
3338     } else {
3339       VerifyRegionClosure blk(false, vo);
3340       heap_region_iterate(&blk);
3341       if (blk.failures()) {
3342         failures = true;
3343       }
3344     }
3345     if (!silent) gclog_or_tty->print("RemSet ");
3346     rem_set()->verify();
3347 
3348     if (failures) {
3349       gclog_or_tty->print_cr("Heap:");
3350       // It helps to have the per-region information in the output to
3351       // help us track down what went wrong. This is why we call
3352       // print_extended_on() instead of print_on().
3353       print_extended_on(gclog_or_tty);
3354       gclog_or_tty->print_cr("");
3355 #ifndef PRODUCT
3356       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
3357         concurrent_mark()->print_reachable("at-verification-failure",
3358                                            vo, false /* all */);
3359       }
3360 #endif
3361       gclog_or_tty->flush();
3362     }
3363     guarantee(!failures, "there should not have been any failures");
3364   } else {
3365     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
3366   }
3367 }
3368 
3369 class PrintRegionClosure: public HeapRegionClosure {
3370   outputStream* _st;
3371 public:
3372   PrintRegionClosure(outputStream* st) : _st(st) {}
3373   bool doHeapRegion(HeapRegion* r) {
3374     r->print_on(_st);
3375     return false;
3376   }
3377 };
3378 
3379 void G1CollectedHeap::print_on(outputStream* st) const {
3380   st->print(" %-20s", "garbage-first heap");
3381   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3382             capacity()/K, used_unlocked()/K);
3383   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3384             _g1_storage.low_boundary(),
3385             _g1_storage.high(),
3386             _g1_storage.high_boundary());
3387   st->cr();
3388   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3389   uint young_regions = _young_list->length();
3390   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3391             (size_t) young_regions * HeapRegion::GrainBytes / K);
3392   uint survivor_regions = g1_policy()->recorded_survivor_regions();
3393   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3394             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3395   st->cr();
3396   MetaspaceAux::print_on(st);
3397 }
3398 
3399 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3400   print_on(st);
3401 
3402   // Print the per-region information.
3403   st->cr();
3404   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3405                "HS=humongous(starts), HC=humongous(continues), "
3406                "CS=collection set, F=free, TS=gc time stamp, "
3407                "PTAMS=previous top-at-mark-start, "
3408                "NTAMS=next top-at-mark-start)");
3409   PrintRegionClosure blk(st);
3410   heap_region_iterate(&blk);
3411 }
3412 
3413 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3414   if (G1CollectedHeap::use_parallel_gc_threads()) {
3415     workers()->print_worker_threads_on(st);
3416   }
3417   _cmThread->print_on(st);
3418   st->cr();
3419   _cm->print_worker_threads_on(st);
3420   _cg1r->print_worker_threads_on(st);
3421 }
3422 
3423 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3424   if (G1CollectedHeap::use_parallel_gc_threads()) {
3425     workers()->threads_do(tc);
3426   }
3427   tc->do_thread(_cmThread);
3428   _cg1r->threads_do(tc);
3429 }
3430 
3431 void G1CollectedHeap::print_tracing_info() const {
3432   // We'll overload this to mean "trace GC pause statistics."
3433   if (TraceGen0Time || TraceGen1Time) {
3434     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3435     // to that.
3436     g1_policy()->print_tracing_info();
3437   }
3438   if (G1SummarizeRSetStats) {
3439     g1_rem_set()->print_summary_info();
3440   }
3441   if (G1SummarizeConcMark) {
3442     concurrent_mark()->print_summary_info();
3443   }
3444   g1_policy()->print_yg_surv_rate_info();
3445   SpecializationStats::print();
3446 }
3447 
3448 #ifndef PRODUCT
3449 // Helpful for debugging RSet issues.
3450 
3451 class PrintRSetsClosure : public HeapRegionClosure {
3452 private:
3453   const char* _msg;
3454   size_t _occupied_sum;
3455 
3456 public:
3457   bool doHeapRegion(HeapRegion* r) {
3458     HeapRegionRemSet* hrrs = r->rem_set();
3459     size_t occupied = hrrs->occupied();
3460     _occupied_sum += occupied;
3461 
3462     gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
3463                            HR_FORMAT_PARAMS(r));
3464     if (occupied == 0) {
3465       gclog_or_tty->print_cr("  RSet is empty");
3466     } else {
3467       hrrs->print();
3468     }
3469     gclog_or_tty->print_cr("----------");
3470     return false;
3471   }
3472 
3473   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3474     gclog_or_tty->cr();
3475     gclog_or_tty->print_cr("========================================");
3476     gclog_or_tty->print_cr(msg);
3477     gclog_or_tty->cr();
3478   }
3479 
3480   ~PrintRSetsClosure() {
3481     gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
3482     gclog_or_tty->print_cr("========================================");
3483     gclog_or_tty->cr();
3484   }
3485 };
3486 
3487 void G1CollectedHeap::print_cset_rsets() {
3488   PrintRSetsClosure cl("Printing CSet RSets");
3489   collection_set_iterate(&cl);
3490 }
3491 
3492 void G1CollectedHeap::print_all_rsets() {
3493   PrintRSetsClosure cl("Printing All RSets");;
3494   heap_region_iterate(&cl);
3495 }
3496 #endif // PRODUCT
3497 
3498 G1CollectedHeap* G1CollectedHeap::heap() {
3499   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
3500          "not a garbage-first heap");
3501   return _g1h;
3502 }
3503 
3504 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3505   // always_do_update_barrier = false;
3506   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3507   // Call allocation profiler
3508   AllocationProfiler::iterate_since_last_gc();
3509   // Fill TLAB's and such
3510   ensure_parsability(true);
3511 }
3512 
3513 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
3514   // FIXME: what is this about?
3515   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3516   // is set.
3517   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3518                         "derived pointer present"));
3519   // always_do_update_barrier = true;
3520 
3521   // We have just completed a GC. Update the soft reference
3522   // policy with the new heap occupancy
3523   Universe::update_heap_info_at_gc();
3524 }
3525 
3526 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3527                                                unsigned int gc_count_before,
3528                                                bool* succeeded) {
3529   assert_heap_not_locked_and_not_at_safepoint();
3530   g1_policy()->record_stop_world_start();
3531   VM_G1IncCollectionPause op(gc_count_before,
3532                              word_size,
3533                              false, /* should_initiate_conc_mark */
3534                              g1_policy()->max_pause_time_ms(),
3535                              GCCause::_g1_inc_collection_pause);
3536   VMThread::execute(&op);
3537 
3538   HeapWord* result = op.result();
3539   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3540   assert(result == NULL || ret_succeeded,
3541          "the result should be NULL if the VM did not succeed");
3542   *succeeded = ret_succeeded;
3543 
3544   assert_heap_not_locked();
3545   return result;
3546 }
3547 
3548 void
3549 G1CollectedHeap::doConcurrentMark() {
3550   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
3551   if (!_cmThread->in_progress()) {
3552     _cmThread->set_started();
3553     CGC_lock->notify();
3554   }
3555 }
3556 
3557 size_t G1CollectedHeap::pending_card_num() {
3558   size_t extra_cards = 0;
3559   JavaThread *curr = Threads::first();
3560   while (curr != NULL) {
3561     DirtyCardQueue& dcq = curr->dirty_card_queue();
3562     extra_cards += dcq.size();
3563     curr = curr->next();
3564   }
3565   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3566   size_t buffer_size = dcqs.buffer_size();
3567   size_t buffer_num = dcqs.completed_buffers_num();
3568 
3569   // PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
3570   // in bytes - not the number of 'entries'. We need to convert
3571   // into a number of cards.
3572   return (buffer_size * buffer_num + extra_cards) / oopSize;
3573 }
3574 
3575 size_t G1CollectedHeap::cards_scanned() {
3576   return g1_rem_set()->cardsScanned();
3577 }
3578 
3579 void
3580 G1CollectedHeap::setup_surviving_young_words() {
3581   assert(_surviving_young_words == NULL, "pre-condition");
3582   uint array_length = g1_policy()->young_cset_region_length();
3583   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3584   if (_surviving_young_words == NULL) {
3585     vm_exit_out_of_memory(sizeof(size_t) * array_length,
3586                           "Not enough space for young surv words summary.");
3587   }
3588   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3589 #ifdef ASSERT
3590   for (uint i = 0;  i < array_length; ++i) {
3591     assert( _surviving_young_words[i] == 0, "memset above" );
3592   }
3593 #endif // !ASSERT
3594 }
3595 
3596 void
3597 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3598   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3599   uint array_length = g1_policy()->young_cset_region_length();
3600   for (uint i = 0; i < array_length; ++i) {
3601     _surviving_young_words[i] += surv_young_words[i];
3602   }
3603 }
3604 
3605 void
3606 G1CollectedHeap::cleanup_surviving_young_words() {
3607   guarantee( _surviving_young_words != NULL, "pre-condition" );
3608   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
3609   _surviving_young_words = NULL;
3610 }
3611 
3612 #ifdef ASSERT
3613 class VerifyCSetClosure: public HeapRegionClosure {
3614 public:
3615   bool doHeapRegion(HeapRegion* hr) {
3616     // Here we check that the CSet region's RSet is ready for parallel
3617     // iteration. The fields that we'll verify are only manipulated
3618     // when the region is part of a CSet and is collected. Afterwards,
3619     // we reset these fields when we clear the region's RSet (when the
3620     // region is freed) so they are ready when the region is
3621     // re-allocated. The only exception to this is if there's an
3622     // evacuation failure and instead of freeing the region we leave
3623     // it in the heap. In that case, we reset these fields during
3624     // evacuation failure handling.
3625     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3626 
3627     // Here's a good place to add any other checks we'd like to
3628     // perform on CSet regions.
3629     return false;
3630   }
3631 };
3632 #endif // ASSERT
3633 
3634 #if TASKQUEUE_STATS
3635 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3636   st->print_raw_cr("GC Task Stats");
3637   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3638   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3639 }
3640 
3641 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3642   print_taskqueue_stats_hdr(st);
3643 
3644   TaskQueueStats totals;
3645   const int n = workers() != NULL ? workers()->total_workers() : 1;
3646   for (int i = 0; i < n; ++i) {
3647     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3648     totals += task_queue(i)->stats;
3649   }
3650   st->print_raw("tot "); totals.print(st); st->cr();
3651 
3652   DEBUG_ONLY(totals.verify());
3653 }
3654 
3655 void G1CollectedHeap::reset_taskqueue_stats() {
3656   const int n = workers() != NULL ? workers()->total_workers() : 1;
3657   for (int i = 0; i < n; ++i) {
3658     task_queue(i)->stats.reset();
3659   }
3660 }
3661 #endif // TASKQUEUE_STATS
3662 
3663 void G1CollectedHeap::log_gc_header() {
3664   if (!G1Log::fine()) {
3665     return;
3666   }
3667 
3668   gclog_or_tty->date_stamp(PrintGCDateStamps);
3669   gclog_or_tty->stamp(PrintGCTimeStamps);
3670 
3671   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3672     .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
3673     .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
3674 
3675   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3676 }
3677 
3678 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3679   if (!G1Log::fine()) {
3680     return;
3681   }
3682 
3683   if (G1Log::finer()) {
3684     if (evacuation_failed()) {
3685       gclog_or_tty->print(" (to-space exhausted)");
3686     }
3687     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3688     g1_policy()->phase_times()->note_gc_end();
3689     g1_policy()->phase_times()->print(pause_time_sec);
3690     g1_policy()->print_detailed_heap_transition();
3691   } else {
3692     if (evacuation_failed()) {
3693       gclog_or_tty->print("--");
3694     }
3695     g1_policy()->print_heap_transition();
3696     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3697   }
3698   gclog_or_tty->flush();
3699 }
3700 
3701 bool
3702 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3703   assert_at_safepoint(true /* should_be_vm_thread */);
3704   guarantee(!is_gc_active(), "collection is not reentrant");
3705 
3706   if (GC_locker::check_active_before_gc()) {
3707     return false;
3708   }
3709 
3710   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3711   ResourceMark rm;
3712 
3713   print_heap_before_gc();
3714 
3715   HRSPhaseSetter x(HRSPhaseEvacuation);
3716   verify_region_sets_optional();
3717   verify_dirty_young_regions();
3718 
3719   // This call will decide whether this pause is an initial-mark
3720   // pause. If it is, during_initial_mark_pause() will return true
3721   // for the duration of this pause.
3722   g1_policy()->decide_on_conc_mark_initiation();
3723 
3724   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3725   assert(!g1_policy()->during_initial_mark_pause() ||
3726           g1_policy()->gcs_are_young(), "sanity");
3727 
3728   // We also do not allow mixed GCs during marking.
3729   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3730 
3731   // Record whether this pause is an initial mark. When the current
3732   // thread has completed its logging output and it's safe to signal
3733   // the CM thread, the flag's value in the policy has been reset.
3734   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3735 
3736   // Inner scope for scope based logging, timers, and stats collection
3737   {
3738     if (g1_policy()->during_initial_mark_pause()) {
3739       // We are about to start a marking cycle, so we increment the
3740       // full collection counter.
3741       increment_old_marking_cycles_started();
3742     }
3743     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3744 
3745     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3746                                 workers()->active_workers() : 1);
3747     double pause_start_sec = os::elapsedTime();
3748     g1_policy()->phase_times()->note_gc_start(active_workers);
3749     log_gc_header();
3750 
3751     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3752     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3753 
3754     // If the secondary_free_list is not empty, append it to the
3755     // free_list. No need to wait for the cleanup operation to finish;
3756     // the region allocation code will check the secondary_free_list
3757     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3758     // set, skip this step so that the region allocation code has to
3759     // get entries from the secondary_free_list.
3760     if (!G1StressConcRegionFreeing) {
3761       append_secondary_free_list_if_not_empty_with_lock();
3762     }
3763 
3764     assert(check_young_list_well_formed(),
3765       "young list should be well formed");
3766 
3767     // Don't dynamically change the number of GC threads this early.  A value of
3768     // 0 is used to indicate serial work.  When parallel work is done,
3769     // it will be set.
3770 
3771     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3772       IsGCActiveMark x;
3773 
3774       gc_prologue(false);
3775       increment_total_collections(false /* full gc */);
3776       increment_gc_time_stamp();
3777 
3778       verify_before_gc();
3779 
3780       COMPILER2_PRESENT(DerivedPointerTable::clear());
3781 
3782       // Please see comment in g1CollectedHeap.hpp and
3783       // G1CollectedHeap::ref_processing_init() to see how
3784       // reference processing currently works in G1.
3785 
3786       // Enable discovery in the STW reference processor
3787       ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3788                                             true /*verify_no_refs*/);
3789 
3790       {
3791         // We want to temporarily turn off discovery by the
3792         // CM ref processor, if necessary, and turn it back on
3793         // on again later if we do. Using a scoped
3794         // NoRefDiscovery object will do this.
3795         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3796 
3797         // Forget the current alloc region (we might even choose it to be part
3798         // of the collection set!).
3799         release_mutator_alloc_region();
3800 
3801         // We should call this after we retire the mutator alloc
3802         // region(s) so that all the ALLOC / RETIRE events are generated
3803         // before the start GC event.
3804         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3805 
3806         // This timing is only used by the ergonomics to handle our pause target.
3807         // It is unclear why this should not include the full pause. We will
3808         // investigate this in CR 7178365.
3809         //
3810         // Preserving the old comment here if that helps the investigation:
3811         //
3812         // The elapsed time induced by the start time below deliberately elides
3813         // the possible verification above.
3814         double sample_start_time_sec = os::elapsedTime();
3815         size_t start_used_bytes = used();
3816 
3817 #if YOUNG_LIST_VERBOSE
3818         gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
3819         _young_list->print();
3820         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3821 #endif // YOUNG_LIST_VERBOSE
3822 
3823         g1_policy()->record_collection_pause_start(sample_start_time_sec,
3824                                                    start_used_bytes);
3825 
3826         double scan_wait_start = os::elapsedTime();
3827         // We have to wait until the CM threads finish scanning the
3828         // root regions as it's the only way to ensure that all the
3829         // objects on them have been correctly scanned before we start
3830         // moving them during the GC.
3831         bool waited = _cm->root_regions()->wait_until_scan_finished();
3832         double wait_time_ms = 0.0;
3833         if (waited) {
3834           double scan_wait_end = os::elapsedTime();
3835           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3836         }
3837         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3838 
3839 #if YOUNG_LIST_VERBOSE
3840         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3841         _young_list->print();
3842 #endif // YOUNG_LIST_VERBOSE
3843 
3844         if (g1_policy()->during_initial_mark_pause()) {
3845           concurrent_mark()->checkpointRootsInitialPre();
3846         }
3847 
3848 #if YOUNG_LIST_VERBOSE
3849         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3850         _young_list->print();
3851         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3852 #endif // YOUNG_LIST_VERBOSE
3853 
3854         g1_policy()->finalize_cset(target_pause_time_ms);
3855 
3856         _cm->note_start_of_gc();
3857         // We should not verify the per-thread SATB buffers given that
3858         // we have not filtered them yet (we'll do so during the
3859         // GC). We also call this after finalize_cset() to
3860         // ensure that the CSet has been finalized.
3861         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3862                                  true  /* verify_enqueued_buffers */,
3863                                  false /* verify_thread_buffers */,
3864                                  true  /* verify_fingers */);
3865 
3866         if (_hr_printer.is_active()) {
3867           HeapRegion* hr = g1_policy()->collection_set();
3868           while (hr != NULL) {
3869             G1HRPrinter::RegionType type;
3870             if (!hr->is_young()) {
3871               type = G1HRPrinter::Old;
3872             } else if (hr->is_survivor()) {
3873               type = G1HRPrinter::Survivor;
3874             } else {
3875               type = G1HRPrinter::Eden;
3876             }
3877             _hr_printer.cset(hr);
3878             hr = hr->next_in_collection_set();
3879           }
3880         }
3881 
3882 #ifdef ASSERT
3883         VerifyCSetClosure cl;
3884         collection_set_iterate(&cl);
3885 #endif // ASSERT
3886 
3887         setup_surviving_young_words();
3888 
3889         // Initialize the GC alloc regions.
3890         init_gc_alloc_regions();
3891 
3892         // Actually do the work...
3893         evacuate_collection_set();
3894 
3895         // We do this to mainly verify the per-thread SATB buffers
3896         // (which have been filtered by now) since we didn't verify
3897         // them earlier. No point in re-checking the stacks / enqueued
3898         // buffers given that the CSet has not changed since last time
3899         // we checked.
3900         _cm->verify_no_cset_oops(false /* verify_stacks */,
3901                                  false /* verify_enqueued_buffers */,
3902                                  true  /* verify_thread_buffers */,
3903                                  true  /* verify_fingers */);
3904 
3905         free_collection_set(g1_policy()->collection_set());
3906         g1_policy()->clear_collection_set();
3907 
3908         cleanup_surviving_young_words();
3909 
3910         // Start a new incremental collection set for the next pause.
3911         g1_policy()->start_incremental_cset_building();
3912 
3913         // Clear the _cset_fast_test bitmap in anticipation of adding
3914         // regions to the incremental collection set for the next
3915         // evacuation pause.
3916         clear_cset_fast_test();
3917 
3918         _young_list->reset_sampled_info();
3919 
3920         // Don't check the whole heap at this point as the
3921         // GC alloc regions from this pause have been tagged
3922         // as survivors and moved on to the survivor list.
3923         // Survivor regions will fail the !is_young() check.
3924         assert(check_young_list_empty(false /* check_heap */),
3925           "young list should be empty");
3926 
3927 #if YOUNG_LIST_VERBOSE
3928         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
3929         _young_list->print();
3930 #endif // YOUNG_LIST_VERBOSE
3931 
3932         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3933                                             _young_list->first_survivor_region(),
3934                                             _young_list->last_survivor_region());
3935 
3936         _young_list->reset_auxilary_lists();
3937 
3938         if (evacuation_failed()) {
3939           _summary_bytes_used = recalculate_used();
3940         } else {
3941           // The "used" of the the collection set have already been subtracted
3942           // when they were freed.  Add in the bytes evacuated.
3943           _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
3944         }
3945 
3946         if (g1_policy()->during_initial_mark_pause()) {
3947           // We have to do this before we notify the CM threads that
3948           // they can start working to make sure that all the
3949           // appropriate initialization is done on the CM object.
3950           concurrent_mark()->checkpointRootsInitialPost();
3951           set_marking_started();
3952           // Note that we don't actually trigger the CM thread at
3953           // this point. We do that later when we're sure that
3954           // the current thread has completed its logging output.
3955         }
3956 
3957         allocate_dummy_regions();
3958 
3959 #if YOUNG_LIST_VERBOSE
3960         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3961         _young_list->print();
3962         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3963 #endif // YOUNG_LIST_VERBOSE
3964 
3965         init_mutator_alloc_region();
3966 
3967         {
3968           size_t expand_bytes = g1_policy()->expansion_amount();
3969           if (expand_bytes > 0) {
3970             size_t bytes_before = capacity();
3971             // No need for an ergo verbose message here,
3972             // expansion_amount() does this when it returns a value > 0.
3973             if (!expand(expand_bytes)) {
3974               // We failed to expand the heap so let's verify that
3975               // committed/uncommitted amount match the backing store
3976               assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
3977               assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
3978             }
3979           }
3980         }
3981 
3982         // We redo the verificaiton but now wrt to the new CSet which
3983         // has just got initialized after the previous CSet was freed.
3984         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3985                                  true  /* verify_enqueued_buffers */,
3986                                  true  /* verify_thread_buffers */,
3987                                  true  /* verify_fingers */);
3988         _cm->note_end_of_gc();
3989 
3990         // This timing is only used by the ergonomics to handle our pause target.
3991         // It is unclear why this should not include the full pause. We will
3992         // investigate this in CR 7178365.
3993         double sample_end_time_sec = os::elapsedTime();
3994         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3995         g1_policy()->record_collection_pause_end(pause_time_ms);
3996 
3997         MemoryService::track_memory_usage();
3998 
3999         // In prepare_for_verify() below we'll need to scan the deferred
4000         // update buffers to bring the RSets up-to-date if
4001         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
4002         // the update buffers we'll probably need to scan cards on the
4003         // regions we just allocated to (i.e., the GC alloc
4004         // regions). However, during the last GC we called
4005         // set_saved_mark() on all the GC alloc regions, so card
4006         // scanning might skip the [saved_mark_word()...top()] area of
4007         // those regions (i.e., the area we allocated objects into
4008         // during the last GC). But it shouldn't. Given that
4009         // saved_mark_word() is conditional on whether the GC time stamp
4010         // on the region is current or not, by incrementing the GC time
4011         // stamp here we invalidate all the GC time stamps on all the
4012         // regions and saved_mark_word() will simply return top() for
4013         // all the regions. This is a nicer way of ensuring this rather
4014         // than iterating over the regions and fixing them. In fact, the
4015         // GC time stamp increment here also ensures that
4016         // saved_mark_word() will return top() between pauses, i.e.,
4017         // during concurrent refinement. So we don't need the
4018         // is_gc_active() check to decided which top to use when
4019         // scanning cards (see CR 7039627).
4020         increment_gc_time_stamp();
4021 
4022         verify_after_gc();
4023 
4024         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4025         ref_processor_stw()->verify_no_references_recorded();
4026 
4027         // CM reference discovery will be re-enabled if necessary.
4028       }
4029 
4030       // We should do this after we potentially expand the heap so
4031       // that all the COMMIT events are generated before the end GC
4032       // event, and after we retire the GC alloc regions so that all
4033       // RETIRE events are generated before the end GC event.
4034       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4035 
4036       if (mark_in_progress()) {
4037         concurrent_mark()->update_g1_committed();
4038       }
4039 
4040 #ifdef TRACESPINNING
4041       ParallelTaskTerminator::print_termination_counts();
4042 #endif
4043 
4044       gc_epilogue(false);
4045     }
4046 
4047     // Print the remainder of the GC log output.
4048     log_gc_footer(os::elapsedTime() - pause_start_sec);
4049 
4050     // It is not yet to safe to tell the concurrent mark to
4051     // start as we have some optional output below. We don't want the
4052     // output from the concurrent mark thread interfering with this
4053     // logging output either.
4054 
4055     _hrs.verify_optional();
4056     verify_region_sets_optional();
4057 
4058     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4059     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4060 
4061     print_heap_after_gc();
4062 
4063     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4064     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4065     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4066     // before any GC notifications are raised.
4067     g1mm()->update_sizes();
4068   }
4069 
4070   if (G1SummarizeRSetStats &&
4071       (G1SummarizeRSetStatsPeriod > 0) &&
4072       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
4073     g1_rem_set()->print_summary_info();
4074   }
4075 
4076   // It should now be safe to tell the concurrent mark thread to start
4077   // without its logging output interfering with the logging output
4078   // that came from the pause.
4079 
4080   if (should_start_conc_mark) {
4081     // CAUTION: after the doConcurrentMark() call below,
4082     // the concurrent marking thread(s) could be running
4083     // concurrently with us. Make sure that anything after
4084     // this point does not assume that we are the only GC thread
4085     // running. Note: of course, the actual marking work will
4086     // not start until the safepoint itself is released in
4087     // ConcurrentGCThread::safepoint_desynchronize().
4088     doConcurrentMark();
4089   }
4090 
4091   return true;
4092 }
4093 
4094 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
4095 {
4096   size_t gclab_word_size;
4097   switch (purpose) {
4098     case GCAllocForSurvived:
4099       gclab_word_size = _survivor_plab_stats.desired_plab_sz();
4100       break;
4101     case GCAllocForTenured:
4102       gclab_word_size = _old_plab_stats.desired_plab_sz();
4103       break;
4104     default:
4105       assert(false, "unknown GCAllocPurpose");
4106       gclab_word_size = _old_plab_stats.desired_plab_sz();
4107       break;
4108   }
4109 
4110   // Prevent humongous PLAB sizes for two reasons:
4111   // * PLABs are allocated using a similar paths as oops, but should
4112   //   never be in a humongous region
4113   // * Allowing humongous PLABs needlessly churns the region free lists
4114   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
4115 }
4116 
4117 void G1CollectedHeap::init_mutator_alloc_region() {
4118   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
4119   _mutator_alloc_region.init();
4120 }
4121 
4122 void G1CollectedHeap::release_mutator_alloc_region() {
4123   _mutator_alloc_region.release();
4124   assert(_mutator_alloc_region.get() == NULL, "post-condition");
4125 }
4126 
4127 void G1CollectedHeap::init_gc_alloc_regions() {
4128   assert_at_safepoint(true /* should_be_vm_thread */);
4129 
4130   _survivor_gc_alloc_region.init();
4131   _old_gc_alloc_region.init();
4132   HeapRegion* retained_region = _retained_old_gc_alloc_region;
4133   _retained_old_gc_alloc_region = NULL;
4134 
4135   // We will discard the current GC alloc region if:
4136   // a) it's in the collection set (it can happen!),
4137   // b) it's already full (no point in using it),
4138   // c) it's empty (this means that it was emptied during
4139   // a cleanup and it should be on the free list now), or
4140   // d) it's humongous (this means that it was emptied
4141   // during a cleanup and was added to the free list, but
4142   // has been subseqently used to allocate a humongous
4143   // object that may be less than the region size).
4144   if (retained_region != NULL &&
4145       !retained_region->in_collection_set() &&
4146       !(retained_region->top() == retained_region->end()) &&
4147       !retained_region->is_empty() &&
4148       !retained_region->isHumongous()) {
4149     retained_region->set_saved_mark();
4150     // The retained region was added to the old region set when it was
4151     // retired. We have to remove it now, since we don't allow regions
4152     // we allocate to in the region sets. We'll re-add it later, when
4153     // it's retired again.
4154     _old_set.remove(retained_region);
4155     bool during_im = g1_policy()->during_initial_mark_pause();
4156     retained_region->note_start_of_copying(during_im);
4157     _old_gc_alloc_region.set(retained_region);
4158     _hr_printer.reuse(retained_region);
4159   }
4160 }
4161 
4162 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
4163   _survivor_gc_alloc_region.release();
4164   // If we have an old GC alloc region to release, we'll save it in
4165   // _retained_old_gc_alloc_region. If we don't
4166   // _retained_old_gc_alloc_region will become NULL. This is what we
4167   // want either way so no reason to check explicitly for either
4168   // condition.
4169   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
4170 
4171   if (ResizePLAB) {
4172     _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4173     _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4174   }
4175 }
4176 
4177 void G1CollectedHeap::abandon_gc_alloc_regions() {
4178   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
4179   assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
4180   _retained_old_gc_alloc_region = NULL;
4181 }
4182 
4183 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
4184   _drain_in_progress = false;
4185   set_evac_failure_closure(cl);
4186   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
4187 }
4188 
4189 void G1CollectedHeap::finalize_for_evac_failure() {
4190   assert(_evac_failure_scan_stack != NULL &&
4191          _evac_failure_scan_stack->length() == 0,
4192          "Postcondition");
4193   assert(!_drain_in_progress, "Postcondition");
4194   delete _evac_failure_scan_stack;
4195   _evac_failure_scan_stack = NULL;
4196 }
4197 
4198 void G1CollectedHeap::remove_self_forwarding_pointers() {
4199   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4200 
4201   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
4202 
4203   if (G1CollectedHeap::use_parallel_gc_threads()) {
4204     set_par_threads();
4205     workers()->run_task(&rsfp_task);
4206     set_par_threads(0);
4207   } else {
4208     rsfp_task.work(0);
4209   }
4210 
4211   assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
4212 
4213   // Reset the claim values in the regions in the collection set.
4214   reset_cset_heap_region_claim_values();
4215 
4216   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4217 
4218   // Now restore saved marks, if any.
4219   while (_preserved_marks.has_data()) {
4220     G1PreserveMarkQueueEntry e = _preserved_marks.remove_first();
4221     e.obj->set_mark(e.mark);
4222   }
4223 }
4224 
4225 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4226   _evac_failure_scan_stack->push(obj);
4227 }
4228 
4229 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4230   assert(_evac_failure_scan_stack != NULL, "precondition");
4231 
4232   while (_evac_failure_scan_stack->length() > 0) {
4233      oop obj = _evac_failure_scan_stack->pop();
4234      _evac_failure_closure->set_region(heap_region_containing(obj));
4235      obj->oop_iterate_backwards(_evac_failure_closure);
4236   }
4237 }
4238 
4239 oop
4240 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
4241                                                oop old) {
4242   assert(obj_in_cs(old),
4243          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
4244                  (HeapWord*) old));
4245   markOop m = old->mark();
4246   oop forward_ptr = old->forward_to_atomic(old);
4247   if (forward_ptr == NULL) {
4248     // Forward-to-self succeeded.
4249 
4250     if (_evac_failure_closure != cl) {
4251       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4252       assert(!_drain_in_progress,
4253              "Should only be true while someone holds the lock.");
4254       // Set the global evac-failure closure to the current thread's.
4255       assert(_evac_failure_closure == NULL, "Or locking has failed.");
4256       set_evac_failure_closure(cl);
4257       // Now do the common part.
4258       handle_evacuation_failure_common(old, m);
4259       // Reset to NULL.
4260       set_evac_failure_closure(NULL);
4261     } else {
4262       // The lock is already held, and this is recursive.
4263       assert(_drain_in_progress, "This should only be the recursive case.");
4264       handle_evacuation_failure_common(old, m);
4265     }
4266     return old;
4267   } else {
4268     // Forward-to-self failed. Either someone else managed to allocate
4269     // space for this object (old != forward_ptr) or they beat us in
4270     // self-forwarding it (old == forward_ptr).
4271     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
4272            err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
4273                    "should not be in the CSet",
4274                    (HeapWord*) old, (HeapWord*) forward_ptr));
4275     return forward_ptr;
4276   }
4277 }
4278 
4279 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
4280   set_evacuation_failed(true);
4281 
4282   preserve_mark_if_necessary(old, m);
4283 
4284   HeapRegion* r = heap_region_containing(old);
4285   if (!r->evacuation_failed()) {
4286     r->set_evacuation_failed(true);
4287     _hr_printer.evac_failure(r);
4288   }
4289 
4290   push_on_evac_failure_scan_stack(old);
4291 
4292   if (!_drain_in_progress) {
4293     // prevent recursion in copy_to_survivor_space()
4294     _drain_in_progress = true;
4295     drain_evac_failure_scan_stack();
4296     _drain_in_progress = false;
4297   }
4298 }
4299 
4300 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4301   assert(evacuation_failed(), "Oversaving!");
4302   // We want to call the "for_promotion_failure" version only in the
4303   // case of a promotion failure.
4304   if (m->must_be_preserved_for_promotion_failure(obj)) {
4305     _preserved_marks.append(obj, m);
4306   }
4307 }
4308 
4309 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
4310                                                   size_t word_size) {
4311   if (purpose == GCAllocForSurvived) {
4312     HeapWord* result = survivor_attempt_allocation(word_size);
4313     if (result != NULL) {
4314       return result;
4315     } else {
4316       // Let's try to allocate in the old gen in case we can fit the
4317       // object there.
4318       return old_attempt_allocation(word_size);
4319     }
4320   } else {
4321     assert(purpose ==  GCAllocForTenured, "sanity");
4322     HeapWord* result = old_attempt_allocation(word_size);
4323     if (result != NULL) {
4324       return result;
4325     } else {
4326       // Let's try to allocate in the survivors in case we can fit the
4327       // object there.
4328       return survivor_attempt_allocation(word_size);
4329     }
4330   }
4331 
4332   ShouldNotReachHere();
4333   // Trying to keep some compilers happy.
4334   return NULL;
4335 }
4336 
4337 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4338   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
4339 
4340 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
4341   : _g1h(g1h),
4342     _refs(g1h->task_queue(queue_num)),
4343     _dcq(&g1h->dirty_card_queue_set()),
4344     _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
4345     _g1_rem(g1h->g1_rem_set()),
4346     _hash_seed(17), _queue_num(queue_num),
4347     _term_attempts(0),
4348     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4349     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4350     _age_table(false),
4351     _strong_roots_time(0), _term_time(0),
4352     _alloc_buffer_waste(0), _undo_waste(0) {
4353   // we allocate G1YoungSurvRateNumRegions plus one entries, since
4354   // we "sacrifice" entry 0 to keep track of surviving bytes for
4355   // non-young regions (where the age is -1)
4356   // We also add a few elements at the beginning and at the end in
4357   // an attempt to eliminate cache contention
4358   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
4359   uint array_length = PADDING_ELEM_NUM +
4360                       real_length +
4361                       PADDING_ELEM_NUM;
4362   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
4363   if (_surviving_young_words_base == NULL)
4364     vm_exit_out_of_memory(array_length * sizeof(size_t),
4365                           "Not enough space for young surv histo.");
4366   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
4367   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
4368 
4369   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
4370   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
4371 
4372   _start = os::elapsedTime();
4373 }
4374 
4375 void
4376 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
4377 {
4378   st->print_raw_cr("GC Termination Stats");
4379   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
4380                    " ------waste (KiB)------");
4381   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
4382                    "  total   alloc    undo");
4383   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
4384                    " ------- ------- -------");
4385 }
4386 
4387 void
4388 G1ParScanThreadState::print_termination_stats(int i,
4389                                               outputStream* const st) const
4390 {
4391   const double elapsed_ms = elapsed_time() * 1000.0;
4392   const double s_roots_ms = strong_roots_time() * 1000.0;
4393   const double term_ms    = term_time() * 1000.0;
4394   st->print_cr("%3d %9.2f %9.2f %6.2f "
4395                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4396                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4397                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
4398                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
4399                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
4400                alloc_buffer_waste() * HeapWordSize / K,
4401                undo_waste() * HeapWordSize / K);
4402 }
4403 
4404 #ifdef ASSERT
4405 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
4406   assert(ref != NULL, "invariant");
4407   assert(UseCompressedOops, "sanity");
4408   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
4409   oop p = oopDesc::load_decode_heap_oop(ref);
4410   assert(_g1h->is_in_g1_reserved(p),
4411          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4412   return true;
4413 }
4414 
4415 bool G1ParScanThreadState::verify_ref(oop* ref) const {
4416   assert(ref != NULL, "invariant");
4417   if (has_partial_array_mask(ref)) {
4418     // Must be in the collection set--it's already been copied.
4419     oop p = clear_partial_array_mask(ref);
4420     assert(_g1h->obj_in_cs(p),
4421            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4422   } else {
4423     oop p = oopDesc::load_decode_heap_oop(ref);
4424     assert(_g1h->is_in_g1_reserved(p),
4425            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4426   }
4427   return true;
4428 }
4429 
4430 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4431   if (ref.is_narrow()) {
4432     return verify_ref((narrowOop*) ref);
4433   } else {
4434     return verify_ref((oop*) ref);
4435   }
4436 }
4437 #endif // ASSERT
4438 
4439 void G1ParScanThreadState::trim_queue() {
4440   assert(_evac_cl != NULL, "not set");
4441   assert(_evac_failure_cl != NULL, "not set");
4442   assert(_partial_scan_cl != NULL, "not set");
4443 
4444   StarTask ref;
4445   do {
4446     // Drain the overflow stack first, so other threads can steal.
4447     while (refs()->pop_overflow(ref)) {
4448       deal_with_reference(ref);
4449     }
4450 
4451     while (refs()->pop_local(ref)) {
4452       deal_with_reference(ref);
4453     }
4454   } while (!refs()->is_empty());
4455 }
4456 
4457 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4458                                      G1ParScanThreadState* par_scan_state) :
4459   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4460   _par_scan_state(par_scan_state),
4461   _worker_id(par_scan_state->queue_num()),
4462   _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
4463   _mark_in_progress(_g1->mark_in_progress()) { }
4464 
4465 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4466 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
4467 #ifdef ASSERT
4468   HeapRegion* hr = _g1->heap_region_containing(obj);
4469   assert(hr != NULL, "sanity");
4470   assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4471 #endif // ASSERT
4472 
4473   // We know that the object is not moving so it's safe to read its size.
4474   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4475 }
4476 
4477 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4478 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4479   ::mark_forwarded_object(oop from_obj, oop to_obj) {
4480 #ifdef ASSERT
4481   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4482   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4483   assert(from_obj != to_obj, "should not be self-forwarded");
4484 
4485   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
4486   assert(from_hr != NULL, "sanity");
4487   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4488 
4489   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4490   assert(to_hr != NULL, "sanity");
4491   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4492 #endif // ASSERT
4493 
4494   // The object might be in the process of being copied by another
4495   // worker so we cannot trust that its to-space image is
4496   // well-formed. So we have to read its size from its from-space
4497   // image which we know should not be changing.
4498   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4499 }
4500 
4501 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4502 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4503   ::copy_to_survivor_space(oop old) {
4504   size_t word_sz = old->size();
4505   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4506   // +1 to make the -1 indexes valid...
4507   int       young_index = from_region->young_index_in_cset()+1;
4508   assert( (from_region->is_young() && young_index >  0) ||
4509          (!from_region->is_young() && young_index == 0), "invariant" );
4510   G1CollectorPolicy* g1p = _g1->g1_policy();
4511   markOop m = old->mark();
4512   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4513                                            : m->age();
4514   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4515                                                              word_sz);
4516   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4517 #ifndef PRODUCT
4518   // Should this evacuation fail?
4519   if (_g1->evacuation_should_fail()) {
4520     if (obj_ptr != NULL) {
4521       _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4522       obj_ptr = NULL;
4523     }
4524   }
4525 #endif // !PRODUCT
4526 
4527   if (obj_ptr == NULL) {
4528     // This will either forward-to-self, or detect that someone else has
4529     // installed a forwarding pointer.
4530     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4531     return _g1->handle_evacuation_failure_par(cl, old);
4532   }
4533 
4534   oop obj = oop(obj_ptr);
4535 
4536   // We're going to allocate linearly, so might as well prefetch ahead.
4537   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4538 
4539   oop forward_ptr = old->forward_to_atomic(obj);
4540   if (forward_ptr == NULL) {
4541     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4542     if (g1p->track_object_age(alloc_purpose)) {
4543       // We could simply do obj->incr_age(). However, this causes a
4544       // performance issue. obj->incr_age() will first check whether
4545       // the object has a displaced mark by checking its mark word;
4546       // getting the mark word from the new location of the object
4547       // stalls. So, given that we already have the mark word and we
4548       // are about to install it anyway, it's better to increase the
4549       // age on the mark word, when the object does not have a
4550       // displaced mark word. We're not expecting many objects to have
4551       // a displaced marked word, so that case is not optimized
4552       // further (it could be...) and we simply call obj->incr_age().
4553 
4554       if (m->has_displaced_mark_helper()) {
4555         // in this case, we have to install the mark word first,
4556         // otherwise obj looks to be forwarded (the old mark word,
4557         // which contains the forward pointer, was copied)
4558         obj->set_mark(m);
4559         obj->incr_age();
4560       } else {
4561         m = m->incr_age();
4562         obj->set_mark(m);
4563       }
4564       _par_scan_state->age_table()->add(obj, word_sz);
4565     } else {
4566       obj->set_mark(m);
4567     }
4568 
4569     size_t* surv_young_words = _par_scan_state->surviving_young_words();
4570     surv_young_words[young_index] += word_sz;
4571 
4572     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4573       // We keep track of the next start index in the length field of
4574       // the to-space object. The actual length can be found in the
4575       // length field of the from-space object.
4576       arrayOop(obj)->set_length(0);
4577       oop* old_p = set_partial_array_mask(old);
4578       _par_scan_state->push_on_queue(old_p);
4579     } else {
4580       // No point in using the slower heap_region_containing() method,
4581       // given that we know obj is in the heap.
4582       _scanner.set_region(_g1->heap_region_containing_raw(obj));
4583       obj->oop_iterate_backwards(&_scanner);
4584     }
4585   } else {
4586     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4587     obj = forward_ptr;
4588   }
4589   return obj;
4590 }
4591 
4592 template <class T>
4593 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4594   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4595     _scanned_klass->record_modified_oops();
4596   }
4597 }
4598 
4599 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4600 template <class T>
4601 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4602 ::do_oop_work(T* p) {
4603   oop obj = oopDesc::load_decode_heap_oop(p);
4604   assert(barrier != G1BarrierRS || obj != NULL,
4605          "Precondition: G1BarrierRS implies obj is non-NULL");
4606 
4607   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4608 
4609   // here the null check is implicit in the cset_fast_test() test
4610   if (_g1->in_cset_fast_test(obj)) {
4611     oop forwardee;
4612     if (obj->is_forwarded()) {
4613       forwardee = obj->forwardee();
4614     } else {
4615       forwardee = copy_to_survivor_space(obj);
4616     }
4617     assert(forwardee != NULL, "forwardee should not be NULL");
4618     oopDesc::encode_store_heap_oop(p, forwardee);
4619     if (do_mark_object && forwardee != obj) {
4620       // If the object is self-forwarded we don't need to explicitly
4621       // mark it, the evacuation failure protocol will do so.
4622       mark_forwarded_object(obj, forwardee);
4623     }
4624 
4625     // When scanning the RS, we only care about objs in CS.
4626     if (barrier == G1BarrierRS) {
4627       _par_scan_state->update_rs(_from, p, _worker_id);
4628     } else if (barrier == G1BarrierKlass) {
4629       do_klass_barrier(p, forwardee);
4630     }
4631   } else {
4632     // The object is not in collection set. If we're a root scanning
4633     // closure during an initial mark pause (i.e. do_mark_object will
4634     // be true) then attempt to mark the object.
4635     if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
4636       mark_object(obj);
4637     }
4638   }
4639 
4640   if (barrier == G1BarrierEvac && obj != NULL) {
4641     _par_scan_state->update_rs(_from, p, _worker_id);
4642   }
4643 
4644   if (do_gen_barrier && obj != NULL) {
4645     par_do_barrier(p);
4646   }
4647 }
4648 
4649 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
4650 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4651 
4652 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4653   assert(has_partial_array_mask(p), "invariant");
4654   oop from_obj = clear_partial_array_mask(p);
4655 
4656   assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
4657   assert(from_obj->is_objArray(), "must be obj array");
4658   objArrayOop from_obj_array = objArrayOop(from_obj);
4659   // The from-space object contains the real length.
4660   int length                 = from_obj_array->length();
4661 
4662   assert(from_obj->is_forwarded(), "must be forwarded");
4663   oop to_obj                 = from_obj->forwardee();
4664   assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
4665   objArrayOop to_obj_array   = objArrayOop(to_obj);
4666   // We keep track of the next start index in the length field of the
4667   // to-space object.
4668   int next_index             = to_obj_array->length();
4669   assert(0 <= next_index && next_index < length,
4670          err_msg("invariant, next index: %d, length: %d", next_index, length));
4671 
4672   int start                  = next_index;
4673   int end                    = length;
4674   int remainder              = end - start;
4675   // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
4676   if (remainder > 2 * ParGCArrayScanChunk) {
4677     end = start + ParGCArrayScanChunk;
4678     to_obj_array->set_length(end);
4679     // Push the remainder before we process the range in case another
4680     // worker has run out of things to do and can steal it.
4681     oop* from_obj_p = set_partial_array_mask(from_obj);
4682     _par_scan_state->push_on_queue(from_obj_p);
4683   } else {
4684     assert(length == end, "sanity");
4685     // We'll process the final range for this object. Restore the length
4686     // so that the heap remains parsable in case of evacuation failure.
4687     to_obj_array->set_length(end);
4688   }
4689   _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
4690   // Process indexes [start,end). It will also process the header
4691   // along with the first chunk (i.e., the chunk with start == 0).
4692   // Note that at this point the length field of to_obj_array is not
4693   // correct given that we are using it to keep track of the next
4694   // start index. oop_iterate_range() (thankfully!) ignores the length
4695   // field and only relies on the start / end parameters.  It does
4696   // however return the size of the object which will be incorrect. So
4697   // we have to ignore it even if we wanted to use it.
4698   to_obj_array->oop_iterate_range(&_scanner, start, end);
4699 }
4700 
4701 class G1ParEvacuateFollowersClosure : public VoidClosure {
4702 protected:
4703   G1CollectedHeap*              _g1h;
4704   G1ParScanThreadState*         _par_scan_state;
4705   RefToScanQueueSet*            _queues;
4706   ParallelTaskTerminator*       _terminator;
4707 
4708   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
4709   RefToScanQueueSet*      queues()         { return _queues; }
4710   ParallelTaskTerminator* terminator()     { return _terminator; }
4711 
4712 public:
4713   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4714                                 G1ParScanThreadState* par_scan_state,
4715                                 RefToScanQueueSet* queues,
4716                                 ParallelTaskTerminator* terminator)
4717     : _g1h(g1h), _par_scan_state(par_scan_state),
4718       _queues(queues), _terminator(terminator) {}
4719 
4720   void do_void();
4721 
4722 private:
4723   inline bool offer_termination();
4724 };
4725 
4726 bool G1ParEvacuateFollowersClosure::offer_termination() {
4727   G1ParScanThreadState* const pss = par_scan_state();
4728   pss->start_term_time();
4729   const bool res = terminator()->offer_termination();
4730   pss->end_term_time();
4731   return res;
4732 }
4733 
4734 void G1ParEvacuateFollowersClosure::do_void() {
4735   StarTask stolen_task;
4736   G1ParScanThreadState* const pss = par_scan_state();
4737   pss->trim_queue();
4738 
4739   do {
4740     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
4741       assert(pss->verify_task(stolen_task), "sanity");
4742       if (stolen_task.is_narrow()) {
4743         pss->deal_with_reference((narrowOop*) stolen_task);
4744       } else {
4745         pss->deal_with_reference((oop*) stolen_task);
4746       }
4747 
4748       // We've just processed a reference and we might have made
4749       // available new entries on the queues. So we have to make sure
4750       // we drain the queues as necessary.
4751       pss->trim_queue();
4752     }
4753   } while (!offer_termination());
4754 
4755   pss->retire_alloc_buffers();
4756 }
4757 
4758 class G1KlassScanClosure : public KlassClosure {
4759  G1ParCopyHelper* _closure;
4760  bool             _process_only_dirty;
4761  int              _count;
4762  public:
4763   G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
4764       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4765   void do_klass(Klass* klass) {
4766     // If the klass has not been dirtied we know that there's
4767     // no references into  the young gen and we can skip it.
4768    if (!_process_only_dirty || klass->has_modified_oops()) {
4769       // Clean the klass since we're going to scavenge all the metadata.
4770       klass->clear_modified_oops();
4771 
4772       // Tell the closure that this klass is the Klass to scavenge
4773       // and is the one to dirty if oops are left pointing into the young gen.
4774       _closure->set_scanned_klass(klass);
4775 
4776       klass->oops_do(_closure);
4777 
4778       _closure->set_scanned_klass(NULL);
4779     }
4780     _count++;
4781   }
4782 };
4783 
4784 class G1ParTask : public AbstractGangTask {
4785 protected:
4786   G1CollectedHeap*       _g1h;
4787   RefToScanQueueSet      *_queues;
4788   ParallelTaskTerminator _terminator;
4789   uint _n_workers;
4790 
4791   Mutex _stats_lock;
4792   Mutex* stats_lock() { return &_stats_lock; }
4793 
4794   size_t getNCards() {
4795     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
4796       / G1BlockOffsetSharedArray::N_bytes;
4797   }
4798 
4799 public:
4800   G1ParTask(G1CollectedHeap* g1h,
4801             RefToScanQueueSet *task_queues)
4802     : AbstractGangTask("G1 collection"),
4803       _g1h(g1h),
4804       _queues(task_queues),
4805       _terminator(0, _queues),
4806       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4807   {}
4808 
4809   RefToScanQueueSet* queues() { return _queues; }
4810 
4811   RefToScanQueue *work_queue(int i) {
4812     return queues()->queue(i);
4813   }
4814 
4815   ParallelTaskTerminator* terminator() { return &_terminator; }
4816 
4817   virtual void set_for_termination(int active_workers) {
4818     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4819     // in the young space (_par_seq_tasks) in the G1 heap
4820     // for SequentialSubTasksDone.
4821     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
4822     // both of which need setting by set_n_termination().
4823     _g1h->SharedHeap::set_n_termination(active_workers);
4824     _g1h->set_n_termination(active_workers);
4825     terminator()->reset_for_reuse(active_workers);
4826     _n_workers = active_workers;
4827   }
4828 
4829   void work(uint worker_id) {
4830     if (worker_id >= _n_workers) return;  // no work needed this round
4831 
4832     double start_time_ms = os::elapsedTime() * 1000.0;
4833     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4834 
4835     {
4836       ResourceMark rm;
4837       HandleMark   hm;
4838 
4839       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
4840 
4841       G1ParScanThreadState            pss(_g1h, worker_id);
4842       G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
4843       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4844       G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
4845 
4846       pss.set_evac_closure(&scan_evac_cl);
4847       pss.set_evac_failure_closure(&evac_failure_cl);
4848       pss.set_partial_scan_closure(&partial_scan_cl);
4849 
4850       G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
4851       G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
4852 
4853       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4854       G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
4855 
4856       bool only_young                 = _g1h->g1_policy()->gcs_are_young();
4857       G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
4858       G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
4859 
4860       OopClosure*                    scan_root_cl = &only_scan_root_cl;
4861       G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
4862 
4863       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4864         // We also need to mark copied objects.
4865         scan_root_cl = &scan_mark_root_cl;
4866         scan_klasses_cl = &scan_mark_klasses_cl_s;
4867       }
4868 
4869       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
4870 
4871       int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
4872 
4873       pss.start_strong_roots();
4874       _g1h->g1_process_strong_roots(/* is scavenging */ true,
4875                                     SharedHeap::ScanningOption(so),
4876                                     scan_root_cl,
4877                                     &push_heap_rs_cl,
4878                                     scan_klasses_cl,
4879                                     worker_id);
4880       pss.end_strong_roots();
4881 
4882       {
4883         double start = os::elapsedTime();
4884         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4885         evac.do_void();
4886         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4887         double term_ms = pss.term_time()*1000.0;
4888         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4889         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4890       }
4891       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4892       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4893 
4894       if (ParallelGCVerbose) {
4895         MutexLocker x(stats_lock());
4896         pss.print_termination_stats(worker_id);
4897       }
4898 
4899       assert(pss.refs()->is_empty(), "should be empty");
4900 
4901       // Close the inner scope so that the ResourceMark and HandleMark
4902       // destructors are executed here and are included as part of the
4903       // "GC Worker Time".
4904     }
4905 
4906     double end_time_ms = os::elapsedTime() * 1000.0;
4907     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4908   }
4909 };
4910 
4911 // *** Common G1 Evacuation Stuff
4912 
4913 // Closures that support the filtering of CodeBlobs scanned during
4914 // external root scanning.
4915 
4916 // Closure applied to reference fields in code blobs (specifically nmethods)
4917 // to determine whether an nmethod contains references that point into
4918 // the collection set. Used as a predicate when walking code roots so
4919 // that only nmethods that point into the collection set are added to the
4920 // 'marked' list.
4921 
4922 class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
4923 
4924   class G1PointsIntoCSOopClosure : public OopClosure {
4925     G1CollectedHeap* _g1;
4926     bool _points_into_cs;
4927   public:
4928     G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
4929       _g1(g1), _points_into_cs(false) { }
4930 
4931     bool points_into_cs() const { return _points_into_cs; }
4932 
4933     template <class T>
4934     void do_oop_nv(T* p) {
4935       if (!_points_into_cs) {
4936         T heap_oop = oopDesc::load_heap_oop(p);
4937         if (!oopDesc::is_null(heap_oop) &&
4938             _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
4939           _points_into_cs = true;
4940         }
4941       }
4942     }
4943 
4944     virtual void do_oop(oop* p)        { do_oop_nv(p); }
4945     virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
4946   };
4947 
4948   G1CollectedHeap* _g1;
4949 
4950 public:
4951   G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
4952     CodeBlobToOopClosure(cl, true), _g1(g1) { }
4953 
4954   virtual void do_code_blob(CodeBlob* cb) {
4955     nmethod* nm = cb->as_nmethod_or_null();
4956     if (nm != NULL && !(nm->test_oops_do_mark())) {
4957       G1PointsIntoCSOopClosure predicate_cl(_g1);
4958       nm->oops_do(&predicate_cl);
4959 
4960       if (predicate_cl.points_into_cs()) {
4961         // At least one of the reference fields or the oop relocations
4962         // in the nmethod points into the collection set. We have to
4963         // 'mark' this nmethod.
4964         // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
4965         // or MarkingCodeBlobClosure::do_code_blob() change.
4966         if (!nm->test_set_oops_do_mark()) {
4967           do_newly_marked_nmethod(nm);
4968         }
4969       }
4970     }
4971   }
4972 };
4973 
4974 // This method is run in a GC worker.
4975 
4976 void
4977 G1CollectedHeap::
4978 g1_process_strong_roots(bool is_scavenging,
4979                         ScanningOption so,
4980                         OopClosure* scan_non_heap_roots,
4981                         OopsInHeapRegionClosure* scan_rs,
4982                         G1KlassScanClosure* scan_klasses,
4983                         int worker_i) {
4984 
4985   // First scan the strong roots
4986   double ext_roots_start = os::elapsedTime();
4987   double closure_app_time_sec = 0.0;
4988 
4989   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4990 
4991   // Walk the code cache w/o buffering, because StarTask cannot handle
4992   // unaligned oop locations.
4993   G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
4994 
4995   process_strong_roots(false, // no scoping; this is parallel code
4996                        is_scavenging, so,
4997                        &buf_scan_non_heap_roots,
4998                        &eager_scan_code_roots,
4999                        scan_klasses
5000                        );
5001 
5002   // Now the CM ref_processor roots.
5003   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
5004     // We need to treat the discovered reference lists of the
5005     // concurrent mark ref processor as roots and keep entries
5006     // (which are added by the marking threads) on them live
5007     // until they can be processed at the end of marking.
5008     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
5009   }
5010 
5011   // Finish up any enqueued closure apps (attributed as object copy time).
5012   buf_scan_non_heap_roots.done();
5013 
5014   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
5015 
5016   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
5017 
5018   double ext_root_time_ms =
5019     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
5020 
5021   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
5022 
5023   // During conc marking we have to filter the per-thread SATB buffers
5024   // to make sure we remove any oops into the CSet (which will show up
5025   // as implicitly live).
5026   double satb_filtering_ms = 0.0;
5027   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
5028     if (mark_in_progress()) {
5029       double satb_filter_start = os::elapsedTime();
5030 
5031       JavaThread::satb_mark_queue_set().filter_thread_buffers();
5032 
5033       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
5034     }
5035   }
5036   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
5037 
5038   // Now scan the complement of the collection set.
5039   if (scan_rs != NULL) {
5040     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
5041   }
5042   _process_strong_tasks->all_tasks_completed();
5043 }
5044 
5045 void
5046 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
5047                                        OopClosure* non_root_closure) {
5048   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
5049   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
5050 }
5051 
5052 // Weak Reference Processing support
5053 
5054 // An always "is_alive" closure that is used to preserve referents.
5055 // If the object is non-null then it's alive.  Used in the preservation
5056 // of referent objects that are pointed to by reference objects
5057 // discovered by the CM ref processor.
5058 class G1AlwaysAliveClosure: public BoolObjectClosure {
5059   G1CollectedHeap* _g1;
5060 public:
5061   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5062   void do_object(oop p) { assert(false, "Do not call."); }
5063   bool do_object_b(oop p) {
5064     if (p != NULL) {
5065       return true;
5066     }
5067     return false;
5068   }
5069 };
5070 
5071 bool G1STWIsAliveClosure::do_object_b(oop p) {
5072   // An object is reachable if it is outside the collection set,
5073   // or is inside and copied.
5074   return !_g1->obj_in_cs(p) || p->is_forwarded();
5075 }
5076 
5077 // Non Copying Keep Alive closure
5078 class G1KeepAliveClosure: public OopClosure {
5079   G1CollectedHeap* _g1;
5080 public:
5081   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5082   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5083   void do_oop(      oop* p) {
5084     oop obj = *p;
5085 
5086     if (_g1->obj_in_cs(obj)) {
5087       assert( obj->is_forwarded(), "invariant" );
5088       *p = obj->forwardee();
5089     }
5090   }
5091 };
5092 
5093 // Copying Keep Alive closure - can be called from both
5094 // serial and parallel code as long as different worker
5095 // threads utilize different G1ParScanThreadState instances
5096 // and different queues.
5097 
5098 class G1CopyingKeepAliveClosure: public OopClosure {
5099   G1CollectedHeap*         _g1h;
5100   OopClosure*              _copy_non_heap_obj_cl;
5101   OopsInHeapRegionClosure* _copy_metadata_obj_cl;
5102   G1ParScanThreadState*    _par_scan_state;
5103 
5104 public:
5105   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5106                             OopClosure* non_heap_obj_cl,
5107                             OopsInHeapRegionClosure* metadata_obj_cl,
5108                             G1ParScanThreadState* pss):
5109     _g1h(g1h),
5110     _copy_non_heap_obj_cl(non_heap_obj_cl),
5111     _copy_metadata_obj_cl(metadata_obj_cl),
5112     _par_scan_state(pss)
5113   {}
5114 
5115   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
5116   virtual void do_oop(      oop* p) { do_oop_work(p); }
5117 
5118   template <class T> void do_oop_work(T* p) {
5119     oop obj = oopDesc::load_decode_heap_oop(p);
5120 
5121     if (_g1h->obj_in_cs(obj)) {
5122       // If the referent object has been forwarded (either copied
5123       // to a new location or to itself in the event of an
5124       // evacuation failure) then we need to update the reference
5125       // field and, if both reference and referent are in the G1
5126       // heap, update the RSet for the referent.
5127       //
5128       // If the referent has not been forwarded then we have to keep
5129       // it alive by policy. Therefore we have copy the referent.
5130       //
5131       // If the reference field is in the G1 heap then we can push
5132       // on the PSS queue. When the queue is drained (after each
5133       // phase of reference processing) the object and it's followers
5134       // will be copied, the reference field set to point to the
5135       // new location, and the RSet updated. Otherwise we need to
5136       // use the the non-heap or metadata closures directly to copy
5137       // the refernt object and update the pointer, while avoiding
5138       // updating the RSet.
5139 
5140       if (_g1h->is_in_g1_reserved(p)) {
5141         _par_scan_state->push_on_queue(p);
5142       } else {
5143         assert(!ClassLoaderDataGraph::contains((address)p),
5144                err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
5145                               PTR_FORMAT, p));
5146           _copy_non_heap_obj_cl->do_oop(p);
5147         }
5148       }
5149     }
5150 };
5151 
5152 // Serial drain queue closure. Called as the 'complete_gc'
5153 // closure for each discovered list in some of the
5154 // reference processing phases.
5155 
5156 class G1STWDrainQueueClosure: public VoidClosure {
5157 protected:
5158   G1CollectedHeap* _g1h;
5159   G1ParScanThreadState* _par_scan_state;
5160 
5161   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
5162 
5163 public:
5164   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
5165     _g1h(g1h),
5166     _par_scan_state(pss)
5167   { }
5168 
5169   void do_void() {
5170     G1ParScanThreadState* const pss = par_scan_state();
5171     pss->trim_queue();
5172   }
5173 };
5174 
5175 // Parallel Reference Processing closures
5176 
5177 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5178 // processing during G1 evacuation pauses.
5179 
5180 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
5181 private:
5182   G1CollectedHeap*   _g1h;
5183   RefToScanQueueSet* _queues;
5184   FlexibleWorkGang*  _workers;
5185   int                _active_workers;
5186 
5187 public:
5188   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5189                         FlexibleWorkGang* workers,
5190                         RefToScanQueueSet *task_queues,
5191                         int n_workers) :
5192     _g1h(g1h),
5193     _queues(task_queues),
5194     _workers(workers),
5195     _active_workers(n_workers)
5196   {
5197     assert(n_workers > 0, "shouldn't call this otherwise");
5198   }
5199 
5200   // Executes the given task using concurrent marking worker threads.
5201   virtual void execute(ProcessTask& task);
5202   virtual void execute(EnqueueTask& task);
5203 };
5204 
5205 // Gang task for possibly parallel reference processing
5206 
5207 class G1STWRefProcTaskProxy: public AbstractGangTask {
5208   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5209   ProcessTask&     _proc_task;
5210   G1CollectedHeap* _g1h;
5211   RefToScanQueueSet *_task_queues;
5212   ParallelTaskTerminator* _terminator;
5213 
5214 public:
5215   G1STWRefProcTaskProxy(ProcessTask& proc_task,
5216                      G1CollectedHeap* g1h,
5217                      RefToScanQueueSet *task_queues,
5218                      ParallelTaskTerminator* terminator) :
5219     AbstractGangTask("Process reference objects in parallel"),
5220     _proc_task(proc_task),
5221     _g1h(g1h),
5222     _task_queues(task_queues),
5223     _terminator(terminator)
5224   {}
5225 
5226   virtual void work(uint worker_id) {
5227     // The reference processing task executed by a single worker.
5228     ResourceMark rm;
5229     HandleMark   hm;
5230 
5231     G1STWIsAliveClosure is_alive(_g1h);
5232 
5233     G1ParScanThreadState pss(_g1h, worker_id);
5234 
5235     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5236     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5237     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5238 
5239     pss.set_evac_closure(&scan_evac_cl);
5240     pss.set_evac_failure_closure(&evac_failure_cl);
5241     pss.set_partial_scan_closure(&partial_scan_cl);
5242 
5243     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5244     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5245 
5246     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5247     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5248 
5249     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5250     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5251 
5252     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5253       // We also need to mark copied objects.
5254       copy_non_heap_cl = &copy_mark_non_heap_cl;
5255       copy_metadata_cl = &copy_mark_metadata_cl;
5256     }
5257 
5258     // Keep alive closure.
5259     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5260 
5261     // Complete GC closure
5262     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5263 
5264     // Call the reference processing task's work routine.
5265     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5266 
5267     // Note we cannot assert that the refs array is empty here as not all
5268     // of the processing tasks (specifically phase2 - pp2_work) execute
5269     // the complete_gc closure (which ordinarily would drain the queue) so
5270     // the queue may not be empty.
5271   }
5272 };
5273 
5274 // Driver routine for parallel reference processing.
5275 // Creates an instance of the ref processing gang
5276 // task and has the worker threads execute it.
5277 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
5278   assert(_workers != NULL, "Need parallel worker threads.");
5279 
5280   ParallelTaskTerminator terminator(_active_workers, _queues);
5281   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
5282 
5283   _g1h->set_par_threads(_active_workers);
5284   _workers->run_task(&proc_task_proxy);
5285   _g1h->set_par_threads(0);
5286 }
5287 
5288 // Gang task for parallel reference enqueueing.
5289 
5290 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
5291   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5292   EnqueueTask& _enq_task;
5293 
5294 public:
5295   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
5296     AbstractGangTask("Enqueue reference objects in parallel"),
5297     _enq_task(enq_task)
5298   { }
5299 
5300   virtual void work(uint worker_id) {
5301     _enq_task.work(worker_id);
5302   }
5303 };
5304 
5305 // Driver routine for parallel reference enqueing.
5306 // Creates an instance of the ref enqueueing gang
5307 // task and has the worker threads execute it.
5308 
5309 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
5310   assert(_workers != NULL, "Need parallel worker threads.");
5311 
5312   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5313 
5314   _g1h->set_par_threads(_active_workers);
5315   _workers->run_task(&enq_task_proxy);
5316   _g1h->set_par_threads(0);
5317 }
5318 
5319 // End of weak reference support closures
5320 
5321 // Abstract task used to preserve (i.e. copy) any referent objects
5322 // that are in the collection set and are pointed to by reference
5323 // objects discovered by the CM ref processor.
5324 
5325 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5326 protected:
5327   G1CollectedHeap* _g1h;
5328   RefToScanQueueSet      *_queues;
5329   ParallelTaskTerminator _terminator;
5330   uint _n_workers;
5331 
5332 public:
5333   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
5334     AbstractGangTask("ParPreserveCMReferents"),
5335     _g1h(g1h),
5336     _queues(task_queues),
5337     _terminator(workers, _queues),
5338     _n_workers(workers)
5339   { }
5340 
5341   void work(uint worker_id) {
5342     ResourceMark rm;
5343     HandleMark   hm;
5344 
5345     G1ParScanThreadState            pss(_g1h, worker_id);
5346     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5347     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5348     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5349 
5350     pss.set_evac_closure(&scan_evac_cl);
5351     pss.set_evac_failure_closure(&evac_failure_cl);
5352     pss.set_partial_scan_closure(&partial_scan_cl);
5353 
5354     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5355 
5356 
5357     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5358     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5359 
5360     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5361     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5362 
5363     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5364     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5365 
5366     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5367       // We also need to mark copied objects.
5368       copy_non_heap_cl = &copy_mark_non_heap_cl;
5369       copy_metadata_cl = &copy_mark_metadata_cl;
5370     }
5371 
5372     // Is alive closure
5373     G1AlwaysAliveClosure always_alive(_g1h);
5374 
5375     // Copying keep alive closure. Applied to referent objects that need
5376     // to be copied.
5377     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5378 
5379     ReferenceProcessor* rp = _g1h->ref_processor_cm();
5380 
5381     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5382     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5383 
5384     // limit is set using max_num_q() - which was set using ParallelGCThreads.
5385     // So this must be true - but assert just in case someone decides to
5386     // change the worker ids.
5387     assert(0 <= worker_id && worker_id < limit, "sanity");
5388     assert(!rp->discovery_is_atomic(), "check this code");
5389 
5390     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5391     for (uint idx = worker_id; idx < limit; idx += stride) {
5392       DiscoveredList& ref_list = rp->discovered_refs()[idx];
5393 
5394       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5395       while (iter.has_next()) {
5396         // Since discovery is not atomic for the CM ref processor, we
5397         // can see some null referent objects.
5398         iter.load_ptrs(DEBUG_ONLY(true));
5399         oop ref = iter.obj();
5400 
5401         // This will filter nulls.
5402         if (iter.is_referent_alive()) {
5403           iter.make_referent_alive();
5404         }
5405         iter.move_to_next();
5406       }
5407     }
5408 
5409     // Drain the queue - which may cause stealing
5410     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
5411     drain_queue.do_void();
5412     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5413     assert(pss.refs()->is_empty(), "should be");
5414   }
5415 };
5416 
5417 // Weak Reference processing during an evacuation pause (part 1).
5418 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
5419   double ref_proc_start = os::elapsedTime();
5420 
5421   ReferenceProcessor* rp = _ref_processor_stw;
5422   assert(rp->discovery_enabled(), "should have been enabled");
5423 
5424   // Any reference objects, in the collection set, that were 'discovered'
5425   // by the CM ref processor should have already been copied (either by
5426   // applying the external root copy closure to the discovered lists, or
5427   // by following an RSet entry).
5428   //
5429   // But some of the referents, that are in the collection set, that these
5430   // reference objects point to may not have been copied: the STW ref
5431   // processor would have seen that the reference object had already
5432   // been 'discovered' and would have skipped discovering the reference,
5433   // but would not have treated the reference object as a regular oop.
5434   // As a reult the copy closure would not have been applied to the
5435   // referent object.
5436   //
5437   // We need to explicitly copy these referent objects - the references
5438   // will be processed at the end of remarking.
5439   //
5440   // We also need to do this copying before we process the reference
5441   // objects discovered by the STW ref processor in case one of these
5442   // referents points to another object which is also referenced by an
5443   // object discovered by the STW ref processor.
5444 
5445   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
5446            no_of_gc_workers == workers()->active_workers(),
5447            "Need to reset active GC workers");
5448 
5449   set_par_threads(no_of_gc_workers);
5450   G1ParPreserveCMReferentsTask keep_cm_referents(this,
5451                                                  no_of_gc_workers,
5452                                                  _task_queues);
5453 
5454   if (G1CollectedHeap::use_parallel_gc_threads()) {
5455     workers()->run_task(&keep_cm_referents);
5456   } else {
5457     keep_cm_referents.work(0);
5458   }
5459 
5460   set_par_threads(0);
5461 
5462   // Closure to test whether a referent is alive.
5463   G1STWIsAliveClosure is_alive(this);
5464 
5465   // Even when parallel reference processing is enabled, the processing
5466   // of JNI refs is serial and performed serially by the current thread
5467   // rather than by a worker. The following PSS will be used for processing
5468   // JNI refs.
5469 
5470   // Use only a single queue for this PSS.
5471   G1ParScanThreadState pss(this, 0);
5472 
5473   // We do not embed a reference processor in the copying/scanning
5474   // closures while we're actually processing the discovered
5475   // reference objects.
5476   G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
5477   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5478   G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
5479 
5480   pss.set_evac_closure(&scan_evac_cl);
5481   pss.set_evac_failure_closure(&evac_failure_cl);
5482   pss.set_partial_scan_closure(&partial_scan_cl);
5483 
5484   assert(pss.refs()->is_empty(), "pre-condition");
5485 
5486   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5487   G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
5488 
5489   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5490   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5491 
5492   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5493   OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5494 
5495   if (_g1h->g1_policy()->during_initial_mark_pause()) {
5496     // We also need to mark copied objects.
5497     copy_non_heap_cl = &copy_mark_non_heap_cl;
5498     copy_metadata_cl = &copy_mark_metadata_cl;
5499   }
5500 
5501   // Keep alive closure.
5502   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
5503 
5504   // Serial Complete GC closure
5505   G1STWDrainQueueClosure drain_queue(this, &pss);
5506 
5507   // Setup the soft refs policy...
5508   rp->setup_policy(false);
5509 
5510   if (!rp->processing_is_mt()) {
5511     // Serial reference processing...
5512     rp->process_discovered_references(&is_alive,
5513                                       &keep_alive,
5514                                       &drain_queue,
5515                                       NULL);
5516   } else {
5517     // Parallel reference processing
5518     assert(rp->num_q() == no_of_gc_workers, "sanity");
5519     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5520 
5521     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5522     rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
5523   }
5524 
5525   // We have completed copying any necessary live referent objects
5526   // (that were not copied during the actual pause) so we can
5527   // retire any active alloc buffers
5528   pss.retire_alloc_buffers();
5529   assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5530 
5531   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5532   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5533 }
5534 
5535 // Weak Reference processing during an evacuation pause (part 2).
5536 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
5537   double ref_enq_start = os::elapsedTime();
5538 
5539   ReferenceProcessor* rp = _ref_processor_stw;
5540   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5541 
5542   // Now enqueue any remaining on the discovered lists on to
5543   // the pending list.
5544   if (!rp->processing_is_mt()) {
5545     // Serial reference processing...
5546     rp->enqueue_discovered_references();
5547   } else {
5548     // Parallel reference enqueuing
5549 
5550     assert(no_of_gc_workers == workers()->active_workers(),
5551            "Need to reset active workers");
5552     assert(rp->num_q() == no_of_gc_workers, "sanity");
5553     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5554 
5555     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5556     rp->enqueue_discovered_references(&par_task_executor);
5557   }
5558 
5559   rp->verify_no_references_recorded();
5560   assert(!rp->discovery_enabled(), "should have been disabled");
5561 
5562   // FIXME
5563   // CM's reference processing also cleans up the string and symbol tables.
5564   // Should we do that here also? We could, but it is a serial operation
5565   // and could signicantly increase the pause time.
5566 
5567   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5568   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5569 }
5570 
5571 void G1CollectedHeap::evacuate_collection_set() {
5572   _expand_heap_after_alloc_failure = true;
5573   set_evacuation_failed(false);
5574 
5575   // Should G1EvacuationFailureALot be in effect for this GC?
5576   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5577 
5578   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5579   concurrent_g1_refine()->set_use_cache(false);
5580   concurrent_g1_refine()->clear_hot_cache_claimed_index();
5581 
5582   uint n_workers;
5583   if (G1CollectedHeap::use_parallel_gc_threads()) {
5584     n_workers =
5585       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5586                                      workers()->active_workers(),
5587                                      Threads::number_of_non_daemon_threads());
5588     assert(UseDynamicNumberOfGCThreads ||
5589            n_workers == workers()->total_workers(),
5590            "If not dynamic should be using all the  workers");
5591     workers()->set_active_workers(n_workers);
5592     set_par_threads(n_workers);
5593   } else {
5594     assert(n_par_threads() == 0,
5595            "Should be the original non-parallel value");
5596     n_workers = 1;
5597   }
5598 
5599   G1ParTask g1_par_task(this, _task_queues);
5600 
5601   init_for_evac_failure(NULL);
5602 
5603   rem_set()->prepare_for_younger_refs_iterate(true);
5604 
5605   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5606   double start_par_time_sec = os::elapsedTime();
5607   double end_par_time_sec;
5608 
5609   {
5610     StrongRootsScope srs(this);
5611 
5612     if (G1CollectedHeap::use_parallel_gc_threads()) {
5613       // The individual threads will set their evac-failure closures.
5614       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5615       // These tasks use ShareHeap::_process_strong_tasks
5616       assert(UseDynamicNumberOfGCThreads ||
5617              workers()->active_workers() == workers()->total_workers(),
5618              "If not dynamic should be using all the  workers");
5619       workers()->run_task(&g1_par_task);
5620     } else {
5621       g1_par_task.set_for_termination(n_workers);
5622       g1_par_task.work(0);
5623     }
5624     end_par_time_sec = os::elapsedTime();
5625 
5626     // Closing the inner scope will execute the destructor
5627     // for the StrongRootsScope object. We record the current
5628     // elapsed time before closing the scope so that time
5629     // taken for the SRS destructor is NOT included in the
5630     // reported parallel time.
5631   }
5632 
5633   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5634   g1_policy()->phase_times()->record_par_time(par_time_ms);
5635 
5636   double code_root_fixup_time_ms =
5637         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5638   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5639 
5640   set_par_threads(0);
5641 
5642   // Process any discovered reference objects - we have
5643   // to do this _before_ we retire the GC alloc regions
5644   // as we may have to copy some 'reachable' referent
5645   // objects (and their reachable sub-graphs) that were
5646   // not copied during the pause.
5647   process_discovered_references(n_workers);
5648 
5649   // Weak root processing.
5650   // Note: when JSR 292 is enabled and code blobs can contain
5651   // non-perm oops then we will need to process the code blobs
5652   // here too.
5653   {
5654     G1STWIsAliveClosure is_alive(this);
5655     G1KeepAliveClosure keep_alive(this);
5656     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5657   }
5658 
5659   release_gc_alloc_regions(n_workers);
5660   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5661 
5662   concurrent_g1_refine()->clear_hot_cache();
5663   concurrent_g1_refine()->set_use_cache(true);
5664 
5665   finalize_for_evac_failure();
5666 
5667   if (evacuation_failed()) {
5668     remove_self_forwarding_pointers();
5669 
5670     // Reset the G1EvacuationFailureALot counters and flags
5671     // Note: the values are reset only when an actual
5672     // evacuation failure occurs.
5673     NOT_PRODUCT(reset_evacuation_should_fail();)
5674   }
5675 
5676   // Enqueue any remaining references remaining on the STW
5677   // reference processor's discovered lists. We need to do
5678   // this after the card table is cleaned (and verified) as
5679   // the act of enqueuing entries on to the pending list
5680   // will log these updates (and dirty their associated
5681   // cards). We need these updates logged to update any
5682   // RSets.
5683   enqueue_discovered_references(n_workers);
5684 
5685   if (G1DeferredRSUpdate) {
5686     RedirtyLoggedCardTableEntryFastClosure redirty;
5687     dirty_card_queue_set().set_closure(&redirty);
5688     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5689 
5690     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5691     dcq.merge_bufferlists(&dirty_card_queue_set());
5692     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5693   }
5694   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5695 }
5696 
5697 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
5698                                      size_t* pre_used,
5699                                      FreeRegionList* free_list,
5700                                      OldRegionSet* old_proxy_set,
5701                                      HumongousRegionSet* humongous_proxy_set,
5702                                      HRRSCleanupTask* hrrs_cleanup_task,
5703                                      bool par) {
5704   if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
5705     if (hr->isHumongous()) {
5706       assert(hr->startsHumongous(), "we should only see starts humongous");
5707       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
5708     } else {
5709       _old_set.remove_with_proxy(hr, old_proxy_set);
5710       free_region(hr, pre_used, free_list, par);
5711     }
5712   } else {
5713     hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
5714   }
5715 }
5716 
5717 void G1CollectedHeap::free_region(HeapRegion* hr,
5718                                   size_t* pre_used,
5719                                   FreeRegionList* free_list,
5720                                   bool par) {
5721   assert(!hr->isHumongous(), "this is only for non-humongous regions");
5722   assert(!hr->is_empty(), "the region should not be empty");
5723   assert(free_list != NULL, "pre-condition");
5724 
5725   *pre_used += hr->used();
5726   hr->hr_clear(par, true /* clear_space */);
5727   free_list->add_as_head(hr);
5728 }
5729 
5730 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5731                                      size_t* pre_used,
5732                                      FreeRegionList* free_list,
5733                                      HumongousRegionSet* humongous_proxy_set,
5734                                      bool par) {
5735   assert(hr->startsHumongous(), "this is only for starts humongous regions");
5736   assert(free_list != NULL, "pre-condition");
5737   assert(humongous_proxy_set != NULL, "pre-condition");
5738 
5739   size_t hr_used = hr->used();
5740   size_t hr_capacity = hr->capacity();
5741   size_t hr_pre_used = 0;
5742   _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
5743   // We need to read this before we make the region non-humongous,
5744   // otherwise the information will be gone.
5745   uint last_index = hr->last_hc_index();
5746   hr->set_notHumongous();
5747   free_region(hr, &hr_pre_used, free_list, par);
5748 
5749   uint i = hr->hrs_index() + 1;
5750   while (i < last_index) {
5751     HeapRegion* curr_hr = region_at(i);
5752     assert(curr_hr->continuesHumongous(), "invariant");
5753     curr_hr->set_notHumongous();
5754     free_region(curr_hr, &hr_pre_used, free_list, par);
5755     i += 1;
5756   }
5757   assert(hr_pre_used == hr_used,
5758          err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
5759                  "should be the same", hr_pre_used, hr_used));
5760   *pre_used += hr_pre_used;
5761 }
5762 
5763 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
5764                                        FreeRegionList* free_list,
5765                                        OldRegionSet* old_proxy_set,
5766                                        HumongousRegionSet* humongous_proxy_set,
5767                                        bool par) {
5768   if (pre_used > 0) {
5769     Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
5770     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
5771     assert(_summary_bytes_used >= pre_used,
5772            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
5773                    "should be >= pre_used: "SIZE_FORMAT,
5774                    _summary_bytes_used, pre_used));
5775     _summary_bytes_used -= pre_used;
5776   }
5777   if (free_list != NULL && !free_list->is_empty()) {
5778     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5779     _free_list.add_as_head(free_list);
5780   }
5781   if (old_proxy_set != NULL && !old_proxy_set->is_empty()) {
5782     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5783     _old_set.update_from_proxy(old_proxy_set);
5784   }
5785   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
5786     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5787     _humongous_set.update_from_proxy(humongous_proxy_set);
5788   }
5789 }
5790 
5791 class G1ParCleanupCTTask : public AbstractGangTask {
5792   CardTableModRefBS* _ct_bs;
5793   G1CollectedHeap* _g1h;
5794   HeapRegion* volatile _su_head;
5795 public:
5796   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
5797                      G1CollectedHeap* g1h) :
5798     AbstractGangTask("G1 Par Cleanup CT Task"),
5799     _ct_bs(ct_bs), _g1h(g1h) { }
5800 
5801   void work(uint worker_id) {
5802     HeapRegion* r;
5803     while (r = _g1h->pop_dirty_cards_region()) {
5804       clear_cards(r);
5805     }
5806   }
5807 
5808   void clear_cards(HeapRegion* r) {
5809     // Cards of the survivors should have already been dirtied.
5810     if (!r->is_survivor()) {
5811       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
5812     }
5813   }
5814 };
5815 
5816 #ifndef PRODUCT
5817 class G1VerifyCardTableCleanup: public HeapRegionClosure {
5818   G1CollectedHeap* _g1h;
5819   CardTableModRefBS* _ct_bs;
5820 public:
5821   G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
5822     : _g1h(g1h), _ct_bs(ct_bs) { }
5823   virtual bool doHeapRegion(HeapRegion* r) {
5824     if (r->is_survivor()) {
5825       _g1h->verify_dirty_region(r);
5826     } else {
5827       _g1h->verify_not_dirty_region(r);
5828     }
5829     return false;
5830   }
5831 };
5832 
5833 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
5834   // All of the region should be clean.
5835   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
5836   MemRegion mr(hr->bottom(), hr->end());
5837   ct_bs->verify_not_dirty_region(mr);
5838 }
5839 
5840 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
5841   // We cannot guarantee that [bottom(),end()] is dirty.  Threads
5842   // dirty allocated blocks as they allocate them. The thread that
5843   // retires each region and replaces it with a new one will do a
5844   // maximal allocation to fill in [pre_dummy_top(),end()] but will
5845   // not dirty that area (one less thing to have to do while holding
5846   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
5847   // is dirty.
5848   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5849   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
5850   ct_bs->verify_dirty_region(mr);
5851 }
5852 
5853 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5854   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5855   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5856     verify_dirty_region(hr);
5857   }
5858 }
5859 
5860 void G1CollectedHeap::verify_dirty_young_regions() {
5861   verify_dirty_young_list(_young_list->first_region());
5862 }
5863 #endif
5864 
5865 void G1CollectedHeap::cleanUpCardTable() {
5866   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
5867   double start = os::elapsedTime();
5868 
5869   {
5870     // Iterate over the dirty cards region list.
5871     G1ParCleanupCTTask cleanup_task(ct_bs, this);
5872 
5873     if (G1CollectedHeap::use_parallel_gc_threads()) {
5874       set_par_threads();
5875       workers()->run_task(&cleanup_task);
5876       set_par_threads(0);
5877     } else {
5878       while (_dirty_cards_region_list) {
5879         HeapRegion* r = _dirty_cards_region_list;
5880         cleanup_task.clear_cards(r);
5881         _dirty_cards_region_list = r->get_next_dirty_cards_region();
5882         if (_dirty_cards_region_list == r) {
5883           // The last region.
5884           _dirty_cards_region_list = NULL;
5885         }
5886         r->set_next_dirty_cards_region(NULL);
5887       }
5888     }
5889 #ifndef PRODUCT
5890     if (G1VerifyCTCleanup || VerifyAfterGC) {
5891       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
5892       heap_region_iterate(&cleanup_verifier);
5893     }
5894 #endif
5895   }
5896 
5897   double elapsed = os::elapsedTime() - start;
5898   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
5899 }
5900 
5901 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
5902   size_t pre_used = 0;
5903   FreeRegionList local_free_list("Local List for CSet Freeing");
5904 
5905   double young_time_ms     = 0.0;
5906   double non_young_time_ms = 0.0;
5907 
5908   // Since the collection set is a superset of the the young list,
5909   // all we need to do to clear the young list is clear its
5910   // head and length, and unlink any young regions in the code below
5911   _young_list->clear();
5912 
5913   G1CollectorPolicy* policy = g1_policy();
5914 
5915   double start_sec = os::elapsedTime();
5916   bool non_young = true;
5917 
5918   HeapRegion* cur = cs_head;
5919   int age_bound = -1;
5920   size_t rs_lengths = 0;
5921 
5922   while (cur != NULL) {
5923     assert(!is_on_master_free_list(cur), "sanity");
5924     if (non_young) {
5925       if (cur->is_young()) {
5926         double end_sec = os::elapsedTime();
5927         double elapsed_ms = (end_sec - start_sec) * 1000.0;
5928         non_young_time_ms += elapsed_ms;
5929 
5930         start_sec = os::elapsedTime();
5931         non_young = false;
5932       }
5933     } else {
5934       if (!cur->is_young()) {
5935         double end_sec = os::elapsedTime();
5936         double elapsed_ms = (end_sec - start_sec) * 1000.0;
5937         young_time_ms += elapsed_ms;
5938 
5939         start_sec = os::elapsedTime();
5940         non_young = true;
5941       }
5942     }
5943 
5944     rs_lengths += cur->rem_set()->occupied();
5945 
5946     HeapRegion* next = cur->next_in_collection_set();
5947     assert(cur->in_collection_set(), "bad CS");
5948     cur->set_next_in_collection_set(NULL);
5949     cur->set_in_collection_set(false);
5950 
5951     if (cur->is_young()) {
5952       int index = cur->young_index_in_cset();
5953       assert(index != -1, "invariant");
5954       assert((uint) index < policy->young_cset_region_length(), "invariant");
5955       size_t words_survived = _surviving_young_words[index];
5956       cur->record_surv_words_in_group(words_survived);
5957 
5958       // At this point the we have 'popped' cur from the collection set
5959       // (linked via next_in_collection_set()) but it is still in the
5960       // young list (linked via next_young_region()). Clear the
5961       // _next_young_region field.
5962       cur->set_next_young_region(NULL);
5963     } else {
5964       int index = cur->young_index_in_cset();
5965       assert(index == -1, "invariant");
5966     }
5967 
5968     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
5969             (!cur->is_young() && cur->young_index_in_cset() == -1),
5970             "invariant" );
5971 
5972     if (!cur->evacuation_failed()) {
5973       MemRegion used_mr = cur->used_region();
5974 
5975       // And the region is empty.
5976       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
5977       free_region(cur, &pre_used, &local_free_list, false /* par */);
5978     } else {
5979       cur->uninstall_surv_rate_group();
5980       if (cur->is_young()) {
5981         cur->set_young_index_in_cset(-1);
5982       }
5983       cur->set_not_young();
5984       cur->set_evacuation_failed(false);
5985       // The region is now considered to be old.
5986       _old_set.add(cur);
5987     }
5988     cur = next;
5989   }
5990 
5991   policy->record_max_rs_lengths(rs_lengths);
5992   policy->cset_regions_freed();
5993 
5994   double end_sec = os::elapsedTime();
5995   double elapsed_ms = (end_sec - start_sec) * 1000.0;
5996 
5997   if (non_young) {
5998     non_young_time_ms += elapsed_ms;
5999   } else {
6000     young_time_ms += elapsed_ms;
6001   }
6002 
6003   update_sets_after_freeing_regions(pre_used, &local_free_list,
6004                                     NULL /* old_proxy_set */,
6005                                     NULL /* humongous_proxy_set */,
6006                                     false /* par */);
6007   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
6008   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
6009 }
6010 
6011 // This routine is similar to the above but does not record
6012 // any policy statistics or update free lists; we are abandoning
6013 // the current incremental collection set in preparation of a
6014 // full collection. After the full GC we will start to build up
6015 // the incremental collection set again.
6016 // This is only called when we're doing a full collection
6017 // and is immediately followed by the tearing down of the young list.
6018 
6019 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
6020   HeapRegion* cur = cs_head;
6021 
6022   while (cur != NULL) {
6023     HeapRegion* next = cur->next_in_collection_set();
6024     assert(cur->in_collection_set(), "bad CS");
6025     cur->set_next_in_collection_set(NULL);
6026     cur->set_in_collection_set(false);
6027     cur->set_young_index_in_cset(-1);
6028     cur = next;
6029   }
6030 }
6031 
6032 void G1CollectedHeap::set_free_regions_coming() {
6033   if (G1ConcRegionFreeingVerbose) {
6034     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
6035                            "setting free regions coming");
6036   }
6037 
6038   assert(!free_regions_coming(), "pre-condition");
6039   _free_regions_coming = true;
6040 }
6041 
6042 void G1CollectedHeap::reset_free_regions_coming() {
6043   assert(free_regions_coming(), "pre-condition");
6044 
6045   {
6046     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6047     _free_regions_coming = false;
6048     SecondaryFreeList_lock->notify_all();
6049   }
6050 
6051   if (G1ConcRegionFreeingVerbose) {
6052     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
6053                            "reset free regions coming");
6054   }
6055 }
6056 
6057 void G1CollectedHeap::wait_while_free_regions_coming() {
6058   // Most of the time we won't have to wait, so let's do a quick test
6059   // first before we take the lock.
6060   if (!free_regions_coming()) {
6061     return;
6062   }
6063 
6064   if (G1ConcRegionFreeingVerbose) {
6065     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6066                            "waiting for free regions");
6067   }
6068 
6069   {
6070     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6071     while (free_regions_coming()) {
6072       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
6073     }
6074   }
6075 
6076   if (G1ConcRegionFreeingVerbose) {
6077     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6078                            "done waiting for free regions");
6079   }
6080 }
6081 
6082 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
6083   assert(heap_lock_held_for_gc(),
6084               "the heap lock should already be held by or for this thread");
6085   _young_list->push_region(hr);
6086 }
6087 
6088 class NoYoungRegionsClosure: public HeapRegionClosure {
6089 private:
6090   bool _success;
6091 public:
6092   NoYoungRegionsClosure() : _success(true) { }
6093   bool doHeapRegion(HeapRegion* r) {
6094     if (r->is_young()) {
6095       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
6096                              r->bottom(), r->end());
6097       _success = false;
6098     }
6099     return false;
6100   }
6101   bool success() { return _success; }
6102 };
6103 
6104 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
6105   bool ret = _young_list->check_list_empty(check_sample);
6106 
6107   if (check_heap) {
6108     NoYoungRegionsClosure closure;
6109     heap_region_iterate(&closure);
6110     ret = ret && closure.success();
6111   }
6112 
6113   return ret;
6114 }
6115 
6116 class TearDownRegionSetsClosure : public HeapRegionClosure {
6117 private:
6118   OldRegionSet *_old_set;
6119 
6120 public:
6121   TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { }
6122 
6123   bool doHeapRegion(HeapRegion* r) {
6124     if (r->is_empty()) {
6125       // We ignore empty regions, we'll empty the free list afterwards
6126     } else if (r->is_young()) {
6127       // We ignore young regions, we'll empty the young list afterwards
6128     } else if (r->isHumongous()) {
6129       // We ignore humongous regions, we're not tearing down the
6130       // humongous region set
6131     } else {
6132       // The rest should be old
6133       _old_set->remove(r);
6134     }
6135     return false;
6136   }
6137 
6138   ~TearDownRegionSetsClosure() {
6139     assert(_old_set->is_empty(), "post-condition");
6140   }
6141 };
6142 
6143 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6144   assert_at_safepoint(true /* should_be_vm_thread */);
6145 
6146   if (!free_list_only) {
6147     TearDownRegionSetsClosure cl(&_old_set);
6148     heap_region_iterate(&cl);
6149 
6150     // Need to do this after the heap iteration to be able to
6151     // recognize the young regions and ignore them during the iteration.
6152     _young_list->empty_list();
6153   }
6154   _free_list.remove_all();
6155 }
6156 
6157 class RebuildRegionSetsClosure : public HeapRegionClosure {
6158 private:
6159   bool            _free_list_only;
6160   OldRegionSet*   _old_set;
6161   FreeRegionList* _free_list;
6162   size_t          _total_used;
6163 
6164 public:
6165   RebuildRegionSetsClosure(bool free_list_only,
6166                            OldRegionSet* old_set, FreeRegionList* free_list) :
6167     _free_list_only(free_list_only),
6168     _old_set(old_set), _free_list(free_list), _total_used(0) {
6169     assert(_free_list->is_empty(), "pre-condition");
6170     if (!free_list_only) {
6171       assert(_old_set->is_empty(), "pre-condition");
6172     }
6173   }
6174 
6175   bool doHeapRegion(HeapRegion* r) {
6176     if (r->continuesHumongous()) {
6177       return false;
6178     }
6179 
6180     if (r->is_empty()) {
6181       // Add free regions to the free list
6182       _free_list->add_as_tail(r);
6183     } else if (!_free_list_only) {
6184       assert(!r->is_young(), "we should not come across young regions");
6185 
6186       if (r->isHumongous()) {
6187         // We ignore humongous regions, we left the humongous set unchanged
6188       } else {
6189         // The rest should be old, add them to the old set
6190         _old_set->add(r);
6191       }
6192       _total_used += r->used();
6193     }
6194 
6195     return false;
6196   }
6197 
6198   size_t total_used() {
6199     return _total_used;
6200   }
6201 };
6202 
6203 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6204   assert_at_safepoint(true /* should_be_vm_thread */);
6205 
6206   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
6207   heap_region_iterate(&cl);
6208 
6209   if (!free_list_only) {
6210     _summary_bytes_used = cl.total_used();
6211   }
6212   assert(_summary_bytes_used == recalculate_used(),
6213          err_msg("inconsistent _summary_bytes_used, "
6214                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6215                  _summary_bytes_used, recalculate_used()));
6216 }
6217 
6218 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6219   _refine_cte_cl->set_concurrent(concurrent);
6220 }
6221 
6222 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6223   HeapRegion* hr = heap_region_containing(p);
6224   if (hr == NULL) {
6225     return false;
6226   } else {
6227     return hr->is_in(p);
6228   }
6229 }
6230 
6231 // Methods for the mutator alloc region
6232 
6233 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6234                                                       bool force) {
6235   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6236   assert(!force || g1_policy()->can_expand_young_list(),
6237          "if force is true we should be able to expand the young list");
6238   bool young_list_full = g1_policy()->is_young_list_full();
6239   if (force || !young_list_full) {
6240     HeapRegion* new_alloc_region = new_region(word_size,
6241                                               false /* do_expand */);
6242     if (new_alloc_region != NULL) {
6243       set_region_short_lived_locked(new_alloc_region);
6244       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6245       return new_alloc_region;
6246     }
6247   }
6248   return NULL;
6249 }
6250 
6251 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6252                                                   size_t allocated_bytes) {
6253   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6254   assert(alloc_region->is_young(), "all mutator alloc regions should be young");
6255 
6256   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6257   _summary_bytes_used += allocated_bytes;
6258   _hr_printer.retire(alloc_region);
6259   // We update the eden sizes here, when the region is retired,
6260   // instead of when it's allocated, since this is the point that its
6261   // used space has been recored in _summary_bytes_used.
6262   g1mm()->update_eden_size();
6263 }
6264 
6265 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
6266                                                     bool force) {
6267   return _g1h->new_mutator_alloc_region(word_size, force);
6268 }
6269 
6270 void G1CollectedHeap::set_par_threads() {
6271   // Don't change the number of workers.  Use the value previously set
6272   // in the workgroup.
6273   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
6274   uint n_workers = workers()->active_workers();
6275   assert(UseDynamicNumberOfGCThreads ||
6276            n_workers == workers()->total_workers(),
6277       "Otherwise should be using the total number of workers");
6278   if (n_workers == 0) {
6279     assert(false, "Should have been set in prior evacuation pause.");
6280     n_workers = ParallelGCThreads;
6281     workers()->set_active_workers(n_workers);
6282   }
6283   set_par_threads(n_workers);
6284 }
6285 
6286 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
6287                                        size_t allocated_bytes) {
6288   _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
6289 }
6290 
6291 // Methods for the GC alloc regions
6292 
6293 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6294                                                  uint count,
6295                                                  GCAllocPurpose ap) {
6296   assert(FreeList_lock->owned_by_self(), "pre-condition");
6297 
6298   if (count < g1_policy()->max_regions(ap)) {
6299     HeapRegion* new_alloc_region = new_region(word_size,
6300                                               true /* do_expand */);
6301     if (new_alloc_region != NULL) {
6302       // We really only need to do this for old regions given that we
6303       // should never scan survivors. But it doesn't hurt to do it
6304       // for survivors too.
6305       new_alloc_region->set_saved_mark();
6306       if (ap == GCAllocForSurvived) {
6307         new_alloc_region->set_survivor();
6308         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6309       } else {
6310         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6311       }
6312       bool during_im = g1_policy()->during_initial_mark_pause();
6313       new_alloc_region->note_start_of_copying(during_im);
6314       return new_alloc_region;
6315     } else {
6316       g1_policy()->note_alloc_region_limit_reached(ap);
6317     }
6318   }
6319   return NULL;
6320 }
6321 
6322 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6323                                              size_t allocated_bytes,
6324                                              GCAllocPurpose ap) {
6325   bool during_im = g1_policy()->during_initial_mark_pause();
6326   alloc_region->note_end_of_copying(during_im);
6327   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6328   if (ap == GCAllocForSurvived) {
6329     young_list()->add_survivor_region(alloc_region);
6330   } else {
6331     _old_set.add(alloc_region);
6332   }
6333   _hr_printer.retire(alloc_region);
6334 }
6335 
6336 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
6337                                                        bool force) {
6338   assert(!force, "not supported for GC alloc regions");
6339   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
6340 }
6341 
6342 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
6343                                           size_t allocated_bytes) {
6344   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6345                                GCAllocForSurvived);
6346 }
6347 
6348 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
6349                                                   bool force) {
6350   assert(!force, "not supported for GC alloc regions");
6351   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
6352 }
6353 
6354 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
6355                                      size_t allocated_bytes) {
6356   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6357                                GCAllocForTenured);
6358 }
6359 // Heap region set verification
6360 
6361 class VerifyRegionListsClosure : public HeapRegionClosure {
6362 private:
6363   FreeRegionList*     _free_list;
6364   OldRegionSet*       _old_set;
6365   HumongousRegionSet* _humongous_set;
6366   uint                _region_count;
6367 
6368 public:
6369   VerifyRegionListsClosure(OldRegionSet* old_set,
6370                            HumongousRegionSet* humongous_set,
6371                            FreeRegionList* free_list) :
6372     _old_set(old_set), _humongous_set(humongous_set),
6373     _free_list(free_list), _region_count(0) { }
6374 
6375   uint region_count() { return _region_count; }
6376 
6377   bool doHeapRegion(HeapRegion* hr) {
6378     _region_count += 1;
6379 
6380     if (hr->continuesHumongous()) {
6381       return false;
6382     }
6383 
6384     if (hr->is_young()) {
6385       // TODO
6386     } else if (hr->startsHumongous()) {
6387       _humongous_set->verify_next_region(hr);
6388     } else if (hr->is_empty()) {
6389       _free_list->verify_next_region(hr);
6390     } else {
6391       _old_set->verify_next_region(hr);
6392     }
6393     return false;
6394   }
6395 };
6396 
6397 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
6398                                              HeapWord* bottom) {
6399   HeapWord* end = bottom + HeapRegion::GrainWords;
6400   MemRegion mr(bottom, end);
6401   assert(_g1_reserved.contains(mr), "invariant");
6402   // This might return NULL if the allocation fails
6403   return new HeapRegion(hrs_index, _bot_shared, mr);
6404 }
6405 
6406 void G1CollectedHeap::verify_region_sets() {
6407   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6408 
6409   // First, check the explicit lists.
6410   _free_list.verify();
6411   {
6412     // Given that a concurrent operation might be adding regions to
6413     // the secondary free list we have to take the lock before
6414     // verifying it.
6415     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6416     _secondary_free_list.verify();
6417   }
6418   _old_set.verify();
6419   _humongous_set.verify();
6420 
6421   // If a concurrent region freeing operation is in progress it will
6422   // be difficult to correctly attributed any free regions we come
6423   // across to the correct free list given that they might belong to
6424   // one of several (free_list, secondary_free_list, any local lists,
6425   // etc.). So, if that's the case we will skip the rest of the
6426   // verification operation. Alternatively, waiting for the concurrent
6427   // operation to complete will have a non-trivial effect on the GC's
6428   // operation (no concurrent operation will last longer than the
6429   // interval between two calls to verification) and it might hide
6430   // any issues that we would like to catch during testing.
6431   if (free_regions_coming()) {
6432     return;
6433   }
6434 
6435   // Make sure we append the secondary_free_list on the free_list so
6436   // that all free regions we will come across can be safely
6437   // attributed to the free_list.
6438   append_secondary_free_list_if_not_empty_with_lock();
6439 
6440   // Finally, make sure that the region accounting in the lists is
6441   // consistent with what we see in the heap.
6442   _old_set.verify_start();
6443   _humongous_set.verify_start();
6444   _free_list.verify_start();
6445 
6446   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
6447   heap_region_iterate(&cl);
6448 
6449   _old_set.verify_end();
6450   _humongous_set.verify_end();
6451   _free_list.verify_end();
6452 }