8196341: Add JFR events for parallel phases of G1

/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1MemoryPool.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/vm_operations_g1.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
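// Initialized during heap initialization from the chosen region size;
// is_humongous() compares requested allocation sizes against this threshold
// (in this version of G1 it works out to half of HeapRegion::GrainWords).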

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock.  This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM.  (Note that this does not
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)
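//
// For orientation, the slow path for a regular (non-humongous) allocation, as
// far as it is visible in this file, is roughly:
//   mem_allocate() / allocate_new_tlab()
//     -> attempt_allocation()        (first-level attempt, no Heap_lock)
//       -> attempt_allocation_slow() (takes the Heap_lock, may schedule a GC)
// Humongous requests go through attempt_allocation_humongous() instead.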

class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 private:
  size_t _num_dirtied;
  G1CollectedHeap* _g1h;
  G1CardTable* _g1_ct;

  HeapRegion* region_for_card(jbyte* card_ptr) const {
    return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
  }

  bool will_become_free(HeapRegion* hr) const {
    // A region will be freed by free_collection_set if the region is in the
    // collection set and has not had an evacuation failure.
    return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
  }

 public:
  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
    _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    HeapRegion* hr = region_for_card(card_ptr);

    // Should only dirty cards in regions that won't be freed.
    if (!will_become_free(hr)) {
      *card_ptr = G1CardTable::dirty_card_val();
      _num_dirtied++;
    }

    return true;
  }

  size_t num_dirtied()   const { return _num_dirtied; }
};
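// The closure above is used after evacuation, in the post-evacuation "Redirty
// Cards" phase, to re-dirty the card table entries recorded in the dirty card
// queues used during the pause; cards for regions that are about to be freed
// are skipped (see will_become_free()).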


void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}

void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}


HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                             MemRegion mr) {
  return new HeapRegion(hrs_index, bot(), mr);
}

// Private methods.

HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
  assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
         "the only time we use this to allocate a humongous region is "
         "when we are allocating a single humongous region");

  HeapRegion* res = _hrm.allocate_free_region(is_old);

  if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
    // Currently, only attempts to allocate GC alloc regions set
    // do_expand to true. So, we should only reach here during a
    // safepoint. If this assumption changes we might have to
    // reconsider the use of _expand_heap_after_alloc_failure.
    assert(SafepointSynchronize::is_at_safepoint(), "invariant");
    log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
                              word_size * HeapWordSize);

    if (expand(word_size * HeapWordSize)) {
      // Given that expand() succeeded in expanding the heap, and we
      // always expand the heap by an amount aligned to the heap
      // region size, the free list should in theory not be empty.
      // In either case allocate_free_region() will check for NULL.
      res = _hrm.allocate_free_region(is_old);
    } else {
      _expand_heap_after_alloc_failure = false;
    }
  }
  return res;
}

HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                           uint num_regions,
                                                           size_t word_size) {
  assert(first != G1_NO_HRM_INDEX, "pre-condition");
  assert(is_humongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  // Index of last region in the series.
  uint last = first + num_regions - 1;

  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
  // refinement threads refining cards on these regions and
  // potentially wanting to refine the BOT as they are scanning
  // those cards (this can happen shortly after a cleanup; see CR
  // 6991377). So we have to set up the region(s) carefully and in
  // a specific order.

  // The word size sum of all the regions we will allocate.
  size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
  assert(word_size <= word_size_sum, "sanity");

  // This will be the "starts humongous" region.
  HeapRegion* first_hr = region_at(first);
  // The header of the new object will be placed at the bottom of
  // the first region.
  HeapWord* new_obj = first_hr->bottom();
  // This will be the new top of the new object.
  HeapWord* obj_top = new_obj + word_size;

  // First, we need to zero the header of the space that we will be
  // allocating. When we update top further down, some refinement
  // threads might try to scan the region. By zeroing the header we
  // ensure that any thread that will try to scan the region will
  // come across the zero klass word and bail out.
  //
  // NOTE: It would not have been correct to have used
  // CollectedHeap::fill_with_object() and make the space look like
  // an int array. The thread that is doing the allocation will
  // later update the object header to a potentially different array
  // type and, for a very short period of time, the klass and length
  // fields will be inconsistent. This could cause a refinement
  // thread to calculate the object size incorrectly.
  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

  // Next, pad out the unused tail of the last region with filler
  // objects, for improved usage accounting.
  // How many words we use for filler objects.
  size_t word_fill_size = word_size_sum - word_size;

  // How many words of memory we "waste" because the tail cannot hold a filler object.
  size_t words_not_fillable = 0;

  if (word_fill_size >= min_fill_size()) {
    fill_with_objects(obj_top, word_fill_size);
  } else if (word_fill_size > 0) {
    // We have space to fill, but we cannot fit an object there.
    words_not_fillable = word_fill_size;
    word_fill_size = 0;
  }

  // We will set up the first region as "starts humongous". This
  // will also update the BOT covering all the regions to reflect
  // that there is a single object that starts at the bottom of the
  // first region.
  first_hr->set_starts_humongous(obj_top, word_fill_size);
  _g1_policy->remset_tracker()->update_at_allocate(first_hr);
  // Then, if there are any, we will set up the "continues
  // humongous" regions.
  HeapRegion* hr = NULL;
  for (uint i = first + 1; i <= last; ++i) {
    hr = region_at(i);
    hr->set_continues_humongous(first_hr);
    _g1_policy->remset_tracker()->update_at_allocate(hr);
  }

  // Up to this point no concurrent thread would have been able to
  // do any scanning on any region in this series. All the top
  // fields still point to bottom, so the intersection between
  // [bottom,top] and [card_start,card_end] will be empty. Before we
  // update the top fields, we'll do a storestore to make sure that
  // no thread sees the update to top before the zeroing of the
  // object header and the BOT initialization.
  OrderAccess::storestore();

  // Now, we will update the top fields of all the regions in the series
  // except the last one.
  for (uint i = first; i < last; ++i) {
    hr = region_at(i);
    hr->set_top(hr->end());
  }

  hr = region_at(last);
  // If we cannot fit a filler object, we must set top to the end
  // of the humongous object, otherwise we cannot iterate the heap
  // and the BOT will not be complete.
  hr->set_top(hr->end() - words_not_fillable);

  assert(hr->bottom() < obj_top && obj_top <= hr->end(),
         "obj_top should be in last region");

  _verifier->check_bitmaps("Humongous Region Allocation", first_hr);

  assert(words_not_fillable == 0 ||
         first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
         "Miscalculation in humongous allocation");

  increase_used((word_size_sum - words_not_fillable) * HeapWordSize);

  for (uint i = first; i <= last; ++i) {
    hr = region_at(i);
    _humongous_set.add(hr);
    _hr_printer.alloc(hr);
  }

  return new_obj;
}

size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
  assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
  return align_up(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
}
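// For example, assuming a 1 MB region size (GrainWords == 131072 with 8-byte
// HeapWords), a request of 300000 words is humongous and is rounded up to
// align_up(300000, 131072) / 131072 == 3 regions.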

// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  _verifier->verify_region_sets_optional();

  uint first = G1_NO_HRM_INDEX;
  uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);

  if (obj_regions == 1) {
    // Only one region to allocate, try to use a fast path by directly allocating
    // from the free lists. Do not try to expand here, we will potentially do that
    // later.
    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
    if (hr != NULL) {
      first = hr->hrm_index();
    }
  } else {
    // Policy: Try only empty regions (i.e. already committed first). Maybe we
    // are lucky enough to find some.
    first = _hrm.find_contiguous_only_empty(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      _hrm.allocate_free_regions_starting_at(first, obj_regions);
    }
  }

  if (first == G1_NO_HRM_INDEX) {
    // Policy: We could not find enough regions for the humongous object in the
    // free list. Look through the heap to find a mix of free and uncommitted regions.
    // If so, try expansion.
    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      // We found something. Make sure these regions are committed, i.e. expand
      // the heap. Alternatively we could do a defragmentation GC.
      log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                    word_size * HeapWordSize);

      _hrm.expand_at(first, obj_regions, workers());
      g1_policy()->record_new_heap_size(num_regions());

#ifdef ASSERT
      for (uint i = first; i < first + obj_regions; ++i) {
        HeapRegion* hr = region_at(i);
        assert(hr->is_free(), "sanity");
        assert(hr->is_empty(), "sanity");
        assert(is_on_master_free_list(hr), "sanity");
      }
#endif
      _hrm.allocate_free_regions_starting_at(first, obj_regions);
    } else {
      // Policy: Potentially trigger a defragmentation GC.
    }
  }

  HeapWord* result = NULL;
  if (first != G1_NO_HRM_INDEX) {
    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
    assert(result != NULL, "it should always return a valid result");

    // A successful humongous object allocation changes the used space
    // information of the old generation so we need to recalculate the
    // sizes and update the jstat counters here.
    g1mm()->update_sizes();
  }

  _verifier->verify_region_sets_optional();

  return result;
}

HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_size,
                                             size_t requested_size,
                                             size_t* actual_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(requested_size), "we do not allow humongous TLABs");

  return attempt_allocation(min_size, requested_size, actual_size);
}
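// A TLAB request may be satisfied with anything between min_size and
// requested_size words; the size actually handed out is reported back
// through *actual_size by attempt_allocation().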

HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool*  gc_overhead_limit_was_exceeded) {
  assert_heap_not_locked_and_not_at_safepoint();

  if (is_humongous(word_size)) {
    return attempt_allocation_humongous(word_size);
  }
  size_t dummy = 0;
  return attempt_allocation(word_size, word_size, &dummy);
}

HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
  ResourceMark rm; // For retrieving the thread names in log messages.

  // Make sure you read the note in attempt_allocation_humongous().

  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
         "be called for humongous allocation requests");

  // We should only get here after the first-level allocation attempt
  // (attempt_allocation()) failed to allocate.

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    uint gc_count_before;

    {
      MutexLockerEx x(Heap_lock);
      result = _allocator->attempt_allocation_locked(word_size);
      if (result != NULL) {
        return result;
      }

      // If the GCLocker is active and we are bound for a GC, try expanding the young gen.
      // This is different from the case where only GCLocker::needs_gc() is set: because
      // the GCLocker is still active here, waiting for it to clear could take a long
      // time, so try to satisfy the allocation by expanding instead.
      if (GCLocker::is_active_and_needs_gc() && g1_policy()->can_expand_young_list()) {
        // No need for an ergo message here, can_expand_young_list() does this when
        // it returns true.
        result = _allocator->attempt_allocation_force(word_size);
        if (result != NULL) {
          return result;
        }
      }
      // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
      // the GCLocker initiated GC has been performed and then retry. This includes
      // the case when the GCLocker is no longer active but its GC has not yet been
      // performed.
      should_try_gc = !GCLocker::needs_gc();
      // Read the GC count while still holding the Heap_lock.
      gc_count_before = total_collections();
    }

    if (should_try_gc) {
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                   GCCause::_g1_inc_collection_pause);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
                             Thread::current()->name(), p2i(result));
        return result;
      }

      if (succeeded) {
        // We successfully scheduled a collection which failed to allocate. No
        // point in trying to allocate further. We'll just return NULL.
        log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
                             SIZE_FORMAT " words", Thread::current()->name(), word_size);
        return NULL;
      }
      log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
                           Thread::current()->name(), word_size);
    } else {
      // Failed to schedule a collection.
      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
        log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
                               SIZE_FORMAT " words", Thread::current()->name(), word_size);
        return NULL;
      }
      log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GCLocker::stall_until_clear();
      gclocker_retry_count += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
494     // allocation attempt in case another thread successfully                                                              
495     // performed a collection and reclaimed enough space. We do the                                                        
496     // first attempt (without holding the Heap_lock) here and the                                                          
497     // follow-on attempt will be at the start of the next loop                                                             
498     // iteration (after taking the Heap_lock).                                                                             
499     size_t dummy = 0;                                                                                                      
500     result = _allocator->attempt_allocation(word_size, word_size, &dummy);                                                 
501     if (result != NULL) {                                                                                                  
502       return result;                                                                                                       
503     }                                                                                                                      
504 
505     // Give a warning if we seem to be looping forever.                                                                    
506     if ((QueuedAllocationWarningCount > 0) &&                                                                              
507         (try_count % QueuedAllocationWarningCount == 0)) {                                                                 
508       log_warning(gc, alloc)("%s:  Retried allocation %u times for " SIZE_FORMAT " words",                                 
509                              Thread::current()->name(), try_count, word_size);                                             
510     }                                                                                                                      
511   }                                                                                                                        
512 
513   ShouldNotReachHere();                                                                                                    
514   return NULL;                                                                                                             
515 }                                                                                                                          
516 
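// Start an archive allocation range. Lazily creates the G1ArchiveAllocator
// (for an open or closed archive, as requested) on first use; subsequent
// archive_mem_allocate() calls allocate from it until the range is completed
// by end_archive_alloc_range().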
517 void G1CollectedHeap::begin_archive_alloc_range(bool open) {                                                               
518   assert_at_safepoint_on_vm_thread();                                                                                      
519   if (_archive_allocator == NULL) {                                                                                        
520     _archive_allocator = G1ArchiveAllocator::create_allocator(this, open);                                                 
521   }                                                                                                                        
522 }                                                                                                                          
523 
524 bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {                                                       
525   // Allocations in archive regions cannot be of a size that would be considered                                           
526   // humongous even for a minimum-sized region, because G1 region sizes/boundaries                                         
527   // may be different at archive-restore time.                                                                             
528   return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());                                     
529 }                                                                                                                          
530 
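// Allocate word_size words from the current archive range, or return NULL if
// the request is too large (see is_archive_alloc_too_large() above).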
531 HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {                                                        
532   assert_at_safepoint_on_vm_thread();                                                                                      
533   assert(_archive_allocator != NULL, "_archive_allocator not initialized");                                                
534   if (is_archive_alloc_too_large(word_size)) {                                                                             
535     return NULL;                                                                                                           
536   }                                                                                                                        
537   return _archive_allocator->archive_mem_allocate(word_size);                                                              
538 }                                                                                                                          
539 
540 void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,                                            
541                                               size_t end_alignment_in_bytes) {                                             
542   assert_at_safepoint_on_vm_thread();                                                                                      
543   assert(_archive_allocator != NULL, "_archive_allocator not initialized");                                                
544 
545   // Call complete_archive to do the real work, filling in the MemRegion                                                   
546   // array with the archive regions.                                                                                       
547   _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);                                                    
548   delete _archive_allocator;                                                                                               
549   _archive_allocator = NULL;                                                                                               
550 }                                                                                                                          
551 
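// Sanity-check that each given MemRegion lies entirely within the reserved heap.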
552 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {                                           
553   assert(ranges != NULL, "MemRegion array NULL");                                                                          
554   assert(count != 0, "No MemRegions provided");                                                                            
555   MemRegion reserved = _hrm.reserved();                                                                                    
556   for (size_t i = 0; i < count; i++) {                                                                                     
557     if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {                                   
558       return false;                                                                                                        
559     }                                                                                                                      
560   }                                                                                                                        
561   return true;                                                                                                             
562 }                                                                                                                          
563 
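// Allocate the G1 regions backing the given MemRegions and mark them as (open
// or closed) archive regions. The ranges must be in ascending address order
// and must not overlap; returns false if a containing region cannot be allocated.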
564 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,                                                             
565                                             size_t count,                                                                  
566                                             bool open) {                                                                   
567   assert(!is_init_completed(), "Expect to be called at JVM init time");                                                    
568   assert(ranges != NULL, "MemRegion array NULL");                                                                          
569   assert(count != 0, "No MemRegions provided");                                                                            
570   MutexLockerEx x(Heap_lock);                                                                                              
571 
572   MemRegion reserved = _hrm.reserved();                                                                                    
573   HeapWord* prev_last_addr = NULL;                                                                                         
574   HeapRegion* prev_last_region = NULL;                                                                                     
575 
576   // Temporarily disable pretouching of heap pages. This interface is used                                                 
577   // when mmap'ing archived heap data in, so pre-touching is wasted.                                                       
578   FlagSetting fs(AlwaysPreTouch, false);                                                                                   
579 
580   // Enable archive object checking used by G1MarkSweep. We have to let it know                                            
581   // about each archive range, so that objects in those ranges aren't marked.                                              
582   G1ArchiveAllocator::enable_archive_object_check();                                                                       
583 
584   // For each specified MemRegion range, allocate the corresponding G1                                                     
585   // regions and mark them as archive regions. We expect the ranges                                                        
586   // in ascending starting address order, without overlap.                                                                 
587   for (size_t i = 0; i < count; i++) {                                                                                     
588     MemRegion curr_range = ranges[i];                                                                                      
589     HeapWord* start_address = curr_range.start();                                                                          
590     size_t word_size = curr_range.word_size();                                                                             
591     HeapWord* last_address = curr_range.last();                                                                            
592     size_t commits = 0;                                                                                                    
593 
594     guarantee(reserved.contains(start_address) && reserved.contains(last_address),                                         
595               "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",                                                
596               p2i(start_address), p2i(last_address));                                                                      
597     guarantee(start_address > prev_last_addr,                                                                              
598               "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,                                             
599               p2i(start_address), p2i(prev_last_addr));                                                                    
600     prev_last_addr = last_address;                                                                                         
601 
602     // Check for ranges that start in the same G1 region in which the previous                                             
603     // range ended, and adjust the start address so we don't try to allocate                                               
604     // the same region again. If the current range is entirely within that                                                 
605     // region, skip it, just adjusting the recorded top.                                                                   
606     HeapRegion* start_region = _hrm.addr_to_region(start_address);                                                         
607     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {                                                
608       start_address = start_region->end();                                                                                 
609       if (start_address > last_address) {                                                                                  
610         increase_used(word_size * HeapWordSize);                                                                           
611         start_region->set_top(last_address + 1);                                                                           
612         continue;                                                                                                          
613       }                                                                                                                    
614       start_region->set_top(start_address);                                                                                
615       curr_range = MemRegion(start_address, last_address + 1);                                                             
616       start_region = _hrm.addr_to_region(start_address);                                                                   
617     }                                                                                                                      
618 
619     // Perform the actual region allocation, exiting if it fails.                                                          
620     // Then note how much new space we have allocated.                                                                     
621     if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {                                              
622       return false;                                                                                                        
623     }                                                                                                                      
624     increase_used(word_size * HeapWordSize);                                                                               
625     if (commits != 0) {                                                                                                    
626       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",         
627                                 HeapRegion::GrainWords * HeapWordSize * commits);                                          
628 
629     }                                                                                                                      
630 
631     // Mark each G1 region touched by the range as archive, add it to                                                      
632     // the old set, and set top.                                                                                           
633     HeapRegion* curr_region = _hrm.addr_to_region(start_address);                                                          
634     HeapRegion* last_region = _hrm.addr_to_region(last_address);                                                           
635     prev_last_region = last_region;                                                                                        
636 
637     while (curr_region != NULL) {                                                                                          
638       assert(curr_region->is_empty() && !curr_region->is_pinned(),                                                         
639              "Region already in use (index %u)", curr_region->hrm_index());                                                
640       if (open) {                                                                                                          
641         curr_region->set_open_archive();                                                                                   
642       } else {                                                                                                             
643         curr_region->set_closed_archive();                                                                                 
644       }                                                                                                                    
645       _hr_printer.alloc(curr_region);                                                                                      
646       _archive_set.add(curr_region);                                                                                       
647       HeapWord* top;                                                                                                       
648       HeapRegion* next_region;                                                                                             
649       if (curr_region != last_region) {                                                                                    
650         top = curr_region->end();                                                                                          
651         next_region = _hrm.next_region_in_heap(curr_region);                                                               
652       } else {                                                                                                             
653         top = last_address + 1;                                                                                            
654         next_region = NULL;                                                                                                
655       }                                                                                                                    
656       curr_region->set_top(top);                                                                                           
657       curr_region->set_first_dead(top);                                                                                    
658       curr_region->set_end_of_live(top);                                                                                   
659       curr_region = next_region;                                                                                           
660     }                                                                                                                      
661 
662     // Notify mark-sweep of the archive                                                                                    
663     G1ArchiveAllocator::set_range_archive(curr_range, open);                                                               
664   }                                                                                                                        
665   return true;                                                                                                             
666 }                                                                                                                          
667 
668 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {                                              
669   assert(!is_init_completed(), "Expect to be called at JVM init time");                                                    
670   assert(ranges != NULL, "MemRegion array NULL");                                                                          
671   assert(count != 0, "No MemRegions provided");                                                                            
672   MemRegion reserved = _hrm.reserved();                                                                                    
673   HeapWord *prev_last_addr = NULL;                                                                                         
674   HeapRegion* prev_last_region = NULL;                                                                                     
675 
676   // For each MemRegion, create filler objects, if needed, in the G1 regions                                               
677   // that contain the address range. The address range actually within the                                                 
678   // MemRegion will not be modified. That is assumed to have been initialized                                              
679   // elsewhere, probably via an mmap of archived heap data.                                                                
680   MutexLockerEx x(Heap_lock);                                                                                              
681   for (size_t i = 0; i < count; i++) {                                                                                     
682     HeapWord* start_address = ranges[i].start();                                                                           
683     HeapWord* last_address = ranges[i].last();                                                                             
684 
685     assert(reserved.contains(start_address) && reserved.contains(last_address),                                            
686            "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",                                                   
687            p2i(start_address), p2i(last_address));                                                                         
688     assert(start_address > prev_last_addr,                                                                                 
689            "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,                                                
690            p2i(start_address), p2i(prev_last_addr));                                                                       
691 
692     HeapRegion* start_region = _hrm.addr_to_region(start_address);                                                         
693     HeapRegion* last_region = _hrm.addr_to_region(last_address);                                                           
694     HeapWord* bottom_address = start_region->bottom();                                                                     
695 
696     // Check for a range beginning in the same region in which the                                                         
697     // previous one ended.                                                                                                 
698     if (start_region == prev_last_region) {                                                                                
699       bottom_address = prev_last_addr + 1;                                                                                 
700     }                                                                                                                      
701 
702     // Verify that the regions were all marked as archive regions by                                                       
703     // alloc_archive_regions.                                                                                              
704     HeapRegion* curr_region = start_region;                                                                                
705     while (curr_region != NULL) {                                                                                          
706       guarantee(curr_region->is_archive(),                                                                                 
707                 "Expected archive region at index %u", curr_region->hrm_index());                                          
708       if (curr_region != last_region) {                                                                                    
709         curr_region = _hrm.next_region_in_heap(curr_region);                                                               
710       } else {                                                                                                             
711         curr_region = NULL;                                                                                                
712       }                                                                                                                    
713     }                                                                                                                      
714 
715     prev_last_addr = last_address;                                                                                         
716     prev_last_region = last_region;                                                                                        
717 
718     // Fill the memory below the allocated range with dummy object(s)
719     // if the region bottom does not match the range start, or if the previous
720     // range ended within the same G1 region and left a gap.
721     if (start_address != bottom_address) {                                                                                 
722       size_t fill_size = pointer_delta(start_address, bottom_address);                                                     
723       G1CollectedHeap::fill_with_objects(bottom_address, fill_size);                                                       
724       increase_used(fill_size * HeapWordSize);                                                                             
725     }                                                                                                                      
726   }                                                                                                                        
727 }                                                                                                                          
728 
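// Non-humongous allocation entry point. First tries an allocation in the
// current mutator alloc region without taking the Heap_lock; if that fails,
// falls back to attempt_allocation_slow(), which may schedule a collection.
// On success, dirty_young_block() updates the card table entries covering the
// newly allocated block.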
729 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,                                                 
730                                                      size_t desired_word_size,                                             
731                                                      size_t* actual_word_size) {                                           
732   assert_heap_not_locked_and_not_at_safepoint();                                                                           
733   assert(!is_humongous(desired_word_size), "attempt_allocation() should not "                                              
734          "be called for humongous allocation requests");                                                                   
735 
736   HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);                   
737 
738   if (result == NULL) {                                                                                                    
739     *actual_word_size = desired_word_size;                                                                                 
740     result = attempt_allocation_slow(desired_word_size);                                                                   
741   }                                                                                                                        
742 
743   assert_heap_not_locked();                                                                                                
744   if (result != NULL) {                                                                                                    
745     assert(*actual_word_size != 0, "Actual size must have been set here");                                                 
746     dirty_young_block(result, *actual_word_size);                                                                          
747   } else {                                                                                                                 
748     *actual_word_size = 0;                                                                                                 
749   }                                                                                                                        
750 
751   return result;                                                                                                           
752 }                                                                                                                          
753 
754 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {                                           
755   assert(!is_init_completed(), "Expect to be called at JVM init time");                                                    
756   assert(ranges != NULL, "MemRegion array NULL");                                                                          
757   assert(count != 0, "No MemRegions provided");                                                                            
758   MemRegion reserved = _hrm.reserved();                                                                                    
759   HeapWord* prev_last_addr = NULL;                                                                                         
760   HeapRegion* prev_last_region = NULL;                                                                                     
761   size_t size_used = 0;                                                                                                    
762   size_t uncommitted_regions = 0;                                                                                          
763 
764   // For each MemRegion, free the G1 regions that constitute it, and
765   // notify mark-sweep that the range is no longer to be considered 'archive.'                                             
766   MutexLockerEx x(Heap_lock);                                                                                              
767   for (size_t i = 0; i < count; i++) {                                                                                     
768     HeapWord* start_address = ranges[i].start();                                                                           
769     HeapWord* last_address = ranges[i].last();                                                                             
770 
771     assert(reserved.contains(start_address) && reserved.contains(last_address),                                            
772            "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",                                                   
773            p2i(start_address), p2i(last_address));                                                                         
774     assert(start_address > prev_last_addr,                                                                                 
775            "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,                                                
776            p2i(start_address), p2i(prev_last_addr));                                                                       
777     size_used += ranges[i].byte_size();                                                                                    
778     prev_last_addr = last_address;                                                                                         
779 
780     HeapRegion* start_region = _hrm.addr_to_region(start_address);                                                         
781     HeapRegion* last_region = _hrm.addr_to_region(last_address);                                                           
782 
783     // Check for ranges that start in the same G1 region in which the previous                                             
784     // range ended, and adjust the start address so we don't try to free                                                   
785     // the same region again. If the current range is entirely within that                                                 
786     // region, skip it.                                                                                                    
787     if (start_region == prev_last_region) {                                                                                
788       start_address = start_region->end();                                                                                 
789       if (start_address > last_address) {                                                                                  
790         continue;                                                                                                          
791       }                                                                                                                    
792       start_region = _hrm.addr_to_region(start_address);                                                                   
793     }                                                                                                                      
794     prev_last_region = last_region;                                                                                        
795 
796     // After verifying that each region was marked as an archive region by                                                 
797     // alloc_archive_regions, set it free and empty and uncommit it.                                                       
798     HeapRegion* curr_region = start_region;                                                                                
799     while (curr_region != NULL) {                                                                                          
800       guarantee(curr_region->is_archive(),                                                                                 
801                 "Expected archive region at index %u", curr_region->hrm_index());                                          
802       uint curr_index = curr_region->hrm_index();                                                                          
803       _archive_set.remove(curr_region);                                                                                    
804       curr_region->set_free();                                                                                             
805       curr_region->set_top(curr_region->bottom());                                                                         
806       if (curr_region != last_region) {                                                                                    
807         curr_region = _hrm.next_region_in_heap(curr_region);                                                               
808       } else {                                                                                                             
809         curr_region = NULL;                                                                                                
810       }                                                                                                                    
811       _hrm.shrink_at(curr_index, 1);                                                                                       
812       uncommitted_regions++;                                                                                               
813     }                                                                                                                      
814 
815     // Notify mark-sweep that this is no longer an archive range.                                                          
816     G1ArchiveAllocator::set_range_archive(ranges[i], false);                                                               
817   }                                                                                                                        
818 
819   if (uncommitted_regions != 0) {                                                                                          
820     log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",        
821                               HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);                                
822   }                                                                                                                        
823   decrease_used(size_used);                                                                                                
824 }                                                                                                                          
825 
826 oop G1CollectedHeap::materialize_archived_object(oop obj) {                                                                
827   assert(obj != NULL, "archived obj is NULL");                                                                             
828   assert(MetaspaceShared::is_archive_object(obj), "must be archived object");                                              
829 
830   // Loading an archived object makes it strongly reachable. If it is                                                      
831   // loaded during concurrent marking, it must be enqueued to the SATB                                                     
832   // queue, shading the previously white object gray.                                                                      
833   G1BarrierSet::enqueue(obj);                                                                                              
834 
835   return obj;                                                                                                              
836 }                                                                                                                          
837 
838 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {                                                
839   ResourceMark rm; // For retrieving the thread names in log messages.                                                     
840 
841   // The structure of this method has a lot of similarities to                                                             
842   // attempt_allocation_slow(). The reason these two were not merged                                                       
843   // into a single one is that such a method would require several "if                                                     
844   // allocation is not humongous do this, otherwise do that"                                                               
845   // conditional paths which would obscure its flow. In fact, an early                                                     
846   // version of this code did use a unified method which was harder to                                                     
847   // follow and, as a result, it had subtle bugs that were hard to                                                         
848   // track down. So keeping these two methods separate allows each to                                                      
849   // be more readable. It will be good to keep these two in sync as                                                        
850   // much as possible.                                                                                                     
851 
852   assert_heap_not_locked_and_not_at_safepoint();                                                                           
853   assert(is_humongous(word_size), "attempt_allocation_humongous() "                                                        
854          "should only be called for humongous allocations");                                                               
855 
856   // Humongous objects can exhaust the heap quickly, so we should check if we                                              
857   // need to start a marking cycle at each humongous object allocation. We do                                              
858   // the check before we do the actual allocation. The reason for doing it                                                 
859   // before the allocation is that we avoid having to keep track of the newly                                              
860   // allocated memory while we do a GC.                                                                                    
861   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",                                              
862                                            word_size)) {                                                                   
863     collect(GCCause::_g1_humongous_allocation);                                                                            
864   }                                                                                                                        
865 
866   // We will loop until a) we manage to successfully perform the                                                           
867   // allocation or b) we successfully schedule a collection which                                                          
868   // fails to perform the allocation. b) is the only case when we'll                                                       
869   // return NULL.                                                                                                          
870   HeapWord* result = NULL;                                                                                                 
871   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {                                 
872     bool should_try_gc;                                                                                                    
873     uint gc_count_before;                                                                                                  
874 
875 
876     {                                                                                                                      
877       MutexLockerEx x(Heap_lock);                                                                                          
878 
879       // Given that humongous objects are not allocated in young                                                           
880       // regions, we'll first try to do the allocation without doing a                                                     
881       // collection hoping that there's enough space in the heap.                                                          
882       result = humongous_obj_allocate(word_size);                                                                          
883       if (result != NULL) {                                                                                                
884         size_t size_in_regions = humongous_obj_size_in_regions(word_size);                                                 
885         g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);                   
886         return result;                                                                                                     
887       }                                                                                                                    
888 
889       // Only try a GC if the GCLocker does not signal the need for a GC. Wait until                                       
890       // the GCLocker initiated GC has been performed and then retry. This includes
891       // the case when the GCLocker is not active but its initiated GC has not yet been performed.
892       should_try_gc = !GCLocker::needs_gc();                                                                               
893       // Read the GC count while still holding the Heap_lock.                                                              
894       gc_count_before = total_collections();                                                                               
895     }                                                                                                                      
896 
897     if (should_try_gc) {                                                                                                   
898       bool succeeded;                                                                                                      
899       result = do_collection_pause(word_size, gc_count_before, &succeeded,                                                 
900                                    GCCause::_g1_humongous_allocation);                                                     
901       if (result != NULL) {                                                                                                
902         assert(succeeded, "only way to get back a non-NULL result");                                                       
903         log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,                                
904                              Thread::current()->name(), p2i(result));                                                      
905         return result;                                                                                                     
906       }                                                                                                                    
907 
908       if (succeeded) {                                                                                                     
909         // We successfully scheduled a collection which failed to allocate. No                                             
910         // point in trying to allocate further. We'll just return NULL.                                                    
911         log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "                                  
912                              SIZE_FORMAT " words", Thread::current()->name(), word_size);                                  
913         return NULL;                                                                                                       
914       }                                                                                                                    
915       log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
916                            Thread::current()->name(), word_size);                                                          
917     } else {                                                                                                               
918       // Failed to schedule a collection.                                                                                  
919       if (gclocker_retry_count > GCLockerRetryAllocationCount) {                                                           
920         log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "                                    
921                                SIZE_FORMAT " words", Thread::current()->name(), word_size);                                
922         return NULL;                                                                                                       
923       }                                                                                                                    
924       log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());                                            
925       // The GCLocker is either active or the GCLocker initiated                                                           
926       // GC has not yet been performed. Stall until it is and                                                              
927       // then retry the allocation.                                                                                        
928       GCLocker::stall_until_clear();                                                                                       
929       gclocker_retry_count += 1;                                                                                           
930     }                                                                                                                      
931 
932 
933     // We can reach here if we were unsuccessful in scheduling a                                                           
934     // collection (because another thread beat us to it) or if we were                                                     
935     // stalled due to the GC locker. In either case we should retry the
936     // allocation attempt in case another thread successfully                                                              
937     // performed a collection and reclaimed enough space.                                                                  
938     // Humongous object allocation always needs a lock, so we wait for the retry                                           
939     // in the next iteration of the loop, unlike in the regular allocation case.
940     // Give a warning if we seem to be looping forever.                                                                    
941 
942     if ((QueuedAllocationWarningCount > 0) &&                                                                              
943         (try_count % QueuedAllocationWarningCount == 0)) {                                                                 
944       log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",                                  
945                              Thread::current()->name(), try_count, word_size);                                             
946     }                                                                                                                      
947   }                                                                                                                        
948 
949   ShouldNotReachHere();                                                                                                    
950   return NULL;                                                                                                             
951 }                                                                                                                          
952 
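// Allocation performed at a safepoint by the VM thread, typically during a GC
// pause. Non-humongous requests are satisfied from the mutator alloc region
// under lock; humongous requests may additionally request that a concurrent
// mark be initiated at the next opportunity.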
953 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,                                               
954                                                            bool expect_null_mutator_alloc_region) {                        
955   assert_at_safepoint_on_vm_thread();                                                                                      
956   assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,                                     
957          "the current alloc region was unexpectedly found to be non-NULL");                                                
958 
959   if (!is_humongous(word_size)) {                                                                                          
960     return _allocator->attempt_allocation_locked(word_size);                                                               
961   } else {                                                                                                                 
962     HeapWord* result = humongous_obj_allocate(word_size);                                                                  
963     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {                              
964       collector_state()->set_initiate_conc_mark_if_possible(true);                                                         
965     }                                                                                                                      
966     return result;                                                                                                         
967   }                                                                                                                        
968 
969   ShouldNotReachHere();                                                                                                    
970 }                                                                                                                          
971 
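// Closure used after a full GC to report the post-compaction state of each
// (non-young) region to the G1HRPrinter.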
972 class PostCompactionPrinterClosure: public HeapRegionClosure {                                                             
973 private:                                                                                                                   
974   G1HRPrinter* _hr_printer;                                                                                                
975 public:                                                                                                                    
976   bool do_heap_region(HeapRegion* hr) {                                                                                    
977     assert(!hr->is_young(), "not expecting to find young regions");                                                        
978     _hr_printer->post_compaction(hr);                                                                                      
979     return false;                                                                                                          
980   }                                                                                                                        
981 
982   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)                                                                    
983     : _hr_printer(hr_printer) { }                                                                                          
984 };                                                                                                                         
985 
986 void G1CollectedHeap::print_hrm_post_compaction() {                                                                        
987   if (_hr_printer.is_active()) {                                                                                           
988     PostCompactionPrinterClosure cl(hr_printer());                                                                         
989     heap_region_iterate(&cl);                                                                                              
990   }                                                                                                                        
991 }                                                                                                                          
992 
993 void G1CollectedHeap::abort_concurrent_cycle() {                                                                           
994   // If we start the compaction before the CM threads finish                                                               
995   // scanning the root regions we might trip them over as we'll                                                            
996   // be moving objects / updating references. So let's wait until                                                          
997   // they are done. By telling them to abort, they should complete                                                         
998   // early.                                                                                                                
999   _cm->root_regions()->abort();                                                                                            
1000   _cm->root_regions()->wait_until_scan_finished();                                                                         
1001 
1002   // Disable discovery and empty the discovered lists                                                                      
1003   // for the CM ref processor.                                                                                             
1004   _ref_processor_cm->disable_discovery();                                                                                  
1005   _ref_processor_cm->abandon_partial_discovery();                                                                          
1006   _ref_processor_cm->verify_no_references_recorded();                                                                      
1007 
1008   // Abandon current iterations of concurrent marking and concurrent                                                       
1009   // refinement, if any are in progress.                                                                                   
1010   concurrent_mark()->concurrent_cycle_abort();                                                                             
1011 }                                                                                                                          
1012 
1013 void G1CollectedHeap::prepare_heap_for_full_collection() {                                                                 
1014   // Make sure we'll choose a new allocation region afterwards.                                                            
1015   _allocator->release_mutator_alloc_region();                                                                              
1016   _allocator->abandon_gc_alloc_regions();                                                                                  
1017   g1_rem_set()->cleanupHRRS();                                                                                             
1018 
1019   // We may have added regions to the current incremental collection                                                       
1020   // set between the last GC or pause and now. We need to clear the                                                        
1021   // incremental collection set and then start rebuilding it afresh                                                        
1022   // after this full GC.                                                                                                   
1023   abandon_collection_set(collection_set());                                                                                
1024 
1025   tear_down_region_sets(false /* free_list_only */);                                                                       
1026 }                                                                                                                          
1027 
1028 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {                                                    
1029   assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");                                          
1030   assert(used() == recalculate_used(), "Should be equal");                                                                 
1031   _verifier->verify_region_sets_optional();                                                                                
1032   _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);                                                               
1033   _verifier->check_bitmaps("Full GC Start");                                                                               
1034 }                                                                                                                          
1035 
1036 void G1CollectedHeap::prepare_heap_for_mutators() {                                                                        
1037   // Delete metaspaces for unloaded class loaders and clean up loader_data graph                                           
1038   ClassLoaderDataGraph::purge();                                                                                           
1039   MetaspaceUtils::verify_metrics();                                                                                        
1040 
1041   // Prepare heap for normal collections.                                                                                  
1042   assert(num_free_regions() == 0, "we should not have added any free regions");                                            
1043   rebuild_region_sets(false /* free_list_only */);                                                                         
1044   abort_refinement();                                                                                                      
1045   resize_if_necessary_after_full_collection();                                                                             
1046 
1047   // Rebuild the strong code root lists for each region                                                                    
1048   rebuild_strong_code_roots();                                                                                             
1049 
1050   // Start a new incremental collection set for the next pause                                                             
1051   start_new_collection_set();                                                                                              
1052 
1053   _allocator->init_mutator_alloc_region();                                                                                 
1054 
1055   // Post collection state updates.                                                                                        
1056   MetaspaceGC::compute_new_size();                                                                                         
1057 }                                                                                                                          
1058 
1059 void G1CollectedHeap::abort_refinement() {                                                                                 
1060   if (_hot_card_cache->use_cache()) {                                                                                      
1061     _hot_card_cache->reset_hot_cache();                                                                                    
1062   }                                                                                                                        
1063 
1064   // Discard all remembered set updates.                                                                                   
1065   G1BarrierSet::dirty_card_queue_set().abandon_logs();                                                                     
1066   assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");                                     
1067 }                                                                                                                          
1068 
1069 void G1CollectedHeap::verify_after_full_collection() {                                                                     
1070   _hrm.verify_optional();                                                                                                  
1071   _verifier->verify_region_sets_optional();                                                                                
1072   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);                                                                
1073   // Clear the previous marking bitmap, if needed for bitmap verification.                                                 
1074   // Note we cannot do this when we clear the next marking bitmap in                                                       
1075   // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the                                                     
1076   // objects marked during a full GC against the previous bitmap.                                                          
1077   // But we need to clear it before calling check_bitmaps below since                                                      
1078   // the full GC has compacted objects and updated TAMS but not updated                                                    
1079   // the prev bitmap.                                                                                                      
1080   if (G1VerifyBitmaps) {                                                                                                   
1081     GCTraceTime(Debug, gc) tm("Clear Bitmap for Verification");
1082     _cm->clear_prev_bitmap(workers());                                                                                     
1083   }                                                                                                                        
1084   _verifier->check_bitmaps("Full GC End");                                                                                 
1085 
1086   // At this point there should be no regions in the                                                                       
1087   // entire heap tagged as young.                                                                                          
1088   assert(check_young_list_empty(), "young list should be empty at this point");                                            
1089 
1090   // Note: since we've just done a full GC, concurrent                                                                     
1091   // marking is no longer active. Therefore we need not                                                                    
1092   // re-enable reference discovery for the CM ref processor.                                                               
1093   // That will be done at the start of the next marking cycle.                                                             
1094   // We also know that the STW processor should no longer                                                                  
1095   // discover any new references.                                                                                          
1096   assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");                                                       
1097   assert(!_ref_processor_cm->discovery_enabled(), "Postcondition");                                                        
1098   _ref_processor_stw->verify_no_references_recorded();                                                                     
1099   _ref_processor_cm->verify_no_references_recorded();                                                                      
1100 }                                                                                                                          
1101 
1102 void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {                                
1103   // Post collection logging.                                                                                              
1104   // We should do this after we potentially resize the heap so                                                             
1105   // that all the COMMIT / UNCOMMIT events are generated before                                                            
1106   // the compaction events.                                                                                                
1107   print_hrm_post_compaction();                                                                                             
1108   heap_transition->print();                                                                                                
1109   print_heap_after_gc();                                                                                                   
1110   print_heap_regions();                                                                                                    
1111 #ifdef TRACESPINNING                                                                                                       
1112   ParallelTaskTerminator::print_termination_counts();                                                                      
1113 #endif                                                                                                                     
1114 }                                                                                                                          
1115 
1116 bool G1CollectedHeap::do_full_collection(bool explicit_gc,                                                                 
1117                                          bool clear_all_soft_refs) {                                                       
1118   assert_at_safepoint_on_vm_thread();                                                                                      
1119 
1120   if (GCLocker::check_active_before_gc()) {                                                                                
1121     // Full GC was not completed.                                                                                          
1122     return false;                                                                                                          
1123   }                                                                                                                        
1124 
1125   const bool do_clear_all_soft_refs = clear_all_soft_refs ||                                                               
1126       soft_ref_policy()->should_clear_all_soft_refs();                                                                     
1127 
1128   G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);                                                    
1129   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);                                                          
1130 
1131   collector.prepare_collection();                                                                                          
1132   collector.collect();                                                                                                     
1133   collector.complete_collection();                                                                                         
1134 
1135   // Full collection was successfully completed.                                                                           
1136   return true;                                                                                                             
1137 }                                                                                                                          
1138 
1139 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {                                                       
1140   // Currently, there is no facility in the do_full_collection(bool) API to notify                                         
1141   // the caller that the collection did not succeed (e.g., because it was locked                                           
1142   // out by the GC locker). So, right now, we'll ignore the return value.                                                  
1143   bool dummy = do_full_collection(true,                /* explicit_gc */                                                   
1144                                   clear_all_soft_refs);                                                                    
1145 }                                                                                                                          
1146 
1147 void G1CollectedHeap::resize_if_necessary_after_full_collection() {                                                        
1148   // Capacity, free and used after the GC are counted in terms of full regions
1149   // so that the waste is included in the following calculations.
1150   const size_t capacity_after_gc = capacity();                                                                             
1151   const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();                                    
1152 
1153   // This is enforced in arguments.cpp.                                                                                    
1154   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,                                                                             
1155          "otherwise the code below doesn't make sense");                                                                   
1156 
1157   // We don't have floating point command-line arguments                                                                   
1158   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;                                                
1159   const double maximum_used_percentage = 1.0 - minimum_free_percentage;                                                    
1160   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;                                                
1161   const double minimum_used_percentage = 1.0 - maximum_free_percentage;                                                    
1162 
1163   const size_t min_heap_size = collector_policy()->min_heap_byte_size();                                                   
1164   const size_t max_heap_size = collector_policy()->max_heap_byte_size();                                                   
1165 
1166   // We have to be careful here as these two calculations can overflow                                                     
1167   // 32-bit size_t's.                                                                                                      
1168   double used_after_gc_d = (double) used_after_gc;                                                                         
1169   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;                                           
1170   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;                                           
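       // Illustrative example (assumed values): with MinHeapFreeRatio = 40 and
       // MaxHeapFreeRatio = 70, maximum_used_percentage is 0.60 and
       // minimum_used_percentage is 0.30, so a used_after_gc of 600M gives a
       // minimum desired capacity of 1000M and a maximum desired capacity of 2000M.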
1171 
1172   // Let's make sure that they are both under the max heap size, which                                                     
1173   // by default will make them fit into a size_t.                                                                          
1174   double desired_capacity_upper_bound = (double) max_heap_size;                                                            
1175   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,                                                            
1176                                     desired_capacity_upper_bound);                                                         
1177   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,                                                            
1178                                     desired_capacity_upper_bound);                                                         
1179 
1180   // We can now safely turn them into size_t's.                                                                            
1181   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;                                                   
1182   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;                                                   
1183 
1184   // This assert only makes sense here, before we adjust them                                                              
1185   // with respect to the min and max heap size.                                                                            
1186   assert(minimum_desired_capacity <= maximum_desired_capacity,                                                             
1187          "minimum_desired_capacity = " SIZE_FORMAT ", "                                                                    
1188          "maximum_desired_capacity = " SIZE_FORMAT,                                                                        
1189          minimum_desired_capacity, maximum_desired_capacity);                                                              
1190 
1191   // Should not be greater than the heap max size. No need to adjust                                                       
1192   // it with respect to the heap min size as it's a lower bound (i.e.,                                                     
1193   // we'll try to make the capacity larger than it, not smaller).                                                          
1194   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);                                                
1195   // Should not be less than the heap min size. No need to adjust it                                                       
1196   // with respect to the heap max size as it's an upper bound (i.e.,                                                       
1197   // we'll try to make the capacity smaller than it, not greater).                                                         
1198   maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1199 
1200   if (capacity_after_gc < minimum_desired_capacity) {                                                                      
1201     // Don't expand unless it's significant                                                                                
1202     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;                                                    
1203 
1204     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "          
1205                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "             
1206                               "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",                              
1207                               capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);       
1208 
1209     expand(expand_bytes, _workers);                                                                                        
1210 
1211     // No expansion, now see if we want to shrink                                                                          
1212   } else if (capacity_after_gc > maximum_desired_capacity) {                                                               
1213     // Capacity too large, compute shrinking size                                                                          
1214     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;                                                    
1215 
1216     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "         
1217                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "             
1218                               "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",                          
1219                               capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);       
1220 
1221     shrink(shrink_bytes);                                                                                                  
1222   }                                                                                                                        
1223 }                                                                                                                          
1224 
1225 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,                                              
1226                                                             bool do_gc,                                                    
1227                                                             bool clear_all_soft_refs,                                      
1228                                                             bool expect_null_mutator_alloc_region,                         
1229                                                             bool* gc_succeeded) {                                          
1230   *gc_succeeded = true;                                                                                                    
1231   // Let's attempt the allocation first.                                                                                   
1232   HeapWord* result =                                                                                                       
1233     attempt_allocation_at_safepoint(word_size,                                                                             
1234                                     expect_null_mutator_alloc_region);                                                     
1235   if (result != NULL) {                                                                                                    
1236     return result;                                                                                                         
1237   }                                                                                                                        
1238 
1239   // In a G1 heap, we're supposed to keep allocation from failing by                                                       
1240   // incremental pauses.  Therefore, at least for now, we'll favor                                                         
1241   // expansion over collection.  (This might change in the future if we can                                                
1242   // do something smarter than full collection to satisfy a failed alloc.)                                                 
1243   result = expand_and_allocate(word_size);                                                                                 
1244   if (result != NULL) {                                                                                                    
1245     return result;                                                                                                         
1246   }                                                                                                                        
1247 
1248   if (do_gc) {                                                                                                             
1249     // Expansion didn't work, we'll try to do a Full GC.                                                                   
1250     *gc_succeeded = do_full_collection(false, /* explicit_gc */                                                            
1251                                        clear_all_soft_refs);                                                               
1252   }                                                                                                                        
1253 
1254   return NULL;                                                                                                             
1255 }                                                                                                                          
1256 
1257 HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,                                                     
1258                                                      bool* succeeded) {                                                    
1259   assert_at_safepoint_on_vm_thread();                                                                                      
1260 
1261   // Attempts to allocate followed by Full GC.                                                                             
1262   HeapWord* result =                                                                                                       
1263     satisfy_failed_allocation_helper(word_size,                                                                            
1264                                      true,  /* do_gc */                                                                    
1265                                      false, /* clear_all_soft_refs */                                                      
1266                                      false, /* expect_null_mutator_alloc_region */                                         
1267                                      succeeded);                                                                           
1268 
1269   if (result != NULL || !*succeeded) {                                                                                     
1270     return result;                                                                                                         
1271   }                                                                                                                        
1272 
1273   // Attempts to allocate followed by Full GC that will collect all soft references.                                       
1274   result = satisfy_failed_allocation_helper(word_size,                                                                     
1275                                             true, /* do_gc */                                                              
1276                                             true, /* clear_all_soft_refs */                                                
1277                                             true, /* expect_null_mutator_alloc_region */                                   
1278                                             succeeded);                                                                    
1279 
1280   if (result != NULL || !*succeeded) {                                                                                     
1281     return result;                                                                                                         
1282   }                                                                                                                        
1283 
1284   // Attempts to allocate, no GC                                                                                           
1285   result = satisfy_failed_allocation_helper(word_size,                                                                     
1286                                             false, /* do_gc */                                                             
1287                                             false, /* clear_all_soft_refs */                                               
1288                                             true,  /* expect_null_mutator_alloc_region */                                  
1289                                             succeeded);                                                                    
1290 
1291   if (result != NULL) {                                                                                                    
1292     return result;                                                                                                         
1293   }                                                                                                                        
1294 
1295   assert(!soft_ref_policy()->should_clear_all_soft_refs(),                                                                 
1296          "Flag should have been handled and cleared prior to this point");                                                 
1297 
1298   // What else?  We might try synchronous finalization later.  If the total                                                
1299   // space available is large enough for the allocation, then a more                                                       
1300   // complete compaction phase than we've tried so far might be                                                            
1301   // appropriate.                                                                                                          
1302   return NULL;                                                                                                             
1303 }                                                                                                                          
1304 
1305 // Attempt to expand the heap sufficiently to support an allocation
1306 // of the given "word_size". If successful, perform the allocation
1307 // and return the address of the allocated block; otherwise return
1308 // "NULL".
1309 
1310 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {                                                         
1311   assert_at_safepoint_on_vm_thread();                                                                                      
1312 
1313   _verifier->verify_region_sets_optional();                                                                                
1314 
1315   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);                                                 
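       // For example (illustrative values only): a 100-word request on a system with
       // 8-byte heap words needs 800 bytes, so with a MinHeapDeltaBytes of 128K the
       // requested expansion is 128K.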
1316   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",    
1317                             word_size * HeapWordSize);                                                                     
1318 
1319 
1320   if (expand(expand_bytes, _workers)) {                                                                                    
1321     _hrm.verify_optional();                                                                                                
1322     _verifier->verify_region_sets_optional();                                                                              
1323     return attempt_allocation_at_safepoint(word_size,                                                                      
1324                                            false /* expect_null_mutator_alloc_region */);                                  
1325   }                                                                                                                        
1326   return NULL;                                                                                                             
1327 }                                                                                                                          
1328 
1329 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {                    
1330   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);                                           
1331   aligned_expand_bytes = align_up(aligned_expand_bytes,
1332                                   HeapRegion::GrainBytes);
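       // For example (illustrative sizes only): a 3000K request with a 4K page size
       // is already page aligned and is then rounded up to 3M, i.e. three whole
       // regions with a 1M region size.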
1333 
1334   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1335                             expand_bytes, aligned_expand_bytes);                                                           
1336 
1337   if (is_maximal_no_gc()) {                                                                                                
1338     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");                                    
1339     return false;                                                                                                          
1340   }                                                                                                                        
1341 
1342   double expand_heap_start_time_sec = os::elapsedTime();                                                                   
1343   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);                                          
1344   assert(regions_to_expand > 0, "Must expand by at least one region");                                                     
1345 
1346   uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);                                                  
1347   if (expand_time_ms != NULL) {                                                                                            
1348     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;                                       
1349   }                                                                                                                        
1350 
1351   if (expanded_by > 0) {                                                                                                   
1352     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;                                                     
1353     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");                                                 
1354     g1_policy()->record_new_heap_size(num_regions());                                                                      
1355   } else {                                                                                                                 
1356     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");                                
1357 
1358     // The expansion of the virtual storage space was unsuccessful.                                                        
1359     // Let's see if it was because we ran out of swap.                                                                     
1360     if (G1ExitOnExpansionFailure &&                                                                                        
1361         _hrm.available() >= regions_to_expand) {                                                                           
1362       // We had head room...                                                                                               
1363       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");                                    
1364     }                                                                                                                      
1365   }                                                                                                                        
1366   return regions_to_expand > 0;                                                                                            
1367 }                                                                                                                          
1368 
1369 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {                                                                 
1370   size_t aligned_shrink_bytes =                                                                                            
1371     ReservedSpace::page_align_size_down(shrink_bytes);                                                                     
1372   aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1373                                     HeapRegion::GrainBytes);
1374   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);                                              
1375 
1376   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);                                                        
1377   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;                                                      
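       // shrink_by() may remove fewer regions than requested if not enough free
       // regions are available to uncommit, so log the amount actually shrunk.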
1378 
1379 
1380   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1381                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);                                             
1382   if (num_regions_removed > 0) {                                                                                           
1383     g1_policy()->record_new_heap_size(num_regions());                                                                      
1384   } else {                                                                                                                 
1385     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
1386   }                                                                                                                        
1387 }                                                                                                                          
1388 
1389 void G1CollectedHeap::shrink(size_t shrink_bytes) {                                                                        
1390   _verifier->verify_region_sets_optional();                                                                                
1391 
1392   // We should only reach here at the end of a Full GC, which means we
1393   // should not be holding on to any GC alloc regions. The method
1394   // below will make sure of that and do any remaining clean up.
1395   _allocator->abandon_gc_alloc_regions();                                                                                  
1396 
1397   // Instead of tearing down / rebuilding the free lists here, we                                                          
1398   // could instead use the remove_all_pending() method on free_list to                                                     
1399   // remove only the ones that we need to remove.                                                                          
1400   tear_down_region_sets(true /* free_list_only */);                                                                        
1401   shrink_helper(shrink_bytes);                                                                                             
1402   rebuild_region_sets(true /* free_list_only */);                                                                          
1403 
1404   _hrm.verify_optional();                                                                                                  
1405   _verifier->verify_region_sets_optional();                                                                                
1406 }                                                                                                                          
1407 
1408 class OldRegionSetChecker : public HeapRegionSetChecker {                                                                  
1409 public:                                                                                                                    
1410   void check_mt_safety() {                                                                                                 
1411     // Master Old Set MT safety protocol:                                                                                  
1412     // (a) If we're at a safepoint, operations on the master old set                                                       
1413     // should be invoked:                                                                                                  
1414     // - by the VM thread (which will serialize them), or                                                                  
1415     // - by the GC workers while holding the FreeList_lock, if we're                                                       
1416     //   at a safepoint for an evacuation pause (this lock is taken                                                        
1417     //   anyway when a GC alloc region is retired so that a new one
1418     //   is allocated from the free list), or                                                                              
1419     // - by the GC workers while holding the OldSets_lock, if we're at a                                                   
1420     //   safepoint for a cleanup pause.                                                                                    
1421     // (b) If we're not at a safepoint, operations on the master old set                                                   
1422     // should be invoked while holding the Heap_lock.                                                                      
1423 
1424     if (SafepointSynchronize::is_at_safepoint()) {                                                                         
1425       guarantee(Thread::current()->is_VM_thread() ||                                                                       
1426                 FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),                                           
1427                 "master old set MT safety protocol at a safepoint");                                                       
1428     } else {                                                                                                               
1429       guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");                      
1430     }                                                                                                                      
1431   }                                                                                                                        
1432   bool is_correct_type(HeapRegion* hr) { return hr->is_old(); }                                                            
1433   const char* get_description() { return "Old Regions"; }                                                                  
1434 };                                                                                                                         
1435 
1436 class ArchiveRegionSetChecker : public HeapRegionSetChecker {                                                              
1437 public:                                                                                                                    
1438   void check_mt_safety() {                                                                                                 
1439     guarantee(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(),                                
1440               "May only change archive regions during initialization or safepoint.");                                      
1441   }                                                                                                                        
1442   bool is_correct_type(HeapRegion* hr) { return hr->is_archive(); }                                                        
1443   const char* get_description() { return "Archive Regions"; }                                                              
1444 };                                                                                                                         
1445 
1446 class HumongousRegionSetChecker : public HeapRegionSetChecker {                                                            
1447 public:                                                                                                                    
1448   void check_mt_safety() {                                                                                                 
1449     // Humongous Set MT safety protocol:                                                                                   
1450     // (a) If we're at a safepoint, operations on the master humongous                                                     
1451     // set should be invoked by either the VM thread (which will                                                           
1452     // serialize them) or by the GC workers while holding the                                                              
1453     // OldSets_lock.                                                                                                       
1454     // (b) If we're not at a safepoint, operations on the master                                                           
1455     // humongous set should be invoked while holding the Heap_lock.                                                        
1456 
1457     if (SafepointSynchronize::is_at_safepoint()) {                                                                         
1458       guarantee(Thread::current()->is_VM_thread() ||                                                                       
1459                 OldSets_lock->owned_by_self(),                                                                             
1460                 "master humongous set MT safety protocol at a safepoint");                                                 
1461     } else {                                                                                                               
1462       guarantee(Heap_lock->owned_by_self(),                                                                                
1463                 "master humongous set MT safety protocol outside a safepoint");                                            
1464     }                                                                                                                      
1465   }                                                                                                                        
1466   bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }                                                      
1467   const char* get_description() { return "Humongous Regions"; }                                                            
1468 };                                                                                                                         
1469 
1470 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :                                                    
1471   CollectedHeap(),                                                                                                         
1472   _young_gen_sampling_thread(NULL),                                                                                        
1473   _workers(NULL),                                                                                                          
1474   _collector_policy(collector_policy),                                                                                     
1475   _card_table(NULL),                                                                                                       
1476   _soft_ref_policy(),                                                                                                      
1477   _old_set("Old Region Set", new OldRegionSetChecker()),                                                                   
1478   _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),                                                       
1479   _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),                                                 
1480   _bot(NULL),                                                                                                              
1481   _listener(),                                                                                                             
1482   _hrm(),                                                                                                                  
1483   _allocator(NULL),                                                                                                        
1484   _verifier(NULL),                                                                                                         
1485   _summary_bytes_used(0),                                                                                                  
1486   _archive_allocator(NULL),                                                                                                
1487   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),                                                                
1488   _old_evac_stats("Old", OldPLABSize, PLABWeight),                                                                         
1489   _expand_heap_after_alloc_failure(true),                                                                                  
1490   _g1mm(NULL),                                                                                                             
1491   _humongous_reclaim_candidates(),                                                                                         
1492   _has_humongous_reclaim_candidates(false),                                                                                
1493   _hr_printer(),                                                                                                           
1494   _collector_state(),                                                                                                      
1495   _old_marking_cycles_started(0),                                                                                          
1496   _old_marking_cycles_completed(0),                                                                                        
1497   _eden(),                                                                                                                 
1498   _survivor(),                                                                                                             
1499   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),                                                             
1500   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),                                                           
1501   _g1_policy(new G1Policy(_gc_timer_stw)),                                                                                 
1502   _heap_sizing_policy(NULL),                                                                                               
1503   _collection_set(this, _g1_policy),                                                                                       
1504   _hot_card_cache(NULL),                                                                                                   
1505   _g1_rem_set(NULL),                                                                                                       
1506   _dirty_card_queue_set(false),                                                                                            
1507   _cm(NULL),                                                                                                               
1508   _cm_thread(NULL),                                                                                                        
1509   _cr(NULL),                                                                                                               
1510   _task_queues(NULL),                                                                                                      
1511   _evacuation_failed(false),                                                                                               
1512   _evacuation_failed_info_array(NULL),                                                                                     
1513   _preserved_marks_set(true /* in_c_heap */),                                                                              
1514 #ifndef PRODUCT                                                                                                            
1515   _evacuation_failure_alot_for_current_gc(false),                                                                          
1516   _evacuation_failure_alot_gc_number(0),                                                                                   
1517   _evacuation_failure_alot_count(0),                                                                                       
1518 #endif                                                                                                                     
1519   _ref_processor_stw(NULL),                                                                                                
1520   _is_alive_closure_stw(this),                                                                                             
1521   _is_subject_to_discovery_stw(this),                                                                                      
1522   _ref_processor_cm(NULL),                                                                                                 
1523   _is_alive_closure_cm(this),                                                                                              
1524   _is_subject_to_discovery_cm(this),                                                                                       
1525   _in_cset_fast_test() {                                                                                                   
1526 
1527   _workers = new WorkGang("GC Thread", ParallelGCThreads,                                                                  
1528                           true /* are_GC_task_threads */,                                                                  
1529                           false /* are_ConcurrentGC_threads */);                                                           
1530   _workers->initialize_workers();                                                                                          
1531   _verifier = new G1HeapVerifier(this);                                                                                    
1532 
1533   _allocator = new G1Allocator(this);                                                                                      
1534 
1535   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());                                         
1536 
1537   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);                                  
1538 
1539   // Override the default _filler_array_max_size so that no humongous filler                                               
1540   // objects are created.                                                                                                  
1541   _filler_array_max_size = _humongous_object_threshold_in_words;                                                           
1542 
1543   uint n_queues = ParallelGCThreads;                                                                                       
1544   _task_queues = new RefToScanQueueSet(n_queues);                                                                          
1545 
1546   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);                                  
1547 
1548   for (uint i = 0; i < n_queues; i++) {                                                                                    
1549     RefToScanQueue* q = new RefToScanQueue();                                                                              
1550     q->initialize();                                                                                                       
1551     _task_queues->register_queue(i, q);                                                                                    
1552     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();                                                      
1553   }                                                                                                                        
1554 
1555   // Initialize the G1EvacuationFailureALot counters and flags.                                                            
1556   NOT_PRODUCT(reset_evacuation_should_fail();)                                                                             
1557 
1558   guarantee(_task_queues != NULL, "task_queues allocation failure.");                                                      
1559 }                                                                                                                          
1560 
1561 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,                                  
1562                                                                  size_t size,                                              
1563                                                                  size_t translation_factor) {                              
1564   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);                                                
1565   // Allocate a new reserved space, preferring to use large pages.                                                         
1566   ReservedSpace rs(size, preferred_page_size);                                                                             
1567   G1RegionToSpaceMapper* result =
1568     G1RegionToSpaceMapper::create_mapper(rs,                                                                               
1569                                          size,                                                                             
1570                                          rs.alignment(),                                                                   
1571                                          HeapRegion::GrainBytes,                                                           
1572                                          translation_factor,                                                               
1573                                          mtGC);                                                                            
1574 
1575   os::trace_page_sizes_for_requested_size(description,                                                                     
1576                                           size,                                                                            
1577                                           preferred_page_size,                                                             
1578                                           rs.alignment(),                                                                  
1579                                           rs.base(),                                                                       
1580                                           rs.size());                                                                      
1581 
1582   return result;                                                                                                           
1583 }                                                                                                                          
1584 
1585 jint G1CollectedHeap::initialize_concurrent_refinement() {                                                                 
1586   jint ecode = JNI_OK;                                                                                                     
1587   _cr = G1ConcurrentRefine::create(&ecode);                                                                                
1588   return ecode;                                                                                                            
1589 }                                                                                                                          
1590 
1591 jint G1CollectedHeap::initialize_young_gen_sampling_thread() {                                                             
1592   _young_gen_sampling_thread = new G1YoungRemSetSamplingThread();                                                          
1593   if (_young_gen_sampling_thread->osthread() == NULL) {                                                                    
1594     vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");                                     
1595     return JNI_ENOMEM;                                                                                                     
1596   }                                                                                                                        
1597   return JNI_OK;                                                                                                           
1598 }                                                                                                                          
1599 
1600 jint G1CollectedHeap::initialize() {                                                                                       
1601   os::enable_vtime();                                                                                                      
1602 
1603   // Necessary to satisfy locking discipline assertions.                                                                   
1604 
1605   MutexLocker x(Heap_lock);                                                                                                
1606 
1607   // While there are no constraints in the GC code that HeapWordSize                                                       
1608   // be any particular value, there are multiple other areas in the                                                        
1609   // system which believe this to be true (e.g. oop->object_size in some                                                   
1610   // cases incorrectly returns the size in wordSize units rather than                                                      
1611   // HeapWordSize).                                                                                                        
1612   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");                                                 
1613 
1614   size_t init_byte_size = collector_policy()->initial_heap_byte_size();                                                    
1615   size_t max_byte_size = collector_policy()->max_heap_byte_size();                                                         
1616   size_t heap_alignment = collector_policy()->heap_alignment();                                                            
1617 
1618   // Ensure that the sizes are properly aligned.                                                                           
1619   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");                                            
1620   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");                                             
1621   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");                                                     
1622 
1623   // Reserve the maximum.                                                                                                  
1624 
1625   // When compressed oops are enabled, the preferred heap base                                                             
1626   // is calculated by subtracting the requested size from the                                                              
1627   // 32Gb boundary and using the result as the base address for                                                            
1628   // heap reservation. If the requested size is not aligned to                                                             
1629   // HeapRegion::GrainBytes (i.e. the alignment that is passed                                                             
1630   // into the ReservedHeapSpace constructor) then the actual                                                               
1631   // base of the reserved heap may end up differing from the                                                               
1632   // address that was requested (i.e. the preferred heap base).                                                            
1633   // If this happens then we could end up using a non-optimal                                                              
1634   // compressed oops mode.                                                                                                 
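       // Illustrative example: with an 8G maximum heap the preferred base is
       // 32G - 8G = 24G, so the heap spans [24G, 32G) and zero-based
       // compressed oops (base 0, shift 3) remain possible.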
1635 
1636   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,                                                            
1637                                                  heap_alignment);                                                          
1638 
1639   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));                     
1640 
1641   // Create the barrier set for the entire reserved region.                                                                
1642   G1CardTable* ct = new G1CardTable(reserved_region());                                                                    
1643   ct->initialize();                                                                                                        
1644   G1BarrierSet* bs = new G1BarrierSet(ct);                                                                                 
1645   bs->initialize();                                                                                                        
1646   assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");                                                                    
1647   BarrierSet::set_barrier_set(bs);                                                                                         
1648   _card_table = ct;                                                                                                        
1649 
1650   // Create the hot card cache.                                                                                            
1651   _hot_card_cache = new G1HotCardCache(this);                                                                              
1652 
1653   // Carve out the G1 part of the heap.                                                                                    
1654   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);                                                                 
1655   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();                                           
1656   G1RegionToSpaceMapper* heap_storage =                                                                                    
1657     G1RegionToSpaceMapper::create_mapper(g1_rs,                                                                            
1658                                          g1_rs.size(),                                                                     
1659                                          page_size,                                                                        
1660                                          HeapRegion::GrainBytes,                                                           
1661                                          1,                                                                                
1662                                          mtJavaHeap);                                                                      
1663   os::trace_page_sizes("Heap",                                                                                             
1664                        collector_policy()->min_heap_byte_size(),                                                           
1665                        max_byte_size,                                                                                      
1666                        page_size,                                                                                          
1667                        heap_rs.base(),                                                                                     
1668                        heap_rs.size());                                                                                    
1669   heap_storage->set_mapping_changed_listener(&_listener);                                                                  
1670 
1671   // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.                           
1672   G1RegionToSpaceMapper* bot_storage =                                                                                     
1673     create_aux_memory_mapper("Block Offset Table",                                                                         
1674                              G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),                                
1675                              G1BlockOffsetTable::heap_map_factor());                                                       
1676 
1677   G1RegionToSpaceMapper* cardtable_storage =                                                                               
1678     create_aux_memory_mapper("Card Table",                                                                                 
1679                              G1CardTable::compute_size(g1_rs.size() / HeapWordSize),                                       
1680                              G1CardTable::heap_map_factor());                                                              
1681 
1682   G1RegionToSpaceMapper* card_counts_storage =                                                                             
1683     create_aux_memory_mapper("Card Counts Table",                                                                          
1684                              G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),                                      
1685                              G1CardCounts::heap_map_factor());                                                             
1686 
1687   size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());                                                             
1688   G1RegionToSpaceMapper* prev_bitmap_storage =                                                                             
1689     create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());                                   
1690   G1RegionToSpaceMapper* next_bitmap_storage =                                                                             
1691     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());                                   
1692 
1693   _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1694   _card_table->initialize(cardtable_storage);                                                                              
1695   // Do later initialization work for concurrent refinement.                                                               
1696   _hot_card_cache->initialize(card_counts_storage);                                                                        
1697 
1698   // 6843694 - ensure that the maximum region index can fit                                                                
1699   // in the remembered set structures.                                                                                     
1700   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;                                             
1701   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");                                                    
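       // For example, if RegionIdx_t is a 16-bit type the largest valid
       // region index is (1U << 15) - 1 = 32767.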
1702 
1703   // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not                                     
1704   // start within the first card.                                                                                          
1705   guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");             
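       // e.g. with the usual 512-byte cards the heap base merely has to be at
       // or above address 512.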
1706   // Also create a G1 rem set.                                                                                             
1707   _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);                                                          
1708   _g1_rem_set->initialize(max_capacity(), max_regions());                                                                  
1709 
1710   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;                                      
1711   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");                                                 
1712   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,                                                             
1713             "too many cards per region");                                                                                  
1714 
1715   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);                                                      
1716 
1717   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);                                                           
1718 
1719   {                                                                                                                        
1720     HeapWord* start = _hrm.reserved().start();                                                                             
1721     HeapWord* end = _hrm.reserved().end();                                                                                 
1722     size_t granularity = HeapRegion::GrainBytes;                                                                           
1723 
1724     _in_cset_fast_test.initialize(start, end, granularity);                                                                
1725     _humongous_reclaim_candidates.initialize(start, end, granularity);                                                     
1726   }                                                                                                                        
1727 
1728   // Create the G1ConcurrentMark data structure and thread.                                                                
1729   // (Must do this late, so that "max_regions" is defined.)                                                                
1730   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);                                              
1731   if (_cm == NULL || !_cm->completed_initialization()) {                                                                   
1732     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");                                     
1733     return JNI_ENOMEM;                                                                                                     
1734   }                                                                                                                        
1735   _cm_thread = _cm->cm_thread();                                                                                           
1736 
1737   // Now expand into the initial heap size.                                                                                
1738   if (!expand(init_byte_size, _workers)) {                                                                                 
1739     vm_shutdown_during_initialization("Failed to allocate initial heap.");                                                 
1740     return JNI_ENOMEM;                                                                                                     
1741   }                                                                                                                        
1742 
1743   // Perform any initialization actions delegated to the policy.                                                           
1744   g1_policy()->init(this, &_collection_set);                                                                               
1745 
1746   G1BarrierSet::satb_mark_queue_set().initialize(this,                                                                     
1747                                                  SATB_Q_CBL_mon,                                                           
1748                                                  SATB_Q_FL_lock,                                                           
1749                                                  G1SATBProcessCompletedThreshold,                                          
1750                                                  G1SATBBufferEnqueueingThresholdPercent,                                   
1751                                                  Shared_SATB_Q_lock);                                                      
1752 
1753   jint ecode = initialize_concurrent_refinement();                                                                         
1754   if (ecode != JNI_OK) {                                                                                                   
1755     return ecode;                                                                                                          
1756   }                                                                                                                        
1757 
1758   ecode = initialize_young_gen_sampling_thread();                                                                          
1759   if (ecode != JNI_OK) {                                                                                                   
1760     return ecode;                                                                                                          
1761   }                                                                                                                        
1762 
1763   G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,                                                      
1764                                                   DirtyCardQ_FL_lock,                                                      
1765                                                   (int)concurrent_refine()->yellow_zone(),                                 
1766                                                   (int)concurrent_refine()->red_zone(),                                    
1767                                                   Shared_DirtyCardQ_lock,                                                  
1768                                                   NULL,  // fl_owner                                                       
1769                                                   true); // init_free_ids                                                  
1770 
1771   dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,                                                                    
1772                                     DirtyCardQ_FL_lock,                                                                    
1773                                     -1, // never trigger processing                                                        
1774                                     -1, // no limit on length                                                              
1775                                     Shared_DirtyCardQ_lock,                                                                
1776                                     &G1BarrierSet::dirty_card_queue_set());                                                
1777 
1778   // Here we allocate the dummy HeapRegion that is required by the                                                         
1779   // G1AllocRegion class.                                                                                                  
1780   HeapRegion* dummy_region = _hrm.get_dummy_region();                                                                      
1781 
1782   // We'll re-use the same region whether the alloc region requires
1783   // BOT updates or not; if it doesn't, a non-young region would
1784   // complain that it cannot support allocations without BOT updates.
1785   // So we tag the dummy region as eden to avoid that.
1786   dummy_region->set_eden();                                                                                                
1787   // Make sure it's full.                                                                                                  
1788   dummy_region->set_top(dummy_region->end());                                                                              
1789   G1AllocRegion::setup(this, dummy_region);                                                                                
1790 
1791   _allocator->init_mutator_alloc_region();                                                                                 
1792 
1793   // Create the monitoring and management support so that
1794   // values in the heap have been properly initialized.
1795   _g1mm = new G1MonitoringSupport(this);                                                                                   
1796 
1797   G1StringDedup::initialize();                                                                                             
1798 
1799   _preserved_marks_set.init(ParallelGCThreads);                                                                            
1800 
1801   _collection_set.initialize(max_regions());                                                                               
1802 
1803   return JNI_OK;                                                                                                           
1804 }                                                                                                                          
1805 
1806 void G1CollectedHeap::stop() {                                                                                             
1807   // Stop all concurrent threads. We do this to make sure these threads                                                    
1808   // do not continue to execute and access resources (e.g. logging)                                                        
1809   // that are destroyed during shutdown.                                                                                   
1810   _cr->stop();                                                                                                             
1811   _young_gen_sampling_thread->stop();                                                                                      
1812   _cm_thread->stop();                                                                                                      
1813   if (G1StringDedup::is_enabled()) {                                                                                       
1814     G1StringDedup::stop();                                                                                                 
1815   }                                                                                                                        
1816 }                                                                                                                          
1817 
1818 void G1CollectedHeap::safepoint_synchronize_begin() {                                                                      
1819   SuspendibleThreadSet::synchronize();                                                                                     
1820 }                                                                                                                          
1821 
1822 void G1CollectedHeap::safepoint_synchronize_end() {                                                                        
1823   SuspendibleThreadSet::desynchronize();                                                                                   
1824 }                                                                                                                          
1825 
1826 size_t G1CollectedHeap::conservative_max_heap_alignment() {                                                                
1827   return HeapRegion::max_region_size();                                                                                    
1828 }                                                                                                                          
1829 
1830 void G1CollectedHeap::post_initialize() {                                                                                  
1831   CollectedHeap::post_initialize();                                                                                        
1832   ref_processing_init();                                                                                                   
1833 }                                                                                                                          
1834 
1835 void G1CollectedHeap::ref_processing_init() {                                                                              
1836   // Reference processing in G1 currently works as follows:                                                                
1837   //                                                                                                                       
1838   // * There are two reference processor instances. One is                                                                 
1839   //   used to record and process discovered references                                                                    
1840   //   during concurrent marking; the other is used to                                                                     
1841   //   record and process references during STW pauses                                                                     
1842   //   (both full and incremental).                                                                                        
1843   // * Both ref processors need to 'span' the entire heap as                                                               
1844   //   the regions in the collection set may be dotted around.                                                             
1845   //                                                                                                                       
1846   // * For the concurrent marking ref processor:                                                                           
1847   //   * Reference discovery is enabled at initial marking.                                                                
1848   //   * Reference discovery is disabled and the discovered
1849   //     references are processed, etc., during remarking.
1850   //   * Reference discovery is MT (see below).                                                                            
1851   //   * Reference discovery requires a barrier (see below).                                                               
1852   //   * Reference processing may or may not be MT                                                                         
1853   //     (depending on the value of ParallelRefProcEnabled                                                                 
1854   //     and ParallelGCThreads).                                                                                           
1855   //   * A full GC disables reference discovery by the CM                                                                  
1856   //     ref processor and abandons any entries on its
1857   //     discovered lists.                                                                                                 
1858   //                                                                                                                       
1859   // * For the STW processor:                                                                                              
1860   //   * Non MT discovery is enabled at the start of a full GC.                                                            
1861   //   * Processing and enqueueing during a full GC is non-MT.                                                             
1862   //   * During a full GC, references are processed after marking.                                                         
1863   //                                                                                                                       
1864   //   * Discovery (may or may not be MT) is enabled at the start                                                          
1865   //     of an incremental evacuation pause.                                                                               
1866   //   * References are processed near the end of a STW evacuation pause.                                                  
1867   //   * For both types of GC:                                                                                             
1868   //     * Discovery is atomic - i.e. not concurrent.                                                                      
1869   //     * Reference discovery will not need a barrier.                                                                    
1870 
1871   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);                                                  
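       // e.g. with -XX:+ParallelRefProcEnabled and -XX:ParallelGCThreads=8,
       // both reference processors below are created with 8 processing threads.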
1872 
1873   // Concurrent Mark ref processor                                                                                         
1874   _ref_processor_cm =                                                                                                      
1875     new ReferenceProcessor(&_is_subject_to_discovery_cm,                                                                   
1876                            mt_processing,                                  // mt processing                                
1877                            ParallelGCThreads,                              // degree of mt processing                      
1878                            (ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery                                 
1879                            MAX2(ParallelGCThreads, ConcGCThreads),         // degree of mt discovery                       
1880                            false,                                          // Reference discovery is not atomic            
1881                            &_is_alive_closure_cm,                          // is alive closure                             
1882                            true);                                          // allow changes to number of processing threads
1883 
1884   // STW ref processor                                                                                                     
1885   _ref_processor_stw =                                                                                                     
1886     new ReferenceProcessor(&_is_subject_to_discovery_stw,                                                                  
1887                            mt_processing,                        // mt processing                                          
1888                            ParallelGCThreads,                    // degree of mt processing                                
1889                            (ParallelGCThreads > 1),              // mt discovery                                           
1890                            ParallelGCThreads,                    // degree of mt discovery                                 
1891                            true,                                 // Reference discovery is atomic                          
1892                            &_is_alive_closure_stw,               // is alive closure                                       
1893                            true);                                // allow changes to number of processing threads          
1894 }                                                                                                                          
1895 
1896 CollectorPolicy* G1CollectedHeap::collector_policy() const {                                                               
1897   return _collector_policy;                                                                                                
1898 }                                                                                                                          
1899 
1900 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {                                                                        
1901   return &_soft_ref_policy;                                                                                                
1902 }                                                                                                                          
1903 
1904 size_t G1CollectedHeap::capacity() const {                                                                                 
1905   return _hrm.length() * HeapRegion::GrainBytes;                                                                           
1906 }                                                                                                                          
1907 
1908 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {                                                        
1909   return _hrm.total_free_bytes();                                                                                          
1910 }                                                                                                                          
1911 
1912 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {                                      
1913   _hot_card_cache->drain(cl, worker_i);                                                                                    
1914 }                                                                                                                          
1915 
1916 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {                               
1917   DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();                                                          
1918   size_t n_completed_buffers = 0;                                                                                          
1919   while (dcqs.apply_closure_during_gc(cl, worker_i)) {                                                                     
1920     n_completed_buffers++;                                                                                                 
1921   }                                                                                                                        
1922   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
1923   dcqs.clear_n_completed_buffers();                                                                                        
1924   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");                                               
1925 }                                                                                                                          
1926 
1927 // Computes the sum of the storage used by the various regions.                                                            
1928 size_t G1CollectedHeap::used() const {                                                                                     
1929   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();                                               
1930   if (_archive_allocator != NULL) {                                                                                        
1931     result += _archive_allocator->used();                                                                                  
1932   }                                                                                                                        
1933   return result;                                                                                                           
1934 }                                                                                                                          
1935 
1936 size_t G1CollectedHeap::used_unlocked() const {                                                                            
1937   return _summary_bytes_used;                                                                                              
1938 }                                                                                                                          
1939 
1940 class SumUsedClosure: public HeapRegionClosure {                                                                           
1941   size_t _used;                                                                                                            
1942 public:                                                                                                                    
1943   SumUsedClosure() : _used(0) {}                                                                                           
1944   bool do_heap_region(HeapRegion* r) {                                                                                     
1945     _used += r->used();                                                                                                    
1946     return false;                                                                                                          
1947   }                                                                                                                        
1948   size_t result() { return _used; }                                                                                        
1949 };                                                                                                                         
1950 
1951 size_t G1CollectedHeap::recalculate_used() const {                                                                         
1952   double recalculate_used_start = os::elapsedTime();                                                                       
1953 
1954   SumUsedClosure blk;                                                                                                      
1955   heap_region_iterate(&blk);                                                                                               
1956 
1957   g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);    
1958   return blk.result();                                                                                                     
1959 }                                                                                                                          
1960 
1961 bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
1962   switch (cause) {                                                                                                         
1963     case GCCause::_java_lang_system_gc:                 return ExplicitGCInvokesConcurrent;                                
1964     case GCCause::_dcmd_gc_run:                         return ExplicitGCInvokesConcurrent;                                
1965     case GCCause::_wb_conc_mark:                        return true;                                                       
1966     default :                                           return false;                                                      
1967   }                                                                                                                        
1968 }                                                                                                                          
1969 
1970 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {                                                 
1971   switch (cause) {                                                                                                         
1972     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;                                              
1973     case GCCause::_g1_humongous_allocation: return true;                                                                   
1974     default:                                return is_user_requested_concurrent_full_gc(cause);                            
1975   }                                                                                                                        
1976 }                                                                                                                          
1977 
1978 #ifndef PRODUCT                                                                                                            
1979 void G1CollectedHeap::allocate_dummy_regions() {                                                                           
1980   // Let's fill up most of the region                                                                                      
1981   size_t word_size = HeapRegion::GrainWords - 1024;                                                                        
1982   // And as a result the region we'll allocate will be humongous.                                                          
1983   guarantee(is_humongous(word_size), "sanity");                                                                            
1984 
1985   // _filler_array_max_size is set to humongous object threshold                                                           
1986   // but temporarily change it to use CollectedHeap::fill_with_object().                                                   
1987   SizeTFlagSetting fs(_filler_array_max_size, word_size);                                                                  
1988 
1989   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {                                                                        
1990     // Let's use the existing mechanism for the allocation                                                                 
1991     HeapWord* dummy_obj = humongous_obj_allocate(word_size);                                                               
1992     if (dummy_obj != NULL) {                                                                                               
1993       MemRegion mr(dummy_obj, word_size);                                                                                  
1994       CollectedHeap::fill_with_object(mr);                                                                                 
1995     } else {                                                                                                               
1996       // If we can't allocate once, we probably cannot allocate                                                            
1997       // again. Let's get out of the loop.                                                                                 
1998       break;                                                                                                               
1999     }                                                                                                                      
2000   }                                                                                                                        
2001 }                                                                                                                          
2002 #endif // !PRODUCT                                                                                                         
2003 
2004 void G1CollectedHeap::increment_old_marking_cycles_started() {                                                             
2005   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||                                                   
2006          _old_marking_cycles_started == _old_marking_cycles_completed + 1,                                                 
2007          "Wrong marking cycle count (started: %d, completed: %d)",                                                         
2008          _old_marking_cycles_started, _old_marking_cycles_completed);                                                      
2009 
2010   _old_marking_cycles_started++;                                                                                           
2011 }                                                                                                                          
2012 
2013 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {                                            
2014   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);                                                    
2015 
2016   // We assume that if concurrent == true, then the caller is a                                                            
2017   // concurrent thread that has joined the Suspendible Thread
2018   // Set. If there's ever a cheap way to check this, we should add an                                                      
2019   // assert here.                                                                                                          
2020 
2021   // Given that this method is called at the end of a Full GC or of a                                                      
2022   // concurrent cycle, and those can be nested (i.e., a Full GC can                                                        
2023   // interrupt a concurrent cycle), the number of full collections                                                         
2024   // completed should be either one (in the case where there was no                                                        
2025   // nesting) or two (when a Full GC interrupted a concurrent cycle)                                                       
2026   // behind the number of full collections started.                                                                        
2027 
2028   // This is the case for the inner caller, i.e. a Full GC.                                                                
2029   assert(concurrent ||                                                                                                     
2030          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||                                             
2031          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),                                               
2032          "for inner caller (Full GC): _old_marking_cycles_started = %u "                                                   
2033          "is inconsistent with _old_marking_cycles_completed = %u",                                                        
2034          _old_marking_cycles_started, _old_marking_cycles_completed);                                                      
2035 
2036   // This is the case for the outer caller, i.e. the concurrent cycle.                                                     
2037   assert(!concurrent ||                                                                                                    
2038          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),                                               
2039          "for outer caller (concurrent cycle): "                                                                           
2040          "_old_marking_cycles_started = %u "                                                                               
2041          "is inconsistent with _old_marking_cycles_completed = %u",                                                        
2042          _old_marking_cycles_started, _old_marking_cycles_completed);                                                      
2043 
2044   _old_marking_cycles_completed += 1;                                                                                      
2045 
2046   // We need to clear the "in_progress" flag in the CM thread before                                                       
2047   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2048   // is set) so that if a waiter requests another System.gc() it doesn't                                                   
2049   // incorrectly see that a marking cycle is still in progress.                                                            
2050   if (concurrent) {                                                                                                        
2051     _cm_thread->set_idle();                                                                                                
2052   }                                                                                                                        
2053 
2054   // This notify_all() will ensure that a thread that called                                                               
2055   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2056   // and is waiting for a full GC to finish will be woken up. It is
2057   // waiting in VM_G1CollectForAllocation::doit_epilogue().                                                                
2058   FullGCCount_lock->notify_all();                                                                                          
2059 }                                                                                                                          
2060 
2061 void G1CollectedHeap::collect(GCCause::Cause cause) {                                                                      
2062   assert_heap_not_locked();                                                                                                
2063 
2064   uint gc_count_before;                                                                                                    
2065   uint old_marking_count_before;                                                                                           
2066   uint full_gc_count_before;                                                                                               
2067   bool retry_gc;                                                                                                           
2068 
2069   do {                                                                                                                     
2070     retry_gc = false;                                                                                                      
2071 
2072     {                                                                                                                      
2073       MutexLocker ml(Heap_lock);                                                                                           
2074 
2075       // Read the GC count while holding the Heap_lock                                                                     
2076       gc_count_before = total_collections();                                                                               
2077       full_gc_count_before = total_full_collections();                                                                     
2078       old_marking_count_before = _old_marking_cycles_started;                                                              
2079     }                                                                                                                      
2080 
2081     if (should_do_concurrent_full_gc(cause)) {                                                                             
2082       // Schedule an initial-mark evacuation pause that will start a                                                       
2083       // concurrent cycle. We're setting word_size to 0 which means that                                                   
2084       // we are not requesting a post-GC allocation.                                                                       
2085       VM_G1CollectForAllocation op(0,     /* word_size */                                                                  
2086                                    gc_count_before,                                                                        
2087                                    cause,                                                                                  
2088                                    true,  /* should_initiate_conc_mark */                                                  
2089                                    g1_policy()->max_pause_time_ms());                                                      
2090       VMThread::execute(&op);                                                                                              
2091       if (!op.pause_succeeded()) {                                                                                         
2092         if (old_marking_count_before == _old_marking_cycles_started) {                                                     
2093           retry_gc = op.should_retry_gc();                                                                                 
2094         } else {                                                                                                           
2095           // A Full GC happened while we were trying to schedule the                                                       
2096           // initial-mark GC. No point in starting a new cycle given                                                       
2097           // that the whole heap was collected anyway.                                                                     
2098         }                                                                                                                  
2099 
2100         if (retry_gc) {                                                                                                    
2101           if (GCLocker::is_active_and_needs_gc()) {                                                                        
2102             GCLocker::stall_until_clear();                                                                                 
2103           }                                                                                                                
2104         }                                                                                                                  
2105       }                                                                                                                    
2106     } else {                                                                                                               
2107       if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc                                                   
2108           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {                                                               
2109 
2110         // Schedule a standard evacuation pause. We're setting word_size                                                   
2111         // to 0 which means that we are not requesting a post-GC allocation.                                               
2112         VM_G1CollectForAllocation op(0,     /* word_size */                                                                
2113                                      gc_count_before,                                                                      
2114                                      cause,                                                                                
2115                                      false, /* should_initiate_conc_mark */                                                
2116                                      g1_policy()->max_pause_time_ms());                                                    
2117         VMThread::execute(&op);                                                                                            
2118       } else {                                                                                                             
2119         // Schedule a Full GC.                                                                                             
2120         VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);                                                 
2121         VMThread::execute(&op);                                                                                            
2122       }                                                                                                                    
2123     }                                                                                                                      
2124   } while (retry_gc);                                                                                                      
2125 }                                                                                                                          
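// Illustrative caller-side sketch of the dispatch above (the causes shown are
// examples only, not an exhaustive mapping):
//
//   G1CollectedHeap* g1h = G1CollectedHeap::heap();
//   // May schedule an initial-mark pause and start a concurrent cycle,
//   // depending on should_do_concurrent_full_gc(cause):
//   g1h->collect(GCCause::_java_lang_system_gc);
//   // Whitebox young GC request: a standard evacuation pause:
//   g1h->collect(GCCause::_wb_young_gc);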
2126 
2127 bool G1CollectedHeap::is_in(const void* p) const {                                                                         
2128   if (_hrm.reserved().contains(p)) {                                                                                       
2129     // Given that we know that p is in the reserved space,                                                                 
2130     // heap_region_containing() should successfully                                                                        
2131     // return the containing region.                                                                                       
2132     HeapRegion* hr = heap_region_containing(p);                                                                            
2133     return hr->is_in(p);                                                                                                   
2134   } else {                                                                                                                 
2135     return false;                                                                                                          
2136   }                                                                                                                        
2137 }                                                                                                                          
2138 
2139 #ifdef ASSERT                                                                                                              
2140 bool G1CollectedHeap::is_in_exact(const void* p) const {                                                                   
2141   bool contains = reserved_region().contains(p);                                                                           
2142   bool available = _hrm.is_available(addr_to_region((HeapWord*)p));                                                        
2143   if (contains && available) {                                                                                             
2144     return true;                                                                                                           
2145   } else {                                                                                                                 
2146     return false;                                                                                                          
2147   }                                                                                                                        
2148 }                                                                                                                          
2149 #endif                                                                                                                     
2150 
2151 // Iteration functions.                                                                                                    
2152 
2153 // Iterates an ObjectClosure over all objects within a HeapRegion.                                                         
2154 
2155 class IterateObjectClosureRegionClosure: public HeapRegionClosure {                                                        
2156   ObjectClosure* _cl;                                                                                                      
2157 public:                                                                                                                    
2158   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}                                                        
2159   bool do_heap_region(HeapRegion* r) {                                                                                     
2160     if (!r->is_continues_humongous()) {                                                                                    
2161       r->object_iterate(_cl);                                                                                              
2162     }                                                                                                                      
2163     return false;                                                                                                          
2164   }                                                                                                                        
2165 };                                                                                                                         
2166 
2167 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {                                                                  
2168   IterateObjectClosureRegionClosure blk(cl);                                                                               
2169   heap_region_iterate(&blk);                                                                                               
2170 }                                                                                                                          
2171 
2172 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {                                                   
2173   _hrm.iterate(cl);                                                                                                        
2174 }                                                                                                                          
2175 
2176 void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,                                    
2177                                                                  HeapRegionClaimer *hrclaimer,                             
2178                                                                  uint worker_id) const {                                   
2179   _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));                                                
2180 }                                                                                                                          
2181 
2182 void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,                                            
2183                                                          HeapRegionClaimer *hrclaimer) const {                             
2184   _hrm.par_iterate(cl, hrclaimer, 0);                                                                                      
2185 }                                                                                                                          
2186 
2187 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {                                                      
2188   _collection_set.iterate(cl);                                                                                             
2189 }                                                                                                                          
2190 
2191 void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {                                 
2192   _collection_set.iterate_from(cl, worker_id, workers()->active_workers());                                                
2193 }                                                                                                                          
2194 
2195 HeapWord* G1CollectedHeap::block_start(const void* addr) const {                                                           
2196   HeapRegion* hr = heap_region_containing(addr);                                                                           
2197   return hr->block_start(addr);                                                                                            
2198 }                                                                                                                          
2199 
2200 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {                                                           
2201   HeapRegion* hr = heap_region_containing(addr);                                                                           
2202   return hr->block_size(addr);                                                                                             
2203 }                                                                                                                          
2204 
2205 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {                                                           
2206   HeapRegion* hr = heap_region_containing(addr);                                                                           
2207   return hr->block_is_obj(addr);                                                                                           
2208 }                                                                                                                          
2209 
2210 bool G1CollectedHeap::supports_tlab_allocation() const {                                                                   
2211   return true;                                                                                                             
2212 }                                                                                                                          
2213 
2214 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {                                                             
2215   return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;                           
2216 }                                                                                                                          
2217 
2218 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {                                                                 
2219   return _eden.length() * HeapRegion::GrainBytes;                                                                          
2220 }                                                                                                                          
2221 
2222 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2223 // must be equal to the humongous object limit.                                                                            
2224 size_t G1CollectedHeap::max_tlab_size() const {                                                                            
2225   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);                                                
2226 }                                                                                                                          
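// Worked example (assuming the threshold is half a region, which is the usual
// G1 setting): with 1M regions the humongous object limit is 512K, so
// max_tlab_size() returns the word count for 512K and a TLAB can never hold
// a humongous object.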
2227 
2228 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {                                                     
2229   return _allocator->unsafe_max_tlab_alloc();                                                                              
2230 }                                                                                                                          
2231 
2232 size_t G1CollectedHeap::max_capacity() const {                                                                             
2233   return _hrm.reserved().byte_size();                                                                                      
2234 }                                                                                                                          
2235 
2236 jlong G1CollectedHeap::millis_since_last_gc() {                                                                            
2237   // See the notes in GenCollectedHeap::millis_since_last_gc()                                                             
2238   // for more information about the implementation.                                                                        
2239   jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -                                                          
2240     _g1_policy->collection_pause_end_millis();                                                                             
2241   if (ret_val < 0) {                                                                                                       
2242     log_warning(gc)("millis_since_last_gc() would return: " JLONG_FORMAT
2243       ". Returning zero instead.", ret_val);
2244     return 0;                                                                                                              
2245   }                                                                                                                        
2246   return ret_val;                                                                                                          
2247 }                                                                                                                          
2248 
2249 void G1CollectedHeap::deduplicate_string(oop str) {                                                                        
2250   assert(java_lang_String::is_instance(str), "invariant");                                                                 
2251 
2252   if (G1StringDedup::is_enabled()) {                                                                                       
2253     G1StringDedup::deduplicate(str);                                                                                       
2254   }                                                                                                                        
2255 }                                                                                                                          
2256 
2257 void G1CollectedHeap::prepare_for_verify() {                                                                               
2258   _verifier->prepare_for_verify();                                                                                         
2259 }                                                                                                                          
2260 
2261 void G1CollectedHeap::verify(VerifyOption vo) {                                                                            
2262   _verifier->verify(vo);                                                                                                   
2263 }                                                                                                                          
2264 
2265 bool G1CollectedHeap::supports_concurrent_phase_control() const {                                                          
2266   return true;                                                                                                             
2267 }                                                                                                                          
2268 
2269 const char* const* G1CollectedHeap::concurrent_phases() const {                                                            
2270   return _cm_thread->concurrent_phases();                                                                                  
2271 }                                                                                                                          
2272 
2273 bool G1CollectedHeap::request_concurrent_phase(const char* phase) {                                                        
2274   return _cm_thread->request_concurrent_phase(phase);                                                                      
2275 }                                                                                                                          
2276 
2277 class PrintRegionClosure: public HeapRegionClosure {                                                                       
2278   outputStream* _st;                                                                                                       
2279 public:                                                                                                                    
2280   PrintRegionClosure(outputStream* st) : _st(st) {}                                                                        
2281   bool do_heap_region(HeapRegion* r) {                                                                                     
2282     r->print_on(_st);                                                                                                      
2283     return false;                                                                                                          
2284   }                                                                                                                        
2285 };                                                                                                                         
2286 
2287 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,                                                                      
2288                                        const HeapRegion* hr,                                                               
2289                                        const VerifyOption vo) const {                                                      
2290   switch (vo) {                                                                                                            
2291   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);                                                         
2292   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);                                                          
2293   case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);                                                    
2294   default:                            ShouldNotReachHere();                                                                
2295   }                                                                                                                        
2296   return false; // keep some compilers happy                                                                               
2297 }                                                                                                                          
2298 
2299 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,                                                                      
2300                                        const VerifyOption vo) const {                                                      
2301   switch (vo) {                                                                                                            
2302   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);                                                             
2303   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);                                                              
2304   case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj);                                                        
2305   default:                            ShouldNotReachHere();                                                                
2306   }                                                                                                                        
2307   return false; // keep some compilers happy                                                                               
2308 }                                                                                                                          
2309 
2310 void G1CollectedHeap::print_heap_regions() const {                                                                         
2311   LogTarget(Trace, gc, heap, region) lt;                                                                                   
2312   if (lt.is_enabled()) {                                                                                                   
2313     LogStream ls(lt);                                                                                                      
2314     print_regions_on(&ls);                                                                                                 
2315   }                                                                                                                        
2316 }                                                                                                                          
2317 
2318 void G1CollectedHeap::print_on(outputStream* st) const {                                                                   
2319   st->print(" %-20s", "garbage-first heap");                                                                               
2320   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",                                                              
2321             capacity()/K, used_unlocked()/K);                                                                              
2322   st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",                                                                           
2323             p2i(_hrm.reserved().start()),                                                                                  
2324             p2i(_hrm.reserved().end()));                                                                                   
2325   st->cr();                                                                                                                
2326   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);                                               
2327   uint young_regions = young_regions_count();                                                                              
2328   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,                                                                
2329             (size_t) young_regions * HeapRegion::GrainBytes / K);                                                          
2330   uint survivor_regions = survivor_regions_count();                                                                        
2331   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,                                                           
2332             (size_t) survivor_regions * HeapRegion::GrainBytes / K);                                                       
2333   st->cr();                                                                                                                
2334   MetaspaceUtils::print_on(st);                                                                                            
2335 }                                                                                                                          
2336 
2337 void G1CollectedHeap::print_regions_on(outputStream* st) const {                                                           
2338   st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "                                                   
2339                "HS=humongous(starts), HC=humongous(continues), "                                                           
2340                "CS=collection set, F=free, A=archive, "                                                                    
2341                "TAMS=top-at-mark-start (previous, next)");                                                                 
2342   PrintRegionClosure blk(st);                                                                                              
2343   heap_region_iterate(&blk);                                                                                               
2344 }                                                                                                                          
2345 
2346 void G1CollectedHeap::print_extended_on(outputStream* st) const {                                                          
2347   print_on(st);                                                                                                            
2348 
2349   // Print the per-region information.                                                                                     
2350   print_regions_on(st);                                                                                                    
2351 }                                                                                                                          
2352 
2353 void G1CollectedHeap::print_on_error(outputStream* st) const {                                                             
2354   this->CollectedHeap::print_on_error(st);                                                                                 
2355 
2356   if (_cm != NULL) {                                                                                                       
2357     st->cr();                                                                                                              
2358     _cm->print_on_error(st);                                                                                               
2359   }                                                                                                                        
2360 }                                                                                                                          
2361 
2362 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {                                                        
2363   workers()->print_worker_threads_on(st);                                                                                  
2364   _cm_thread->print_on(st);                                                                                                
2365   st->cr();                                                                                                                
2366   _cm->print_worker_threads_on(st);                                                                                        
2367   _cr->print_threads_on(st);                                                                                               
2368   _young_gen_sampling_thread->print_on(st);                                                                                
2369   if (G1StringDedup::is_enabled()) {                                                                                       
2370     G1StringDedup::print_worker_threads_on(st);                                                                            
2371   }                                                                                                                        
2372 }                                                                                                                          
2373 
2374 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {                                                             
2375   workers()->threads_do(tc);                                                                                               
2376   tc->do_thread(_cm_thread);                                                                                               
2377   _cm->threads_do(tc);                                                                                                     
2378   _cr->threads_do(tc);                                                                                                     
2379   tc->do_thread(_young_gen_sampling_thread);                                                                               
2380   if (G1StringDedup::is_enabled()) {                                                                                       
2381     G1StringDedup::threads_do(tc);                                                                                         
2382   }                                                                                                                        
2383 }                                                                                                                          
2384 
2385 void G1CollectedHeap::print_tracing_info() const {                                                                         
2386   g1_rem_set()->print_summary_info();                                                                                      
2387   concurrent_mark()->print_summary_info();                                                                                 
2388 }                                                                                                                          
2389 
2390 #ifndef PRODUCT                                                                                                            
2391 // Helpful for debugging RSet issues.                                                                                      
2392 
2393 class PrintRSetsClosure : public HeapRegionClosure {                                                                       
2394 private:                                                                                                                   
2395   const char* _msg;                                                                                                        
2396   size_t _occupied_sum;                                                                                                    
2397 
2398 public:                                                                                                                    
2399   bool do_heap_region(HeapRegion* r) {                                                                                     
2400     HeapRegionRemSet* hrrs = r->rem_set();                                                                                 
2401     size_t occupied = hrrs->occupied();                                                                                    
2402     _occupied_sum += occupied;                                                                                             
2403 
2404     tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));                                             
2405     if (occupied == 0) {                                                                                                   
2406       tty->print_cr("  RSet is empty");                                                                                    
2407     } else {                                                                                                               
2408       hrrs->print();                                                                                                       
2409     }                                                                                                                      
2410     tty->print_cr("----------");                                                                                           
2411     return false;                                                                                                          
2412   }                                                                                                                        
2413 
2414   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {                                                       
2415     tty->cr();                                                                                                             
2416     tty->print_cr("========================================");                                                             
2417     tty->print_cr("%s", msg);                                                                                              
2418     tty->cr();                                                                                                             
2419   }                                                                                                                        
2420 
2421   ~PrintRSetsClosure() {                                                                                                   
2422     tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);                                                            
2423     tty->print_cr("========================================");                                                             
2424     tty->cr();                                                                                                             
2425   }                                                                                                                        
2426 };                                                                                                                         
2427 
2428 void G1CollectedHeap::print_cset_rsets() {                                                                                 
2429   PrintRSetsClosure cl("Printing CSet RSets");                                                                             
2430   collection_set_iterate(&cl);                                                                                             
2431 }                                                                                                                          
2432 
2433 void G1CollectedHeap::print_all_rsets() {                                                                                  
2434   PrintRSetsClosure cl("Printing All RSets");
2435   heap_region_iterate(&cl);                                                                                                
2436 }                                                                                                                          
2437 #endif // PRODUCT                                                                                                          
2438 
2439 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {                                                                  
2440 
2441   size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;                                          
2442   size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;                                  
2443   size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();                                                
2444 
2445   size_t eden_capacity_bytes =                                                                                             
2446     (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;                              
2447 
2448   VirtualSpaceSummary heap_summary = create_heap_space_summary();                                                          
2449   return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,                                                           
2450                        eden_capacity_bytes, survivor_used_bytes, num_regions());                                           
2451 }                                                                                                                          
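// Worked example for the sizes above (illustrative numbers only): with a young
// list target of 20 regions, 4 survivor regions and 1M GrainBytes,
// survivor_used_bytes is 4M and eden_capacity_bytes is 20M - 4M = 16M.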
2452 
2453 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {                                                
2454   return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),                                          
2455                        stats->unused(), stats->used(), stats->region_end_waste(),                                          
2456                        stats->regions_filled(), stats->direct_allocated(),                                                 
2457                        stats->failure_used(), stats->failure_waste());                                                     
2458 }                                                                                                                          
2459 
2460 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {                                           
2461   const G1HeapSummary& heap_summary = create_g1_heap_summary();                                                            
2462   gc_tracer->report_gc_heap_summary(when, heap_summary);                                                                   
2463 
2464   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();                                                  
2465   gc_tracer->report_metaspace_summary(when, metaspace_summary);                                                            
2466 }                                                                                                                          
2467 
2468 G1CollectedHeap* G1CollectedHeap::heap() {                                                                                 
2469   CollectedHeap* heap = Universe::heap();                                                                                  
2470   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");                                                 
2471   assert(heap->kind() == CollectedHeap::G1, "Invalid name");                                                               
2472   return (G1CollectedHeap*)heap;                                                                                           
2473 }                                                                                                                          
2474 
2475 void G1CollectedHeap::gc_prologue(bool full) {                                                                             
2476   // always_do_update_barrier = false;                                                                                     
2477   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");                                                
2478 
2479   // This summary needs to be printed before incrementing total collections.                                               
2480   g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());                                  
2481 
2482   // Update common counters.                                                                                               
2483   increment_total_collections(full /* full gc */);                                                                         
2484   if (full) {                                                                                                              
2485     increment_old_marking_cycles_started();                                                                                
2486   }                                                                                                                        
2487 
2488   // Fill TLABs and such
2489   double start = os::elapsedTime();                                                                                        
2490   accumulate_statistics_all_tlabs();                                                                                       
2491   ensure_parsability(true);                                                                                                
2492   g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);                           
2493 }                                                                                                                          
2494 
2495 void G1CollectedHeap::gc_epilogue(bool full) {                                                                             
2496   // Update common counters.                                                                                               
2497   if (full) {                                                                                                              
2498     // Update the number of full collections that have been completed.                                                     
2499     increment_old_marking_cycles_completed(false /* concurrent */);                                                        
2500   }                                                                                                                        
2501 
2502   // We are at the end of the GC. The total collections counter has already been increased.
2503   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);                               
2504 
2505   // FIXME: what is this about?                                                                                            
2506   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"                                                        
2507   // is set.                                                                                                               
2508 #if COMPILER2_OR_JVMCI                                                                                                     
2509   assert(DerivedPointerTable::is_empty(), "derived pointer present");                                                      
2510 #endif                                                                                                                     
2511   // always_do_update_barrier = true;                                                                                      
2512 
2513   double start = os::elapsedTime();                                                                                        
2514   resize_all_tlabs();                                                                                                      
2515   g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);                            
2516 
2517   MemoryService::track_memory_usage();                                                                                     
2518   // We have just completed a GC. Update the soft reference                                                                
2519   // policy with the new heap occupancy                                                                                    
2520   Universe::update_heap_info_at_gc();                                                                                      
2521 }                                                                                                                          
2522 
2523 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,                                                           
2524                                                uint gc_count_before,                                                       
2525                                                bool* succeeded,                                                            
2526                                                GCCause::Cause gc_cause) {                                                  
2527   assert_heap_not_locked_and_not_at_safepoint();                                                                           
2528   VM_G1CollectForAllocation op(word_size,                                                                                  
2529                                gc_count_before,                                                                            
2530                                gc_cause,                                                                                   
2531                                false, /* should_initiate_conc_mark */                                                      
2532                                g1_policy()->max_pause_time_ms());                                                          
2533   VMThread::execute(&op);                                                                                                  
2534 
2535   HeapWord* result = op.result();                                                                                          
2536   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();                                                    
2537   assert(result == NULL || ret_succeeded,                                                                                  
2538          "the result should be NULL if the VM did not succeed");                                                           
2539   *succeeded = ret_succeeded;                                                                                              
2540 
2541   assert_heap_not_locked();                                                                                                
2542   return result;                                                                                                           
2543 }                                                                                                                          
2544 
2545 void G1CollectedHeap::do_concurrent_mark() {                                                                               
2546   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);                                                              
2547   if (!_cm_thread->in_progress()) {                                                                                        
2548     _cm_thread->set_started();                                                                                             
2549     CGC_lock->notify();                                                                                                    
2550   }                                                                                                                        
2551 }                                                                                                                          
2552 
2553 size_t G1CollectedHeap::pending_card_num() {                                                                               
2554   size_t extra_cards = 0;                                                                                                  
2555   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {                                            
2556     DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(curr);                                                       
2557     extra_cards += dcq.size();                                                                                             
2558   }                                                                                                                        
2559   DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();                                                          
2560   size_t buffer_size = dcqs.buffer_size();                                                                                 
2561   size_t buffer_num = dcqs.completed_buffers_num();                                                                        
2562 
2563   return buffer_size * buffer_num + extra_cards;                                                                           
2564 }                                                                                                                          
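// Worked example (illustrative numbers only): with a buffer size of 256 cards,
// 3 completed buffers, and two Java threads holding 40 and 12 entries in their
// thread-local queues, this returns 256 * 3 + 40 + 12 = 820 pending cards.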
2565 
2566 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {                                          
2567   // We don't nominate objects with many remembered set entries, on                                                        
2568   // the assumption that such objects are likely still live.                                                               
2569   HeapRegionRemSet* rem_set = r->rem_set();                                                                                
2570 
2571   return G1EagerReclaimHumongousObjectsWithStaleRefs ?                                                                     
2572          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :                                                
2573          G1EagerReclaimHumongousObjects && rem_set->is_empty();                                                            
2574 }                                                                                                                          
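// Summarizing the expression above:
//   with +G1EagerReclaimHumongousObjectsWithStaleRefs: candidate iff the
//     remembered set has at most G1RSetSparseRegionEntries entries;
//   otherwise: candidate iff +G1EagerReclaimHumongousObjects and the
//     remembered set is empty.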
2575 
2576 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {                                              
2577  private:                                                                                                                  
2578   size_t _total_humongous;                                                                                                 
2579   size_t _candidate_humongous;                                                                                             
2580 
2581   DirtyCardQueue _dcq;                                                                                                     
2582 
2583   bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {                                     
2584     assert(region->is_starts_humongous(), "Must start a humongous object");                                                
2585 
2586     oop obj = oop(region->bottom());                                                                                       
2587 
2588     // Dead objects cannot be eager reclaim candidates. Due to class                                                       
2589     // unloading it is unsafe to query their classes so we return early.                                                   
2590     if (g1h->is_obj_dead(obj, region)) {                                                                                   
2591       return false;                                                                                                        
2592     }                                                                                                                      
2593 
2594     // If we do not have a complete remembered set for the region, then we can                                             
2595     // not be sure that we have all references to it.                                                                      
2596     if (!region->rem_set()->is_complete()) {                                                                               
2597       return false;                                                                                                        
2598     }                                                                                                                      
2599     // Candidate selection must satisfy the following constraints                                                          
2600     // while concurrent marking is in progress:                                                                            
2601     //                                                                                                                     
2602     // * In order to maintain SATB invariants, an object must not be                                                       
2603     // reclaimed if it was allocated before the start of marking and                                                       
2604     // has not had its references scanned.  Such an object must have                                                       
2605     // its references (including type metadata) scanned to ensure no                                                       
2606     // live objects are missed by the marking process.  Objects                                                            
2607     // allocated after the start of concurrent marking don't need to                                                       
2608     // be scanned.                                                                                                         
2609     //                                                                                                                     
2610     // * An object must not be reclaimed if it is on the concurrent                                                        
2611     // mark stack.  Objects allocated after the start of concurrent                                                        
2612     // marking are never pushed on the mark stack.                                                                         
2613     //                                                                                                                     
2614     // Nominating only objects allocated after the start of concurrent                                                     
2615     // marking is sufficient to meet both constraints.  This may miss                                                      
2616     // some objects that satisfy the constraints, but the marking data                                                     
2617     // structures don't support efficiently performing the needed                                                          
2618     // additional tests or scrubbing of the mark stack.                                                                    
2619     //                                                                                                                     
2620     // However, we presently only nominate is_typeArray() objects.                                                         
2621     // A humongous object containing references induces remembered                                                         
2622     // set entries on other regions.  In order to reclaim such an                                                          
2623     // object, those remembered sets would need to be cleaned up.                                                          
2624     //                                                                                                                     
2625     // We also treat is_typeArray() objects specially, allowing them                                                       
2626     // to be reclaimed even if allocated before the start of                                                               
2627     // concurrent mark.  For this we rely on mark stack insertion to                                                       
2628     // exclude is_typeArray() objects, preventing reclaiming an object                                                     
2629     // that is in the mark stack.  We also rely on the metadata for                                                        
2630     // such objects to be built-in and so ensured to be kept live.                                                         
2631     // Frequent allocation and drop of large binary blobs is an                                                            
2632     // important use case for eager reclaim, and this special handling                                                     
2633     // may reduce needed headroom.                                                                                         
2634 
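    // In short: nominate only type arrays (which contain no references into
    // other regions) that also pass the remembered set occupancy check above.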
2635     return obj->is_typeArray() &&                                                                                          
2636            g1h->is_potential_eager_reclaim_candidate(region);                                                              
2637   }                                                                                                                        
2638 
2639  public:                                                                                                                   
2640   RegisterHumongousWithInCSetFastTestClosure()                                                                             
2641   : _total_humongous(0),                                                                                                   
2642     _candidate_humongous(0),                                                                                               
2643     _dcq(&G1BarrierSet::dirty_card_queue_set()) {                                                                          
2644   }                                                                                                                        
2645 
2646   virtual bool do_heap_region(HeapRegion* r) {                                                                             
2647     if (!r->is_starts_humongous()) {                                                                                       
2648       return false;                                                                                                        
2649     }                                                                                                                      
2650     G1CollectedHeap* g1h = G1CollectedHeap::heap();                                                                        
2651 
2652     bool is_candidate = humongous_region_is_candidate(g1h, r);                                                             
2653     uint rindex = r->hrm_index();                                                                                          
2654     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);                                                            
2655     if (is_candidate) {                                                                                                    
2656       _candidate_humongous++;                                                                                              
2657       g1h->register_humongous_region_with_cset(rindex);                                                                    
2658       // Is_candidate already filters out humongous objects with large remembered sets.
2659       // If we have a humongous object with only a few remembered set entries, we simply flush these
2660       // remembered set entries into the DCQS. That will result in automatic                                               
2661       // re-evaluation of their remembered set entries during the following evacuation                                     
2662       // phase.                                                                                                            
2663       if (!r->rem_set()->is_empty()) {                                                                                     
2664         guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),                                   
2665                   "Found a not-small remembered set here. This is inconsistent with previous assumptions.");               
2666         G1CardTable* ct = g1h->card_table();                                                                               
2667         HeapRegionRemSetIterator hrrs(r->rem_set());                                                                       
2668         size_t card_index;                                                                                                 
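          // Each card recorded in this region's remembered set identifies a location
          // elsewhere in the heap that may hold a reference into this region; redirty
          // it so that it is re-examined during the evacuation phase.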
2669         while (hrrs.has_next(card_index)) {                                                                                
2670           jbyte* card_ptr = (jbyte*)ct->byte_for_index(card_index);                                                        
2671           // The remembered set might contain references to already freed                                                  
2672           // regions. Filter out such entries to avoid failing card table                                                  
2673           // verification.                                                                                                 
2674           if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) {                                                          
2675             if (*card_ptr != G1CardTable::dirty_card_val()) {                                                              
2676               *card_ptr = G1CardTable::dirty_card_val();                                                                   
2677               _dcq.enqueue(card_ptr);                                                                                      
2678             }                                                                                                              
2679           }                                                                                                                
2680         }                                                                                                                  
2681         assert(hrrs.n_yielded() == r->rem_set()->occupied(),                                                               
2682                "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",        
2683                hrrs.n_yielded(), r->rem_set()->occupied());                                                                
2684         // We should only clear the card based remembered set here as we will not                                          
2685         // implicitly rebuild anything else during eager reclaim. Note that at the moment                                  
2686       // (and probably never) we do not enter this path if there are other kinds of
2687         // remembered sets for this region.                                                                                
2688         r->rem_set()->clear_locked(true /* only_cardset */);                                                               
2689         // Clear_locked() above sets the state to Empty. However we want to continue                                       
2690         // collecting remembered set entries for humongous regions that were not                                           
2691         // reclaimed.                                                                                                      
2692         r->rem_set()->set_state_complete();                                                                                
2693       }                                                                                                                    
2694       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");             
2695     }                                                                                                                      
2696     _total_humongous++;                                                                                                    
2697 
2698     return false;                                                                                                          
2699   }                                                                                                                        
2700 
2701   size_t total_humongous() const { return _total_humongous; }                                                              
2702   size_t candidate_humongous() const { return _candidate_humongous; }                                                      
2703 
2704   void flush_rem_set_entries() { _dcq.flush(); }                                                                           
2705 };                                                                                                                         
2706 
2707 void G1CollectedHeap::register_humongous_regions_with_cset() {                                                             
2708   if (!G1EagerReclaimHumongousObjects) {                                                                                   
2709     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);                                            
2710     return;                                                                                                                
2711   }                                                                                                                        
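  // Time the candidate scan using the raw elapsed counter; the tick delta is
  // converted to milliseconds below before being recorded in the phase times.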
2712   double time = os::elapsed_counter();                                                                                     
2713 
2714   // Collect reclaim candidate information and register candidates with cset.                                              
2715   RegisterHumongousWithInCSetFastTestClosure cl;                                                                           
2716   heap_region_iterate(&cl);                                                                                                
2717 
2718   time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;                                      
2719   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,                                                    
2720                                                                   cl.total_humongous(),                                    
2721                                                                   cl.candidate_humongous());                               
2722   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;                                                        
2723 
2724   // Finally flush all remembered set entries that need re-checking into the global DCQS.
2725   cl.flush_rem_set_entries();                                                                                              
2726 }                                                                                                                          
2727 
2728 class VerifyRegionRemSetClosure : public HeapRegionClosure {                                                               
2729   public:                                                                                                                  
2730     bool do_heap_region(HeapRegion* hr) {                                                                                  
2731       if (!hr->is_archive() && !hr->is_continues_humongous()) {                                                            
2732         hr->verify_rem_set();                                                                                              
2733       }                                                                                                                    
2734       return false;                                                                                                        
2735     }                                                                                                                      
2736 };                                                                                                                         
2737 
2738 uint G1CollectedHeap::num_task_queues() const {                                                                            
2739   return _task_queues->size();                                                                                             
2740 }                                                                                                                          
2741 
2742 #if TASKQUEUE_STATS                                                                                                        
2743 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {                                                  
2744   st->print_raw_cr("GC Task Stats");                                                                                       
2745   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();                                                    
2746   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();                                                    
2747 }                                                                                                                          
2748 
2749 void G1CollectedHeap::print_taskqueue_stats() const {                                                                      
2750   if (!log_is_enabled(Trace, gc, task, stats)) {                                                                           
2751     return;                                                                                                                
2752   }                                                                                                                        
2753   Log(gc, task, stats) log;                                                                                                
2754   ResourceMark rm;                                                                                                         
2755   LogStream ls(log.trace());                                                                                               
2756   outputStream* st = &ls;                                                                                                  
2757 
2758   print_taskqueue_stats_hdr(st);                                                                                           
2759 
2760   TaskQueueStats totals;                                                                                                   
2761   const uint n = num_task_queues();                                                                                        
2762   for (uint i = 0; i < n; ++i) {                                                                                           
2763     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();                                                        
2764     totals += task_queue(i)->stats;                                                                                        
2765   }                                                                                                                        
2766   st->print_raw("tot "); totals.print(st); st->cr();                                                                       
2767 
2768   DEBUG_ONLY(totals.verify());                                                                                             
2769 }                                                                                                                          
2770 
2771 void G1CollectedHeap::reset_taskqueue_stats() {                                                                            
2772   const uint n = num_task_queues();                                                                                        
2773   for (uint i = 0; i < n; ++i) {                                                                                           
2774     task_queue(i)->stats.reset();                                                                                          
2775   }                                                                                                                        
2776 }                                                                                                                          
2777 #endif // TASKQUEUE_STATS                                                                                                  
2778 
2779 void G1CollectedHeap::wait_for_root_region_scanning() {                                                                    
2780   double scan_wait_start = os::elapsedTime();                                                                              
2781   // We have to wait until the CM threads finish scanning the                                                              
2782   // root regions as it's the only way to ensure that all the                                                              
2783   // objects on them have been correctly scanned before we start                                                           
2784   // moving them during the GC.                                                                                            
2785   bool waited = _cm->root_regions()->wait_until_scan_finished();                                                           
2786   double wait_time_ms = 0.0;                                                                                               
2787   if (waited) {                                                                                                            
2788     double scan_wait_end = os::elapsedTime();                                                                              
2789     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;                                                             
2790   }                                                                                                                        
2791   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);                                             
2792 }                                                                                                                          
2793 
2794 class G1PrintCollectionSetClosure : public HeapRegionClosure {                                                             
2795 private:                                                                                                                   
2796   G1HRPrinter* _hr_printer;                                                                                                
2797 public:                                                                                                                    
2798   G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }                  
2799 
2800   virtual bool do_heap_region(HeapRegion* r) {                                                                             
2801     _hr_printer->cset(r);                                                                                                  
2802     return false;                                                                                                          
2803   }                                                                                                                        
2804 };                                                                                                                         
2805 
2806 void G1CollectedHeap::start_new_collection_set() {                                                                         
2807   collection_set()->start_incremental_building();                                                                          
2808 
2809   clear_cset_fast_test();                                                                                                  
2810 
2811   guarantee(_eden.length() == 0, "eden should have been cleared");                                                         
2812   g1_policy()->transfer_survivors_to_cset(survivor());                                                                     
2813 }                                                                                                                          
2814 
2815 bool                                                                                                                       
2816 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {                                           
2817   assert_at_safepoint_on_vm_thread();                                                                                      
2818   guarantee(!is_gc_active(), "collection is not reentrant");                                                               
2819 
2820   if (GCLocker::check_active_before_gc()) {                                                                                
2821     return false;                                                                                                          
2822   }                                                                                                                        
2823 
2824   _gc_timer_stw->register_gc_start();                                                                                      
2825 
2826   GCIdMark gc_id_mark;                                                                                                     
2827   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());                                                  
2828 
2829   SvcGCMarker sgcm(SvcGCMarker::MINOR);                                                                                    
2830   ResourceMark rm;                                                                                                         
2831 
2832   g1_policy()->note_gc_start();                                                                                            
2833 
2834   wait_for_root_region_scanning();                                                                                         
2835 
2836   print_heap_before_gc();                                                                                                  
2837   print_heap_regions();                                                                                                    
2838   trace_heap_before_gc(_gc_tracer_stw);                                                                                    
2839 
2840   _verifier->verify_region_sets_optional();                                                                                
2841   _verifier->verify_dirty_young_regions();                                                                                 
2842 
2843   // We should not be doing initial mark unless the conc mark thread is running                                            
2844   if (!_cm_thread->should_terminate()) {                                                                                   
2845     // This call will decide whether this pause is an initial-mark                                                         
2846     // pause. If it is, in_initial_mark_gc() will return true                                                              
2847     // for the duration of this pause.                                                                                     
2848     g1_policy()->decide_on_conc_mark_initiation();                                                                         
2849   }                                                                                                                        
2850 
2851   // We do not allow initial-mark to be piggy-backed on a mixed GC.                                                        
2852   assert(!collector_state()->in_initial_mark_gc() ||                                                                       
2853           collector_state()->in_young_only_phase(), "sanity");                                                             
2854 
2855   // We also do not allow mixed GCs during marking.                                                                        
2856   assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");         
2857 
2858   // Record whether this pause is an initial mark. By the time the current
2859   // thread has completed its logging output and it is safe to signal
2860   // the CM thread, the flag's value in the policy will have been reset.
2861   bool should_start_conc_mark = collector_state()->in_initial_mark_gc();                                                   
2862 
2863   // Inner scope for scope based logging, timers, and stats collection                                                     
2864   {                                                                                                                        
2865     EvacuationInfo evacuation_info;                                                                                        
2866 
2867     if (collector_state()->in_initial_mark_gc()) {                                                                         
2868       // We are about to start a marking cycle, so we increment the                                                        
2869       // full collection counter.                                                                                          
2870       increment_old_marking_cycles_started();                                                                              
2871       _cm->gc_tracer_cm()->set_gc_cause(gc_cause());                                                                       
2872     }                                                                                                                      
2873 
2874     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());                                                          
2875 
2876     GCTraceCPUTime tcpu;                                                                                                   
2877 
2878     G1HeapVerifier::G1VerifyType verify_type;                                                                              
2879     FormatBuffer<> gc_string("Pause Young ");                                                                              
2880     if (collector_state()->in_initial_mark_gc()) {                                                                         
2881       gc_string.append("(Concurrent Start)");                                                                              
2882       verify_type = G1HeapVerifier::G1VerifyConcurrentStart;                                                               
2883     } else if (collector_state()->in_young_only_phase()) {                                                                 
2884       if (collector_state()->in_young_gc_before_mixed()) {                                                                 
2885         gc_string.append("(Prepare Mixed)");                                                                               
2886       } else {                                                                                                             
2887         gc_string.append("(Normal)");                                                                                      
2888       }                                                                                                                    
2889       verify_type = G1HeapVerifier::G1VerifyYoungNormal;                                                                   
2890     } else {                                                                                                               
2891       gc_string.append("(Mixed)");                                                                                         
2892       verify_type = G1HeapVerifier::G1VerifyMixed;                                                                         
2893     }                                                                                                                      
2894     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);                                                           
2895 
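    // Recompute how many parallel GC workers to use for this pause, based on the
    // currently configured worker counts and the number of non-daemon Java threads.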
2896     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),                              
2897                                                                   workers()->active_workers(),                             
2898                                                                   Threads::number_of_non_daemon_threads());                
2899     active_workers = workers()->update_active_workers(active_workers);                                                     
2900     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());                
2901 
2902     G1MonitoringScope ms(g1mm(),                                                                                           
2903                          false /* full_gc */,                                                                              
2904                          collector_state()->yc_type() == Mixed /* all_memory_pools_affected */);                           
2905 
2906     G1HeapTransition heap_transition(this);                                                                                
2907     size_t heap_used_bytes_before_gc = used();                                                                             
2908 
2909     // Don't dynamically change the number of GC threads this early.  A value of                                           
2910     // 0 is used to indicate serial work.  When parallel work is done,                                                     
2911     // it will be set.                                                                                                     
2912 
2913     { // Call to jvmpi::post_class_unload_events must occur outside of active GC                                           
2914       IsGCActiveMark x;                                                                                                    
2915 
2916       gc_prologue(false);                                                                                                  
2917 
2918       if (VerifyRememberedSets) {                                                                                          
2919         log_info(gc, verify)("[Verifying RemSets before GC]");                                                             
2920         VerifyRegionRemSetClosure v_cl;                                                                                    
2921         heap_region_iterate(&v_cl);                                                                                        
2922       }                                                                                                                    
2923 
2924       _verifier->verify_before_gc(verify_type);                                                                            
2925 
2926       _verifier->check_bitmaps("GC Start");                                                                                
2927 
2928 #if COMPILER2_OR_JVMCI                                                                                                     
2929       DerivedPointerTable::clear();                                                                                        
2930 #endif                                                                                                                     
2931 
2932       // Please see comment in g1CollectedHeap.hpp and                                                                     
2933       // G1CollectedHeap::ref_processing_init() to see how                                                                 
2934       // reference processing currently works in G1.                                                                       
2935 
2936       // Enable discovery in the STW reference processor                                                                   
2937       _ref_processor_stw->enable_discovery();                                                                              
2938 
2939       {                                                                                                                    
2940         // We want to temporarily turn off discovery by the                                                                
2941         // CM ref processor, if necessary, and turn it back on                                                             
2942         // again later if we do. Using a scoped
2943         // NoRefDiscovery object will do this.                                                                             
2944         NoRefDiscovery no_cm_discovery(_ref_processor_cm);                                                                 
2945 
2946         // Forget the current alloc region (we might even choose it to be part                                             
2947         // of the collection set!).                                                                                        
2948         _allocator->release_mutator_alloc_region();                                                                        
2949 
2950         // This timing is only used by the ergonomics to handle our pause target.                                          
2951         // It is unclear why this should not include the full pause. We will                                               
2952         // investigate this in CR 7178365.                                                                                 
2953         //                                                                                                                 
2954         // Preserving the old comment here if that helps the investigation:                                                
2955         //                                                                                                                 
2956         // The elapsed time induced by the start time below deliberately elides                                            
2957         // the possible verification above.                                                                                
2958         double sample_start_time_sec = os::elapsedTime();                                                                  
2959 
2960         g1_policy()->record_collection_pause_start(sample_start_time_sec);                                                 
2961 
2962         if (collector_state()->in_initial_mark_gc()) {                                                                     
2963           concurrent_mark()->pre_initial_mark();                                                                           
2964         }                                                                                                                  
2965 
2966         g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);                                            
2967 
2968         evacuation_info.set_collectionset_regions(collection_set()->region_length());                                      
2969 
2970         // Make sure the remembered sets are up to date. This needs to be                                                  
2971         // done before register_humongous_regions_with_cset(), because the                                                 
2972         // remembered sets are used there to choose eager reclaim candidates.                                              
2973         // If the remembered sets are not up to date we might miss some                                                    
2974         // entries that need to be handled.                                                                                
2975         g1_rem_set()->cleanupHRRS();                                                                                       
2976 
2977         register_humongous_regions_with_cset();                                                                            
2978 
2979         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");                              
2980 
2981         // We call this after finalize_collection_set() to
2982         // ensure that the CSet has been finalized.
2983         _cm->verify_no_cset_oops();                                                                                        
2984 
2985         if (_hr_printer.is_active()) {                                                                                     
2986           G1PrintCollectionSetClosure cl(&_hr_printer);                                                                    
2987           _collection_set.iterate(&cl);                                                                                    
2988         }                                                                                                                  
2989 
2990         // Initialize the GC alloc regions.                                                                                
2991         _allocator->init_gc_alloc_regions(evacuation_info);                                                                
2992 
2993         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
2994         pre_evacuate_collection_set();                                                                                     
2995 
2996         // Actually do the work...                                                                                         
2997         evacuate_collection_set(&per_thread_states);                                                                       
2998 
2999         post_evacuate_collection_set(evacuation_info, &per_thread_states);                                                 
3000 
3001         const size_t* surviving_young_words = per_thread_states.surviving_young_words();                                   
3002         free_collection_set(&_collection_set, evacuation_info, surviving_young_words);                                     
3003 
3004         eagerly_reclaim_humongous_regions();                                                                               
3005 
3006         record_obj_copy_mem_stats();                                                                                       
3007         _survivor_evac_stats.adjust_desired_plab_sz();                                                                     
3008         _old_evac_stats.adjust_desired_plab_sz();                                                                          
3009 
3010         double start = os::elapsedTime();                                                                                  
3011         start_new_collection_set();                                                                                        
3012         g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);                   
3013 
3014         if (evacuation_failed()) {                                                                                         
3015           set_used(recalculate_used());                                                                                    
3016           if (_archive_allocator != NULL) {                                                                                
3017             _archive_allocator->clear_used();                                                                              
3018           }                                                                                                                
3019           for (uint i = 0; i < ParallelGCThreads; i++) {                                                                   
3020             if (_evacuation_failed_info_array[i].has_failed()) {                                                           
3021               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);                                  
3022             }                                                                                                              
3023           }                                                                                                                
3024         } else {                                                                                                           
3025           // The "used" of the collection set regions has already been subtracted
3026           // when they were freed.  Add in the bytes evacuated.
3027           increase_used(g1_policy()->bytes_copied_during_gc());                                                            
3028         }                                                                                                                  
3029 
3030         if (collector_state()->in_initial_mark_gc()) {                                                                     
3031           // We have to do this before we notify the CM threads that                                                       
3032           // they can start working to make sure that all the                                                              
3033           // appropriate initialization is done on the CM object.                                                          
3034           concurrent_mark()->post_initial_mark();                                                                          
3035           // Note that we don't actually trigger the CM thread at                                                          
3036           // this point. We do that later when we're sure that                                                             
3037           // the current thread has completed its logging output.                                                          
3038         }                                                                                                                  
3039 
3040         allocate_dummy_regions();                                                                                          
3041 
3042         _allocator->init_mutator_alloc_region();                                                                           
3043 
3044         {                                                                                                                  
3045           size_t expand_bytes = _heap_sizing_policy->expansion_amount();                                                   
3046           if (expand_bytes > 0) {                                                                                          
3047             size_t bytes_before = capacity();                                                                              
3048             // No need for ergo logging here,
3049             // expansion_amount() does this when it returns a value > 0.                                                   
3050             double expand_ms;                                                                                              
3051             if (!expand(expand_bytes, _workers, &expand_ms)) {                                                             
3052               // We failed to expand the heap. Cannot do anything about it.                                                
3053             }                                                                                                              
3054             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);                                                
3055           }                                                                                                                
3056         }                                                                                                                  
3057 
3058         // We redo the verification but now wrt the new CSet which
3059         // has just been initialized after the previous CSet was freed.
3060         _cm->verify_no_cset_oops();                                                                                        
3061 
3062         // This timing is only used by the ergonomics to handle our pause target.                                          
3063         // It is unclear why this should not include the full pause. We will                                               
3064         // investigate this in CR 7178365.                                                                                 
3065         double sample_end_time_sec = os::elapsedTime();                                                                    
3066         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;                                 
3067         size_t total_cards_scanned = g1_policy()->phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
3068         g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);           
3069 
3070         evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());                              
3071         evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());                                           
3072 
3073         if (VerifyRememberedSets) {                                                                                        
3074           log_info(gc, verify)("[Verifying RemSets after GC]");                                                            
3075           VerifyRegionRemSetClosure v_cl;                                                                                  
3076           heap_region_iterate(&v_cl);                                                                                      
3077         }                                                                                                                  
3078 
3079         _verifier->verify_after_gc(verify_type);                                                                           
3080         _verifier->check_bitmaps("GC End");                                                                                
3081 
3082         assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");                                                 
3083         _ref_processor_stw->verify_no_references_recorded();                                                               
3084 
3085         // CM reference discovery will be re-enabled if necessary.                                                         
3086       }                                                                                                                    
3087 
3088 #ifdef TRACESPINNING                                                                                                       
3089       ParallelTaskTerminator::print_termination_counts();                                                                  
3090 #endif                                                                                                                     
3091 
3092       gc_epilogue(false);                                                                                                  
3093     }                                                                                                                      
3094 
3095     // Print the remainder of the GC log output.                                                                           
3096     if (evacuation_failed()) {                                                                                             
3097       log_info(gc)("To-space exhausted");                                                                                  
3098     }                                                                                                                      
3099 
3100     g1_policy()->print_phases();                                                                                           
3101     heap_transition.print();                                                                                               
3102 
3103     // It is not yet safe to tell the concurrent mark thread to
3104     // start as we have some optional output below. We don't want the                                                      
3105     // output from the concurrent mark thread interfering with this                                                        
3106     // logging output either.                                                                                              
3107 
3108     _hrm.verify_optional();                                                                                                
3109     _verifier->verify_region_sets_optional();                                                                              
3110 
3111     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());                                                                         
3112     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());                                                                         
3113 
3114     print_heap_after_gc();                                                                                                 
3115     print_heap_regions();                                                                                                  
3116     trace_heap_after_gc(_gc_tracer_stw);                                                                                   
3117 
3118     // We must call G1MonitoringSupport::update_sizes() in the same scoping level                                          
3119     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the                                     
3120     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated                                          
3121     // before any GC notifications are raised.                                                                             
3122     g1mm()->update_sizes();                                                                                                
3123 
3124     _gc_tracer_stw->report_evacuation_info(&evacuation_info);                                                              
3125     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());                                           
3126     _gc_timer_stw->register_gc_end();                                                                                      
3127     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());                              
3128   }                                                                                                                        
3129   // It should now be safe to tell the concurrent mark thread to start                                                     
3130   // without its logging output interfering with the logging output                                                        
3131   // that came from the pause.                                                                                             
3132 
3133   if (should_start_conc_mark) {                                                                                            
3134     // CAUTION: after the doConcurrentMark() call below,                                                                   
3135     // the concurrent marking thread(s) could be running                                                                   
3136     // concurrently with us. Make sure that anything after                                                                 
3137     // this point does not assume that we are the only GC thread                                                           
3138     // running. Note: of course, the actual marking work will                                                              
3139     // not start until the safepoint itself is released in                                                                 
3140     // SuspendibleThreadSet::desynchronize().                                                                              
3141     do_concurrent_mark();                                                                                                  
3142   }                                                                                                                        
3143 
3144   return true;                                                                                                             
3145 }                                                                                                                          
3146 
3147 void G1CollectedHeap::remove_self_forwarding_pointers() {                                                                  
3148   G1ParRemoveSelfForwardPtrsTask rsfp_task;                                                                                
3149   workers()->run_task(&rsfp_task);                                                                                         
3150 }                                                                                                                          
3151 
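// Recovers from an evacuation failure: the self-forwarding pointers installed for
// objects that could not be copied are removed in parallel, preserved object header
// marks are restored, and the total time is recorded in the phase times.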
3152 void G1CollectedHeap::restore_after_evac_failure() {                                                                       
3153   double remove_self_forwards_start = os::elapsedTime();                                                                   
3154 
3155   remove_self_forwarding_pointers();                                                                                       
3156   SharedRestorePreservedMarksTaskExecutor task_executor(workers());                                                        
3157   _preserved_marks_set.restore(&task_executor);                                                                            
3158 
3159   g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
3160 }                                                                                                                          
3161 
3162 void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {                              
3163   if (!_evacuation_failed) {                                                                                               
3164     _evacuation_failed = true;                                                                                             
3165   }                                                                                                                        
3166 
3167   _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());                                             
3168   _preserved_marks_set.get(worker_id)->push_if_necessary(obj, m);                                                          
3169 }                                                                                                                          
3170 
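// The evacuate-followers closure below carries the JFR instrumentation added by
// JDK-8196341: each worker wraps its termination attempts and its object-copy/steal
// work in an EventGCPhaseParallel scope. The commit() calls shown are a sketch of
// that instrumentation (GC id, worker id, phase name) and assume the event type
// declared in jfr/jfrEvents.hpp; the exact arguments are reconstructed, not quoted.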
3171 bool G1ParEvacuateFollowersClosure::offer_termination() {
  // JFR GCPhaseParallel event for this worker's termination phase (sketch, see note above).
  EventGCPhaseParallel event;
3172   G1ParScanThreadState* const pss = par_scan_state();
3173   start_term_time();
3174   const bool res = terminator()->offer_termination();
3175   end_term_time();
  event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::Termination));
3176   return res;
3177 }
3178 
3179 void G1ParEvacuateFollowersClosure::do_void() {
  // JFR GCPhaseParallel events covering queue trimming and work stealing (sketch, see note above).
  EventGCPhaseParallel event;
3180   G1ParScanThreadState* const pss = par_scan_state();
3181   pss->trim_queue();
  event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ObjCopy));
3182   do {
    EventGCPhaseParallel event;
3183     pss->steal_and_trim_queue(queues());
    event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ObjCopy));
3184   } while (!offer_termination());
3185 }
3186 
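// G1ParTask is the gang task for the evacuation pause proper. Each worker evacuates
// the strong roots, scans the remembered sets into the collection set, and then
// drains/steals from the task queues until termination, recording ObjCopy and
// Termination times (and optional per-worker termination statistics) in G1GCPhaseTimes.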
3187 class G1ParTask : public AbstractGangTask {                                                                                
3188 protected:                                                                                                                 
3189   G1CollectedHeap*         _g1h;                                                                                           
3190   G1ParScanThreadStateSet* _pss;                                                                                           
3191   RefToScanQueueSet*       _queues;                                                                                        
3192   G1RootProcessor*         _root_processor;                                                                                
3193   ParallelTaskTerminator   _terminator;                                                                                    
3194   uint                     _n_workers;                                                                                     
3195 
3196 public:                                                                                                                    
3197   G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
3198     : AbstractGangTask("G1 collection"),                                                                                   
3199       _g1h(g1h),                                                                                                           
3200       _pss(per_thread_states),                                                                                             
3201       _queues(task_queues),                                                                                                
3202       _root_processor(root_processor),                                                                                     
3203       _terminator(n_workers, _queues),                                                                                     
3204       _n_workers(n_workers)                                                                                                
3205   {}                                                                                                                       
3206 
3207   void work(uint worker_id) {                                                                                              
3208     if (worker_id >= _n_workers) return;  // no work needed this round                                                     
3209 
3210     double start_sec = os::elapsedTime();                                                                                  
3211     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);               
3212 
3213     {                                                                                                                      
3214       ResourceMark rm;                                                                                                     
3215       HandleMark   hm;                                                                                                     
3216 
3217       ReferenceProcessor*             rp = _g1h->ref_processor_stw();                                                      
3218 
3219       G1ParScanThreadState*           pss = _pss->state_for_worker(worker_id);                                             
3220       pss->set_ref_discoverer(rp);                                                                                         
3221 
3222       double start_strong_roots_sec = os::elapsedTime();                                                                   
3223 
3224       _root_processor->evacuate_roots(pss, worker_id);                                                                     
3225 
3226       // We pass a weak code blobs closure to the remembered set scanning because we do not
3227       // want the nmethods we visit to act as roots for concurrent marking.
3228       // We only want to make sure that the oops in the nmethods are adjusted with regard to the
3229       // objects copied by the current evacuation.
3230       _g1h->g1_rem_set()->oops_into_collection_set_do(pss, worker_id);                                                     
3231 
3232       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;                                                
3233 
3234       double term_sec = 0.0;                                                                                               
3235       size_t evac_term_attempts = 0;                                                                                       
3236       {                                                                                                                    
3237         double start = os::elapsedTime();                                                                                  
3238         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);                                              
3239         evac.do_void();                                                                                                    
3240 
3241         evac_term_attempts = evac.term_attempts();                                                                         
3242         term_sec = evac.term_time();                                                                                       
3243         double elapsed_sec = os::elapsedTime() - start;                                                                    
3244 
3245         G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();                                                              
3246         p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);                                      
3247         p->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);                                             
3248         p->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);                            
3249       }                                                                                                                    
3250 
3251       assert(pss->queue_is_empty(), "should be empty");                                                                    
3252 
3253       if (log_is_enabled(Debug, gc, task, stats)) {                                                                        
3254         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);                                             
3255         size_t lab_waste;                                                                                                  
3256         size_t lab_undo_waste;                                                                                             
3257         pss->waste(lab_waste, lab_undo_waste);                                                                             
3258         _g1h->print_termination_stats(worker_id,                                                                           
3259                                       (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */                       
3260                                       strong_roots_sec * 1000.0,                  /* strong roots time */                  
3261                                       term_sec * 1000.0,                          /* evac term time */                     
3262                                       evac_term_attempts,                         /* evac term attempts */                 
3263                                       lab_waste,                                  /* alloc buffer waste */                 
3264                                       lab_undo_waste                              /* undo waste */                         
3265                                       );                                                                                   
3266       }                                                                                                                    
3267 
3268       // Close the inner scope so that the ResourceMark and HandleMark                                                     
3269       // destructors are executed here and are included as part of the                                                     
3270       // "GC Worker Time".                                                                                                 
3271     }                                                                                                                      
3272     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());         
3273   }                                                                                                                        
3274 };                                                                                                                         
3275 
3276 void G1CollectedHeap::print_termination_stats_hdr() {                                                                      
3277   log_debug(gc, task, stats)("GC Termination Stats");                                                                      
3278   log_debug(gc, task, stats)("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");          
3279   log_debug(gc, task, stats)("thr     ms        ms      %%        ms      %%    attempts  total   alloc    undo");         
3280   log_debug(gc, task, stats)("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");          
3281 }                                                                                                                          
3282 
3283 void G1CollectedHeap::print_termination_stats(uint worker_id,                                                              
3284                                               double elapsed_ms,                                                           
3285                                               double strong_roots_ms,                                                      
3286                                               double term_ms,                                                              
3287                                               size_t term_attempts,                                                        
3288                                               size_t alloc_buffer_waste,                                                   
3289                                               size_t undo_waste) const {                                                   
3290   log_debug(gc, task, stats)                                                                                               
3291               ("%3d %9.2f %9.2f %6.2f "                                                                                    
3292                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "                                                                         
3293                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),                                                 
3294                worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,                                 
3295                term_ms, term_ms * 100 / elapsed_ms, term_attempts,                                                         
3296                (alloc_buffer_waste + undo_waste) * HeapWordSize / K,                                                       
3297                alloc_buffer_waste * HeapWordSize / K,                                                                      
3298                undo_waste * HeapWordSize / K);                                                                             
3299 }                                                                                                                          
3300 
3301 void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,                                                       
3302                                         bool class_unloading_occurred) {                                                   
3303   uint n_workers = workers()->active_workers();                                                                            
3304 
3305   G1StringDedupUnlinkOrOopsDoClosure dedup_closure(is_alive, NULL, false);                                                 
3306   ParallelCleaningTask g1_unlink_task(is_alive, &dedup_closure, n_workers, class_unloading_occurred);                      
3307   workers()->run_task(&g1_unlink_task);                                                                                    
3308 }                                                                                                                          
3309 
3310 void G1CollectedHeap::partial_cleaning(BoolObjectClosure* is_alive,                                                        
3311                                        bool process_strings,                                                               
3312                                        bool process_string_dedup) {                                                        
3313   if (!process_strings && !process_string_dedup) {                                                                         
3314     // Nothing to clean.                                                                                                   
3315     return;                                                                                                                
3316   }                                                                                                                        
3317 
3318   G1StringDedupUnlinkOrOopsDoClosure dedup_closure(is_alive, NULL, false);                                                 
3319   StringCleaningTask g1_unlink_task(is_alive, process_string_dedup ? &dedup_closure : NULL, process_strings);              
3320   workers()->run_task(&g1_unlink_task);                                                                                    
3321 }                                                                                                                          
3322 
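// Gang task that re-dirties the card table entries logged during the pause. Each
// worker's time is tracked under the RedirtyCards phase via G1GCParPhaseTimesTracker,
// which (with JDK-8196341) is also expected to report a JFR GCPhaseParallel event.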
3323 class G1RedirtyLoggedCardsTask : public AbstractGangTask {                                                                 
3324  private:                                                                                                                  
3325   DirtyCardQueueSet* _queue;                                                                                               
3326   G1CollectedHeap* _g1h;                                                                                                   
3327  public:                                                                                                                   
3328   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),            
3329     _queue(queue), _g1h(g1h) { }                                                                                           
3330 
3331   virtual void work(uint worker_id) {                                                                                      
3332     G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();                                                        
3333     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);                                      
3334 
3335     RedirtyLoggedCardTableEntryClosure cl(_g1h);                                                                           
3336     _queue->par_apply_closure_to_all_completed_buffers(&cl);                                                               
3337 
3338     phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());                       
3339   }                                                                                                                        
3340 };                                                                                                                         
3341 
3342 void G1CollectedHeap::redirty_logged_cards() {                                                                             
3343   double redirty_logged_cards_start = os::elapsedTime();                                                                   
3344 
3345   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);                                                    
3346   dirty_card_queue_set().reset_for_par_iteration();                                                                        
3347   workers()->run_task(&redirty_task);                                                                                      
3348 
3349   DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();                                                           
3350   dcq.merge_bufferlists(&dirty_card_queue_set());                                                                          
3351   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");                                   
3352 
3353   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3354 }                                                                                                                          
3355 
3356 // Weak Reference Processing support                                                                                       
3357 
3358 bool G1STWIsAliveClosure::do_object_b(oop p) {                                                                             
3359   // An object is reachable if it is outside the collection set,                                                           
3360   // or is inside and copied.                                                                                              
3361   return !_g1h->is_in_cset(p) || p->is_forwarded();                                                                        
3362 }                                                                                                                          
3363 
3364 bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {                                                                
3365   assert(obj != NULL, "must not be NULL");                                                                                 
3366   assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));                        
3367   // The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below                                
3368   // may falsely indicate that this is not the case here: however the collection set only                                  
3369   // contains old regions when concurrent mark is not running.                                                             
3370   return _g1h->is_in_cset(obj) || _g1h->heap_region_containing(obj)->is_survivor();                                        
3371 }                                                                                                                          
3372 
3373 // Non Copying Keep Alive closure                                                                                          
3374 class G1KeepAliveClosure: public OopClosure {                                                                              
3375   G1CollectedHeap* _g1h;
3376 public:                                                                                                                    
3377   G1KeepAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
3378   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }                                                            
3379   void do_oop(oop* p) {                                                                                                    
3380     oop obj = *p;                                                                                                          
3381     assert(obj != NULL, "the caller should have filtered out NULL values");                                                
3382 
3383     const InCSetState cset_state = _g1h->in_cset_state(obj);
3384     if (!cset_state.is_in_cset_or_humongous()) {                                                                           
3385       return;                                                                                                              
3386     }                                                                                                                      
3387     if (cset_state.is_in_cset()) {                                                                                         
3388       assert(obj->is_forwarded(), "invariant");
3389       *p = obj->forwardee();
3390     } else {
3391       assert(!obj->is_forwarded(), "invariant");
3392       assert(cset_state.is_humongous(),
3393              "Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
3394       _g1h->set_humongous_is_live(obj);
3395     }                                                                                                                      
3396   }                                                                                                                        
3397 };                                                                                                                         
3398 
3399 // Copying Keep Alive closure - can be called from both                                                                    
3400 // serial and parallel code as long as different worker                                                                    
3401 // threads utilize different G1ParScanThreadState instances                                                                
3402 // and different queues.                                                                                                   
3403 
3404 class G1CopyingKeepAliveClosure: public OopClosure {                                                                       
3405   G1CollectedHeap*         _g1h;                                                                                           
3406   G1ParScanThreadState*    _par_scan_state;                                                                                
3407 
3408 public:                                                                                                                    
3409   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,                                                                          
3410                             G1ParScanThreadState* pss):                                                                    
3411     _g1h(g1h),                                                                                                             
3412     _par_scan_state(pss)                                                                                                   
3413   {}                                                                                                                       
3414 
3415   virtual void do_oop(narrowOop* p) { do_oop_work(p); }                                                                    
3416   virtual void do_oop(      oop* p) { do_oop_work(p); }                                                                    
3417 
3418   template <class T> void do_oop_work(T* p) {                                                                              
3419     oop obj = RawAccess<>::oop_load(p);                                                                                    
3420 
3421     if (_g1h->is_in_cset_or_humongous(obj)) {                                                                              
3422       // If the referent object has been forwarded (either copied                                                          
3423       // to a new location or to itself in the event of an                                                                 
3424       // evacuation failure) then we need to update the reference                                                          
3425       // field and, if both reference and referent are in the G1                                                           
3426       // heap, update the RSet for the referent.                                                                           
3427       //                                                                                                                   
3428       // If the referent has not been forwarded then we have to keep
3429       // it alive by policy. Therefore we have to copy the referent.
3430       //
3431       // When the queue is drained (after each phase of reference processing)
3432       // the object and its followers will be copied, the reference field set
3433       // to point to the new location, and the RSet updated.
3434       _par_scan_state->push_on_queue(p);                                                                                   
3435     }                                                                                                                      
3436   }                                                                                                                        
3437 };                                                                                                                         
3438 
3439 // Serial drain queue closure. Called as the 'complete_gc'                                                                 
3440 // closure for each discovered list in some of the                                                                         
3441 // reference processing phases.                                                                                            
3442 
3443 class G1STWDrainQueueClosure: public VoidClosure {                                                                         
3444 protected:                                                                                                                 
3445   G1CollectedHeap* _g1h;                                                                                                   
3446   G1ParScanThreadState* _par_scan_state;                                                                                   
3447 
3448   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }                                                     
3449 
3450 public:                                                                                                                    
3451   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :                                                
3452     _g1h(g1h),                                                                                                             
3453     _par_scan_state(pss)                                                                                                   
3454   { }                                                                                                                      
3455 
3456   void do_void() {                                                                                                         
3457     G1ParScanThreadState* const pss = par_scan_state();                                                                    
3458     pss->trim_queue();                                                                                                     
3459   }                                                                                                                        
3460 };                                                                                                                         
3461 
3462 // Parallel Reference Processing closures                                                                                  
3463 
3464 // Implementation of AbstractRefProcTaskExecutor for parallel reference                                                    
3465 // processing during G1 evacuation pauses.                                                                                 
3466 
3467 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {                                                       
3468 private:                                                                                                                   
3469   G1CollectedHeap*          _g1h;                                                                                          
3470   G1ParScanThreadStateSet*  _pss;                                                                                          
3471   RefToScanQueueSet*        _queues;                                                                                       
3472   WorkGang*                 _workers;                                                                                      
3473 
3474 public:                                                                                                                    
3475   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,                                                                           
3476                            G1ParScanThreadStateSet* per_thread_states,                                                     
3477                            WorkGang* workers,                                                                              
3478                            RefToScanQueueSet *task_queues) :                                                               
3479     _g1h(g1h),                                                                                                             
3480     _pss(per_thread_states),                                                                                               
3481     _queues(task_queues),                                                                                                  
3482     _workers(workers)                                                                                                      
3483   {                                                                                                                        
3484     g1h->ref_processor_stw()->set_active_mt_degree(workers->active_workers());                                             
3485   }                                                                                                                        
3486 
3487   // Executes the given task using the work gang's (STW GC) worker threads.
3488   virtual void execute(ProcessTask& task, uint ergo_workers);                                                              
3489 };                                                                                                                         
3490 
3491 // Gang task for possibly parallel reference processing                                                                    
3492 
3493 class G1STWRefProcTaskProxy: public AbstractGangTask {                                                                     
3494   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;                                                            
3495   ProcessTask&     _proc_task;                                                                                             
3496   G1CollectedHeap* _g1h;                                                                                                   
3497   G1ParScanThreadStateSet* _pss;                                                                                           
3498   RefToScanQueueSet* _task_queues;                                                                                         
3499   ParallelTaskTerminator* _terminator;                                                                                     
3500 
3501 public:                                                                                                                    
3502   G1STWRefProcTaskProxy(ProcessTask& proc_task,                                                                            
3503                         G1CollectedHeap* g1h,                                                                              
3504                         G1ParScanThreadStateSet* per_thread_states,                                                        
3505                         RefToScanQueueSet *task_queues,                                                                    
3506                         ParallelTaskTerminator* terminator) :                                                              
3507     AbstractGangTask("Process reference objects in parallel"),                                                             
3508     _proc_task(proc_task),                                                                                                 
3509     _g1h(g1h),                                                                                                             
3510     _pss(per_thread_states),                                                                                               
3511     _task_queues(task_queues),                                                                                             
3512     _terminator(terminator)                                                                                                
3513   {}                                                                                                                       
3514 
3515   virtual void work(uint worker_id) {                                                                                      
3516     // The reference processing task executed by a single worker.                                                          
3517     ResourceMark rm;                                                                                                       
3518     HandleMark   hm;                                                                                                       
3519 
3520     G1STWIsAliveClosure is_alive(_g1h);                                                                                    
3521 
3522     G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);                                                         
3523     pss->set_ref_discoverer(NULL);                                                                                         
3524 
3525     // Keep alive closure.                                                                                                 
3526     G1CopyingKeepAliveClosure keep_alive(_g1h, pss);                                                                       
3527 
3528     // Complete GC closure                                                                                                 
3529     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);                                       
3530 
3531     // Call the reference processing task's work routine.                                                                  
3532     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);                                                         
3533 
3534     // Note we cannot assert that the refs array is empty here as not all                                                  
3535     // of the processing tasks (specifically phase2 - pp2_work) execute                                                    
3536     // the complete_gc closure (which ordinarily would drain the queue) so                                                 
3537     // the queue may not be empty.                                                                                         
3538   }                                                                                                                        
3539 };                                                                                                                         
3540 
3541 // Driver routine for parallel reference processing.                                                                       
3542 // Creates an instance of the ref processing gang                                                                          
3543 // task and has the worker threads execute it.                                                                             
3544 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) {                                        
3545   assert(_workers != NULL, "Need parallel worker threads.");                                                               
3546 
3547   assert(_workers->active_workers() >= ergo_workers,                                                                       
3548          "Ergonomically chosen workers (%u) should be less than or equal to active workers (%u)",                          
3549          ergo_workers, _workers->active_workers());                                                                        
3550   ParallelTaskTerminator terminator(ergo_workers, _queues);                                                                
3551   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);                                      
3552 
3553   _workers->run_task(&proc_task_proxy, ergo_workers);                                                                      
3554 }                                                                                                                          
3555 
3556 // End of weak reference support closures                                                                                  
3557 
3558 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {                          
3559   double ref_proc_start = os::elapsedTime();                                                                               
3560 
3561   ReferenceProcessor* rp = _ref_processor_stw;                                                                             
3562   assert(rp->discovery_enabled(), "should have been enabled");                                                             
3563 
3564   // Closure to test whether a referent is alive.                                                                          
3565   G1STWIsAliveClosure is_alive(this);                                                                                      
3566 
3567   // Even when parallel reference processing is enabled, the processing
3568   // of JNI refs is performed serially by the current thread rather
3569   // than by a worker. The following PSS will be used for processing
3570   // JNI refs.
3571 
3572   // Use only a single queue for this PSS.                                                                                 
3573   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);                                             
3574   pss->set_ref_discoverer(NULL);                                                                                           
3575   assert(pss->queue_is_empty(), "pre-condition");                                                                          
3576 
3577   // Keep alive closure.                                                                                                   
3578   G1CopyingKeepAliveClosure keep_alive(this, pss);                                                                         
3579 
3580   // Serial Complete GC closure                                                                                            
3581   G1STWDrainQueueClosure drain_queue(this, pss);                                                                           
3582 
3583   // Set up the soft refs policy...
3584   rp->setup_policy(false);                                                                                                 
3585 
3586   ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();                                        
3587 
3588   ReferenceProcessorStats stats;                                                                                           
3589   if (!rp->processing_is_mt()) {                                                                                           
3590     // Serial reference processing...                                                                                      
3591     stats = rp->process_discovered_references(&is_alive,                                                                   
3592                                               &keep_alive,                                                                 
3593                                               &drain_queue,                                                                
3594                                               NULL,                                                                        
3595                                               pt);                                                                         
3596   } else {                                                                                                                 
3597     uint no_of_gc_workers = workers()->active_workers();                                                                   
3598 
3599     // Parallel reference processing                                                                                       
3600     assert(no_of_gc_workers <= rp->max_num_queues(),
3601            "Mismatch between the number of GC workers %u and the maximum number of reference processing queues %u",
3602            no_of_gc_workers, rp->max_num_queues());
3603 
3604     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues);                          
3605     stats = rp->process_discovered_references(&is_alive,                                                                   
3606                                               &keep_alive,                                                                 
3607                                               &drain_queue,                                                                
3608                                               &par_task_executor,                                                          
3609                                               pt);                                                                         
3610   }                                                                                                                        
3611 
3612   _gc_tracer_stw->report_gc_reference_stats(stats);                                                                        
3613 
3614   // We have completed copying any necessary live referent objects.                                                        
3615   assert(pss->queue_is_empty(), "both queue and overflow should be empty");                                                
3616 
3617   make_pending_list_reachable();                                                                                           
3618 
3619   rp->verify_no_references_recorded();                                                                                     
3620 
3621   double ref_proc_time = os::elapsedTime() - ref_proc_start;                                                               
3622   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);                                                
3623 }                                                                                                                          
3624 
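// During an initial-mark pause the head of the reference pending list is marked in
// the next bitmap so that concurrent marking keeps the just-appended references alive.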
3625 void G1CollectedHeap::make_pending_list_reachable() {                                                                      
3626   if (collector_state()->in_initial_mark_gc()) {                                                                           
3627     oop pll_head = Universe::reference_pending_list();                                                                     
3628     if (pll_head != NULL) {                                                                                                
3629       // Any valid worker id is fine here as we are in the VM thread and single-threaded.                                  
3630       _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);                                                               
3631     }                                                                                                                      
3632   }                                                                                                                        
3633 }                                                                                                                          
3634 
3635 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {                            
3636   double merge_pss_time_start = os::elapsedTime();                                                                         
3637   per_thread_states->flush();                                                                                              
3638   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);               
3639 }                                                                                                                          
3640 
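// Per-pause setup before evacuation: reset the evacuation-failure state, disable the
// hot card cache, prepare the remembered set for collection-set scanning and, for
// initial-mark pauses, clear the CLD claim marks (recording the time taken).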
3641 void G1CollectedHeap::pre_evacuate_collection_set() {                                                                      
3642   _expand_heap_after_alloc_failure = true;                                                                                 
3643   _evacuation_failed = false;                                                                                              
3644 
3645   // Disable the hot card cache.                                                                                           
3646   _hot_card_cache->reset_hot_cache_claimed_index();                                                                        
3647   _hot_card_cache->set_use_cache(false);                                                                                   
3648 
3649   g1_rem_set()->prepare_for_oops_into_collection_set_do();                                                                 
3650   _preserved_marks_set.assert_empty();                                                                                     
3651 
3652   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();                                                                
3653 
3654   // InitialMark needs claim bits to keep track of the marked-through CLDs.                                                
3655   if (collector_state()->in_initial_mark_gc()) {                                                                           
3656     double start_clear_claimed_marks = os::elapsedTime();                                                                  
3657 
3658     ClassLoaderDataGraph::clear_claimed_marks();                                                                           
3659 
3660     double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;                
3661     phase_times->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);                                 
3662   }                                                                                                                        
3663 }                                                                                                                          
3664 
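// Runs the main parallel evacuation task (G1ParTask) on all active workers and records
// the wall-clock time of the parallel phase. Anything that happens between taking the
// end timestamp and returning (including the G1RootProcessor destructor) is instead
// accounted to the code root fixup time recorded at the end.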
3665 void G1CollectedHeap::evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states) {                                
3666   // Should G1EvacuationFailureALot be in effect for this GC?                                                              
3667   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)                                                               
3668 
3669   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");                                          
3670 
3671   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();                                                                
3672 
3673   double start_par_time_sec = os::elapsedTime();                                                                           
3674   double end_par_time_sec;                                                                                                 
3675 
3676   {                                                                                                                        
3677     const uint n_workers = workers()->active_workers();                                                                    
3678     G1RootProcessor root_processor(this, n_workers);                                                                       
3679     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);                              
3680 
3681     print_termination_stats_hdr();                                                                                         
3682 
3683     workers()->run_task(&g1_par_task);                                                                                     
3684     end_par_time_sec = os::elapsedTime();                                                                                  
3685 
3686     // Closing the inner scope will execute the destructor                                                                 
3687     // for the G1RootProcessor object. We record the current                                                               
3688     // elapsed time before closing the scope so that time                                                                  
3689     // taken for the destructor is NOT included in the                                                                     
3690     // reported parallel time.                                                                                             
3691   }                                                                                                                        
3692 
3693   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;                                                   
3694   phase_times->record_par_time(par_time_ms);                                                                               
3695 
3696   double code_root_fixup_time_ms =                                                                                         
3697         (os::elapsedTime() - end_par_time_sec) * 1000.0;                                                                   
3698   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);                                                       
3699 }                                                                                                                          
3700 
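// Post-evacuation work performed in the VM thread once the parallel evacuation task has
// finished: reference processing, weak root processing, string deduplication fixup,
// evacuation failure handling, releasing the GC allocation regions, merging per-thread
// state and miscellaneous cleanup. Several sub-phases use the worker threads and most
// record their own timings.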
3701 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
3702   // Also cleans the card table from temporary duplicate detection information used                                        
3703   // during UpdateRS/ScanRS.                                                                                               
3704   g1_rem_set()->cleanup_after_oops_into_collection_set_do();                                                               
3705 
3706   // Process any discovered reference objects - we have                                                                    
3707   // to do this _before_ we retire the GC alloc regions                                                                    
3708   // as we may have to copy some 'reachable' referent                                                                      
3709   // objects (and their reachable sub-graphs) that were                                                                    
3710   // not copied during the pause.                                                                                          
3711   process_discovered_references(per_thread_states);                                                                        
3712 
3713   // FIXME                                                                                                                 
3714   // CM's reference processing also cleans up the string table.                                                            
3715   // Should we do that here also? We could, but it is a serial operation                                                   
3716   // and could significantly increase the pause time.                                                                      
3717 
3718   G1STWIsAliveClosure is_alive(this);                                                                                      
3719   G1KeepAliveClosure keep_alive(this);                                                                                     
3720 
3721   WeakProcessor::weak_oops_do(workers(), &is_alive, &keep_alive,                                                           
3722                               g1_policy()->phase_times()->weak_phase_times());                                             
3723 
3724   if (G1StringDedup::is_enabled()) {                                                                                       
3725     double fixup_start = os::elapsedTime();                                                                                
3726 
3727     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());                            
3728 
3729     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;                                                     
3730     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);                                             
3731   }                                                                                                                        
3732 
3733   if (evacuation_failed()) {                                                                                               
3734     restore_after_evac_failure();                                                                                          
3735 
3736     // Reset the G1EvacuationFailureALot counters and flags                                                                
3737     // Note: the values are reset only when an actual                                                                      
3738     // evacuation failure occurs.                                                                                          
3739     NOT_PRODUCT(reset_evacuation_should_fail();)                                                                           
3740   }                                                                                                                        
3741 
3742   _preserved_marks_set.assert_empty();                                                                                     
3743 
3744   _allocator->release_gc_alloc_regions(evacuation_info);                                                                   
3745 
3746   merge_per_thread_state_info(per_thread_states);                                                                          
3747 
3748   // Reset and re-enable the hot card cache.                                                                               
3749   // Note the counts for the cards in the regions in the                                                                   
3750   // collection set are reset when the collection set is freed.                                                            
3751   _hot_card_cache->reset_hot_cache();                                                                                      
3752   _hot_card_cache->set_use_cache(true);                                                                                    
3753 
3754   purge_code_root_memory();                                                                                                
3755 
3756   redirty_logged_cards();                                                                                                  
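  // Compiled (C2/JVMCI) code may hold derived pointers, i.e. interior pointers kept as
  // base plus offset. After objects have moved, these entries are recomputed from the
  // (possibly forwarded) base oops by DerivedPointerTable::update_pointers(); the time
  // spent is recorded as a separate phase.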
3757 #if COMPILER2_OR_JVMCI                                                                                                     
3758   double start = os::elapsedTime();                                                                                        
3759   DerivedPointerTable::update_pointers();                                                                                  
3760   g1_policy()->phase_times()->record_derived_pointer_table_update_time((os::elapsedTime() - start) * 1000.0);              
3761 #endif                                                                                                                     
3762   g1_policy()->print_age_table();                                                                                          
3763 }                                                                                                                          
3764 
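// Reports how much memory was allocated in the old generation during evacuation
// (converted from words to bytes) and forwards the survivor/old evacuation statistics
// to the STW GC tracer.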
3765 void G1CollectedHeap::record_obj_copy_mem_stats() {                                                                        
3766   g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);                       
3767 
3768   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),                              
3769                                                create_g1_evac_summary(&_old_evac_stats));                                  
3770 }                                                                                                                          
3771 
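// Frees a single region: optionally resets its card counts in the hot card cache (only
// needed for non-young regions, since cards in young regions are not refined), clears
// the region, notifies the remembered set tracker and adds the region to the given
// free list in order.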
3772 void G1CollectedHeap::free_region(HeapRegion* hr,                                                                          
3773                                   FreeRegionList* free_list,                                                               
3774                                   bool skip_remset,                                                                        
3775                                   bool skip_hot_card_cache,                                                                
3776                                   bool locked) {                                                                           
3777   assert(!hr->is_free(), "the region should not be free");                                                                 
3778   assert(!hr->is_empty(), "the region should not be empty");                                                               
3779   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");                                                
3780   assert(free_list != NULL, "pre-condition");                                                                              
3781 
3782   if (G1VerifyBitmaps) {                                                                                                   
3783     MemRegion mr(hr->bottom(), hr->end());                                                                                 
3784     concurrent_mark()->clear_range_in_prev_bitmap(mr);                                                                     
3785   }                                                                                                                        
3786 
3787   // Clear the card counts for this region.                                                                                
3788   // Note: we only need to do this if the region is not young                                                              
3789   // (since we don't refine cards in young regions).                                                                       
3790   if (!skip_hot_card_cache && !hr->is_young()) {                                                                           
3791     _hot_card_cache->reset_card_counts(hr);                                                                                
3792   }                                                                                                                        
3793   hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);                                                  
3794   _g1_policy->remset_tracker()->update_at_free(hr);                                                                        
3795   free_list->add_ordered(hr);                                                                                              
3796 }                                                                                                                          
3797 
3798 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,                                                                
3799                                             FreeRegionList* free_list) {                                                   
3800   assert(hr->is_humongous(), "this is only for humongous regions");                                                        
3801   assert(free_list != NULL, "pre-condition");                                                                              
3802   hr->clear_humongous();                                                                                                   
3803   free_region(hr, free_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);                            
3804 }                                                                                                                          
3805 
3806 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,                                                 
3807                                            const uint humongous_regions_removed) {                                         
3808   if (old_regions_removed > 0 || humongous_regions_removed > 0) {                                                          
3809     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);                                                        
3810     _old_set.bulk_remove(old_regions_removed);                                                                             
3811     _humongous_set.bulk_remove(humongous_regions_removed);                                                                 
3812   }                                                                                                                        
3814 }
3815 
3816 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {                                                          
3817   assert(list != NULL, "list can't be null");                                                                              
3818   if (!list->is_empty()) {                                                                                                 
3819     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);                                                       
3820     _hrm.insert_list_into_free_list(list);                                                                                 
3821   }                                                                                                                        
3822 }                                                                                                                          
3823 
3824 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {                                                              
3825   decrease_used(bytes);                                                                                                    
3826 }                                                                                                                          
3827 
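// Task that frees the collection set regions after evacuation. The work is split into a
// serial part, claimed by exactly one worker via _serial_work_claim, which iterates the
// whole collection set under the OldSets_lock, and a parallel part in which workers
// claim chunks of regions via _parallel_work_claim. The time spent on young and
// non-young regions in the parallel part is recorded per worker as the YoungFreeCSet
// and NonYoungFreeCSet phases.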
3828 class G1FreeCollectionSetTask : public AbstractGangTask {                                                                  
3829 private:                                                                                                                   
3830 
3831   // Closure applied to all regions in the collection set to do work that needs to                                         
3832   // be done serially in a single thread.                                                                                  
3833   class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {                                                      
3834   private:                                                                                                                 
3835     EvacuationInfo* _evacuation_info;                                                                                      
3836     const size_t* _surviving_young_words;                                                                                  
3837 
3838     // Bytes used in successfully evacuated regions before the evacuation.                                                 
3839     size_t _before_used_bytes;                                                                                             
3840     // Bytes used in unsuccessfully evacuated regions before the evacuation.
3841     size_t _after_used_bytes;                                                                                              
3842 
3843     size_t _bytes_allocated_in_old_since_last_gc;                                                                          
3844 
3845     size_t _failure_used_words;                                                                                            
3846     size_t _failure_waste_words;                                                                                           
3847 
3848     FreeRegionList _local_free_list;                                                                                       
3849   public:                                                                                                                  
3850     G1SerialFreeCollectionSetClosure(EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :               
3851       HeapRegionClosure(),                                                                                                 
3852       _evacuation_info(evacuation_info),                                                                                   
3853       _surviving_young_words(surviving_young_words),                                                                       
3854       _before_used_bytes(0),                                                                                               
3855       _after_used_bytes(0),                                                                                                
3856       _bytes_allocated_in_old_since_last_gc(0),                                                                            
3857       _failure_used_words(0),                                                                                              
3858       _failure_waste_words(0),                                                                                             
3859       _local_free_list("Local Region List for CSet Freeing") {                                                             
3860     }                                                                                                                      
3861 
3862     virtual bool do_heap_region(HeapRegion* r) {                                                                           
3863       G1CollectedHeap* g1h = G1CollectedHeap::heap();                                                                      
3864 
3865       assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());                            
3866       g1h->clear_in_cset(r);                                                                                               
3867 
3868       if (r->is_young()) {                                                                                                 
3869         assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
3870                "Young index %d is wrong for region %u of type %s with %u young regions",                                   
3871                r->young_index_in_cset(),                                                                                   
3872                r->hrm_index(),                                                                                             
3873                r->get_type_str(),                                                                                          
3874                g1h->collection_set()->young_region_length());                                                              
3875         size_t words_survived = _surviving_young_words[r->young_index_in_cset()];                                          
3876         r->record_surv_words_in_group(words_survived);                                                                     
3877       }                                                                                                                    
3878 
3879       if (!r->evacuation_failed()) {                                                                                       
3880         assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());                     
3881         _before_used_bytes += r->used();                                                                                   
3882         g1h->free_region(r,                                                                                                
3883                          &_local_free_list,                                                                                
3884                          true, /* skip_remset */                                                                           
3885                          true, /* skip_hot_card_cache */                                                                   
3886                          true  /* locked */);                                                                              
3887       } else {                                                                                                             
3888         r->uninstall_surv_rate_group();                                                                                    
3889         r->set_young_index_in_cset(-1);                                                                                    
3890         r->set_evacuation_failed(false);                                                                                   
3891         // When moving a young gen region to old gen, we "allocate" that whole region                                      
3892         // there. This is in addition to any already evacuated objects. Notify the                                         
3893         // policy about that.                                                                                              
3894         // Old gen regions do not cause an additional allocation: both the objects                                         
3895         // still in the region and the ones already moved are accounted for elsewhere.                                     
3896         if (r->is_young()) {                                                                                               
3897           _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;                                                 
3898         }                                                                                                                  
3899         // The region is now considered to be old.                                                                         
3900         r->set_old();                                                                                                      
3901         // Do some allocation statistics accounting. Regions that failed evacuation                                        
3902         // are always made old, so there is no need to update anything in the young                                        
3903         // gen statistics, but we need to update old gen statistics.                                                       
3904         size_t used_words = r->marked_bytes() / HeapWordSize;                                                              
3905 
3906         _failure_used_words += used_words;                                                                                 
3907         _failure_waste_words += HeapRegion::GrainWords - used_words;                                                       
3908 
3909         g1h->old_set_add(r);                                                                                               
3910         _after_used_bytes += r->used();                                                                                    
3911       }                                                                                                                    
3912       return false;                                                                                                        
3913     }                                                                                                                      
3914 
3915     void complete_work() {                                                                                                 
3916       G1CollectedHeap* g1h = G1CollectedHeap::heap();                                                                      
3917 
3918       _evacuation_info->set_regions_freed(_local_free_list.length());                                                      
3919       _evacuation_info->increment_collectionset_used_after(_after_used_bytes);                                             
3920 
3921       g1h->prepend_to_freelist(&_local_free_list);                                                                         
3922       g1h->decrement_summary_bytes(_before_used_bytes);                                                                    
3923 
3924       G1Policy* policy = g1h->g1_policy();                                                                                 
3925       policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);                             
3926 
3927       g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);    
3928     }                                                                                                                      
3929   };                                                                                                                       
3930 
3931   G1CollectionSet* _collection_set;                                                                                        
3932   G1SerialFreeCollectionSetClosure _cl;                                                                                    
3933   const size_t* _surviving_young_words;                                                                                    
3934 
3935   size_t _rs_lengths;                                                                                                      
3936 
3937   volatile jint _serial_work_claim;                                                                                        
3938 
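  // Snapshot of the per-region attributes needed by the parallel part of the task.
  // The serial closure above may change a region's young/evacuation-failed state (or
  // free the region entirely) while other workers are processing their chunks, so
  // these attributes are captured up front in prepare_work().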
3939   struct WorkItem {                                                                                                        
3940     uint region_idx;                                                                                                       
3941     bool is_young;                                                                                                         
3942     bool evacuation_failed;                                                                                                
3943 
3944     WorkItem(HeapRegion* r) {                                                                                              
3945       region_idx = r->hrm_index();                                                                                         
3946       is_young = r->is_young();                                                                                            
3947       evacuation_failed = r->evacuation_failed();                                                                          
3948     }                                                                                                                      
3949   };                                                                                                                       
3950 
3951   volatile size_t _parallel_work_claim;                                                                                    
3952   size_t _num_work_items;                                                                                                  
3953   WorkItem* _work_items;                                                                                                   
3954 
3955   void do_serial_work() {                                                                                                  
3956     // Need to grab the lock to be allowed to modify the old region list.                                                  
3957     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);                                                        
3958     _collection_set->iterate(&_cl);                                                                                        
3959   }                                                                                                                        
3960 
3961   void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) {                               
3962     G1CollectedHeap* g1h = G1CollectedHeap::heap();                                                                        
3963 
3964     HeapRegion* r = g1h->region_at(region_idx);                                                                            
3965     assert(!g1h->is_on_master_free_list(r), "sanity");                                                                     
3966 
3967     Atomic::add(r->rem_set()->occupied_locked(), &_rs_lengths);                                                            
3968 
3969     if (!is_young) {                                                                                                       
3970       g1h->_hot_card_cache->reset_card_counts(r);                                                                          
3971     }                                                                                                                      
3972 
3973     if (!evacuation_failed) {                                                                                              
3974       r->rem_set()->clear_locked();                                                                                        
3975     }                                                                                                                      
3976   }                                                                                                                        
3977 
3978   class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {                                                     
3979   private:                                                                                                                 
3980     size_t _cur_idx;                                                                                                       
3981     WorkItem* _work_items;                                                                                                 
3982   public:                                                                                                                  
3983     G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
3984 
3985     virtual bool do_heap_region(HeapRegion* r) {                                                                           
3986       _work_items[_cur_idx++] = WorkItem(r);                                                                               
3987       return false;                                                                                                        
3988     }                                                                                                                      
3989   };                                                                                                                       
3990 
3991   void prepare_work() {                                                                                                    
3992     G1PrepareFreeCollectionSetClosure cl(_work_items);                                                                     
3993     _collection_set->iterate(&cl);                                                                                         
3994   }                                                                                                                        
3995 
3996   void complete_work() {                                                                                                   
3997     _cl.complete_work();                                                                                                   
3998 
3999     G1Policy* policy = G1CollectedHeap::heap()->g1_policy();                                                               
4000     policy->record_max_rs_lengths(_rs_lengths);                                                                            
4001     policy->cset_regions_freed();                                                                                          
4002   }                                                                                                                        
4003 public:                                                                                                                    
4004   G1FreeCollectionSetTask(G1CollectionSet* collection_set, EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4005     AbstractGangTask("G1 Free Collection Set"),                                                                            
4006     _collection_set(collection_set),                                                                                       
4007     _cl(evacuation_info, surviving_young_words),                                                                           
4008     _surviving_young_words(surviving_young_words),                                                                         
4009     _rs_lengths(0),                                                                                                        
4010     _serial_work_claim(0),                                                                                                 
4011     _parallel_work_claim(0),                                                                                               
4012     _num_work_items(collection_set->region_length()),                                                                      
4013     _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {                                                       
4014     prepare_work();                                                                                                        
4015   }                                                                                                                        
4016 
4017   ~G1FreeCollectionSetTask() {                                                                                             
4018     complete_work();                                                                                                       
4019     FREE_C_HEAP_ARRAY(WorkItem, _work_items);                                                                              
4020   }                                                                                                                        
4021 
4022   // Chunk size for work distribution. The chosen value has been determined experimentally                                 
4023   // to be a good tradeoff between overhead and achievable parallelism.                                                    
4024   static uint chunk_size() { return 32; }                                                                                  
4025 
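  // Work is handed out in chunks: each Atomic::add(chunk_size(), &_parallel_work_claim)
  // returns the new claim value 'end', and the claiming worker processes work items
  // [end - chunk_size(), MIN2(end, _num_work_items)). For example, with 70 work items
  // the successive claims cover [0, 32), [32, 64) and [64, 70); any later claim gives
  // cur >= _num_work_items and the worker stops.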
4026   virtual void work(uint worker_id) {                                                                                      
4027     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();                                           
4028 
4029     // Claim serial work.                                                                                                  
4030     if (_serial_work_claim == 0) {                                                                                         
4031       jint value = Atomic::add(1, &_serial_work_claim) - 1;                                                                
4032       if (value == 0) {                                                                                                    
4033         double serial_time = os::elapsedTime();                                                                            
4034         do_serial_work();                                                                                                  
4035         timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);                                
4036       }                                                                                                                    
4037     }                                                                                                                      
4038 
4039     // Start parallel work.                                                                                                
4040     double young_time = 0.0;                                                                                               
4041     bool has_young_time = false;                                                                                           
4042     double non_young_time = 0.0;                                                                                           
4043     bool has_non_young_time = false;                                                                                       
4044 
4045     while (true) {                                                                                                         
4046       size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);                                                       
4047       size_t cur = end - chunk_size();                                                                                     
4048 
4049       if (cur >= _num_work_items) {                                                                                        
4050         break;                                                                                                             
4051       }                                                                                                                    
4052 
           EventGCPhaseParallel event;
4053       double start_time = os::elapsedTime();
4054 
4055       end = MIN2(end, _num_work_items);                                                                                    
4056 
4057       for (; cur < end; cur++) {                                                                                           
4058         bool is_young = _work_items[cur].is_young;                                                                         
4059 
4060         do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed);            
4061 
4062         double end_time = os::elapsedTime();                                                                               
4063         double time_taken = end_time - start_time;                                                                         
4064         if (is_young) {
4065           young_time += time_taken;
4066           has_young_time = true;
               event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::YoungFreeCSet));
4067         } else {
4068           non_young_time += time_taken;
4069           has_non_young_time = true;
               event.commit(GCId::current(), worker_id, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::NonYoungFreeCSet));
4070         }
4071         start_time = end_time;                                                                                             
4072       }                                                                                                                    
4073     }                                                                                                                      
4074 
4075     if (has_young_time) {                                                                                                  
4076       timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time);                                       
4077     }                                                                                                                      
4078     if (has_non_young_time) {                                                                                              
4079       timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, non_young_time);                                
4080     }                                                                                                                      
4081   }                                                                                                                        
4082 };                                                                                                                         
4083 
4084 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4085   _eden.clear();                                                                                                           
4086 
4087   double free_cset_start_time = os::elapsedTime();                                                                         
4088 
4089   {                                                                                                                        
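    // The number of workers is capped by the number of chunks of work available; for
    // example, a collection set of 100 regions with a chunk size of 32 yields
    // 100 / 32 = 3 chunks, so at most 3 of the active workers run the task.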
4090     uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);             
4091     uint const num_workers = MIN2(workers()->active_workers(), num_chunks);                                                
4092 
4093     G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);                                   
4094 
4095     log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",                                        
4096                         cl.name(),                                                                                         
4097                         num_workers,                                                                                       
4098                         _collection_set.region_length());                                                                  
4099     workers()->run_task(&cl, num_workers);                                                                                 
4100   }                                                                                                                        
4101   g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);         
4102 
4103   collection_set->clear();                                                                                                 
4104 }                                                                                                                          
4105 
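// Closure used for eager reclamation of humongous objects during the evacuation pause.
// A starts-humongous region is reclaimed only if it was identified earlier in the pause
// as a reclaim candidate, its remembered set is empty and the object is a type array
// (see the detailed reasoning in do_heap_region below).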
4106 class G1FreeHumongousRegionClosure : public HeapRegionClosure {                                                            
4107  private:                                                                                                                  
4108   FreeRegionList* _free_region_list;                                                                                       
4109   HeapRegionSet* _proxy_set;                                                                                               
4110   uint _humongous_objects_reclaimed;                                                                                       
4111   uint _humongous_regions_reclaimed;                                                                                       
4112   size_t _freed_bytes;                                                                                                     
4113  public:                                                                                                                   
4114 
4115   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :                                                         
4116     _free_region_list(free_region_list), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
4117   }                                                                                                                        
4118 
4119   virtual bool do_heap_region(HeapRegion* r) {                                                                             
4120     if (!r->is_starts_humongous()) {                                                                                       
4121       return false;                                                                                                        
4122     }                                                                                                                      
4123 
4124     G1CollectedHeap* g1h = G1CollectedHeap::heap();                                                                        
4125 
4126     oop obj = (oop)r->bottom();                                                                                            
4127     G1CMBitMap* next_bitmap = g1h->concurrent_mark()->next_mark_bitmap();                                                  
4128 
4129     // The following checks, which determine whether the humongous object is live,
4130     // are sufficient. The main additional check (besides having a reference from the
4131     // roots or the young gen) is whether the humongous object has a remembered set entry.
4132     //                                                                                                                     
4133     // A humongous object cannot be live if there is no remembered set for it                                              
4134     // because:                                                                                                            
4135     // - there can be no references from within humongous starts regions referencing                                       
4136     // the object because we never allocate other objects into them.                                                       
4137     // (I.e. there are no intra-region references that may be missed by the                                                
4138     // remembered set)                                                                                                     
4139     // - as soon as there is a remembered set entry to the humongous starts region
4140     // (i.e. it has "escaped" to an old object), this remembered set entry will stay
4141     // until the end of a concurrent mark.
4142     //                                                                                                                     
4143     // It is not required to check whether the object has been found dead by marking
4144     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
4145     // all objects allocated during that time are considered live.
4146     // SATB marking is even more conservative than the remembered set.                                                     
4147     // So if at this point in the collection there is no remembered set entry,                                             
4148     // nobody has a reference to it.                                                                                       
4149     // At the start of collection we flush all refinement logs, and remembered sets                                        
4150     // are completely up-to-date with respect to references to the humongous object.
4151     //                                                                                                                     
4152     // Other implementation considerations:                                                                                
4153     // - never consider object arrays at this time because cleaning up their
4154     // remembered sets would require considerable effort. This is necessary because
4155     // stale remembered sets might reference locations that are currently
4156     // allocated into.
4157     uint region_idx = r->hrm_index();                                                                                      
4158     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||                                                                
4159         !r->rem_set()->is_empty()) {                                                                                       
4160       log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4161                                region_idx,                                                                                 
4162                                (size_t)obj->size() * HeapWordSize,                                                         
4163                                p2i(r->bottom()),                                                                           
4164                                r->rem_set()->occupied(),                                                                   
4165                                r->rem_set()->strong_code_roots_list_length(),                                              
4166                                next_bitmap->is_marked(r->bottom()),                                                        
4167                                g1h->is_humongous_reclaim_candidate(region_idx),                                            
4168                                obj->is_typeArray()                                                                         
4169                               );                                                                                           
4170       return false;                                                                                                        
4171     }                                                                                                                      
4172 
4173     guarantee(obj->is_typeArray(),                                                                                         
4174               "Only eagerly reclaiming type arrays is supported, but the object "                                          
4175               PTR_FORMAT " is not.", p2i(r->bottom()));                                                                    
4176 
4177     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4178                              region_idx,                                                                                   
4179                              (size_t)obj->size() * HeapWordSize,                                                           
4180                              p2i(r->bottom()),                                                                             
4181                              r->rem_set()->occupied(),                                                                     
4182                              r->rem_set()->strong_code_roots_list_length(),                                                
4183                              next_bitmap->is_marked(r->bottom()),                                                          
4184                              g1h->is_humongous_reclaim_candidate(region_idx),                                              
4185                              obj->is_typeArray()                                                                           
4186                             );                                                                                             
4187 
4188     G1ConcurrentMark* const cm = g1h->concurrent_mark();                                                                   
4189     cm->humongous_object_eagerly_reclaimed(r);                                                                             
4190     assert(!cm->is_marked_in_prev_bitmap(obj) && !cm->is_marked_in_next_bitmap(obj),                                       
4191            "Eagerly reclaimed humongous region %u should not be marked at all but is in prev %s next %s",                  
4192            region_idx,                                                                                                     
4193            BOOL_TO_STR(cm->is_marked_in_prev_bitmap(obj)),                                                                 
4194            BOOL_TO_STR(cm->is_marked_in_next_bitmap(obj)));                                                                
4195     _humongous_objects_reclaimed++;                                                                                        
4196     do {                                                                                                                   
4197       HeapRegion* next = g1h->next_region_in_humongous(r);                                                                 
4198       _freed_bytes += r->used();                                                                                           
4199       r->set_containing_set(NULL);                                                                                         
4200       _humongous_regions_reclaimed++;                                                                                      
4201       g1h->free_humongous_region(r, _free_region_list);                                                                    
4202       r = next;                                                                                                            
4203     } while (r != NULL);                                                                                                   
4204 
4205     return false;                                                                                                          
4206   }                                                                                                                        
4207 
4208   uint humongous_objects_reclaimed() {                                                                                     
4209     return _humongous_objects_reclaimed;                                                                                   
4210   }                                                                                                                        
4211 
4212   uint humongous_regions_reclaimed() {                                                                                     
4213     return _humongous_regions_reclaimed;                                                                                   
4214   }                                                                                                                        
4215 
4216   size_t bytes_freed() const {                                                                                             
4217     return _freed_bytes;                                                                                                   
4218   }                                                                                                                        
4219 };                                                                                                                         
4220 
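     // Walk all regions with G1FreeHumongousRegionClosure, freeing humongous
     // regions whose objects are dead reclaim candidates, return them to the
     // free list, and record the time taken in the phase times.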
4221 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {                                                                
4222   assert_at_safepoint_on_vm_thread();                                                                                      
4223 
4224   if (!G1EagerReclaimHumongousObjects ||                                                                                   
4225       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {                                     
4226     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);                                             
4227     return;                                                                                                                
4228   }                                                                                                                        
4229 
4230   double start_time = os::elapsedTime();                                                                                   
4231 
4232   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");                                                       
4233 
4234   G1FreeHumongousRegionClosure cl(&local_cleanup_list);                                                                    
4235   heap_region_iterate(&cl);                                                                                                
4236 
4237   remove_from_old_sets(0, cl.humongous_regions_reclaimed());                                                               
4238 
4239   G1HRPrinter* hrp = hr_printer();                                                                                         
4240   if (hrp->is_active()) {                                                                                                  
4241     FreeRegionListIterator iter(&local_cleanup_list);                                                                      
4242     while (iter.more_available()) {                                                                                        
4243       HeapRegion* hr = iter.get_next();                                                                                    
4244       hrp->cleanup(hr);                                                                                                    
4245     }                                                                                                                      
4246   }                                                                                                                        
4247 
4248   prepend_to_freelist(&local_cleanup_list);                                                                                
4249   decrement_summary_bytes(cl.bytes_freed());                                                                               
4250 
4251   g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,             
4252                                                                     cl.humongous_objects_reclaimed());                     
4253 }                                                                                                                          
4254 
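     // Clears the in-collection-set state of every region currently in the
     // collection set; used when a collection set is abandoned without being
     // evacuated.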
4255 class G1AbandonCollectionSetClosure : public HeapRegionClosure {                                                           
4256 public:                                                                                                                    
4257   virtual bool do_heap_region(HeapRegion* r) {                                                                             
4258     assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());                          
4259     G1CollectedHeap::heap()->clear_in_cset(r);                                                                             
4260     r->set_young_index_in_cset(-1);                                                                                        
4261     return false;                                                                                                          
4262   }                                                                                                                        
4263 };                                                                                                                         
4264 
4265 void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {                                            
4266   G1AbandonCollectionSetClosure cl;                                                                                        
4267   collection_set->iterate(&cl);                                                                                            
4268 
4269   collection_set->clear();                                                                                                 
4270   collection_set->stop_incremental_building();                                                                             
4271 }                                                                                                                          
4272 
4273 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {                                                             
4274   return _allocator->is_retained_old_region(hr);                                                                           
4275 }                                                                                                                          
4276 
4277 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {                                                      
4278   _eden.add(hr);                                                                                                           
4279   _g1_policy->set_region_eden(hr);                                                                                         
4280 }                                                                                                                          
4281 
4282 #ifdef ASSERT                                                                                                              
4283 
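     // Verification closure (debug builds only): checks that no region in the
     // heap is still tagged as young.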
4284 class NoYoungRegionsClosure: public HeapRegionClosure {                                                                    
4285 private:                                                                                                                   
4286   bool _success;                                                                                                           
4287 public:                                                                                                                    
4288   NoYoungRegionsClosure() : _success(true) { }                                                                             
4289   bool do_heap_region(HeapRegion* r) {                                                                                     
4290     if (r->is_young()) {                                                                                                   
4291       log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",                                     
4292                             p2i(r->bottom()), p2i(r->end()));                                                              
4293       _success = false;                                                                                                    
4294     }                                                                                                                      
4295     return false;                                                                                                          
4296   }                                                                                                                        
4297   bool success() { return _success; }                                                                                      
4298 };                                                                                                                         
4299 
4300 bool G1CollectedHeap::check_young_list_empty() {                                                                           
4301   bool ret = (young_regions_count() == 0);                                                                                 
4302 
4303   NoYoungRegionsClosure closure;                                                                                           
4304   heap_region_iterate(&closure);                                                                                           
4305   ret = ret && closure.success();                                                                                          
4306 
4307   return ret;                                                                                                              
4308 }                                                                                                                          
4309 
4310 #endif // ASSERT                                                                                                           
4311 
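     // Removes old regions from the old set and detaches young regions from
     // their survivor rate group. Free, humongous and archive regions are left
     // alone; their sets are not being torn down.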
4312 class TearDownRegionSetsClosure : public HeapRegionClosure {                                                               
4313   HeapRegionSet *_old_set;                                                                                                 
4314 
4315 public:                                                                                                                    
4316   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }                                                
4317 
4318   bool do_heap_region(HeapRegion* r) {                                                                                     
4319     if (r->is_old()) {                                                                                                     
4320       _old_set->remove(r);                                                                                                 
4321     } else if(r->is_young()) {                                                                                             
4322       r->uninstall_surv_rate_group();                                                                                      
4323     } else {                                                                                                               
4324       // We ignore free regions; we'll empty the free list afterwards.
4325       // We ignore humongous and archive regions; we're not tearing down
4326       // those sets.
4327       assert(r->is_archive() || r->is_free() || r->is_humongous(),                                                         
4328              "it cannot be another type");                                                                                 
4329     }                                                                                                                      
4330     return false;                                                                                                          
4331   }                                                                                                                        
4332 
4333   ~TearDownRegionSetsClosure() {                                                                                           
4334     assert(_old_set->is_empty(), "post-condition");                                                                        
4335   }                                                                                                                        
4336 };                                                                                                                         
4337 
4338 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {                                                         
4339   assert_at_safepoint_on_vm_thread();                                                                                      
4340 
4341   if (!free_list_only) {                                                                                                   
4342     TearDownRegionSetsClosure cl(&_old_set);                                                                               
4343     heap_region_iterate(&cl);                                                                                              
4344 
4345     // Note that emptying the _young_list is postponed and instead done as                                                 
4346     // the first step when rebuilding the region sets again. The reason for
4347     // this is that during a full GC string deduplication needs to know if                                                 
4348     // a collected region was young or old when the full GC was initiated.                                                 
4349   }                                                                                                                        
4350   _hrm.remove_all_free_regions();                                                                                          
4351 }                                                                                                                          
4352 
4353 void G1CollectedHeap::increase_used(size_t bytes) {                                                                        
4354   _summary_bytes_used += bytes;                                                                                            
4355 }                                                                                                                          
4356 
4357 void G1CollectedHeap::decrease_used(size_t bytes) {                                                                        
4358   assert(_summary_bytes_used >= bytes,                                                                                     
4359          "invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,                               
4360          _summary_bytes_used, bytes);                                                                                      
4361   _summary_bytes_used -= bytes;                                                                                            
4362 }                                                                                                                          
4363 
4364 void G1CollectedHeap::set_used(size_t bytes) {                                                                             
4365   _summary_bytes_used = bytes;                                                                                             
4366 }                                                                                                                          
4367 
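     // Rebuilds the free list and old set after a full GC: empty regions go
     // back on the free list, all other non-humongous, non-archive regions are
     // moved to old gen, and the total used bytes are accumulated on the way.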
4368 class RebuildRegionSetsClosure : public HeapRegionClosure {                                                                
4369 private:                                                                                                                   
4370   bool _free_list_only;                                                                                                    
4371 
4372   HeapRegionSet* _old_set;                                                                                                 
4373   HeapRegionManager* _hrm;                                                                                                 
4374 
4375   size_t _total_used;                                                                                                      
4376 
4377 public:                                                                                                                    
4378   RebuildRegionSetsClosure(bool free_list_only,                                                                            
4379                            HeapRegionSet* old_set,                                                                         
4380                            HeapRegionManager* hrm) :                                                                       
4381     _free_list_only(free_list_only),                                                                                       
4382     _old_set(old_set), _hrm(hrm), _total_used(0) {                                                                         
4383     assert(_hrm->num_free_regions() == 0, "pre-condition");                                                                
4384     if (!free_list_only) {                                                                                                 
4385       assert(_old_set->is_empty(), "pre-condition");                                                                       
4386     }                                                                                                                      
4387   }                                                                                                                        
4388 
4389   bool do_heap_region(HeapRegion* r) {                                                                                     
4390     // After full GC, no region should have a remembered set.                                                              
4391     r->rem_set()->clear(true);                                                                                             
4392     if (r->is_empty()) {                                                                                                   
4393       // Add free regions to the free list                                                                                 
4394       r->set_free();                                                                                                       
4395       _hrm->insert_into_free_list(r);                                                                                      
4396     } else if (!_free_list_only) {                                                                                         
4397 
4398       if (r->is_archive() || r->is_humongous()) {                                                                          
4399         // We ignore archive and humongous regions; their sets were left unchanged when the region sets were torn down.
4400       } else {                                                                                                             
4401         assert(r->is_young() || r->is_free() || r->is_old(), "invariant");                                                 
4402         // Move all remaining regions (young or old at this point) to old gen and register them in the old set.
4403         r->move_to_old();                                                                                                  
4404         _old_set->add(r);                                                                                                  
4405       }                                                                                                                    
4406       _total_used += r->used();                                                                                            
4407     }                                                                                                                      
4408 
4409     return false;                                                                                                          
4410   }                                                                                                                        
4411 
4412   size_t total_used() {                                                                                                    
4413     return _total_used;                                                                                                    
4414   }                                                                                                                        
4415 };                                                                                                                         
4416 
4417 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {                                                           
4418   assert_at_safepoint_on_vm_thread();                                                                                      
4419 
4420   if (!free_list_only) {                                                                                                   
4421     _eden.clear();                                                                                                         
4422     _survivor.clear();                                                                                                     
4423   }                                                                                                                        
4424 
4425   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);                                                           
4426   heap_region_iterate(&cl);                                                                                                
4427 
4428   if (!free_list_only) {                                                                                                   
4429     set_used(cl.total_used());                                                                                             
4430     if (_archive_allocator != NULL) {                                                                                      
4431       _archive_allocator->clear_used();                                                                                    
4432     }                                                                                                                      
4433   }                                                                                                                        
4434   assert(used_unlocked() == recalculate_used(),                                                                            
4435          "inconsistent used_unlocked(), "                                                                                  
4436          "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,                                                              
4437          used_unlocked(), recalculate_used());                                                                             
4438 }                                                                                                                          
4439 
4440 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {                                                           
4441   HeapRegion* hr = heap_region_containing(p);                                                                              
4442   return hr->is_in(p);                                                                                                     
4443 }                                                                                                                          
4444 
4445 // Methods for the mutator alloc region                                                                                    
4446 
4447 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,                                                    
4448                                                       bool force) {                                                        
4449   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);                                                      
4450   bool should_allocate = g1_policy()->should_allocate_mutator_region();                                                    
4451   if (force || should_allocate) {                                                                                          
4452     HeapRegion* new_alloc_region = new_region(word_size,                                                                   
4453                                               false /* is_old */,                                                          
4454                                               false /* do_expand */);                                                      
4455     if (new_alloc_region != NULL) {                                                                                        
4456       set_region_short_lived_locked(new_alloc_region);                                                                     
4457       _hr_printer.alloc(new_alloc_region, !should_allocate);                                                               
4458       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);                                             
4459       _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);                                                  
4460       return new_alloc_region;                                                                                             
4461     }                                                                                                                      
4462   }                                                                                                                        
4463   return NULL;                                                                                                             
4464 }                                                                                                                          
4465 
4466 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,                                                
4467                                                   size_t allocated_bytes) {                                                
4468   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);                                                      
4469   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");                                             
4470 
4471   collection_set()->add_eden_region(alloc_region);                                                                         
4472   increase_used(allocated_bytes);                                                                                          
4473   _hr_printer.retire(alloc_region);                                                                                        
4474   // We update the eden size here, when the region is retired, rather
4475   // than when it is allocated, since this is the point at which its
4476   // used space has been recorded in _summary_bytes_used.
4477   g1mm()->update_eden_size();                                                                                              
4478 }                                                                                                                          
4479 
4480 // Methods for the GC alloc regions                                                                                        
4481 
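     // Old destinations can always get a new GC alloc region; survivor
     // allocation is capped by the policy's maximum number of survivor regions.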
4482 bool G1CollectedHeap::has_more_regions(InCSetState dest) {                                                                 
4483   if (dest.is_old()) {                                                                                                     
4484     return true;                                                                                                           
4485   } else {                                                                                                                 
4486     return survivor_regions_count() < g1_policy()->max_survivor_regions();                                                 
4487   }                                                                                                                        
4488 }                                                                                                                          
4489 
4490 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {                                     
4491   assert(FreeList_lock->owned_by_self(), "pre-condition");                                                                 
4492 
4493   if (!has_more_regions(dest)) {                                                                                           
4494     return NULL;                                                                                                           
4495   }                                                                                                                        
4496 
4497   const bool is_survivor = dest.is_young();                                                                                
4498 
4499   HeapRegion* new_alloc_region = new_region(word_size,                                                                     
4500                                             !is_survivor,                                                                  
4501                                             true /* do_expand */);                                                         
4502   if (new_alloc_region != NULL) {                                                                                          
4503     if (is_survivor) {                                                                                                     
4504       new_alloc_region->set_survivor();                                                                                    
4505       _survivor.add(new_alloc_region);                                                                                     
4506       _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);                                            
4507     } else {                                                                                                               
4508       new_alloc_region->set_old();                                                                                         
4509       _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);                                                 
4510     }                                                                                                                      
4511     _g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);                                                    
4512     _hr_printer.alloc(new_alloc_region);                                                                                   
4513     bool during_im = collector_state()->in_initial_mark_gc();                                                              
4514     new_alloc_region->note_start_of_copying(during_im);                                                                    
4515     return new_alloc_region;                                                                                               
4516   }                                                                                                                        
4517   return NULL;                                                                                                             
4518 }                                                                                                                          
4519 
4520 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,                                                     
4521                                              size_t allocated_bytes,                                                       
4522                                              InCSetState dest) {                                                           
4523   bool during_im = collector_state()->in_initial_mark_gc();                                                                
4524   alloc_region->note_end_of_copying(during_im);                                                                            
4525   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);                                                             
4526   if (dest.is_old()) {                                                                                                     
4527     old_set_add(alloc_region);                                                                                             
4528   }                                                                                                                        
4529   _hr_printer.retire(alloc_region);                                                                                        
4530 }                                                                                                                          
4531 
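     // Allocates the free region with the highest index, expanding the heap at
     // the upper end if required, and returns it (or NULL if no such region is
     // available).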
4532 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {                                                                 
4533   bool expanded = false;                                                                                                   
4534   uint index = _hrm.find_highest_free(&expanded);                                                                          
4535 
4536   if (index != G1_NO_HRM_INDEX) {                                                                                          
4537     if (expanded) {                                                                                                        
4538       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_
4539                                 HeapRegion::GrainWords * HeapWordSize);                                                    
4540     }                                                                                                                      
4541     _hrm.allocate_free_regions_starting_at(index, 1);                                                                      
4542     return region_at(index);                                                                                               
4543   }                                                                                                                        
4544   return NULL;                                                                                                             
4545 }                                                                                                                          
4546 
4547 // Optimized nmethod scanning                                                                                              
4548 
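     // Registers the nmethod as a strong code root with every region that
     // contains an object referenced by the nmethod's oops.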
4549 class RegisterNMethodOopClosure: public OopClosure {                                                                       
4550   G1CollectedHeap* _g1h;                                                                                                   
4551   nmethod* _nm;                                                                                                            
4552 
4553   template <class T> void do_oop_work(T* p) {                                                                              
4554     T heap_oop = RawAccess<>::oop_load(p);                                                                                 
4555     if (!CompressedOops::is_null(heap_oop)) {                                                                              
4556       oop obj = CompressedOops::decode_not_null(heap_oop);                                                                 
4557       HeapRegion* hr = _g1h->heap_region_containing(obj);                                                                  
4558       assert(!hr->is_continues_humongous(),                                                                                
4559              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT                       
4560              " starting at " HR_FORMAT,                                                                                    
4561              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));                              
4562 
4563       // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.                                        
4564       hr->add_strong_code_root_locked(_nm);                                                                                
4565     }                                                                                                                      
4566   }                                                                                                                        
4567 
4568 public:                                                                                                                    
4569   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :                                                           
4570     _g1h(g1h), _nm(nm) {}                                                                                                  
4571 
4572   void do_oop(oop* p)       { do_oop_work(p); }                                                                            
4573   void do_oop(narrowOop* p) { do_oop_work(p); }                                                                            
4574 };                                                                                                                         
4575 
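     // Removes the nmethod from the strong code root lists of the regions
     // containing the objects its oops refer to.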
4576 class UnregisterNMethodOopClosure: public OopClosure {                                                                     
4577   G1CollectedHeap* _g1h;                                                                                                   
4578   nmethod* _nm;                                                                                                            
4579 
4580   template <class T> void do_oop_work(T* p) {                                                                              
4581     T heap_oop = RawAccess<>::oop_load(p);                                                                                 
4582     if (!CompressedOops::is_null(heap_oop)) {                                                                              
4583       oop obj = CompressedOops::decode_not_null(heap_oop);                                                                 
4584       HeapRegion* hr = _g1h->heap_region_containing(obj);                                                                  
4585       assert(!hr->is_continues_humongous(),                                                                                
4586              "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT                    
4587              " starting at " HR_FORMAT,                                                                                    
4588              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));                              
4589 
4590       hr->remove_strong_code_root(_nm);                                                                                    
4591     }                                                                                                                      
4592   }                                                                                                                        
4593 
4594 public:                                                                                                                    
4595   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :                                                         
4596     _g1h(g1h), _nm(nm) {}                                                                                                  
4597 
4598   void do_oop(oop* p)       { do_oop_work(p); }                                                                            
4599   void do_oop(narrowOop* p) { do_oop_work(p); }                                                                            
4600 };                                                                                                                         
4601 
4602 // Returns true if the reference points to an object that                                                                  
4603 // can move in an incremental collection.                                                                                  
4604 bool G1CollectedHeap::is_scavengable(oop obj) {                                                                            
4605   HeapRegion* hr = heap_region_containing(obj);                                                                            
4606   return !hr->is_pinned();                                                                                                 
4607 }                                                                                                                          
4608 
4609 void G1CollectedHeap::register_nmethod(nmethod* nm) {                                                                      
4610   guarantee(nm != NULL, "sanity");                                                                                         
4611   RegisterNMethodOopClosure reg_cl(this, nm);                                                                              
4612   nm->oops_do(&reg_cl);                                                                                                    
4613 }                                                                                                                          
4614 
4615 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {                                                                    
4616   guarantee(nm != NULL, "sanity");                                                                                         
4617   UnregisterNMethodOopClosure reg_cl(this, nm);                                                                            
4618   nm->oops_do(&reg_cl, true);                                                                                              
4619 }                                                                                                                          
4620 
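     // Purges unused code root set memory and records the time taken in the
     // phase times.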
4621 void G1CollectedHeap::purge_code_root_memory() {                                                                           
4622   double purge_start = os::elapsedTime();                                                                                  
4623   G1CodeRootSet::purge();                                                                                                  
4624   double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;                                                       
4625   g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);                                           
4626 }                                                                                                                          
4627 
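     // Re-registers each nmethod in the code cache as a strong code root with
     // the regions it references (only when ScavengeRootsInCode is enabled).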
4628 class RebuildStrongCodeRootClosure: public CodeBlobClosure {                                                               
4629   G1CollectedHeap* _g1h;                                                                                                   
4630 
4631 public:                                                                                                                    
4632   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :                                                                     
4633     _g1h(g1h) {}                                                                                                           
4634 
4635   void do_code_blob(CodeBlob* cb) {                                                                                        
4636     nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;                                                          
4637     if (nm == NULL) {                                                                                                      
4638       return;                                                                                                              
4639     }                                                                                                                      
4640 
4641     if (ScavengeRootsInCode) {                                                                                             
4642       _g1h->register_nmethod(nm);                                                                                          
4643     }                                                                                                                      
4644   }                                                                                                                        
4645 };                                                                                                                         
4646 
4647 void G1CollectedHeap::rebuild_strong_code_roots() {                                                                        
4648   RebuildStrongCodeRootClosure blob_cl(this);                                                                              
4649   CodeCache::blobs_do(&blob_cl);                                                                                           
4650 }                                                                                                                          
4651 
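     // Serviceability support (memory usage, managers and pools) is delegated
     // to the G1 monitoring support object.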
4652 void G1CollectedHeap::initialize_serviceability() {                                                                        
4653   _g1mm->initialize_serviceability();                                                                                      
4654 }                                                                                                                          
4655 
4656 MemoryUsage G1CollectedHeap::memory_usage() {                                                                              
4657   return _g1mm->memory_usage();                                                                                            
4658 }                                                                                                                          
4659 
4660 GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {                                                       
4661   return _g1mm->memory_managers();                                                                                         
4662 }                                                                                                                          
4663 
4664 GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {                                                               
4665   return _g1mm->memory_pools();                                                                                            
4666 }                                                                                                                          
--- EOF ---