/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
  switch (dest.value()) {
    case InCSetState::Young:
      return &_survivor_plab_stats;
    case InCSetState::Old:
      return &_old_plab_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using a similar path as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}
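
// Allocate a block of the given word size during evacuation. The destination
// state selects the GC allocation region: survivor regions for objects that
// are still young, old regions for objects being tenured.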
HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
                                                  size_t word_size,
                                                  AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

// Inline functions for G1CollectedHeap

inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
  return _allocation_context_stats;
}

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end())));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
                 p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
  return _hrm.addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = heap_region_containing_raw(addr);
  if (hr->is_continues_humongous()) {
    return hr->humongous_start_region();
  }
  return hr;
}

inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                     uint* gc_count_before_ret,
                                                     uint* gclocker_retry_count_ret,
                                                     uint* gc_attempt) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  AllocationContext_t context = AllocationContext::current();
  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
                                                                                   false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     context,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret,
                                     gc_attempt);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}
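
// Allocation in survivor regions during evacuation: first try the current
// survivor GC alloc region without taking a lock, then retry under the
// FreeList_lock. Newly allocated blocks are marked young in the card table
// via dirty_young_block().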
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
                                                              AllocationContext_t context) {
  assert(!is_humongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                       false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
                                                         AllocationContext_t context) {
  assert(!is_humongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                  true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                 true /* bot_updates */);
  }
  return result;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}
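
// The two queries below consult the concurrent marking bitmaps: "prev" is
// the bitmap of the last completed marking cycle, "next" the bitmap being
// built by the marking currently in progress (if any).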
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret ||  obj_in_cs(obj), "sanity");
  return ret;
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _in_cset_fast_test.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_cset(uint index) {
  _in_cset_fast_test.set_humongous(index);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}
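
// Injects evacuation failures for testing: once armed for the current GC,
// every G1EvacuationFailureALotCount-th call returns true and the counter
// is reset.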
inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // We not only set the "live" flag in the humongous_is_live table, but also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever set the "live" flag, and only ever clear the
  // entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (!_humongous_is_live.is_live(region)) {
    _humongous_is_live.set_live(region);
    _in_cset_fast_test.clear_humongous(region);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP