src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

rev 8024 : imported patch event1
* * *
imported patch event2
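The two listings below are the before and after versions of the same excerpt of this file. The patch threads a new uint* gc_attempt out-parameter through attempt_allocation() into attempt_allocation_slow(); nothing else in the excerpt changes.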


 115   // Their validity is dependent on the GC timestamp.
 116   clear_cset_start_regions();
 117 }
 118 
 119 inline void G1CollectedHeap::increment_gc_time_stamp() {
 120   ++_gc_time_stamp;
 121   OrderAccess::fence();
 122 }
 123 
 124 inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
 125   _old_set.remove(hr);
 126 }
 127 
 128 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
 129   HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
 130   return r != NULL && r->in_collection_set();
 131 }
 132 
 133 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
 134                                                      uint* gc_count_before_ret,
 135                                                      uint* gclocker_retry_count_ret) {
 136   assert_heap_not_locked_and_not_at_safepoint();
 137   assert(!is_humongous(word_size), "attempt_allocation() should not "
 138          "be called for humongous allocation requests");
 139 
 140   AllocationContext_t context = AllocationContext::current();
 141   HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
 142                                                                                    false /* bot_updates */);
 143   if (result == NULL) {
 144     result = attempt_allocation_slow(word_size,
 145                                      context,
 146                                      gc_count_before_ret,
 147                                      gclocker_retry_count_ret);
 148   }
 149   assert_heap_not_locked();
 150   if (result != NULL) {
 151     dirty_young_block(result, word_size);
 152   }
 153   return result;
 154 }
 155 
 156 inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
 157                                                               AllocationContext_t context) {
 158   assert(!is_humongous(word_size),
 159          "we should not be seeing humongous-size allocations in this path");
 160 
 161   HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
 162                                                                                        false /* bot_updates */);
 163   if (result == NULL) {
 164     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
 165     result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
 166                                                                                       false /* bot_updates */);
 167   }
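
For context, survivor_attempt_allocation() above uses a two-step pattern that recurs throughout this file: an unlocked fast-path attempt on the current allocation region, then one retry under FreeList_lock (taken with Mutex::_no_safepoint_check_flag, since this path runs during a collection). A minimal, self-contained sketch of that pattern follows; it is not HotSpot code, and the bump-pointer region and all names in it are illustrative assumptions.

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    // Illustrative stand-ins for a G1 allocation region and FreeList_lock.
    static char                      region[1024];
    static std::atomic<std::size_t>  top{0};
    static std::mutex                free_list_lock;

    // Fast path: lock-free bump-pointer allocation via CAS; may fail.
    static void* attempt_allocation(std::size_t bytes) {
      std::size_t old_top = top.load();
      while (old_top + bytes <= sizeof(region)) {
        if (top.compare_exchange_weak(old_top, old_top + bytes)) {
          return region + old_top;   // claimed [old_top, old_top + bytes)
        }
        // CAS lost a race; old_top was refreshed, so loop and retry.
      }
      return nullptr;                // region exhausted
    }

    // Slow path: retry once the lock is held. In G1 this is where a full
    // region could be retired and a fresh one installed before retrying.
    static void* attempt_allocation_locked(std::size_t bytes) {
      std::lock_guard<std::mutex> x(free_list_lock);
      return attempt_allocation(bytes);
    }

    void* survivor_attempt_allocation_sketch(std::size_t bytes) {
      void* result = attempt_allocation(bytes);      // no lock, common case
      if (result == nullptr) {
        result = attempt_allocation_locked(bytes);   // contended/full case
      }
      return result;
    }

The point of the split is that the common case pays no locking cost; the lock is only taken once the fast path has already failed. The listing that follows is the same excerpt after the patch.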

* * *

 115   // Their validity is dependent on the GC timestamp.
 116   clear_cset_start_regions();
 117 }
 118 
 119 inline void G1CollectedHeap::increment_gc_time_stamp() {
 120   ++_gc_time_stamp;
 121   OrderAccess::fence();
 122 }
 123 
 124 inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
 125   _old_set.remove(hr);
 126 }
 127 
 128 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
 129   HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
 130   return r != NULL && r->in_collection_set();
 131 }
 132 
 133 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
 134                                                      uint* gc_count_before_ret,
 135                                                      uint* gclocker_retry_count_ret,
 136                                                      uint* gc_attempt) {
 137   assert_heap_not_locked_and_not_at_safepoint();
 138   assert(!is_humongous(word_size), "attempt_allocation() should not "
 139          "be called for humongous allocation requests");
 140 
 141   AllocationContext_t context = AllocationContext::current();
 142   HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
 143                                                                                    false /* bot_updates */);
 144   if (result == NULL) {
 145     result = attempt_allocation_slow(word_size,
 146                                      context,
 147                                      gc_count_before_ret,
 148                                      gclocker_retry_count_ret,
 149                                      gc_attempt);
 150   }
 151   assert_heap_not_locked();
 152   if (result != NULL) {
 153     dirty_young_block(result, word_size);
 154   }
 155   return result;
 156 }
 157 
 158 inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
 159                                                               AllocationContext_t context) {
 160   assert(!is_humongous(word_size),
 161          "we should not be seeing humongous-size allocations in this path");
 162 
 163   HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
 164                                                                                        false /* bot_updates */);
 165   if (result == NULL) {
 166     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
 167     result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
 168                                                                                       false /* bot_updates */);
 169   }
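
The only change in this excerpt is the new uint* gc_attempt out-parameter on attempt_allocation() (new lines 136 and 149), forwarded untouched to attempt_allocation_slow(). The slow path itself is not shown, so where the counter is updated is not visible here; given the "event1"/"event2" patch names above, it presumably lets allocation events report how many GC attempts a single allocation request has caused. A hedged sketch of the threading, with every function body an assumption rather than the webrev's code:

    #include <cstddef>
    #include <cstdio>

    typedef unsigned int uint;

    // Stand-in fast path: pretend the current region is always full.
    static void* fast_path_alloc(std::size_t /*word_size*/) { return nullptr; }

    // Stand-in slow path: the real one may schedule a GC; here we only
    // model the bookkeeping on the three out-parameters.
    static void* attempt_allocation_slow(std::size_t word_size,
                                         uint* gc_count_before_ret,
                                         uint* gclocker_retry_count_ret,
                                         uint* gc_attempt) {
      (void)word_size; (void)gclocker_retry_count_ret;
      *gc_count_before_ret = 0;   // the real code reads this under Heap_lock
      ++*gc_attempt;              // assumed: count one more GC-driven attempt
      return nullptr;             // pretend the collection freed nothing
    }

    // Mirrors the patched attempt_allocation(): fast path first, slow
    // path second, all counters passed through as out-parameters.
    static void* attempt_allocation(std::size_t word_size,
                                    uint* gc_count_before_ret,
                                    uint* gclocker_retry_count_ret,
                                    uint* gc_attempt) {
      void* result = fast_path_alloc(word_size);
      if (result == nullptr) {
        result = attempt_allocation_slow(word_size, gc_count_before_ret,
                                         gclocker_retry_count_ret, gc_attempt);
      }
      return result;
    }

    int main() {
      uint gc_count_before = 0, gclocker_retries = 0, gc_attempt = 0;
      for (int try_count = 0; try_count < 3; try_count++) {
        if (attempt_allocation(100, &gc_count_before,
                               &gclocker_retries, &gc_attempt) != nullptr) {
          break;
        }
      }
      std::printf("gave up after %u gc attempt(s)\n", gc_attempt);
      return 0;
    }

Because gc_attempt is an out-parameter owned by the caller, it survives across retries of the whole fast-path/slow-path sequence, which is what distinguishes it from a local counter inside the slow path.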

