
src/share/vm/gc/g1/g1CollectedHeap.cpp


 870     // alloc_archive_regions.
 871     HeapRegion* curr_region = start_region;
 872     while (curr_region != NULL) {
 873       guarantee(curr_region->is_archive(),
 874                 "Expected archive region at index %u", curr_region->hrm_index());
 875       if (curr_region != last_region) {
 876         curr_region = _hrm.next_region_in_heap(curr_region);
 877       } else {
 878         curr_region = NULL;
 879       }
 880     }
 881 
 882     prev_last_addr = last_address;
 883     prev_last_region = last_region;
 884 
 885     // Fill the memory below the allocated range with dummy object(s),
 886     // if the region bottom does not match the range start, or if the previous
 887     // range ended within the same G1 region, and there is a gap.
 888     if (start_address != bottom_address) {
 889       size_t fill_size = pointer_delta(start_address, bottom_address);
 890       G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
 891       increase_used(fill_size * HeapWordSize);
 892     }
 893   }
 894 }
 895 


 896 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
 897                                                      uint* gc_count_before_ret,
 898                                                      uint* gclocker_retry_count_ret) {
 899   assert_heap_not_locked_and_not_at_safepoint();
 900   assert(!is_humongous(word_size), "attempt_allocation() should not "
 901          "be called for humongous allocation requests");
 902 
 903   AllocationContext_t context = AllocationContext::current();
 904   HeapWord* result = _allocator->attempt_allocation(word_size, context);
 905 
 906   if (result == NULL) {
 907     result = attempt_allocation_slow(word_size,
 908                                      context,
 909                                      gc_count_before_ret,
 910                                      gclocker_retry_count_ret);
 911   }
 912   assert_heap_not_locked();
 913   if (result != NULL) {
 914     dirty_young_block(result, word_size);
 915   }


1835   _expand_heap_after_alloc_failure(true),
1836   _old_marking_cycles_started(0),
1837   _old_marking_cycles_completed(0),
1838   _heap_summary_sent(false),
1839   _in_cset_fast_test(),
1840   _dirty_cards_region_list(NULL),
1841   _worker_cset_start_region(NULL),
1842   _worker_cset_start_region_time_stamp(NULL),
1843   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1844   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1845   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1846   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1847 
1848   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1849                           /* are_GC_task_threads */true,
1850                           /* are_ConcurrentGC_threads */false);
1851   _workers->initialize_workers();
1852 
1853   _allocator = G1Allocator::create_allocator(this);
1854   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1855 
1856   // Override the default _filler_array_max_size so that no humongous filler
1857   // objects are created.
1858   _filler_array_max_size = _humongous_object_threshold_in_words;
1859 
1860   uint n_queues = ParallelGCThreads;
1861   _task_queues = new RefToScanQueueSet(n_queues);
1862 
1863   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1864   assert(n_rem_sets > 0, "Invariant.");
1865 
1866   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1867   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1868   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1869 
1870   for (uint i = 0; i < n_queues; i++) {
1871     RefToScanQueue* q = new RefToScanQueue();
1872     q->initialize();
1873     _task_queues->register_queue(i, q);
1874     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1875   }
1876   clear_cset_start_regions();
1877 
1878   // Initialize the G1EvacuationFailureALot counters and flags.




 870     // alloc_archive_regions.
 871     HeapRegion* curr_region = start_region;
 872     while (curr_region != NULL) {
 873       guarantee(curr_region->is_archive(),
 874                 "Expected archive region at index %u", curr_region->hrm_index());
 875       if (curr_region != last_region) {
 876         curr_region = _hrm.next_region_in_heap(curr_region);
 877       } else {
 878         curr_region = NULL;
 879       }
 880     }
 881 
 882     prev_last_addr = last_address;
 883     prev_last_region = last_region;
 884 
 885     // Fill the memory below the allocated range with dummy object(s),
 886     // if the region bottom does not match the range start, or if the previous
 887     // range ended within the same G1 region, and there is a gap.
 888     if (start_address != bottom_address) {
 889       size_t fill_size = pointer_delta(start_address, bottom_address);
 890       fill_with_non_humongous_objects(bottom_address, fill_size);
 891       increase_used(fill_size * HeapWordSize);
 892     }
 893   }
 894 }
 895 
 896 void G1CollectedHeap::fill_with_non_humongous_objects(HeapWord* start, size_t words, bool zap)
 897 {
 898   size_t prev_filler_array_max_size = _filler_array_max_size;
 899   _filler_array_max_size = _humongous_object_threshold_in_words;
 900 
 901   CollectedHeap::fill_with_objects(start, words, zap);
 902 
 903   _filler_array_max_size = prev_filler_array_max_size;
 904 }
 905 
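Note on this hunk: in the listing that contains the new helper (the second one), the archive-range gap fill at line 890 goes through fill_with_non_humongous_objects(), which saves _filler_array_max_size, caps it at _humongous_object_threshold_in_words for the duration of the CollectedHeap::fill_with_objects() call, and then restores the previous value. In that same listing the unconditional override in the constructor (lines 1856-1858 of the other listing, "so that no humongous filler objects are created") is gone, so the cap that keeps filler objects below the humongous threshold applies only while filling gaps around archive ranges rather than for the whole lifetime of the heap. One way to express the same save/override/restore idiom is a small scoped guard; the sketch below is a standalone illustration only (hypothetical Heap and ScopedFillerSizeCap names, not HotSpot code and not part of this patch):

    // Standalone sketch of a scoped save/override/restore guard; the names and
    // types are simplified stand-ins, not HotSpot classes.
    #include <cstddef>

    struct Heap {
      size_t filler_array_max_size;
    };

    class ScopedFillerSizeCap {
      Heap&  _heap;
      size_t _saved;
     public:
      ScopedFillerSizeCap(Heap& heap, size_t cap)
        : _heap(heap), _saved(heap.filler_array_max_size) {
        _heap.filler_array_max_size = cap;      // apply the temporary cap
      }
      ~ScopedFillerSizeCap() {
        _heap.filler_array_max_size = _saved;   // restore on scope exit
      }
    };

    // Usage: the previous value is restored automatically when the guard goes
    // out of scope, even on an early return from the enclosing function.
    // { ScopedFillerSizeCap cap(heap, humongous_threshold); /* fill here */ }
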
 906 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
 907                                                      uint* gc_count_before_ret,
 908                                                      uint* gclocker_retry_count_ret) {
 909   assert_heap_not_locked_and_not_at_safepoint();
 910   assert(!is_humongous(word_size), "attempt_allocation() should not "
 911          "be called for humongous allocation requests");
 912 
 913   AllocationContext_t context = AllocationContext::current();
 914   HeapWord* result = _allocator->attempt_allocation(word_size, context);
 915 
 916   if (result == NULL) {
 917     result = attempt_allocation_slow(word_size,
 918                                      context,
 919                                      gc_count_before_ret,
 920                                      gclocker_retry_count_ret);
 921   }
 922   assert_heap_not_locked();
 923   if (result != NULL) {
 924     dirty_young_block(result, word_size);
 925   }
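For context, attempt_allocation() above is unchanged by this patch: it first tries _allocator->attempt_allocation(), intended as the cheap, uncontended path, and only falls back to attempt_allocation_slow() (which may take the heap lock or trigger a collection) when that returns NULL; a successful result is then passed to dirty_young_block(). The standalone sketch below shows the same fast-path/slow-path shape with purely illustrative names (buffer, try_fast_alloc, alloc_slow) and none of the real locking or GC interaction:

    // Standalone illustration of a fast-path/slow-path allocator; not HotSpot
    // code. The fast path is a simple bump-pointer buffer, the slow path a
    // plain malloc stand-in for the lock/GC-involving fallback.
    #include <cstddef>
    #include <cstdlib>

    static char   buffer[1024];   // stand-in for a per-thread allocation buffer
    static size_t top = 0;

    static void* try_fast_alloc(size_t bytes) {
      if (top + bytes > sizeof(buffer)) {
        return nullptr;           // fast path cannot satisfy the request
      }
      void* p = buffer + top;
      top += bytes;
      return p;
    }

    static void* alloc_slow(size_t bytes) {
      return std::malloc(bytes);  // fallback; the real slow path may retry or GC
    }

    void* allocate(size_t bytes) {
      void* p = try_fast_alloc(bytes);   // common case: no locking
      if (p == nullptr) {
        p = alloc_slow(bytes);           // rare case: degraded path
      }
      return p;                          // may still be null on failure
    }
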


1845   _expand_heap_after_alloc_failure(true),
1846   _old_marking_cycles_started(0),
1847   _old_marking_cycles_completed(0),
1848   _heap_summary_sent(false),
1849   _in_cset_fast_test(),
1850   _dirty_cards_region_list(NULL),
1851   _worker_cset_start_region(NULL),
1852   _worker_cset_start_region_time_stamp(NULL),
1853   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1854   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1855   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1856   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1857 
1858   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1859                           /* are_GC_task_threads */true,
1860                           /* are_ConcurrentGC_threads */false);
1861   _workers->initialize_workers();
1862 
1863   _allocator = G1Allocator::create_allocator(this);
1864   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);




1865 
1866   uint n_queues = ParallelGCThreads;
1867   _task_queues = new RefToScanQueueSet(n_queues);
1868 
1869   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1870   assert(n_rem_sets > 0, "Invariant.");
1871 
1872   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1873   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1874   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1875 
1876   for (uint i = 0; i < n_queues; i++) {
1877     RefToScanQueue* q = new RefToScanQueue();
1878     q->initialize();
1879     _task_queues->register_queue(i, q);
1880     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1881   }
1882   clear_cset_start_regions();
1883 
1884   // Initialize the G1EvacuationFailureALot counters and flags.

