src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 10309 : 8150390: Move rs length sampling data to the sampling thread
Reviewed-by:
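The hunks below appear twice: first as the code stood before this change, then as it stands after. Two things move: the calls to _young_list->reset_sampled_info() disappear from the full-GC and evacuation-pause paths, and check_young_list_empty() loses its check_sample parameter. A minimal sketch of that interface change, using simplified stand-in types rather than the real JDK declarations:

  // Hypothetical stand-in for the young list; just enough for the sketch.
  struct YoungList {
    bool is_empty() const { return true; }
    bool sampled_info_is_reset() const { return true; } // concept removed by this change
  };

  // Before (sketch): the emptiness check could also cover the rs length
  // sampling data owned by the young list.
  bool check_young_list_empty_before(const YoungList& yl, bool check_sample) {
    bool ret = yl.is_empty();
    if (check_sample) {
      ret = ret && yl.sampled_info_is_reset();
    }
    return ret;
  }

  // After (sketch): the sampling data lives with the sampling thread, so
  // the young list check no longer needs to know about it.
  bool check_young_list_empty_after(const YoungList& yl) {
    return yl.is_empty();
  }

*** Old version: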


1383       workers()->set_active_workers(n_workers);
1384 
1385       ParRebuildRSTask rebuild_rs_task(this);
1386       workers()->run_task(&rebuild_rs_task);
1387 
1388       // Rebuild the strong code root lists for each region
1389       rebuild_strong_code_roots();
1390 
1391       if (true) { // FIXME
1392         MetaspaceGC::compute_new_size();
1393       }
1394 
1395 #ifdef TRACESPINNING
1396       ParallelTaskTerminator::print_termination_counts();
1397 #endif
1398 
1399       // Discard all rset updates
1400       JavaThread::dirty_card_queue_set().abandon_logs();
1401       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1402 
1403       _young_list->reset_sampled_info();
1404       // At this point there should be no regions in the
1405       // entire heap tagged as young.
1406       assert(check_young_list_empty(true /* check_heap */),
1407              "young list should be empty at this point");
1408 
1409       // Update the number of full collections that have been completed.
1410       increment_old_marking_cycles_completed(false /* concurrent */);
1411 
1412       _hrm.verify_optional();
1413       _verifier->verify_region_sets_optional();
1414 
1415       _verifier->verify_after_gc();
1416 
1417       // Clear the previous marking bitmap, if needed for bitmap verification.
1418       // Note we cannot do this when we clear the next marking bitmap in
1419       // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1420       // objects marked during a full GC against the previous bitmap.
1421       // But we need to clear it before calling check_bitmaps below since
1422       // the full GC has compacted objects and updated TAMS but not updated
1423       // the prev bitmap.


3373         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), g1_policy()->young_cset_region_length());
3374         pre_evacuate_collection_set();
3375 
3376         // Actually do the work...
3377         evacuate_collection_set(evacuation_info, &per_thread_states);
3378 
3379         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3380 
3381         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3382         free_collection_set(g1_policy()->collection_set(), evacuation_info, surviving_young_words);
3383 
3384         eagerly_reclaim_humongous_regions();
3385 
3386         g1_policy()->clear_collection_set();
3387 
3388         // Start a new incremental collection set for the next pause.
3389         g1_policy()->start_incremental_cset_building();
3390 
3391         clear_cset_fast_test();
3392 
3393         _young_list->reset_sampled_info();
3394 
3395         // Don't check the whole heap at this point as the
3396         // GC alloc regions from this pause have been tagged
3397         // as survivors and moved on to the survivor list.
3398         // Survivor regions will fail the !is_young() check.
3399         assert(check_young_list_empty(false /* check_heap */),
3400           "young list should be empty");
3401 
3402         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3403                                              _young_list->first_survivor_region(),
3404                                              _young_list->last_survivor_region());
3405 
3406         _young_list->reset_auxilary_lists();
3407 
3408         if (evacuation_failed()) {
3409           set_used(recalculate_used());
3410           if (_archive_allocator != NULL) {
3411             _archive_allocator->clear_used();
3412           }
3413           for (uint i = 0; i < ParallelGCThreads; i++) {
3414             if (_evacuation_failed_info_array[i].has_failed()) {


5171 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5172   _young_list->push_region(hr);
5173 }
5174 
5175 class NoYoungRegionsClosure: public HeapRegionClosure {
5176 private:
5177   bool _success;
5178 public:
5179   NoYoungRegionsClosure() : _success(true) { }
5180   bool doHeapRegion(HeapRegion* r) {
5181     if (r->is_young()) {
5182       log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5183                             p2i(r->bottom()), p2i(r->end()));
5184       _success = false;
5185     }
5186     return false;
5187   }
5188   bool success() { return _success; }
5189 };
5190 
5191 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5192   bool ret = _young_list->check_list_empty(check_sample);
5193 
5194   if (check_heap) {
5195     NoYoungRegionsClosure closure;
5196     heap_region_iterate(&closure);
5197     ret = ret && closure.success();
5198   }
5199 
5200   return ret;
5201 }
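NoYoungRegionsClosure above follows HotSpot's closure convention: heap_region_iterate() calls doHeapRegion() once per region, and a return value of true aborts the walk. The closure returns false unconditionally so that every young-tagged region gets logged, not just the first, while _success accumulates the verdict that check_young_list_empty() reads back. A miniature of that pattern with stand-in types (a sketch, not the JDK classes):

  #include <vector>

  struct HeapRegion {
    bool young;
    bool is_young() const { return young; }
  };

  struct HeapRegionClosure {
    virtual ~HeapRegionClosure() {}
    virtual bool doHeapRegion(HeapRegion* r) = 0; // true means "stop iterating"
  };

  void heap_region_iterate(std::vector<HeapRegion>& regions, HeapRegionClosure* cl) {
    for (HeapRegion& r : regions) {
      if (cl->doHeapRegion(&r)) break; // closure requested early exit
    }
  }

  // Mirrors NoYoungRegionsClosure: record what was seen but keep walking
  // so that every offending region is visited.
  struct CountYoungClosure : public HeapRegionClosure {
    int young_seen = 0;
    bool doHeapRegion(HeapRegion* r) override {
      if (r->is_young()) young_seen++;
      return false; // never abort: we want a full scan
    }
  };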
5202 
5203 class TearDownRegionSetsClosure : public HeapRegionClosure {
5204 private:
5205   HeapRegionSet *_old_set;
5206 
5207 public:
5208   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
5209 
5210   bool doHeapRegion(HeapRegion* r) {
5211     if (r->is_old()) {
5212       _old_set->remove(r);


*** New version:

1383       workers()->set_active_workers(n_workers);
1384 
1385       ParRebuildRSTask rebuild_rs_task(this);
1386       workers()->run_task(&rebuild_rs_task);
1387 
1388       // Rebuild the strong code root lists for each region
1389       rebuild_strong_code_roots();
1390 
1391       if (true) { // FIXME
1392         MetaspaceGC::compute_new_size();
1393       }
1394 
1395 #ifdef TRACESPINNING
1396       ParallelTaskTerminator::print_termination_counts();
1397 #endif
1398 
1399       // Discard all rset updates
1400       JavaThread::dirty_card_queue_set().abandon_logs();
1401       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1402 

1403       // At this point there should be no regions in the
1404       // entire heap tagged as young.
1405       assert(check_young_list_empty(true /* check_heap */),
1406              "young list should be empty at this point");
1407 
1408       // Update the number of full collections that have been completed.
1409       increment_old_marking_cycles_completed(false /* concurrent */);
1410 
1411       _hrm.verify_optional();
1412       _verifier->verify_region_sets_optional();
1413 
1414       _verifier->verify_after_gc();
1415 
1416       // Clear the previous marking bitmap, if needed for bitmap verification.
1417       // Note we cannot do this when we clear the next marking bitmap in
1418       // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1419       // objects marked during a full GC against the previous bitmap.
1420       // But we need to clear it before calling check_bitmaps below since
1421       // the full GC has compacted objects and updated TAMS but not updated
1422       // the prev bitmap.


3372         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), g1_policy()->young_cset_region_length());
3373         pre_evacuate_collection_set();
3374 
3375         // Actually do the work...
3376         evacuate_collection_set(evacuation_info, &per_thread_states);
3377 
3378         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3379 
3380         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3381         free_collection_set(g1_policy()->collection_set(), evacuation_info, surviving_young_words);
3382 
3383         eagerly_reclaim_humongous_regions();
3384 
3385         g1_policy()->clear_collection_set();
3386 
3387         // Start a new incremental collection set for the next pause.
3388         g1_policy()->start_incremental_cset_building();
3389 
3390         clear_cset_fast_test();
3391 


3392         // Don't check the whole heap at this point as the
3393         // GC alloc regions from this pause have been tagged
3394         // as survivors and moved on to the survivor list.
3395         // Survivor regions will fail the !is_young() check.
3396         assert(check_young_list_empty(false /* check_heap */),
3397           "young list should be empty");
3398 
3399         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3400                                              _young_list->first_survivor_region(),
3401                                              _young_list->last_survivor_region());
3402 
3403         _young_list->reset_auxilary_lists();
3404 
3405         if (evacuation_failed()) {
3406           set_used(recalculate_used());
3407           if (_archive_allocator != NULL) {
3408             _archive_allocator->clear_used();
3409           }
3410           for (uint i = 0; i < ParallelGCThreads; i++) {
3411             if (_evacuation_failed_info_array[i].has_failed()) {


5168 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5169   _young_list->push_region(hr);
5170 }
5171 
5172 class NoYoungRegionsClosure: public HeapRegionClosure {
5173 private:
5174   bool _success;
5175 public:
5176   NoYoungRegionsClosure() : _success(true) { }
5177   bool doHeapRegion(HeapRegion* r) {
5178     if (r->is_young()) {
5179       log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5180                             p2i(r->bottom()), p2i(r->end()));
5181       _success = false;
5182     }
5183     return false;
5184   }
5185   bool success() { return _success; }
5186 };
5187 
5188 bool G1CollectedHeap::check_young_list_empty(bool check_heap) {
5189   bool ret = _young_list->check_list_empty();
5190 
5191   if (check_heap) {
5192     NoYoungRegionsClosure closure;
5193     heap_region_iterate(&closure);
5194     ret = ret && closure.success();
5195   }
5196 
5197   return ret;
5198 }
5199 
5200 class TearDownRegionSetsClosure : public HeapRegionClosure {
5201 private:
5202   HeapRegionSet *_old_set;
5203 
5204 public:
5205   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
5206 
5207   bool doHeapRegion(HeapRegion* r) {
5208     if (r->is_old()) {
5209       _old_set->remove(r);

