src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

 579     _young_old_boundary(young_old_boundary),
 580     _state_set(state_set)
 581   {}
 582 
 583 // Reset the terminator for the given number of
 584 // active threads.
 585 void ParNewGenTask::set_for_termination(int active_workers) {
 586   _state_set->reset(active_workers, _gen->promotion_failed());
 587   // Should the heap be passed in?  There's only 1 for now so
 588   // grab it instead.
 589   GenCollectedHeap* gch = GenCollectedHeap::heap();
 590   gch->set_n_termination(active_workers);
 591 }
 592 
 593 void ParNewGenTask::work(uint worker_id) {
 594   GenCollectedHeap* gch = GenCollectedHeap::heap();
 595   // Since this is being done in a separate thread, need new resource
 596   // and handle marks.
 597   ResourceMark rm;
 598   HandleMark hm;
 599   // We would need multiple old-gen queues otherwise.
 600   assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");
 601 
 602   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 603   assert(_state_set->is_valid(worker_id), "Should not have been called");
 604 
 605   par_scan_state.set_young_old_boundary(_young_old_boundary);
 606 
 607   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
 608                                       gch->rem_set()->klass_rem_set());
 609   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 610                                            &par_scan_state.to_space_root_closure(),
 611                                            false);
 612 
 613   par_scan_state.start_strong_roots();
 614   gch->gen_process_roots(_gen->level(),
 615                          true,  // Process younger gens, if any,
 616                                 // as strong roots.
 617                          false, // no scope; this is parallel code
 618                          GenCollectedHeap::SO_ScavengeCodeCache,
 619                          GenCollectedHeap::StrongAndWeakRoots,
 620                          &par_scan_state.to_space_root_closure(),
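
Each worker in ParNewGenTask::work() above fetches its own ParScanThreadState from the shared state set, indexed by worker_id, so scanning and promotion proceed without locking on the hot path. A minimal, self-contained sketch of that per-worker-state pattern (hypothetical names, not the HotSpot types) could look like this:

#include <cstddef>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

// Hypothetical miniature of the per-worker state pattern used by
// ParNewGenTask: one state object per worker, indexed by worker id,
// so workers never share mutable scan state.
struct WorkerScanState {
  std::size_t objects_copied = 0;   // per-worker statistic, no synchronization needed
  std::size_t bytes_promoted = 0;
};

class WorkerStateSet {
 public:
  explicit WorkerStateSet(unsigned n_workers) : _states(n_workers) {}
  WorkerScanState& state_for(unsigned worker_id) { return _states.at(worker_id); }
  unsigned size() const { return static_cast<unsigned>(_states.size()); }
 private:
  std::vector<WorkerScanState> _states;
};

// Each worker touches only its own slot, mirroring
// _state_set->thread_state(worker_id) in the real code.
void worker_body(WorkerStateSet& set, unsigned worker_id) {
  WorkerScanState& st = set.state_for(worker_id);
  st.objects_copied += 1;       // stand-in for actual scanning work
  st.bytes_promoted += 64;
}

int main() {
  const unsigned n_workers = 4;
  WorkerStateSet set(n_workers);
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < n_workers; i++) {
    gang.emplace_back(worker_body, std::ref(set), i);
  }
  for (auto& t : gang) t.join();
  for (unsigned i = 0; i < n_workers; i++) {
    std::printf("worker %u copied %zu objects\n", i, set.state_for(i).objects_copied);
  }
  return 0;
}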


 905 void ParNewGeneration::collect(bool   full,
 906                                bool   clear_all_soft_refs,
 907                                size_t size,
 908                                bool   is_tlab) {
 909   assert(full || size > 0, "otherwise we don't want to collect");
 910 
 911   GenCollectedHeap* gch = GenCollectedHeap::heap();
 912 
 913   _gc_timer->register_gc_start();
 914 
 915   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 916     "not a CMS generational heap");
 917   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 918   FlexibleWorkGang* workers = gch->workers();
 919   assert(workers != NULL, "Need workgang for parallel work");
 920   int active_workers =
 921       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 922                                    workers->active_workers(),
 923                                    Threads::number_of_non_daemon_threads());
 924   workers->set_active_workers(active_workers);
 925   assert(gch->n_gens() == 2,
 926          "Par collection currently only works with single older gen.");
 927   _old_gen = gch->old_gen();
 928 
 929   // If the next generation is too full to accommodate worst-case promotion
 930   // from this generation, pass on collection; let the next generation
 931   // do it.
 932   if (!collection_attempt_is_safe()) {
 933     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 934     return;
 935   }
 936   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 937 
 938   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 939   gch->trace_heap_before_gc(gc_tracer());
 940 
 941   init_assuming_no_promotion_failure();
 942 
 943   if (UseAdaptiveSizePolicy) {
 944     set_survivor_overflow(false);
 945     size_policy->minor_collection_begin();
 946   }
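
The collect() path above asks AdaptiveSizePolicy::calc_active_workers for a worker count derived from the gang size, the previously active workers, and the number of non-daemon threads, then applies it with set_active_workers. The toy function below only illustrates the general idea of clamping such a request to sane bounds; it is not the real HotSpot heuristic, and all names are made up:

#include <algorithm>
#include <cstdio>

// Toy stand-in for choosing how many GC workers to activate: scale with the
// number of non-daemon (application) threads, never exceed the gang size,
// never drop below one worker. NOT the real
// AdaptiveSizePolicy::calc_active_workers policy, only an illustration of
// clamping the count the way collect() applies it via set_active_workers.
static unsigned choose_active_workers(unsigned total_workers,
                                      unsigned prev_active_workers,
                                      unsigned non_daemon_threads) {
  unsigned wanted = std::max(prev_active_workers, non_daemon_threads);
  return std::min(total_workers, std::max(1u, wanted));
}

int main() {
  // 8-thread gang, 6 workers active last cycle, 3 non-daemon mutator threads.
  std::printf("active workers: %u\n", choose_active_workers(8u, 6u, 3u));
  return 0;
}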




 579     _young_old_boundary(young_old_boundary),
 580     _state_set(state_set)
 581   {}
 582 
 583 // Reset the terminator for the given number of
 584 // active threads.
 585 void ParNewGenTask::set_for_termination(int active_workers) {
 586   _state_set->reset(active_workers, _gen->promotion_failed());
 587   // Should the heap be passed in?  There's only 1 for now so
 588   // grab it instead.
 589   GenCollectedHeap* gch = GenCollectedHeap::heap();
 590   gch->set_n_termination(active_workers);
 591 }
 592 
 593 void ParNewGenTask::work(uint worker_id) {
 594   GenCollectedHeap* gch = GenCollectedHeap::heap();
 595   // Since this is being done in a separate thread, need new resource
 596   // and handle marks.
 597   ResourceMark rm;
 598   HandleMark hm;


 599 
 600   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 601   assert(_state_set->is_valid(worker_id), "Should not have been called");
 602 
 603   par_scan_state.set_young_old_boundary(_young_old_boundary);
 604 
 605   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
 606                                       gch->rem_set()->klass_rem_set());
 607   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
 608                                            &par_scan_state.to_space_root_closure(),
 609                                            false);
 610 
 611   par_scan_state.start_strong_roots();
 612   gch->gen_process_roots(_gen->level(),
 613                          true,  // Process younger gens, if any,
 614                                 // as strong roots.
 615                          false, // no scope; this is parallel code
 616                          GenCollectedHeap::SO_ScavengeCodeCache,
 617                          GenCollectedHeap::StrongAndWeakRoots,
 618                          &par_scan_state.to_space_root_closure(),
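
The KlassScanClosure / CLDToKlassAndOopClosure pair above is an instance of HotSpot's closure (visitor) idiom: root-enumeration code is written once against an abstract reference-visiting interface, and each collector supplies its own closure (here, the to-space root closure) to decide what to do with every discovered reference. A stripped-down sketch of that idiom, with made-up names rather than the real OopClosure hierarchy:

#include <cstdio>
#include <vector>

// Made-up miniature of the closure idiom: the root walker is generic, and
// the collector plugs in a visitor that decides what happens to each
// discovered reference.
struct Object { bool forwarded = false; };

class RefClosure {                 // rough analogue of an oop-visiting closure
 public:
  virtual ~RefClosure() = default;
  virtual void do_ref(Object** slot) = 0;
};

// Generic root walker: knows where the roots are, not what to do with them.
void process_roots(std::vector<Object*>& roots, RefClosure* cl) {
  for (Object*& slot : roots) {
    cl->do_ref(&slot);
  }
}

// Collector-specific behavior: mark reachable objects as "forwarded"
// (a stand-in for copying them to to-space).
class MarkForwardedClosure : public RefClosure {
 public:
  void do_ref(Object** slot) override {
    if (*slot != nullptr) (*slot)->forwarded = true;
  }
};

int main() {
  Object a, b;
  std::vector<Object*> roots = { &a, nullptr, &b };
  MarkForwardedClosure cl;
  process_roots(roots, &cl);
  std::printf("a forwarded: %d, b forwarded: %d\n", a.forwarded, b.forwarded);
  return 0;
}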


 903 void ParNewGeneration::collect(bool   full,
 904                                bool   clear_all_soft_refs,
 905                                size_t size,
 906                                bool   is_tlab) {
 907   assert(full || size > 0, "otherwise we don't want to collect");
 908 
 909   GenCollectedHeap* gch = GenCollectedHeap::heap();
 910 
 911   _gc_timer->register_gc_start();
 912 
 913   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 914     "not a CMS generational heap");
 915   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 916   FlexibleWorkGang* workers = gch->workers();
 917   assert(workers != NULL, "Need workgang for parallel work");
 918   int active_workers =
 919       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 920                                    workers->active_workers(),
 921                                    Threads::number_of_non_daemon_threads());
 922   workers->set_active_workers(active_workers);


 923   _old_gen = gch->old_gen();
 924 
 925   // If the next generation is too full to accommodate worst-case promotion
 926   // from this generation, pass on collection; let the next generation
 927   // do it.
 928   if (!collection_attempt_is_safe()) {
 929     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 930     return;
 931   }
 932   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 933 
 934   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 935   gch->trace_heap_before_gc(gc_tracer());
 936 
 937   init_assuming_no_promotion_failure();
 938 
 939   if (UseAdaptiveSizePolicy) {
 940     set_survivor_overflow(false);
 941     size_policy->minor_collection_begin();
 942   }
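
The comment ahead of collection_attempt_is_safe() states the underlying rule: only attempt the young collection if the old generation could absorb the worst case, in which every byte of the young generation survives and must be promoted. A hedged sketch of that check, with illustrative names and fields rather than the actual HotSpot API:

#include <cstddef>
#include <cstdio>

// Illustrative worst-case promotion check: a young collection is attempted
// only if to-space is empty and the old generation has enough free space to
// take the entire used portion of the young generation. Names are made up;
// this is not the real collection_attempt_is_safe() implementation.
struct GenStats {
  std::size_t used;      // bytes currently in use
  std::size_t capacity;  // committed bytes
};

static bool young_collection_attempt_is_safe(const GenStats& young,
                                             const GenStats& old_gen,
                                             bool to_space_empty) {
  // Worst case: everything in the young generation is live and is promoted.
  std::size_t worst_case_promotion = young.used;
  std::size_t old_free = old_gen.capacity - old_gen.used;
  return to_space_empty && old_free >= worst_case_promotion;
}

int main() {
  GenStats young = { 16u * 1024 * 1024, 32u * 1024 * 1024 };
  GenStats old_g = { 96u * 1024 * 1024, 128u * 1024 * 1024 };
  std::printf("safe: %s\n",
              young_collection_attempt_is_safe(young, old_g, true) ? "yes" : "no");
  return 0;
}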

