}
};

void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint()) {
    assert(Thread::current()->is_VM_thread(),
           "Expected to be executed serially by the VM thread at this point");

    if (!silent) { gclog_or_tty->print("Roots "); }
    VerifyRootsClosure rootsCl(vo);
    VerifyKlassClosure klassCl(this, &rootsCl);
    CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);

    // We apply the relevant closures to all the oops in the
    // system dictionary, class loader data graph, the string table
    // and the nmethods in the code cache.
    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);

    {
      G1RootProcessor root_processor(this, false /* trace_metadata */);
      root_processor.process_all_roots(&rootsCl,
                                       &cldCl,
                                       &blobsCl);
    }

    bool failures = rootsCl.failures() || codeRootsCl.failures();

    if (vo != VerifyOption_G1UseMarkWord) {
      // If we're verifying during a full GC then the region sets
      // will have been torn down at the start of the GC. Therefore
      // verifying the region sets will fail. So we only verify
      // the region sets when not in a full GC.
      if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
      verify_region_sets();
    }

    if (!silent) { gclog_or_tty->print("HeapRegions "); }
    if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {

      G1ParVerifyTask task(this, vo);

// ...

      G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
                                                only_young, // Only process dirty klasses.
                                                false);     // No need to claim CLDs.
      // Initial-mark (IM) young GC.
      // Strong roots closures.
      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
                                                    false, // Process all klasses.
                                                    true); // Need to claim CLDs.
      // Weak roots closures.
      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
                                                                 false, // Process all klasses.
                                                                 true); // Need to claim CLDs.

      OopClosure* strong_root_cl;
      OopClosure* weak_root_cl;
      CLDClosure* strong_cld_cl;
      CLDClosure* weak_cld_cl;

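      // Pick the closure set for this pause. During an initial-mark pause
      // every evacuated object must also be marked so that the concurrent
      // marking cycle can find it; outside of initial mark the plain
      // scan-only closures suffice for both strong and weak roots.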
      if (_g1h->g1_policy()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        strong_root_cl = &scan_mark_root_cl;
        strong_cld_cl  = &scan_mark_cld_cl;
        if (ClassUnloadingWithConcurrentMark) {
          weak_root_cl = &scan_mark_weak_root_cl;
          weak_cld_cl  = &scan_mark_weak_cld_cl;
        } else {
          weak_root_cl = &scan_mark_root_cl;
          weak_cld_cl  = &scan_mark_cld_cl;
        }
      } else {
        strong_root_cl = &scan_only_root_cl;
        weak_root_cl   = &scan_only_root_cl;
        strong_cld_cl  = &scan_only_cld_cl;
        weak_cld_cl    = &scan_only_cld_cl;
      }

      pss.start_strong_roots();

      _root_processor->evacuate_roots(strong_root_cl,
                                      weak_root_cl,
                                      strong_cld_cl,
                                      weak_cld_cl,
                                      worker_id);

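      // Scan the remembered sets for references into the collection set and
      // push them onto this worker's task queue. Note that nmethods found
      // through the remembered sets are scanned with the weak root closure,
      // so they do not act as strong roots for marking.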
      G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
      _root_processor->scan_remembered_sets(&push_heap_rs_cl,
                                            weak_root_cl,
                                            worker_id);
      pss.end_strong_roots();

      {
        double start = os::elapsedTime();
        G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
        evac.do_void();
        double elapsed_sec = os::elapsedTime() - start;
        double term_sec = pss.term_time();
        _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
        _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
        _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
      }
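      // Flush this worker's per-thread results back into shared policy
      // state: the age table feeds tenuring-threshold selection and the
      // surviving-young-words counts feed survivor-rate prediction.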
      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
      _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

// ...

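  // Decide how many workers to run for this pause. With
  // UseDynamicNumberOfGCThreads the count adapts to the current number of
  // non-daemon Java threads; otherwise all available workers are used.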
  uint n_workers;
  n_workers =
    AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                            workers()->active_workers(),
                                            Threads::number_of_non_daemon_threads());
  assert(UseDynamicNumberOfGCThreads ||
         n_workers == workers()->total_workers(),
         "If not dynamic should be using all the workers");
  workers()->set_active_workers(n_workers);
  set_par_threads(n_workers);

  init_for_evac_failure(NULL);

  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  double start_par_time_sec = os::elapsedTime();
  double end_par_time_sec;

  {
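    // Metadata is traced through during root processing only when this
    // initial-mark pause will also unload classes concurrently.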
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;

    G1RootProcessor root_processor(this, trace_metadata);
    G1ParTask g1_par_task(this, _task_queues, &root_processor);
    // InitialMark needs claim bits to keep track of the marked-through CLDs.
    if (during_im) {
      ClassLoaderDataGraph::clear_claimed_marks();
    }

    // The individual threads will set their evac-failure closures.
    if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
    // These tasks use SharedHeap::_process_strong_tasks
    assert(UseDynamicNumberOfGCThreads ||
           workers()->active_workers() == workers()->total_workers(),
           "If not dynamic should be using all the workers");
    workers()->run_task(&g1_par_task);
    end_par_time_sec = os::elapsedTime();

    // Closing the inner scope will execute the destructor
    // for the G1RootProcessor object. We record the current
    // elapsed time before closing the scope so that time
    // taken for the destructor is NOT included in the
    // reported parallel time.
  }

  G1GCPhaseTimes* phase_times = g1_policy()->phase_times();

// The same regions after the change: the trace_metadata flag moves from the
// G1RootProcessor constructor into the evacuate_roots() call.

}
};

void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint()) {
    assert(Thread::current()->is_VM_thread(),
           "Expected to be executed serially by the VM thread at this point");

    if (!silent) { gclog_or_tty->print("Roots "); }
    VerifyRootsClosure rootsCl(vo);
    VerifyKlassClosure klassCl(this, &rootsCl);
    CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);

    // We apply the relevant closures to all the oops in the
    // system dictionary, class loader data graph, the string table
    // and the nmethods in the code cache.
    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);

    {
      G1RootProcessor root_processor(this);
      root_processor.process_all_roots(&rootsCl,
                                       &cldCl,
                                       &blobsCl);
    }

    bool failures = rootsCl.failures() || codeRootsCl.failures();

    if (vo != VerifyOption_G1UseMarkWord) {
      // If we're verifying during a full GC then the region sets
      // will have been torn down at the start of the GC. Therefore
      // verifying the region sets will fail. So we only verify
      // the region sets when not in a full GC.
      if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
      verify_region_sets();
    }

    if (!silent) { gclog_or_tty->print("HeapRegions "); }
    if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {

      G1ParVerifyTask task(this, vo);

// ...

      G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
                                                only_young, // Only process dirty klasses.
                                                false);     // No need to claim CLDs.
      // Initial-mark (IM) young GC.
      // Strong roots closures.
      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
                                                    false, // Process all klasses.
                                                    true); // Need to claim CLDs.
      // Weak roots closures.
      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
                                                                 false, // Process all klasses.
                                                                 true); // Need to claim CLDs.

      OopClosure* strong_root_cl;
      OopClosure* weak_root_cl;
      CLDClosure* strong_cld_cl;
      CLDClosure* weak_cld_cl;

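      // Whether class metadata must be traced through during root
      // evacuation; only initial-mark pauses that unload classes need it.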
      bool trace_metadata = false;

      if (_g1h->g1_policy()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        strong_root_cl = &scan_mark_root_cl;
        strong_cld_cl  = &scan_mark_cld_cl;
        if (ClassUnloadingWithConcurrentMark) {
          weak_root_cl = &scan_mark_weak_root_cl;
          weak_cld_cl  = &scan_mark_weak_cld_cl;
          trace_metadata = true;
        } else {
          weak_root_cl = &scan_mark_root_cl;
          weak_cld_cl  = &scan_mark_cld_cl;
        }
      } else {
        strong_root_cl = &scan_only_root_cl;
        weak_root_cl   = &scan_only_root_cl;
        strong_cld_cl  = &scan_only_cld_cl;
        weak_cld_cl    = &scan_only_cld_cl;
      }

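      // start_strong_roots()/end_strong_roots() bracket the root work so
      // this worker's strong-roots time can be reported separately.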
      pss.start_strong_roots();

      _root_processor->evacuate_roots(strong_root_cl,
                                      weak_root_cl,
                                      strong_cld_cl,
                                      weak_cld_cl,
                                      trace_metadata,
                                      worker_id);

      G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
      _root_processor->scan_remembered_sets(&push_heap_rs_cl,
                                            weak_root_cl,
                                            worker_id);
      pss.end_strong_roots();

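      // Drain the task queues, evacuating everything reachable from the
      // roots scanned above. Time spent waiting in the termination protocol
      // is recorded separately so it is not counted as object copy.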
      {
        double start = os::elapsedTime();
        G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
        evac.do_void();
        double elapsed_sec = os::elapsedTime() - start;
        double term_sec = pss.term_time();
        _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
        _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
        _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
      }
      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
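      // NOTE: surviving_young_words() appears to hand back a pointer offset
      // from the start of the underlying array, hence the +1 adjustment
      // here (see G1ParScanThreadState).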
      _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

// ...

  uint n_workers;
  n_workers =
    AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                            workers()->active_workers(),
                                            Threads::number_of_non_daemon_threads());
  assert(UseDynamicNumberOfGCThreads ||
         n_workers == workers()->total_workers(),
         "If not dynamic should be using all the workers");
  workers()->set_active_workers(n_workers);
  set_par_threads(n_workers);

  init_for_evac_failure(NULL);

  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  double start_par_time_sec = os::elapsedTime();
  double end_par_time_sec;

  {
    G1RootProcessor root_processor(this);
    G1ParTask g1_par_task(this, _task_queues, &root_processor);
    // InitialMark needs claim bits to keep track of the marked-through CLDs.
    if (g1_policy()->during_initial_mark_pause()) {
      ClassLoaderDataGraph::clear_claimed_marks();
    }

    // The individual threads will set their evac-failure closures.
    if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
    // These tasks use SharedHeap::_process_strong_tasks
    assert(UseDynamicNumberOfGCThreads ||
           workers()->active_workers() == workers()->total_workers(),
           "If not dynamic should be using all the workers");
    workers()->run_task(&g1_par_task);
    end_par_time_sec = os::elapsedTime();

    // Closing the inner scope will execute the destructor
    // for the G1RootProcessor object. We record the current
    // elapsed time before closing the scope so that time
    // taken for the destructor is NOT included in the
    // reported parallel time.
  }

  G1GCPhaseTimes* phase_times = g1_policy()->phase_times();