    // This usually happens due to the timer not having the required
    // granularity. Some Linuxes are the usual culprits.
    // We'll just set it to something (arbitrarily) small.
    app_time_ms = 1.0;
  }

  if (update_stats) {
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the number of eden regions allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects, which are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place, we can safely ignore them here.
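    // Note: the resulting rate is measured in eden regions per
    // millisecond of mutator (application) time; G1Analytics keeps a
    // window of recent samples (a truncated sequence) and uses it to
    // predict future eden consumption.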
    uint regions_allocated = _collection_set->eden_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _analytics->report_alloc_rate_ms(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _analytics->oldest_known_gc_end_time_sec()) * 1000.0;
    _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
    _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
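    // Note: interval_ms above spans from the oldest GC end time that
    // G1Analytics still remembers to the end of this pause, so the
    // computed pause time ratio approximates the fraction of recent
    // wall-clock time spent in GC pauses.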
  }

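  // Pauses that occur while concurrent marking is in progress have
  // different cost characteristics, so remember whether upcoming pauses
  // fall inside a marking window, and whether that window began with an
  // initial-mark pause.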
  bool new_in_marking_window = collector_state()->in_marking_window();
  bool new_in_marking_window_im = false;
  if (last_pause_included_initial_mark) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (collector_state()->last_young_gc()) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
    assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");

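    // next_gc_should_be_mixed() essentially checks whether the old-region
    // candidates identified by marking still hold enough reclaimable
    // space (judged against G1HeapWastePercent) for mixed collections to
    // be worthwhile.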
    if (next_gc_should_be_mixed("start mixed GCs",
                                "do not start mixed GCs")) {
      collector_state()->set_gcs_are_young(false);
    } else {