src/share/vm/gc_implementation/g1/concurrentMark.hpp

rev 4278 : [mq]: 8009536-fix
rev 4279 : 8009940: G1: assert(_finger == _heap_end) failed, concurrentMark.cpp:809
Summary: Skip reference processing if the global marking stack overflows during remark. Do not call set_phase() from within parallel reference processing; use reset_for_reuse() instead. CMTask-0 should reset the marking state only during the concurrent phase of the marking cycle; if an overflow occurs at any stage during the remark, the marking state will be reset after reference processing.
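To make the first point of the summary concrete, below is a minimal, self-contained sketch (not the actual patch) of the decision it describes: if the global marking stack has overflowed during remark, reference processing is skipped entirely, and the marking state is reset later when marking restarts. Only weakRefsWork() appears in the header excerpt below; the class, the has_overflown() accessor and finalRemarkWork() are placeholder names assumed for illustration.

    // Sketch only (assumed names, not HotSpot code): skip reference
    // processing when the global marking stack overflowed during remark.
    #include <cstdio>

    class CMRemarkSketch {
      bool _has_overflown;   // stands in for ConcurrentMark's overflow flag

    public:
      explicit CMRemarkSketch(bool overflowed) : _has_overflown(overflowed) {}

      bool has_overflown() const { return _has_overflown; }

      void weakRefsWork(bool /* clear_all_soft_refs */) {
        std::printf("processing discovered references\n");
      }

      void finalRemarkWork() {
        if (has_overflown()) {
          // Marking information is incomplete, so references are not
          // processed; marking restarts and the marking state is reset
          // after this point, as the summary describes.
          std::printf("overflow during remark: skipping reference processing\n");
          return;
        }
        weakRefsWork(false /* clear_all_soft_refs */);
      }
    };

    int main() {
      CMRemarkSketch normal(false);
      CMRemarkSketch overflowed(true);
      normal.finalRemarkWork();      // processes references
      overflowed.finalRemarkWork();  // skips them
      return 0;
    }

The header change itself (old pane first, new pane second) replaces set_phase() with the set_concurrency() / set_concurrency_and_phase() pair declared in the new pane.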


 474 
 475   ForceOverflowSettings _force_overflow_conc;
 476   ForceOverflowSettings _force_overflow_stw;
 477 
 478   void weakRefsWork(bool clear_all_soft_refs);
 479 
 480   void swapMarkBitMaps();
 481 
 482   // It resets the global marking data structures, as well as the
 483   // task local ones; should be called during initial mark.
 484   void reset();
 485 
 486   // Resets all the marking data structures. Called when we have to restart
 487   // marking or when marking completes (via set_non_marking_state below).
 488   void reset_marking_state(bool clear_overflow = true);
 489 
 490   // We do this after we're done with marking so that the marking data
 491   // structures are initialised to a sensible and predictable state.
 492   void set_non_marking_state();
 493 



 494   // It should be called to indicate which phase we're in (concurrent
 495   // mark or remark) and how many threads are currently active.
 496   void set_phase(uint active_tasks, bool concurrent);
 497 
 498   // prints all gathered CM-related statistics
 499   void print_stats();
 500 
 501   bool cleanup_list_is_empty() {
 502     return _cleanup_list.is_empty();
 503   }
 504 
 505   // accessor methods
 506   uint parallel_marking_threads() const     { return _parallel_marking_threads; }
 507   uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
 508   double sleep_factor()                     { return _sleep_factor; }
 509   double marking_task_overhead()            { return _marking_task_overhead;}
 510   double cleanup_sleep_factor()             { return _cleanup_sleep_factor; }
 511   double cleanup_task_overhead()            { return _cleanup_task_overhead;}
 512 
 513   bool use_parallel_marking_threads() const {
 514     assert(parallel_marking_threads() <=
 515            max_parallel_marking_threads(), "sanity");
 516     assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||




 474 
 475   ForceOverflowSettings _force_overflow_conc;
 476   ForceOverflowSettings _force_overflow_stw;
 477 
 478   void weakRefsWork(bool clear_all_soft_refs);
 479 
 480   void swapMarkBitMaps();
 481 
 482   // It resets the global marking data structures, as well as the
 483   // task local ones; should be called during initial mark.
 484   void reset();
 485 
 486   // Resets all the marking data structures. Called when we have to restart
 487   // marking or when marking completes (via set_non_marking_state below).
 488   void reset_marking_state(bool clear_overflow = true);
 489 
 490   // We do this after we're done with marking so that the marking data
 491   // structures are initialised to a sensible and predictable state.
 492   void set_non_marking_state();
 493 
 494   // Called to indicate how many threads are currently active.
 495   void set_concurrency(uint active_tasks);
 496 
 497   // It should be called to indicate which phase we're in (concurrent
 498   // mark or remark) and how many threads are currently active.
 499   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 500 
 501   // prints all gathered CM-related statistics
 502   void print_stats();
 503 
 504   bool cleanup_list_is_empty() {
 505     return _cleanup_list.is_empty();
 506   }
 507 
 508   // accessor methods
 509   uint parallel_marking_threads() const     { return _parallel_marking_threads; }
 510   uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
 511   double sleep_factor()                     { return _sleep_factor; }
 512   double marking_task_overhead()            { return _marking_task_overhead;}
 513   double cleanup_sleep_factor()             { return _cleanup_sleep_factor; }
 514   double cleanup_task_overhead()            { return _cleanup_task_overhead;}
 515 
 516   bool use_parallel_marking_threads() const {
 517     assert(parallel_marking_threads() <=
 518            max_parallel_marking_threads(), "sanity");
 519     assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||
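The split shown above lets a caller update only the active-task count without re-deciding the phase. As a hedged illustration of that design choice (placeholder types, not the actual HotSpot executor classes): the remark pause records the phase once with set_concurrency_and_phase(), and a parallel reference-processing step later adjusts only the worker count with set_concurrency(), so the phase set at the start of the pause is never disturbed from inside reference processing.

    // Sketch only (assumed names): how the two new entry points might be used.
    class ConcurrentMarkStub {
      unsigned _active_tasks;
      bool     _concurrent;

    public:
      ConcurrentMarkStub() : _active_tasks(0), _concurrent(false) {}

      // Update only the number of active tasks; leave the phase alone.
      void set_concurrency(unsigned active_tasks) { _active_tasks = active_tasks; }

      // Update the task count and record which phase we are in.
      void set_concurrency_and_phase(unsigned active_tasks, bool concurrent) {
        _active_tasks = active_tasks;
        _concurrent   = concurrent;
      }

      bool concurrent() const { return _concurrent; }
    };

    class RefProcExecutorSketch {
      ConcurrentMarkStub* _cm;
      unsigned            _active_workers;

    public:
      RefProcExecutorSketch(ConcurrentMarkStub* cm, unsigned workers)
        : _cm(cm), _active_workers(workers) {}

      void execute() {
        // Reference processing only changes how many workers are active;
        // the phase recorded at the start of the pause stays as it is.
        _cm->set_concurrency(_active_workers);
        // ... run the proxy tasks on the worker gang here ...
      }
    };

    int main() {
      ConcurrentMarkStub cm;
      cm.set_concurrency_and_phase(8, false /* remark, not concurrent */);
      RefProcExecutorSketch executor(&cm, 4);
      executor.execute();   // adjusts concurrency only; phase is untouched
      return 0;
    }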