
src/hotspot/share/gc/g1/g1ConcurrentMark.hpp

rev 55281 : [mq]: 8225478-cmrootregions-cleanup


 205   // Return whether the chunk list is empty. Racy due to unsynchronized access to
 206   // _chunk_list.
 207   bool is_empty() const { return _chunk_list == NULL; }
 208 
 209   size_t capacity() const  { return _chunk_capacity; }
 210 
 211   // Expand the stack, typically in response to an overflow condition
 212   void expand();
 213 
 214   // Return the approximate number of oops on this mark stack. Racy due to
 215   // unsynchronized access to _chunks_in_chunk_list.
 216   size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }
 217 
 218   void set_empty();
 219 
 220   // Apply Fn to every oop on the mark stack. The mark stack must not
 221   // be modified while iterating.
 222   template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
 223 };
 224 
 225 // Root Regions are regions that contain objects from nTAMS to top. These are roots
 226 // for marking, i.e. their referenced objects must be kept alive to maintain the
 227 // SATB invariant.
 228 // We could scan and mark through them during the initial-mark pause, but for

 229 // pause time reasons we move this work to the concurrent phase.
 230 // We need to complete this procedure before the next GC because that GC might
 231 // otherwise consider some of these "root objects" dead, potentially dropping
 232 // some required references.
 233 // Root regions comprise the complete contents of survivor regions, and any
 234 // objects copied into the old gen during GC.
 235 class G1CMRootRegions {
 236   HeapRegion** _root_regions;

 237   size_t const _max_regions;
 238 
 239   volatile size_t _num_root_regions; // Actual number of root regions.
 240 
 241   volatile size_t _claimed_root_regions; // Number of root regions currently claimed.
 242 
 243   volatile bool _scan_in_progress;
 244   volatile bool _should_abort;
 245 
 246   void notify_scan_done();
 247 
 248 public:
 249   G1CMRootRegions(uint const max_regions);
 250   ~G1CMRootRegions();
 251 
 252   // Reset the data structure to allow addition of new root regions.
 253   void reset();
 254 
 255   void add(HeapRegion* hr);
 256 
 257   // Reset the claiming / scanning of the root regions.
 258   void prepare_for_scan();
 259 
 260   // Forces claim_next() to return NULL so that the iteration aborts early.
 261   void abort() { _should_abort = true; }
 262 
 263   // Return true if CM threads are actively scanning root regions,
 264   // false otherwise.
 265   bool scan_in_progress() { return _scan_in_progress; }
 266 
 267   // Atomically claim the next root region to scan, or return NULL if
 268   // all have been claimed.
 269   HeapRegion* claim_next();
 270 
 271   // The number of root regions to scan.
 272   uint num_root_regions() const;
 273 
 274   void cancel_scan();
 275 
 276   // Flag that we're done with root region scanning and notify anyone
 277   // who's waiting on it. If the scan was not aborted, assume that all
 278   // regions have been claimed.
 279   void scan_finished();
 280 
 281   // If CM threads are still scanning root regions, wait until they
 282   // are done. Return true if we had to wait, false otherwise.
 283   bool wait_until_scan_finished();
 284 };
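The claiming protocol described by the comments above (an atomic claim_next() that hands out each root region once, abort() forcing it to return NULL, and scan_finished()/wait_until_scan_finished() for completion notification) can be pictured with a small standalone model. This is only a sketch, not HotSpot code: RootSet, the std::atomic/std::mutex primitives and worker_scan() are stand-ins for G1CMRootRegions, HotSpot's Atomic/Monitor facilities and the real root-region scanning work.

#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <vector>

// Simplified stand-in for G1CMRootRegions: an array of "regions" (plain ints
// here instead of HeapRegion*), an atomic claim index, an abort flag and a
// scan-done notification.
class RootSet {
  std::vector<int>        _regions;
  std::atomic<size_t>     _claimed{0};            // next index to hand out
  std::atomic<bool>       _should_abort{false};
  std::atomic<bool>       _scan_in_progress{false};
  std::mutex              _lock;
  std::condition_variable _scan_done;

public:
  void add(int region) { _regions.push_back(region); }

  void prepare_for_scan() {
    _claimed = 0;
    _should_abort = false;
    _scan_in_progress = true;
  }

  // Atomically claim the next region; returns nullptr when aborted or when
  // every region has already been handed out.
  int* claim_next() {
    if (_should_abort) return nullptr;
    size_t idx = _claimed.fetch_add(1);
    return idx < _regions.size() ? &_regions[idx] : nullptr;
  }

  void abort() { _should_abort = true; }

  // Called once scanning is complete; wakes up anyone waiting for it.
  void scan_finished() {
    std::lock_guard<std::mutex> g(_lock);
    _scan_in_progress = false;
    _scan_done.notify_all();
  }

  // Returns true if we actually had to wait.
  bool wait_until_scan_finished() {
    std::unique_lock<std::mutex> g(_lock);
    if (!_scan_in_progress) return false;
    _scan_done.wait(g, [this] { return !_scan_in_progress.load(); });
    return true;
  }
};

// A worker simply claims until nothing is left (or the scan was aborted);
// completion would then be reported via scan_finished().
void worker_scan(RootSet& roots) {
  while (int* region = roots.claim_next()) {
    // scan *region: mark everything reachable from it
  }
}

Claiming through a single fetch-and-add keeps the common path lock-free in this model; the mutex and condition variable are only involved when another thread needs to wait for the scan to finish.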
 285 
 286 // This class manages data structures and methods for doing liveness analysis in
 287 // G1's concurrent cycle.
 288 class G1ConcurrentMark : public CHeapObj<mtGC> {
 289   friend class G1ConcurrentMarkThread;


 293   friend class G1CMDrainMarkingStackClosure;
 294   friend class G1CMBitMapClosure;
 295   friend class G1CMConcurrentMarkingTask;
 296   friend class G1CMRemarkTask;
 297   friend class G1CMTask;
 298 
 299   G1ConcurrentMarkThread* _cm_thread;     // The thread doing the work
 300   G1CollectedHeap*        _g1h;           // The heap
 301   bool                    _completed_initialization; // Set to true when initialization is complete
 302 
 303   // Concurrent marking support structures
 304   G1CMBitMap              _mark_bitmap_1;
 305   G1CMBitMap              _mark_bitmap_2;
 306   G1CMBitMap*             _prev_mark_bitmap; // Completed mark bitmap
 307   G1CMBitMap*             _next_mark_bitmap; // Under-construction mark bitmap
 308 
 309   // Heap bounds
 310   MemRegion const         _heap;
 311 
 312   // Root region tracking and claiming
 313   G1CMRootRegions         _root_regions;
 314 
 315   // For grey objects
 316   G1CMMarkStack           _global_mark_stack; // Grey objects behind global finger
 317   HeapWord* volatile      _finger;            // The global finger, region aligned,
 318                                               // always pointing to the end of the
 319                                               // last claimed region
 320 
 321   uint                    _worker_id_offset;
 322   uint                    _max_num_tasks;    // Maximum number of marking tasks
 323   uint                    _num_active_tasks; // Number of tasks currently active
 324   G1CMTask**              _tasks;            // Task queue array (max_worker_id length)
 325 
 326   G1CMTaskQueueSet*       _task_queues; // Task queue set
 327   TaskTerminator          _terminator;  // For termination
 328 
 329   // Two sync barriers that are used to synchronize tasks when an
 330   // overflow occurs. The algorithm is the following. All tasks enter
 331   // the first one to ensure that they have all stopped manipulating
 332   // the global data structures. After they exit it, they re-initialize
 333   // their data structures and task 0 re-initializes the global data


 484   void clear_statistics_in_region(uint region_idx);
 485   // Notification for eagerly reclaimed regions to clean up.
 486   void humongous_object_eagerly_reclaimed(HeapRegion* r);
 487   // Manipulation of the global mark stack.
 488   // The push and pop operations are used by tasks for transfers
 489   // between task-local queues and the global mark stack.
 490   bool mark_stack_push(G1TaskQueueEntry* arr) {
 491     if (!_global_mark_stack.par_push_chunk(arr)) {
 492       set_has_overflown();
 493       return false;
 494     }
 495     return true;
 496   }
 497   bool mark_stack_pop(G1TaskQueueEntry* arr) {
 498     return _global_mark_stack.par_pop_chunk(arr);
 499   }
 500   size_t mark_stack_size() const                { return _global_mark_stack.size(); }
 501   size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
 502   bool mark_stack_empty() const                 { return _global_mark_stack.is_empty(); }
 503 
 504   G1CMRootRegions* root_regions() { return &_root_regions; }
 505 
 506   void concurrent_cycle_start();
 507   // Abandon current marking iteration due to a Full GC.
 508   void concurrent_cycle_abort();
 509   void concurrent_cycle_end();
 510 
 511   void update_accum_task_vtime(int i, double vtime) {
 512     _accum_task_vtime[i] += vtime;
 513   }
 514 
 515   double all_task_accum_vtime() {
 516     double ret = 0.0;
 517     for (uint i = 0; i < _max_num_tasks; ++i)
 518       ret += _accum_task_vtime[i];
 519     return ret;
 520   }
 521 
 522   // Attempts to steal an object from the task queues of other tasks
 523   bool try_stealing(uint worker_id, G1TaskQueueEntry& task_entry);
 524 


 537 
 538   // Moves all per-task cached data into global state.
 539   void flush_all_task_caches();
 540   // Prepare internal data structures for the next mark cycle. This includes clearing
 541   // the next mark bitmap and some internal data structures. This method is intended
 542   // to be called concurrently with the mutator. It will yield to safepoint requests.
 543   void cleanup_for_next_mark();
 544 
 545   // Clear the previous marking bitmap during a safepoint.
 546   void clear_prev_bitmap(WorkGang* workers);
 547 
 548   // These two methods do the work that needs to be done at the start and end of the
 549   // initial mark pause.
 550   void pre_initial_mark();
 551   void post_initial_mark();
 552 
 553   // Scan all the root regions and mark everything reachable from
 554   // them.
 555   void scan_root_regions();
 556 
 557   // Scan a single root region from nTAMS to top and mark everything reachable from it.
 558   void scan_root_region(HeapRegion* hr, uint worker_id);
 559 
 560   // Do concurrent phase of marking, to a tentative transitive closure.
 561   void mark_from_roots();
 562 
 563   // Do concurrent preclean work.
 564   void preclean();
 565 
 566   void remark();
 567 
 568   void cleanup();
 569   // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
 570   // this carefully.
 571   inline void mark_in_prev_bitmap(oop p);
 572 
 573   // Clears marks for all objects in the given range in the prev
 574   // bitmap.  Caution: the previous bitmap is usually
 575   // read-only, so use this carefully!
 576   void clear_range_in_prev_bitmap(MemRegion mr);
 577 
 578   inline bool is_marked_in_prev_bitmap(oop p) const;




 205   // Return whether the chunk list is empty. Racy due to unsynchronized access to
 206   // _chunk_list.
 207   bool is_empty() const { return _chunk_list == NULL; }
 208 
 209   size_t capacity() const  { return _chunk_capacity; }
 210 
 211   // Expand the stack, typically in response to an overflow condition
 212   void expand();
 213 
 214   // Return the approximate number of oops on this mark stack. Racy due to
 215   // unsynchronized access to _chunks_in_chunk_list.
 216   size_t size() const { return _chunks_in_chunk_list * EntriesPerChunk; }
 217 
 218   void set_empty();
 219 
 220   // Apply Fn to every oop on the mark stack. The mark stack must not
 221   // be modified while iterating.
 222   template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
 223 };
 224 
 225 // Root MemRegions are memory areas that contain objects whose references are
 226 // roots with respect to marking. They must be scanned before marking to maintain
 227 // the SATB invariant.
 228 // Typically they cover the area from nTAMS to top of a region.
 229 // We could scan and mark through these objects during the initial-mark pause, but for
 230 // pause time reasons we move this work to the concurrent phase.
 231 // We need to complete this procedure before the next GC because that GC might
 232 // otherwise consider some of these "root objects" dead, potentially dropping
 233 // some required references.
 234 // Root MemRegions comprise the contents of survivor regions at the end
 235 // of the GC, and any objects copied into the old gen during GC.
 236 class G1CMRootMemRegions {
 237   // The set of root MemRegions.
 238   MemRegion* _root_regions;
 239   size_t const _max_regions;
 240 
 241   volatile size_t _num_root_regions; // Actual number of root regions.
 242 
 243   volatile size_t _claimed_root_regions; // Number of root regions currently claimed.
 244 
 245   volatile bool _scan_in_progress;
 246   volatile bool _should_abort;
 247 
 248   void notify_scan_done();
 249 
 250 public:
 251   G1CMRootMemRegions(uint const max_regions);
 252   ~G1CMRootMemRegions();
 253 
 254   // Reset the data structure to allow addition of new root regions.
 255   void reset();
 256 
 257   void add(HeapWord* start, HeapWord* end);
 258 
 259   // Reset the claiming / scanning of the root regions.
 260   void prepare_for_scan();
 261 
 263   // Forces claim_next() to return NULL so that the iteration aborts early.
 263   void abort() { _should_abort = true; }
 264 
 265   // Return true if CM threads are actively scanning root regions,
 266   // false otherwise.
 267   bool scan_in_progress() { return _scan_in_progress; }
 268 
 269   // Atomically claim the next root MemRegion to scan, or return NULL if
 270   // all have been claimed.
 271   MemRegion* claim_next();
 272 
 273   // The number of root regions to scan.
 274   uint num_root_regions() const;
 275 
 276   void cancel_scan();
 277 
 278   // Flag that we're done with root region scanning and notify anyone
 279   // who's waiting on it. If the scan was not aborted, assume that all
 280   // regions have been claimed.
 281   void scan_finished();
 282 
 283   // If CM threads are still scanning root regions, wait until they
 284   // are done. Return true if we had to wait, false otherwise.
 285   bool wait_until_scan_finished();
 286 };
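Compared to the old G1CMRootRegions above, the entries are now plain address ranges: callers record a [start, end) pair via add(HeapWord*, HeapWord*) and scanners receive a MemRegion* from claim_next(). The minimal standalone model below illustrates that representation change only; the *Model types, the fixed MaxRegions and the omitted bounds checks are simplifications, not the patch's actual implementation.

#include <atomic>
#include <cstddef>

// Stand-ins: HeapWord* is modelled as char*, MemRegion as a start/end pair.
typedef char HeapWordModel;

struct MemRegionModel {
  HeapWordModel* start;
  HeapWordModel* end;
};

class RootMemRegionsModel {
  static const size_t MaxRegions = 128;          // fixed for the sketch
  MemRegionModel      _root_regions[MaxRegions];
  std::atomic<size_t> _num_root_regions{0};      // filled during the GC pause
  std::atomic<size_t> _claimed_root_regions{0};  // consumed by scanners

public:
  // Record one root area, e.g. a survivor region's contents or the range of
  // an old region that received copied objects (per the class comment above).
  // Bounds checking against MaxRegions is omitted here.
  void add(HeapWordModel* start, HeapWordModel* end) {
    size_t idx = _num_root_regions.fetch_add(1);
    _root_regions[idx].start = start;
    _root_regions[idx].end   = end;
  }

  // Hand out the next recorded range, or nullptr once all have been claimed.
  MemRegionModel* claim_next() {
    size_t idx = _claimed_root_regions.fetch_add(1);
    return idx < _num_root_regions.load() ? &_root_regions[idx] : nullptr;
  }
};

Storing ranges rather than HeapRegion pointers lets a caller record just the interesting part of a region, which matches the class comment's description of the areas from nTAMS to top.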
 287 
 288 // This class manages data structures and methods for doing liveness analysis in
 289 // G1's concurrent cycle.
 290 class G1ConcurrentMark : public CHeapObj<mtGC> {
 291   friend class G1ConcurrentMarkThread;


 295   friend class G1CMDrainMarkingStackClosure;
 296   friend class G1CMBitMapClosure;
 297   friend class G1CMConcurrentMarkingTask;
 298   friend class G1CMRemarkTask;
 299   friend class G1CMTask;
 300 
 301   G1ConcurrentMarkThread* _cm_thread;     // The thread doing the work
 302   G1CollectedHeap*        _g1h;           // The heap
 303   bool                    _completed_initialization; // Set to true when initialization is complete
 304 
 305   // Concurrent marking support structures
 306   G1CMBitMap              _mark_bitmap_1;
 307   G1CMBitMap              _mark_bitmap_2;
 308   G1CMBitMap*             _prev_mark_bitmap; // Completed mark bitmap
 309   G1CMBitMap*             _next_mark_bitmap; // Under-construction mark bitmap
 310 
 311   // Heap bounds
 312   MemRegion const         _heap;
 313 
 314   // Root region tracking and claiming
 315   G1CMRootMemRegions      _root_regions;
 316 
 317   // For grey objects
 318   G1CMMarkStack           _global_mark_stack; // Grey objects behind global finger
 319   HeapWord* volatile      _finger;            // The global finger, region aligned,
 320                                               // always pointing to the end of the
 321                                               // last claimed region
 322 
 323   uint                    _worker_id_offset;
 324   uint                    _max_num_tasks;    // Maximum number of marking tasks
 325   uint                    _num_active_tasks; // Number of tasks currently active
 326   G1CMTask**              _tasks;            // Task queue array (max_worker_id length)
 327 
 328   G1CMTaskQueueSet*       _task_queues; // Task queue set
 329   TaskTerminator          _terminator;  // For termination
 330 
 331   // Two sync barriers that are used to synchronize tasks when an
 332   // overflow occurs. The algorithm is the following. All tasks enter
 333   // the first one to ensure that they have all stopped manipulating
 334   // the global data structures. After they exit it, they re-initialize
 335   // their data structures and task 0 re-initializes the global data


 486   void clear_statistics_in_region(uint region_idx);
 487   // Notification for eagerly reclaimed regions to clean up.
 488   void humongous_object_eagerly_reclaimed(HeapRegion* r);
 489   // Manipulation of the global mark stack.
 490   // The push and pop operations are used by tasks for transfers
 491   // between task-local queues and the global mark stack.
 492   bool mark_stack_push(G1TaskQueueEntry* arr) {
 493     if (!_global_mark_stack.par_push_chunk(arr)) {
 494       set_has_overflown();
 495       return false;
 496     }
 497     return true;
 498   }
 499   bool mark_stack_pop(G1TaskQueueEntry* arr) {
 500     return _global_mark_stack.par_pop_chunk(arr);
 501   }
 502   size_t mark_stack_size() const                { return _global_mark_stack.size(); }
 503   size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
 504   bool mark_stack_empty() const                 { return _global_mark_stack.is_empty(); }
 505 
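The mark-stack helpers above move work between a task-local queue and the global mark stack one fixed-size chunk at a time; a failed par_push_chunk() is what raises the overflow flag via set_has_overflown(). Below is a rough standalone model of that transfer. GlobalMarkStackModel, TaskEntryModel, EntriesPerChunkModel and move_entries_to_global_stack() are illustrative stand-ins, not the G1CMMarkStack implementation (which keeps its chunks in the _chunk_list shown at the top of this file).

#include <algorithm>
#include <cstddef>
#include <deque>
#include <vector>

// Stand-in for a marking task entry (the real one wraps an oop or array slice).
struct TaskEntryModel { void* ref; };

static const size_t EntriesPerChunkModel = 4;    // the real constant differs

// Minimal global stack model: stores whole chunks; push fails when full.
class GlobalMarkStackModel {
  std::vector<std::vector<TaskEntryModel> > _chunks;
  size_t _capacity_in_chunks;
public:
  explicit GlobalMarkStackModel(size_t cap) : _capacity_in_chunks(cap) {}

  bool push_chunk(const TaskEntryModel* buf) {
    if (_chunks.size() == _capacity_in_chunks) return false;    // overflow
    _chunks.push_back(std::vector<TaskEntryModel>(buf, buf + EntriesPerChunkModel));
    return true;
  }

  bool pop_chunk(TaskEntryModel* buf) {
    if (_chunks.empty()) return false;
    std::copy(_chunks.back().begin(), _chunks.back().end(), buf);
    _chunks.pop_back();
    return true;
  }
};

// A task whose local queue has grown large moves one chunk to the global
// stack; a failed push raises the overflow flag, mirroring set_has_overflown()
// in mark_stack_push() above.
bool move_entries_to_global_stack(std::deque<TaskEntryModel>& local,
                                  GlobalMarkStackModel& global,
                                  bool& overflow_flag) {
  if (local.size() < EntriesPerChunkModel) return true;         // nothing to do
  TaskEntryModel buffer[EntriesPerChunkModel];
  for (size_t i = 0; i < EntriesPerChunkModel; i++) {
    buffer[i] = local.back();
    local.pop_back();
  }
  if (!global.push_chunk(buffer)) {
    overflow_flag = true;
    return false;
  }
  return true;
}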
 506   G1CMRootMemRegions* root_regions() { return &_root_regions; }
 507 
 508   void concurrent_cycle_start();
 509   // Abandon current marking iteration due to a Full GC.
 510   void concurrent_cycle_abort();
 511   void concurrent_cycle_end();
 512 
 513   void update_accum_task_vtime(int i, double vtime) {
 514     _accum_task_vtime[i] += vtime;
 515   }
 516 
 517   double all_task_accum_vtime() {
 518     double ret = 0.0;
 519     for (uint i = 0; i < _max_num_tasks; ++i)
 520       ret += _accum_task_vtime[i];
 521     return ret;
 522   }
 523 
 524   // Attempts to steal an object from the task queues of other tasks
 525   bool try_stealing(uint worker_id, G1TaskQueueEntry& task_entry);
 526 


 539 
 540   // Moves all per-task cached data into global state.
 541   void flush_all_task_caches();
 542   // Prepare internal data structures for the next mark cycle. This includes clearing
 543   // the next mark bitmap and some internal data structures. This method is intended
 544   // to be called concurrently with the mutator. It will yield to safepoint requests.
 545   void cleanup_for_next_mark();
 546 
 548   // Clear the previous marking bitmap during a safepoint.
 548   void clear_prev_bitmap(WorkGang* workers);
 549 
 550   // These two methods do the work that needs to be done at the start and end of the
 551   // initial mark pause.
 552   void pre_initial_mark();
 553   void post_initial_mark();
 554 
 555   // Scan all the root regions and mark everything reachable from
 556   // them.
 557   void scan_root_regions();
 558 
 559   // Scan a single root MemRegion (covering nTAMS to top of a region) and mark everything reachable from it.
 560   void scan_root_region(MemRegion* region, uint worker_id);
 561 
 562   // Do concurrent phase of marking, to a tentative transitive closure.
 563   void mark_from_roots();
 564 
 565   // Do concurrent preclean work.
 566   void preclean();
 567 
 568   void remark();
 569 
 570   void cleanup();
 571   // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
 572   // this carefully.
 573   inline void mark_in_prev_bitmap(oop p);
 574 
 575   // Clears marks for all objects in the given range in the prev
 576   // bitmap.  Caution: the previous bitmap is usually
 577   // read-only, so use this carefully!
 578   void clear_range_in_prev_bitmap(MemRegion mr);
 579 
 580   inline bool is_marked_in_prev_bitmap(oop p) const;
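For orientation, the phase methods declared in this class fit together roughly as sketched below. The driver is hypothetical: in reality G1ConcurrentMarkThread (a friend of this class) calls the private phase methods directly, interleaves yields and abort checks, and schedules remark and cleanup as pauses, while pre_initial_mark()/post_initial_mark() run inside the initial-mark pause before the concurrent phases start.

// ConcurrentMarkPhases is a hypothetical stand-in so the ordering can be shown
// in isolation; the real phase methods are private members of G1ConcurrentMark.
struct ConcurrentMarkPhases {
  virtual void scan_root_regions() = 0;      // concurrent; must finish before the next GC
  virtual void mark_from_roots() = 0;        // concurrent transitive closure
  virtual void preclean() = 0;               // optional concurrent preclean work
  virtual void remark() = 0;                 // finishes marking (a pause in practice)
  virtual void cleanup() = 0;                // post-marking cleanup (a pause in practice)
  virtual void cleanup_for_next_mark() = 0;  // concurrently clears the next mark bitmap
  virtual ~ConcurrentMarkPhases() {}
};

// Simplified ordering of one cycle; aborts, yields and the surrounding
// initial-mark pause are omitted.
void run_one_cycle(ConcurrentMarkPhases& cm) {
  cm.scan_root_regions();
  cm.mark_from_roots();
  cm.preclean();
  cm.remark();
  cm.cleanup();
  cm.cleanup_for_next_mark();
}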

