src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 6668 : 8048112: G1 Full GC needs to support the case when the very first region is not available
Summary: To allow uncommit of regions within the heap, G1 Full GC should correctly handle the case when the very first region is not available (uncommitted). Provide support for that by lazily initializing the compaction point during iteration of the list of heap regions. Further refactor the code to let the G1CollectedHeap handle finding the next region to compact into.
Reviewed-by:
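
A minimal stand-alone sketch of the lazily initialized compaction point described in this summary. All names and types below (Region, next_available, compact) are simplified stand-ins for illustration, not the actual HotSpot code:

    // Model only: 'Region', 'next_available' and 'compact' are hypothetical
    // stand-ins, not HotSpot types.
    #include <cstddef>
    #include <vector>

    struct Region {
      bool   available = false;  // committed and usable (false == uncommitted)
      size_t live      = 0;      // live bytes that must be compacted
      size_t capacity  = 0;      // bytes the region can hold
      size_t used      = 0;      // bytes already compacted into it
    };

    // Find the index of the next available region at or after 'idx', or -1.
    static int next_available(const std::vector<Region>& heap, size_t idx) {
      for (size_t i = idx; i < heap.size(); i++) {
        if (heap[i].available) return (int)i;
      }
      return -1;
    }

    // Compact live data. The compaction point is initialized lazily from the
    // first available region seen, so the code keeps working even when
    // region 0 is uncommitted.
    static void compact(std::vector<Region>& heap) {
      int cp = -1;  // -1 == compaction point not yet initialized
      for (size_t i = 0; i < heap.size(); i++) {
        if (!heap[i].available) continue;  // skip uncommitted regions
        if (cp == -1) cp = (int)i;         // lazy initialization
        size_t remaining = heap[i].live;
        while (remaining > 0 && cp != -1) {
          size_t room = heap[cp].capacity - heap[cp].used;
          if (room == 0) {  // advance to the next region to compact into
            cp = next_available(heap, (size_t)cp + 1);
            continue;
          }
          size_t n = remaining < room ? remaining : room;
          heap[cp].used += n;
          remaining     -= n;
        }
      }
    }
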
rev 6669 : 8048084: Need to abort concurrent next bitmap clear when marking is aborted during concurrent next bitmap clearing
Summary: When concurrent next bitmap clearing is interrupted by a Full GC, the concurrent clearing must be aborted. On the one hand, the concurrent marking thread might still be working on a region that has just been uncommitted; on the other hand, continuing is superfluous because the Full GC already cleared the bitmap during mark abort handling.
Reviewed-by:
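
A minimal stand-alone sketch of the abort check this summary calls for; the flag name and types are hypothetical stand-ins, not the actual HotSpot code:

    #include <atomic>
    #include <cstring>
    #include <vector>

    // Hypothetical stand-in flag, set by the Full GC during mark abort handling.
    static std::atomic<bool> g_abort_bitmap_clear(false);

    struct BitmapChunk { unsigned char bits[1024]; };  // model of one bitmap piece

    // Clear the "next" bitmap chunk by chunk, re-checking the abort flag so a
    // Full GC can stop the clearing thread before it touches a chunk that may
    // belong to a just-uncommitted region.
    static void clear_next_bitmap(std::vector<BitmapChunk>& bitmap) {
      for (BitmapChunk& chunk : bitmap) {
        if (g_abort_bitmap_clear.load(std::memory_order_acquire)) {
          return;  // continuing would be superfluous: the Full GC already
                   // cleared the bitmap itself
        }
        std::memset(chunk.bits, 0, sizeof(chunk.bits));
      }
    }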

--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
1319   void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
1320 
1321   // Iterate over all objects, calling "cl.do_object" on each.
1322   virtual void object_iterate(ObjectClosure* cl);
1323 
1324   virtual void safe_object_iterate(ObjectClosure* cl) {
1325     object_iterate(cl);
1326   }
1327 
1328   // Iterate over all spaces in use in the heap, in ascending address order.
1329   virtual void space_iterate(SpaceClosure* cl);
1330 
1331   // Iterate over heap regions, in address order, terminating the
1332   // iteration early if the "doHeapRegion" method returns "true".
1333   void heap_region_iterate(HeapRegionClosure* blk) const;
1334 
1335   // Return the region with the given index. It assumes the index is valid.
1336   inline HeapRegion* region_at(uint index) const;
1337 
1338   // Divide the heap region sequence into "chunks" of some size (the number
1339   // of regions divided by the number of parallel threads times some
1340   // overpartition factor, currently 4).  Assumes that this will be called
1341   // in parallel by ParallelGCThreads worker threads with distinct worker
1342   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1343   // calls will use the same "claim_value", and that that claim value is
1344   // different from the claim_value of any heap region before the start of
1345   // the iteration.  Applies "blk->doHeapRegion" to each of the regions, by
1346   // attempting to claim the first region in each chunk, and, if
1347   // successful, applying the closure to each region in the chunk (and
1348   // setting the claim value of the second and subsequent regions of the
1349   // chunk.)  For now requires that "doHeapRegion" always returns "false",
1350   // i.e., that a closure never attempt to abort a traversal.
1351   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1352                                        uint worker,
1353                                        uint no_of_par_workers,
1354                                        jint claim_value);
1355 
1356   // Resets all the region claim values to the default.
1357   void reset_heap_region_claim_values();
1358 
1359   // Resets the claim values of regions in the current
1360   // collection set to the default.
1361   void reset_cset_heap_region_claim_values();
1362 
1363 #ifdef ASSERT
1364   bool check_heap_region_claim_values(jint claim_value);
1365 
1366   // Same as the routine above but only checks regions in the
1367   // current collection set.
1368   bool check_cset_heap_region_claim_values(jint claim_value);
1369 #endif // ASSERT
1370 
1371   // Clear the cached cset start regions and (more importantly)
1372   // the time stamps. Called when we reset the GC time stamp.
1373   void clear_cset_start_regions();
1374 
1375   // Given the id of a worker, obtain or calculate a suitable
1376   // starting region for iterating over the current collection set.
1377   HeapRegion* start_cset_region_for_worker(uint worker_i);
1378 
1379   // This is a convenience method that is used by the
1380   // HeapRegionIterator classes to calculate the starting region for
1381   // each worker so that they do not all start from the same region.
1382   HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
1383 
1384   // Iterate over the regions (if any) in the current collection set.
1385   void collection_set_iterate(HeapRegionClosure* blk);
1386 
1387   // As above but starting from region r
1388   void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1389 
1390   HeapRegion* next_compaction_region(const HeapRegion* from) const;
1391 
1392   // A CollectedHeap will contain some number of spaces.  This finds the
1393   // space containing a given address, or else returns NULL.
1394   virtual Space* space_containing(const void* addr) const;
1395 
1396   // Returns the HeapRegion that contains addr. addr must not be NULL.
1397   template <class T>
1398   inline HeapRegion* heap_region_containing_raw(const T addr) const;
1399 
1400   // Returns the HeapRegion that contains addr. addr must not be NULL.
1401   // If addr is within a continues humongous region, it returns its humongous start region.
1402   template <class T>


+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

1319   void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
1320 
1321   // Iterate over all objects, calling "cl.do_object" on each.
1322   virtual void object_iterate(ObjectClosure* cl);
1323 
1324   virtual void safe_object_iterate(ObjectClosure* cl) {
1325     object_iterate(cl);
1326   }
1327 
1328   // Iterate over all spaces in use in the heap, in ascending address order.
1329   virtual void space_iterate(SpaceClosure* cl);
1330 
1331   // Iterate over heap regions, in address order, terminating the
1332   // iteration early if the "doHeapRegion" method returns "true".
1333   void heap_region_iterate(HeapRegionClosure* blk) const;
1334 
1335   // Return the region with the given index. It assumes the index is valid.
1336   inline HeapRegion* region_at(uint index) const;
1337 
1338   // Divide the heap region sequence into "chunks" of some size (the number
1339   // of regions divided by the number of parallel threads).
1340   // Assumes that this will be called
1341   // in parallel by ParallelGCThreads worker threads with distinct worker
1342   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1343   // calls will use the same "claim_value", and that that claim value is
1344   // different from the claim_value of any heap region before the start of
1345   // the iteration.
1346   // Applies "blk->doHeapRegion" to each of the regions, by
1347   // attempting to claim the first region in each chunk, and, if
1348   // successful, applying the closure to each region in the chunk (and
1349   // setting the claim value of the second and subsequent regions of the
1350   // chunk.)
1351   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1352                                        uint worker,
1353                                        uint no_of_par_workers,
1354                                        jint claim_value) const;
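
A minimal stand-alone model of the claim-based chunking this comment describes; the types and the compare-and-swap discipline below are illustrative stand-ins, not the actual HotSpot implementation:

    #include <algorithm>
    #include <atomic>
    #include <cstdint>
    #include <vector>

    struct ModelRegion { std::atomic<int32_t> claim{0}; };  // stand-in for HeapRegion

    // Each worker calls this concurrently with the same 'claim_value'. Only the
    // worker that wins the compare-and-swap on a chunk's first region processes
    // that chunk; it then marks the remaining regions of the chunk as claimed.
    static void par_iterate_chunked(std::vector<ModelRegion>& regions,
                                    uint32_t num_workers,
                                    int32_t claim_value,
                                    void (*do_region)(ModelRegion&)) {
      size_t num_chunks = std::max<size_t>(1, num_workers);
      size_t chunk_size = (regions.size() + num_chunks - 1) / num_chunks;
      for (size_t start = 0; start < regions.size(); start += chunk_size) {
        int32_t expected = regions[start].claim.load();
        if (expected == claim_value) continue;  // chunk already taken
        if (!regions[start].claim.compare_exchange_strong(expected, claim_value)) {
          continue;  // lost the race; another worker owns this chunk
        }
        do_region(regions[start]);
        size_t end = std::min(regions.size(), start + chunk_size);
        for (size_t i = start + 1; i < end; i++) {
          regions[i].claim.store(claim_value);  // claim, then apply the closure
          do_region(regions[i]);
        }
      }
    }

In the real code each worker would also begin its scan at a different chunk (cf. start_region_for_worker below) so that the workers do not all contend on the same chunk first.
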
1355 
1356   // Resets all the region claim values to the default.
1357   void reset_heap_region_claim_values();
1358 
1359   // Resets the claim values of regions in the current
1360   // collection set to the default.
1361   void reset_cset_heap_region_claim_values();
1362 
1363 #ifdef ASSERT
1364   bool check_heap_region_claim_values(jint claim_value);
1365 
1366   // Same as the routine above but only checks regions in the
1367   // current collection set.
1368   bool check_cset_heap_region_claim_values(jint claim_value);
1369 #endif // ASSERT
1370 
1371   // Clear the cached cset start regions and (more importantly)
1372   // the time stamps. Called when we reset the GC time stamp.
1373   void clear_cset_start_regions();
1374 
1375   // Given the id of a worker, obtain or calculate a suitable
1376   // starting region for iterating over the current collection set.
1377   HeapRegion* start_cset_region_for_worker(uint worker_i);
1378 
1379   // This is a convenience method used to calculate the starting region
1380   // index for each worker so that they do not all start from the same
1381   // region.
1382   uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
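
The declaration above computes a per-worker starting region index. A plausible stand-alone sketch of such a computation, assuming a simple proportional split across the region index space (the actual formula in the patch is not shown here, and the name is a hypothetical stand-in):

    #include <cassert>
    #include <cstdint>

    // Hypothetical model: spread workers evenly across the region index space
    // so they do not all begin iterating at region 0.
    static uint32_t model_start_region_for_worker(uint32_t worker_i,
                                                  uint32_t num_workers,
                                                  uint32_t num_regions) {
      assert(worker_i < num_workers);
      // 64-bit intermediate avoids overflow for very large region counts.
      return (uint32_t)(((uint64_t)worker_i * num_regions) / num_workers);
    }
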
1383 
1384   // Iterate over the regions (if any) in the current collection set.
1385   void collection_set_iterate(HeapRegionClosure* blk);
1386 
1387   // As above but starting from region r
1388   void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1389 
1390   HeapRegion* next_compaction_region(const HeapRegion* from) const;
1391 
1392   // A CollectedHeap will contain some number of spaces.  This finds the
1393   // space containing a given address, or else returns NULL.
1394   virtual Space* space_containing(const void* addr) const;
1395 
1396   // Returns the HeapRegion that contains addr. addr must not be NULL.
1397   template <class T>
1398   inline HeapRegion* heap_region_containing_raw(const T addr) const;
1399 
1400   // Returns the HeapRegion that contains addr. addr must not be NULL.
1401   // If addr is within a continues humongous region, it returns its humongous start region.
1402   template <class T>