src/share/vm/gc/g1/g1CollectedHeap.hpp

old/src/share/vm/gc/g1/g1CollectedHeap.hpp

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/concurrentMark.hpp"
  29 #include "gc/g1/evacuationInfo.hpp"
  30 #include "gc/g1/g1AllocationContext.hpp"
  31 #include "gc/g1/g1BiasedArray.hpp"
  32 #include "gc/g1/g1CollectorState.hpp"
  33 #include "gc/g1/g1HRPrinter.hpp"
  34 #include "gc/g1/g1InCSetState.hpp"
  35 #include "gc/g1/g1MonitoringSupport.hpp"
  36 #include "gc/g1/g1EvacFailure.hpp"
  37 #include "gc/g1/g1EvacStats.hpp"
  38 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  39 #include "gc/g1/g1YCTypes.hpp"
  40 #include "gc/g1/hSpaceCounters.hpp"
  41 #include "gc/g1/heapRegionManager.hpp"
  42 #include "gc/g1/heapRegionSet.hpp"
  43 #include "gc/g1/youngList.hpp"
  44 #include "gc/shared/barrierSet.hpp"
  45 #include "gc/shared/collectedHeap.hpp"
  46 #include "gc/shared/plab.hpp"
  47 #include "memory/memRegion.hpp"
  48 #include "utilities/stack.hpp"
  49 
  50 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  51 // It uses the "Garbage First" heap organization and algorithm, which
  52 // may combine concurrent marking with parallel, incremental compaction of


 101   bool do_object_b(oop p);
 102 };
 103 
 104 class RefineCardTableEntryClosure;
 105 
 106 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 107  private:
 108   void reset_from_card_cache(uint start_idx, size_t num_regions);
 109  public:
 110   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 111 };
 112 
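The class above is a plain observer hook: the region-to-space mapper calls
on_commit() for each batch of newly committed regions, and G1 uses the
callback to reset stale entries in the from-card cache for those regions.
A minimal standalone sketch of that wiring, with made-up stand-in types
rather than the HotSpot classes:

    #include <cstddef>

    // Stand-in for G1MappingChangedListener.
    class MappingChangedListener {
     public:
      virtual ~MappingChangedListener() {}
      virtual void on_commit(unsigned start_idx, size_t num_regions, bool zero_filled) = 0;
    };

    // Stand-in for the mapper that commits region-sized chunks of memory.
    class RegionMapper {
      MappingChangedListener* _listener;
     public:
      explicit RegionMapper(MappingChangedListener* l) : _listener(l) {}
      void commit_regions(unsigned start_idx, size_t n) {
        // ... commit and (possibly) zero the backing memory here ...
        if (_listener != NULL) {
          _listener->on_commit(start_idx, n, true /* zero_filled */);
        }
      }
    };
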
 113 class G1CollectedHeap : public CollectedHeap {
 114   friend class VM_CollectForMetadataAllocation;
 115   friend class VM_G1CollectForAllocation;
 116   friend class VM_G1CollectFull;
 117   friend class VM_G1IncCollectionPause;
 118   friend class VMStructs;
 119   friend class MutatorAllocRegion;
 120   friend class G1GCAllocRegion;
 121 
 122   // Closures used in implementation.
 123   friend class G1ParScanThreadState;
 124   friend class G1ParScanThreadStateSet;
 125   friend class G1ParTask;
 126   friend class G1PLABAllocator;
 127   friend class G1PrepareCompactClosure;
 128 
 129   // Other related classes.
 130   friend class HeapRegionClaimer;
 131 
 132   // Testing classes.
 133   friend class G1CheckCSetFastTableClosure;
 134 
 135 private:
 136   WorkGang* _workers;
 137 
 138   static size_t _humongous_object_threshold_in_words;
 139 
 140   // The secondary free list which contains regions that have been


 164   // before heap shrinking (free_list_only == true).
 165   void tear_down_region_sets(bool free_list_only);
 166 
 167   // Rebuilds the region sets / lists so that they are repopulated to
 168   // reflect the contents of the heap. The only exception is the
 169   // humongous set which was not torn down in the first place. If
 170   // free_list_only is true, it will only rebuild the master free
 171   // list. It is called after a Full GC (free_list_only == false) or
 172   // after heap shrinking (free_list_only == true).
 173   void rebuild_region_sets(bool free_list_only);
 174 
 175   // Callback for region mapping changed events.
 176   G1RegionMappingChangedListener _listener;
 177 
 178   // The sequence of all heap regions in the heap.
 179   HeapRegionManager _hrm;
 180 
 181   // Manages all allocations within regions except humongous object allocations.
 182   G1Allocator* _allocator;
 183 
 184   // Outside of GC pauses, the number of bytes used in all regions other
 185   // than the current allocation region(s).
 186   size_t _summary_bytes_used;
 187 
 188   void increase_used(size_t bytes);
 189   void decrease_used(size_t bytes);
 190 
 191   void set_used(size_t bytes);
 192 
 193   // Class that handles archive allocation ranges.
 194   G1ArchiveAllocator* _archive_allocator;
 195 
 196   // Statistics for each allocation context
 197   AllocationContextStats _allocation_context_stats;
 198 
 199   // GC allocation statistics policy for survivors.
 200   G1EvacStats _survivor_evac_stats;
 201 
 202   // GC allocation statistics policy for tenured objects.
 203   G1EvacStats _old_evac_stats;


 269   // called at the end of a GC and artificially expands the heap by
 270   // allocating a number of dead regions. This way we can induce very
 271   // frequent marking cycles and stress the cleanup / concurrent
 272   // cleanup code more (as all the regions that will be allocated by
 273   // this method will be found dead by the marking cycle).
 274   void allocate_dummy_regions() PRODUCT_RETURN;
 275 
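PRODUCT_RETURN (and the PRODUCT_RETURN_(code) variant used further below) is
the idiom from HotSpot's utilities/macros.hpp for debug-only member functions:
a product build gets an inline empty body so the call compiles away, while a
non-product build is left with an ordinary declaration defined in the .cpp
file. A sketch of the definitions:

    #ifdef PRODUCT
    #define PRODUCT_RETURN          {}
    #define PRODUCT_RETURN_(code)   { code }
    #else
    #define PRODUCT_RETURN          /* next token must be ; */
    #define PRODUCT_RETURN_(code)   /* next token must be ; */
    #endif

    // Product build:      void allocate_dummy_regions() {}
    // Non-product build:  void allocate_dummy_regions();   (defined elsewhere)
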
 276   // Clear RSets after a compaction. It also resets the GC time stamps.
 277   void clear_rsets_post_compaction();
 278 
 279   // If the HR printer is active, dump the state of the regions in the
 280   // heap after a compaction.
 281   void print_hrm_post_compaction();
 282 
 283   // Create a memory mapper for auxiliary data structures of the given size and
 284   // translation factor.
 285   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 286                                                          size_t size,
 287                                                          size_t translation_factor);
 288 
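The translation factor is how many bytes of heap each byte of the auxiliary
structure describes, which lets the mapper size and commit the structure in
step with the heap. Illustrative arithmetic only (512 bytes is G1's usual
card granularity; the variable names are made up):

    #include <cstddef>

    int main() {
      size_t heap_bytes         = 1024u * 1024u * 1024u;  // 1 GB reserved heap
      size_t translation_factor = 512;                    // heap bytes per aux byte
      size_t aux_bytes          = heap_bytes / translation_factor;
      // e.g. a card table for 1 GB of heap needs 2 MB of auxiliary memory
      return aux_bytes == 2u * 1024u * 1024u ? 0 : 1;
    }
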
 289   double verify(bool guard, const char* msg);
 290   void verify_before_gc();
 291   void verify_after_gc();
 292 
 293   void log_gc_footer(jlong pause_time_counter);
 294 
 295   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 296 
 297   void process_weak_jni_handles();
 298 
 299   // These are macros so that, if the assert fires, we get the correct
 300   // line number, file, etc.
 301 
 302 #define heap_locking_asserts_params(_extra_message_)                          \
 303   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 304   (_extra_message_),                                                          \
 305   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 306   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 307   BOOL_TO_STR(Thread::current()->is_VM_thread())
 308 
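Concretely, an assert wrapped in a helper function would always report the
helper's own file and line, while a macro expands at the call site and so
reports the caller's. A standalone sketch of the difference (not HotSpot
code):

    #include <cstdio>

    // Function-wrapped check: a failure always names this line.
    static void check_locked_fn(bool locked) {
      if (!locked) std::fprintf(stderr, "not locked at %s:%d\n", __FILE__, __LINE__);
    }

    // Macro-wrapped check: expands at the call site, so a failure names
    // the caller's file and line instead.
    #define CHECK_LOCKED(locked)                                              \
      do {                                                                    \
        if (!(locked))                                                        \
          std::fprintf(stderr, "not locked at %s:%d\n", __FILE__, __LINE__);  \
      } while (0)

    int main() {
      check_locked_fn(false);  // reports the helper's line
      CHECK_LOCKED(false);     // reports this line
      return 0;
    }
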
 309 #define assert_heap_locked()                                                  \
 310   do {                                                                        \
 311     assert(Heap_lock->owned_by_self(),                                        \
 312            heap_locking_asserts_params("should be holding the Heap_lock"));   \


 510   // to support an allocation of the given "word_size".  If
 511   // successful, perform the allocation and return the address of the
 512   // allocated block, or else "NULL".
 513   HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 514 
 515   // Process any reference objects discovered during
 516   // an incremental evacuation pause.
 517   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 518 
 519   // Enqueue any remaining discovered references
 520   // after processing.
 521   void enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 522 
 523 public:
 524   WorkGang* workers() const { return _workers; }
 525 
 526   G1Allocator* allocator() {
 527     return _allocator;
 528   }
 529 
 530   G1MonitoringSupport* g1mm() {
 531     assert(_g1mm != NULL, "should have been initialized");
 532     return _g1mm;
 533   }
 534 
 535   // Expand the garbage-first heap by at least the given size (in bytes!).
 536   // Returns true if the heap was expanded by the requested amount;
 537   // false otherwise.
 538   // (Rounds up to a HeapRegion boundary.)
 539   bool expand(size_t expand_bytes, double* expand_time_ms = NULL);
 540 
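Since whole regions are the unit of commitment, the request is rounded up to
a region boundary before expansion; a standalone sketch of the likely
arithmetic (kGrainBytes is a stand-in for HeapRegion::GrainBytes):

    #include <cstddef>

    static const size_t kGrainBytes = 1024u * 1024u;  // stand-in region size

    // Round a byte count up to a region boundary, as expand() is documented
    // to do before committing memory.
    static size_t round_up_to_region(size_t bytes) {
      return (bytes + kGrainBytes - 1) / kGrainBytes * kGrainBytes;
    }

    int main() {
      // Asking for one byte more than 3 regions commits 4 whole regions.
      return round_up_to_region(3 * kGrainBytes + 1) == 4 * kGrainBytes ? 0 : 1;
    }
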
 541   // Returns the PLAB statistics for a given destination.
 542   inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
 543 
 544   // Determines PLAB size for a given destination.
 545   inline size_t desired_plab_sz(InCSetState dest);
 546 
 547   inline AllocationContextStats& allocation_context_stats();
 548 
 549   // Do anything common to GCs.


1039   virtual bool is_maximal_no_gc() const {
1040     return _hrm.available() == 0;
1041   }
1042 
1043   // The current number of regions in the heap.
1044   uint num_regions() const { return _hrm.length(); }
1045 
1046   // The max number of regions in the heap.
1047   uint max_regions() const { return _hrm.max_length(); }
1048 
1049   // The number of regions that are completely free.
1050   uint num_free_regions() const { return _hrm.num_free_regions(); }
1051 
1052   MemoryUsage get_auxiliary_data_memory_usage() const {
1053     return _hrm.get_auxiliary_data_memory_usage();
1054   }
1055 
1056   // The number of regions that are not completely free.
1057   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1058 
1059   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1060   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1061   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
1062   void verify_dirty_young_regions() PRODUCT_RETURN;
1063 
1064 #ifndef PRODUCT
1065   // Make sure that the given bitmap has no marked objects in the
1066   // range [from,limit). If it does, print an error message and return
1067   // false. Otherwise, just return true. bitmap_name should be "prev"
1068   // or "next".
1069   bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
1070                                 HeapWord* from, HeapWord* limit);
1071 
1072   // Verify that the prev / next bitmap range [tams,end) for the given
1073   // region has no marks. Return true if all is well, false if errors
1074   // are detected.
1075   bool verify_bitmaps(const char* caller, HeapRegion* hr);
1076 #endif // PRODUCT
1077 
1078   // If G1VerifyBitmaps is set, verify that the marking bitmaps for
1079   // the given region do not have any spurious marks. If errors are
1080   // detected, print appropriate error messages and crash.
1081   void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;
1082 
1083   // If G1VerifyBitmaps is set, verify that the marking bitmaps do not
1084   // have any spurious marks. If errors are detected, print
1085   // appropriate error messages and crash.
1086   void check_bitmaps(const char* caller) PRODUCT_RETURN;
1087 
1088   // Do sanity check on the contents of the in-cset fast test table.
1089   bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
1090 
1091   // verify_region_sets() performs verification over the region
1092   // lists. It will be compiled in the product code to be used when
1093   // necessary (i.e., during heap verification).
1094   void verify_region_sets();
1095 
1096   // verify_region_sets_optional() is planted in the code for
1097   // list verification in non-product builds (and it can be enabled in
1098   // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
1099 #if HEAP_REGION_SET_FORCE_VERIFY
1100   void verify_region_sets_optional() {
1101     verify_region_sets();
1102   }
1103 #else // HEAP_REGION_SET_FORCE_VERIFY
1104   void verify_region_sets_optional() { }
1105 #endif // HEAP_REGION_SET_FORCE_VERIFY
1106 
1107 #ifdef ASSERT
1108   bool is_on_master_free_list(HeapRegion* hr) {
1109     return _hrm.is_free(hr);
1110   }
1111 #endif // ASSERT
1112 
1113   // Wrappers for the region list operations that can be called from
1114   // methods outside this class.
1115 
1116   void secondary_free_list_add(FreeRegionList* list) {
1117     _secondary_free_list.add_ordered(list);
1118   }
1119 
1120   void append_secondary_free_list() {
1121     _hrm.insert_list_into_free_list(&_secondary_free_list);
1122   }
1123 
1124   void append_secondary_free_list_if_not_empty_with_lock() {
1125     // If the secondary free list looks empty there's no reason to
1126     // take the lock and then try to append it.


1408   // This function returns true when an object has been
1409   // around since the previous marking and hasn't yet
1410   // been marked during this marking, and is not in an archive region.
1411   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1412     return
1413       !hr->obj_allocated_since_next_marking(obj) &&
1414       !isMarkedNext(obj) &&
1415       !hr->is_archive();
1416   }
1417 
1418   // Determine if an object is dead, given only the object itself.
1419   // This will find the region to which the object belongs and
1420   // then call the region version of the same function.
1421 
1422   // Note: if the object is NULL, it isn't dead.
1423 
1424   inline bool is_obj_dead(const oop obj) const;
1425 
1426   inline bool is_obj_ill(const oop obj) const;
1427 
1428   bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
1429   HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
1430   bool is_marked(oop obj, VerifyOption vo);
1431   const char* top_at_mark_start_str(VerifyOption vo);
1432 
1433   ConcurrentMark* concurrent_mark() const { return _cm; }
1434 
1435   // Refinement
1436 
1437   ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1438 
1439   // The dirty cards region list is used to record a subset of regions
1440   // whose cards need clearing. The list is populated during the
1441   // remembered set scanning and drained during the card table
1442   // cleanup. Although the methods are reentrant, population/draining
1443   // phases must not overlap. For synchronization purposes the last
1444   // element on the list points to itself.
1445   HeapRegion* _dirty_cards_region_list;
1446   void push_dirty_cards_region(HeapRegion* hr);
1447   HeapRegion* pop_dirty_cards_region();
1448 
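The self-pointing tail is an intrusive-list trick: a region's link field is
NULL exactly when the region is off the list, so a non-NULL link (with a
self-link for the last element) doubles as an "already queued" flag. A
minimal single-threaded sketch of the convention, not the G1 code:

    #include <cstddef>

    struct Region {
      Region* next_dirty;               // NULL <=> not on the dirty-cards list
      Region() : next_dirty(NULL) {}
    };

    struct DirtyCardsRegionList {
      Region* head;
      DirtyCardsRegionList() : head(NULL) {}

      void push(Region* r) {
        if (r->next_dirty != NULL) return;          // already queued
        r->next_dirty = (head == NULL) ? r : head;  // self-link marks the tail
        head = r;
      }

      Region* pop() {
        Region* r = head;
        if (r == NULL) return NULL;
        head = (r->next_dirty == r) ? NULL : r->next_dirty;  // self-link => last
        r->next_dirty = NULL;                                // off the list again
        return r;
      }
    };
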
1449   // Optimized nmethod scanning support routines
1450 
1451   // Register the given nmethod with the G1 heap.
1452   virtual void register_nmethod(nmethod* nm);


1473   // Verification
1474 
1475   // Perform any cleanup actions necessary before allowing a verification.
1476   virtual void prepare_for_verify();
1477 
1478   // Perform verification.
1479 
1480   // vo == UsePrevMarking -> use "prev" marking information,
1481   // vo == UseNextMarking -> use "next" marking information
1482   // vo == UseMarkWord    -> use the mark word in the object header
1483   //
1484   // NOTE: Only the "prev" marking information is guaranteed to be
1485   // consistent most of the time, so most calls to this should use
1486   // vo == UsePrevMarking.
1487   // Currently, there is only one case where this is called with
1488   // vo == UseNextMarking, which is to verify the "next" marking
1489   // information at the end of remark.
1490   // Currently there is only one place where this is called with
1491   // vo == UseMarkWord, which is to verify the marking during a
1492   // full GC.
1493   void verify(VerifyOption vo);
1494 
1495   // The methods below are here for convenience and dispatch the
1496   // appropriate method depending on value of the given VerifyOption
1497   // parameter. The values for that parameter, and their meanings,
1498   // are the same as those above.
1499 
1500   bool is_obj_dead_cond(const oop obj,
1501                         const HeapRegion* hr,
1502                         const VerifyOption vo) const;
1503 
1504   bool is_obj_dead_cond(const oop obj,
1505                         const VerifyOption vo) const;
1506 
1507   G1HeapSummary create_g1_heap_summary();
1508   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1509 
1510   // Printing
1511 
1512   virtual void print_on(outputStream* st) const;
1513   virtual void print_extended_on(outputStream* st) const;

new/src/share/vm/gc/g1/g1CollectedHeap.hpp

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/concurrentMark.hpp"
  29 #include "gc/g1/evacuationInfo.hpp"
  30 #include "gc/g1/g1AllocationContext.hpp"
  31 #include "gc/g1/g1BiasedArray.hpp"
  32 #include "gc/g1/g1CollectorState.hpp"
  33 #include "gc/g1/g1HeapVerifier.hpp"
  34 #include "gc/g1/g1HRPrinter.hpp"
  35 #include "gc/g1/g1InCSetState.hpp"
  36 #include "gc/g1/g1MonitoringSupport.hpp"
  37 #include "gc/g1/g1EvacFailure.hpp"
  38 #include "gc/g1/g1EvacStats.hpp"
  39 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  40 #include "gc/g1/g1YCTypes.hpp"
  41 #include "gc/g1/hSpaceCounters.hpp"
  42 #include "gc/g1/heapRegionManager.hpp"
  43 #include "gc/g1/heapRegionSet.hpp"
  44 #include "gc/g1/youngList.hpp"
  45 #include "gc/shared/barrierSet.hpp"
  46 #include "gc/shared/collectedHeap.hpp"
  47 #include "gc/shared/plab.hpp"
  48 #include "memory/memRegion.hpp"
  49 #include "utilities/stack.hpp"
  50 
  51 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  52 // It uses the "Garbage First" heap organization and algorithm, which
  53 // may combine concurrent marking with parallel, incremental compaction of


 102   bool do_object_b(oop p);
 103 };
 104 
 105 class RefineCardTableEntryClosure;
 106 
 107 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 108  private:
 109   void reset_from_card_cache(uint start_idx, size_t num_regions);
 110  public:
 111   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 112 };
 113 
 114 class G1CollectedHeap : public CollectedHeap {
 115   friend class VM_CollectForMetadataAllocation;
 116   friend class VM_G1CollectForAllocation;
 117   friend class VM_G1CollectFull;
 118   friend class VM_G1IncCollectionPause;
 119   friend class VMStructs;
 120   friend class MutatorAllocRegion;
 121   friend class G1GCAllocRegion;
 122   friend class G1HeapVerifier;
 123 
 124   // Closures used in implementation.
 125   friend class G1ParScanThreadState;
 126   friend class G1ParScanThreadStateSet;
 127   friend class G1ParTask;
 128   friend class G1PLABAllocator;
 129   friend class G1PrepareCompactClosure;
 130 
 131   // Other related classes.
 132   friend class HeapRegionClaimer;
 133 
 134   // Testing classes.
 135   friend class G1CheckCSetFastTableClosure;
 136 
 137 private:
 138   WorkGang* _workers;
 139 
 140   static size_t _humongous_object_threshold_in_words;
 141 
 142   // The secondary free list which contains regions that have been


 166   // before heap shrinking (free_list_only == true).
 167   void tear_down_region_sets(bool free_list_only);
 168 
 169   // Rebuilds the region sets / lists so that they are repopulated to
 170   // reflect the contents of the heap. The only exception is the
 171   // humongous set which was not torn down in the first place. If
 172   // free_list_only is true, it will only rebuild the master free
 173   // list. It is called after a Full GC (free_list_only == false) or
 174   // after heap shrinking (free_list_only == true).
 175   void rebuild_region_sets(bool free_list_only);
 176 
 177   // Callback for region mapping changed events.
 178   G1RegionMappingChangedListener _listener;
 179 
 180   // The sequence of all heap regions in the heap.
 181   HeapRegionManager _hrm;
 182 
 183   // Manages all allocations within regions except humongous object allocations.
 184   G1Allocator* _allocator;
 185 
 186   // Manages all heap verification.
 187   G1HeapVerifier* _verifier;
 188 
 189   // Outside of GC pauses, the number of bytes used in all regions other
 190   // than the current allocation region(s).
 191   size_t _summary_bytes_used;
 192 
 193   void increase_used(size_t bytes);
 194   void decrease_used(size_t bytes);
 195 
 196   void set_used(size_t bytes);
 197 
 198   // Class that handles archive allocation ranges.
 199   G1ArchiveAllocator* _archive_allocator;
 200 
 201   // Statistics for each allocation context
 202   AllocationContextStats _allocation_context_stats;
 203 
 204   // GC allocation statistics policy for survivors.
 205   G1EvacStats _survivor_evac_stats;
 206 
 207   // GC allocation statistics policy for tenured objects.
 208   G1EvacStats _old_evac_stats;


 274   // called at the end of a GC and artificially expands the heap by
 275   // allocating a number of dead regions. This way we can induce very
 276   // frequent marking cycles and stress the cleanup / concurrent
 277   // cleanup code more (as all the regions that will be allocated by
 278   // this method will be found dead by the marking cycle).
 279   void allocate_dummy_regions() PRODUCT_RETURN;
 280 
 281   // Clear RSets after a compaction. It also resets the GC time stamps.
 282   void clear_rsets_post_compaction();
 283 
 284   // If the HR printer is active, dump the state of the regions in the
 285   // heap after a compaction.
 286   void print_hrm_post_compaction();
 287 
 288   // Create a memory mapper for auxiliary data structures of the given size and
 289   // translation factor.
 290   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 291                                                          size_t size,
 292                                                          size_t translation_factor);
 293 
 294   void log_gc_footer(jlong pause_time_counter);
 295 
 296   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 297 
 298   void process_weak_jni_handles();
 299 
 300   // These are macros so that, if the assert fires, we get the correct
 301   // line number, file, etc.
 302 
 303 #define heap_locking_asserts_params(_extra_message_)                          \
 304   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 305   (_extra_message_),                                                          \
 306   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 307   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 308   BOOL_TO_STR(Thread::current()->is_VM_thread())
 309 
 310 #define assert_heap_locked()                                                  \
 311   do {                                                                        \
 312     assert(Heap_lock->owned_by_self(),                                        \
 313            heap_locking_asserts_params("should be holding the Heap_lock"));   \


 511   // to support an allocation of the given "word_size".  If
 512   // successful, perform the allocation and return the address of the
 513   // allocated block, or else "NULL".
 514   HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 515 
 516   // Process any reference objects discovered during
 517   // an incremental evacuation pause.
 518   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 519 
 520   // Enqueue any remaining discovered references
 521   // after processing.
 522   void enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 523 
 524 public:
 525   WorkGang* workers() const { return _workers; }
 526 
 527   G1Allocator* allocator() {
 528     return _allocator;
 529   }
 530 
 531   G1HeapVerifier* verifier() {
 532     return _verifier;
 533   }
 534 
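With the verification code factored out of G1CollectedHeap, callers
presumably reach the checks through this accessor instead of through the
heap itself; an illustrative call site (the G1HeapVerifier method name is
assumed from this refactoring, not verified here):

    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    // was: g1h->verify_region_sets_optional();
    g1h->verifier()->verify_region_sets_optional();
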
 535   G1MonitoringSupport* g1mm() {
 536     assert(_g1mm != NULL, "should have been initialized");
 537     return _g1mm;
 538   }
 539 
 540   // Expand the garbage-first heap by at least the given size (in bytes!).
 541   // Returns true if the heap was expanded by the requested amount;
 542   // false otherwise.
 543   // (Rounds up to a HeapRegion boundary.)
 544   bool expand(size_t expand_bytes, double* expand_time_ms = NULL);
 545 
 546   // Returns the PLAB statistics for a given destination.
 547   inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
 548 
 549   // Determines PLAB size for a given destination.
 550   inline size_t desired_plab_sz(InCSetState dest);
 551 
 552   inline AllocationContextStats& allocation_context_stats();
 553 
 554   // Do anything common to GCs.


1044   virtual bool is_maximal_no_gc() const {
1045     return _hrm.available() == 0;
1046   }
1047 
1048   // The current number of regions in the heap.
1049   uint num_regions() const { return _hrm.length(); }
1050 
1051   // The max number of regions in the heap.
1052   uint max_regions() const { return _hrm.max_length(); }
1053 
1054   // The number of regions that are completely free.
1055   uint num_free_regions() const { return _hrm.num_free_regions(); }
1056 
1057   MemoryUsage get_auxiliary_data_memory_usage() const {
1058     return _hrm.get_auxiliary_data_memory_usage();
1059   }
1060 
1061   // The number of regions that are not completely free.
1062   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1063 
1064 #ifdef ASSERT
1065   bool is_on_master_free_list(HeapRegion* hr) {
1066     return _hrm.is_free(hr);
1067   }
1068 #endif // ASSERT
1069 
1070   // Wrappers for the region list operations that can be called from
1071   // methods outside this class.
1072 
1073   void secondary_free_list_add(FreeRegionList* list) {
1074     _secondary_free_list.add_ordered(list);
1075   }
1076 
1077   void append_secondary_free_list() {
1078     _hrm.insert_list_into_free_list(&_secondary_free_list);
1079   }
1080 
1081   void append_secondary_free_list_if_not_empty_with_lock() {
1082     // If the secondary free list looks empty there's no reason to
1083     // take the lock and then try to append it.


1365   // This function returns true when an object has been
1366   // around since the previous marking and hasn't yet
1367   // been marked during this marking, and is not in an archive region.
1368   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1369     return
1370       !hr->obj_allocated_since_next_marking(obj) &&
1371       !isMarkedNext(obj) &&
1372       !hr->is_archive();
1373   }
1374 
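The three clauses above encode the liveness test for the in-progress
("next") marking: the object must predate the marking's start (i.e. lie
below the region's next top-at-mark-start), must not have been marked by it,
and archive regions are exempt. A standalone model with simplified stand-ins
for the G1 types:

    typedef const char* HeapWordPtr;  // stand-in for HeapWord*

    struct FakeRegion {
      HeapWordPtr next_tams;  // top-at-mark-start for the "next" marking
      bool        archive;
    };

    // Objects at or above next-TAMS were allocated after marking started
    // and are treated as implicitly live.
    static bool obj_allocated_since_next_marking(const FakeRegion& r, HeapWordPtr obj) {
      return obj >= r.next_tams;
    }

    static bool is_obj_ill(const FakeRegion& r, HeapWordPtr obj, bool marked_next) {
      return !obj_allocated_since_next_marking(r, obj)
          && !marked_next
          && !r.archive;
    }
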
1375   // Determine if an object is dead, given only the object itself.
1376   // This will find the region to which the object belongs and
1377   // then call the region version of the same function.
1378 
1379   // Note: if the object is NULL, it isn't dead.
1380 
1381   inline bool is_obj_dead(const oop obj) const;
1382 
1383   inline bool is_obj_ill(const oop obj) const;
1384 
1385   ConcurrentMark* concurrent_mark() const { return _cm; }
1386 
1387   // Refinement
1388 
1389   ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1390 
1391   // The dirty cards region list is used to record a subset of regions
1392   // whose cards need clearing. The list is populated during the
1393   // remembered set scanning and drained during the card table
1394   // cleanup. Although the methods are reentrant, population/draining
1395   // phases must not overlap. For synchronization purposes the last
1396   // element on the list points to itself.
1397   HeapRegion* _dirty_cards_region_list;
1398   void push_dirty_cards_region(HeapRegion* hr);
1399   HeapRegion* pop_dirty_cards_region();
1400 
1401   // Optimized nmethod scanning support routines
1402 
1403   // Register the given nmethod with the G1 heap.
1404   virtual void register_nmethod(nmethod* nm);


1425   // Verification
1426 
1427   // Perform any cleanup actions necessary before allowing a verification.
1428   virtual void prepare_for_verify();
1429 
1430   // Perform verification.
1431 
1432   // vo == UsePrevMarking -> use "prev" marking information,
1433   // vo == UseNextMarking -> use "next" marking information
1434   // vo == UseMarkWord    -> use the mark word in the object header
1435   //
1436   // NOTE: Only the "prev" marking information is guaranteed to be
1437   // consistent most of the time, so most calls to this should use
1438   // vo == UsePrevMarking.
1439   // Currently, there is only one case where this is called with
1440   // vo == UseNextMarking, which is to verify the "next" marking
1441   // information at the end of remark.
1442   // Currently there is only one place where this is called with
1443   // vo == UseMarkWord, which is to verify the marking during a
1444   // full GC.
1445   void verify(VerifyOption vo) {
1446     _verifier->verify(vo);
1447   }
1448 
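A hypothetical call site for the guidance above, using the VerifyOption enum
value names from HotSpot's universe.hpp (illustrative only):

    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->prepare_for_verify();
    // The usual case: only the "prev" marking information is guaranteed
    // to be consistent, so verify against it.
    g1h->verify(VerifyOption_G1UsePrevMarking);
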
1449   // The methods below are here for convenience and dispatch the
1450   // appropriate method depending on value of the given VerifyOption
1451   // parameter. The values for that parameter, and their meanings,
1452   // are the same as those above.
1453 
1454   bool is_obj_dead_cond(const oop obj,
1455                         const HeapRegion* hr,
1456                         const VerifyOption vo) const;
1457 
1458   bool is_obj_dead_cond(const oop obj,
1459                         const VerifyOption vo) const;
1460 
1461   G1HeapSummary create_g1_heap_summary();
1462   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1463 
1464   // Printing
1465 
1466   virtual void print_on(outputStream* st) const;
1467   virtual void print_extended_on(outputStream* st) const;

