src/hotspot/share/gc/g1/g1ParScanThreadState.hpp

@@ -64,42 +64,40 @@
   // Remember the last enqueued card to avoid enqueuing the same card over and over;
   // since we only ever scan a card once, this is sufficient.
   size_t _last_enqueued_card;
 
   // Upper and lower threshold to start and end work queue draining.
   uint const _stack_trim_upper_threshold;
   uint const _stack_trim_lower_threshold;
 
   Tickspan _trim_ticks;
   // Map from young-age-index (0 == not young, 1 is youngest) to
   // surviving words. base is what we get back from the malloc call
   size_t* _surviving_young_words_base;
   // this points into the array, as we use the first few entries for padding
   size_t* _surviving_young_words;
   // Number of elements in the array above.
   size_t _surviving_words_length;
   // Indicates whether in the last generation (old) there is no more space
   // available for allocation.
   bool _old_gen_is_full;
 
-#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
-
   G1RedirtyCardsQueue& redirty_cards_queue()     { return _rdcq; }
   G1CardTable* ct()                              { return _ct; }
 
   G1HeapRegionAttr dest(G1HeapRegionAttr original) const {
     assert(original.is_valid(),
            "Original region attr invalid: %s", original.get_type_str());
     assert(_dest[original.type()].is_valid_gen(),
            "Dest region attr is invalid: %s", _dest[original.type()].get_type_str());
     return _dest[original.type()];
   }
 
   size_t _num_optional_regions;
   G1OopStarChunkedList* _oops_into_optional_regions;
 
   G1NUMA* _numa;
 
   // Records how many object allocations happened at each node during copy to survivor.
   // Only starts recording when log of gc+heap+numa is enabled and its data is
   // transferred when flushed.
   size_t* _obj_alloc_stat;
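
The removed PADDING_ELEM_NUM macro computes how many size_t slots cover one cache line. As the header comments above describe, _surviving_young_words_base is the raw pointer returned by malloc, and _surviving_young_words points a few entries into it, with the leading entries used only as padding so the data each GC worker updates starts on its own cache line. The following is a minimal standalone sketch of that front-padding idea; the class name, the fixed 64-byte line size, and the malloc-based allocation are illustrative assumptions, not the HotSpot implementation.

#include <cstddef>
#include <cstdlib>
#include <cstring>

// Assumed cache line size; HotSpot uses the platform-specific
// DEFAULT_CACHE_LINE_SIZE rather than a hard-coded value.
static const size_t kCacheLineSize = 64;
// Number of size_t slots that cover one cache line -- the role the
// PADDING_ELEM_NUM macro played in the header above.
static const size_t kPaddingElems = kCacheLineSize / sizeof(size_t);

// Hypothetical per-worker statistics array, padded at the front so the
// useful elements start on a fresh cache line.
class SurvivingWordsSketch {
  size_t* _base;    // what malloc returned (includes the padding slots)
  size_t* _words;   // first useful element; all indexing goes through this
  size_t  _length;  // number of useful elements

public:
  explicit SurvivingWordsSketch(size_t length) : _length(length) {
    _base  = static_cast<size_t*>(::malloc((kPaddingElems + length) * sizeof(size_t)));
    _words = _base + kPaddingElems;               // skip over the padding
    ::memset(_words, 0, length * sizeof(size_t));
  }
  ~SurvivingWordsSketch() { ::free(_base); }

  // Index 0 == not young, 1 == youngest age band, as the header comment describes.
  void add(size_t age_index, size_t words) { _words[age_index] += words; }
  size_t at(size_t age_index) const        { return _words[age_index]; }
};

Front-padding a plain malloc'ed block like this is a simple alternative to an aligned allocator: the base pointer is kept only so the block can be freed, while every read and write goes through the offset pointer.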