37 class G1RemSet;
38 class HeapRegion;
39
40 // An evicting cache of cards that have been logged by the G1 post
41 // write barrier. Placing a card in the cache delays the refinement
42 // of the card until the card is evicted, or the cache is drained
43 // during the next evacuation pause.
44 //
45 // The first thing the G1 post write barrier does is to check whether
46 // the card containing the updated pointer is already dirty and, if
47 // so, skips the remaining code in the barrier.
48 //
// Delaying the refinement of a card keeps the card dirty, so a
// subsequent execution of the write barrier for that card sees it is
// already dirty and skips the remainder of the write barrier.
52 //
53 // This can significantly reduce the overhead of the write barrier
54 // code, increasing throughput.
55
class G1HotCardCache: public CHeapObj<mtGC> {
  G1CollectedHeap* _g1h;

  // The card cache table
  jbyte** _hot_cache;

  // Number of entries in _hot_cache.
  int _hot_cache_size;
  // Number of cards currently held in the cache (see hot_cache_is_empty()).
  int _n_hot;
  // Insertion index into _hot_cache; reset to 0 by reset_hot_cache().
  // Presumably wraps so the oldest entry is evicted once the cache is
  // full -- confirm against insert() in the .cpp file.
  int _hot_cache_idx;

  // Number of cached cards a worker claims per chunk when draining the
  // cache in parallel.
  int _hot_cache_par_chunk_size;
  // Next index to be claimed during a parallel drain; shared between
  // worker threads, hence volatile.
  volatile int _hot_cache_par_claimed_idx;

  // Whether caching is currently enabled; see set_use_cache().
  bool _use_cache;

  // Per-card counts; presumably used by insert() to decide whether a
  // card is "hot" enough to cache -- the policy lives in the .cpp file.
  G1CardCounts _card_counts;

  // The number of cached cards a thread claims when flushing the cache
  static const int ClaimChunkSize = 32;

  // Caching is enabled by default only when a non-zero cache size was
  // requested via G1ConcRSLogCacheSize.
  bool default_use_cache() const {
    return (G1ConcRSLogCacheSize > 0);
  }

 public:
  G1HotCardCache(G1CollectedHeap* g1h);
  ~G1HotCardCache();

  // Sets up the card counts table using the given backing storage.
  void initialize(G1RegionToSpaceMapper* card_counts_storage);

  bool use_cache() { return _use_cache; }

  // Enables the cache only if it is also enabled by default; passing
  // false always disables it.
  void set_use_cache(bool b) {
    _use_cache = (b ? default_use_cache() : false);
  }

  // added to the hot card cache.
  // If there is enough room in the hot card cache for the card we're
  // adding, NULL is returned and no further action is needed.
  // If we evict a card from the cache to make room for the new card,
  // the evicted card is then returned for refinement.
  jbyte* insert(jbyte* card_ptr);

  // Refine the cards whose refinement has been delayed as a result of
  // being in the cache.
  void drain(uint worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);

  // Set up for parallel processing of the cards in the hot cache
  void reset_hot_cache_claimed_index() {
    _hot_cache_par_claimed_idx = 0;
  }

  // Resets the hot card cache and discards the entries.
  // May only be called by the VM thread at a safepoint, so no other
  // thread can observe the reset in progress.
  void reset_hot_cache() {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
    assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
    _hot_cache_idx = 0; _n_hot = 0;
  }

  bool hot_cache_is_empty() { return _n_hot == 0; }

  // Zeros the values in the card counts table for entire committed heap
  void reset_card_counts();

  // Zeros the values in the card counts table for the given region
  void reset_card_counts(HeapRegion* hr);
};
127
128 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
|
37 class G1RemSet;
38 class HeapRegion;
39
40 // An evicting cache of cards that have been logged by the G1 post
41 // write barrier. Placing a card in the cache delays the refinement
42 // of the card until the card is evicted, or the cache is drained
43 // during the next evacuation pause.
44 //
45 // The first thing the G1 post write barrier does is to check whether
46 // the card containing the updated pointer is already dirty and, if
47 // so, skips the remaining code in the barrier.
48 //
// Delaying the refinement of a card keeps the card dirty, so a
// subsequent execution of the write barrier for that card sees it is
// already dirty and skips the remainder of the write barrier.
52 //
53 // This can significantly reduce the overhead of the write barrier
54 // code, increasing throughput.
55
class G1HotCardCache: public CHeapObj<mtGC> {

  G1CollectedHeap* _g1h;

  // Whether caching is currently enabled; see set_use_cache().
  bool _use_cache;

  // Per-card counts; presumably used by insert() to decide whether a
  // card is "hot" enough to cache -- the policy lives in the .cpp file.
  G1CardCounts _card_counts;

  // The card cache table
  jbyte** _hot_cache;

  // Number of entries in _hot_cache.
  size_t _hot_cache_size;

  // Number of cached cards a worker claims per chunk when draining the
  // cache in parallel.
  int _hot_cache_par_chunk_size;

  // Avoids false sharing when concurrently updating _hot_cache_idx or
  // _hot_cache_par_claimed_idx. These are never updated at the same time
  // thus it's not necessary to separate them as well
  char _pad_before[DEFAULT_CACHE_LINE_SIZE];

  // Insertion counter; reset to 0 by reset_hot_cache(). Presumably the
  // target slot is derived from it modulo _hot_cache_size -- confirm
  // against insert() in the .cpp file.
  volatile size_t _hot_cache_idx;

  // Next index to be claimed during a parallel drain; shared between
  // worker threads, hence volatile.
  volatile size_t _hot_cache_par_claimed_idx;

  char _pad_after[DEFAULT_CACHE_LINE_SIZE];

  // The number of cached cards a thread claims when flushing the cache
  static const int ClaimChunkSize = 32;

  // Caching is enabled by default only when a non-zero cache size was
  // requested via G1ConcRSLogCacheSize.
  bool default_use_cache() const {
    return (G1ConcRSLogCacheSize > 0);
  }

 public:
  G1HotCardCache(G1CollectedHeap* g1h);
  ~G1HotCardCache();

  // Sets up the card counts table using the given backing storage.
  void initialize(G1RegionToSpaceMapper* card_counts_storage);

  bool use_cache() { return _use_cache; }

  // Enables the cache only if it is also enabled by default; passing
  // false always disables it.
  void set_use_cache(bool b) {
    _use_cache = (b ? default_use_cache() : false);
  }

  // added to the hot card cache.
  // If there is enough room in the hot card cache for the card we're
  // adding, NULL is returned and no further action is needed.
  // If we evict a card from the cache to make room for the new card,
  // the evicted card is then returned for refinement.
  jbyte* insert(jbyte* card_ptr);

  // Refine the cards whose refinement has been delayed as a result of
  // being in the cache.
  void drain(uint worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);

  // Set up for parallel processing of the cards in the hot cache
  void reset_hot_cache_claimed_index() {
    _hot_cache_par_claimed_idx = 0;
  }

  // Resets the hot card cache and discards the entries.
  // May only be called by the VM thread at a safepoint. Every slot is
  // cleared to NULL (not just the index reset) so that stale entries
  // cannot be observed and hot_cache_is_empty() stays accurate.
  void reset_hot_cache() {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
    assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
    _hot_cache_idx = 0;
    for (size_t i = 0; i < _hot_cache_size; i++) {
      _hot_cache[i] = NULL;
    }
  }

  // After a reset insertion presumably starts at slot 0, so a NULL in
  // slot 0 means no card has been cached since the last reset.
  bool hot_cache_is_empty() { return _hot_cache[0] == NULL; }

  // Zeros the values in the card counts table for entire committed heap
  void reset_card_counts();

  // Zeros the values in the card counts table for the given region
  void reset_card_counts(HeapRegion* hr);
};
139
140 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
|