/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_PTRQUEUE_HPP
#define SHARE_VM_GC_G1_PTRQUEUE_HPP

#include "memory/allocation.hpp"
#include "utilities/sizes.hpp"

// There are various techniques that require threads to be able to log
// addresses. For example, a generational write barrier might log
// the addresses of modified old-generation objects. This type supports
// this operation.

// The definition of placement operator new(size_t, void*) is in <new>.
#include <new>

class PtrQueueSet;
class PtrQueue VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  // Noncopyable - not defined.
  PtrQueue(const PtrQueue&);
  PtrQueue& operator=(const PtrQueue&);

  // The ptr queue set to which this queue belongs.
  PtrQueueSet* const _qset;

  // Whether updates should be logged.
  bool _active;

  // If true, the queue is permanent, and doesn't need to deallocate
  // its buffer in the destructor (since that obtains a lock which may not
  // be legally locked by then).
  const bool _permanent;

protected:
  // The buffer.
  void** _buf;
  // The (byte) index at which an object was last enqueued. Starts at "_sz"
  // (indicating an empty buffer) and goes towards zero.
  size_t _index;

  // The (byte) size of the buffer.
  size_t _sz;

  // If there is a lock associated with this buffer, this is that lock.
  Mutex* _lock;

  PtrQueueSet* qset() { return _qset; }
  bool is_permanent() const { return _permanent; }

  // Process queue entries and release resources, if not permanent.
  void flush_impl();

  // Initialize this queue to contain a null buffer, and be part of the
  // given PtrQueueSet.
  PtrQueue(PtrQueueSet* qset, bool permanent = false, bool active = false);

  // Requires queue flushed or permanent.
  ~PtrQueue();

public:

  // Associate a lock with a ptr queue.
  void set_lock(Mutex* lock) { _lock = lock; }

  void reset() { if (_buf != NULL) _index = _sz; }

  void enqueue(volatile void* ptr) {
    enqueue((void*)(ptr));
  }

  // Enqueues the given "ptr".
  void enqueue(void* ptr) {
    if (!_active) return;
    else enqueue_known_active(ptr);
  }
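
  // For illustration only (a sketch of the usual implementation in
  // ptrQueue.cpp, not part of this interface): each enqueue steps the
  // byte index down by one pointer width and stores through it,
  //
  //   _index -= sizeof(void*);
  //   _buf[byte_index_to_index(_index)] = ptr;
  //
  // so _index walks from _sz (empty buffer) toward 0 (full buffer); once
  // it reaches zero, handle_zero_index() hands the buffer to the qset or
  // recycles it.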

  // This method is called when we're doing the zero index handling
  // and gives a chance to the queues to do any pre-enqueueing
  // processing they might want to do on the buffer. It should return
  // true if the buffer should be enqueued, or false if enough
  // entries were cleared from it so that it can be re-used. It should
  // not return false if the buffer is still full (otherwise we can
  // get into an infinite loop).
  virtual bool should_enqueue_buffer() { return true; }
  void handle_zero_index();
  void locking_enqueue_completed_buffer(void** buf);

  void enqueue_known_active(void* ptr);

  size_t size() {
    assert(_sz >= _index, "Invariant.");
    return _buf == NULL ? 0 : _sz - _index;
  }

  bool is_empty() {
    return _buf == NULL || _sz == _index;
  }

  // Set the "active" property of the queue to "b". An enqueue to an
  // inactive queue is a no-op. Setting a queue to inactive resets its
  // log to the empty state.
  void set_active(bool b) {
    _active = b;
    if (!b && _buf != NULL) {
      _index = _sz;
    } else if (b && _buf != NULL) {
      assert(_index == _sz, "invariant: queues are empty when activated.");
    }
  }

  bool is_active() { return _active; }

  static size_t byte_index_to_index(size_t ind) {
    assert((ind % sizeof(void*)) == 0, "Invariant.");
    return ind / sizeof(void*);
  }

  // To support compiler.

protected:
  template<typename Derived>
  static ByteSize byte_offset_of_index() {
    return byte_offset_of(Derived, _index);
  }

  static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }

  template<typename Derived>
  static ByteSize byte_offset_of_buf() {
    return byte_offset_of(Derived, _buf);
  }

  static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }

  template<typename Derived>
  static ByteSize byte_offset_of_active() {
    return byte_offset_of(Derived, _active);
  }

  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }

};

class BufferNode {
  size_t _index;
  BufferNode* _next;
public:
  BufferNode() : _index(0), _next(NULL) { }
  BufferNode* next() const     { return _next;  }
  void set_next(BufferNode* n) { _next = n;     }
  size_t index() const         { return _index; }
  void set_index(size_t i)     { _index = i;    }

  // Align the size of the structure to the size of the pointer.
  static size_t aligned_size() {
    static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*));
    return alignment;
  }

  // BufferNode is allocated before the buffer.
  // The chunk of memory that holds both of them is a block.

  // Produce a new BufferNode given a buffer.
  static BufferNode* new_from_buffer(void** buf) {
    return new (make_block_from_buffer(buf)) BufferNode;
  }

  // The following are the required conversion routines:
  static BufferNode* make_node_from_buffer(void** buf) {
    return (BufferNode*)make_block_from_buffer(buf);
  }
  static void** make_buffer_from_node(BufferNode *node) {
    return make_buffer_from_block(node);
  }
  static void* make_block_from_node(BufferNode *node) {
    return (void*)node;
  }
  static void** make_buffer_from_block(void* p) {
    return (void**)((char*)p + aligned_size());
  }
  static void* make_block_from_buffer(void** p) {
    return (void*)((char*)p - aligned_size());
  }
};
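
// An illustrative picture of the block layout implied by the conversion
// routines above (an inference from this header, not additional API):
//
//   block --> +------------------------------------+
//             | BufferNode (aligned_size() bytes)  |
//   buffer -> +------------------------------------+
//             | void* entries (_sz bytes)          |
//             +------------------------------------+
//
// make_buffer_from_block() steps forward over the node header, and
// make_block_from_buffer() steps back by the same aligned_size() amount.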

// A PtrQueueSet represents resources common to a set of pointer queues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
// All these variables are protected by _cbl_mon.
class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
protected:
  Monitor* _cbl_mon;  // Protects the fields below.
  BufferNode* _completed_buffers_head;
  BufferNode* _completed_buffers_tail;
  int _n_completed_buffers;
  int _process_completed_threshold;
  volatile bool _process_completed;

  // This (and the interpretation of the first element as a "next"
  // pointer) are protected by the _fl_lock.
  Mutex* _fl_lock;
  BufferNode* _buf_free_list;
  size_t _buf_free_list_sz;
  // Queue sets can share a freelist. The _fl_owner variable
  // specifies the owner. It is set to "this" by default.
  PtrQueueSet* _fl_owner;

  // The size of all buffers in the set.
  size_t _sz;

  bool _all_active;

  // If true, notify_all on _cbl_mon when the threshold is reached.
  bool _notify_when_complete;

  // Maximum number of elements allowed on completed queue: after that,
  // enqueuer does the work itself. Zero indicates no maximum.
  int _max_completed_queue;
  int _completed_queue_padding;

  int completed_buffers_list_length();
  void assert_completed_buffer_list_len_correct_locked();
  void assert_completed_buffer_list_len_correct();

protected:
  // A mutator thread does the work of processing a buffer.
  // Returns "true" iff the work is complete (and the buffer may be
  // deallocated).
  virtual bool mut_process_buffer(void** buf) {
    ShouldNotReachHere();
    return false;
  }

  // Create an empty ptr queue set.
  PtrQueueSet(bool notify_when_complete = false);
  ~PtrQueueSet();

  // Because of init-order concerns, we can't pass these as constructor
  // arguments.
  void initialize(Monitor* cbl_mon,
                  Mutex* fl_lock,
                  int process_completed_threshold,
                  int max_completed_queue,
                  PtrQueueSet *fl_owner = NULL);

public:

  // Return an empty array of size _sz (required to be non-zero).
  void** allocate_buffer();

  // Return an empty buffer to the free list. The "buf" argument is
  // required to be a pointer to the head of an array of length "_sz".
  void deallocate_buffer(void** buf);

  // Declares that "buf" is a complete buffer.
  void enqueue_complete_buffer(void** buf, size_t index = 0);

  // To be invoked by the mutator.
  bool process_or_enqueue_complete_buffer(void** buf);

  bool completed_buffers_exist_dirty() {
    return _n_completed_buffers > 0;
  }

  bool process_completed_buffers() { return _process_completed; }
  void set_process_completed(bool x) { _process_completed = x; }

  bool is_active() { return _all_active; }

  // Set the buffer size. Must be called before any "enqueue" operation,
  // and should only be called once.
  void set_buffer_size(size_t sz);

  // Get the buffer size.
  size_t buffer_size() { return _sz; }

  // Get/Set the number of completed buffers that triggers log processing.
  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
  int process_completed_threshold() const { return _process_completed_threshold; }
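
  // A sketch of typical setup, for illustration only; "MyQueueSet" is a
  // hypothetical subclass (the constructor is protected), and the monitor
  // and lock names are placeholders, not part of this header:
  //
  //   MyQueueSet qset;
  //   qset.initialize(Example_CBL_mon, Example_FL_lock,
  //                   20 /* process_completed_threshold */,
  //                   0  /* max_completed_queue: zero => no maximum */);
  //   qset.set_buffer_size(256 * sizeof(void*));
  //
  // initialize() and set_buffer_size() must both happen before any queue
  // in the set enqueues entries.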

  // Must only be called at a safe point. Indicates that the buffer free
  // list size may be reduced, if that is deemed desirable.
  void reduce_free_list();

  int completed_buffers_num() { return _n_completed_buffers; }

  void merge_bufferlists(PtrQueueSet* src);

  void set_max_completed_queue(int m) { _max_completed_queue = m; }
  int max_completed_queue() { return _max_completed_queue; }

  void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
  int completed_queue_padding() { return _completed_queue_padding; }

  // Notify the consumer if the number of buffers crossed the threshold.
  void notify_if_necessary();
};

#endif // SHARE_VM_GC_G1_PTRQUEUE_HPP