/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset), _buf(NULL), _index(0), _sz(0), _active(active),
  _permanent(permanent), _lock(NULL)
{}

PtrQueue::~PtrQueue() {
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}

void PtrQueue::flush_impl() {
  if (!_permanent && _buf != NULL) {
    if (_index == _sz) {
      // No work to do.
      qset()->deallocate_buffer(_buf);
    } else {
      // We must NULL out the unused entries, then enqueue.
      size_t limit = byte_index_to_index(_index);
      for (size_t i = 0; i < limit; ++i) {
        _buf[i] = NULL;
      }
      qset()->enqueue_complete_buffer(_buf);
    }
    _buf = NULL;
    _index = 0;
  }
}

void PtrQueue::enqueue_known_active(void* ptr) {
  assert(_index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= sizeof(void*);
  _buf[byte_index_to_index(_index)] = ptr;
  assert(_index <= _sz, "Invariant.");
}

void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer, as they
  // have the same rank and we may get the "possible deadlock" message.
  _lock->unlock();

  qset()->enqueue_complete_buffer(buf);
  // We must relock only because the caller will unlock, for the normal
  // case.
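  // While _lock was dropped above, another worker may have claimed this
  // queue's buffer and installed a fresh one; handle_zero_index() re-checks
  // _buf after this call returns so that such a buffer is not overwritten.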
  _lock->lock_without_safepoint_check();
}

PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _sz(0),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  _fl_owner = this;
}

PtrQueueSet::~PtrQueueSet() {
  // There are presently only a couple (derived) instances ever
  // created, and they are permanent, so no harm currently done by
  // doing nothing here.
}

void PtrQueueSet::initialize(Monitor* cbl_mon,
                             Mutex* fl_lock,
                             size_t process_completed_threshold,
                             size_t max_completed_queue,
                             PtrQueueSet* fl_owner) {
  _max_completed_queue = max_completed_queue;
  _process_completed_threshold = process_completed_threshold;
  _completed_queue_padding = 0;
  assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
  _fl_lock = fl_lock;
  _fl_owner = (fl_owner != NULL) ? fl_owner : this;
}

void** PtrQueueSet::allocate_buffer() {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  if (_fl_owner->_buf_free_list != NULL) {
    void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
    _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
    _fl_owner->_buf_free_list_sz--;
    return res;
  } else {
    // Allocate space for the BufferNode in front of the buffer.
    char* b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size(), mtGC);
    return BufferNode::make_buffer_from_block(b);
  }
}

void PtrQueueSet::deallocate_buffer(void** buf) {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  BufferNode* node = BufferNode::make_node_from_buffer(buf);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  while (n > 0) {
    assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
    void* b = BufferNode::make_block_from_node(_buf_free_list);
    _buf_free_list = _buf_free_list->next();
    FREE_C_HEAP_ARRAY(char, b);
    _buf_free_list_sz--;
    n--;
  }
}
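
// A free-list block is a single C-heap allocation: the BufferNode header
// sits in front of the buffer storage (see allocate_buffer above).
//
//   +----------------------------+------------------------------+
//   | BufferNode header          | buffer storage               |
//   | (BufferNode::aligned_size) | (_sz bytes of void* entries) |
//   +----------------------------+------------------------------+
//   ^ make_block_from_node()     ^ make_buffer_from_node()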

void PtrQueue::handle_zero_index() {
  assert(_index == 0, "Precondition.");

  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      assert(_index > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      // The current PtrQ may be the shared dirty card queue and
      // may be being manipulated by more than one worker thread
      // during a pause. Since the enqueueing of the completed
      // buffer unlocks the Shared_DirtyCardQ_lock, more than one
      // worker thread can 'race' on reading the shared queue attributes
      // (_buf and _index), and multiple threads can call into this
      // routine for the same buffer. This would cause the completed
      // buffer to be added to the CBL multiple times.

      // We "claim" the current buffer by caching the value of _buf in
      // a local and clearing the field while holding _lock. When
      // _lock is released (while enqueueing the completed buffer),
      // the thread that acquires _lock will skip this code,
      // preventing a subsequent multiple enqueue, and will
      // install a newly allocated buffer below.

      void** buf = _buf;   // local pointer to completed buffer
      _buf = NULL;         // clear shared _buf field

      locking_enqueue_completed_buffer(buf);  // enqueue completed buffer

      // While the current thread was enqueueing the buffer, another thread
      // may have allocated a new buffer and inserted it into this pointer
      // queue. If that happens then we just return, so that the current
      // thread doesn't overwrite the buffer allocated by the other thread
      // and potentially lose some dirtied cards.

      if (_buf != NULL) return;
    } else {
      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
        // Recycle the buffer. No allocation.
        _sz = qset()->buffer_size();
        _index = _sz;
        return;
      }
    }
  }
  // Reallocate the buffer
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
}
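
// Returns true if a Java thread processed the buffer in place (via
// mut_process_buffer), in which case the caller may recycle it; returns
// false if the buffer was handed to enqueue_complete_buffer and the
// caller must get a new one.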
bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(buf);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(buf);
  return false;
}

void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  BufferNode* cbn = BufferNode::new_from_buffer(buf);
  cbn->set_index(index);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed &&
      _n_completed_buffers >= _process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}

size_t PtrQueueSet::completed_buffers_list_length() {
  size_t n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_sz == 0 && sz > 0, "Should be called only once.");
  _sz = sz * sizeof(void*);
}

// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet* src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    if (src->_completed_buffers_head != NULL) {
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
}

void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
}
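
// Illustrative usage sketch only (not HotSpot code): the set class, monitor
// names, and sizes below are hypothetical. A derived queue set is wired up
// once at startup and then fed by per-thread PtrQueues:
//
//   MyPtrQueueSet qset;                 // derives from PtrQueueSet
//   qset.initialize(My_CBL_mon,         // completed-buffer-list monitor
//                   My_FL_lock,         // free-list lock
//                   20,                 // process_completed_threshold
//                   256,                // max_completed_queue
//                   NULL);              // fl_owner: own our free list
//   qset.set_buffer_size(1024);         // 1024 pointer-sized entries
//
// Each PtrQueue bound to the set fills its buffer from _sz down towards 0;
// when _index reaches 0, handle_zero_index() hands the full buffer to the
// set (or lets a Java thread process it directly) and installs a fresh one.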