/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/workgroup.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"

// Represents a set of free small integer ids.
class FreeIdSet : public CHeapObj<mtGC> {
  enum {
    end_of_list = UINT_MAX,
    claimed = UINT_MAX - 1
  };

  uint _size;
  Monitor* _mon;

  uint* _ids;
  uint _hd;
  uint _waiters;
  uint _claimed;

public:
  FreeIdSet(uint size, Monitor* mon);
  ~FreeIdSet();

  // Returns an unclaimed parallel id (waiting for one to be released if
  // necessary).
  uint claim_par_id();

  void release_par_id(uint id);
};

FreeIdSet::FreeIdSet(uint size, Monitor* mon) :
  _size(size), _mon(mon), _hd(0), _waiters(0), _claimed(0)
{
  guarantee(size != 0, "must be");
  _ids = NEW_C_HEAP_ARRAY(uint, size, mtGC);
  for (uint i = 0; i < size - 1; i++) {
    _ids[i] = i + 1;
  }
  _ids[size - 1] = end_of_list; // end of list.
}

FreeIdSet::~FreeIdSet() {
  FREE_C_HEAP_ARRAY(uint, _ids);
}

uint FreeIdSet::claim_par_id() {
  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
  while (_hd == end_of_list) {
    _waiters++;
    _mon->wait(Mutex::_no_safepoint_check_flag);
    _waiters--;
  }
  uint res = _hd;
  _hd = _ids[res];
  _ids[res] = claimed; // For debugging.
  _claimed++;
  return res;
}

void FreeIdSet::release_par_id(uint id) {
  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
  assert(_ids[id] == claimed, "Precondition.");
  _ids[id] = _hd;
  _hd = id;
  _claimed--;
  if (_waiters > 0) {
    _mon->notify_all();
  }
}
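
// The free list above is threaded through the _ids array itself: _hd is the
// first free id, and _ids[i] is the id that follows i (or end_of_list).
// claim_par_id() pops the head, blocking on _mon while the list is empty;
// release_par_id() pushes an id back and wakes any waiters.  Typical use
// (see DirtyCardQueueSet::mut_process_buffer() below):
//
//   uint worker_i = _free_ids->claim_par_id();
//   // ... process a buffer under this worker id ...
//   _free_ids->release_par_id(worker_i);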

DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, permanent, true /* active */)
{ }

DirtyCardQueue::~DirtyCardQueue() {
  if (!is_permanent()) {
    flush();
  }
}

bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
                                   bool consume,
                                   uint worker_i) {
  bool res = true;
  if (_buf != NULL) {
    res = apply_closure_to_buffer(cl, _buf, _index, _sz,
                                  consume,
                                  worker_i);
    if (res && consume) {
      _index = _sz;
    }
  }
  return res;
}

bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                             void** buf,
                                             size_t index, size_t sz,
                                             bool consume,
                                             uint worker_i) {
  if (cl == NULL) return true;
  size_t limit = byte_index_to_index(sz);
  for (size_t i = byte_index_to_index(index); i < limit; ++i) {
    jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
    if (card_ptr != NULL) {
      // Set the entry to null, so we don't do it again (via the test
      // above) if we reconsider this buffer.
      if (consume) {
        buf[i] = NULL;
      }
      if (!cl->do_card_ptr(card_ptr, worker_i)) {
        return false;
      }
    }
  }
  return true;
}
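
// Note on buffer indexing: index and sz above are byte offsets into buf, as
// elsewhere in PtrQueue, and byte_index_to_index() converts them to entry
// indexes.  A queue fills from sz down toward 0, so the live entries occupy
// [index, sz) and the slots below index are unused.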

DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _mut_process_closure(NULL),
  _shared_dirty_card_queue(this, true /* permanent */),
  _free_ids(NULL),
  _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
{
  _all_active = true;
}

// Determines how many mutator threads can process the buffers in parallel.
uint DirtyCardQueueSet::num_par_ids() {
  return (uint)os::processor_count();
}

void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl,
                                   Monitor* cbl_mon,
                                   Mutex* fl_lock,
                                   size_t process_completed_threshold,
                                   size_t max_completed_queue,
                                   Mutex* lock,
                                   DirtyCardQueueSet* fl_owner,
                                   bool init_free_ids) {
  _mut_process_closure = cl;
  PtrQueueSet::initialize(cbl_mon,
                          fl_lock,
                          process_completed_threshold,
                          max_completed_queue,
                          fl_owner);
  set_buffer_size(G1UpdateBufferSize);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
  }
}

void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  t->dirty_card_queue().handle_zero_index();
}

bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
  guarantee(_free_ids != NULL, "must be");

  // claim a par id
  uint worker_i = _free_ids->claim_par_id();

  bool b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0,
                                                   _sz, true, worker_i);
  if (b) {
    Atomic::inc(&_processed_buffers_mut);
  }

  // release the id
  _free_ids->release_par_id(worker_i);

  return b;
}


BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
  BufferNode* nd = NULL;
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if (_n_completed_buffers <= stop_at) {
    _process_completed = false;
    return NULL;
  }

  if (_completed_buffers_head != NULL) {
    nd = _completed_buffers_head;
    assert(_n_completed_buffers > 0, "Invariant");
    _completed_buffers_head = nd->next();
    _n_completed_buffers--;
    if (_completed_buffers_head == NULL) {
      assert(_n_completed_buffers == 0, "Invariant");
      _completed_buffers_tail = NULL;
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  return nd;
}

bool
DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                                     uint worker_i,
                                                     size_t stop_at,
                                                     bool during_pause) {
  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  if (nd == NULL) {
    return false;
  } else {
    void** buf = BufferNode::make_buffer_from_node(nd);
    size_t index = nd->index();
    if (DirtyCardQueue::apply_closure_to_buffer(cl,
                                                buf, index, _sz,
                                                true, worker_i)) {
      // Done with fully processed buffer.
      deallocate_buffer(buf);
      Atomic::inc(&_processed_buffers_rs_thread);
      return true;
    } else {
      // Return partially processed buffer to the queue.
      enqueue_complete_buffer(buf, index);
      return false;
    }
  }
}

void DirtyCardQueueSet::apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
  BufferNode* nd = _completed_buffers_head;
  while (nd != NULL) {
    bool b =
      DirtyCardQueue::apply_closure_to_buffer(cl,
                                              BufferNode::make_buffer_from_node(nd),
                                              0, _sz, false);
    guarantee(b, "Should not stop early.");
    nd = nd->next();
  }
}
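
// The parallel version below lets multiple workers walk the completed-buffer
// list concurrently: each worker claims the node at _cur_par_buffer_node by
// advancing that pointer to the node's successor with a CAS, so every buffer
// is processed by exactly one worker.  The caller is expected to point
// _cur_par_buffer_node at the head of the list before the workers start.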

void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
  BufferNode* nd = _cur_par_buffer_node;
  while (nd != NULL) {
    BufferNode* next = (BufferNode*)nd->next();
    BufferNode* actual = (BufferNode*)Atomic::cmpxchg_ptr((void*)next, (volatile void*)&_cur_par_buffer_node, (void*)nd);
    if (actual == nd) {
      bool b =
        DirtyCardQueue::apply_closure_to_buffer(cl,
                                                BufferNode::make_buffer_from_node(actual),
                                                0, _sz, false);
      guarantee(b, "Should not stop early.");
      nd = next;
    } else {
      nd = actual;
    }
  }
}

// Deallocates any completed log buffers
void DirtyCardQueueSet::clear() {
  BufferNode* buffers_to_delete = NULL;
  {
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _n_completed_buffers = 0;
    _completed_buffers_tail = NULL;
    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  }
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
  }
}

void DirtyCardQueueSet::abandon_logs() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  clear();
  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    t->dirty_card_queue().reset();
  }
  shared_dirty_card_queue()->reset();
}

void DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads, if we find a partial log add it to
  // the global list of logs.  Temporarily turn off the limit on the number
  // of outstanding buffers.
  size_t save_max_completed_queue = _max_completed_queue;
  _max_completed_queue = SIZE_MAX;
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    DirtyCardQueue& dcq = t->dirty_card_queue();
    if (dcq.size() != 0) {
      void** buf = dcq.get_buf();
      // We must NULL out the unused entries, then enqueue.
      size_t limit = dcq.byte_index_to_index(dcq.get_index());
      for (size_t i = 0; i < limit; ++i) {
        buf[i] = NULL;
      }
      enqueue_complete_buffer(dcq.get_buf(), dcq.get_index());
      dcq.reinitialize();
    }
  }
  if (_shared_dirty_card_queue.size() != 0) {
    enqueue_complete_buffer(_shared_dirty_card_queue.get_buf(),
                            _shared_dirty_card_queue.get_index());
    _shared_dirty_card_queue.reinitialize();
  }
  // Restore the completed buffer queue limit.
  _max_completed_queue = save_max_completed_queue;
}
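
// Both abandon_logs() and concatenate_logs() run only at safepoints: the
// former discards all completed buffers and resets the per-thread and shared
// queues, while the latter moves every partial per-thread (and shared) log
// onto the completed-buffer list, lifting _max_completed_queue for the
// duration so the enqueues cannot hit the limit.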