rev 58017 : [mq]: 8238854-remove-superfluous-alloc-checks
/*
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"

PSOldGen*               ParCompactionManager::_old_gen = NULL;
ParCompactionManager**  ParCompactionManager::_manager_array = NULL;

ParCompactionManager::OopTaskQueueSet*      ParCompactionManager::_oop_task_queues = NULL;
ParCompactionManager::ObjArrayTaskQueueSet* ParCompactionManager::_objarray_task_queues = NULL;
ParCompactionManager::RegionTaskQueueSet*   ParCompactionManager::_region_task_queues = NULL;

ObjectStartArray*       ParCompactionManager::_start_array = NULL;
ParMarkBitMap*          ParCompactionManager::_mark_bitmap = NULL;
GrowableArray<size_t >* ParCompactionManager::_shadow_region_array = NULL;
Monitor*                ParCompactionManager::_shadow_region_monitor = NULL;

ParCompactionManager::ParCompactionManager() {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();
  _region_stack.initialize();

  reset_bitmap_query_cache();
}

void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(ParallelScavengeHeap::heap() != NULL,
         "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);

  _oop_task_queues = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_oop_task_queues != NULL, "Could not allocate oop task queues");
  _objarray_task_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  guarantee(_objarray_task_queues != NULL,
            "Could not allocate objarray task queues");
  _region_task_queues = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_task_queues != NULL, "Could not allocate region task queues");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for(uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    oop_task_queues()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_task_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_task_queues()->register_queue(i, _manager_array[i]->region_stack());
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
            "Could not create ParCompactionManager");
  assert(ParallelScavengeHeap::heap()->workers().total_workers() != 0,
         "Not initialized?");

  _shadow_region_array = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<size_t >(10, true);

  _shadow_region_monitor = new Monitor(Mutex::barrier, "CompactionManager monitor",
                                       Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never);
}

void ParCompactionManager::reset_all_bitmap_query_caches() {
  uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();
  for (uint i=0; i<=parallel_gc_threads; i++) {
    _manager_array[i]->reset_bitmap_query_cache();
  }
}


ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(uint index) {
  assert(index < ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

void ParCompactionManager::follow_marking_stacks() {
  do {
    // Drain the overflow stack first, to allow stealing from the marking stack.
    oop obj;
    while (marking_stack()->pop_overflow(obj)) {
      follow_contents(obj);
    }
    while (marking_stack()->pop_local(obj)) {
      follow_contents(obj);
    }

    // Process ObjArrays one at a time to avoid marking stack bloat.
    ObjArrayTask task;
    if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) {
      follow_array((objArrayOop)task.obj(), task.index());
    }
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}

void ParCompactionManager::drain_region_stacks() {
  do {
    // Drain overflow stack first so other threads can steal.
    size_t region_index;
    while (region_stack()->pop_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->pop_local(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
}

size_t ParCompactionManager::pop_shadow_region_mt_safe(PSParallelCompact::RegionData* region_ptr) {
  MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
  while (true) {
    if (!_shadow_region_array->is_empty()) {
      return _shadow_region_array->pop();
    }
    // Check if the corresponding heap region is available now.
    // If so, we don't need to get a shadow region anymore, and
    // we return InvalidShadow to indicate such a case.
    if (region_ptr->claimed()) {
      return InvalidShadow;
    }
    ml.wait(1);
  }
}

void ParCompactionManager::push_shadow_region_mt_safe(size_t shadow_region) {
  MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
  _shadow_region_array->push(shadow_region);
  ml.notify();
}

void ParCompactionManager::push_shadow_region(size_t shadow_region) {
  _shadow_region_array->push(shadow_region);
}

void ParCompactionManager::remove_all_shadow_regions() {
  _shadow_region_array->clear();
}