rev 58017 : [mq]: 8238854-remove-superfluous-alloc-checks
/*
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"

PSOldGen*              ParCompactionManager::_old_gen = NULL;
ParCompactionManager** ParCompactionManager::_manager_array = NULL;

ParCompactionManager::OopTaskQueueSet*      ParCompactionManager::_oop_task_queues = NULL;
ParCompactionManager::ObjArrayTaskQueueSet* ParCompactionManager::_objarray_task_queues = NULL;
ParCompactionManager::RegionTaskQueueSet*   ParCompactionManager::_region_task_queues = NULL;

ObjectStartArray*       ParCompactionManager::_start_array = NULL;
ParMarkBitMap*          ParCompactionManager::_mark_bitmap = NULL;
GrowableArray<size_t >* ParCompactionManager::_shadow_region_array = NULL;
Monitor*                ParCompactionManager::_shadow_region_monitor = NULL;

ParCompactionManager::ParCompactionManager() {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();
  _region_stack.initialize();

  reset_bitmap_query_cache();
}

void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(ParallelScavengeHeap::heap() != NULL,
         "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);

  _oop_task_queues = new OopTaskQueueSet(parallel_gc_threads);
  _objarray_task_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  _region_task_queues = new RegionTaskQueueSet(parallel_gc_threads);

  // Create and register the ParCompactionManager(s) for the worker threads.
  for(uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    oop_task_queues()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_task_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_task_queues()->register_queue(i, _manager_array[i]->region_stack());
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  assert(ParallelScavengeHeap::heap()->workers().total_workers() != 0,
         "Not initialized?");

  _shadow_region_array = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<size_t >(10, true);

  _shadow_region_monitor = new Monitor(Mutex::barrier, "CompactionManager monitor",
                                       Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never);
}

void ParCompactionManager::reset_all_bitmap_query_caches() {
  uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();
  for (uint i=0; i<=parallel_gc_threads; i++) {
    _manager_array[i]->reset_bitmap_query_cache();
  }
}

ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(uint index) {
  assert(index < ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

void ParCompactionManager::follow_marking_stacks() {
  do {
    // Drain the overflow stack first, to allow stealing from the marking stack.
    oop obj;
    while (marking_stack()->pop_overflow(obj)) {
      follow_contents(obj);
    }
    while (marking_stack()->pop_local(obj)) {
      follow_contents(obj);
    }

    // Process ObjArrays one at a time to avoid marking stack bloat.
    ObjArrayTask task;
    if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) {
      follow_array((objArrayOop)task.obj(), task.index());
    }
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}

void ParCompactionManager::drain_region_stacks() {
  do {
    // Drain overflow stack first so other threads can steal.
    size_t region_index;
    while (region_stack()->pop_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->pop_local(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
}

size_t ParCompactionManager::pop_shadow_region_mt_safe(PSParallelCompact::RegionData* region_ptr) {
  MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
  while (true) {
    if (!_shadow_region_array->is_empty()) {
      return _shadow_region_array->pop();
    }
    // Check if the corresponding heap region is available now.
    // If so, we don't need to get a shadow region anymore, and
    // we return InvalidShadow to indicate such a case.
    if (region_ptr->claimed()) {
      return InvalidShadow;
    }
    ml.wait(1);
  }
}

void ParCompactionManager::push_shadow_region_mt_safe(size_t shadow_region) {
  MonitorLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
  _shadow_region_array->push(shadow_region);
  ml.notify();
}

void ParCompactionManager::push_shadow_region(size_t shadow_region) {
  _shadow_region_array->push(shadow_region);
}

void ParCompactionManager::remove_all_shadow_regions() {
  _shadow_region_array->clear();
}
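For orientation, here is a minimal sketch of how a GC worker could use the per-thread managers that initialize() sets up above. It only calls entry points defined in this file (gc_thread_compaction_manager, follow_marking_stacks, drain_region_stacks); the function name example_worker_body and the phase ordering are illustrative assumptions, not the actual task code in psParallelCompact.cpp, and work stealing between the registered queue sets is omitted.

// Illustrative sketch only; assumes the same includes as the file above.
// Each worker (and the VMThread, at index parallel_gc_threads) looks up its
// own ParCompactionManager and drains its private stacks.
static void example_worker_body(uint worker_id) {
  ParCompactionManager* cm =
      ParCompactionManager::gc_thread_compaction_manager(worker_id);

  // Marking: follow everything pushed onto this worker's marking and
  // objArray stacks until both are empty.
  cm->follow_marking_stacks();

  // Compaction: fill and update every region pushed onto this worker's
  // region stack.
  cm->drain_region_stacks();
}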