/*
 * Copyright (c) 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/javaClasses.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"

// Sets up parallel root scanning for 'n_workers' GC workers during 'phase'.
// Records the start of worker activity in the heap's phase timings, and -- if
// the enclosing Shenandoah VM operation has not already run the safepoint
// cleanup pass -- obtains the sweeper's closure for marking active nmethods,
// so that marking is folded into the thread-stack walk below (see
// process_java_roots / Threads::possibly_parallel_oops_do).
ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers,
                                                 ShenandoahPhaseTimings::Phase phase) :
  _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),
  _srs(n_workers),
  _phase(phase),
  _coderoots_all_iterator(ShenandoahCodeRoots::iterator()),
  _om_iterator(ObjectSynchronizer::parallel_iterator()),
  _threads_nmethods_cl(NULL)
{
  heap->phase_timings()->record_workers_start(_phase);
  VM_ShenandoahOperation* op = (VM_ShenandoahOperation*) VMThread::vm_operation();
  if (op == NULL || !op->_safepoint_cleanup_done) {
    // Cleanup not yet done during this safepoint: piggyback nmethod marking
    // onto the root scan instead of running it as a separate pass.
    _threads_nmethods_cl = NMethodSweeper::prepare_mark_active_nmethods();
  }
}

// Tears down root scanning: releases the task-claim tracker, records the end
// of worker activity, and flags the current VM operation so that any later
// root processor created inside the same safepoint skips re-preparing the
// nmethod marking pass (see the constructor's check of _safepoint_cleanup_done).
ShenandoahRootProcessor::~ShenandoahRootProcessor() {
  delete _process_strong_tasks;
  ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
  VM_ShenandoahOperation* op = (VM_ShenandoahOperation*) VMThread::vm_operation();
  if (op != NULL) {
    op->_safepoint_cleanup_done = true;
  }
}

// Serial walk over every root category -- both strong and weak -- applying
// 'oops' to each root. No task claiming and no per-category timing is done
// here, unlike the parallel paths below.
// NOTE(review): the "_slow" suffix and the serial, exhaustive walk suggest
// this is a verification/debug path -- confirm against callers.
void ShenandoahRootProcessor::process_all_roots_slow(OopClosure* oops) {
  ShenandoahAlwaysTrueClosure always_true;

  CLDToOopClosure clds(oops);
  CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);

  Threads::possibly_parallel_oops_do(false, oops, &blobs);
  CodeCache::blobs_do(&blobs);
  ClassLoaderDataGraph::cld_do(&clds);
  Universe::oops_do(oops);
  Management::oops_do(oops);
  JvmtiExport::oops_do(oops);
  JNIHandles::oops_do(oops);
  JNIHandles::weak_oops_do(&always_true, oops);
  ObjectSynchronizer::oops_do(oops);
  SystemDictionary::roots_oops_do(oops, oops);
  StringTable::oops_do(oops);
}

// Parallel scan of strong roots only: weak CLDs are not walked (NULL passed to
// process_java_roots) and the StringTable is skipped (NULL weak_roots passed
// to process_vm_roots). Called by each worker; 'worker_id' attributes timing.
void ShenandoahRootProcessor::process_strong_roots(OopClosure* oops,
                                                   OopClosure* weak_oops,
                                                   CLDClosure* clds,
                                                   CodeBlobClosure* blobs,
                                                   uint worker_id) {

  process_java_roots(oops, clds, NULL, blobs, _threads_nmethods_cl, worker_id);
  process_vm_roots(oops, NULL, weak_oops, worker_id);

  _process_strong_tasks->all_tasks_completed(n_workers());
}

// Parallel scan of all roots, strong and weak. Unlike process_strong_roots,
// the same 'clds' closure is applied to both strong and weak CLDs, 'oops' is
// also used for the VM weak roots, and code-cache roots are walked via the
// all-code-roots iterator (timed under CodeCacheRoots).
void ShenandoahRootProcessor::process_all_roots(OopClosure* oops,
                                                OopClosure* weak_oops,
                                                CLDClosure* clds,
                                                CodeBlobClosure* blobs,
                                                uint worker_id) {

  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
  process_java_roots(oops, clds, clds, blobs, _threads_nmethods_cl, worker_id);
  process_vm_roots(oops, oops, weak_oops, worker_id);

  if (blobs != NULL) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
    _coderoots_all_iterator.possibly_parallel_blobs_do(blobs);
  }

  _process_strong_tasks->all_tasks_completed(n_workers());
}

// Walks Java-side roots: the class-loader-data graph (CLDG) and the Java
// thread stacks. 'nmethods_cl', when non-NULL, marks active nmethods while
// the thread stacks are walked (see constructor).
void ShenandoahRootProcessor::process_java_roots(OopClosure* strong_roots,
                                                 CLDClosure* strong_clds,
                                                 CLDClosure* weak_clds,
                                                 CodeBlobClosure* strong_code,
                                                 CodeBlobClosure* nmethods_cl,
                                                 uint worker_id)
{
  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
  // Iterating over the CLDG and the Threads are done early to allow us to
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the thread process the weak CLDs and nmethods.
  {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CLDGRoots, worker_id);
    _cld_iterator.root_cld_do(strong_clds, weak_clds);
  }

  {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
    bool is_par = n_workers() > 1;
    ResourceMark rm;
    Threads::possibly_parallel_oops_do(is_par, strong_roots, strong_code, nmethods_cl);
  }
}

// Walks VM-internal roots. Each flat root set (Universe, JNI handles,
// Management, JVMTI, SystemDictionary, ObjectSynchronizer) is claimed exactly
// once across all workers via SubTasksDone; JNI weak handles and the
// StringTable are processed only when the corresponding closure is non-NULL.
void ShenandoahRootProcessor::process_vm_roots(OopClosure* strong_roots,
                                               OopClosure* weak_roots,
                                               OopClosure* jni_weak_roots,
                                               uint worker_id)
{
  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_Universe_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::UniverseRoots, worker_id);
    Universe::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_JNIHandles_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JNIRoots, worker_id);
    JNIHandles::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_Management_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ManagementRoots, worker_id);
    Management::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_jvmti_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JVMTIRoots, worker_id);
    JvmtiExport::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_SystemDictionary_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::SystemDictionaryRoots, worker_id);
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }
  if (jni_weak_roots != NULL) {
    if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_JNIHandles_weak_oops_do)) {
      // AlwaysTrue: treat every JNI weak handle as alive for this pass.
      ShenandoahAlwaysTrueClosure always_true;
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JNIWeakRoots, worker_id);
      JNIHandles::weak_oops_do(&always_true, jni_weak_roots);
    }
  }

  {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ObjectSynchronizerRoots, worker_id);
    if (ShenandoahFastSyncRoots && MonitorInUseLists) {
      // Fast path: one worker claims and walks the whole monitor set.
      if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_ObjectSynchronizer_oops_do)) {
        ObjectSynchronizer::oops_do(strong_roots);
      }
    } else {
      // Parallel path: drain monitor chunks until the iterator is exhausted.
      while(_om_iterator.parallel_oops_do(strong_roots));
    }
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::StringTableRoots, worker_id);
    StringTable::possibly_parallel_oops_do(weak_roots);
  }
}

// Number of workers participating in this root-scanning scope.
uint ShenandoahRootProcessor::n_workers() const {
  return _srs.n_threads();
}

// Sets up root evacuation. Mirrors the ShenandoahRootProcessor constructor,
// but iterates only code roots that point into the collection set
// (cset_iterator), since evacuation only needs roots referencing cset objects.
ShenandoahRootEvacuator::ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_workers, ShenandoahPhaseTimings::Phase phase) :
  _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),
  _srs(n_workers),
  _phase(phase),
  _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator()),
  _threads_nmethods_cl(NULL)
{
  heap->phase_timings()->record_workers_start(_phase);
  VM_ShenandoahOperation* op = (VM_ShenandoahOperation*) VMThread::vm_operation();
  if (op == NULL || !op->_safepoint_cleanup_done) {
    // Piggyback nmethod marking onto the thread-stack walk (same as
    // ShenandoahRootProcessor's constructor).
    _threads_nmethods_cl = NMethodSweeper::prepare_mark_active_nmethods();
  }
}

// Mirrors ~ShenandoahRootProcessor: release claim tracker, close the timing
// window, and mark safepoint cleanup as done on the current VM operation.
ShenandoahRootEvacuator::~ShenandoahRootEvacuator() {
  delete _process_strong_tasks;
  ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
  VM_ShenandoahOperation* op = (VM_ShenandoahOperation*) VMThread::vm_operation();
  if (op != NULL) {
    op->_safepoint_cleanup_done = true;
  }
}

// Evacuation root scan: walks the Java thread stacks (no code-blob closure --
// NULL is passed, nmethod marking still piggybacked via _threads_nmethods_cl)
// and then the collection-set code roots under the CodeCacheRoots timer.
void ShenandoahRootEvacuator::process_evacuate_roots(OopClosure* oops,
                                                     CodeBlobClosure* blobs,
                                                     uint worker_id) {

  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
  {
    bool is_par = n_workers() > 1;
    ResourceMark rm;
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);

    Threads::possibly_parallel_oops_do(is_par, oops, NULL, _threads_nmethods_cl);
  }

  if (blobs != NULL) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
    _coderoots_cset_iterator.possibly_parallel_blobs_do(blobs);
  }

  _process_strong_tasks->all_tasks_completed(n_workers());
}

// Number of workers participating in this evacuation scope.
uint ShenandoahRootEvacuator::n_workers() const {
  return _srs.n_threads();
}

// Implementation of ParallelCLDRootIterator
// Construction must happen at a safepoint; clearing the claimed marks resets
// the CLDG so that the subsequent root_cld_do can claim CLDs afresh.
ParallelCLDRootIterator::ParallelCLDRootIterator() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint");
  ClassLoaderDataGraph::clear_claimed_marks();
}

// Applies 'strong' to strong CLD roots and 'weak' to the rest; claiming in
// roots_cld_do makes this safe to call from multiple workers.
void ParallelCLDRootIterator::root_cld_do(CLDClosure* strong, CLDClosure* weak) {
  ClassLoaderDataGraph::roots_cld_do(strong, weak);
}