110
111 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
112 if (_records != NULL) {
113 _records[_sweep_index].traversal = _traversals;
114 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
115 _records[_sweep_index].invocation = _invocations;
116 _records[_sweep_index].compile_id = nm->compile_id();
117 _records[_sweep_index].kind = nm->compile_kind();
118 _records[_sweep_index].state = nm->_state;
119 _records[_sweep_index].vep = nm->verified_entry_point();
120 _records[_sweep_index].uep = nm->entry_point();
121 _records[_sweep_index].line = line;
122
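// Advance the ring-buffer cursor; the modulo wraps it back to slot 0 after
// SweeperLogEntries records, so the newest entry overwrites the oldest one.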
123 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
124 }
125 }
126 #else
127 #define SWEEP(nm)
128 #endif
129
130 nmethod* NMethodSweeper::_current = NULL; // Current nmethod
131 long NMethodSweeper::_traversals = 0; // Nof. stack traversals performed
132 int NMethodSweeper::_seen = 0; // Nof. nmethods we have processed so far in the current pass of the CodeCache
133 int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
134 int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
135 int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
136
137 volatile int NMethodSweeper::_invocations = 0; // Nof. invocations left until we have completed this pass
138 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
139
140 jint NMethodSweeper::_locked_seen = 0;
141 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
142 bool NMethodSweeper::_request_mark_phase = false;
143
144 int NMethodSweeper::_total_nof_methods_reclaimed = 0;
145 jlong NMethodSweeper::_total_time_sweeping = 0;
146 jlong NMethodSweeper::_total_time_this_sweep = 0;
147 jlong NMethodSweeper::_peak_sweep_time = 0;
148 jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
149 int NMethodSweeper::_hotness_counter_reset_val = 0;
150
166
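// Resets the hotness counter of every nmethod it visits. Applied below through
// Threads::nmethods_do, i.e., only to nmethods that have an activation on some
// Java thread's stack, so methods that are still being executed stay hot.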
167 class SetHotnessClosure: public CodeBlobClosure {
168 public:
169 virtual void do_code_blob(CodeBlob* cb) {
170 if (cb->is_nmethod()) {
171 nmethod* nm = (nmethod*)cb;
172 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
173 }
174 }
175 };
176 static SetHotnessClosure set_hotness_closure;
177
178
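// Illustrative arithmetic (made-up sizes): for ReservedCodeCacheSize = 240*M the
// reset value is (240*M / M) * 2 = 480; any cache smaller than 1*M is pinned to 1.
// Larger code caches thus give nmethods a longer decay window before they fall
// below the sweeping threshold computed in process_nmethod().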
179 int NMethodSweeper::hotness_counter_reset_val() {
180 if (_hotness_counter_reset_val == 0) {
181 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
182 }
183 return _hotness_counter_reset_val;
184 }
185 bool NMethodSweeper::sweep_in_progress() {
186 return (_current != NULL);
187 }
188
189 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
190 // No need to synchronize access, since 'mark_active_nmethods' is always executed at a
191 // safepoint.
192 void NMethodSweeper::mark_active_nmethods() {
193 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
194 // If we do not want to reclaim not-entrant or zombie methods there is no need
195 // to scan stacks
196 if (!MethodFlushing) {
197 return;
198 }
199
200 // Check for restart
201 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
202 if (!sweep_in_progress() && need_marking_phase()) {
203 _seen = 0;
204 _invocations = NmethodSweepFraction;
205 _current = CodeCache::first_nmethod();
206 _traversals += 1;
207 _total_time_this_sweep = 0;
208
209 if (PrintMethodFlushing) {
210 tty->print_cr("### Sweep: stack traversal %ld", _traversals);
211 }
212 Threads::nmethods_do(&mark_activation_closure);
213
214 // reset the flags since we started a scan from the beginning.
215 reset_nmethod_marking();
216 _locked_seen = 0;
217 _not_entrant_seen_on_stack = 0;
218 } else {
219 // Only set hotness counter
220 Threads::nmethods_do(&set_hotness_closure);
221 }
222
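// StoreStore barrier: order the sweeper-state stores above before any later
// stores, so the sweeper thread cannot observe a partially updated scan state.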
223 OrderAccess::storestore();
224 }
225
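// Atomically claim the sweep (0 -> 1). If the compare-and-swap returns a
// non-zero old value, another thread is already sweeping, so back out;
// _sweep_started is reset to 0 below once this sweep fraction is done.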
234 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
235 if (old != 0) {
236 return;
237 }
238 #ifdef ASSERT
239 if (LogSweeper && _records == NULL) {
240 // Create the ring buffer for the logging code
241 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
242 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
243 }
244 #endif
245 if (_invocations > 0) {
246 sweep_code_cache();
247 _invocations--;
248 }
249 _sweep_started = 0;
250 }
251 }
252
253 void NMethodSweeper::sweep_code_cache() {
254
255 jlong sweep_start_counter = os::elapsed_counter();
256
257 _flushed_count = 0;
258 _zombified_count = 0;
259 _marked_count = 0;
260
261 if (PrintMethodFlushing && Verbose) {
262 tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
263 }
264
265 if (!CompileBroker::should_compile_new_jobs()) {
266 // If we have turned off compilations we might as well do full sweeps
267 // in order to reach the clean state faster. Otherwise the sleeping compiler
268 // threads will slow down sweeping.
269 _invocations = 1;
270 }
271
272 // We want to visit all nmethods after NmethodSweepFraction
273 // invocations so divide the remaining number of nmethods by the
274 // remaining number of invocations. This is only an estimate since
275 // the number of nmethods changes during the sweep so the final
276 stage must iterate until there are no more nmethods.
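// Illustrative arithmetic (made-up numbers): with 3200 live nmethods, _seen == 0,
// and _invocations == 16 (its starting value is NmethodSweepFraction), this
// fraction processes roughly (3200 - 0) / 16 = 200 nmethods before yielding.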
277 int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
278 int swept_count = 0;
279
280
281 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
282 assert(!CodeCache_lock->owned_by_self(), "just checking");
283
284 int freed_memory = 0;
285 {
286 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
287
288 // The last invocation iterates until there are no more nmethods
289 for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
290 swept_count++;
291 if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
292 if (PrintMethodFlushing && Verbose) {
293 tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
294 }
295 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
296
297 assert(Thread::current()->is_Java_thread(), "should be java thread");
298 JavaThread* thread = (JavaThread*)Thread::current();
299 ThreadBlockInVM tbivm(thread);
300 thread->java_suspend_self();
301 }
302 // Since we will give up the CodeCache_lock, always skip ahead
303 // to the next nmethod. Other blobs can be deleted by other
304 // threads but nmethods are only reclaimed by the sweeper.
305 nmethod* next = CodeCache::next_nmethod(_current);
306
307 // Now ready to process nmethod and give up CodeCache_lock
308 {
309 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
310 freed_memory += process_nmethod(_current);
311 }
312 _seen++;
313 _current = next;
314 }
315 }
316
317 assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
318
319 if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
320 // we've completed a scan without making progress but there were
321 // nmethods we were unable to process either because they were
322 // locked or were still on stack. We don't have to aggressively
323 // clean them up so just stop scanning. We could scan once more
324 // but that complicates the control logic and it's unlikely to
325 // matter much.
326 if (PrintMethodFlushing) {
327 tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
328 }
329 }
330
331 jlong sweep_end_counter = os::elapsed_counter();
332 jlong sweep_time = sweep_end_counter - sweep_start_counter;
333 _total_time_sweeping += sweep_time;
334 _total_time_this_sweep += sweep_time;
335 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
336 _total_nof_methods_reclaimed += _flushed_count;
337
390 }
391 };
392
393 void NMethodSweeper::release_nmethod(nmethod *nm) {
394 // Clean up any CompiledICHolders
395 {
396 ResourceMark rm;
397 MutexLocker ml_patch(CompiledIC_lock);
398 RelocIterator iter(nm);
399 while (iter.next()) {
400 if (iter.type() == relocInfo::virtual_call_type) {
401 CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
402 }
403 }
404 }
405
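// flush() frees the nmethod's storage back to the code cache, which mutates
// shared code cache state and therefore must happen under the CodeCache_lock.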
406 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
407 nm->flush();
408 }
409
410 int NMethodSweeper::process_nmethod(nmethod *nm) {
411 assert(!CodeCache_lock->owned_by_self(), "just checking");
412
413 int freed_memory = 0;
414 // Make sure this nmethod doesn't get unloaded during the scan,
415 // since safepoints may happen while the locks below are acquired.
416 NMethodMarker nmm(nm);
417 SWEEP(nm);
418
419 // Skip methods that are currently referenced by the VM
420 if (nm->is_locked_by_vm()) {
421 // But still remember to clean-up inline caches for alive nmethods
422 if (nm->is_alive()) {
423 // Clean inline caches that point to zombie/non-entrant methods
424 MutexLocker cl(CompiledIC_lock);
425 nm->cleanup_inline_caches();
426 SWEEP(nm);
427 } else {
428 _locked_seen++;
429 SWEEP(nm);
430 }
482 SWEEP(nm);
483 // No inline caches will ever point to OSR methods, so we can just remove this nmethod
484 freed_memory = nm->total_size();
485 release_nmethod(nm);
486 _flushed_count++;
487 } else {
488 nm->make_zombie();
489 request_nmethod_marking();
490 _zombified_count++;
491 SWEEP(nm);
492 }
493 } else {
494 if (UseCodeCacheFlushing) {
495 if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
496 // Do not make native methods and OSR-methods not-entrant
497 nm->dec_hotness_counter();
498 // Get the initial value of the hotness counter. This value depends on the
499 // ReservedCodeCacheSize
500 int reset_val = hotness_counter_reset_val();
501 int time_since_reset = reset_val - nm->hotness_counter();
502 double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
503 // The less free space the code cache has, the bigger reverse_free_ratio() is.
504 // I.e., 'threshold' increases with lower available space in the code cache and a higher
505 // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
506 // value until it is reset by stack walking - is smaller than the computed threshold, the
507 // corresponding nmethod is considered for removal.
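// Illustrative arithmetic (made-up numbers): with reset_val = 480,
// NmethodSweepActivity = 10 and reverse_free_ratio() = 8 (only 1/8 of the code
// cache still free), threshold = -480 + 8 * 10 = -400. An nmethod whose counter
// has decayed below -400 also has time_since_reset = 480 - counter > 880, so
// both conditions below hold and it is made not-entrant.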
508 if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
509 // A method is marked as not-entrant if the method is
510 // 1) 'old enough': nm->hotness_counter() < threshold
511 // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
512 // The second condition is necessary if we are dealing with very small code caches
513 // (e.g., < 10 MB) that are too small to hold all hot methods.
514 // It ensures that methods are not made not-entrant immediately
515 // after compilation.
516 nm->make_not_entrant();
517 request_nmethod_marking();
518 }
519 }
520 }
521 // Clean up all inline caches that point to zombie/not-entrant methods
522 MutexLocker cl(CompiledIC_lock);
110
111 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
112 if (_records != NULL) {
113 _records[_sweep_index].traversal = _traversals;
114 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
115 _records[_sweep_index].invocation = _invocations;
116 _records[_sweep_index].compile_id = nm->compile_id();
117 _records[_sweep_index].kind = nm->compile_kind();
118 _records[_sweep_index].state = nm->_state;
119 _records[_sweep_index].vep = nm->verified_entry_point();
120 _records[_sweep_index].uep = nm->entry_point();
121 _records[_sweep_index].line = line;
122
123 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
124 }
125 }
126 #else
127 #define SWEEP(nm)
128 #endif
129
130 nmethod* NMethodSweeper::_current_nmethod = NULL; // Current nmethod
131 int NMethodSweeper::_current_type = 0; // Current CodeBlobType
132 long NMethodSweeper::_traversals = 0; // Nof. stack traversals performed
133 int NMethodSweeper::_seen = 0; // Nof. nmethods we have processed so far in the current pass of the CodeCache
134 int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
135 int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
136 int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep
137
138 volatile int NMethodSweeper::_invocations = 0; // Nof. invocations left until we have completed this pass
139 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
140
141 jint NMethodSweeper::_locked_seen = 0;
142 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
143 bool NMethodSweeper::_request_mark_phase = false;
144
145 int NMethodSweeper::_total_nof_methods_reclaimed = 0;
146 jlong NMethodSweeper::_total_time_sweeping = 0;
147 jlong NMethodSweeper::_total_time_this_sweep = 0;
148 jlong NMethodSweeper::_peak_sweep_time = 0;
149 jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
150 int NMethodSweeper::_hotness_counter_reset_val = 0;
151
167
168 class SetHotnessClosure: public CodeBlobClosure {
169 public:
170 virtual void do_code_blob(CodeBlob* cb) {
171 if (cb->is_nmethod()) {
172 nmethod* nm = (nmethod*)cb;
173 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
174 }
175 }
176 };
177 static SetHotnessClosure set_hotness_closure;
178
179
180 int NMethodSweeper::hotness_counter_reset_val() {
181 if (_hotness_counter_reset_val == 0) {
182 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
183 }
184 return _hotness_counter_reset_val;
185 }
186 bool NMethodSweeper::sweep_in_progress() {
187 return (_current_nmethod != NULL);
188 }
189
190 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
191 // No need to synchronize access, since 'mark_active_nmethods' is always executed at a
192 // safepoint.
193 void NMethodSweeper::mark_active_nmethods() {
194 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
195 // If we do not want to reclaim not-entrant or zombie methods there is no need
196 // to scan stacks
197 if (!MethodFlushing) {
198 return;
199 }
200
201 // Check for restart
202 assert(CodeCache::find_blob_unsafe(_current_nmethod) == _current_nmethod, "Sweeper nmethod cached state invalid");
203 if (!sweep_in_progress() && need_marking_phase()) {
204 _seen = 0;
205 _invocations = NmethodSweepFraction;
206 _current_nmethod = (nmethod*)CodeCache::first_blob(CodeBlobType::MethodNonProfiled);
207 _current_type = CodeBlobType::MethodNonProfiled;
208 _traversals += 1;
209 _total_time_this_sweep = 0;
210
211 if (PrintMethodFlushing) {
212 tty->print_cr("### Sweep: stack traversal %ld", _traversals);
213 }
214 Threads::nmethods_do(&mark_activation_closure);
215
216 // reset the flags since we started a scan from the beginning.
217 reset_nmethod_marking();
218 _locked_seen = 0;
219 _not_entrant_seen_on_stack = 0;
220 } else {
221 // Only set hotness counter
222 Threads::nmethods_do(&set_hotness_closure);
223 }
224
225 OrderAccess::storestore();
226 }
227
236 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
237 if (old != 0) {
238 return;
239 }
240 #ifdef ASSERT
241 if (LogSweeper && _records == NULL) {
242 // Create the ring buffer for the logging code
243 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
244 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
245 }
246 #endif
247 if (_invocations > 0) {
248 sweep_code_cache();
249 _invocations--;
250 }
251 _sweep_started = 0;
252 }
253 }
254
255 void NMethodSweeper::sweep_code_cache() {
256 jlong sweep_start_counter = os::elapsed_counter();
257
258 _flushed_count = 0;
259 _zombified_count = 0;
260 _marked_count = 0;
261
262 if (PrintMethodFlushing && Verbose) {
263 tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
264 }
265
266 if (!CompileBroker::should_compile_new_jobs()) {
267 // If we have turned off compilations we might as well do full sweeps
268 // in order to reach the clean state faster. Otherwise the sleeping compiler
269 // threads will slow down sweeping.
270 _invocations = 1;
271 }
272
273 // We want to visit all nmethods after NmethodSweepFraction
274 // invocations so divide the remaining number of nmethods by the
275 // remaining number of invocations. This is only an estimate since
276 // the number of nmethods changes during the sweep so the final
277 stage must iterate until there are no more nmethods.
278 int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
279 int swept_count = 0;
280
281
282 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
283 assert(!CodeCache_lock->owned_by_self(), "just checking");
284
285 int freed_memory = 0;
286 {
287 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
288
289 // The last invocation iterates until there are no more nmethods
290 while ((swept_count < todo || _invocations == 1) && _current_nmethod != NULL) {
291 swept_count++;
292 if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
293 if (PrintMethodFlushing && Verbose) {
294 tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
295 }
296 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
297
298 assert(Thread::current()->is_Java_thread(), "should be java thread");
299 JavaThread* thread = (JavaThread*)Thread::current();
300 ThreadBlockInVM tbivm(thread);
301 thread->java_suspend_self();
302 }
303 // Since we will give up the CodeCache_lock, always skip ahead
304 // to the next nmethod. Other blobs can be deleted by other
305 // threads but nmethods are only reclaimed by the sweeper.
306 nmethod* next = (nmethod*)CodeCache::next_blob(_current_nmethod, _current_type);
307
308 // Now ready to process nmethod and give up CodeCache_lock
309 {
310 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
311 freed_memory += process_nmethod(_current_nmethod, _current_type);
312 }
313 _seen++;
314
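// Note: this walk assumes the method code heap types are numbered consecutively
// from MethodNonProfiled up to MethodProfiled, so incrementing _current_type
// visits each method heap exactly once per sweep.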
315 while (next == NULL && _current_type < CodeBlobType::MethodProfiled) {
316 // We have reached the last method of the current type;
317 // advance to the next type that has methods available.
318 _current_type++;
319 next = (nmethod*)CodeCache::first_blob(_current_type);
320 }
321 _current_nmethod = next;
322 }
323 }
324
325 assert(_invocations > 1 || _current_nmethod == NULL, "must have scanned the whole cache");
326
327 if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
328 // we've completed a scan without making progress but there were
329 // nmethods we were unable to process either because they were
330 // locked or were still on stack. We don't have to aggressively
331 // clean them up so just stop scanning. We could scan once more
332 // but that complicates the control logic and it's unlikely to
333 // matter much.
334 if (PrintMethodFlushing) {
335 tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
336 }
337 }
338
339 jlong sweep_end_counter = os::elapsed_counter();
340 jlong sweep_time = sweep_end_counter - sweep_start_counter;
341 _total_time_sweeping += sweep_time;
342 _total_time_this_sweep += sweep_time;
343 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
344 _total_nof_methods_reclaimed += _flushed_count;
345
398 }
399 };
400
401 void NMethodSweeper::release_nmethod(nmethod *nm) {
402 // Clean up any CompiledICHolders
403 {
404 ResourceMark rm;
405 MutexLocker ml_patch(CompiledIC_lock);
406 RelocIterator iter(nm);
407 while (iter.next()) {
408 if (iter.type() == relocInfo::virtual_call_type) {
409 CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
410 }
411 }
412 }
413
414 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
415 nm->flush();
416 }
417
418 int NMethodSweeper::process_nmethod(nmethod *nm, int code_blob_type) {
419 assert(!CodeCache_lock->owned_by_self(), "just checking");
420
421 int freed_memory = 0;
422 // Make sure this nmethod doesn't get unloaded during the scan,
423 // since safepoints may happen while the locks below are acquired.
424 NMethodMarker nmm(nm);
425 SWEEP(nm);
426
427 // Skip methods that are currently referenced by the VM
428 if (nm->is_locked_by_vm()) {
429 // But still remember to clean-up inline caches for alive nmethods
430 if (nm->is_alive()) {
431 // Clean inline caches that point to zombie/non-entrant methods
432 MutexLocker cl(CompiledIC_lock);
433 nm->cleanup_inline_caches();
434 SWEEP(nm);
435 } else {
436 _locked_seen++;
437 SWEEP(nm);
438 }
490 SWEEP(nm);
491 // No inline caches will ever point to OSR methods, so we can just remove this nmethod
492 freed_memory = nm->total_size();
493 release_nmethod(nm);
494 _flushed_count++;
495 } else {
496 nm->make_zombie();
497 request_nmethod_marking();
498 _zombified_count++;
499 SWEEP(nm);
500 }
501 } else {
502 if (UseCodeCacheFlushing) {
503 if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
504 // Do not make native methods and OSR-methods not-entrant
505 nm->dec_hotness_counter();
506 // Get the initial value of the hotness counter. This value depends on the
507 // ReservedCodeCacheSize
508 int reset_val = hotness_counter_reset_val();
509 int time_since_reset = reset_val - nm->hotness_counter();
510 double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
511 // The less free space the code cache has, the bigger reverse_free_ratio() is.
512 // I.e., 'threshold' increases with lower available space in the code cache and a higher
513 // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
514 // value until it is reset by stack walking - is smaller than the computed threshold, the
515 // corresponding nmethod is considered for removal.
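// In contrast to the non-segmented variant above, reverse_free_ratio() is now
// evaluated per code heap (code_blob_type). Illustrative arithmetic (made-up
// numbers): reset_val = 480, NmethodSweepActivity = 10, and this nmethod's heap
// 7/8 full gives threshold = -480 + 8 * 10 = -400; pressure in one heap does
// not by itself raise the threshold of the others.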
516 if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
517 // A method is marked as not-entrant if the method is
518 // 1) 'old enough': nm->hotness_counter() < threshold
519 // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
520 // The second condition is necessary if we are dealing with very small code caches
521 // (e.g., < 10 MB) that are too small to hold all hot methods.
522 // It ensures that methods are not made not-entrant immediately
523 // after compilation.
524 nm->make_not_entrant();
525 request_nmethod_marking();
526 }
527 }
528 }
529 // Clean up all inline caches that point to zombie/not-entrant methods
530 MutexLocker cl(CompiledIC_lock);