110 }
111
112 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
113 if (_records != NULL) {
114 _records[_sweep_index].traversal = _traversals;
115 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
116 _records[_sweep_index].invocation = _sweep_fractions_left;
117 _records[_sweep_index].compile_id = nm->compile_id();
118 _records[_sweep_index].kind = nm->compile_kind();
119 _records[_sweep_index].state = nm->_state;
120 _records[_sweep_index].vep = nm->verified_entry_point();
121 _records[_sweep_index].uep = nm->entry_point();
122 _records[_sweep_index].line = line;
123 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
124 }
125 }
126 #else
127 #define SWEEP(nm)
128 #endif
129
nmethod*  NMethodSweeper::_current                      = NULL; // Current nmethod
long      NMethodSweeper::_traversals                   = 0;    // Stack scan count, also sweep ID.
long      NMethodSweeper::_total_nof_code_cache_sweeps  = 0;    // Total number of full sweeps of the code cache
long      NMethodSweeper::_time_counter                 = 0;    // Virtual time used to periodically invoke sweeper
long      NMethodSweeper::_last_sweep                   = 0;    // Value of _time_counter when the last sweep happened
int       NMethodSweeper::_seen                         = 0;    // Nof. nmethod we have currently processed in current pass of CodeCache
int       NMethodSweeper::_flushed_count                = 0;    // Nof. nmethods flushed in current sweep
int       NMethodSweeper::_zombified_count              = 0;    // Nof. nmethods made zombie in current sweep
int       NMethodSweeper::_marked_for_reclamation_count = 0;    // Nof. nmethods marked for reclaim in current sweep

volatile bool NMethodSweeper::_should_sweep             = true; // Indicates if we should invoke the sweeper
volatile int  NMethodSweeper::_sweep_fractions_left     = 0;    // Nof. invocations left until we are completed with this pass
volatile int  NMethodSweeper::_sweep_started            = 0;    // Flag to control conc sweeper
volatile int  NMethodSweeper::_bytes_changed            = 0;    // Counts the total nmethod size if the nmethod changed from:
                                                                //   1) alive       -> not_entrant
                                                                //   2) not_entrant -> zombie
                                                                //   3) zombie      -> marked_for_reclamation
int    NMethodSweeper::_hotness_counter_reset_val       = 0;    // Lazily initialized; see hotness_counter_reset_val()

long   NMethodSweeper::_total_nof_methods_reclaimed     = 0;    // Accumulated nof methods flushed
long   NMethodSweeper::_total_nof_c2_methods_reclaimed  = 0;    // Accumulated nof C2 methods flushed
size_t NMethodSweeper::_total_flushed_size              = 0;    // Total number of bytes flushed from the code cache
Tickspan NMethodSweeper::_total_time_sweeping;                  // Accumulated time sweeping
Tickspan NMethodSweeper::_total_time_this_sweep;                // Total time this sweep
Tickspan NMethodSweeper::_peak_sweep_time;                      // Peak time for a full sweep
Tickspan NMethodSweeper::_peak_sweep_fraction_time;             // Peak time sweeping one fraction
157
158
159 class MarkActivationClosure: public CodeBlobClosure {
160 public:
161 virtual void do_code_blob(CodeBlob* cb) {
162 if (cb->is_nmethod()) {
163 nmethod* nm = (nmethod*)cb;
164 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
165 // If we see an activation belonging to a non_entrant nmethod, we mark it.
166 if (nm->is_not_entrant()) {
167 nm->mark_as_seen_on_stack();
168 }
169 }
170 }
171 };
172 static MarkActivationClosure mark_activation_closure;
173
174 class SetHotnessClosure: public CodeBlobClosure {
175 public:
176 virtual void do_code_blob(CodeBlob* cb) {
177 if (cb->is_nmethod()) {
178 nmethod* nm = (nmethod*)cb;
179 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
180 }
181 }
182 };
183 static SetHotnessClosure set_hotness_closure;
184
185
186 int NMethodSweeper::hotness_counter_reset_val() {
187 if (_hotness_counter_reset_val == 0) {
188 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
189 }
190 return _hotness_counter_reset_val;
191 }
192 bool NMethodSweeper::sweep_in_progress() {
193 return (_current != NULL);
194 }
195
196 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
197 // No need to synchronize access, since 'mark_active_nmethods' is always executed at a
198 // safepoint.
199 void NMethodSweeper::mark_active_nmethods() {
200 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
201 // If we do not want to reclaim not-entrant or zombie methods there is no need
202 // to scan stacks
203 if (!MethodFlushing) {
204 return;
205 }
206
207 // Increase time so that we can estimate when to invoke the sweeper again.
208 _time_counter++;
209
210 // Check for restart
211 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
212 if (!sweep_in_progress()) {
213 _seen = 0;
214 _sweep_fractions_left = NmethodSweepFraction;
215 _current = CodeCache::first_nmethod();
216 _traversals += 1;
217 _total_time_this_sweep = Tickspan();
218
219 if (PrintMethodFlushing) {
220 tty->print_cr("### Sweep: stack traversal %d", _traversals);
221 }
222 Threads::nmethods_do(&mark_activation_closure);
223
224 } else {
225 // Only set hotness counter
226 Threads::nmethods_do(&set_hotness_closure);
227 }
228
229 OrderAccess::storestore();
230 }
231 /**
232 * This function invokes the sweeper if at least one of the three conditions is met:
233 * (1) The code cache is getting full
234 * (2) There are sufficient state changes in/since the last sweep.
235 * (3) We have not been sweeping for 'some time'
250 // the formula considers how much space in the code cache is currently used. Here are
251 // some examples that will (hopefully) help in understanding.
252 //
253 // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
254 // the result of the division is 0. This
255 // keeps the used code cache size small
256 // (important for embedded Java)
257 // Large ReservedCodeCacheSize : (e.g., 256M + code cache is 10% full). The formula
258 // computes: (256 / 16) - 1 = 15
259 // As a result, we invoke the sweeper after
260 // 15 invocations of 'mark_active_nmethods.
261 // Large ReservedCodeCacheSize: (e.g., 256M + code Cache is 90% full). The formula
262 // computes: (256 / 16) - 10 = 6.
263 if (!_should_sweep) {
264 const int time_since_last_sweep = _time_counter - _last_sweep;
265 // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
266 // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
267 // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
268 // value) that disables the intended periodic sweeps.
269 const int max_wait_time = ReservedCodeCacheSize / (16 * M);
270 double wait_until_next_sweep = max_wait_time - time_since_last_sweep - CodeCache::reverse_free_ratio();
271 assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
272
273 if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
274 _should_sweep = true;
275 }
276 }
277
278 if (_should_sweep && _sweep_fractions_left > 0) {
279 // Only one thread at a time will sweep
280 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
281 if (old != 0) {
282 return;
283 }
284 #ifdef ASSERT
285 if (LogSweeper && _records == NULL) {
286 // Create the ring buffer for the logging code
287 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
288 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
289 }
290 #endif
332 _sweep_fractions_left = 1;
333 }
334
335 // We want to visit all nmethods after NmethodSweepFraction
336 // invocations so divide the remaining number of nmethods by the
337 // remaining number of invocations. This is only an estimate since
338 // the number of nmethods changes during the sweep so the final
339 // stage must iterate until it there are no more nmethods.
340 int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
341 int swept_count = 0;
342
343
344 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
345 assert(!CodeCache_lock->owned_by_self(), "just checking");
346
347 int freed_memory = 0;
348 {
349 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
350
351 // The last invocation iterates until there are no more nmethods
352 for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
353 swept_count++;
354 if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
355 if (PrintMethodFlushing && Verbose) {
356 tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
357 }
358 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
359
360 assert(Thread::current()->is_Java_thread(), "should be java thread");
361 JavaThread* thread = (JavaThread*)Thread::current();
362 ThreadBlockInVM tbivm(thread);
363 thread->java_suspend_self();
364 }
365 // Since we will give up the CodeCache_lock, always skip ahead
366 // to the next nmethod. Other blobs can be deleted by other
367 // threads but nmethods are only reclaimed by the sweeper.
368 nmethod* next = CodeCache::next_nmethod(_current);
369
370 // Now ready to process nmethod and give up CodeCache_lock
371 {
372 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
373 freed_memory += process_nmethod(_current);
374 }
375 _seen++;
376 _current = next;
377 }
378 }
379
380 assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");
381
382 const Ticks sweep_end_counter = Ticks::now();
383 const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
384 _total_time_sweeping += sweep_time;
385 _total_time_this_sweep += sweep_time;
386 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
387 _total_flushed_size += freed_memory;
388 _total_nof_methods_reclaimed += _flushed_count;
389
390 EventSweepCodeCache event(UNTIMED);
391 if (event.should_commit()) {
392 event.set_starttime(sweep_start_counter);
393 event.set_endtime(sweep_end_counter);
394 event.set_sweepIndex(_traversals);
395 event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
396 event.set_sweptCount(swept_count);
397 event.set_flushedCount(_flushed_count);
398 event.set_markedCount(_marked_for_reclamation_count);
399 event.set_zombifiedCount(_zombified_count);
400 event.commit();
468 }
469 };
470
471 void NMethodSweeper::release_nmethod(nmethod *nm) {
472 // Clean up any CompiledICHolders
473 {
474 ResourceMark rm;
475 MutexLocker ml_patch(CompiledIC_lock);
476 RelocIterator iter(nm);
477 while (iter.next()) {
478 if (iter.type() == relocInfo::virtual_call_type) {
479 CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
480 }
481 }
482 }
483
484 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
485 nm->flush();
486 }
487
488 int NMethodSweeper::process_nmethod(nmethod *nm) {
489 assert(!CodeCache_lock->owned_by_self(), "just checking");
490
491 int freed_memory = 0;
492 // Make sure this nmethod doesn't get unloaded during the scan,
493 // since safepoints may happen during acquired below locks.
494 NMethodMarker nmm(nm);
495 SWEEP(nm);
496
497 // Skip methods that are currently referenced by the VM
498 if (nm->is_locked_by_vm()) {
499 // But still remember to clean-up inline caches for alive nmethods
500 if (nm->is_alive()) {
501 // Clean inline caches that point to zombie/non-entrant methods
502 MutexLocker cl(CompiledIC_lock);
503 nm->cleanup_inline_caches();
504 SWEEP(nm);
505 }
506 return freed_memory;
507 }
508
560 if (nm->is_compiled_by_c2()) {
561 _total_nof_c2_methods_reclaimed++;
562 }
563 release_nmethod(nm);
564 _flushed_count++;
565 } else {
566 // Code cache state change is tracked in make_zombie()
567 nm->make_zombie();
568 _zombified_count++;
569 SWEEP(nm);
570 }
571 } else {
572 if (UseCodeCacheFlushing) {
573 if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
574 // Do not make native methods and OSR-methods not-entrant
575 nm->dec_hotness_counter();
576 // Get the initial value of the hotness counter. This value depends on the
577 // ReservedCodeCacheSize
578 int reset_val = hotness_counter_reset_val();
579 int time_since_reset = reset_val - nm->hotness_counter();
580 double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
581 // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
582 // I.e., 'threshold' increases with lower available space in the code cache and a higher
583 // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
584 // value until it is reset by stack walking - is smaller than the computed threshold, the
585 // corresponding nmethod is considered for removal.
586 if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
587 // A method is marked as not-entrant if the method is
588 // 1) 'old enough': nm->hotness_counter() < threshold
589 // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
590 // The second condition is necessary if we are dealing with very small code cache
591 // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
592 // The second condition ensures that methods are not immediately made not-entrant
593 // after compilation.
594 nm->make_not_entrant();
595 // Code cache state change is tracked in make_not_entrant()
596 if (PrintMethodFlushing && Verbose) {
597 tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f",
598 nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold);
599 }
600 }
|
110 }
111
// Logs the state of nmethod 'nm' into the sweeper's debug ring buffer.
// 'line' identifies the SWEEP() call site that requested the record.
// Only active when the buffer has been allocated (ASSERT + LogSweeper).
void NMethodSweeper::record_sweep(nmethod* nm, int line) {
  if (_records != NULL) {
    _records[_sweep_index].traversal = _traversals;
    _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
    _records[_sweep_index].invocation = _sweep_fractions_left;
    _records[_sweep_index].compile_id = nm->compile_id();
    _records[_sweep_index].kind = nm->compile_kind();
    _records[_sweep_index].state = nm->_state;
    _records[_sweep_index].vep = nm->verified_entry_point();
    _records[_sweep_index].uep = nm->entry_point();
    _records[_sweep_index].line = line;
    // Advance and wrap; the buffer keeps the most recent SweeperLogEntries records.
    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
  }
}
126 #else
127 #define SWEEP(nm)
128 #endif
129
nmethod*  NMethodSweeper::_current_nmethod              = NULL; // Current nmethod
int       NMethodSweeper::_current_type                 = 0;    // Current CodeBlobType
long      NMethodSweeper::_traversals                   = 0;    // Stack scan count, also sweep ID.
long      NMethodSweeper::_total_nof_code_cache_sweeps  = 0;    // Total number of full sweeps of the code cache
long      NMethodSweeper::_time_counter                 = 0;    // Virtual time used to periodically invoke sweeper
long      NMethodSweeper::_last_sweep                   = 0;    // Value of _time_counter when the last sweep happened
int       NMethodSweeper::_seen                         = 0;    // Nof. nmethod we have currently processed in current pass of CodeCache
int       NMethodSweeper::_flushed_count                = 0;    // Nof. nmethods flushed in current sweep
int       NMethodSweeper::_zombified_count              = 0;    // Nof. nmethods made zombie in current sweep
int       NMethodSweeper::_marked_for_reclamation_count = 0;    // Nof. nmethods marked for reclaim in current sweep

volatile bool NMethodSweeper::_should_sweep             = true; // Indicates if we should invoke the sweeper
volatile int  NMethodSweeper::_sweep_fractions_left     = 0;    // Nof. invocations left until we are completed with this pass
volatile int  NMethodSweeper::_sweep_started            = 0;    // Flag to control conc sweeper
volatile int  NMethodSweeper::_bytes_changed            = 0;    // Counts the total nmethod size if the nmethod changed from:
                                                                //   1) alive       -> not_entrant
                                                                //   2) not_entrant -> zombie
                                                                //   3) zombie      -> marked_for_reclamation
int    NMethodSweeper::_hotness_counter_reset_val       = 0;    // Lazily initialized; see hotness_counter_reset_val()

long   NMethodSweeper::_total_nof_methods_reclaimed     = 0;    // Accumulated nof methods flushed
long   NMethodSweeper::_total_nof_c2_methods_reclaimed  = 0;    // Accumulated nof C2 methods flushed
size_t NMethodSweeper::_total_flushed_size              = 0;    // Total number of bytes flushed from the code cache
Tickspan NMethodSweeper::_total_time_sweeping;                  // Accumulated time sweeping
Tickspan NMethodSweeper::_total_time_this_sweep;                // Total time this sweep
Tickspan NMethodSweeper::_peak_sweep_time;                      // Peak time for a full sweep
Tickspan NMethodSweeper::_peak_sweep_fraction_time;             // Peak time sweeping one fraction
157
158
159 class MarkActivationClosure: public CodeBlobClosure {
160 public:
161 virtual void do_code_blob(CodeBlob* cb) {
162 assert(cb->is_nmethod(), "CodeBlob should be nmethod");
163 nmethod* nm = (nmethod*)cb;
164 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
165 // If we see an activation belonging to a non_entrant nmethod, we mark it.
166 if (nm->is_not_entrant()) {
167 nm->mark_as_seen_on_stack();
168 }
169 }
170 };
171 static MarkActivationClosure mark_activation_closure;
172
173 class SetHotnessClosure: public CodeBlobClosure {
174 public:
175 virtual void do_code_blob(CodeBlob* cb) {
176 assert(cb->is_nmethod(), "CodeBlob should be nmethod");
177 nmethod* nm = (nmethod*)cb;
178 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
179 }
180 };
181 static SetHotnessClosure set_hotness_closure;
182
183
184 int NMethodSweeper::hotness_counter_reset_val() {
185 if (_hotness_counter_reset_val == 0) {
186 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
187 }
188 return _hotness_counter_reset_val;
189 }
190 bool NMethodSweeper::sweep_in_progress() {
191 return (_current_nmethod != NULL);
192 }
193
194 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
195 // No need to synchronize access, since 'mark_active_nmethods' is always executed at a
196 // safepoint.
197 void NMethodSweeper::mark_active_nmethods() {
198 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
199 // If we do not want to reclaim not-entrant or zombie methods there is no need
200 // to scan stacks
201 if (!MethodFlushing) {
202 return;
203 }
204
205 // Increase time so that we can estimate when to invoke the sweeper again.
206 _time_counter++;
207
208 // Check for restart
209 assert(CodeCache::find_blob_unsafe(_current_nmethod) == _current_nmethod, "Sweeper nmethod cached state invalid");
210 if (!sweep_in_progress()) {
211 _seen = 0;
212 _sweep_fractions_left = NmethodSweepFraction;
213 _current_nmethod = (nmethod*)CodeCache::first_blob(CodeBlobType::MethodNonProfiled);
214 _current_type = CodeBlobType::MethodNonProfiled;
215 _traversals += 1;
216 _total_time_this_sweep = Tickspan();
217
218 if (PrintMethodFlushing) {
219 tty->print_cr("### Sweep: stack traversal %d", _traversals);
220 }
221 Threads::nmethods_do(&mark_activation_closure);
222
223 } else {
224 // Only set hotness counter
225 Threads::nmethods_do(&set_hotness_closure);
226 }
227
228 OrderAccess::storestore();
229 }
230 /**
231 * This function invokes the sweeper if at least one of the three conditions is met:
232 * (1) The code cache is getting full
233 * (2) There are sufficient state changes in/since the last sweep.
234 * (3) We have not been sweeping for 'some time'
249 // the formula considers how much space in the code cache is currently used. Here are
250 // some examples that will (hopefully) help in understanding.
251 //
252 // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
253 // the result of the division is 0. This
254 // keeps the used code cache size small
255 // (important for embedded Java)
256 // Large ReservedCodeCacheSize : (e.g., 256M + code cache is 10% full). The formula
257 // computes: (256 / 16) - 1 = 15
258 // As a result, we invoke the sweeper after
259 // 15 invocations of 'mark_active_nmethods.
260 // Large ReservedCodeCacheSize: (e.g., 256M + code Cache is 90% full). The formula
261 // computes: (256 / 16) - 10 = 6.
262 if (!_should_sweep) {
263 const int time_since_last_sweep = _time_counter - _last_sweep;
264 // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
265 // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
266 // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
267 // value) that disables the intended periodic sweeps.
268 const int max_wait_time = ReservedCodeCacheSize / (16 * M);
269 // Use only signed types
270 double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
271 MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
272 CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
273 assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
274
275 if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
276 _should_sweep = true;
277 }
278 }
279
280 if (_should_sweep && _sweep_fractions_left > 0) {
281 // Only one thread at a time will sweep
282 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
283 if (old != 0) {
284 return;
285 }
286 #ifdef ASSERT
287 if (LogSweeper && _records == NULL) {
288 // Create the ring buffer for the logging code
289 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
290 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
291 }
292 #endif
334 _sweep_fractions_left = 1;
335 }
336
337 // We want to visit all nmethods after NmethodSweepFraction
338 // invocations so divide the remaining number of nmethods by the
339 // remaining number of invocations. This is only an estimate since
340 // the number of nmethods changes during the sweep so the final
341 // stage must iterate until it there are no more nmethods.
342 int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
343 int swept_count = 0;
344
345
346 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
347 assert(!CodeCache_lock->owned_by_self(), "just checking");
348
349 int freed_memory = 0;
350 {
351 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
352
353 // The last invocation iterates until there are no more nmethods
354 while ((swept_count < todo || _sweep_fractions_left == 1) && _current_nmethod != NULL) {
355 swept_count++;
356 if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
357 if (PrintMethodFlushing && Verbose) {
358 tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
359 }
360 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
361
362 assert(Thread::current()->is_Java_thread(), "should be java thread");
363 JavaThread* thread = (JavaThread*)Thread::current();
364 ThreadBlockInVM tbivm(thread);
365 thread->java_suspend_self();
366 }
367 // Since we will give up the CodeCache_lock, always skip ahead
368 // to the next nmethod. Other blobs can be deleted by other
369 // threads but nmethods are only reclaimed by the sweeper.
370 nmethod* next = (nmethod*)CodeCache::next_blob(_current_nmethod, _current_type);
371
372 // Now ready to process nmethod and give up CodeCache_lock
373 {
374 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
375 freed_memory += process_nmethod(_current_nmethod, _current_type);
376 }
377 _seen++;
378
379 while (next == NULL && _current_type < CodeBlobType::MethodProfiled) {
380 // We reached the last method of the type
381 // Go to next type that has methods available
382 _current_type++;
383 next = (nmethod*)CodeCache::first_blob(_current_type);
384 }
385 _current_nmethod = next;
386 }
387 }
388
389 assert(_sweep_fractions_left > 1 || _current_nmethod == NULL, "must have scanned the whole cache");
390
391 const Ticks sweep_end_counter = Ticks::now();
392 const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
393 _total_time_sweeping += sweep_time;
394 _total_time_this_sweep += sweep_time;
395 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
396 _total_flushed_size += freed_memory;
397 _total_nof_methods_reclaimed += _flushed_count;
398
399 EventSweepCodeCache event(UNTIMED);
400 if (event.should_commit()) {
401 event.set_starttime(sweep_start_counter);
402 event.set_endtime(sweep_end_counter);
403 event.set_sweepIndex(_traversals);
404 event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
405 event.set_sweptCount(swept_count);
406 event.set_flushedCount(_flushed_count);
407 event.set_markedCount(_marked_for_reclamation_count);
408 event.set_zombifiedCount(_zombified_count);
409 event.commit();
477 }
478 };
479
480 void NMethodSweeper::release_nmethod(nmethod *nm) {
481 // Clean up any CompiledICHolders
482 {
483 ResourceMark rm;
484 MutexLocker ml_patch(CompiledIC_lock);
485 RelocIterator iter(nm);
486 while (iter.next()) {
487 if (iter.type() == relocInfo::virtual_call_type) {
488 CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
489 }
490 }
491 }
492
493 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
494 nm->flush();
495 }
496
497 int NMethodSweeper::process_nmethod(nmethod *nm, int code_blob_type) {
498 assert(!CodeCache_lock->owned_by_self(), "just checking");
499
500 int freed_memory = 0;
501 // Make sure this nmethod doesn't get unloaded during the scan,
502 // since safepoints may happen during acquired below locks.
503 NMethodMarker nmm(nm);
504 SWEEP(nm);
505
506 // Skip methods that are currently referenced by the VM
507 if (nm->is_locked_by_vm()) {
508 // But still remember to clean-up inline caches for alive nmethods
509 if (nm->is_alive()) {
510 // Clean inline caches that point to zombie/non-entrant methods
511 MutexLocker cl(CompiledIC_lock);
512 nm->cleanup_inline_caches();
513 SWEEP(nm);
514 }
515 return freed_memory;
516 }
517
569 if (nm->is_compiled_by_c2()) {
570 _total_nof_c2_methods_reclaimed++;
571 }
572 release_nmethod(nm);
573 _flushed_count++;
574 } else {
575 // Code cache state change is tracked in make_zombie()
576 nm->make_zombie();
577 _zombified_count++;
578 SWEEP(nm);
579 }
580 } else {
581 if (UseCodeCacheFlushing) {
582 if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
583 // Do not make native methods and OSR-methods not-entrant
584 nm->dec_hotness_counter();
585 // Get the initial value of the hotness counter. This value depends on the
586 // ReservedCodeCacheSize
587 int reset_val = hotness_counter_reset_val();
588 int time_since_reset = reset_val - nm->hotness_counter();
589 double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
590 // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
591 // I.e., 'threshold' increases with lower available space in the code cache and a higher
592 // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
593 // value until it is reset by stack walking - is smaller than the computed threshold, the
594 // corresponding nmethod is considered for removal.
595 if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
596 // A method is marked as not-entrant if the method is
597 // 1) 'old enough': nm->hotness_counter() < threshold
598 // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
599 // The second condition is necessary if we are dealing with very small code cache
600 // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
601 // The second condition ensures that methods are not immediately made not-entrant
602 // after compilation.
603 nm->make_not_entrant();
604 // Code cache state change is tracked in make_not_entrant()
605 if (PrintMethodFlushing && Verbose) {
606 tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f",
607 nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold);
608 }
609 }
|