6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeBlob.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/dependencies.hpp"
30 #include "code/icBuffer.hpp"
31 #include "code/nmethod.hpp"
32 #include "code/pcDesc.hpp"
33 #include "compiler/compileBroker.hpp"
34 #include "gc_implementation/shared/markSweep.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "memory/gcLocker.hpp"
37 #include "memory/iterator.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "oops/method.hpp"
40 #include "oops/objArrayOop.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/icache.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/mutexLocker.hpp"
47 #include "services/memoryService.hpp"
48 #include "trace/tracing.hpp"
49 #include "utilities/xmlstream.hpp"
50
51 // Helper class for printing in CodeCache
52
53 class CodeBlob_sizes {
54 private:
55 int count;
56 int total_size;
57 int header_size;
58 int code_size;
59 int stub_size;
60 int relocation_size;
61 int scopes_oop_size;
62 int scopes_metadata_size;
63 int scopes_data_size;
64 int scopes_pcs_size;
65
66 public:
67 CodeBlob_sizes() {
68 count = 0;
69 total_size = 0;
70 header_size = 0;
71 code_size = 0;
72 stub_size = 0;
98 void add(CodeBlob* cb) {
99 count++;
100 total_size += cb->size();
101 header_size += cb->header_size();
102 relocation_size += cb->relocation_size();
103 if (cb->is_nmethod()) {
104 nmethod* nm = cb->as_nmethod_or_null();
105 code_size += nm->insts_size();
106 stub_size += nm->stub_size();
107
108 scopes_oop_size += nm->oops_size();
109 scopes_metadata_size += nm->metadata_size();
110 scopes_data_size += nm->scopes_data_size();
111 scopes_pcs_size += nm->scopes_pcs_size();
112 } else {
113 code_size += cb->code_size();
114 }
115 }
116 };
117
118 // CodeCache implementation
119
120 CodeHeap * CodeCache::_heap = new CodeHeap();
121 int CodeCache::_number_of_blobs = 0;
122 int CodeCache::_number_of_adapters = 0;
123 int CodeCache::_number_of_nmethods = 0;
124 int CodeCache::_number_of_nmethods_with_dependencies = 0;
125 bool CodeCache::_needs_cache_clean = false;
126 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
127
128 int CodeCache::_codemem_full_count = 0;
129
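// Iteration primitives over the underlying CodeHeap; callers must hold the
// CodeCache_lock or be at a safepoint (enforced by the asserts below).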
130 CodeBlob* CodeCache::first() {
131 assert_locked_or_safepoint(CodeCache_lock);
132 return (CodeBlob*)_heap->first();
133 }
134
135
136 CodeBlob* CodeCache::next(CodeBlob* cb) {
137 assert_locked_or_safepoint(CodeCache_lock);
138 return (CodeBlob*)_heap->next(cb);
139 }
140
141
142 CodeBlob* CodeCache::alive(CodeBlob *cb) {
143 assert_locked_or_safepoint(CodeCache_lock);
144 while (cb != NULL && !cb->is_alive()) cb = next(cb);
145 return cb;
146 }
147
148
149 nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
150 assert_locked_or_safepoint(CodeCache_lock);
151 while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
152 return (nmethod*)cb;
153 }
154
155 nmethod* CodeCache::first_nmethod() {
156 assert_locked_or_safepoint(CodeCache_lock);
157 CodeBlob* cb = first();
158 while (cb != NULL && !cb->is_nmethod()) {
159 cb = next(cb);
160 }
161 return (nmethod*)cb;
162 }
163
164 nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
165 assert_locked_or_safepoint(CodeCache_lock);
166 cb = next(cb);
167 while (cb != NULL && !cb->is_nmethod()) {
168 cb = next(cb);
169 }
170 return (nmethod*)cb;
171 }
172
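// Peak committed usage of the code cache, updated on each allocation and
// reported by print_summary() as "max_used".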
173 static size_t maxCodeCacheUsed = 0;
174
175 CodeBlob* CodeCache::allocate(int size, bool is_critical) {
176 // Do not seize the CodeCache lock here--if the caller has not
177 // already done so, we are going to lose bigtime, since the code
178 // cache will contain a garbage CodeBlob until the caller can
179 // run the constructor for the CodeBlob subclass he is busy
180 // instantiating.
181 guarantee(size >= 0, "allocation request must be reasonable");
182 assert_locked_or_safepoint(CodeCache_lock);
183 CodeBlob* cb = NULL;
184 _number_of_blobs++;
185 while (true) {
186 cb = (CodeBlob*)_heap->allocate(size, is_critical);
187 if (cb != NULL) break;
188 if (!_heap->expand_by(CodeCacheExpansionSize)) {
189 // Expansion failed
190 return NULL;
191 }
192 if (PrintCodeCacheExtension) {
193 ResourceMark rm;
194 tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
195 (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
196 (address)_heap->high() - (address)_heap->low_boundary());
197 }
198 }
199 maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
200 (address)_heap->low_boundary()) - unallocated_capacity());
201 print_trace("allocation", cb, size);
202 return cb;
203 }
204
205 void CodeCache::free(CodeBlob* cb) {
206 assert_locked_or_safepoint(CodeCache_lock);
207
208 print_trace("free", cb);
209 if (cb->is_nmethod()) {
210 _number_of_nmethods--;
211 if (((nmethod *)cb)->has_dependencies()) {
212 _number_of_nmethods_with_dependencies--;
213 }
214 }
215 if (cb->is_adapter_blob()) {
216 _number_of_adapters--;
217 }
218 _number_of_blobs--;
219
220 _heap->deallocate(cb);
221
222 assert(_number_of_blobs >= 0, "sanity check");
223 }
224
225
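// Registers a newly constructed CodeBlob with the cache counters and flushes
// the hardware instruction cache for its contents.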
226 void CodeCache::commit(CodeBlob* cb) {
227 // this is called by nmethod::nmethod, which must already own CodeCache_lock
228 assert_locked_or_safepoint(CodeCache_lock);
229 if (cb->is_nmethod()) {
230 _number_of_nmethods++;
231 if (((nmethod *)cb)->has_dependencies()) {
232 _number_of_nmethods_with_dependencies++;
233 }
234 }
235 if (cb->is_adapter_blob()) {
236 _number_of_adapters++;
237 }
238
239 // flush the hardware I-cache
240 ICache::invalidate_range(cb->content_begin(), cb->content_size());
241 }
242
243
244 // Iteration over CodeBlobs
245
246 #define FOR_ALL_BLOBS(var) for (CodeBlob *var = first() ; var != NULL; var = next(var) )
247 #define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
248 #define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
249
250
251 bool CodeCache::contains(void *p) {
252 // It should be ok to call contains without holding a lock
253 return _heap->contains(p);
254 }
255
256
257 // This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
258 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
259 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
260 CodeBlob* CodeCache::find_blob(void* start) {
261 CodeBlob* result = find_blob_unsafe(start);
262 if (result == NULL) return NULL;
263 // We could potentially look up non_entrant methods
264 guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
265 return result;
266 }
267
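// Like find_blob(), but asserts that the result, if any, is an nmethod.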
268 nmethod* CodeCache::find_nmethod(void* start) {
269 CodeBlob *cb = find_blob(start);
270 assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
271 return (nmethod*)cb;
272 }
273
274
275 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
276 assert_locked_or_safepoint(CodeCache_lock);
277 FOR_ALL_BLOBS(p) {
278 f(p);
279 }
280 }
281
282
283 void CodeCache::nmethods_do(void f(nmethod* nm)) {
284 assert_locked_or_safepoint(CodeCache_lock);
285 FOR_ALL_BLOBS(nm) {
286 if (nm->is_nmethod()) f((nmethod*)nm);
287 }
288 }
289
290 void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
291 assert_locked_or_safepoint(CodeCache_lock);
292 FOR_ALL_ALIVE_NMETHODS(nm) {
293 f(nm);
294 }
295 }
296
297 int CodeCache::alignment_unit() {
298 return (int)_heap->alignment_unit();
299 }
300
301
302 int CodeCache::alignment_offset() {
303 return (int)_heap->alignment_offset();
304 }
305
306
307 // Mark nmethods for unloading if they contain otherwise unreachable
308 // oops.
309 void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
310 assert_locked_or_safepoint(CodeCache_lock);
311 FOR_ALL_ALIVE_NMETHODS(nm) {
312 nm->do_unloading(is_alive, unloading_occurred);
313 }
314 }
315
316 void CodeCache::blobs_do(CodeBlobClosure* f) {
317 assert_locked_or_safepoint(CodeCache_lock);
318 FOR_ALL_ALIVE_BLOBS(cb) {
319 f->do_code_blob(cb);
320
321 #ifdef ASSERT
322 if (cb->is_nmethod())
323 ((nmethod*)cb)->verify_scavenge_root_oops();
324 #endif //ASSERT
325 }
326 }
327
328 // Walk the list of methods which might contain non-perm oops.
329 void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
330 assert_locked_or_safepoint(CodeCache_lock);
331 debug_only(mark_scavenge_root_nmethods());
332
333 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
334 debug_only(cur->clear_scavenge_root_marked());
335 assert(cur->scavenge_root_not_marked(), "");
336 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
337
338 bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
339 #ifndef PRODUCT
340 if (TraceScavenge) {
341 cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
342 }
343 #endif //PRODUCT
344 if (is_live) {
345 // Perform cur->oops_do(f), maybe just once per nmethod.
408 cur = next;
409 }
410
411 // Check for stray marks.
412 debug_only(verify_perm_nmethods(NULL));
413 }
414
415 #ifndef PRODUCT
416 void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
417 // While we are here, verify the integrity of the list.
418 mark_scavenge_root_nmethods();
419 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
420 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
421 cur->clear_scavenge_root_marked();
422 }
423 verify_perm_nmethods(f);
424 }
425
426 // Temporarily mark nmethods that are claimed to be on the non-perm list.
427 void CodeCache::mark_scavenge_root_nmethods() {
428 FOR_ALL_ALIVE_BLOBS(cb) {
429 if (cb->is_nmethod()) {
430 nmethod *nm = (nmethod*)cb;
431 assert(nm->scavenge_root_not_marked(), "clean state");
432 if (nm->on_scavenge_root_list())
433 nm->set_scavenge_root_marked();
434 }
435 }
436 }
437
438 // If the closure is given, run it on the unlisted nmethods.
440 // Also make sure that the effects of mark_scavenge_root_nmethods are gone.
440 void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
441 FOR_ALL_ALIVE_BLOBS(cb) {
442 bool call_f = (f_or_null != NULL);
443 if (cb->is_nmethod()) {
444 nmethod *nm = (nmethod*)cb;
445 assert(nm->scavenge_root_not_marked(), "must be already processed");
446 if (nm->on_scavenge_root_list())
447 call_f = false; // don't show this one to the client
448 nm->verify_scavenge_root_oops();
449 } else {
450 call_f = false; // not an nmethod
451 }
452 if (call_f) f_or_null->do_code_blob(cb);
453 }
454 }
455 #endif //PRODUCT
456
457
458 void CodeCache::gc_prologue() {
459 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
460 }
461
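// Called at the end of a GC cycle: cleans inline caches if a cache clean was
// requested and fixes up oop relocations for oops the GC may have moved.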
462 void CodeCache::gc_epilogue() {
463 assert_locked_or_safepoint(CodeCache_lock);
464 FOR_ALL_ALIVE_BLOBS(cb) {
465 if (cb->is_nmethod()) {
466 nmethod *nm = (nmethod*)cb;
467 assert(!nm->is_unloaded(), "Tautology");
468 if (needs_cache_clean()) {
469 nm->cleanup_inline_caches();
470 }
471 DEBUG_ONLY(nm->verify());
472 nm->fix_oop_relocations();
473 }
474 }
475 set_needs_cache_clean(false);
476 prune_scavenge_root_nmethods();
477 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
478
479 #ifdef ASSERT
480 // make sure that we aren't leaking icholders
481 int count = 0;
482 FOR_ALL_BLOBS(cb) {
483 if (cb->is_nmethod()) {
484 RelocIterator iter((nmethod*)cb);
485 while(iter.next()) {
486 if (iter.type() == relocInfo::virtual_call_type) {
487 if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
488 CompiledIC *ic = CompiledIC_at(iter.reloc());
489 if (TraceCompiledIC) {
490 tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
491 ic->print();
492 }
493 assert(ic->cached_icholder() != NULL, "must be non-NULL");
494 count++;
495 }
496 }
497 }
498 }
499 }
500
501 assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
502 CompiledICHolder::live_count(), "must agree");
503 #endif
504 }
505
506
507 void CodeCache::verify_oops() {
508 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
509 VerifyOopClosure voc;
510 FOR_ALL_ALIVE_BLOBS(cb) {
511 if (cb->is_nmethod()) {
512 nmethod *nm = (nmethod*)cb;
513 nm->oops_do(&voc);
514 nm->verify_oop_relocations();
515 }
516 }
517 }
518
519
520 address CodeCache::first_address() {
521 assert_locked_or_safepoint(CodeCache_lock);
522 return (address)_heap->low_boundary();
523 }
524
525
526 address CodeCache::last_address() {
527 assert_locked_or_safepoint(CodeCache_lock);
528 return (address)_heap->high();
529 }
530
531 /**
532 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
533 * is free, reverse_free_ratio() returns 4.
534 */
535 double CodeCache::reverse_free_ratio() {
536 double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
537 double max_capacity = (double)CodeCache::max_capacity();
538 return max_capacity / unallocated_capacity;
539 }
540
541 void icache_init();
542
543 void CodeCache::initialize() {
544 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
545 #ifdef COMPILER2
546 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
547 #endif
548 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
549 // This was originally just a check of the alignment that caused failure; instead, round
550 // the code cache sizes to the page size. In particular, Solaris is moving to a larger
551 // default page size.
552 CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
553 InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
554 ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
555 if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
556 vm_exit_during_initialization("Could not reserve enough space for code cache");
557 }
558
559 MemoryService::add_code_heap_memory_pool(_heap);
560
561 // Initialize ICache flush mechanism
562 // This service is needed for os::register_code_area
563 icache_init();
564
565 // Give OS a chance to register generated code area.
566 // This is used on Windows 64 bit platforms to register
567 // Structured Exception Handlers for our generated code.
568 os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
569 }
570
571
572 void codeCache_init() {
573 CodeCache::initialize();
574 }
575
576 //------------------------------------------------------------------------------------------------
577
578 int CodeCache::number_of_nmethods_with_dependencies() {
579 return _number_of_nmethods_with_dependencies;
580 }
581
582 void CodeCache::clear_inline_caches() {
583 assert_locked_or_safepoint(CodeCache_lock);
584 FOR_ALL_ALIVE_NMETHODS(nm) {
585 nm->clear_inline_caches();
586 }
587 }
588
589 // Keeps track of time spent for checking dependencies
590 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
591
592 int CodeCache::mark_for_deoptimization(DepChange& changes) {
593 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
594 int number_of_marked_CodeBlobs = 0;
595
596 // search the hierarchy looking for nmethods which are affected by the loading of this class
597
598 // then search the interfaces this class implements looking for nmethods
599 // which might be dependent on the fact that an interface only had one
600 // implementor.
601 // nmethod::check_all_dependencies works correctly only if no safepoint
602 // can happen.
603 No_Safepoint_Verifier nsv;
604 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
605 Klass* d = str.klass();
606 number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
620 }
621
622
623 #ifdef HOTSWAP
624 int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
625 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
626 int number_of_marked_CodeBlobs = 0;
627
628 // Deoptimize all methods of the evolving class itself
629 Array<Method*>* old_methods = dependee->methods();
630 for (int i = 0; i < old_methods->length(); i++) {
631 ResourceMark rm;
632 Method* old_method = old_methods->at(i);
633 nmethod *nm = old_method->code();
634 if (nm != NULL) {
635 nm->mark_for_deoptimization();
636 number_of_marked_CodeBlobs++;
637 }
638 }
639
640 FOR_ALL_ALIVE_NMETHODS(nm) {
641 if (nm->is_marked_for_deoptimization()) {
642 // ...Already marked in the previous pass; don't count it again.
643 } else if (nm->is_evol_dependent_on(dependee())) {
644 ResourceMark rm;
645 nm->mark_for_deoptimization();
646 number_of_marked_CodeBlobs++;
647 } else {
648 // flush caches in case they refer to a redefined Method*
649 nm->clear_inline_caches();
650 }
651 }
652
653 return number_of_marked_CodeBlobs;
654 }
655 #endif // HOTSWAP
656
657
658 // Deoptimize all methods
659 void CodeCache::mark_all_nmethods_for_deoptimization() {
660 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
661 FOR_ALL_ALIVE_NMETHODS(nm) {
662 nm->mark_for_deoptimization();
663 }
664 }
665
666
667 int CodeCache::mark_for_deoptimization(Method* dependee) {
668 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
669 int number_of_marked_CodeBlobs = 0;
670
671 FOR_ALL_ALIVE_NMETHODS(nm) {
672 if (nm->is_dependent_on_method(dependee)) {
673 ResourceMark rm;
674 nm->mark_for_deoptimization();
675 number_of_marked_CodeBlobs++;
676 }
677 }
678
679 return number_of_marked_CodeBlobs;
680 }
681
682 void CodeCache::make_marked_nmethods_zombies() {
683 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
684 FOR_ALL_ALIVE_NMETHODS(nm) {
685 if (nm->is_marked_for_deoptimization()) {
686
687 // If the nmethod has already been made non-entrant and it can be converted
688 // then zombie it now. Otherwise make it non-entrant and it will eventually
689 // be zombied when it is no longer seen on the stack. Note that the nmethod
690 // might be "entrant" and not on the stack and so could be zombied immediately
691 // but we can't tell because we don't track it on stack until it becomes
692 // non-entrant.
693
694 if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
695 nm->make_zombie();
696 } else {
697 nm->make_not_entrant();
698 }
699 }
700 }
701 }
702
703 void CodeCache::make_marked_nmethods_not_entrant() {
704 assert_locked_or_safepoint(CodeCache_lock);
705 FOR_ALL_ALIVE_NMETHODS(nm) {
706 if (nm->is_marked_for_deoptimization()) {
707 nm->make_not_entrant();
708 }
709 }
710 }
711
712 void CodeCache::verify() {
713 _heap->verify();
714 FOR_ALL_ALIVE_BLOBS(p) {
715 p->verify();
716 }
717 }
718
719 void CodeCache::report_codemem_full() {
720 _codemem_full_count++;
721 EventCodeCacheFull event;
722 if (event.should_commit()) {
723 event.set_startAddress((u8)low_bound());
724 event.set_commitedTopAddress((u8)high());
725 event.set_reservedTopAddress((u8)high_bound());
726 event.set_entryCount(nof_blobs());
727 event.set_methodCount(nof_nmethods());
728 event.set_adaptorCount(nof_adapters());
729 event.set_unallocatedCapacity(unallocated_capacity()/K);
730 event.set_fullCount(_codemem_full_count);
731 event.commit();
732 }
733 }
734
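// Each CodeBlob occupies a whole number of CodeCacheSegmentSize segments; the
// difference between the HeapBlock length (in segments) and the blob size is
// counted as wasted space.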
735 void CodeCache::print_memory_overhead() {
736 size_t wasted_bytes = 0;
737 CodeBlob *cb;
738 for (cb = first(); cb != NULL; cb = next(cb)) {
739 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
740 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
741 }
742 // Print bytes that are allocated in the freelist
743 ttyLocker ttl;
744 tty->print_cr("Number of elements in freelist: %d", freelist_length());
745 tty->print_cr("Allocated in freelist: %dkB", bytes_allocated_in_freelist()/K);
746 tty->print_cr("Unused bytes in CodeBlobs: %dkB", (int)(wasted_bytes/K));
747 tty->print_cr("Segment map size: %dkB", allocated_segments()/K); // 1 byte per segment
748 }
749
750 //------------------------------------------------------------------------------------------------
751 // Non-product version
752
753 #ifndef PRODUCT
754
755 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
756 if (PrintCodeCache2) { // Need to add a new flag
757 ResourceMark rm;
758 if (size == 0) size = cb->size();
759 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
760 }
761 }
762
763 void CodeCache::print_internals() {
764 int nmethodCount = 0;
765 int runtimeStubCount = 0;
766 int adapterCount = 0;
767 int deoptimizationStubCount = 0;
768 int uncommonTrapStubCount = 0;
769 int bufferBlobCount = 0;
770 int total = 0;
771 int nmethodAlive = 0;
772 int nmethodNotEntrant = 0;
773 int nmethodZombie = 0;
774 int nmethodUnloaded = 0;
775 int nmethodJava = 0;
776 int nmethodNative = 0;
777 int max_nm_size = 0;
778 ResourceMark rm;
779
780 CodeBlob *cb;
781 for (cb = first(); cb != NULL; cb = next(cb)) {
782 total++;
783 if (cb->is_nmethod()) {
784 nmethod* nm = (nmethod*)cb;
785
786 if (Verbose && nm->method() != NULL) {
787 ResourceMark rm;
788 char *method_name = nm->method()->name_and_sig_as_C_string();
789 tty->print("%s", method_name);
790 if(nm->is_alive()) { tty->print_cr(" alive"); }
791 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
792 if(nm->is_zombie()) { tty->print_cr(" zombie"); }
793 }
794
795 nmethodCount++;
796
797 if(nm->is_alive()) { nmethodAlive++; }
798 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
799 if(nm->is_zombie()) { nmethodZombie++; }
800 if(nm->is_unloaded()) { nmethodUnloaded++; }
801 if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
802
803 if(nm->method() != NULL && nm->is_java_method()) {
804 nmethodJava++;
805 max_nm_size = MAX2(max_nm_size, nm->size());
806 }
807 } else if (cb->is_runtime_stub()) {
808 runtimeStubCount++;
809 } else if (cb->is_deoptimization_stub()) {
810 deoptimizationStubCount++;
811 } else if (cb->is_uncommon_trap_stub()) {
812 uncommonTrapStubCount++;
813 } else if (cb->is_adapter_blob()) {
814 adapterCount++;
815 } else if (cb->is_buffer_blob()) {
816 bufferBlobCount++;
817 }
818 }
819
820 int bucketSize = 512;
821 int bucketLimit = max_nm_size / bucketSize + 1;
822 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
823 memset(buckets, 0, sizeof(int) * bucketLimit);
824
825 for (cb = first(); cb != NULL; cb = next(cb)) {
826 if (cb->is_nmethod()) {
827 nmethod* nm = (nmethod*)cb;
828 if(nm->is_java_method()) {
829 buckets[nm->size() / bucketSize]++;
830 }
831 }
832 }
833
834 tty->print_cr("Code Cache Entries (total of %d)",total);
835 tty->print_cr("-------------------------------------------------");
836 tty->print_cr("nmethods: %d",nmethodCount);
837 tty->print_cr("\talive: %d",nmethodAlive);
838 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
839 tty->print_cr("\tzombie: %d",nmethodZombie);
840 tty->print_cr("\tunloaded: %d",nmethodUnloaded);
841 tty->print_cr("\tjava: %d",nmethodJava);
842 tty->print_cr("\tnative: %d",nmethodNative);
843 tty->print_cr("runtime_stubs: %d",runtimeStubCount);
844 tty->print_cr("adapters: %d",adapterCount);
845 tty->print_cr("buffer blobs: %d",bufferBlobCount);
846 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
847 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
848 tty->print_cr("\nnmethod size distribution (non-zombie java)");
849 tty->print_cr("-------------------------------------------------");
850
851 for(int i=0; i<bucketLimit; i++) {
852 if(buckets[i] != 0) {
853 tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
854 tty->fill_to(40);
855 tty->print_cr("%d",buckets[i]);
856 }
857 }
858
859 FREE_C_HEAP_ARRAY(int, buckets, mtCode);
860 print_memory_overhead();
861 }
862
863 #endif // !PRODUCT
864
865 void CodeCache::print() {
866 print_summary(tty);
867
868 #ifndef PRODUCT
869 if (!Verbose) return;
870
871 CodeBlob_sizes live;
872 CodeBlob_sizes dead;
873
874 FOR_ALL_BLOBS(p) {
875 if (!p->is_alive()) {
876 dead.add(p);
877 } else {
878 live.add(p);
879 }
880 }
881
882 tty->print_cr("CodeCache:");
883 tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
884
885 if (!live.is_empty()) {
886 live.print("live");
887 }
888 if (!dead.is_empty()) {
889 dead.print("dead");
890 }
891
892
893 if (WizardMode) {
894 // print the oop_map usage
895 int code_size = 0;
896 int number_of_blobs = 0;
897 int number_of_oop_maps = 0;
898 int map_size = 0;
899 FOR_ALL_BLOBS(p) {
900 if (p->is_alive()) {
901 number_of_blobs++;
902 code_size += p->code_size();
903 OopMapSet* set = p->oop_maps();
904 if (set != NULL) {
905 number_of_oop_maps += set->size();
906 map_size += set->heap_size();
907 }
908 }
909 }
910 tty->print_cr("OopMaps");
911 tty->print_cr(" #blobs = %d", number_of_blobs);
912 tty->print_cr(" code size = %d", code_size);
913 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
914 tty->print_cr(" map size = %d", map_size);
915 }
916
917 #endif // !PRODUCT
918 }
919
920 void CodeCache::print_summary(outputStream* st, bool detailed) {
921 size_t total = (_heap->high_boundary() - _heap->low_boundary());
922 st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
923 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
924 total/K, (total - unallocated_capacity())/K,
925 maxCodeCacheUsed/K, unallocated_capacity()/K);
926
927 if (detailed) {
928 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
929 _heap->low_boundary(),
930 _heap->high(),
931 _heap->high_boundary());
932 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
933 " adapters=" UINT32_FORMAT,
934 nof_blobs(), nof_nmethods(), nof_adapters());
935 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
936 "enabled" : Arguments::mode() == Arguments::_int ?
937 "disabled (interpreter mode)" :
938 "disabled (not enough contiguous free space left)");
939 }
940 }
941
942 void CodeCache::log_state(outputStream* st) {
943 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
944 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
945 nof_blobs(), nof_nmethods(), nof_adapters(),
946 unallocated_capacity());
947 }
948
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "code/codeBlob.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/compiledIC.hpp"
30 #include "code/dependencies.hpp"
31 #include "code/icBuffer.hpp"
32 #include "code/nmethod.hpp"
33 #include "code/pcDesc.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "gc_implementation/shared/markSweep.hpp"
36 #include "memory/allocation.inline.hpp"
37 #include "memory/gcLocker.hpp"
38 #include "memory/iterator.hpp"
39 #include "memory/resourceArea.hpp"
40 #include "oops/method.hpp"
41 #include "oops/objArrayOop.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "opto/compile.hpp"
44 #include "runtime/handles.inline.hpp"
45 #include "runtime/arguments.hpp"
46 #include "runtime/icache.hpp"
47 #include "runtime/java.hpp"
48 #include "runtime/mutexLocker.hpp"
49 #include "runtime/compilationPolicy.hpp"
50 #include "services/memoryService.hpp"
51 #include "trace/tracing.hpp"
52 #include "utilities/xmlstream.hpp"
53
54 // Helper class for printing in CodeCache
55 class CodeBlob_sizes {
56 private:
57 int count;
58 int total_size;
59 int header_size;
60 int code_size;
61 int stub_size;
62 int relocation_size;
63 int scopes_oop_size;
64 int scopes_metadata_size;
65 int scopes_data_size;
66 int scopes_pcs_size;
67
68 public:
69 CodeBlob_sizes() {
70 count = 0;
71 total_size = 0;
72 header_size = 0;
73 code_size = 0;
74 stub_size = 0;
100 void add(CodeBlob* cb) {
101 count++;
102 total_size += cb->size();
103 header_size += cb->header_size();
104 relocation_size += cb->relocation_size();
105 if (cb->is_nmethod()) {
106 nmethod* nm = cb->as_nmethod_or_null();
107 code_size += nm->insts_size();
108 stub_size += nm->stub_size();
109
110 scopes_oop_size += nm->oops_size();
111 scopes_metadata_size += nm->metadata_size();
112 scopes_data_size += nm->scopes_data_size();
113 scopes_pcs_size += nm->scopes_pcs_size();
114 } else {
115 code_size += cb->code_size();
116 }
117 }
118 };
119
120 // Iterate over all CodeHeaps
121 #define FOR_ALL_HEAPS(it) for (GrowableArrayIterator<CodeHeap*> it = _heaps->begin(); it != _heaps->end(); ++it)
122 // Iterate over all CodeHeaps containing nmethods
123 #define FOR_ALL_METHOD_HEAPS(it) for (GrowableArrayFilterIterator<CodeHeap*, IsMethodPredicate> it(_heaps->begin(), IsMethodPredicate()); it != _heaps->end(); ++it)
124 // Iterate over all CodeBlobs (cb) on the given CodeHeap
125 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
126 // Iterate over all alive CodeBlobs (cb) on the given CodeHeap
127 #define FOR_ALL_ALIVE_BLOBS(cb, heap) for (CodeBlob* cb = first_alive_blob(heap); cb != NULL; cb = next_alive_blob(heap, cb))
128
129 address CodeCache::_low_bound = 0;
130 address CodeCache::_high_bound = 0;
131 int CodeCache::_number_of_blobs = 0;
132 int CodeCache::_number_of_adapters = 0;
133 int CodeCache::_number_of_nmethods = 0;
134 int CodeCache::_number_of_nmethods_with_dependencies = 0;
135 bool CodeCache::_needs_cache_clean = false;
136 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
137 int CodeCache::_codemem_full_count = 0;
138
139 // Initialize array of CodeHeaps
140 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (3, true);
141
142 void CodeCache::initialize_heaps() {
143 // Calculate default CodeHeap sizes if not set by user
144 if (FLAG_IS_DEFAULT(NonMethodCodeHeapSize) && FLAG_IS_DEFAULT(ProfiledCodeHeapSize)
145 && FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
146 // Number of c1/c2 compiler threads
147 const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
148 const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
149
150 // C1 temporary code buffer size (see Compiler::init_buffer_blob())
151 const int c1_buffer_size = Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size();
152
153 // C2 scratch buffer size (see Compile::init_scratch_buffer_blob())
154 // Initial size of constant table (this may be increased if a compiled method needs more space)
155 const int constant_size = (4 * 1024);
156 const int c2_buffer_size = Compile::MAX_inst_size + Compile::MAX_locs_size + constant_size;
157
158 // Increase default NonMethodCodeHeapSize to account for buffers
159 int total_buffer_size = c1_count * c1_buffer_size + c2_count * c2_buffer_size;
160 FLAG_SET_DEFAULT(NonMethodCodeHeapSize, NonMethodCodeHeapSize + total_buffer_size);
161
162 // Check if we have enough space for the non-method code heap
163 if (ReservedCodeCacheSize > NonMethodCodeHeapSize) {
164 // Use the default value for NonMethodCodeHeapSize and one half of the
165 // remaining size for non-profiled methods and one half for profiled methods
166 size_t remaining_size = ReservedCodeCacheSize - NonMethodCodeHeapSize;
167 size_t profiled_size = remaining_size / 2;
168 size_t non_profiled_size = remaining_size - profiled_size;
169 FLAG_SET_DEFAULT(ProfiledCodeHeapSize, profiled_size);
170 FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, non_profiled_size);
171 } else {
172 // Use all space for the non-method heap and set other heaps to minimal size
173 FLAG_SET_DEFAULT(NonMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
174 FLAG_SET_DEFAULT(ProfiledCodeHeapSize, os::vm_page_size());
175 FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, os::vm_page_size());
176 }
177 }
178
179 // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
180 if(!heap_available(CodeBlobType::MethodProfiled)) {
181 FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
182 FLAG_SET_DEFAULT(ProfiledCodeHeapSize, 0);
183 }
184
185 // Size check
186 guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
187
188 // Align reserved sizes of CodeHeaps
189 size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonMethodCodeHeapSize);
190 size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
191 size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
192
193 // Compute initial sizes of CodeHeaps
194 size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
195 size_t init_profiled_size = MIN2(InitialCodeCacheSize, profiled_size);
196 size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
197
198 // Reserve one contiguous chunk of memory for CodeHeaps and split it into
199 // parts for the individual heaps. The memory layout looks like this:
200 // ---------- high -----------
201 // Non-profiled nmethods
202 // Profiled nmethods
203 // Non-methods
204 // ---------- low ------------
205 ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
206 ReservedSpace non_method_space = rs.first_part(non_method_size);
207 ReservedSpace rest = rs.last_part(non_method_size);
208 ReservedSpace profiled_space = rest.first_part(profiled_size);
209 ReservedSpace non_profiled_space = rest.last_part(profiled_size);
210
211 // Non-methods (stubs, adapters, ...)
212 add_heap(non_method_space, "Non-methods", init_non_method_size, CodeBlobType::NonMethod);
213 // Tier 2 and tier 3 (profiled) methods
214 add_heap(profiled_space, "Profiled nmethods", init_profiled_size, CodeBlobType::MethodProfiled);
215 // Tier 1 and tier 4 (non-profiled) methods and native methods
216 add_heap(non_profiled_space, "Non-profiled nmethods", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
217 }
218
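// Reserves one contiguous address range backing all CodeHeaps and records the
// overall bounds returned by low_bound()/high_bound().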
219 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
220 // Determine alignment
221 const size_t page_size = os::can_execute_large_page_memory() ?
222 os::page_size_for_region(InitialCodeCacheSize, size, 8) :
223 os::vm_page_size();
224 const size_t granularity = os::vm_allocation_granularity();
225 const size_t r_align = MAX2(page_size, granularity);
226 const size_t r_size = align_size_up(size, r_align);
227 const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
228 MAX2(page_size, granularity);
229
230 ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
231
232 // Initialize bounds
233 _low_bound = (address)rs.base();
234 _high_bound = _low_bound + rs.size();
235
236 return rs;
237 }
238
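// Returns whether a CodeHeap for the given CodeBlobType is used in this
// configuration; without TieredCompilation no profiled code is generated, so
// only the non-profiled method heap (and the non-method heap) is needed.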
239 bool CodeCache::heap_available(int code_blob_type) {
240 if (TieredCompilation || code_blob_type == CodeBlobType::NonMethod) {
241 // Use all heaps for TieredCompilation
242 return true;
243 } else {
244 // Without TieredCompilation we only need the non-profiled heap
245 return (code_blob_type == CodeBlobType::MethodNonProfiled);
246 }
247 }
248
249 void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
250 // Check if heap is needed
251 if (!heap_available(code_blob_type)) {
252 return;
253 }
254
255 // Create CodeHeap
256 CodeHeap* heap = new CodeHeap(name, code_blob_type);
257 _heaps->append(heap);
258
259 // Reserve Space
260 size_initial = round_to(size_initial, os::vm_page_size());
261
262 if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
263 vm_exit_during_initialization("Could not reserve enough space for code cache");
264 }
265
266 // Register the CodeHeap
267 MemoryService::add_code_heap_memory_pool(heap, name);
268 }
269
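// Returns the CodeHeap that accepts blobs of the given type, or NULL if no
// such heap is available.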
270 CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
271 FOR_ALL_HEAPS(it) {
272 if ((*it)->accepts(code_blob_type)) {
273 return (*it);
274 }
275 }
276 return NULL;
277 }
278
279 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
280 assert_locked_or_safepoint(CodeCache_lock);
281 if (heap != NULL) {
282 return (CodeBlob*)heap->first();
283 }
284 return NULL;
285 }
286
287 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
288 assert_locked_or_safepoint(CodeCache_lock);
289 if (heap != NULL) {
290 return (CodeBlob*)heap->next(cb);
291 }
292 return NULL;
293 }
294
295 CodeBlob* CodeCache::first_alive_blob(CodeHeap* heap) {
296 assert_locked_or_safepoint(CodeCache_lock);
297 CodeBlob* cb = first_blob(heap);
298 while (cb != NULL && !cb->is_alive()) {
299 cb = next_blob(heap, cb);
300 }
301 return cb;
302 }
303
304 CodeBlob* CodeCache::next_alive_blob(CodeHeap* heap, CodeBlob* cb) {
305 assert_locked_or_safepoint(CodeCache_lock);
306 cb = next_blob(heap, cb);
307 while (cb != NULL && !cb->is_alive()) {
308 cb = next_blob(heap, cb);
309 }
310 return cb;
311 }
312
313 CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
314 // Do not seize the CodeCache lock here--if the caller has not
315 // already done so, we are going to lose bigtime, since the code
316 // cache will contain a garbage CodeBlob until the caller can
317 // run the constructor for the CodeBlob subclass he is busy
318 // instantiating.
319 guarantee(size >= 0, "allocation request must be reasonable");
320 assert_locked_or_safepoint(CodeCache_lock);
321 CodeBlob* cb = NULL;
322 _number_of_blobs++;
323
324 // Get CodeHeap for the given CodeBlobType
325 CodeHeap* heap = get_code_heap(code_blob_type);
326 assert (heap != NULL, "Heap exists");
327
328 while (true) {
329 cb = (CodeBlob*)heap->allocate(size, is_critical);
330 if (cb != NULL) break;
331 if (!heap->expand_by(CodeCacheExpansionSize)) {
332 // Expansion failed
333 return NULL;
334 }
335 if (PrintCodeCacheExtension) {
336 ResourceMark rm;
337 tty->print_cr("CodeHeap '%s' extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
338 heap->name(), (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
339 (address)heap->high() - (address)heap->low_boundary());
340 }
341 }
342 print_trace("allocation", cb, size);
343
344 return cb;
345 }
346
347 void CodeCache::free(CodeBlob* cb, int code_blob_type) {
348 assert_locked_or_safepoint(CodeCache_lock);
349
350 print_trace("free", cb);
351 if (cb->is_nmethod()) {
352 _number_of_nmethods--;
353 if (((nmethod *)cb)->has_dependencies()) {
354 _number_of_nmethods_with_dependencies--;
355 }
356 }
357 if (cb->is_adapter_blob()) {
358 _number_of_adapters--;
359 }
360 _number_of_blobs--;
361
362 // Get heap for given CodeBlobType and deallocate
363 get_code_heap(code_blob_type)->deallocate(cb);
364
365 assert(_number_of_blobs >= 0, "sanity check");
366 }
367
368 void CodeCache::commit(CodeBlob* cb) {
369 // this is called by nmethod::nmethod, which must already own CodeCache_lock
370 assert_locked_or_safepoint(CodeCache_lock);
371 if (cb->is_nmethod()) {
372 _number_of_nmethods++;
373 if (((nmethod *)cb)->has_dependencies()) {
374 _number_of_nmethods_with_dependencies++;
375 }
376 }
377 if (cb->is_adapter_blob()) {
378 _number_of_adapters++;
379 }
380
381 // flush the hardware I-cache
382 ICache::invalidate_range(cb->content_begin(), cb->content_size());
383 }
384
385 bool CodeCache::contains(void *p) {
386 // It should be ok to call contains without holding a lock
387 FOR_ALL_HEAPS(it) {
388 if ((*it)->contains(p)) {
389 return true;
390 }
391 }
392 return false;
393 }
394
395 // This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
396 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
397 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
398 CodeBlob* CodeCache::find_blob(void* start) {
399 CodeBlob* result = find_blob_unsafe(start);
400 // We could potentially look up non_entrant methods
401 guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
402 return result;
403 }
404
405 // Lookup that does not fail if you look up a zombie method (if you call this, be sure to know
406 // what you are doing)
407 CodeBlob* CodeCache::find_blob_unsafe(void* start) {
408 // NMT can walk the stack before code cache is created
409 if (_heaps->is_empty()) return NULL;
410
411 FOR_ALL_HEAPS(it) {
412 CodeBlob* result = (CodeBlob*) (*it)->find_start(start);
413 if (result != NULL && result->blob_contains((address)start)) {
414 return result;
415 }
416 }
417 return NULL;
418 }
419
420 nmethod* CodeCache::find_nmethod(void* start) {
421 CodeBlob* cb = find_blob(start);
422 assert(cb->is_nmethod(), "did not find an nmethod");
423 return (nmethod*)cb;
424 }
425
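// Only the method-capable heaps are checked, since nmethods are never
// allocated in the non-method heap.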
426 bool CodeCache::contains_nmethod(nmethod* nm) {
427 FOR_ALL_METHOD_HEAPS(it) {
428 if ((*it)->contains(nm)) {
429 return true;
430 }
431 }
432 return false;
433 }
434
435 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
436 assert_locked_or_safepoint(CodeCache_lock);
437 FOR_ALL_HEAPS(it) {
438 FOR_ALL_BLOBS(cb, *it) {
439 f(cb);
440 }
441 }
442 }
443
444 void CodeCache::nmethods_do(void f(nmethod* nm)) {
445 assert_locked_or_safepoint(CodeCache_lock);
446 FOR_ALL_METHOD_HEAPS(it) {
447 FOR_ALL_BLOBS(cb, *it) {
448 f((nmethod*)cb);
449 }
450 }
451 }
452
453 void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
454 assert_locked_or_safepoint(CodeCache_lock);
455 FOR_ALL_METHOD_HEAPS(it) {
456 FOR_ALL_ALIVE_BLOBS(cb, *it) {
457 f((nmethod*)cb);
458 }
459 }
460 }
461
462 int CodeCache::alignment_unit() {
463 return (int)_heaps->first()->alignment_unit();
464 }
465
466 int CodeCache::alignment_offset() {
467 return (int)_heaps->first()->alignment_offset();
468 }
469
470 // Mark nmethods for unloading if they contain otherwise unreachable oops.
471 void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
472 assert_locked_or_safepoint(CodeCache_lock);
473 FOR_ALL_METHOD_HEAPS(it) {
474 FOR_ALL_ALIVE_BLOBS(cb, *it) {
475 nmethod* nm = (nmethod*)cb;
476 nm->do_unloading(is_alive, unloading_occurred);
477 }
478 }
479 }
480
481 void CodeCache::blobs_do(CodeBlobClosure* f) {
482 assert_locked_or_safepoint(CodeCache_lock);
483 FOR_ALL_HEAPS(it) {
484 FOR_ALL_BLOBS(cb, *it) {
485 if (cb->is_alive()) {
486 f->do_code_blob(cb);
487
488 #ifdef ASSERT
489 if (cb->is_nmethod())
490 ((nmethod*)cb)->verify_scavenge_root_oops();
491 #endif //ASSERT
492 }
493 }
494 }
495 }
496
497 // Walk the list of methods which might contain non-perm oops.
498 void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
499 assert_locked_or_safepoint(CodeCache_lock);
500 debug_only(mark_scavenge_root_nmethods());
501
502 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
503 debug_only(cur->clear_scavenge_root_marked());
504 assert(cur->scavenge_root_not_marked(), "");
505 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
506
507 bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
508 #ifndef PRODUCT
509 if (TraceScavenge) {
510 cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
511 }
512 #endif //PRODUCT
513 if (is_live) {
514 // Perform cur->oops_do(f), maybe just once per nmethod.
577 cur = next;
578 }
579
580 // Check for stray marks.
581 debug_only(verify_perm_nmethods(NULL));
582 }
583
584 #ifndef PRODUCT
585 void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
586 // While we are here, verify the integrity of the list.
587 mark_scavenge_root_nmethods();
588 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
589 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
590 cur->clear_scavenge_root_marked();
591 }
592 verify_perm_nmethods(f);
593 }
594
595 // Temporarily mark nmethods that are claimed to be on the non-perm list.
596 void CodeCache::mark_scavenge_root_nmethods() {
597 FOR_ALL_METHOD_HEAPS(it) {
598 FOR_ALL_ALIVE_BLOBS(cb, *it) {
599 nmethod* nm = (nmethod*)cb;
600 assert(nm->scavenge_root_not_marked(), "clean state");
601 if (nm->on_scavenge_root_list())
602 nm->set_scavenge_root_marked();
603 }
604 }
605 }
606
607 // If the closure is given, run it on the unlisted nmethods.
608 // Also make sure that the effects of mark_scavenge_root_nmethods are gone.
609 void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
610 FOR_ALL_METHOD_HEAPS(it) {
611 FOR_ALL_ALIVE_BLOBS(cb, *it) {
612 nmethod* nm = (nmethod*)cb;
613 bool call_f = (f_or_null != NULL);
614 assert(nm->scavenge_root_not_marked(), "must be already processed");
615 if (nm->on_scavenge_root_list())
616 call_f = false; // don't show this one to the client
617 nm->verify_scavenge_root_oops();
618 if (call_f) f_or_null->do_code_blob(nm);
619 }
620 }
621 }
622 #endif //PRODUCT
623
624 void CodeCache::gc_prologue() {
625 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
626 }
627
628 void CodeCache::gc_epilogue() {
629 assert_locked_or_safepoint(CodeCache_lock);
630 FOR_ALL_METHOD_HEAPS(it) {
631 FOR_ALL_ALIVE_BLOBS(cb, *it) {
632 nmethod* nm = (nmethod*)cb;
633 assert(!nm->is_unloaded(), "Tautology");
634 if (needs_cache_clean()) {
635 nm->cleanup_inline_caches();
636 }
637 DEBUG_ONLY(nm->verify());
638 nm->fix_oop_relocations();
639 }
640 }
641 set_needs_cache_clean(false);
642 prune_scavenge_root_nmethods();
643 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
644
645 #ifdef ASSERT
646 // make sure that we aren't leaking icholders
647 int count = 0;
648 FOR_ALL_METHOD_HEAPS(it) {
649 FOR_ALL_BLOBS(cb, *it) {
650 RelocIterator iter((nmethod*)cb);
651 while(iter.next()) {
652 if (iter.type() == relocInfo::virtual_call_type) {
653 if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
654 CompiledIC *ic = CompiledIC_at(iter.reloc());
655 if (TraceCompiledIC) {
656 tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
657 ic->print();
658 }
659 assert(ic->cached_icholder() != NULL, "must be non-NULL");
660 count++;
661 }
662 }
663 }
664 }
665 }
666
667 assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
668 CompiledICHolder::live_count(), "must agree");
669 #endif
670 }
671
672 void CodeCache::verify_oops() {
673 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
674 VerifyOopClosure voc;
675 FOR_ALL_METHOD_HEAPS(it) {
676 FOR_ALL_ALIVE_BLOBS(cb, *it) {
677 nmethod* nm = (nmethod*)cb;
678 nm->oops_do(&voc);
679 nm->verify_oop_relocations();
680 }
681 }
682 }
683
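// Aggregate capacity queries sum the corresponding value over all CodeHeaps.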
684 size_t CodeCache::capacity() {
685 size_t cap = 0;
686 FOR_ALL_HEAPS(it) {
687 cap += (*it)->capacity();
688 }
689 return cap;
690 }
691
692 size_t CodeCache::unallocated_capacity() {
693 size_t unallocated_cap = 0;
694 FOR_ALL_HEAPS(it) {
695 unallocated_cap += (*it)->unallocated_capacity();
696 }
697 return unallocated_cap;
698 }
699
700 size_t CodeCache::max_capacity() {
701 size_t max_cap = 0;
702 FOR_ALL_HEAPS(it) {
703 max_cap += (*it)->max_capacity();
704 }
705 return max_cap;
706 }
707
708 /**
709 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
710 * is free, reverse_free_ratio() returns 4.
711 */
712 double CodeCache::reverse_free_ratio(int code_blob_type) {
713 CodeHeap* heap = get_code_heap(code_blob_type);
714 if (heap == NULL) {
715 return 0;
716 }
717 IsMethodPredicate isMethodHeap;
718 // Subtract CodeCacheMinimumFreeSpace from capacity of the non-method heap
719 double unallocated_capacity = (double)(heap->unallocated_capacity() - (isMethodHeap(heap) ? 0 : CodeCacheMinimumFreeSpace));
720 double max_capacity = (double)heap->max_capacity();
721 return max_capacity / unallocated_capacity;
722 }
723
724 size_t CodeCache::bytes_allocated_in_freelists() {
725 size_t allocated_bytes = 0;
726 FOR_ALL_HEAPS(it) {
727 allocated_bytes += (*it)->allocated_in_freelist();
728 }
729 return allocated_bytes;
730 }
731
732 int CodeCache::allocated_segments() {
733 int number_of_segments = 0;
734 FOR_ALL_HEAPS(it) {
735 number_of_segments += (*it)->allocated_segments();
736 }
737 return number_of_segments;
738 }
739
740 size_t CodeCache::freelists_length() {
741 size_t length = 0;
742 FOR_ALL_HEAPS(it) {
743 length += (*it)->freelist_length();
744 }
745 return length;
746 }
747
748 void icache_init();
749
750 void CodeCache::initialize() {
751 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
752 #ifdef COMPILER2
753 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
754 #endif
755 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
756 // This was originally just a check of the alignment that caused failure; instead, round
757 // the code cache sizes to the page size. In particular, Solaris is moving to a larger
758 // default page size.
759 CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
760
761 // Reserve space and create heaps
762 initialize_heaps();
763
764 // Initialize ICache flush mechanism
765 // This service is needed for os::register_code_area
766 icache_init();
767
768 // Give OS a chance to register generated code area.
769 // This is used on Windows 64 bit platforms to register
770 // Structured Exception Handlers for our generated code.
771 os::register_code_area((char*)low_bound(), (char*)high_bound());
772 }
773
774 void codeCache_init() {
775 CodeCache::initialize();
776 }
777
778 //------------------------------------------------------------------------------------------------
779
780 int CodeCache::number_of_nmethods_with_dependencies() {
781 return _number_of_nmethods_with_dependencies;
782 }
783
784 void CodeCache::clear_inline_caches() {
785 assert_locked_or_safepoint(CodeCache_lock);
786 FOR_ALL_METHOD_HEAPS(it) {
787 FOR_ALL_ALIVE_BLOBS(cb, *it) {
788 nmethod* nm = (nmethod*)cb;
789 nm->clear_inline_caches();
790 }
791 }
792 }
793
794 // Keeps track of time spent for checking dependencies
795 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
796
797 int CodeCache::mark_for_deoptimization(DepChange& changes) {
798 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
799 int number_of_marked_CodeBlobs = 0;
800
801 // search the hierarchy looking for nmethods which are affected by the loading of this class
802
803 // then search the interfaces this class implements looking for nmethods
804 // which might be dependent on the fact that an interface only had one
805 // implementor.
806 // nmethod::check_all_dependencies works correctly only if no safepoint
807 // can happen.
808 No_Safepoint_Verifier nsv;
809 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
810 Klass* d = str.klass();
811 number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
825 }
826
827
828 #ifdef HOTSWAP
829 int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
830 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
831 int number_of_marked_CodeBlobs = 0;
832
833 // Deoptimize all methods of the evolving class itself
834 Array<Method*>* old_methods = dependee->methods();
835 for (int i = 0; i < old_methods->length(); i++) {
836 ResourceMark rm;
837 Method* old_method = old_methods->at(i);
838 nmethod *nm = old_method->code();
839 if (nm != NULL) {
840 nm->mark_for_deoptimization();
841 number_of_marked_CodeBlobs++;
842 }
843 }
844
845 FOR_ALL_METHOD_HEAPS(it) {
846 FOR_ALL_ALIVE_BLOBS(cb, *it) {
847 nmethod* nm = (nmethod*)cb;
848 if (nm->is_marked_for_deoptimization()) {
849 // ...Already marked in the previous pass; don't count it again.
850 } else if (nm->is_evol_dependent_on(dependee())) {
851 ResourceMark rm;
852 nm->mark_for_deoptimization();
853 number_of_marked_CodeBlobs++;
854 } else {
855 // flush caches in case they refer to a redefined Method*
856 nm->clear_inline_caches();
857 }
858 }
859 }
860
861 return number_of_marked_CodeBlobs;
862 }
863 #endif // HOTSWAP
864
865
866 // Deoptimize all methods
867 void CodeCache::mark_all_nmethods_for_deoptimization() {
868 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
869 FOR_ALL_METHOD_HEAPS(it) {
870 FOR_ALL_ALIVE_BLOBS(cb, *it) {
871 nmethod* nm = (nmethod*)cb;
872 nm->mark_for_deoptimization();
873 }
874 }
875 }
876
877 int CodeCache::mark_for_deoptimization(Method* dependee) {
878 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
879 int number_of_marked_CodeBlobs = 0;
880
881 FOR_ALL_METHOD_HEAPS(it) {
882 FOR_ALL_ALIVE_BLOBS(cb, *it) {
883 nmethod* nm = (nmethod*)cb;
884 if (nm->is_dependent_on_method(dependee)) {
885 ResourceMark rm;
886 nm->mark_for_deoptimization();
887 number_of_marked_CodeBlobs++;
888 }
889 }
890 }
891
892 return number_of_marked_CodeBlobs;
893 }
894
895 void CodeCache::make_marked_nmethods_zombies() {
896 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
897 FOR_ALL_METHOD_HEAPS(it) {
898 FOR_ALL_ALIVE_BLOBS(cb, *it) {
899 nmethod* nm = (nmethod*)cb;
900 if (nm->is_marked_for_deoptimization()) {
901
902 // If the nmethod has already been made non-entrant and it can be converted
903 // then zombie it now. Otherwise make it non-entrant and it will eventually
904 // be zombied when it is no longer seen on the stack. Note that the nmethod
905 // might be "entrant" and not on the stack and so could be zombied immediately
906 // but we can't tell because we don't track it on stack until it becomes
907 // non-entrant.
908
909 if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
910 nm->make_zombie();
911 } else {
912 nm->make_not_entrant();
913 }
914 }
915 }
916 }
917 }
918
919 void CodeCache::make_marked_nmethods_not_entrant() {
920 assert_locked_or_safepoint(CodeCache_lock);
921 FOR_ALL_METHOD_HEAPS(it) {
922 FOR_ALL_ALIVE_BLOBS(cb, *it) {
923 nmethod* nm = (nmethod*)cb;
924 if (nm->is_marked_for_deoptimization()) {
925 nm->make_not_entrant();
926 }
927 }
928 }
929 }
930
931 void CodeCache::verify() {
932 assert_locked_or_safepoint(CodeCache_lock);
933 FOR_ALL_HEAPS(it) {
934 CodeHeap* heap = *it;
935 heap->verify();
936 FOR_ALL_BLOBS(cb, heap) {
937 if (cb->is_alive()) {
938 cb->verify();
939 }
940 }
941 }
942 }
943
944 // A CodeHeap is full. Print out warning and report event.
945 void CodeCache::report_codemem_full(int code_blob_type, bool print) {
946 // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
947 CodeHeap* heap = get_code_heap(code_blob_type);
948
949 if (!heap->was_full() || print) {
950 // Not yet reported for this heap, report
951 heap->report_full();
952 warning("CodeHeap for %s is full. Compiler has been disabled.", CodeCache::get_heap_name(code_blob_type));
953 warning("Try increasing the code heap size using -XX:%s=",
954 (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
955
956 ResourceMark rm;
957 stringStream s;
958 // Dump CodeCache summary into a buffer before locking the tty
959 {
960 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
961 print_summary(&s, true);
962 }
963 ttyLocker ttyl;
964     tty->print("%s", s.as_string());
965 }
966
967 _codemem_full_count++;
968 EventCodeCacheFull event;
969 if (event.should_commit()) {
970 event.set_codeBlobType(code_blob_type);
971 event.set_startAddress((u8)heap->low_boundary());
972 event.set_commitedTopAddress((u8)heap->high());
973 event.set_reservedTopAddress((u8)heap->high_boundary());
974 event.set_entryCount(nof_blobs());
975 event.set_methodCount(nof_nmethods());
976 event.set_adaptorCount(nof_adapters());
977 event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
978 event.set_fullCount(_codemem_full_count);
979 event.commit();
980 }
981 }
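
// Acting on the warning above from the command line (illustrative sizes,
// not recommendations):
//
//   java -XX:NonProfiledCodeHeapSize=128m -XX:ProfiledCodeHeapSize=256m ...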
982
983 void CodeCache::print_memory_overhead() {
984 size_t wasted_bytes = 0;
985
986 FOR_ALL_HEAPS(it) {
987 CodeHeap* heap = *it;
988 CodeBlob* cb;
989 for (cb = (CodeBlob*)heap->first(); cb != NULL; cb = (CodeBlob*)heap->next(cb)) {
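      // Each blob is preceded by a HeapBlock header, and the CodeHeap
      // allocates whole CodeCacheSegmentSize-byte segments; length() is the
      // block length in segments. Everything the segments cover beyond the
      // blob itself (round-up waste plus the header) counts as overhead.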
990 HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
991 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
992 }
993 }
994 // Print bytes that are allocated in the freelist
995 ttyLocker ttl;
996 tty->print_cr("Number of elements in freelist: %d", freelists_length());
997   tty->print_cr("Allocated in freelist: %dkB", (int)(bytes_allocated_in_freelists()/K));
998   tty->print_cr("Unused bytes in CodeBlobs: %dkB", (int)(wasted_bytes/K));
999   tty->print_cr("Segment map size: %dkB", (int)(allocated_segments()/K)); // 1 byte per segment
1000 }
1001
1002 //------------------------------------------------------------------------------------------------
1003 // Non-product version
1004
1005 #ifndef PRODUCT
1006
1007 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
1008   if (PrintCodeCache2) {  // TODO: introduce a dedicated flag for this trace output
1009 ResourceMark rm;
1010 if (size == 0) size = cb->size();
1011     tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, (intptr_t)cb, size);
1012 }
1013 }
1014
1015 void CodeCache::print_internals() {
1016 int nmethodCount = 0;
1017 int runtimeStubCount = 0;
1018 int adapterCount = 0;
1019 int deoptimizationStubCount = 0;
1020 int uncommonTrapStubCount = 0;
1021 int bufferBlobCount = 0;
1022 int total = 0;
1023 int nmethodAlive = 0;
1024 int nmethodNotEntrant = 0;
1025 int nmethodZombie = 0;
1026 int nmethodUnloaded = 0;
1027 int nmethodJava = 0;
1028 int nmethodNative = 0;
1029 int max_nm_size = 0;
1030 ResourceMark rm;
1031
1032
1033 FOR_ALL_HEAPS(it) {
1034 if (Verbose) {
1035 tty->print_cr("## Heap '%s' ##", (*it)->name());
1036 }
1037 FOR_ALL_BLOBS(cb, *it) {
1038 total++;
1039 if (cb->is_nmethod()) {
1040 nmethod* nm = (nmethod*)cb;
1041
1042 if (Verbose && nm->method() != NULL) {
1043 ResourceMark rm;
1044 char *method_name = nm->method()->name_and_sig_as_C_string();
1045 tty->print("%s %d", method_name, nm->comp_level());
1046         if (nm->is_alive())       { tty->print_cr(" alive"); }
1047         if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1048         if (nm->is_zombie())      { tty->print_cr(" zombie"); }
1049 }
1050
1051 nmethodCount++;
1052
1053       if (nm->is_alive())       { nmethodAlive++; }
1054       if (nm->is_not_entrant()) { nmethodNotEntrant++; }
1055       if (nm->is_zombie())      { nmethodZombie++; }
1056       if (nm->is_unloaded())    { nmethodUnloaded++; }
1057       if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
1058
1059       if (nm->method() != NULL && nm->is_java_method()) {
1060 nmethodJava++;
1061 max_nm_size = MAX2(max_nm_size, nm->size());
1062 }
1063 } else if (cb->is_runtime_stub()) {
1064 runtimeStubCount++;
1065 } else if (cb->is_deoptimization_stub()) {
1066 deoptimizationStubCount++;
1067 } else if (cb->is_uncommon_trap_stub()) {
1068 uncommonTrapStubCount++;
1069 } else if (cb->is_adapter_blob()) {
1070 adapterCount++;
1071 } else if (cb->is_buffer_blob()) {
1072 bufferBlobCount++;
1073 }
1074 }
1075 }
1076
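  // Build a histogram of nmethod sizes for live Java methods, using
  // 512-byte buckets; it is printed after the per-type counters below.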
1077 int bucketSize = 512;
1078 int bucketLimit = max_nm_size / bucketSize + 1;
1079 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1080 memset(buckets, 0, sizeof(int) * bucketLimit);
1081
1082 FOR_ALL_METHOD_HEAPS(it) {
1083 FOR_ALL_BLOBS(cb, *it) {
1084 nmethod* nm = (nmethod*)cb;
1085       if (nm->method() != NULL && nm->is_java_method()) {
1086 buckets[nm->size() / bucketSize]++;
1087 }
1088 }
1089 }
1090
1091   tty->print_cr("Code Cache Entries (total of %d)", total);
1092   tty->print_cr("-------------------------------------------------");
1093   tty->print_cr("nmethods: %d", nmethodCount);
1094   tty->print_cr("\talive: %d", nmethodAlive);
1095   tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
1096   tty->print_cr("\tzombie: %d", nmethodZombie);
1097   tty->print_cr("\tunloaded: %d", nmethodUnloaded);
1098   tty->print_cr("\tjava: %d", nmethodJava);
1099   tty->print_cr("\tnative: %d", nmethodNative);
1100   tty->print_cr("runtime_stubs: %d", runtimeStubCount);
1101   tty->print_cr("adapters: %d", adapterCount);
1102   tty->print_cr("buffer blobs: %d", bufferBlobCount);
1103   tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
1104   tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
1105 tty->print_cr("\nnmethod size distribution (non-zombie java)");
1106 tty->print_cr("-------------------------------------------------");
1107
1108   for (int i = 0; i < bucketLimit; ++i) {
1109     if (buckets[i] != 0) {
1110       tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
1111       tty->fill_to(40);
1112       tty->print_cr("%d", buckets[i]);
1113 }
1114 }
1115
1116 FREE_C_HEAP_ARRAY(int, buckets, mtCode);
1117 print_memory_overhead();
1118 }
1119
1120 #endif // !PRODUCT
1121
1122 void CodeCache::print() {
1123 print_summary(tty);
1124
1125 #ifndef PRODUCT
1126 if (!Verbose) return;
1127
1128 CodeBlob_sizes live;
1129 CodeBlob_sizes dead;
1130
1131 FOR_ALL_HEAPS(it) {
1132 FOR_ALL_BLOBS(cb, *it) {
1133 if (!cb->is_alive()) {
1134 dead.add(cb);
1135 } else {
1136 live.add(cb);
1137 }
1138 }
1139 }
1140
1141 tty->print_cr("CodeCache:");
1142 tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1143
1144 if (!live.is_empty()) {
1145 live.print("live");
1146 }
1147 if (!dead.is_empty()) {
1148 dead.print("dead");
1149 }
1150
1151 if (WizardMode) {
1152 // print the oop_map usage
1153 int code_size = 0;
1154 int number_of_blobs = 0;
1155 int number_of_oop_maps = 0;
1156 int map_size = 0;
1157 FOR_ALL_HEAPS(it) {
1158 FOR_ALL_BLOBS(cb, *it) {
1159 if (cb->is_alive()) {
1160 number_of_blobs++;
1161 code_size += cb->code_size();
1162 OopMapSet* set = cb->oop_maps();
1163 if (set != NULL) {
1164 number_of_oop_maps += set->size();
1165 map_size += set->heap_size();
1166 }
1167 }
1168 }
1169 }
1170 tty->print_cr("OopMaps");
1171 tty->print_cr(" #blobs = %d", number_of_blobs);
1172 tty->print_cr(" code size = %d", code_size);
1173 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1174 tty->print_cr(" map size = %d", map_size);
1175 }
1176
1177 #endif // !PRODUCT
1178 }
1179
1180 void CodeCache::print_summary(outputStream* st, bool detailed) {
1181 st->print_cr("CodeCache Summary:");
1182 FOR_ALL_HEAPS(it) {
1183 CodeHeap* heap = (*it);
1184 size_t total = (heap->high_boundary() - heap->low_boundary());
1185 st->print_cr("Heap '%s': size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1186 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1187 heap->name(), total/K, (total - heap->unallocated_capacity())/K,
1188 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
1189
1190 if (detailed) {
1191       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1192                    (intptr_t)heap->low_boundary(),
1193                    (intptr_t)heap->high(),
1194                    (intptr_t)heap->high_boundary());
1195
1196 }
1197 }
1198
1199 if (detailed) {
1200 log_state(st);
1201     st->print_cr(" compilation: %s",
1202                  CompileBroker::should_compile_new_jobs() ? "enabled" :
1203                    (Arguments::mode() == Arguments::_int ? "disabled (interpreter mode)"
1204                                                          : "disabled (not enough contiguous free space left)"));
1205 }
1206 }
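
// Example of the non-detailed output (illustrative values; heap names depend
// on the configured code heaps):
//
//   CodeCache Summary:
//   Heap 'non-profiled nmethods': size=131072Kb used=4711Kb max_used=4711Kb free=126360Kb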
1207
1208 void CodeCache::log_state(outputStream* st) {
1209 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1210 " adapters='" UINT32_FORMAT "'",
1211 nof_blobs(), nof_nmethods(), nof_adapters());
1212 }
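
// Example of the emitted fragment (illustrative counts):
//
//   total_blobs='2763' nmethods='1612' adapters='1059'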
1213
|