--- old/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java 2013-10-11 15:44:33.469819747 +0200 +++ new/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java 2013-10-11 15:44:33.265819754 +0200 @@ -32,12 +32,10 @@ import sun.jvm.hotspot.utilities.*; public class CodeCache { - private static AddressField heapField; - private static AddressField scavengeRootNMethodsField; + private static GrowableArray heapArray; + private static AddressField scavengeRootNMethodsField; private static VirtualConstructor virtualConstructor; - private CodeHeap heap; - static { VM.registerVMInitializedObserver(new Observer() { public void update(Observable o, Object data) { @@ -49,7 +47,10 @@ private static synchronized void initialize(TypeDataBase db) { Type type = db.lookupType("CodeCache"); - heapField = type.getAddressField("_heap"); + // Get array of CodeHeaps + AddressField heapsField = type.getAddressField("_heaps"); + heapArray = GrowableArray.create(heapsField.getValue(), new StaticBaseConstructor(CodeHeap.class)); + scavengeRootNMethodsField = type.getAddressField("_scavenge_root_nmethods"); virtualConstructor = new VirtualConstructor(db); @@ -67,16 +68,17 @@ } } - public CodeCache() { - heap = (CodeHeap) VMObjectFactory.newObject(CodeHeap.class, heapField.getValue()); - } - public NMethod scavengeRootMethods() { return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootNMethodsField.getValue()); } public boolean contains(Address p) { - return getHeap().contains(p); + for (int i = 0; i < heapArray.length(); ++i) { + if (heapArray.at(i).contains(p)) { + return true; + } + } + return false; } /** When VM.getVM().isDebugging() returns true, this behaves like @@ -97,14 +99,24 @@ public CodeBlob findBlobUnsafe(Address start) { CodeBlob result = null; - + CodeHeap containing_heap = null; + for (int i = 0; i < heapArray.length(); ++i) { + if (heapArray.at(i).contains(start)) { + containing_heap = heapArray.at(i); + break; + } + } + if (containing_heap == null) { + return null; + } + try { - result = (CodeBlob) virtualConstructor.instantiateWrapperFor(getHeap().findStart(start)); + result = (CodeBlob) virtualConstructor.instantiateWrapperFor(containing_heap.findStart(start)); } catch (WrongTypeException wte) { Address cbAddr = null; try { - cbAddr = getHeap().findStart(start); + cbAddr = containing_heap.findStart(start); } catch (Exception findEx) { findEx.printStackTrace(); @@ -167,31 +179,32 @@ } public void iterate(CodeCacheVisitor visitor) { - CodeHeap heap = getHeap(); - Address ptr = heap.begin(); - Address end = heap.end(); - - visitor.prologue(ptr, end); + visitor.prologue(lowBound(), highBound()); CodeBlob lastBlob = null; - while (ptr != null && ptr.lessThan(end)) { - try { - // Use findStart to get a pointer inside blob other findBlob asserts - CodeBlob blob = findBlobUnsafe(heap.findStart(ptr)); - if (blob != null) { - visitor.visit(blob); - if (blob == lastBlob) { - throw new InternalError("saw same blob twice"); + + for (int i = 0; i < heapArray.length(); ++i) { + CodeHeap current_heap = heapArray.at(i); + Address ptr = current_heap.begin(); + while (ptr != null && ptr.lessThan(current_heap.end())) { + try { + // Use findStart to get a pointer inside blob other findBlob asserts + CodeBlob blob = findBlobUnsafe(current_heap.findStart(ptr)); + if (blob != null) { + visitor.visit(blob); + if (blob == lastBlob) { + throw new InternalError("saw same blob twice"); + } + lastBlob = blob; } - lastBlob = blob; + } catch (RuntimeException e) { + e.printStackTrace(); } - } catch 
(RuntimeException e) { - e.printStackTrace(); - } - Address next = heap.nextBlock(ptr); - if (next != null && next.lessThan(ptr)) { - throw new InternalError("pointer moved backwards"); + Address next = current_heap.nextBlock(ptr); + if (next != null && next.lessThan(ptr)) { + throw new InternalError("pointer moved backwards"); + } + ptr = next; } - ptr = next; } visitor.epilogue(); } @@ -199,8 +212,24 @@ //-------------------------------------------------------------------------------- // Internals only below this point // - - private CodeHeap getHeap() { - return heap; + + private Address lowBound() { + Address low = heapArray.at(0).begin(); + for (int i = 1; i < heapArray.length(); ++i) { + if (heapArray.at(i).begin().lessThan(low)) { + low = heapArray.at(i).begin(); + } + } + return low; + } + + private Address highBound() { + Address high = heapArray.at(0).end(); + for (int i = 1; i < heapArray.length(); ++i) { + if (heapArray.at(i).end().greaterThan(high)) { + high = heapArray.at(i).end(); + } + } + return high; } } --- old/src/cpu/zero/vm/shark_globals_zero.hpp 2013-10-11 15:44:33.473819747 +0200 +++ new/src/cpu/zero/vm/shark_globals_zero.hpp 2013-10-11 15:44:33.285819754 +0200 @@ -55,6 +55,9 @@ define_pd_global(intx, NewSizeThreadIncrease, 4*K ); define_pd_global(intx, InitialCodeCacheSize, 160*K); define_pd_global(intx, ReservedCodeCacheSize, 32*M ); +define_pd_global(intx, NonProfiledCodeHeapSize, 28*M ); +define_pd_global(intx, ProfiledCodeHeapSize, 0 ); // No profiled heap needed without TieredCompilation +define_pd_global(intx, NonMethodCodeHeapSize, 4*M ); define_pd_global(bool, ProfileInterpreter, false); define_pd_global(intx, CodeCacheExpansionSize, 32*K ); define_pd_global(uintx, CodeCacheMinBlockLength, 1 ); --- old/src/cpu/sparc/vm/c2_globals_sparc.hpp 2013-10-11 15:44:33.501819746 +0200 +++ new/src/cpu/sparc/vm/c2_globals_sparc.hpp 2013-10-11 15:44:33.273819754 +0200 @@ -75,6 +75,9 @@ // InitialCodeCacheSize derived from specjbb2000 run. define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize define_pd_global(intx, ReservedCodeCacheSize, 48*M); +define_pd_global(intx, NonProfiledCodeHeapSize, 30*M); +define_pd_global(intx, ProfiledCodeHeapSize, 14*M); +define_pd_global(intx, NonMethodCodeHeapSize, 4*M ); define_pd_global(intx, CodeCacheExpansionSize, 64*K); // Ergonomics related flags @@ -83,6 +86,9 @@ // InitialCodeCacheSize derived from specjbb2000 run. 
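As an illustration (not taken from the patch itself), the new per-heap defaults above are chosen so that they sum to the existing ReservedCodeCacheSize default: the 64-bit C2 tables shown above for sparc and x86 split the 48M reservation into 30M non-profiled, 14M profiled and 4M non-method space. A minimal standalone check of that arithmetic, with the flag names used only as illustrative constants:

    #include <cstddef>
    // Values assumed from the 64-bit C2 defaults introduced by this patch.
    constexpr std::size_t M            = 1024 * 1024;
    constexpr std::size_t non_method   =  4 * M;   // NonMethodCodeHeapSize
    constexpr std::size_t profiled     = 14 * M;   // ProfiledCodeHeapSize
    constexpr std::size_t non_profiled = 30 * M;   // NonProfiledCodeHeapSize
    static_assert(non_method + profiled + non_profiled == 48 * M,  // ReservedCodeCacheSize
                  "per-heap defaults must add up to the reserved code cache size");

When only -XX:ReservedCodeCacheSize is set, CodeCache::initialize_heaps() (further down, in codeCache.cpp) keeps the NonMethodCodeHeapSize default and gives 2/3 of the remaining space to the non-profiled heap and 1/3 to the profiled heap.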
define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize define_pd_global(intx, ReservedCodeCacheSize, 32*M); +define_pd_global(intx, NonProfiledCodeHeapSize, 19*M); +define_pd_global(intx, ProfiledCodeHeapSize, 9*M ); +define_pd_global(intx, NonMethodCodeHeapSize, 4*M ); define_pd_global(intx, CodeCacheExpansionSize, 32*K); // Ergonomics related flags define_pd_global(uint64_t,MaxRAM, 4ULL*G); --- old/src/cpu/x86/vm/c2_globals_x86.hpp 2013-10-11 15:44:33.489819746 +0200 +++ new/src/cpu/x86/vm/c2_globals_x86.hpp 2013-10-11 15:44:33.269819754 +0200 @@ -85,6 +85,9 @@ define_pd_global(bool, OptoBundling, false); define_pd_global(intx, ReservedCodeCacheSize, 48*M); +define_pd_global(intx, NonProfiledCodeHeapSize, 30*M); // 4Mb non-method heap +define_pd_global(intx, ProfiledCodeHeapSize, 14*M); +define_pd_global(intx, NonMethodCodeHeapSize, 4*M ); define_pd_global(uintx, CodeCacheMinBlockLength, 4); define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); --- old/src/os/bsd/dtrace/generateJvmOffsets.cpp 2013-10-11 15:44:33.493819746 +0200 +++ new/src/os/bsd/dtrace/generateJvmOffsets.cpp 2013-10-11 15:44:33.285819754 +0200 @@ -67,7 +67,7 @@ * we link this program with -z nodefs . * * But for 'debug1' and 'fastdebug1' we still have to provide - * a particular workaround for the following symbols bellow. + * a particular workaround for the following symbols below. * It will be good to find out a generic way in the future. */ @@ -87,21 +87,24 @@ #endif /* ASSERT */ #endif /* COMPILER1 */ -#define GEN_OFFS(Type,Name) \ +#define GEN_OFFS_NAME(Type,Name,OutputType) \ switch(gen_variant) { \ case GEN_OFFSET: \ - printf("#define OFFSET_%-33s %ld\n", \ - #Type #Name, offset_of(Type, Name)); \ + printf("#define OFFSET_%-33s %ld\n", \ + #OutputType #Name, offset_of(Type, Name)); \ break; \ case GEN_INDEX: \ printf("#define IDX_OFFSET_%-33s %d\n", \ - #Type #Name, index++); \ + #OutputType #Name, index++); \ break; \ case GEN_TABLE: \ - printf("\tOFFSET_%s,\n", #Type #Name); \ + printf("\tOFFSET_%s,\n", #OutputType #Name); \ break; \ } +#define GEN_OFFS(Type,Name) \ + GEN_OFFS_NAME(Type,Name,Type) + #define GEN_SIZE(Type) \ switch(gen_variant) { \ case GEN_OFFSET: \ @@ -246,6 +249,11 @@ GEN_OFFS(VirtualSpace, _high); printf("\n"); + /* We need to use different names here because of the template parameter */ + GEN_OFFS_NAME(GrowableArray, _data, GrowableArray_CodeHeap); + GEN_OFFS_NAME(GrowableArray, _len, GrowableArray_CodeHeap); + printf("\n"); + GEN_OFFS(CodeBlob, _name); GEN_OFFS(CodeBlob, _header_size); GEN_OFFS(CodeBlob, _content_offset); --- old/make/solaris/makefiles/mapfile-vers-COMPILER1 2013-10-11 15:44:33.537819744 +0200 +++ new/make/solaris/makefiles/mapfile-vers-COMPILER1 2013-10-11 15:44:33.309819753 +0200 @@ -29,7 +29,7 @@ SUNWprivate_1.1 { global: # Dtrace support - __1cJCodeCacheF_heap_; + __1cJCodeCacheG_heaps_; __1cIUniverseO_collectedHeap_; __1cGMethodG__vtbl_; __1cHnmethodG__vtbl_; --- old/make/solaris/makefiles/mapfile-vers-COMPILER2 2013-10-11 15:44:33.493819746 +0200 +++ new/make/solaris/makefiles/mapfile-vers-COMPILER2 2013-10-11 15:44:33.269819754 +0200 @@ -29,7 +29,7 @@ SUNWprivate_1.1 { global: # Dtrace support - __1cJCodeCacheF_heap_; + __1cJCodeCacheG_heaps_; __1cIUniverseO_collectedHeap_; __1cGMethodG__vtbl_; __1cHnmethodG__vtbl_; --- old/src/os/bsd/dtrace/libjvm_db.c 2013-10-11 15:44:33.529819744 +0200 +++ new/src/os/bsd/dtrace/libjvm_db.c 2013-10-11 15:44:33.285819754 +0200 @@ -150,16 +150,18 @@ uint64_t 
Use_Compressed_Oops_address; uint64_t Universe_narrow_oop_base_address; uint64_t Universe_narrow_oop_shift_address; - uint64_t CodeCache_heap_address; + uint64_t CodeCache_heaps_address; /* Volatiles */ uint8_t Use_Compressed_Oops; uint64_t Universe_narrow_oop_base; uint32_t Universe_narrow_oop_shift; - uint64_t CodeCache_low; - uint64_t CodeCache_high; - uint64_t CodeCache_segmap_low; - uint64_t CodeCache_segmap_high; + // Code cache heaps + int32_t Number_of_heaps; + uint64_t* Heap_low; + uint64_t* Heap_high; + uint64_t* Heap_segmap_low; + uint64_t* Heap_segmap_high; int32_t SIZE_CodeCache_log2_segment; @@ -275,8 +277,9 @@ } if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) { - if (strcmp("_heap", vmp->fieldName) == 0) { - err = read_pointer(J, vmp->address, &J->CodeCache_heap_address); + /* Read _heaps field of type GrowableArray* */ + if (strcmp("_heaps", vmp->fieldName) == 0) { + err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address); } } else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) { if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) { @@ -315,7 +318,9 @@ } static int read_volatiles(jvm_agent_t* J) { - uint64_t ptr; + int i; + uint64_t array_data; + uint64_t code_heap_address; int err; err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address); @@ -331,20 +336,43 @@ err = ps_pread(J->P, J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t)); CHECK_FAIL(err); - err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory + - OFFSET_VirtualSpace_low, &J->CodeCache_low); - CHECK_FAIL(err); - err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory + - OFFSET_VirtualSpace_high, &J->CodeCache_high); - CHECK_FAIL(err); - err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap + - OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low); - CHECK_FAIL(err); - err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap + - OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high); - CHECK_FAIL(err); + /* CodeCache_heaps_address points to GrowableArray, read _data field + pointing to the first entry of type CodeCache* in the array */ + err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data); + /* Read _len field containing the number of code heaps */ + err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len, + &J->Number_of_heaps, sizeof(J->Number_of_heaps)); + + /* Allocate memory for heap configurations */ + J->Heap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t)); + J->Heap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t)); + J->Heap_segmap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t)); + J->Heap_segmap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t)); + + /* Read code heap configurations */ + for (i = 0; i < J->Number_of_heaps; ++i) { + /* Read address of heap */ + err = read_pointer(J, array_data, &code_heap_address); + CHECK_FAIL(err); + + err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory + + OFFSET_VirtualSpace_low, &J->Heap_low[i]); + CHECK_FAIL(err); + err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory + + OFFSET_VirtualSpace_high, &J->Heap_high[i]); + CHECK_FAIL(err); + err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap + + OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]); + CHECK_FAIL(err); + err = read_pointer(J, code_heap_address + 
OFFSET_CodeHeap_segmap + + OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]); + CHECK_FAIL(err); + + /* Increment pointer to next entry */ + array_data = array_data + POINTER_SIZE; + } - err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size, + err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size, &J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment)); CHECK_FAIL(err); @@ -354,46 +382,57 @@ return err; } +static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) { + return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]); +} static int codecache_contains(jvm_agent_t* J, uint64_t ptr) { - /* make sure the code cache is up to date */ - return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high); + int i; + for (i = 0; i < J->Number_of_heaps; ++i) { + if (codeheap_contains(i, J, ptr)) { + return 1; + } + } + return 0; } -static uint64_t segment_for(jvm_agent_t* J, uint64_t p) { - return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment; +static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) { + return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment; } -static uint64_t block_at(jvm_agent_t* J, int i) { - return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment); +static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) { + return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment); } static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) { int err; + int i; - *startp = 0; - if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) { - int32_t used; - uint64_t segment = segment_for(J, ptr); - uint64_t block = J->CodeCache_segmap_low; - uint8_t tag; - err = ps_pread(J->P, block + segment, &tag, sizeof(tag)); - CHECK_FAIL(err); - if (tag == 0xff) - return PS_OK; - while (tag > 0) { + for (i = 0; i < J->Number_of_heaps; ++i) { + *startp = 0; + if (codeheap_contains(i, J, ptr)) { + int32_t used; + uint64_t segment = segment_for(i, J, ptr); + uint64_t block = J->Heap_segmap_low[i]; + uint8_t tag; err = ps_pread(J->P, block + segment, &tag, sizeof(tag)); CHECK_FAIL(err); - segment -= tag; - } - block = block_at(J, segment); - err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used)); - CHECK_FAIL(err); - if (used) { - *startp = block + SIZE_HeapBlockHeader; + if (tag == 0xff) + return PS_OK; + while (tag > 0) { + err = ps_pread(J->P, block + segment, &tag, sizeof(tag)); + CHECK_FAIL(err); + segment -= tag; + } + block = block_at(i, J, segment); + err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used)); + CHECK_FAIL(err); + if (used) { + *startp = block + SIZE_HeapBlockHeader; + } } + return PS_OK; } - return PS_OK; fail: return -1; --- old/make/solaris/makefiles/mapfile-vers-TIERED 2013-10-11 15:44:33.549819744 +0200 +++ new/make/solaris/makefiles/mapfile-vers-TIERED 2013-10-11 15:44:33.341819752 +0200 @@ -29,7 +29,7 @@ SUNWprivate_1.1 { global: # Dtrace support - __1cJCodeCacheF_heap_; + __1cJCodeCacheG_heaps_; __1cIUniverseO_collectedHeap_; __1cGMethodG__vtbl_; __1cHnmethodG__vtbl_; --- old/src/os/bsd/dtrace/jhelper.d 2013-10-11 15:44:33.521819745 +0200 +++ new/src/os/bsd/dtrace/jhelper.d 2013-10-11 15:44:33.297819753 +0200 @@ -43,7 +43,9 @@ extern pointer __JvmOffsets; -extern pointer __1cJCodeCacheF_heap_; +/* GrowableArray* */ +extern pointer __1cJCodeCacheG_heaps_; + extern pointer __1cIUniverseO_collectedHeap_; extern pointer __1cHnmethodG__vtbl_; @@ -95,8 +97,8 @@ /!init_done && 
!this->done/ { MARK_LINE; - init_done = 1; + copyin_offset(POINTER_SIZE); copyin_offset(COMPILER); copyin_offset(OFFSET_CollectedHeap_reserved); copyin_offset(OFFSET_MemRegion_start); @@ -122,6 +124,9 @@ copyin_offset(OFFSET_CodeHeap_segmap); copyin_offset(OFFSET_CodeHeap_log2_segment_size); + copyin_offset(OFFSET_GrowableArray_CodeHeap_data); + copyin_offset(OFFSET_GrowableArray_CodeHeap_len); + copyin_offset(OFFSET_VirtualSpace_low); copyin_offset(OFFSET_VirtualSpace_high); @@ -152,26 +157,14 @@ #error "Don't know architecture" #endif - this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_); - - /* Reading volatile values */ - this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address + - OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); - - this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address + - OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); - - this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address + - OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low); - - this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address + - OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high); - - this->CodeHeap_log2_segment_size = copyin_uint32( - this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size); - - this->Method_vtbl = (pointer) &``__1cNMethodG__vtbl_; + /* Read address of GrowableArray */ + this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_); + /* Read address of _data array field in GrowableArray */ + this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data); + this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len); + this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_; + /* * Get Java heap bounds */ @@ -187,21 +180,152 @@ this->heap_end = this->heap_start + this->heap_size; } +/* + * IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in + * the code cache. If more code heaps are added the following probes have to + * be extended. This is done by simply adding a probe to get the heap bounds + * and another probe to set the code heap address of the newly created heap. 
+ */ + +/* + * ----- BEGIN: Get bounds of code heaps ----- + */ +dtrace:helper:ustack: +/init_done < 1 && this->number_of_heaps >= 1 && !this->done/ +{ + MARK_LINE; + /* CodeHeap 1 */ + init_done = 1; + this->code_heap1_address = copyin_ptr(this->code_heaps_array_address); + this->code_heap1_low = copyin_ptr(this->code_heap1_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap1_high = copyin_ptr(this->code_heap1_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); +} + +dtrace:helper:ustack: +/init_done < 2 && this->number_of_heaps >= 2 && !this->done/ +{ + MARK_LINE; + /* CodeHeap 2 */ + init_done = 2; + this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; + this->code_heap2_address = copyin_ptr(this->code_heaps_array_address); + this->code_heap2_low = copyin_ptr(this->code_heap2_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap2_high = copyin_ptr(this->code_heap2_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); +} + +dtrace:helper:ustack: +/init_done < 3 && this->number_of_heaps >= 3 && !this->done/ +{ + /* CodeHeap 3 */ + init_done = 3; + this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; + this->code_heap3_address = copyin_ptr(this->code_heaps_array_address); + this->code_heap3_low = copyin_ptr(this->code_heap3_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap3_high = copyin_ptr(this->code_heap3_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); +} + +dtrace:helper:ustack: +/init_done < 4 && this->number_of_heaps >= 4 && !this->done/ +{ + /* CodeHeap 4 */ + init_done = 4; + this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; + this->code_heap4_address = copyin_ptr(this->code_heaps_array_address); + this->code_heap4_low = copyin_ptr(this->code_heap4_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap4_high = copyin_ptr(this->code_heap4_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); +} + +dtrace:helper:ustack: +/init_done < 5 && this->number_of_heaps >= 5 && !this->done/ +{ + /* CodeHeap 5 */ + init_done = 5; + this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; + this->code_heap5_address = copyin_ptr(this->code_heaps_array_address); + this->code_heap5_low = copyin_ptr(this->code_heap5_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap5_high = copyin_ptr(this->code_heap5_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); +} +/* + * ----- END: Get bounds of code heaps ----- + */ + +/* + * ----- BEGIN: Get address of the code heap pc points to ----- + */ +dtrace:helper:ustack: +/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/ +{ + MARK_LINE; + this->codecache = 1; + this->code_heap_address = this->code_heap1_address; +} + +dtrace:helper:ustack: +/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/ +{ + MARK_LINE; + this->codecache = 1; + this->code_heap_address = this->code_heap2_address; +} + dtrace:helper:ustack: -/!this->done && -this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/ +/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/ { MARK_LINE; this->codecache = 1; + this->code_heap_address = this->code_heap3_address; +} + +dtrace:helper:ustack: 
+/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/ +{ + MARK_LINE; + this->codecache = 1; + this->code_heap_address = this->code_heap4_address; +} + +dtrace:helper:ustack: +/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/ +{ + MARK_LINE; + this->codecache = 1; + this->code_heap_address = this->code_heap5_address; +} +/* + * ----- END: Get address of the code heap pc points to ----- + */ + +dtrace:helper:ustack: +/!this->done && this->codecache/ +{ + MARK_LINE; + /* + * Get code heap configuration + */ + this->code_heap_low = copyin_ptr(this->code_heap_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap_segmap_low = copyin_ptr(this->code_heap_address + + OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low); + this->code_heap_log2_segment_size = copyin_uint32( + this->code_heap_address + OFFSET_CodeHeap_log2_segment_size); /* - * Find start. + * Find start */ - this->segment = (this->pc - this->CodeCache_low) >> - this->CodeHeap_log2_segment_size; - this->block = this->CodeCache_segmap_low; + this->segment = (this->pc - this->code_heap_low) >> + this->code_heap_log2_segment_size; + this->block = this->code_heap_segmap_low; this->tag = copyin_uchar(this->block + this->segment); - "second"; } dtrace:helper:ustack: @@ -256,8 +380,8 @@ /!this->done && this->codecache/ { MARK_LINE; - this->block = this->CodeCache_low + - (this->segment << this->CodeHeap_log2_segment_size); + this->block = this->code_heap_low + + (this->segment << this->code_heap_log2_segment_size); this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used); } --- old/src/cpu/x86/vm/c1_globals_x86.hpp 2013-10-11 15:44:33.565819743 +0200 +++ new/src/cpu/x86/vm/c1_globals_x86.hpp 2013-10-11 15:44:33.317819752 +0200 @@ -48,6 +48,9 @@ define_pd_global(intx, NewSizeThreadIncrease, 4*K ); define_pd_global(intx, InitialCodeCacheSize, 160*K); define_pd_global(intx, ReservedCodeCacheSize, 32*M ); +define_pd_global(intx, NonProfiledCodeHeapSize, 28*M ); +define_pd_global(intx, ProfiledCodeHeapSize, 0 ); // No profiled heap needed without TieredCompilation +define_pd_global(intx, NonMethodCodeHeapSize, 4*M ); define_pd_global(bool, ProfileInterpreter, false); define_pd_global(intx, CodeCacheExpansionSize, 32*K ); define_pd_global(uintx, CodeCacheMinBlockLength, 1); --- old/src/cpu/sparc/vm/c1_globals_sparc.hpp 2013-10-11 15:44:33.525819745 +0200 +++ new/src/cpu/sparc/vm/c1_globals_sparc.hpp 2013-10-11 15:44:33.301819753 +0200 @@ -48,6 +48,9 @@ define_pd_global(intx, FreqInlineSize, 325 ); define_pd_global(bool, ResizeTLAB, true ); define_pd_global(intx, ReservedCodeCacheSize, 32*M ); +define_pd_global(intx, NonProfiledCodeHeapSize, 28*M ); +define_pd_global(intx, ProfiledCodeHeapSize, 0 ); // No profiled heap needed without TieredCompilation +define_pd_global(intx, NonMethodCodeHeapSize, 4*M ); define_pd_global(intx, CodeCacheExpansionSize, 32*K ); define_pd_global(uintx, CodeCacheMinBlockLength, 1); define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); --- old/src/os/solaris/dtrace/generateJvmOffsets.cpp 2013-10-11 15:44:33.837819733 +0200 +++ new/src/os/solaris/dtrace/generateJvmOffsets.cpp 2013-10-11 15:44:33.781819735 +0200 @@ -82,21 +82,24 @@ #endif /* ASSERT */ #endif /* COMPILER1 */ -#define GEN_OFFS(Type,Name) \ +#define GEN_OFFS_NAME(Type,Name,OutputType) \ switch(gen_variant) { \ case GEN_OFFSET: \ printf("#define OFFSET_%-33s %d\n", \ - #Type #Name, 
offset_of(Type, Name)); \ + #OutputType #Name, offset_of(Type, Name)); \ break; \ case GEN_INDEX: \ printf("#define IDX_OFFSET_%-33s %d\n", \ - #Type #Name, index++); \ + #OutputType #Name, index++); \ break; \ case GEN_TABLE: \ - printf("\tOFFSET_%s,\n", #Type #Name); \ + printf("\tOFFSET_%s,\n", #OutputType #Name); \ break; \ } +#define GEN_OFFS(Type,Name) \ + GEN_OFFS_NAME(Type,Name,Type) + #define GEN_SIZE(Type) \ switch(gen_variant) { \ case GEN_OFFSET: \ @@ -241,6 +244,11 @@ GEN_OFFS(VirtualSpace, _high); printf("\n"); + /* We need to use different names here because of the template parameter */ + GEN_OFFS_NAME(GrowableArray, _data, GrowableArray_CodeHeap); + GEN_OFFS_NAME(GrowableArray, _len, GrowableArray_CodeHeap); + printf("\n"); + GEN_OFFS(CodeBlob, _name); GEN_OFFS(CodeBlob, _header_size); GEN_OFFS(CodeBlob, _content_offset); --- old/src/share/vm/code/codeCache.hpp 2013-10-11 15:44:35.409819673 +0200 +++ new/src/share/vm/code/codeCache.hpp 2013-10-11 15:44:35.213819681 +0200 @@ -35,96 +35,115 @@ // code, e.g., compiled java methods, runtime stubs, transition frames, etc. // The entries in the CodeCache are all CodeBlob's. -// Implementation: -// - Each CodeBlob occupies one chunk of memory. -// - Like the offset table in oldspace the zone has at table for -// locating a method given a addess of an instruction. +// -- Implementation -- +// The CodeCache consists of multiple CodeHeaps, each of which contains +// CodeBlobs of a specific CodeBlobType. Currently heaps for the following +// types are available: +// - Non-methods: Non-methods like Buffers, Adapters and Runtime Stubs +// - Profiled nmethods: nmethods that are profiled, i.e., those +// compiled at tier 2 or 3 +// - Non-Profiled nmethods: nmethods that are not profiled, i.e., those +// compiled at tier 1 or 4 and native methods +// +// Depending on the availability of compilers and TieredCompilation being +// deactivated there may be fewer heaps. The size of the heaps depends on +// the values of ReservedCodeCacheSize, NonProfiledCodeHeapSize and +// ProfiledCodeHeapSize (see CodeCache::initialize_heaps for details). +// +// All methods of the CodeCache accepting a CodeBlobType only apply to +// CodeBlobs of the given type. For example, iteration over the +// CodeBlobs of a specific type can be done by using CodeCache::first_blob +// and CodeCache::next_blob and providing the corresponding CodeBlobType. +// +// IMPORTANT: If you add new CodeHeaps to the code cache or change the +// existing ones, make sure to adapt the dtrace scripts (jhelper.d) for +// Solaris and BSD. class OopClosure; class DepChange; +class HeapConfiguration; class CodeCache : AllStatic { friend class VMStructs; private: - // CodeHeap is malloc()'ed at startup and never deleted during shutdown, - // so that the generated assembly code is always there when it's needed. - // This may cause memory leak, but is necessary, for now. See 4423824, - // 4422213 or 4436291 for details. 
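The "-- Implementation --" comment above states that per-type iteration goes through CodeCache::first_blob and CodeCache::next_blob with a CodeBlobType. A minimal sketch of such a loop, using the public wrappers declared later in this header; the visit callback is a made-up placeholder, and the usual CodeCache_lock / safepoint rule still applies:

    // Sketch only: walk all blobs in the profiled-nmethod heap.
    static void for_each_profiled_blob(void (*visit)(CodeBlob*)) {
      assert_locked_or_safepoint(CodeCache_lock);
      for (CodeBlob* cb = CodeCache::first_blob(CodeBlobType::MethodProfiled);
           cb != NULL;
           cb = CodeCache::next_blob(cb, CodeBlobType::MethodProfiled)) {
        visit(cb);
      }
    }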
- static CodeHeap * _heap; - static int _number_of_blobs; - static int _number_of_adapters; - static int _number_of_nmethods; - static int _number_of_nmethods_with_dependencies; - static bool _needs_cache_clean; - static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() + // Predicate returning true for all method heaps + class IsMethodPredicate { + public: + bool operator()(const CodeHeap* heap) { + return heap->accepts(CodeBlobType::MethodProfiled) + || heap->accepts(CodeBlobType::MethodNonProfiled); + } + }; + + // CodeHeaps of the cache + static GrowableArray<CodeHeap*>* _heaps; + + static address _low_bound; // Lower bound of CodeHeap addresses + static address _high_bound; // Upper bound of CodeHeap addresses + static int _number_of_blobs; // Total number of CodeBlobs in the cache + static int _number_of_adapters; // Total number of Adapters in the cache + static int _number_of_nmethods; // Total number of nmethods in the cache + static int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies + static bool _needs_cache_clean; // True if inline caches of the nmethods need to be flushed + static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() + static nmethod* _saved_nmethods; // Linked list of speculatively disconnected nmethods. + static int _codemem_full_count; // Number of times a CodeHeap in the cache was full + // CodeHeap verification static void verify_if_often() PRODUCT_RETURN; static void mark_scavenge_root_nmethods() PRODUCT_RETURN; static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN; - static int _codemem_full_count; + // CodeHeap management + static void initialize_heaps(); // Initializes the CodeHeaps + // Creates a new heap with the given name and size, containing CodeBlobs of the given type + static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type); + static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType + static bool heap_available(int code_blob_type); // Returns true if a CodeHeap for the given CodeBlobType is available + static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps - public: + // Iteration + static CodeBlob* first_blob(CodeHeap* heap); // Returns the first CodeBlob on the given CodeHeap + static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the next CodeBlob on the given CodeHeap succeeding the given CodeBlob + static CodeBlob* first_alive_blob(CodeHeap* heap); // Returns the first alive CodeBlob on the given CodeHeap + static CodeBlob* next_alive_blob(CodeHeap* heap, CodeBlob* cb); // Returns the next alive CodeBlob on the given CodeHeap succeeding the given CodeBlob + public: // Initialization static void initialize(); - static void report_codemem_full(); - // Allocation/administration - static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob - static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled - static int alignment_unit(); // guaranteed alignment of all CodeBlobs - static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header) - static void free(CodeBlob* cb); // frees a CodeBlob - static void flush(); // flushes all CodeBlobs - static bool contains(void *p); // returns whether p is included - static void blobs_do(void f(CodeBlob* cb)); // iterates over all
CodeBlobs - static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs - static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods - static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods + static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob + static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled + static int alignment_unit(); // guaranteed alignment of all CodeBlobs + static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header) + static void free(CodeBlob* cb, int code_blob_type); // frees a CodeBlob + static bool contains(void *p); // returns whether p is included + static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs + static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs + static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods + static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods // Lookup - static CodeBlob* find_blob(void* start); - static nmethod* find_nmethod(void* start); - - // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know - // what you are doing) - static CodeBlob* find_blob_unsafe(void* start) { - // NMT can walk the stack before code cache is created - if (_heap == NULL) return NULL; - - CodeBlob* result = (CodeBlob*)_heap->find_start(start); - // this assert is too strong because the heap code will return the - // heapblock containing start. That block can often be larger than - // the codeBlob itself. If you look up an address that is within - // the heapblock but not in the codeBlob you will assert. - // - // Most things will not lookup such bad addresses. However - // AsyncGetCallTrace can see intermediate frames and get that kind - // of invalid address and so can a developer using hsfind. - // - // The more correct answer is to return NULL if blob_contains() returns - // false. 
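The comment being removed here explains why a lookup of an address that lies inside a heap block but outside the CodeBlob must return NULL. With several heaps, the now out-of-line find_blob_unsafe first has to locate the containing CodeHeap; a rough sketch of that shape (not the patch's exact body), using the FOR_ALL_HEAPS macro introduced in codeCache.cpp below:

    // Sketch only: locate the CodeHeap containing 'start', then keep the old
    // "inside the heap block but outside the blob" NULL check.
    CodeBlob* find_blob_unsafe_sketch(void* start) {
      FOR_ALL_HEAPS(it) {
        if ((*it)->contains(start)) {
          CodeBlob* result = (CodeBlob*)(*it)->find_start(start);
          return (result != NULL && result->blob_contains((address)start)) ? result : NULL;
        }
      }
      return NULL;  // e.g. NMT walking the stack before the code cache exists
    }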
- // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob"); - - if (result != NULL && !result->blob_contains((address)start)) { - result = NULL; - } - return result; - } + static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address + static CodeBlob* find_blob_unsafe(void* start); // Same as find_blob but does not fail if looking up a zombie method + static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address + static bool contains_nmethod(nmethod* nm); // Returns true if the CodeCache contains the given nmethod // Iteration - static CodeBlob* first(); - static CodeBlob* next (CodeBlob* cb); - static CodeBlob* alive(CodeBlob *cb); - static nmethod* alive_nmethod(CodeBlob *cb); - static nmethod* first_nmethod(); - static nmethod* next_nmethod (CodeBlob* cb); - static int nof_blobs() { return _number_of_blobs; } - static int nof_adapters() { return _number_of_adapters; } - static int nof_nmethods() { return _number_of_nmethods; } + // Returns the first CodeBlob of the given type + static CodeBlob* first_blob(int code_blob_type) { return first_blob(get_code_heap(code_blob_type)); } + // Returns the first alive CodeBlob of the given type + static CodeBlob* first_alive_blob(int code_blob_type) { return first_alive_blob(get_code_heap(code_blob_type)); } + // Returns the next CodeBlob of the given type succeeding the given CodeBlob + static CodeBlob* next_blob(CodeBlob* cb, int code_blob_type) { return next_blob(get_code_heap(code_blob_type), cb); } + // Returns the next alive CodeBlob of the given type succeeding the given CodeBlob + static CodeBlob* next_alive_blob(CodeBlob* cb, int code_blob_type) { return next_alive_blob(get_code_heap(code_blob_type), cb); } + + static int nof_blobs() { return _number_of_blobs; } // Returns the total number of CodeBlobs in the cache + static int nof_adapters() { return _number_of_adapters; } // Returns the total number of Adapters in the cache + static int nof_nmethods() { return _number_of_nmethods; } // Returns the total number of nmethods in the cache // GC support static void gc_epilogue(); @@ -141,7 +160,7 @@ static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN; static void scavenge_root_nmethods_do(CodeBlobClosure* f); - static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; } + static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; } static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; } static void add_scavenge_root_nmethod(nmethod* nm); static void drop_scavenge_root_nmethod(nmethod* nm); @@ -154,23 +173,42 @@ static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN; static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage static void log_state(outputStream* st); + static const char* get_heap_name(int code_blob_type) { return (heap_available(code_blob_type) ? 
get_code_heap(code_blob_type)->name() : "Unused"); } + static void report_codemem_full(int code_blob_type); // The full limits of the codeCache - static address low_bound() { return (address) _heap->low_boundary(); } - static address high_bound() { return (address) _heap->high_boundary(); } - static address high() { return (address) _heap->high(); } + static address low_bound() { return _low_bound; } + static address high_bound() { return _high_bound; } // Profiling - static address first_address(); // first address used for CodeBlobs - static address last_address(); // last address used for CodeBlobs - static size_t capacity() { return _heap->capacity(); } - static size_t max_capacity() { return _heap->max_capacity(); } - static size_t unallocated_capacity() { return _heap->unallocated_capacity(); } - static double reverse_free_ratio(); - - static bool needs_cache_clean() { return _needs_cache_clean; } - static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } - static void clear_inline_caches(); // clear all inline caches + static size_t capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->capacity() : 0; } + static size_t capacity(); + static size_t unallocated_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->unallocated_capacity() : 0; } + static size_t unallocated_capacity(); + static size_t max_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->max_capacity() : 0; } + static size_t max_capacity(); + + static bool is_full(int code_blob_type) { return heap_available(code_blob_type) && (unallocated_capacity(code_blob_type) < CodeCacheMinimumFreeSpace); } + static double reverse_free_ratio(int code_blob_type); + + static bool needs_cache_clean() { return _needs_cache_clean; } + static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } + + // Returns the CodeBlobType for nmethods of the given compilation level + static int get_code_blob_type(int comp_level) { + if (comp_level == CompLevel_none || + comp_level == CompLevel_simple || + comp_level == CompLevel_full_optimization) { + // Non profiled methods + return CodeBlobType::MethodNonProfiled; + } else if (comp_level == CompLevel_limited_profile || + comp_level == CompLevel_full_profile) { + // Profiled methods + return CodeBlobType::MethodProfiled; + } + ShouldNotReachHere(); + return 0; + } // Deoptimization static int mark_for_deoptimization(DepChange& changes); @@ -183,7 +221,7 @@ static void make_marked_nmethods_zombies(); static void make_marked_nmethods_not_entrant(); - // tells how many nmethods have dependencies + // tells how many nmethods have dependencies static int number_of_nmethods_with_dependencies(); static int get_codemem_full_count() { return _codemem_full_count; } --- old/src/os/solaris/dtrace/libjvm_db.c 2013-10-11 15:44:35.393819674 +0200 +++ new/src/os/solaris/dtrace/libjvm_db.c 2013-10-11 15:44:35.189819682 +0200 @@ -150,16 +150,18 @@ uint64_t Use_Compressed_Oops_address; uint64_t Universe_narrow_oop_base_address; uint64_t Universe_narrow_oop_shift_address; - uint64_t CodeCache_heap_address; + uint64_t CodeCache_heaps_address; /* Volatiles */ uint8_t Use_Compressed_Oops; uint64_t Universe_narrow_oop_base; uint32_t Universe_narrow_oop_shift; - uint64_t CodeCache_low; - uint64_t CodeCache_high; - uint64_t CodeCache_segmap_low; - uint64_t CodeCache_segmap_high; + // Code cache heaps + int32_t Number_of_heaps; + uint64_t* Heap_low; + uint64_t* 
Heap_high; + uint64_t* Heap_segmap_low; + uint64_t* Heap_segmap_high; int32_t SIZE_CodeCache_log2_segment; @@ -275,9 +277,10 @@ } if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) { - if (strcmp("_heap", vmp->fieldName) == 0) { - err = read_pointer(J, vmp->address, &J->CodeCache_heap_address); - } + /* Read _heaps field of type GrowableArray* */ + if (strcmp("_heaps", vmp->fieldName) == 0) { + err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address); + } } else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) { if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) { J->Universe_narrow_oop_base_address = vmp->address; @@ -315,7 +318,9 @@ } static int read_volatiles(jvm_agent_t* J) { - uint64_t ptr; + int i; + uint64_t array_data; + uint64_t code_heap_address; int err; err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address); @@ -331,20 +336,43 @@ err = ps_pread(J->P, J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t)); CHECK_FAIL(err); - err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory + - OFFSET_VirtualSpace_low, &J->CodeCache_low); - CHECK_FAIL(err); - err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory + - OFFSET_VirtualSpace_high, &J->CodeCache_high); - CHECK_FAIL(err); - err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap + - OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low); - CHECK_FAIL(err); - err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap + - OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high); - CHECK_FAIL(err); + /* CodeCache_heaps_address points to GrowableArray, read _data field + pointing to the first entry of type CodeCache* in the array */ + err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data); + /* Read _len field containing the number of code heaps */ + err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len, + &J->Number_of_heaps, sizeof(J->Number_of_heaps)); + + /* Allocate memory for heap configurations */ + J->Heap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t)); + J->Heap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t)); + J->Heap_segmap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t)); + J->Heap_segmap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t)); + + /* Read code heap configurations */ + for (i = 0; i < J->Number_of_heaps; ++i) { + /* Read address of heap */ + err = read_pointer(J, array_data, &code_heap_address); + CHECK_FAIL(err); + + err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory + + OFFSET_VirtualSpace_low, &J->Heap_low[i]); + CHECK_FAIL(err); + err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory + + OFFSET_VirtualSpace_high, &J->Heap_high[i]); + CHECK_FAIL(err); + err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap + + OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]); + CHECK_FAIL(err); + err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap + + OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]); + CHECK_FAIL(err); + + /* Increment pointer to next entry */ + array_data = array_data + POINTER_SIZE; + } - err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size, + err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size, &J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment)); CHECK_FAIL(err); @@ -354,46 
+382,57 @@ return err; } +static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) { + return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]); +} static int codecache_contains(jvm_agent_t* J, uint64_t ptr) { - /* make sure the code cache is up to date */ - return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high); + int i; + for (i = 0; i < J->Number_of_heaps; ++i) { + if (codeheap_contains(i, J, ptr)) { + return 1; + } + } + return 0; } -static uint64_t segment_for(jvm_agent_t* J, uint64_t p) { - return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment; +static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) { + return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment; } -static uint64_t block_at(jvm_agent_t* J, int i) { - return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment); +static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) { + return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment); } static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) { int err; + int i; - *startp = 0; - if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) { - int32_t used; - uint64_t segment = segment_for(J, ptr); - uint64_t block = J->CodeCache_segmap_low; - uint8_t tag; - err = ps_pread(J->P, block + segment, &tag, sizeof(tag)); - CHECK_FAIL(err); - if (tag == 0xff) - return PS_OK; - while (tag > 0) { + for (i = 0; i < J->Number_of_heaps; ++i) { + *startp = 0; + if (codeheap_contains(i, J, ptr)) { + int32_t used; + uint64_t segment = segment_for(i, J, ptr); + uint64_t block = J->Heap_segmap_low[i]; + uint8_t tag; err = ps_pread(J->P, block + segment, &tag, sizeof(tag)); CHECK_FAIL(err); - segment -= tag; - } - block = block_at(J, segment); - err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used)); - CHECK_FAIL(err); - if (used) { - *startp = block + SIZE_HeapBlockHeader; + if (tag == 0xff) + return PS_OK; + while (tag > 0) { + err = ps_pread(J->P, block + segment, &tag, sizeof(tag)); + CHECK_FAIL(err); + segment -= tag; + } + block = block_at(i, J, segment); + err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used)); + CHECK_FAIL(err); + if (used) { + *startp = block + SIZE_HeapBlockHeader; + } } + return PS_OK; } - return PS_OK; fail: return -1; --- old/src/share/vm/c1/c1_Compiler.cpp 2013-10-11 15:44:35.385819674 +0200 +++ new/src/share/vm/c1/c1_Compiler.cpp 2013-10-11 15:44:35.189819682 +0200 @@ -93,7 +93,7 @@ buffer_blob = BufferBlob::create("Compiler1 temporary CodeBuffer", code_buffer_size); if (buffer_blob == NULL) { - CompileBroker::handle_full_code_cache(); + CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod); env->record_failure("CodeCache is full"); } else { CompilerThread::current()->set_buffer_blob(buffer_blob); --- old/src/os/solaris/dtrace/jhelper.d 2013-10-11 15:44:35.461819672 +0200 +++ new/src/os/solaris/dtrace/jhelper.d 2013-10-11 15:44:35.249819680 +0200 @@ -43,7 +43,9 @@ extern pointer __JvmOffsets; -extern pointer __1cJCodeCacheF_heap_; +/* GrowableArray* */ +extern pointer __1cJCodeCacheG_heaps_; + extern pointer __1cIUniverseO_collectedHeap_; extern pointer __1cHnmethodG__vtbl_; @@ -95,8 +97,8 @@ /!init_done && !this->done/ { MARK_LINE; - init_done = 1; - + + copyin_offset(POINTER_SIZE); copyin_offset(COMPILER); copyin_offset(OFFSET_CollectedHeap_reserved); copyin_offset(OFFSET_MemRegion_start); @@ -122,6 +124,9 @@ copyin_offset(OFFSET_CodeHeap_segmap); copyin_offset(OFFSET_CodeHeap_log2_segment_size); + 
copyin_offset(OFFSET_GrowableArray_CodeHeap_data); + copyin_offset(OFFSET_GrowableArray_CodeHeap_len); + copyin_offset(OFFSET_VirtualSpace_low); copyin_offset(OFFSET_VirtualSpace_high); @@ -152,24 +157,13 @@ #error "Don't know architecture" #endif - this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_); - - this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address + - OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); - - this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address + - OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); - - this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address + - OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low); - - this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address + - OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high); - - this->CodeHeap_log2_segment_size = copyin_uint32( - this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size); + /* Read address of GrowableArray */ + this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_); + /* Read address of _data array field in GrowableArray */ + this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data); + this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len); - this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_; + this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_; /* * Get Java heap bounds @@ -186,21 +180,152 @@ this->heap_end = this->heap_start + this->heap_size; } +/* + * IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in + * the code cache. If more code heaps are added the following probes have to + * be extended. This is done by simply adding a probe to get the heap bounds + * and another probe to set the code heap address of the newly created heap. 
+ */ + +/* + * ----- BEGIN: Get bounds of code heaps ----- + */ +dtrace:helper:ustack: +/init_done < 1 && this->number_of_heaps >= 1 && !this->done/ +{ + MARK_LINE; + /* CodeHeap 1 */ + init_done = 1; + this->code_heap1_address = copyin_ptr(this->code_heaps_array_address); + this->code_heap1_low = copyin_ptr(this->code_heap1_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap1_high = copyin_ptr(this->code_heap1_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); +} + +dtrace:helper:ustack: +/init_done < 2 && this->number_of_heaps >= 2 && !this->done/ +{ + MARK_LINE; + /* CodeHeap 2 */ + init_done = 2; + this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; + this->code_heap2_address = copyin_ptr(this->code_heaps_array_address); + this->code_heap2_low = copyin_ptr(this->code_heap2_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap2_high = copyin_ptr(this->code_heap2_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); +} + +dtrace:helper:ustack: +/init_done < 3 && this->number_of_heaps >= 3 && !this->done/ +{ + /* CodeHeap 3 */ + init_done = 3; + this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; + this->code_heap3_address = copyin_ptr(this->code_heaps_array_address); + this->code_heap3_low = copyin_ptr(this->code_heap3_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap3_high = copyin_ptr(this->code_heap3_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); +} + +dtrace:helper:ustack: +/init_done < 4 && this->number_of_heaps >= 4 && !this->done/ +{ + /* CodeHeap 4 */ + init_done = 4; + this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; + this->code_heap4_address = copyin_ptr(this->code_heaps_array_address); + this->code_heap4_low = copyin_ptr(this->code_heap4_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap4_high = copyin_ptr(this->code_heap4_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); +} + +dtrace:helper:ustack: +/init_done < 5 && this->number_of_heaps >= 5 && !this->done/ +{ + /* CodeHeap 5 */ + init_done = 5; + this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE; + this->code_heap5_address = copyin_ptr(this->code_heaps_array_address); + this->code_heap5_low = copyin_ptr(this->code_heap5_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap5_high = copyin_ptr(this->code_heap5_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high); +} +/* + * ----- END: Get bounds of code heaps ----- + */ + +/* + * ----- BEGIN: Get address of the code heap pc points to ----- + */ +dtrace:helper:ustack: +/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/ +{ + MARK_LINE; + this->codecache = 1; + this->code_heap_address = this->code_heap1_address; +} + +dtrace:helper:ustack: +/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/ +{ + MARK_LINE; + this->codecache = 1; + this->code_heap_address = this->code_heap2_address; +} + dtrace:helper:ustack: -/!this->done && -this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/ +/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/ { MARK_LINE; this->codecache = 1; + this->code_heap_address = this->code_heap3_address; +} + +dtrace:helper:ustack: 
+/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/ +{ + MARK_LINE; + this->codecache = 1; + this->code_heap_address = this->code_heap4_address; +} + +dtrace:helper:ustack: +/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/ +{ + MARK_LINE; + this->codecache = 1; + this->code_heap_address = this->code_heap5_address; +} +/* + * ----- END: Get address of the code heap pc points to ----- + */ + +dtrace:helper:ustack: +/!this->done && this->codecache/ +{ + MARK_LINE; + /* + * Get code heap configuration + */ + this->code_heap_low = copyin_ptr(this->code_heap_address + + OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low); + this->code_heap_segmap_low = copyin_ptr(this->code_heap_address + + OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low); + this->code_heap_log2_segment_size = copyin_uint32( + this->code_heap_address + OFFSET_CodeHeap_log2_segment_size); /* - * Find start. + * Find start */ - this->segment = (this->pc - this->CodeCache_low) >> - this->CodeHeap_log2_segment_size; - this->block = this->CodeCache_segmap_low; + this->segment = (this->pc - this->code_heap_low) >> + this->code_heap_log2_segment_size; + this->block = this->code_heap_segmap_low; this->tag = copyin_uchar(this->block + this->segment); - "second"; } dtrace:helper:ustack: @@ -255,8 +380,8 @@ /!this->done && this->codecache/ { MARK_LINE; - this->block = this->CodeCache_low + - (this->segment << this->CodeHeap_log2_segment_size); + this->block = this->code_heap_low + + (this->segment << this->code_heap_log2_segment_size); this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used); } --- old/src/share/vm/code/codeCache.cpp 2013-10-11 15:44:35.409819673 +0200 +++ new/src/share/vm/code/codeCache.cpp 2013-10-11 15:44:35.205819681 +0200 @@ -48,8 +48,8 @@ #include "trace/tracing.hpp" #include "utilities/xmlstream.hpp" -// Helper class for printing in CodeCache +// Helper class for printing in CodeCache class CodeBlob_sizes { private: int count; @@ -115,64 +115,182 @@ } }; -// CodeCache implementation +// Iterate over all CodeHeaps +#define FOR_ALL_HEAPS(it) for (GrowableArrayIterator it = _heaps->begin(); it != _heaps->end(); ++it) +// Iterate over all CodeHeaps containing nmethods +#define FOR_ALL_METHOD_HEAPS(it) for (GrowableArrayFilterIterator it(_heaps->begin(), IsMethodPredicate()); it != _heaps->end(); ++it) +// Iterate over all CodeBlobs (cb) on the given CodeHeap +#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb)) +// Iterate over all alive CodeBlobs (cb) on the given CodeHeap +#define FOR_ALL_ALIVE_BLOBS(cb, heap) for (CodeBlob* cb = first_alive_blob(heap); cb != NULL; cb = next_alive_blob(heap, cb)) -CodeHeap * CodeCache::_heap = new CodeHeap(); +address CodeCache::_low_bound = 0; +address CodeCache::_high_bound = 0; int CodeCache::_number_of_blobs = 0; int CodeCache::_number_of_adapters = 0; int CodeCache::_number_of_nmethods = 0; int CodeCache::_number_of_nmethods_with_dependencies = 0; bool CodeCache::_needs_cache_clean = false; nmethod* CodeCache::_scavenge_root_nmethods = NULL; - int CodeCache::_codemem_full_count = 0; -CodeBlob* CodeCache::first() { - assert_locked_or_safepoint(CodeCache_lock); - return (CodeBlob*)_heap->first(); -} +// Initialize array of CodeHeaps +GrowableArray* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray (3, true); +void CodeCache::initialize_heaps() { + // Check if custom 
ReservedCodeCacheSize is set and adapt CodeHeap sizes accordingly + if (!FLAG_IS_DEFAULT(ReservedCodeCacheSize) && FLAG_IS_DEFAULT(NonMethodCodeHeapSize) + && FLAG_IS_DEFAULT(ProfiledCodeHeapSize) && FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) { + if (ReservedCodeCacheSize > NonMethodCodeHeapSize) { + // Use the default value for NonMethodCodeHeapSize and use 2/3 of the + // remaining size for non-profiled methods and 1/3 for profiled methods + size_t remaining_size = ReservedCodeCacheSize - NonMethodCodeHeapSize; + FLAG_SET_DEFAULT(ProfiledCodeHeapSize, remaining_size * (double)1/3); + FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, remaining_size * (double)2/3); + } else { + // Use all space for the non-method heap and set other heaps to minimal size + FLAG_SET_DEFAULT(NonMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2); + FLAG_SET_DEFAULT(ProfiledCodeHeapSize, os::vm_page_size()); + FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, os::vm_page_size()); + } + } + + // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap + if(!heap_available(CodeBlobType::MethodProfiled)) { + FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize); + FLAG_SET_DEFAULT(ProfiledCodeHeapSize, 0); + } + + // Size check + guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check"); + + // Align reserved sizes of CodeHeaps + size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonMethodCodeHeapSize); + size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize); + size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize); + + // Compute initial sizes of CodeHeaps + size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size); + size_t init_profiled_size = MIN2(InitialCodeCacheSize, profiled_size); + size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size); + + // Reserve one continuous chunk of memory for CodeHeaps and split it into + // parts for the individual heaps. The memory layout looks like this: + // ---------- high ----------- + // Non-profiled nmethods + // Profiled nmethods + // Non-methods + // ---------- low ------------ + ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size); + ReservedSpace non_method_space = rs.first_part(non_method_size); + ReservedSpace rest = rs.last_part(non_method_size); + ReservedSpace profiled_space = rest.first_part(profiled_size); + ReservedSpace non_profiled_space = rest.last_part(profiled_size); + + // Non-methods (stubs, adapters, ...) + add_heap(non_method_space, "Non-methods", init_non_method_size, CodeBlobType::NonMethod); + // Tier 2 and tier 3 (profiled) methods + add_heap(profiled_space, "Profiled nmethods", init_profiled_size, CodeBlobType::MethodProfiled); + // Tier 1 and tier 4 (non-profiled) methods and native methods + add_heap(non_profiled_space, "Non-profiled nmethods", init_non_profiled_size, CodeBlobType::MethodNonProfiled); +} + +ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) { + // Determine alignment + const size_t page_size = os::can_execute_large_page_memory() ? 
+ os::page_size_for_region(InitialCodeCacheSize, size, 8) : + os::vm_page_size(); + const size_t granularity = os::vm_allocation_granularity(); + const size_t r_align = MAX2(page_size, granularity); + const size_t r_size = align_size_up(size, r_align); + const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 : + MAX2(page_size, granularity); + + ReservedCodeSpace rs(r_size, rs_align, rs_align > 0); + + // Initialize bounds + _low_bound = (address)rs.base(); + _high_bound = _low_bound + rs.size(); + guarantee(low_bound() < high_bound(), "Bound check"); + + return rs; +} + +bool CodeCache::heap_available(int code_blob_type) { + if (TieredCompilation || code_blob_type == CodeBlobType::NonMethod) { + // Use all heaps for TieredCompilation + return true; + } else { + // Without TieredCompilation we only need the non-profiled heap + return (code_blob_type == CodeBlobType::MethodNonProfiled); + } +} + +void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) { + // Check if heap is needed + if (!heap_available(code_blob_type)) { + return; + } + + // Create CodeHeap + CodeHeap* heap = new CodeHeap(name, code_blob_type); + _heaps->append(heap); -CodeBlob* CodeCache::next(CodeBlob* cb) { - assert_locked_or_safepoint(CodeCache_lock); - return (CodeBlob*)_heap->next(cb); -} + // Reserve Space + size_initial = round_to(size_initial, os::vm_page_size()); + if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) { + vm_exit_during_initialization("Could not reserve enough space for code cache"); + } -CodeBlob* CodeCache::alive(CodeBlob *cb) { - assert_locked_or_safepoint(CodeCache_lock); - while (cb != NULL && !cb->is_alive()) cb = next(cb); - return cb; + // Register the CodeHeap + MemoryService::add_code_heap_memory_pool(heap, name); } +CodeHeap* CodeCache::get_code_heap(int code_blob_type) { + FOR_ALL_HEAPS(it) { + if ((*it)->accepts(code_blob_type)) { + return (*it); + } + } + return NULL; +} -nmethod* CodeCache::alive_nmethod(CodeBlob* cb) { +CodeBlob* CodeCache::first_blob(CodeHeap* heap) { assert_locked_or_safepoint(CodeCache_lock); - while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb); - return (nmethod*)cb; + if (heap != NULL) { + return (CodeBlob*)heap->first(); + } + return NULL; } -nmethod* CodeCache::first_nmethod() { +CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) { assert_locked_or_safepoint(CodeCache_lock); - CodeBlob* cb = first(); - while (cb != NULL && !cb->is_nmethod()) { - cb = next(cb); + if (heap != NULL) { + return (CodeBlob*)heap->next(cb); } - return (nmethod*)cb; + return NULL; } -nmethod* CodeCache::next_nmethod (CodeBlob* cb) { +CodeBlob* CodeCache::first_alive_blob(CodeHeap* heap) { assert_locked_or_safepoint(CodeCache_lock); - cb = next(cb); - while (cb != NULL && !cb->is_nmethod()) { - cb = next(cb); + CodeBlob* cb = first_blob(heap); + while (cb != NULL && !cb->is_alive()) { + cb = next_blob(heap, cb); } - return (nmethod*)cb; + return cb; } -static size_t maxCodeCacheUsed = 0; +CodeBlob* CodeCache::next_alive_blob(CodeHeap* heap, CodeBlob* cb) { + assert_locked_or_safepoint(CodeCache_lock); + cb = next_blob(heap, cb); + while (cb != NULL && !cb->is_alive()) { + cb = next_blob(heap, cb); + } + return cb; +} -CodeBlob* CodeCache::allocate(int size, bool is_critical) { +CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) { // Do not seize the CodeCache lock here--if the caller has not // already done so, we are going to lose bigtime, since the code // 
cache will contain a garbage CodeBlob until the caller can @@ -182,28 +300,33 @@ assert_locked_or_safepoint(CodeCache_lock); CodeBlob* cb = NULL; _number_of_blobs++; + + // Get CodeHeap for the given CodeBlobType + CodeHeap* heap = get_code_heap(code_blob_type); + assert (heap != NULL, "Heap exists"); + while (true) { - cb = (CodeBlob*)_heap->allocate(size, is_critical); + cb = (CodeBlob*)heap->allocate(size, is_critical); if (cb != NULL) break; - if (!_heap->expand_by(CodeCacheExpansionSize)) { + if (!heap->expand_by(CodeCacheExpansionSize)) { // Expansion failed return NULL; } if (PrintCodeCacheExtension) { ResourceMark rm; - tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)", - (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(), - (address)_heap->high() - (address)_heap->low_boundary()); + tty->print_cr("CodeHeap '%s' extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)", + heap->name(), (intptr_t)heap->low_boundary(), (intptr_t)heap->high(), + (address)heap->high() - (address)heap->low_boundary()); } } - maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() - - (address)_heap->low_boundary()) - unallocated_capacity()); + verify_if_often(); print_trace("allocation", cb, size); + return cb; } -void CodeCache::free(CodeBlob* cb) { +void CodeCache::free(CodeBlob* cb, int code_blob_type) { assert_locked_or_safepoint(CodeCache_lock); verify_if_often(); @@ -219,13 +342,13 @@ } _number_of_blobs--; - _heap->deallocate(cb); + // Get heap for given CodeBlobType and deallocate + get_code_heap(code_blob_type)->deallocate(cb); verify_if_often(); assert(_number_of_blobs >= 0, "sanity check"); } - void CodeCache::commit(CodeBlob* cb) { // this is called by nmethod::nmethod, which must already own CodeCache_lock assert_locked_or_safepoint(CodeCache_lock); @@ -243,94 +366,115 @@ ICache::invalidate_range(cb->content_begin(), cb->content_size()); } - -void CodeCache::flush() { - assert_locked_or_safepoint(CodeCache_lock); - Unimplemented(); -} - - -// Iteration over CodeBlobs - -#define FOR_ALL_BLOBS(var) for (CodeBlob *var = first() ; var != NULL; var = next(var) ) -#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var))) -#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var))) - - bool CodeCache::contains(void *p) { // It should be ok to call contains without holding a lock - return _heap->contains(p); + FOR_ALL_HEAPS(it) { + if ((*it)->contains(p)) { + return true; + } + } + return false; } - -// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not -// looked up (i.e., one that has been marked for deletion). It only dependes on the _segmap to contain +// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not +// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled. 
CodeBlob* CodeCache::find_blob(void* start) { CodeBlob* result = find_blob_unsafe(start); - if (result == NULL) return NULL; - // We could potientially look up non_entrant methods - guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method"); + // We could potentially look up non_entrant methods + guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method"); return result; } +// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know +// what you are doing) +CodeBlob* CodeCache::find_blob_unsafe(void* start) { + // NMT can walk the stack before code cache is created + if (_heaps->first() == NULL) return NULL; + + FOR_ALL_HEAPS(it) { + CodeBlob* result = (CodeBlob*) (*it)->find_start(start); + if (result != NULL && result->blob_contains((address)start)) { + return result; + } + } + return NULL; +} + nmethod* CodeCache::find_nmethod(void* start) { - CodeBlob *cb = find_blob(start); - assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod"); + CodeBlob* cb = find_blob(start); + assert(cb->is_nmethod(), "did not find an nmethod"); return (nmethod*)cb; } +bool CodeCache::contains_nmethod(nmethod* nm) { + FOR_ALL_METHOD_HEAPS(it) { + if ((*it)->contains(nm)) { + return true; + } + } + return false; +} void CodeCache::blobs_do(void f(CodeBlob* nm)) { assert_locked_or_safepoint(CodeCache_lock); - FOR_ALL_BLOBS(p) { - f(p); + FOR_ALL_HEAPS(it) { + FOR_ALL_BLOBS(cb, *it) { + f(cb); + } } } - void CodeCache::nmethods_do(void f(nmethod* nm)) { assert_locked_or_safepoint(CodeCache_lock); - FOR_ALL_BLOBS(nm) { - if (nm->is_nmethod()) f((nmethod*)nm); + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_BLOBS(cb, *it) { + f((nmethod*)cb); + } } } void CodeCache::alive_nmethods_do(void f(nmethod* nm)) { assert_locked_or_safepoint(CodeCache_lock); - FOR_ALL_ALIVE_NMETHODS(nm) { - f(nm); + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + f((nmethod*)cb); + } } } int CodeCache::alignment_unit() { - return (int)_heap->alignment_unit(); + return (int)_heaps->first()->alignment_unit(); } - int CodeCache::alignment_offset() { - return (int)_heap->alignment_offset(); + return (int)_heaps->first()->alignment_offset(); } - -// Mark nmethods for unloading if they contain otherwise unreachable -// oops. +// Mark nmethods for unloading if they contain otherwise unreachable oops. void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) { assert_locked_or_safepoint(CodeCache_lock); - FOR_ALL_ALIVE_NMETHODS(nm) { - nm->do_unloading(is_alive, unloading_occurred); + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + nmethod* nm = (nmethod*)cb; + nm->do_unloading(is_alive, unloading_occurred); + } } } void CodeCache::blobs_do(CodeBlobClosure* f) { assert_locked_or_safepoint(CodeCache_lock); - FOR_ALL_ALIVE_BLOBS(cb) { - f->do_code_blob(cb); + FOR_ALL_HEAPS(it) { + FOR_ALL_BLOBS(cb, *it) { + if (cb->is_alive()) { + f->do_code_blob(cb); #ifdef ASSERT - if (cb->is_nmethod()) - ((nmethod*)cb)->verify_scavenge_root_oops(); + if (cb->is_nmethod()) + ((nmethod*)cb)->verify_scavenge_root_oops(); #endif //ASSERT + } + } } } @@ -434,9 +578,9 @@ // Temporarily mark nmethods that are claimed to be on the non-perm list. 
void CodeCache::mark_scavenge_root_nmethods() { - FOR_ALL_ALIVE_BLOBS(cb) { - if (cb->is_nmethod()) { - nmethod *nm = (nmethod*)cb; + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + nmethod* nm = (nmethod*)cb; assert(nm->scavenge_root_not_marked(), "clean state"); if (nm->on_scavenge_root_list()) nm->set_scavenge_root_marked(); @@ -447,32 +591,29 @@ // If the closure is given, run it on the unlisted nmethods. // Also make sure that the effects of mark_scavenge_root_nmethods is gone. void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) { - FOR_ALL_ALIVE_BLOBS(cb) { - bool call_f = (f_or_null != NULL); - if (cb->is_nmethod()) { - nmethod *nm = (nmethod*)cb; + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + nmethod* nm = (nmethod*)cb; + bool call_f = (f_or_null != NULL); assert(nm->scavenge_root_not_marked(), "must be already processed"); if (nm->on_scavenge_root_list()) call_f = false; // don't show this one to the client nm->verify_scavenge_root_oops(); - } else { - call_f = false; // not an nmethod + if (call_f) f_or_null->do_code_blob(nm); } - if (call_f) f_or_null->do_code_blob(cb); } } #endif //PRODUCT - void CodeCache::gc_prologue() { assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called"); } void CodeCache::gc_epilogue() { assert_locked_or_safepoint(CodeCache_lock); - FOR_ALL_ALIVE_BLOBS(cb) { - if (cb->is_nmethod()) { - nmethod *nm = (nmethod*)cb; + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + nmethod* nm = (nmethod*)cb; assert(!nm->is_unloaded(), "Tautology"); if (needs_cache_clean()) { nm->cleanup_inline_caches(); @@ -488,8 +629,8 @@ #ifdef ASSERT // make sure that we aren't leaking icholders int count = 0; - FOR_ALL_BLOBS(cb) { - if (cb->is_nmethod()) { + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_BLOBS(cb, *it) { RelocIterator iter((nmethod*)cb); while(iter.next()) { if (iter.type() == relocInfo::virtual_call_type) { @@ -512,38 +653,53 @@ #endif } - void CodeCache::verify_oops() { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); VerifyOopClosure voc; - FOR_ALL_ALIVE_BLOBS(cb) { - if (cb->is_nmethod()) { - nmethod *nm = (nmethod*)cb; + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + nmethod* nm = (nmethod*)cb; nm->oops_do(&voc); nm->verify_oop_relocations(); } } } - -address CodeCache::first_address() { - assert_locked_or_safepoint(CodeCache_lock); - return (address)_heap->low_boundary(); +size_t CodeCache::capacity() { + size_t cap = 0; + FOR_ALL_HEAPS(it) { + cap += (*it)->capacity(); + } + return cap; } +size_t CodeCache::unallocated_capacity() { + size_t unallocated_cap = 0; + FOR_ALL_HEAPS(it) { + unallocated_cap += (*it)->unallocated_capacity(); + } + return unallocated_cap; +} -address CodeCache::last_address() { - assert_locked_or_safepoint(CodeCache_lock); - return (address)_heap->high(); +size_t CodeCache::max_capacity() { + size_t max_cap = 0; + FOR_ALL_HEAPS(it) { + max_cap += (*it)->max_capacity(); + } + return max_cap; } /** * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache * is free, reverse_free_ratio() returns 4. 
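* With this change the ratio is computed per CodeHeap rather than for the whole
* CodeCache (see reverse_free_ratio(int code_blob_type) below). Worked example,
* ignoring the CodeCacheMinimumFreeSpace that is subtracted from the unallocated
* capacity: a CodeHeap with max_capacity() of 20M and unallocated_capacity() of 5M
* gives 20M / 5M = 4.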
*/ -double CodeCache::reverse_free_ratio() { - double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace); - double max_capacity = (double)CodeCache::max_capacity(); +double CodeCache::reverse_free_ratio(int code_blob_type) { + CodeHeap* heap = get_code_heap(code_blob_type); + if (heap == NULL) { + return 0; + } + double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace); + double max_capacity = (double)heap->max_capacity(); return max_capacity / unallocated_capacity; } @@ -559,13 +715,9 @@ // the code cache to the page size. In particular, Solaris is moving to a larger // default page size. CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size()); - InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size()); - ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size()); - if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) { - vm_exit_during_initialization("Could not reserve enough space for code cache"); - } - MemoryService::add_code_heap_memory_pool(_heap); + // Reserve space and create heaps + initialize_heaps(); // Initialize ICache flush mechanism // This service is needed for os::register_code_area @@ -574,10 +726,9 @@ // Give OS a chance to register generated code area. // This is used on Windows 64 bit platforms to register // Structured Exception Handlers for our generated code. - os::register_code_area(_heap->low_boundary(), _heap->high_boundary()); + os::register_code_area((char*)low_bound(), (char*)high_bound()); } - void codeCache_init() { CodeCache::initialize(); } @@ -588,13 +739,6 @@ return _number_of_nmethods_with_dependencies; } -void CodeCache::clear_inline_caches() { - assert_locked_or_safepoint(CodeCache_lock); - FOR_ALL_ALIVE_NMETHODS(nm) { - nm->clear_inline_caches(); - } -} - #ifndef PRODUCT // used to keep track of how much time is spent in mark_for_deoptimization static elapsedTimer dependentCheckTime; @@ -628,14 +772,17 @@ if (VerifyDependencies) { // Turn off dependency tracing while actually testing deps. NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) ); - FOR_ALL_ALIVE_NMETHODS(nm) { - if (!nm->is_marked_for_deoptimization() && - nm->check_all_dependencies()) { - ResourceMark rm; - tty->print_cr("Should have been marked for deoptimization:"); - changes.print(); - nm->print(); - nm->print_dependencies(); + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + nmethod* nm = (nmethod*)cb; + if (!nm->is_marked_for_deoptimization() && + nm->check_all_dependencies()) { + ResourceMark rm; + tty->print_cr("Should have been marked for deoptimization:"); + changes.print(); + nm->print(); + nm->print_dependencies(); + } } } } @@ -665,16 +812,19 @@ } } - FOR_ALL_ALIVE_NMETHODS(nm) { - if (nm->is_marked_for_deoptimization()) { - // ...Already marked in the previous pass; don't count it again. - } else if (nm->is_evol_dependent_on(dependee())) { - ResourceMark rm; - nm->mark_for_deoptimization(); - number_of_marked_CodeBlobs++; - } else { - // flush caches in case they refer to a redefined Method* - nm->clear_inline_caches(); + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + nmethod* nm = (nmethod*)cb; + if (nm->is_marked_for_deoptimization()) { + // ...Already marked in the previous pass; don't count it again. 
+ } else if (nm->is_evol_dependent_on(dependee())) { + ResourceMark rm; + nm->mark_for_deoptimization(); + number_of_marked_CodeBlobs++; + } else { + // flush caches in case they refer to a redefined Method* + nm->clear_inline_caches(); + } } } @@ -686,21 +836,26 @@ // Deoptimize all methods void CodeCache::mark_all_nmethods_for_deoptimization() { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - FOR_ALL_ALIVE_NMETHODS(nm) { - nm->mark_for_deoptimization(); + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + nmethod* nm = (nmethod*)cb; + nm->mark_for_deoptimization(); + } } } - int CodeCache::mark_for_deoptimization(Method* dependee) { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); int number_of_marked_CodeBlobs = 0; - FOR_ALL_ALIVE_NMETHODS(nm) { - if (nm->is_dependent_on_method(dependee)) { - ResourceMark rm; - nm->mark_for_deoptimization(); - number_of_marked_CodeBlobs++; + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + nmethod* nm = (nmethod*)cb; + if (nm->is_dependent_on_method(dependee)) { + ResourceMark rm; + nm->mark_for_deoptimization(); + number_of_marked_CodeBlobs++; + } } } @@ -709,20 +864,23 @@ void CodeCache::make_marked_nmethods_zombies() { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); - FOR_ALL_ALIVE_NMETHODS(nm) { - if (nm->is_marked_for_deoptimization()) { - - // If the nmethod has already been made non-entrant and it can be converted - // then zombie it now. Otherwise make it non-entrant and it will eventually - // be zombied when it is no longer seen on the stack. Note that the nmethod - // might be "entrant" and not on the stack and so could be zombied immediately - // but we can't tell because we don't track it on stack until it becomes - // non-entrant. + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + nmethod* nm = (nmethod*)cb; + if (nm->is_marked_for_deoptimization()) { - if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) { - nm->make_zombie(); - } else { - nm->make_not_entrant(); + // If the nmethod has already been made non-entrant and it can be converted + // then zombie it now. Otherwise make it non-entrant and it will eventually + // be zombied when it is no longer seen on the stack. Note that the nmethod + // might be "entrant" and not on the stack and so could be zombied immediately + // but we can't tell because we don't track it on stack until it becomes + // non-entrant. + + if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) { + nm->make_zombie(); + } else { + nm->make_not_entrant(); + } } } } @@ -730,31 +888,63 @@ void CodeCache::make_marked_nmethods_not_entrant() { assert_locked_or_safepoint(CodeCache_lock); - FOR_ALL_ALIVE_NMETHODS(nm) { - if (nm->is_marked_for_deoptimization()) { - nm->make_not_entrant(); + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_ALIVE_BLOBS(cb, *it) { + nmethod* nm = (nmethod*)cb; + if (nm->is_marked_for_deoptimization()) { + nm->make_not_entrant(); + } } } } void CodeCache::verify() { - _heap->verify(); - FOR_ALL_ALIVE_BLOBS(p) { - p->verify(); + assert_locked_or_safepoint(CodeCache_lock); + FOR_ALL_HEAPS(it) { + CodeHeap* heap = *it; + heap->verify(); + FOR_ALL_BLOBS(cb, heap) { + if (cb->is_alive()) { + cb->verify(); + } + } } } -void CodeCache::report_codemem_full() { +// A CodeHeap is full. Print out warning and report event. 
+void CodeCache::report_codemem_full(int code_blob_type) { + // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event + CodeHeap* heap = get_code_heap(code_blob_type); + + if (!heap->was_full()) { + // Not yet reported for this heap, report + heap->report_full(); + warning("CodeHeap for %s is full. Compiler has been disabled.", CodeCache::get_heap_name(code_blob_type)); + warning("Try increasing the code heap size using -XX:%s=", + (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize"); + + ResourceMark rm; + stringStream s; + // Dump CodeCache summary into a buffer before locking the tty + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + print_summary(&s, true); + } + ttyLocker ttyl; + tty->print(s.as_string()); + } + _codemem_full_count++; EventCodeCacheFull event; if (event.should_commit()) { - event.set_startAddress((u8)low_bound()); - event.set_commitedTopAddress((u8)high()); - event.set_reservedTopAddress((u8)high_bound()); + event.set_codeBlobType(code_blob_type); + event.set_startAddress((u8)heap->low_boundary()); + event.set_commitedTopAddress((u8)heap->high()); + event.set_reservedTopAddress((u8)heap->high_boundary()); event.set_entryCount(nof_blobs()); event.set_methodCount(nof_nmethods()); event.set_adaptorCount(nof_adapters()); - event.set_unallocatedCapacity(unallocated_capacity()/K); + event.set_unallocatedCapacity(heap->unallocated_capacity()/K); event.set_fullCount(_codemem_full_count); event.commit(); } @@ -767,7 +957,9 @@ void CodeCache::verify_if_often() { if (VerifyCodeCacheOften) { - _heap->verify(); + FOR_ALL_HEAPS(it) { + (*it)->verify(); + } } } @@ -796,45 +988,50 @@ int maxCodeSize = 0; ResourceMark rm; - CodeBlob *cb; - for (cb = first(); cb != NULL; cb = next(cb)) { - total++; - if (cb->is_nmethod()) { - nmethod* nm = (nmethod*)cb; - - if (Verbose && nm->method() != NULL) { - ResourceMark rm; - char *method_name = nm->method()->name_and_sig_as_C_string(); - tty->print("%s", method_name); - if(nm->is_alive()) { tty->print_cr(" alive"); } - if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); } - if(nm->is_zombie()) { tty->print_cr(" zombie"); } - } + int i = 0; + FOR_ALL_HEAPS(it) { + if (Verbose) { + tty->print_cr("## Heap '%s' ##", (*it)->name()); + } + FOR_ALL_BLOBS(cb, *it) { + total++; + if (cb->is_nmethod()) { + nmethod* nm = (nmethod*)cb; + + if (Verbose && nm->method() != NULL) { + ResourceMark rm; + char *method_name = nm->method()->name_and_sig_as_C_string(); + tty->print("%s %d", method_name, nm->comp_level()); + if(nm->is_alive()) { tty->print_cr(" alive"); } + if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); } + if(nm->is_zombie()) { tty->print_cr(" zombie"); } + } - nmethodCount++; - - if(nm->is_alive()) { nmethodAlive++; } - if(nm->is_not_entrant()) { nmethodNotEntrant++; } - if(nm->is_zombie()) { nmethodZombie++; } - if(nm->is_unloaded()) { nmethodUnloaded++; } - if(nm->is_native_method()) { nmethodNative++; } + nmethodCount++; - if(nm->method() != NULL && nm->is_java_method()) { - nmethodJava++; - if (nm->insts_size() > maxCodeSize) { - maxCodeSize = nm->insts_size(); + if(nm->is_alive()) { nmethodAlive++; } + if(nm->is_not_entrant()) { nmethodNotEntrant++; } + if(nm->is_zombie()) { nmethodZombie++; } + if(nm->is_unloaded()) { nmethodUnloaded++; } + if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; } + + if(nm->method() != NULL && nm->is_java_method()) { + nmethodJava++; + if (nm->insts_size() > maxCodeSize) { + 
maxCodeSize = nm->insts_size(); + } } + } else if (cb->is_runtime_stub()) { + runtimeStubCount++; + } else if (cb->is_deoptimization_stub()) { + deoptimizationStubCount++; + } else if (cb->is_uncommon_trap_stub()) { + uncommonTrapStubCount++; + } else if (cb->is_adapter_blob()) { + adapterCount++; + } else if (cb->is_buffer_blob()) { + bufferBlobCount++; } - } else if (cb->is_runtime_stub()) { - runtimeStubCount++; - } else if (cb->is_deoptimization_stub()) { - deoptimizationStubCount++; - } else if (cb->is_uncommon_trap_stub()) { - uncommonTrapStubCount++; - } else if (cb->is_adapter_blob()) { - adapterCount++; - } else if (cb->is_buffer_blob()) { - bufferBlobCount++; } } @@ -843,10 +1040,10 @@ int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode); memset(buckets,0,sizeof(int) * bucketLimit); - for (cb = first(); cb != NULL; cb = next(cb)) { - if (cb->is_nmethod()) { + FOR_ALL_METHOD_HEAPS(it) { + FOR_ALL_BLOBS(cb, *it) { nmethod* nm = (nmethod*)cb; - if(nm->is_java_method()) { + if(nm->method() != NULL && nm->is_java_method()) { buckets[nm->insts_size() / bucketSize]++; } } @@ -868,7 +1065,7 @@ tty->print_cr("\nnmethod size distribution (non-zombie java)"); tty->print_cr("-------------------------------------------------"); - for(int i=0; iprint("%d - %d bytes",i*bucketSize,(i+1)*bucketSize); tty->fill_to(40); @@ -890,11 +1087,13 @@ CodeBlob_sizes live; CodeBlob_sizes dead; - FOR_ALL_BLOBS(p) { - if (!p->is_alive()) { - dead.add(p); - } else { - live.add(p); + FOR_ALL_HEAPS(it) { + FOR_ALL_BLOBS(cb, *it) { + if (!cb->is_alive()) { + dead.add(cb); + } else { + live.add(cb); + } } } @@ -910,21 +1109,22 @@ dead.print("dead"); } - if (WizardMode) { // print the oop_map usage int code_size = 0; int number_of_blobs = 0; int number_of_oop_maps = 0; int map_size = 0; - FOR_ALL_BLOBS(p) { - if (p->is_alive()) { - number_of_blobs++; - code_size += p->code_size(); - OopMapSet* set = p->oop_maps(); - if (set != NULL) { - number_of_oop_maps += set->size(); - map_size += set->heap_size(); + FOR_ALL_HEAPS(it) { + FOR_ALL_BLOBS(cb, *it) { + if (cb->is_alive()) { + number_of_blobs++; + code_size += cb->code_size(); + OopMapSet* set = cb->oop_maps(); + if (set != NULL) { + number_of_oop_maps += set->size(); + map_size += set->heap_size(); + } } } } @@ -939,20 +1139,26 @@ } void CodeCache::print_summary(outputStream* st, bool detailed) { - size_t total = (_heap->high_boundary() - _heap->low_boundary()); - st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT - "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb", - total/K, (total - unallocated_capacity())/K, - maxCodeCacheUsed/K, unallocated_capacity()/K); + st->print_cr("CodeCache Summary:"); + FOR_ALL_HEAPS(it) { + CodeHeap* heap = (*it); + size_t total = (heap->high_boundary() - heap->low_boundary()); + st->print_cr("Heap '%s': size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT + "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb", + heap->name(), total/K, (total - heap->unallocated_capacity())/K, + heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K); + + if (detailed) { + st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]", + heap->low_boundary(), + heap->high(), + heap->high_boundary()); + + } + } if (detailed) { - st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]", - _heap->low_boundary(), - _heap->high(), - _heap->high_boundary()); - st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT - " adapters=" UINT32_FORMAT, - nof_blobs(), nof_nmethods(), 
nof_adapters()); + log_state(st); st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ? "enabled" : Arguments::mode() == Arguments::_int ? "disabled (interpreter mode)" : @@ -962,8 +1168,7 @@ void CodeCache::log_state(outputStream* st) { st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" - " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", - nof_blobs(), nof_nmethods(), nof_adapters(), - unallocated_capacity()); + " adapters='" UINT32_FORMAT "'", + nof_blobs(), nof_nmethods(), nof_adapters()); } --- old/src/share/vm/ci/ciEnv.cpp 2013-10-11 15:44:35.457819672 +0200 +++ new/src/share/vm/ci/ciEnv.cpp 2013-10-11 15:44:35.213819681 +0200 @@ -1013,12 +1013,12 @@ } if (nm == NULL) { - // The CodeCache is full. Print out warning and disable compilation. + // The CodeCache is full. Print out warning and disable compilation. record_failure("code cache is full"); { MutexUnlocker ml(Compile_lock); MutexUnlocker locker(MethodCompileQueue_lock); - CompileBroker::handle_full_code_cache(); + CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level)); } } else { nm->set_has_unsafe_access(has_unsafe_access); --- old/src/share/vm/code/codeBlob.hpp 2013-10-11 15:44:35.429819673 +0200 +++ new/src/share/vm/code/codeBlob.hpp 2013-10-11 15:44:35.201819681 +0200 @@ -30,6 +30,16 @@ #include "runtime/frame.hpp" #include "runtime/handles.hpp" +// CodeBlob Types +// Used in the CodeCache to assign CodeBlobs to different CodeHeaps +struct CodeBlobType { + enum { + MethodNonProfiled = 0, // Tier 1 and tier 4 (non-profiled) nmethods (including native nmethods) + MethodProfiled = 1, // Tier 2 and Tier 3 (profiled) nmethods + NonMethod = 2 // Non-methods like Buffers, Adapters and Runtime Stubs + }; +}; + // CodeBlob - superclass for all entries in the CodeCache. 
// // Suptypes are: @@ -386,9 +396,6 @@ return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc); } - - - // GC for args void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ } --- old/src/share/vm/code/nmethod.hpp 2013-10-11 15:44:35.469819671 +0200 +++ new/src/share/vm/code/nmethod.hpp 2013-10-11 15:44:35.233819680 +0200 @@ -270,7 +270,7 @@ int comp_level); // helper methods - void* operator new(size_t size, int nmethod_size) throw(); + void* operator new(size_t size, int nmethod_size, int comp_level) throw(); const char* reloc_string_for(u_char* begin, u_char* end); // Returns true if this thread changed the state of the nmethod or --- old/src/share/vm/code/nmethod.cpp 2013-10-11 15:44:35.449819672 +0200 +++ new/src/share/vm/code/nmethod.cpp 2013-10-11 15:44:35.201819681 +0200 @@ -505,7 +505,7 @@ CodeOffsets offsets; offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); - nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size, + nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size, compile_id, &offsets, code_buffer, frame_size, basic_lock_owner_sp_offset, @@ -543,7 +543,7 @@ offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset); offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); - nm = new (nmethod_size) nmethod(method(), nmethod_size, + nm = new (nmethod_size, CompLevel_none) nmethod(method(), nmethod_size, &offsets, code_buffer, frame_size); NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm)); @@ -591,7 +591,7 @@ + round_to(nul_chk_table->size_in_bytes(), oopSize) + round_to(debug_info->data_size() , oopSize); - nm = new (nmethod_size) + nm = new (nmethod_size, comp_level) nmethod(method(), nmethod_size, compile_id, entry_bci, offsets, orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, oop_maps, @@ -803,9 +803,10 @@ } #endif // def HAVE_DTRACE_H -void* nmethod::operator new(size_t size, int nmethod_size) throw() { - // Not critical, may return null if there is too little continuous memory - return CodeCache::allocate(nmethod_size); +void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () { + // Nmethods are allocated on separate heaps and therefore do not share memory with critical CodeBlobs. + // We nevertheless define the allocation as critical to make sure all heap memory is used. + return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), true); } nmethod::nmethod( @@ -1438,7 +1439,7 @@ Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this); if (PrintMethodFlushing) { tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb", - _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024); + _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024); } // We need to deallocate any ExceptionCache data. @@ -1462,10 +1463,9 @@ ((CodeBlob*)(this))->flush(); - CodeCache::free(this); + CodeCache::free(this, CodeCache::get_code_blob_type(_comp_level)); } - // // Notify all classes this nmethod is dependent on that it is no // longer dependent. This should only be called in two situations. 
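Note on CodeCache::get_code_blob_type(): the new nmethod::operator new above (and several other
hunks in this change) select the target heap via CodeCache::get_code_blob_type(comp_level), whose
definition is not part of the hunks shown here. Going by the CodeBlobType comments in codeBlob.hpp
(tier 1 and tier 4 nmethods, including native wrappers, are non-profiled; tier 2 and tier 3 are
profiled), the mapping presumably looks like the following sketch; it is an illustration, not code
from the patch:

static int get_code_blob_type(int comp_level) {
  if (comp_level == CompLevel_none || comp_level == CompLevel_full_optimization) {
    // Tier 1 / tier 4 nmethods and native wrappers go to the non-profiled heap
    return CodeBlobType::MethodNonProfiled;
  }
  // Tier 2 / tier 3 (profiled) nmethods go to the profiled heap
  return CodeBlobType::MethodProfiled;
}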
@@ -2366,7 +2366,7 @@ ResourceMark rm; - if (!CodeCache::contains(this)) { + if (!CodeCache::contains_nmethod(this)) { fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this)); } --- old/src/share/vm/code/codeBlob.cpp 2013-10-11 15:44:35.481819671 +0200 +++ new/src/share/vm/code/codeBlob.cpp 2013-10-11 15:44:35.217819681 +0200 @@ -244,18 +244,15 @@ return blob; } - void* BufferBlob::operator new(size_t s, unsigned size) throw() { - void* p = CodeCache::allocate(size); - return p; + return CodeCache::allocate(size, CodeBlobType::NonMethod); } - -void BufferBlob::free( BufferBlob *blob ) { +void BufferBlob::free(BufferBlob *blob) { ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CodeCache::free((CodeBlob*)blob); + CodeCache::free((CodeBlob*)blob, CodeBlobType::NonMethod); } // Track memory usage statistic after releasing CodeCache_lock MemoryService::track_code_cache_memory_usage(); @@ -307,7 +304,6 @@ return blob; } - //---------------------------------------------------------------------------------------------------- // Implementation of RuntimeStub @@ -348,14 +344,14 @@ void* RuntimeStub::operator new(size_t s, unsigned size) throw() { - void* p = CodeCache::allocate(size, true); + void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true); if (!p) fatal("Initial size of CodeCache is too small"); return p; } // operator new shared by all singletons: void* SingletonBlob::operator new(size_t s, unsigned size) throw() { - void* p = CodeCache::allocate(size, true); + void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true); if (!p) fatal("Initial size of CodeCache is too small"); return p; } --- old/src/share/vm/compiler/compileBroker.hpp 2013-10-11 15:44:35.481819671 +0200 +++ new/src/share/vm/compiler/compileBroker.hpp 2013-10-11 15:44:35.237819680 +0200 @@ -401,7 +401,7 @@ jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state); return (old == (1-new_state)); } - static void handle_full_code_cache(); + static void handle_full_code_cache(int code_blob_type); // Return total compilation ticks static jlong total_compilation_ticks() { --- old/src/share/vm/compiler/compileBroker.cpp 2013-10-11 15:44:35.509819670 +0200 +++ new/src/share/vm/compiler/compileBroker.cpp 2013-10-11 15:44:35.265819679 +0200 @@ -1592,9 +1592,12 @@ // We need this HandleMark to avoid leaking VM handles. HandleMark hm(thread); - if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) { - // the code cache is really full - handle_full_code_cache(); + // Iterate over non-profiled and profiled nmethods + for (int code_blob_type = CodeBlobType::MethodNonProfiled; code_blob_type <= CodeBlobType::MethodProfiled; ++code_blob_type) { + if (CodeCache::is_full(code_blob_type)) { + // The CodeHeap for this compilation level is really full + handle_full_code_cache(code_blob_type); + } } CompileTask* task = queue->get(); @@ -1917,9 +1920,9 @@ // ------------------------------------------------------------------ // CompileBroker::handle_full_code_cache // -// The CodeCache is full. Print out warning and disable compilation or -// try code cache cleaning so compilation can continue later. -void CompileBroker::handle_full_code_cache() { +// The CodeCache is full. Disable compilation or try code cache +// cleaning so compilation can continue later. 
+void CompileBroker::handle_full_code_cache(int code_blob_type) { UseInterpreter = true; if (UseCompiler || AlwaysCompileLoopMethods ) { if (xtty != NULL) { @@ -1935,11 +1938,6 @@ xtty->stamp(); xtty->end_elem(); } - warning("CodeCache is full. Compiler has been disabled."); - warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); - - CodeCache::report_codemem_full(); - #ifndef PRODUCT if (CompileTheWorld || ExitOnFullCodeCache) { @@ -1960,7 +1958,7 @@ AlwaysCompileLoopMethods = false; } } - codecache_print(/* detailed= */ true); + CodeCache::report_codemem_full(code_blob_type); } // ------------------------------------------------------------------ --- old/src/share/vm/memory/heap.cpp 2013-10-11 15:44:35.829819658 +0200 +++ new/src/share/vm/memory/heap.cpp 2013-10-11 15:44:35.773819660 +0200 @@ -35,7 +35,9 @@ // Implementation of Heap -CodeHeap::CodeHeap() { +CodeHeap::CodeHeap(const char* name, const int code_blob_type) + : _code_blob_type(code_blob_type) { + _name = name; _number_of_committed_segments = 0; _number_of_reserved_segments = 0; _segment_size = 0; @@ -43,6 +45,8 @@ _next_segment = 0; _freelist = NULL; _freelist_segments = 0; + _max_allocated_capacity = 0; + _was_full = false; } @@ -87,9 +91,7 @@ } -bool CodeHeap::reserve(size_t reserved_size, size_t committed_size, - size_t segment_size) { - assert(reserved_size >= committed_size, "reserved < committed"); +bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) { assert(segment_size >= sizeof(FreeBlock), "segment size is too small"); assert(is_power_of_2(segment_size), "segment_size must be a power of 2"); @@ -98,17 +100,12 @@ // Reserve and initialize space for _memory. const size_t page_size = os::can_execute_large_page_memory() ? - os::page_size_for_region(committed_size, reserved_size, 8) : + os::page_size_for_region(committed_size, rs.size(), 8) : os::vm_page_size(); const size_t granularity = os::vm_allocation_granularity(); - const size_t r_align = MAX2(page_size, granularity); - const size_t r_size = align_size_up(reserved_size, r_align); const size_t c_size = align_size_up(committed_size, page_size); - const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 : - MAX2(page_size, granularity); - ReservedCodeSpace rs(r_size, rs_align, rs_align > 0); - os::trace_page_sizes("code heap", committed_size, reserved_size, page_size, + os::trace_page_sizes(_name, committed_size, rs.size(), page_size, rs.base(), rs.size()); if (!_memory.initialize(rs, c_size)) { return false; @@ -193,6 +190,7 @@ #ifdef ASSERT memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size); #endif + _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity()); return block->allocated_space(); } @@ -218,6 +216,7 @@ #ifdef ASSERT memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size); #endif + _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity()); return b->allocated_space(); } else { return NULL; --- old/src/share/vm/prims/jvmtiCodeBlobEvents.cpp 2013-10-11 15:44:37.373819599 +0200 +++ new/src/share/vm/prims/jvmtiCodeBlobEvents.cpp 2013-10-11 15:44:37.185819606 +0200 @@ -211,10 +211,11 @@ // created nmethod will notify normally and nmethods which are freed // can be safely skipped. 
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - nmethod* current = CodeCache::first_nmethod(); - while (current != NULL) { + // Iterate over non-profiled and profiled nmethods + for (int code_blob_type = CodeBlobType::MethodNonProfiled; code_blob_type <= CodeBlobType::MethodProfiled; ++code_blob_type) { // Only notify for live nmethods - if (current->is_alive()) { + nmethod* current = (nmethod*) CodeCache::first_alive_blob(code_blob_type); + while (current != NULL) { // Lock the nmethod so it can't be freed nmethodLocker nml(current); @@ -222,8 +223,9 @@ MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); current->get_and_cache_jmethod_id(); JvmtiExport::post_compiled_method_load(current); + + current = (nmethod*) CodeCache::next_alive_blob(current, code_blob_type); } - current = CodeCache::next_nmethod(current); } return JVMTI_ERROR_NONE; } --- old/src/share/vm/services/memoryService.hpp 2013-10-11 15:44:37.337819601 +0200 +++ new/src/share/vm/services/memoryService.hpp 2013-10-11 15:44:37.173819607 +0200 @@ -53,7 +53,8 @@ private: enum { init_pools_list_size = 10, - init_managers_list_size = 5 + init_managers_list_size = 5, + init_code_heap_pools_size = 9 }; // index for minor and major generations @@ -70,8 +71,9 @@ static GCMemoryManager* _major_gc_manager; static GCMemoryManager* _minor_gc_manager; - // Code heap memory pool - static MemoryPool* _code_heap_pool; + // memory manager and code heap pools for the CodeCache + static MemoryManager* _code_cache_manager; + static GrowableArray* _code_heap_pools; static MemoryPool* _metaspace_pool; static MemoryPool* _compressed_class_pool; @@ -123,7 +125,7 @@ public: static void set_universe_heap(CollectedHeap* heap); - static void add_code_heap_memory_pool(CodeHeap* heap); + static void add_code_heap_memory_pool(CodeHeap* heap, const char* name); static void add_metaspace_memory_pools(); static MemoryPool* get_memory_pool(instanceHandle pool); @@ -146,7 +148,10 @@ static void track_memory_usage(); static void track_code_cache_memory_usage() { - track_memory_pool_usage(_code_heap_pool); + // Track memory pool usage of all CodeCache memory pools + for (int i = 0; i < _code_heap_pools->length(); ++i) { + track_memory_pool_usage(_code_heap_pools->at(i)); + } } static void track_memory_pool_usage(MemoryPool* pool); --- old/src/share/vm/runtime/arguments.cpp 2013-10-11 15:44:37.401819598 +0200 +++ new/src/share/vm/runtime/arguments.cpp 2013-10-11 15:44:37.197819606 +0200 @@ -1125,8 +1125,15 @@ "Incompatible compilation policy selected", NULL); } // Increase the code cache size - tiered compiles a lot more. - if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) { + if (FLAG_IS_DEFAULT(ReservedCodeCacheSize) && + FLAG_IS_DEFAULT(ProfiledCodeHeapSize) && + FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) { + intx non_method_size = ReservedCodeCacheSize - (ProfiledCodeHeapSize + NonProfiledCodeHeapSize); + + // Multiply sizes by 5 but fix non_method_size (distribute among non-profiled and profiled code heap) FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 5); + FLAG_SET_DEFAULT(ProfiledCodeHeapSize, ProfiledCodeHeapSize * 5 + non_method_size * 2); + FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, NonProfiledCodeHeapSize * 5 + non_method_size * 2); } if (!UseInterpreter) { // -Xcomp Tier3InvokeNotifyFreqLog = 0; @@ -2339,6 +2346,17 @@ "Invalid ReservedCodeCacheSize=%dM. 
Must be at most %uM.\n", ReservedCodeCacheSize/M, (2*G)/M); status = false; + } else if (NonMethodCodeHeapSize < min_code_cache_size){ + jio_fprintf(defaultStream::error_stream(), + "Invalid NonMethodCodeHeapSize=%dK. Must be at least %uK.\n", ReservedCodeCacheSize/K, + min_code_cache_size/K); + status = false; + } else if ((!FLAG_IS_DEFAULT(NonMethodCodeHeapSize) || !FLAG_IS_DEFAULT(ProfiledCodeHeapSize) || !FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) + && (NonMethodCodeHeapSize + NonProfiledCodeHeapSize + ProfiledCodeHeapSize) != ReservedCodeCacheSize) { + jio_fprintf(defaultStream::error_stream(), + "Invalid NonMethodCodeHeapSize + ProfiledCodeHeapSize + NonProfiledCodeHeapSize = %dK. Must be equal to ReservedCodeCacheSize = %uK.\n", + (NonMethodCodeHeapSize + ProfiledCodeHeapSize + NonProfiledCodeHeapSize)/K, ReservedCodeCacheSize/K); + status = false; } status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction"); @@ -2748,8 +2766,30 @@ return JNI_EINVAL; } FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize); + // -XX:ProfiledCodeHeapSize= + } else if (match_option(option, "-XX:ProfiledCodeHeapSize=", &tail)) { + julong long_ProfiledCodeHeapSize = 0; + + ArgsRange errcode = parse_memory_size(tail, &long_ProfiledCodeHeapSize, 1); + if (errcode != arg_in_range) { + jio_fprintf(defaultStream::error_stream(), + "Invalid maximum profiled code heap size: %s.\n", option->optionString); + return JNI_EINVAL; + } + FLAG_SET_CMDLINE(uintx, ProfiledCodeHeapSize, (uintx)long_ProfiledCodeHeapSize); + // -XX:NonProfiledCodeHeapSizee= + } else if (match_option(option, "-XX:NonProfiledCodeHeapSize=", &tail)) { + julong long_NonProfiledCodeHeapSize = 0; + + ArgsRange errcode = parse_memory_size(tail, &long_NonProfiledCodeHeapSize, 1); + if (errcode != arg_in_range) { + jio_fprintf(defaultStream::error_stream(), + "Invalid maximum non-profiled code heap size: %s.\n", option->optionString); + return JNI_EINVAL; + } + FLAG_SET_CMDLINE(uintx, NonProfiledCodeHeapSize, (uintx)long_NonProfiledCodeHeapSize); //-XX:IncreaseFirstTierCompileThresholdAt= - } else if (match_option(option, "-XX:IncreaseFirstTierCompileThresholdAt=", &tail)) { + } else if (match_option(option, "-XX:IncreaseFirstTierCompileThresholdAt=", &tail)) { uintx uint_IncreaseFirstTierCompileThresholdAt = 0; if (!parse_uintx(tail, &uint_IncreaseFirstTierCompileThresholdAt, 0) || uint_IncreaseFirstTierCompileThresholdAt > 99) { jio_fprintf(defaultStream::error_stream(), --- old/src/share/vm/memory/heap.hpp 2013-10-11 15:44:37.389819599 +0200 +++ new/src/share/vm/memory/heap.hpp 2013-10-11 15:44:37.209819606 +0200 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_MEMORY_HEAP_HPP #define SHARE_VM_MEMORY_HEAP_HPP +#include "code/codeBlob.hpp" #include "memory/allocation.hpp" #include "runtime/virtualspace.hpp" @@ -91,7 +92,12 @@ size_t _next_segment; FreeBlock* _freelist; - size_t _freelist_segments; // No. of segments in freelist + size_t _freelist_segments; // No. 
of segments in freelist + size_t _max_allocated_capacity; // Peak capacity that was allocated during lifetime of the heap + + const char* _name; // Name of the CodeHeap + const int _code_blob_type; // CodeBlobType it contains + bool _was_full; // True if CodeHeap was full during runtime // Helper functions size_t size_to_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; } @@ -122,10 +128,10 @@ void on_code_mapping(char* base, size_t size); public: - CodeHeap(); + CodeHeap(const char* name, const int code_blob_type); // Heap extents - bool reserve(size_t reserved_size, size_t committed_size, size_t segment_size); + bool reserve(ReservedSpace rs, size_t committed_size, size_t segment_size); void release(); // releases all allocated memory bool expand_by(size_t size); // expands commited memory by size void shrink_by(size_t size); // shrinks commited memory by size @@ -157,8 +163,17 @@ size_t capacity() const; size_t max_capacity() const; size_t allocated_capacity() const; + size_t max_allocated_capacity() const { return _max_allocated_capacity; } size_t unallocated_capacity() const { return max_capacity() - allocated_capacity(); } + // Returns true if the CodeHeap contains CodeBlobs of the given type + bool accepts(int code_blob_type) const { return (_code_blob_type == code_blob_type); } + + // Debugging / Profiling + const char* name() const { return _name; } + bool was_full() { return _was_full; } + void report_full() { _was_full = true; } + private: size_t heap_unallocated_capacity() const; --- old/src/share/vm/services/memoryService.cpp 2013-10-11 15:44:37.489819595 +0200 +++ new/src/share/vm/services/memoryService.cpp 2013-10-11 15:44:37.241819604 +0200 @@ -63,7 +63,9 @@ GCMemoryManager* MemoryService::_minor_gc_manager = NULL; GCMemoryManager* MemoryService::_major_gc_manager = NULL; -MemoryPool* MemoryService::_code_heap_pool = NULL; +MemoryManager* MemoryService::_code_cache_manager = NULL; +GrowableArray* MemoryService::_code_heap_pools = + new (ResourceObj::C_HEAP, mtInternal) GrowableArray(init_code_heap_pools_size, true); MemoryPool* MemoryService::_metaspace_pool = NULL; MemoryPool* MemoryService::_compressed_class_pool = NULL; @@ -391,15 +393,21 @@ } #endif // INCLUDE_ALL_GCS -void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) { - _code_heap_pool = new CodeHeapPool(heap, - "Code Cache", - true /* support_usage_threshold */); - MemoryManager* mgr = MemoryManager::get_code_cache_memory_manager(); - mgr->add_pool(_code_heap_pool); +void MemoryService::add_code_heap_memory_pool(CodeHeap* heap, const char* name) { + // Create new memory pool for this heap + MemoryPool* code_heap_pool = new CodeHeapPool(heap, name, true /* support_usage_threshold */); + + // Append to lists + _code_heap_pools->append(code_heap_pool); + _pools_list->append(code_heap_pool); + + if (_code_cache_manager == NULL) { + // Create CodeCache memory manager + _code_cache_manager = MemoryManager::get_code_cache_memory_manager(); + _managers_list->append(_code_cache_manager); + } - _pools_list->append(_code_heap_pool); - _managers_list->append(mgr); + _code_cache_manager->add_pool(code_heap_pool); } void MemoryService::add_metaspace_memory_pools() { --- old/src/share/vm/runtime/sweeper.cpp 2013-10-11 15:44:37.473819596 +0200 +++ new/src/share/vm/runtime/sweeper.cpp 2013-10-11 15:44:37.281819603 +0200 @@ -127,7 +127,8 @@ #define SWEEP(nm) #endif -nmethod* NMethodSweeper::_current = NULL; // Current nmethod +nmethod* NMethodSweeper::_current_nmethod = NULL; // Current 
nmethod +int NMethodSweeper::_current_type = 0; // Current CodeBlobType long NMethodSweeper::_traversals = 0; // Nof. stack traversals performed int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep @@ -183,7 +184,7 @@ return _hotness_counter_reset_val; } bool NMethodSweeper::sweep_in_progress() { - return (_current != NULL); + return (_current_nmethod != NULL); } // Scans the stacks of all Java threads and marks activations of not-entrant methods. @@ -198,13 +199,14 @@ } // Check for restart - assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid"); + assert(CodeCache::find_blob_unsafe(_current_nmethod) == _current_nmethod, "Sweeper nmethod cached state invalid"); if (!sweep_in_progress() && need_marking_phase()) { - _seen = 0; - _invocations = NmethodSweepFraction; - _current = CodeCache::first_nmethod(); - _traversals += 1; - _total_time_this_sweep = 0; + _seen = 0; + _invocations = NmethodSweepFraction; + _current_nmethod = (nmethod*)CodeCache::first_blob(CodeBlobType::MethodNonProfiled); + _current_type = CodeBlobType::MethodNonProfiled; + _traversals += 1; + _total_time_this_sweep = 0; if (PrintMethodFlushing) { tty->print_cr("### Sweep: stack traversal %d", _traversals); @@ -251,7 +253,6 @@ } void NMethodSweeper::sweep_code_cache() { - jlong sweep_start_counter = os::elapsed_counter(); _flushed_count = 0; @@ -286,7 +287,7 @@ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); // The last invocation iterates until there are no more nmethods - for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) { + while ((swept_count < todo || _invocations == 1) && _current_nmethod != NULL) { swept_count++; if (SafepointSynchronize::is_synchronizing()) { // Safepoint request if (PrintMethodFlushing && Verbose) { @@ -302,19 +303,26 @@ // Since we will give up the CodeCache_lock, always skip ahead // to the next nmethod. Other blobs can be deleted by other // threads but nmethods are only reclaimed by the sweeper. 
- nmethod* next = CodeCache::next_nmethod(_current); + nmethod* next = (nmethod*)CodeCache::next_blob(_current_nmethod, _current_type); // Now ready to process nmethod and give up CodeCache_lock { MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - freed_memory += process_nmethod(_current); + freed_memory += process_nmethod(_current_nmethod, _current_type); } _seen++; - _current = next; + + while (next == NULL && _current_type < CodeBlobType::MethodProfiled) { + // We reached the last method of the type + // Go to next type that has methods available + _current_type++; + next = (nmethod*)CodeCache::first_blob(_current_type); + } + _current_nmethod = next; } } - assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache"); + assert(_invocations > 1 || _current_nmethod == NULL, "must have scanned the whole cache"); if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) { // we've completed a scan without making progress but there were @@ -407,7 +415,7 @@ nm->flush(); } -int NMethodSweeper::process_nmethod(nmethod *nm) { +int NMethodSweeper::process_nmethod(nmethod *nm, int code_blob_type) { assert(!CodeCache_lock->owned_by_self(), "just checking"); int freed_memory = 0; @@ -499,7 +507,7 @@ // ReservedCodeCacheSize int reset_val = hotness_counter_reset_val(); int time_since_reset = reset_val - nm->hotness_counter(); - double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity); + double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity); // The less free space in the code cache we have - the bigger reverse_free_ratio() is. // I.e., 'threshold' increases with lower available space in the code cache and a higher // NmethodSweepActivity. If the current hotness counter - which decreases from its initial --- old/src/share/vm/runtime/sharedRuntime.cpp 2013-10-11 15:44:37.469819596 +0200 +++ new/src/share/vm/runtime/sharedRuntime.cpp 2013-10-11 15:44:37.237819604 +0200 @@ -2461,7 +2461,7 @@ // Ought to log this but compile log is only per compile thread // and we're some non descript Java thread. 
MutexUnlocker mu(AdapterHandlerLibrary_lock); - CompileBroker::handle_full_code_cache(); + CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod); return NULL; // Out of CodeCache space } entry->relocate(B->content_begin()); @@ -2638,7 +2638,7 @@ nm->post_compiled_method_load_event(); } else { // CodeCache is full, disable compilation - CompileBroker::handle_full_code_cache(); + CompileBroker::handle_full_code_cache(CodeBlobType::MethodNonProfiled); } return nm; } --- old/src/share/vm/runtime/globals.hpp 2013-10-11 15:44:37.421819598 +0200 +++ new/src/share/vm/runtime/globals.hpp 2013-10-11 15:44:37.201819606 +0200 @@ -173,6 +173,10 @@ define_pd_global(intx, InlineUnsafeOps, true); define_pd_global(intx, InitialCodeCacheSize, 160*K); define_pd_global(intx, ReservedCodeCacheSize, 32*M); +define_pd_global(intx, NonProfiledCodeHeapSize, 19*M); +define_pd_global(intx, ProfiledCodeHeapSize, 9*M ); +define_pd_global(intx, NonMethodCodeHeapSize, 4*M ); + define_pd_global(intx, CodeCacheExpansionSize, 32*K); define_pd_global(intx, CodeCacheMinBlockLength, 1); define_pd_global(intx, CodeCacheMinimumUseSpace, 200*K); @@ -3231,6 +3235,15 @@ product_pd(uintx, ReservedCodeCacheSize, \ "Reserved code cache size (in bytes) - maximum code cache size") \ \ + product_pd(uintx, NonProfiledCodeHeapSize, \ + "Size of code heap with non-profiled methods (in bytes)") \ + \ + product_pd(uintx, ProfiledCodeHeapSize, \ + "Size of code heap with profiled methods (in bytes)") \ + \ + product_pd(uintx, NonMethodCodeHeapSize, \ + "Size of code heap with non-methods (in bytes)") \ + \ product(uintx, CodeCacheMinimumFreeSpace, 500*K, \ "When less than X space left, we stop compiling.") \ \ --- old/src/share/vm/runtime/advancedThresholdPolicy.cpp 2013-10-11 15:44:37.525819594 +0200 +++ new/src/share/vm/runtime/advancedThresholdPolicy.cpp 2013-10-11 15:44:37.353819600 +0200 @@ -211,7 +211,7 @@ // The main intention is to keep enough free space for C2 compiled code // to achieve peak performance if the code cache is under stress. if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) { - double current_reverse_free_ratio = CodeCache::reverse_free_ratio(); + double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level)); if (current_reverse_free_ratio > _increase_threshold_at_ratio) { k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio); } --- old/src/share/vm/runtime/vmStructs.cpp 2013-10-11 15:44:37.533819593 +0200 +++ new/src/share/vm/runtime/vmStructs.cpp 2013-10-11 15:44:37.413819598 +0200 @@ -755,8 +755,8 @@ /* CodeCache (NOTE: incomplete) */ \ /********************************/ \ \ - static_field(CodeCache, _heap, CodeHeap*) \ - static_field(CodeCache, _scavenge_root_nmethods, nmethod*) \ + static_field(CodeCache, _heaps, GrowableArray*) \ + static_field(CodeCache, _scavenge_root_nmethods, nmethod*) \ \ /*******************************/ \ /* CodeHeap (NOTE: incomplete) */ \ --- old/src/share/vm/runtime/sweeper.hpp 2013-10-11 15:44:37.537819593 +0200 +++ new/src/share/vm/runtime/sweeper.hpp 2013-10-11 15:44:37.309819602 +0200 @@ -53,12 +53,13 @@ // is full. class NMethodSweeper : public AllStatic { - static long _traversals; // Stack scan count, also sweep ID. - static nmethod* _current; // Current nmethod - static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache - static int _flushed_count; // Nof. nmethods flushed in current sweep - static int _zombified_count; // Nof. 
--- old/src/share/vm/runtime/sweeper.hpp	2013-10-11 15:44:37.537819593 +0200
+++ new/src/share/vm/runtime/sweeper.hpp	2013-10-11 15:44:37.309819602 +0200
@@ -53,12 +53,13 @@
 //     is full.
 class NMethodSweeper : public AllStatic {
-  static long      _traversals;      // Stack scan count, also sweep ID.
-  static nmethod*  _current;         // Current nmethod
-  static int       _seen;            // Nof. nmethod we have currently processed in current pass of CodeCache
-  static int       _flushed_count;   // Nof. nmethods flushed in current sweep
-  static int       _zombified_count; // Nof. nmethods made zombie in current sweep
-  static int       _marked_count;    // Nof. nmethods marked for reclaim in current sweep
+  static long      _traversals;      // Stack scan count, also sweep ID.
+  static nmethod*  _current_nmethod; // Current nmethod
+  static int       _current_type;    // Current CodeBlobType
+  static int       _seen;            // Nof. nmethod we have currently processed in current pass of CodeCache
+  static int       _flushed_count;   // Nof. nmethods flushed in current sweep
+  static int       _zombified_count; // Nof. nmethods made zombie in current sweep
+  static int       _marked_count;    // Nof. nmethods marked for reclaim in current sweep
   static volatile int _invocations;  // No. of invocations left until we are completed with this pass
   static volatile int _sweep_started; // Flag to control conc sweeper
@@ -76,7 +77,7 @@
   static jlong     _peak_sweep_time;          // Peak time for a full sweep
   static jlong     _peak_sweep_fraction_time; // Peak time sweeping one fraction
-  static int  process_nmethod(nmethod *nm);
+  static int  process_nmethod(nmethod *nm, int code_blob_type);
   static void release_nmethod(nmethod* nm);
   static bool sweep_in_progress();
@@ -97,7 +98,7 @@
 #ifdef ASSERT
-  static bool is_sweeping(nmethod* which) { return _current == which; }
+  static bool is_sweeping(nmethod* which) { return _current_nmethod == which; }
   // Keep track of sweeper activity in the ring buffer
   static void record_sweep(nmethod* nm, int line);
   static void report_events(int id, address entry);
--- old/src/share/vm/runtime/fprofiler.cpp	2013-10-11 15:44:37.573819592 +0200
+++ new/src/share/vm/runtime/fprofiler.cpp	2013-10-11 15:44:37.345819600 +0200
@@ -161,7 +161,7 @@
   for (int index = 0; index < s; index++) {
     counters[index] = 0;
   }
-  base = CodeCache::first_address();
+  base = CodeCache::low_bound();
 }
 void PCRecorder::record(address pc) {
--- old/src/share/vm/trace/trace.xml	2013-10-11 15:44:37.877819580 +0200
+++ new/src/share/vm/trace/trace.xml	2013-10-11 15:44:37.829819582 +0200
@@ -317,6 +317,7 @@
+
--- old/src/share/vm/utilities/growableArray.hpp	2013-10-11 15:44:39.085819535 +0200
+++ new/src/share/vm/utilities/growableArray.hpp	2013-10-11 15:44:39.025819537 +0200
@@ -76,6 +76,9 @@
   typedef int (*_sort_Fn)(const void *, const void *);
 }
+template<class E> class GrowableArrayIterator;
+template<class E, class UnaryPredicate> class GrowableArrayFilterIterator;
+
 class GenericGrowableArray : public ResourceObj {
   friend class VMStructs;
@@ -243,6 +246,14 @@
     return _data[_len-1];
   }
+  GrowableArrayIterator<E> begin() const {
+    return GrowableArrayIterator<E>(this, 0);
+  }
+
+  GrowableArrayIterator<E> end() const {
+    return GrowableArrayIterator<E>(this, length());
+  }
+
   void push(const E& elem) { append(elem); }
   E pop() {
@@ -412,4 +423,57 @@
   tty->print("}\n");
 }
+// Custom STL iterator to iterate over GrowableArrays
+// It is constructed by invoking GrowableArray<E>::begin() and GrowableArray<E>::end()
+template<class E> class GrowableArrayIterator {
+  friend class GrowableArray<E>;
+  template<class F, class UnaryPredicate> friend class GrowableArrayFilterIterator;
+
+ private:
+  const GrowableArray<E>* _array; // GrowableArray we iterate over
+  int _position;                  // The current position in the GrowableArray
+
+  // Private constructor used in GrowableArray::begin() and GrowableArray::end()
+  GrowableArrayIterator(const GrowableArray<E>* array, int position) : _array(array), _position(position) { }
+
+ public:
+  GrowableArrayIterator& operator++() { ++_position; return *this; }
+  bool operator==(const GrowableArrayIterator& rhs) { return _position == rhs._position; }
+  bool operator!=(const GrowableArrayIterator& rhs) { return _position != rhs._position; }
+  E operator*() { return _array->at(_position); }
+};
+
+// Custom STL iterator to iterate over elements of a GrowableArray that satisfy a given predicate
+template<class E, class UnaryPredicate> class GrowableArrayFilterIterator {
+  friend class GrowableArray<E>;
+
+ private:
+  const GrowableArray<E>* _array; // GrowableArray we iterate over
+  int _position;                  // Current position in the GrowableArray
+  UnaryPredicate _predicate;      // Unary predicate the elements of the GrowableArray should satisfy
+
+ public:
+  GrowableArrayFilterIterator(const GrowableArrayIterator<E>& begin, UnaryPredicate filter_predicate)
+   : _array(begin._array), _position(begin._position), _predicate(filter_predicate) {
+    // Advance to first element satisfying the predicate
+    while(_position != _array->length() && !_predicate(_array->at(_position))) {
+      ++_position;
+    }
+  }
+
+  GrowableArrayFilterIterator<E, UnaryPredicate>& operator++() {
+    do {
+      // Advance to next element satisfying the predicate
+      ++_position;
+    } while(_position != _array->length() && !_predicate(_array->at(_position)));
+    return *this;
+  }
+
+  bool operator==(const GrowableArrayIterator<E>& rhs) { return _position == rhs._position; }
+  bool operator!=(const GrowableArrayIterator<E>& rhs) { return _position != rhs._position; }
+  bool operator==(const GrowableArrayFilterIterator<E, UnaryPredicate>& rhs) { return _position == rhs._position; }
+  bool operator!=(const GrowableArrayFilterIterator<E, UnaryPredicate>& rhs) { return _position != rhs._position; }
+  E operator*() { return _array->at(_position); }
+};
+
 #endif // SHARE_VM_UTILITIES_GROWABLEARRAY_HPP
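The growableArray.hpp hunks above add STL-style iteration: GrowableArray::begin()/end() return a GrowableArrayIterator, and GrowableArrayFilterIterator wraps one to skip elements that fail a predicate (for example, visiting only the CodeHeaps of a given type in a GrowableArray<CodeHeap*>). The following standalone analogue, using std::vector in place of GrowableArray so it compiles outside HotSpot, shows the intended usage pattern:

#include <cstdio>
#include <vector>

// Standalone analogue of the filter-iterator pattern added above; std::vector
// stands in for GrowableArray so the sketch builds without HotSpot headers.
template <class E, class UnaryPredicate>
class FilterIterator {
 public:
  FilterIterator(const std::vector<E>* array, size_t position, UnaryPredicate predicate)
    : _array(array), _position(position), _predicate(predicate) {
    skip_to_match();   // advance to the first element satisfying the predicate
  }
  FilterIterator& operator++() { ++_position; skip_to_match(); return *this; }
  bool operator!=(const FilterIterator& rhs) const { return _position != rhs._position; }
  E operator*() const { return (*_array)[_position]; }

 private:
  void skip_to_match() {
    // Same idea as GrowableArrayFilterIterator: step over non-matching elements.
    while (_position != _array->size() && !_predicate((*_array)[_position])) {
      ++_position;
    }
  }
  const std::vector<E>* _array;
  size_t _position;
  UnaryPredicate _predicate;
};

struct IsEven {
  bool operator()(int v) const { return (v % 2) == 0; }
};

int main() {
  std::vector<int> values;
  for (int i = 0; i < 10; ++i) values.push_back(i);

  // Equivalent in spirit to constructing a GrowableArrayFilterIterator from
  // GrowableArray::begin() and iterating until GrowableArray::end().
  FilterIterator<int, IsEven> it(&values, 0, IsEven());
  FilterIterator<int, IsEven> end(&values, values.size(), IsEven());
  for (; it != end; ++it) {
    printf("%d\n", *it);
  }
  return 0;
}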