1 /* 2 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Rounds 'offset' up so that the code placed at that offset begins on a
// CodeEntryAlignment boundary.  The CodeHeap block header size is added
// before rounding and subtracted again afterwards because alignment is
// measured from the start of the heap block, not from 'offset' itself.
unsigned int align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  return
    ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
    - (int)CodeHeap::header_size();
}


// Computes the total allocation size (in bytes) for a CodeBlob built from
// the given CodeBuffer: header, relocation info, then the (alignment-padded)
// content/oop/metadata sections, each rounded up to oopSize.
// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  unsigned int size = header_size;
  size += round_to(cb->total_relocation_size(), oopSize);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += round_to(cb->total_content_size(), oopSize);
  size += round_to(cb->total_oop_size(), oopSize);
  size += round_to(cb->total_metadata_size(), oopSize);
  return size;
}


// Creates a simple CodeBlob. Sets up the size of the different regions.
CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
  // All sizes must already be oop-aligned; the layout below relies on it.
  assert(size == round_to(size, oopSize), "unaligned size");
  assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");
  assert(!UseRelocIndex, "no space allocated for reloc index yet");

  // Note: If UseRelocIndex is enabled, there needs to be (at least) one
  // extra word for the relocation information, containing the reloc
  // index table length. Unfortunately, the reloc index table imple-
  // mentation is not easily understandable and thus it is not clear
  // what exactly the format is supposed to be. For now, we just turn
  // off the use of this table (gri 7/6/2000).

  _name                  = name;
  _size                  = size;
  _frame_complete_offset = frame_complete;
  _header_size           = header_size;
  _relocation_size       = locs_size;
  // Content starts right after the relocation info, padded to CodeEntryAlignment.
  _content_offset        = align_code_offset(header_size + _relocation_size);
  _code_offset           = _content_offset;
  // No data section in this simple layout: data begins at the blob's end.
  _data_offset           = size;
  _frame_size            = 0;
  set_oop_maps(NULL);
}


// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions,
// and copy code and relocation info.
CodeBlob::CodeBlob(
  const char* name,
  CodeBuffer* cb,
  int         header_size,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps
) {
  assert(size == round_to(size, oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");

  _name                  = name;
  _size                  = size;
  _frame_complete_offset = frame_complete;
  _header_size           = header_size;
  _relocation_size       = round_to(cb->total_relocation_size(), oopSize);
  // Content starts after the relocation info, padded to CodeEntryAlignment;
  // this mirrors the layout computed in CodeBlob::allocation_size().
  _content_offset        = align_code_offset(header_size + _relocation_size);
  _code_offset           = _content_offset + cb->total_offset_of(cb->insts());
  _data_offset           = _content_offset + round_to(cb->total_content_size(), oopSize);
  assert(_data_offset <= size, "codeBlob is too small");

  cb->copy_code_and_locs_to(this);
  set_oop_maps(oop_maps);
  _frame_size = frame_size;
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}


// Copies the given OopMapSet into C-heap storage owned by this blob.
// The copy is released in CodeBlob::flush().
void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, it's your job to free it.
  if (p != NULL) {
    // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps
    _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size(), mtCode);
    p->copy_to((address)_oop_maps);
  } else {
    _oop_maps = NULL;
  }
}


// Announces a freshly created stub: optionally disassembles it, registers
// it with Forte, and posts a JVMTI dynamic-code-generated event.  The stub
// id is the concatenation of name1 and name2; JVMTI prefers name2 when it
// is non-empty.  Must be called after the CodeCache_lock is released.
void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != NULL) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
      Disassembler::decode(stub->code_begin(), stub->code_end());
      tty->cr();
    }
    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      // Use name2 if available, otherwise fall back to name1.
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}


// Releases the C-heap resources owned by this blob (the OopMapSet copy
// made in set_oop_maps(), and the code strings).
void CodeBlob::flush() {
  if (_oop_maps) {
    FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode);
    _oop_maps = NULL;
  }
  _strings.free();
}


// Looks up the OopMap covering the given return address inside this blob.
OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
  assert(oop_maps() != NULL, "nope");
  return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}


//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob


BufferBlob::BufferBlob(const char* name, int size)
: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}

// Allocates an empty BufferBlob of 'buffer_size' usable bytes in the code cache.
BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += round_to(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
: CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{}

// Allocates a BufferBlob in the code cache and fills it from the CodeBuffer.
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = allocation_size(cb, sizeof(BufferBlob));
  assert(name != NULL, "must provide a name");
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

// Placement allocator: carves the blob out of the code cache's non-method space.
void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonMethod, is_critical);
}

// Returns the blob's storage to the code cache.
void BufferBlob::free(BufferBlob *blob) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free((CodeBlob*)blob, CodeBlobType::NonMethod);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", size, cb) {
  CodeCache::commit(this);
}
269 270 AdapterBlob* AdapterBlob::create(CodeBuffer* cb) { 271 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 272 273 AdapterBlob* blob = NULL; 274 unsigned int size = allocation_size(cb, sizeof(AdapterBlob)); 275 { 276 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 277 // The parameter 'true' indicates a critical memory allocation. 278 // This means that CodeCacheMinimumFreeSpace is used, if necessary 279 const bool is_critical = true; 280 blob = new (size, is_critical) AdapterBlob(size, cb); 281 } 282 // Track memory usage statistic after releasing CodeCache_lock 283 MemoryService::track_code_cache_memory_usage(); 284 285 return blob; 286 } 287 288 289 //---------------------------------------------------------------------------------------------------- 290 // Implementation of MethodHandlesAdapterBlob 291 292 MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) { 293 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 294 295 MethodHandlesAdapterBlob* blob = NULL; 296 unsigned int size = sizeof(MethodHandlesAdapterBlob); 297 // align the size to CodeEntryAlignment 298 size = align_code_offset(size); 299 size += round_to(buffer_size, oopSize); 300 { 301 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 302 // The parameter 'true' indicates a critical memory allocation. 
303 // This means that CodeCacheMinimumFreeSpace is used, if necessary 304 const bool is_critical = true; 305 blob = new (size, is_critical) MethodHandlesAdapterBlob(size); 306 } 307 // Track memory usage statistic after releasing CodeCache_lock 308 MemoryService::track_code_cache_memory_usage(); 309 310 return blob; 311 } 312 313 //---------------------------------------------------------------------------------------------------- 314 // Implementation of RuntimeStub 315 316 RuntimeStub::RuntimeStub( 317 const char* name, 318 CodeBuffer* cb, 319 int size, 320 int frame_complete, 321 int frame_size, 322 OopMapSet* oop_maps, 323 bool caller_must_gc_arguments 324 ) 325 : CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps) 326 { 327 _caller_must_gc_arguments = caller_must_gc_arguments; 328 } 329 330 331 RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name, 332 CodeBuffer* cb, 333 int frame_complete, 334 int frame_size, 335 OopMapSet* oop_maps, 336 bool caller_must_gc_arguments) 337 { 338 RuntimeStub* stub = NULL; 339 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 340 { 341 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 342 unsigned int size = allocation_size(cb, sizeof(RuntimeStub)); 343 stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments); 344 } 345 346 trace_new_stub(stub, "RuntimeStub - ", stub_name); 347 348 return stub; 349 } 350 351 352 void* RuntimeStub::operator new(size_t s, unsigned size) throw() { 353 void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true); 354 if (!p) fatal("Initial size of CodeCache is too small"); 355 return p; 356 } 357 358 // operator new shared by all singletons: 359 void* SingletonBlob::operator new(size_t s, unsigned size) throw() { 360 void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true); 361 if (!p) fatal("Initial size of CodeCache is too small"); 
362 return p; 363 } 364 365 366 //---------------------------------------------------------------------------------------------------- 367 // Implementation of DeoptimizationBlob 368 369 DeoptimizationBlob::DeoptimizationBlob( 370 CodeBuffer* cb, 371 int size, 372 OopMapSet* oop_maps, 373 int unpack_offset, 374 int unpack_with_exception_offset, 375 int unpack_with_reexecution_offset, 376 int frame_size 377 ) 378 : SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps) 379 { 380 _unpack_offset = unpack_offset; 381 _unpack_with_exception = unpack_with_exception_offset; 382 _unpack_with_reexecution = unpack_with_reexecution_offset; 383 #ifdef COMPILER1 384 _unpack_with_exception_in_tls = -1; 385 #endif 386 } 387 388 389 DeoptimizationBlob* DeoptimizationBlob::create( 390 CodeBuffer* cb, 391 OopMapSet* oop_maps, 392 int unpack_offset, 393 int unpack_with_exception_offset, 394 int unpack_with_reexecution_offset, 395 int frame_size) 396 { 397 DeoptimizationBlob* blob = NULL; 398 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 399 { 400 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 401 unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob)); 402 blob = new (size) DeoptimizationBlob(cb, 403 size, 404 oop_maps, 405 unpack_offset, 406 unpack_with_exception_offset, 407 unpack_with_reexecution_offset, 408 frame_size); 409 } 410 411 trace_new_stub(blob, "DeoptimizationBlob"); 412 413 return blob; 414 } 415 416 417 //---------------------------------------------------------------------------------------------------- 418 // Implementation of UncommonTrapBlob 419 420 #ifdef COMPILER2 421 UncommonTrapBlob::UncommonTrapBlob( 422 CodeBuffer* cb, 423 int size, 424 OopMapSet* oop_maps, 425 int frame_size 426 ) 427 : SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps) 428 {} 429 430 431 UncommonTrapBlob* UncommonTrapBlob::create( 432 
CodeBuffer* cb, 433 OopMapSet* oop_maps, 434 int frame_size) 435 { 436 UncommonTrapBlob* blob = NULL; 437 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 438 { 439 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 440 unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob)); 441 blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size); 442 } 443 444 trace_new_stub(blob, "UncommonTrapBlob"); 445 446 return blob; 447 } 448 449 450 #endif // COMPILER2 451 452 453 //---------------------------------------------------------------------------------------------------- 454 // Implementation of ExceptionBlob 455 456 #ifdef COMPILER2 457 ExceptionBlob::ExceptionBlob( 458 CodeBuffer* cb, 459 int size, 460 OopMapSet* oop_maps, 461 int frame_size 462 ) 463 : SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps) 464 {} 465 466 467 ExceptionBlob* ExceptionBlob::create( 468 CodeBuffer* cb, 469 OopMapSet* oop_maps, 470 int frame_size) 471 { 472 ExceptionBlob* blob = NULL; 473 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 474 { 475 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 476 unsigned int size = allocation_size(cb, sizeof(ExceptionBlob)); 477 blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size); 478 } 479 480 trace_new_stub(blob, "ExceptionBlob"); 481 482 return blob; 483 } 484 485 486 #endif // COMPILER2 487 488 489 //---------------------------------------------------------------------------------------------------- 490 // Implementation of SafepointBlob 491 492 SafepointBlob::SafepointBlob( 493 CodeBuffer* cb, 494 int size, 495 OopMapSet* oop_maps, 496 int frame_size 497 ) 498 : SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps) 499 {} 500 501 502 SafepointBlob* SafepointBlob::create( 503 CodeBuffer* cb, 504 OopMapSet* oop_maps, 505 int frame_size) 506 { 507 
SafepointBlob* blob = NULL; 508 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock 509 { 510 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 511 unsigned int size = allocation_size(cb, sizeof(SafepointBlob)); 512 blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size); 513 } 514 515 trace_new_stub(blob, "SafepointBlob"); 516 517 return blob; 518 } 519 520 521 //---------------------------------------------------------------------------------------------------- 522 // Verification and printing 523 524 void CodeBlob::verify() { 525 ShouldNotReachHere(); 526 } 527 528 void CodeBlob::print_on(outputStream* st) const { 529 st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", this); 530 st->print_cr("Framesize: %d", _frame_size); 531 } 532 533 void CodeBlob::print_value_on(outputStream* st) const { 534 st->print_cr("[CodeBlob]"); 535 } 536 537 void BufferBlob::verify() { 538 // unimplemented 539 } 540 541 void BufferBlob::print_on(outputStream* st) const { 542 CodeBlob::print_on(st); 543 print_value_on(st); 544 } 545 546 void BufferBlob::print_value_on(outputStream* st) const { 547 st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", this, name()); 548 } 549 550 void RuntimeStub::verify() { 551 // unimplemented 552 } 553 554 void RuntimeStub::print_on(outputStream* st) const { 555 ttyLocker ttyl; 556 CodeBlob::print_on(st); 557 st->print("Runtime Stub (" INTPTR_FORMAT "): ", this); 558 st->print_cr(name()); 559 Disassembler::decode((CodeBlob*)this, st); 560 } 561 562 void RuntimeStub::print_value_on(outputStream* st) const { 563 st->print("RuntimeStub (" INTPTR_FORMAT "): ", this); st->print(name()); 564 } 565 566 void SingletonBlob::verify() { 567 // unimplemented 568 } 569 570 void SingletonBlob::print_on(outputStream* st) const { 571 ttyLocker ttyl; 572 CodeBlob::print_on(st); 573 st->print_cr(name()); 574 Disassembler::decode((CodeBlob*)this, st); 575 } 576 577 void SingletonBlob::print_value_on(outputStream* 
st) const { 578 st->print_cr(name()); 579 } 580 581 void DeoptimizationBlob::print_value_on(outputStream* st) const { 582 st->print_cr("Deoptimization (frame not available)"); 583 }