1 /* 2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.inline.hpp" 27 #include "classfile/symbolTable.hpp" 28 #include "classfile/systemDictionary.hpp" 29 #include "classfile/vmSymbols.hpp" 30 #include "code/codeCache.hpp" 31 #include "compiler/compileBroker.hpp" 32 #include "compiler/disassembler.hpp" 33 #include "gc/shared/barrierSetNMethod.hpp" 34 #include "gc/shared/collectedHeap.hpp" 35 #include "interpreter/interpreter.hpp" 36 #include "interpreter/interpreterRuntime.hpp" 37 #include "interpreter/linkResolver.hpp" 38 #include "interpreter/templateTable.hpp" 39 #include "logging/log.hpp" 40 #include "memory/oopFactory.hpp" 41 #include "memory/resourceArea.hpp" 42 #include "memory/universe.hpp" 43 #include "oops/constantPool.hpp" 44 #include "oops/cpCache.inline.hpp" 45 #include "oops/instanceKlass.hpp" 46 #include "oops/methodData.hpp" 47 #include "oops/objArrayKlass.hpp" 48 #include "oops/objArrayOop.inline.hpp" 49 #include "oops/oop.inline.hpp" 50 #include "oops/symbol.hpp" 51 #include "oops/valueKlass.hpp" 52 #include "oops/valueArrayKlass.hpp" 53 #include "oops/valueArrayOop.hpp" 54 #include "oops/valueArrayOop.inline.hpp" 55 #include "prims/jvmtiExport.hpp" 56 #include "prims/nativeLookup.hpp" 57 #include "runtime/atomic.hpp" 58 #include "runtime/biasedLocking.hpp" 59 #include "runtime/compilationPolicy.hpp" 60 #include "runtime/deoptimization.hpp" 61 #include "runtime/fieldDescriptor.inline.hpp" 62 #include "runtime/frame.inline.hpp" 63 #include "runtime/handles.inline.hpp" 64 #include "runtime/icache.hpp" 65 #include "runtime/interfaceSupport.inline.hpp" 66 #include "runtime/java.hpp" 67 #include "runtime/javaCalls.hpp" 68 #include "runtime/jfieldIDWorkaround.hpp" 69 #include "runtime/osThread.hpp" 70 #include "runtime/sharedRuntime.hpp" 71 #include "runtime/stubRoutines.hpp" 72 #include "runtime/synchronizer.hpp" 73 #include "runtime/threadCritical.hpp" 74 #include "utilities/align.hpp" 75 #include "utilities/copy.hpp" 76 #include "utilities/events.hpp" 77 #include "utilities/globalDefinitions.hpp" 78 #ifdef COMPILER2 79 #include "opto/runtime.hpp" 80 #endif 81 82 class UnlockFlagSaver { 83 private: 84 JavaThread* _thread; 85 bool _do_not_unlock; 86 public: 87 UnlockFlagSaver(JavaThread* t) { 88 _thread = t; 89 _do_not_unlock = t->do_not_unlock_if_synchronized(); 90 t->set_do_not_unlock_if_synchronized(false); 91 } 92 ~UnlockFlagSaver() { 93 
_thread->set_do_not_unlock_if_synchronized(_do_not_unlock); 94 } 95 }; 96 97 // Helper class to access current interpreter state 98 class LastFrameAccessor : public StackObj { 99 frame _last_frame; 100 public: 101 LastFrameAccessor(JavaThread* thread) { 102 assert(thread == Thread::current(), "sanity"); 103 _last_frame = thread->last_frame(); 104 } 105 bool is_interpreted_frame() const { return _last_frame.is_interpreted_frame(); } 106 Method* method() const { return _last_frame.interpreter_frame_method(); } 107 address bcp() const { return _last_frame.interpreter_frame_bcp(); } 108 int bci() const { return _last_frame.interpreter_frame_bci(); } 109 address mdp() const { return _last_frame.interpreter_frame_mdp(); } 110 111 void set_bcp(address bcp) { _last_frame.interpreter_frame_set_bcp(bcp); } 112 void set_mdp(address dp) { _last_frame.interpreter_frame_set_mdp(dp); } 113 114 // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272) 115 Bytecodes::Code code() const { return Bytecodes::code_at(method(), bcp()); } 116 117 Bytecode bytecode() const { return Bytecode(method(), bcp()); } 118 int get_index_u1(Bytecodes::Code bc) const { return bytecode().get_index_u1(bc); } 119 int get_index_u2(Bytecodes::Code bc) const { return bytecode().get_index_u2(bc); } 120 int get_index_u2_cpcache(Bytecodes::Code bc) const 121 { return bytecode().get_index_u2_cpcache(bc); } 122 int get_index_u4(Bytecodes::Code bc) const { return bytecode().get_index_u4(bc); } 123 int number_of_dimensions() const { return bcp()[3]; } 124 ConstantPoolCacheEntry* cache_entry_at(int i) const 125 { return method()->constants()->cache()->entry_at(i); } 126 ConstantPoolCacheEntry* cache_entry() const { return cache_entry_at(Bytes::get_native_u2(bcp() + 1)); } 127 128 oop callee_receiver(Symbol* signature) { 129 return _last_frame.interpreter_callee_receiver(signature); 130 } 131 BasicObjectLock* monitor_begin() const { 132 return _last_frame.interpreter_frame_monitor_begin(); 133 } 134 BasicObjectLock* monitor_end() const { 135 return _last_frame.interpreter_frame_monitor_end(); 136 } 137 BasicObjectLock* next_monitor(BasicObjectLock* current) const { 138 return _last_frame.next_monitor_in_interpreter_frame(current); 139 } 140 141 frame& get_frame() { return _last_frame; } 142 }; 143 144 //------------------------------------------------------------------------------------------------------------------------ 145 // State accessors 146 147 void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread *thread) { 148 LastFrameAccessor last_frame(thread); 149 last_frame.set_bcp(bcp); 150 if (ProfileInterpreter) { 151 // ProfileTraps uses MDOs independently of ProfileInterpreter. 152 // That is why we must check both ProfileInterpreter and mdo != NULL. 153 MethodData* mdo = last_frame.method()->method_data(); 154 if (mdo != NULL) { 155 NEEDS_CLEANUP; 156 last_frame.set_mdp(mdo->bci_to_dp(last_frame.bci())); 157 } 158 } 159 } 160 161 //------------------------------------------------------------------------------------------------------------------------ 162 // Constants 163 164 165 JRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide)) 166 // access constant pool 167 LastFrameAccessor last_frame(thread); 168 ConstantPool* pool = last_frame.method()->constants(); 169 int index = wide ? 
last_frame.get_index_u2(Bytecodes::_ldc_w) : last_frame.get_index_u1(Bytecodes::_ldc); 170 constantTag tag = pool->tag_at(index); 171 172 assert (tag.is_unresolved_klass() || tag.is_klass(), "wrong ldc call"); 173 Klass* klass = pool->klass_at(index, CHECK); 174 oop java_class = klass->java_mirror(); 175 thread->set_vm_result(java_class); 176 JRT_END 177 178 JRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* thread, Bytecodes::Code bytecode)) { 179 assert(bytecode == Bytecodes::_ldc || 180 bytecode == Bytecodes::_ldc_w || 181 bytecode == Bytecodes::_ldc2_w || 182 bytecode == Bytecodes::_fast_aldc || 183 bytecode == Bytecodes::_fast_aldc_w, "wrong bc"); 184 ResourceMark rm(thread); 185 const bool is_fast_aldc = (bytecode == Bytecodes::_fast_aldc || 186 bytecode == Bytecodes::_fast_aldc_w); 187 LastFrameAccessor last_frame(thread); 188 methodHandle m (thread, last_frame.method()); 189 Bytecode_loadconstant ldc(m, last_frame.bci()); 190 191 // Double-check the size. (Condy can have any type.) 192 BasicType type = ldc.result_type(); 193 switch (type2size[type]) { 194 case 2: guarantee(bytecode == Bytecodes::_ldc2_w, ""); break; 195 case 1: guarantee(bytecode != Bytecodes::_ldc2_w, ""); break; 196 default: ShouldNotReachHere(); 197 } 198 199 // Resolve the constant. This does not do unboxing. 200 // But it does replace Universe::the_null_sentinel by null. 201 oop result = ldc.resolve_constant(CHECK); 202 assert(result != NULL || is_fast_aldc, "null result only valid for fast_aldc"); 203 204 #ifdef ASSERT 205 { 206 // The bytecode wrappers aren't GC-safe so construct a new one 207 Bytecode_loadconstant ldc2(m, last_frame.bci()); 208 int rindex = ldc2.cache_index(); 209 if (rindex < 0) 210 rindex = m->constants()->cp_to_object_index(ldc2.pool_index()); 211 if (rindex >= 0) { 212 oop coop = m->constants()->resolved_references()->obj_at(rindex); 213 oop roop = (result == NULL ? Universe::the_null_sentinel() : result); 214 assert(roop == coop, "expected result for assembly code"); 215 } 216 } 217 #endif 218 thread->set_vm_result(result); 219 if (!is_fast_aldc) { 220 // Tell the interpreter how to unbox the primitive. 221 guarantee(java_lang_boxing_object::is_instance(result, type), ""); 222 int offset = java_lang_boxing_object::value_offset_in_bytes(type); 223 intptr_t flags = ((as_TosState(type) << ConstantPoolCacheEntry::tos_state_shift) 224 | (offset & ConstantPoolCacheEntry::field_index_mask)); 225 thread->set_vm_result_2((Metadata*)flags); 226 } 227 } 228 JRT_END 229 230 231 //------------------------------------------------------------------------------------------------------------------------ 232 // Allocation 233 234 JRT_ENTRY(void, InterpreterRuntime::_new(JavaThread* thread, ConstantPool* pool, int index)) 235 Klass* k = pool->klass_at(index, CHECK); 236 InstanceKlass* klass = InstanceKlass::cast(k); 237 238 // Make sure we are not instantiating an abstract klass 239 klass->check_valid_for_instantiation(true, CHECK); 240 241 // Make sure klass is initialized 242 klass->initialize(CHECK); 243 244 // At this point the class may not be fully initialized 245 // because of recursive initialization. If it is fully 246 // initialized & has_finalized is not set, we rewrite 247 // it into its fast version (Note: no locking is needed 248 // here since this is an atomic byte write and can be 249 // done more than once). 
250 // 251 // Note: In case of classes with has_finalized we don't 252 // rewrite since that saves us an extra check in 253 // the fast version which then would call the 254 // slow version anyway (and do a call back into 255 // Java). 256 // If we have a breakpoint, then we don't rewrite 257 // because the _breakpoint bytecode would be lost. 258 oop obj = klass->allocate_instance(CHECK); 259 thread->set_vm_result(obj); 260 JRT_END 261 262 void copy_primitive_argument(intptr_t* addr, Handle instance, int offset, BasicType type) { 263 switch (type) { 264 case T_BOOLEAN: 265 instance()->bool_field_put(offset, (jboolean)*((int*)addr)); 266 break; 267 case T_CHAR: 268 instance()->char_field_put(offset, (jchar) *((int*)addr)); 269 break; 270 case T_FLOAT: 271 instance()->float_field_put(offset, (jfloat)*((float*)addr)); 272 break; 273 case T_DOUBLE: 274 instance()->double_field_put(offset, (jdouble)*((double*)addr)); 275 break; 276 case T_BYTE: 277 instance()->byte_field_put(offset, (jbyte)*((int*)addr)); 278 break; 279 case T_SHORT: 280 instance()->short_field_put(offset, (jshort)*((int*)addr)); 281 break; 282 case T_INT: 283 instance()->int_field_put(offset, (jint)*((int*)addr)); 284 break; 285 case T_LONG: 286 instance()->long_field_put(offset, (jlong)*((long long*)addr)); 287 break; 288 case T_OBJECT: 289 case T_ARRAY: 290 case T_VALUETYPE: 291 fatal("Should not be handled with this method"); 292 break; 293 default: 294 fatal("Unsupported BasicType"); 295 } 296 } 297 298 JRT_ENTRY(void, InterpreterRuntime::defaultvalue(JavaThread* thread, ConstantPool* pool, int index)) 299 // Getting the ValueKlass 300 Klass* k = pool->klass_at(index, CHECK); 301 assert(k->is_value(), "defaultvalue argument must be the value type class"); 302 ValueKlass* vklass = ValueKlass::cast(k); 303 304 vklass->initialize(THREAD); 305 oop res = vklass->default_value(); 306 thread->set_vm_result(res); 307 JRT_END 308 309 JRT_ENTRY(int, InterpreterRuntime::withfield(JavaThread* thread, ConstantPoolCache* cp_cache)) 310 LastFrameAccessor last_frame(thread); 311 // Getting the ValueKlass 312 int index = ConstantPool::decode_cpcache_index(last_frame.get_index_u2_cpcache(Bytecodes::_withfield)); 313 ConstantPoolCacheEntry* cp_entry = cp_cache->entry_at(index); 314 assert(cp_entry->is_resolved(Bytecodes::_withfield), "Should have been resolved"); 315 Klass* klass = cp_entry->f1_as_klass(); 316 assert(klass->is_value(), "withfield only applies to value types"); 317 ValueKlass* vklass = ValueKlass::cast(klass); 318 319 // Getting Field information 320 int offset = cp_entry->f2_as_index(); 321 int field_index = cp_entry->field_index(); 322 int field_offset = cp_entry->f2_as_offset(); 323 Symbol* field_signature = vklass->field_signature(field_index); 324 ResourceMark rm(THREAD); 325 const char* signature = (const char *) field_signature->as_utf8(); 326 BasicType field_type = char2type(signature[0]); 327 328 // Getting old value 329 frame& f = last_frame.get_frame(); 330 jint tos_idx = f.interpreter_frame_expression_stack_size() - 1; 331 int vt_offset = type2size[field_type]; 332 oop old_value = *(oop*)f.interpreter_frame_expression_stack_at(tos_idx - vt_offset); 333 assert(old_value != NULL && oopDesc::is_oop(old_value) && old_value->is_value(),"Verifying receiver"); 334 Handle old_value_h(THREAD, old_value); 335 336 // Creating new value by copying the one passed in argument 337 instanceOop new_value = vklass->allocate_instance( 338 CHECK_((type2size[field_type]) * AbstractInterpreter::stackElementSize)); 339 Handle 
new_value_h = Handle(THREAD, new_value); 340 int first_offset = vklass->first_field_offset(); 341 vklass->value_store(vklass->data_for_oop(old_value_h()), 342 vklass->data_for_oop(new_value_h()), true, false); 343 344 // Updating the field specified in arguments 345 if (field_type == T_ARRAY || field_type == T_OBJECT) { 346 oop aoop = *(oop*)f.interpreter_frame_expression_stack_at(tos_idx); 347 assert(aoop == NULL || oopDesc::is_oop(aoop),"argument must be a reference type"); 348 new_value_h()->obj_field_put(field_offset, aoop); 349 } else if (field_type == T_VALUETYPE) { 350 if (cp_entry->is_flattened()) { 351 oop vt_oop = *(oop*)f.interpreter_frame_expression_stack_at(tos_idx); 352 if (vt_oop == NULL) { 353 THROW_(vmSymbols::java_lang_NullPointerException(), 354 (type2size[field_type] * AbstractInterpreter::stackElementSize)); 355 } 356 assert(vt_oop != NULL && oopDesc::is_oop(vt_oop) && vt_oop->is_value(),"argument must be a value type"); 357 Klass* field_k = vklass->get_value_field_klass(field_index); 358 ValueKlass* field_vk = ValueKlass::cast(field_k); 359 assert(field_vk == vt_oop->klass(), "Must match"); 360 field_vk->value_store(field_vk->data_for_oop(vt_oop), 361 ((char*)(oopDesc*)new_value_h()) + field_offset, false, false); 362 } else { // not flattened 363 oop voop = *(oop*)f.interpreter_frame_expression_stack_at(tos_idx); 364 if (voop == NULL && cp_entry->is_flattenable()) { 365 THROW_(vmSymbols::java_lang_NullPointerException(), 366 (type2size[field_type] * AbstractInterpreter::stackElementSize)); 367 } 368 assert(voop == NULL || oopDesc::is_oop(voop),"checking argument"); 369 new_value_h()->obj_field_put(field_offset, voop); 370 } 371 } else { // not T_OBJECT nor T_ARRAY nor T_VALUETYPE 372 intptr_t* addr = f.interpreter_frame_expression_stack_at(tos_idx); 373 copy_primitive_argument(addr, new_value_h, field_offset, field_type); 374 } 375 376 // returning result 377 thread->set_vm_result(new_value_h()); 378 return (type2size[field_type] + type2size[T_OBJECT]) * AbstractInterpreter::stackElementSize; 379 JRT_END 380 381 JRT_ENTRY(void, InterpreterRuntime::uninitialized_static_value_field(JavaThread* thread, oopDesc* mirror, int index)) 382 // The interpreter tries to access a flattenable static field that has not been initialized. 383 // This situation can happen in different scenarios: 384 // 1 - if the load or initialization of the field failed during step 8 of 385 // the initialization of the holder of the field, in this case the access to the field 386 // must fail 387 // 2 - it can also happen when the initialization of the holder class triggered the initialization of 388 // another class which accesses this field in its static initializer, in this case the 389 // access must succeed to allow circularity 390 // The code below tries to load and initialize the field's class again before returning the default value. 391 // If the field was not initialized because of an error, a exception should be thrown. 392 // If the class is being initialized, the default value is returned. 
393 instanceHandle mirror_h(THREAD, (instanceOop)mirror); 394 InstanceKlass* klass = InstanceKlass::cast(java_lang_Class::as_Klass(mirror)); 395 if (klass->is_being_initialized() && klass->is_reentrant_initialization(THREAD)) { 396 int offset = klass->field_offset(index); 397 Klass* field_k = klass->get_value_field_klass_or_null(index); 398 if (field_k == NULL) { 399 field_k = SystemDictionary::resolve_or_fail(klass->field_signature(index)->fundamental_name(THREAD), 400 Handle(THREAD, klass->class_loader()), 401 Handle(THREAD, klass->protection_domain()), 402 true, CHECK); 403 assert(field_k != NULL, "Should have been loaded or an exception thrown above"); 404 klass->set_value_field_klass(index, field_k); 405 } 406 field_k->initialize(CHECK); 407 oop defaultvalue = ValueKlass::cast(field_k)->default_value(); 408 // It is safe to initialized the static field because 1) the current thread is the initializing thread 409 // and is the only one that can access it, and 2) the field is actually not initialized (i.e. null) 410 // otherwise the JVM should not be executing this code. 411 mirror->obj_field_put(offset, defaultvalue); 412 thread->set_vm_result(defaultvalue); 413 } else { 414 assert(klass->is_in_error_state(), "If not initializing, initialization must have failed to get there"); 415 ResourceMark rm(THREAD); 416 const char* desc = "Could not initialize class "; 417 const char* className = klass->external_name(); 418 size_t msglen = strlen(desc) + strlen(className) + 1; 419 char* message = NEW_RESOURCE_ARRAY(char, msglen); 420 if (NULL == message) { 421 // Out of memory: can't create detailed error message 422 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className); 423 } else { 424 jio_snprintf(message, msglen, "%s%s", desc, className); 425 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message); 426 } 427 } 428 JRT_END 429 430 JRT_ENTRY(void, InterpreterRuntime::uninitialized_instance_value_field(JavaThread* thread, oopDesc* obj, int index)) 431 instanceHandle obj_h(THREAD, (instanceOop)obj); 432 InstanceKlass* klass = InstanceKlass::cast(obj_h()->klass()); 433 Klass* field_k = klass->get_value_field_klass_or_null(index); 434 assert(field_k != NULL, "Must have been initialized"); 435 ValueKlass* field_vklass = ValueKlass::cast(field_k); 436 assert(field_vklass->is_initialized(), "Must have been initialized at this point"); 437 instanceOop res = (instanceOop)field_vklass->default_value(); 438 thread->set_vm_result(res); 439 JRT_END 440 441 JRT_ENTRY(void, InterpreterRuntime::write_flattened_value(JavaThread* thread, oopDesc* value, int offset, oopDesc* rcv)) 442 assert(oopDesc::is_oop(value), "Sanity check"); 443 assert(oopDesc::is_oop(rcv), "Sanity check"); 444 assert(value->is_value(), "Sanity check"); 445 446 ValueKlass* vklass = ValueKlass::cast(value->klass()); 447 if (!vklass->is_empty_value()) { 448 vklass->value_store(vklass->data_for_oop(value), ((char*)(oopDesc*)rcv) + offset, true, true); 449 } 450 JRT_END 451 452 JRT_ENTRY(void, InterpreterRuntime::read_flattened_field(JavaThread* thread, oopDesc* obj, int index, Klass* field_holder)) 453 Handle obj_h(THREAD, obj); 454 455 assert(oopDesc::is_oop(obj), "Sanity check"); 456 457 assert(field_holder->is_instance_klass(), "Sanity check"); 458 InstanceKlass* klass = InstanceKlass::cast(field_holder); 459 460 assert(klass->field_is_flattened(index), "Sanity check"); 461 462 ValueKlass* field_vklass = ValueKlass::cast(klass->get_value_field_klass(index)); 463 assert(field_vklass->is_initialized(), "Must be 
initialized at this point"); 464 465 instanceOop res = NULL; 466 if (field_vklass->is_empty_value()) { 467 res = (instanceOop)field_vklass->default_value(); 468 } else { 469 // allocate instance 470 res = field_vklass->allocate_instance(CHECK); 471 // copy value 472 field_vklass->value_store(((char*)(oopDesc*)obj_h()) + klass->field_offset(index), 473 field_vklass->data_for_oop(res), true, true); 474 } 475 assert(res != NULL, "Must be set in one of two paths above"); 476 thread->set_vm_result(res); 477 JRT_END 478 479 JRT_ENTRY(void, InterpreterRuntime::newarray(JavaThread* thread, BasicType type, jint size)) 480 oop obj = oopFactory::new_typeArray(type, size, CHECK); 481 thread->set_vm_result(obj); 482 JRT_END 483 484 485 JRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* thread, ConstantPool* pool, int index, jint size)) 486 Klass* klass = pool->klass_at(index, CHECK); 487 bool is_qtype_desc = pool->tag_at(index).is_Qdescriptor_klass(); 488 arrayOop obj; 489 if ((!klass->is_array_klass()) && is_qtype_desc) { // Logically creates elements, ensure klass init 490 klass->initialize(CHECK); 491 obj = oopFactory::new_valueArray(klass, size, CHECK); 492 } else { 493 obj = oopFactory::new_objArray(klass, size, CHECK); 494 } 495 thread->set_vm_result(obj); 496 JRT_END 497 498 JRT_ENTRY(void, InterpreterRuntime::value_array_load(JavaThread* thread, arrayOopDesc* array, int index)) 499 Klass* klass = array->klass(); 500 assert(klass->is_valueArray_klass(), "expected value array oop"); 501 502 ValueArrayKlass* vaklass = ValueArrayKlass::cast(klass); 503 ValueKlass* vklass = vaklass->element_klass(); 504 arrayHandle ah(THREAD, array); 505 instanceOop value_holder = NULL; 506 if (vklass->is_empty_value()) { 507 value_holder = (instanceOop)vklass->default_value(); 508 } else { 509 value_holder = vklass->allocate_instance(CHECK); 510 void* src = ((valueArrayOop)ah())->value_at_addr(index, vaklass->layout_helper()); 511 vklass->value_store(src, vklass->data_for_oop(value_holder), 512 vaklass->element_byte_size(), true, false); 513 } 514 assert(value_holder != NULL, "Must be set in one of two paths above"); 515 thread->set_vm_result(value_holder); 516 JRT_END 517 518 JRT_ENTRY(void, InterpreterRuntime::value_array_store(JavaThread* thread, void* val, arrayOopDesc* array, int index)) 519 assert(val != NULL, "can't store null into flat array"); 520 Klass* klass = array->klass(); 521 assert(klass->is_valueArray_klass(), "expected value array"); 522 assert(ArrayKlass::cast(klass)->element_klass() == ((oop)val)->klass(), "Store type incorrect"); 523 524 valueArrayOop varray = (valueArrayOop)array; 525 ValueArrayKlass* vaklass = ValueArrayKlass::cast(klass); 526 ValueKlass* vklass = vaklass->element_klass(); 527 if (!vklass->is_empty_value()) { 528 const int lh = vaklass->layout_helper(); 529 vklass->value_store(vklass->data_for_oop((oop)val), varray->value_at_addr(index, lh), 530 vaklass->element_byte_size(), true, false); 531 } 532 JRT_END 533 534 JRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address)) 535 // We may want to pass in more arguments - could make this slightly faster 536 LastFrameAccessor last_frame(thread); 537 ConstantPool* constants = last_frame.method()->constants(); 538 int i = last_frame.get_index_u2(Bytecodes::_multianewarray); 539 Klass* klass = constants->klass_at(i, CHECK); 540 bool is_qtype = klass->name()->is_Q_array_signature(); 541 int nof_dims = last_frame.number_of_dimensions(); 542 assert(klass->is_klass(), "not a class"); 543 
assert(nof_dims >= 1, "multianewarray rank must be nonzero"); 544 545 if (is_qtype) { // Logically creates elements, ensure klass init 546 klass->initialize(CHECK); 547 } 548 549 // We must create an array of jints to pass to multi_allocate. 550 ResourceMark rm(thread); 551 const int small_dims = 10; 552 jint dim_array[small_dims]; 553 jint *dims = &dim_array[0]; 554 if (nof_dims > small_dims) { 555 dims = (jint*) NEW_RESOURCE_ARRAY(jint, nof_dims); 556 } 557 for (int index = 0; index < nof_dims; index++) { 558 // offset from first_size_address is addressed as local[index] 559 int n = Interpreter::local_offset_in_bytes(index)/jintSize; 560 dims[index] = first_size_address[n]; 561 } 562 oop obj = ArrayKlass::cast(klass)->multi_allocate(nof_dims, dims, CHECK); 563 thread->set_vm_result(obj); 564 JRT_END 565 566 567 JRT_ENTRY(void, InterpreterRuntime::register_finalizer(JavaThread* thread, oopDesc* obj)) 568 assert(oopDesc::is_oop(obj), "must be a valid oop"); 569 assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise"); 570 InstanceKlass::register_finalizer(instanceOop(obj), CHECK); 571 JRT_END 572 573 JRT_ENTRY(jboolean, InterpreterRuntime::is_substitutable(JavaThread* thread, oopDesc* aobj, oopDesc* bobj)) 574 assert(oopDesc::is_oop(aobj) && oopDesc::is_oop(bobj), "must be valid oops"); 575 576 Handle ha(THREAD, aobj); 577 Handle hb(THREAD, bobj); 578 JavaValue result(T_BOOLEAN); 579 JavaCallArguments args; 580 args.push_oop(ha); 581 args.push_oop(hb); 582 methodHandle method(Universe::is_substitutable_method()); 583 JavaCalls::call(&result, method, &args, THREAD); 584 guarantee(!HAS_PENDING_EXCEPTION, "isSubstitutable() raised exception"); 585 return result.get_jboolean(); 586 JRT_END 587 588 // Quicken instance-of and check-cast bytecodes 589 JRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread)) 590 // Force resolving; quicken the bytecode 591 LastFrameAccessor last_frame(thread); 592 int which = last_frame.get_index_u2(Bytecodes::_checkcast); 593 ConstantPool* cpool = last_frame.method()->constants(); 594 // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded 595 // program we might have seen an unquick'd bytecode in the interpreter but have another 596 // thread quicken the bytecode before we get here. 597 // assert( cpool->tag_at(which).is_unresolved_klass(), "should only come here to quicken bytecodes" ); 598 Klass* klass = cpool->klass_at(which, CHECK); 599 thread->set_vm_result_2(klass); 600 JRT_END 601 602 603 //------------------------------------------------------------------------------------------------------------------------ 604 // Exceptions 605 606 void InterpreterRuntime::note_trap_inner(JavaThread* thread, int reason, 607 const methodHandle& trap_method, int trap_bci, TRAPS) { 608 if (trap_method.not_null()) { 609 MethodData* trap_mdo = trap_method->method_data(); 610 if (trap_mdo == NULL) { 611 Method::build_interpreter_method_data(trap_method, THREAD); 612 if (HAS_PENDING_EXCEPTION) { 613 assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), 614 "we expect only an OOM error here"); 615 CLEAR_PENDING_EXCEPTION; 616 } 617 trap_mdo = trap_method->method_data(); 618 // and fall through... 619 } 620 if (trap_mdo != NULL) { 621 // Update per-method count of trap events. The interpreter 622 // is updating the MDO to simulate the effect of compiler traps. 
623 Deoptimization::update_method_data_from_interpreter(trap_mdo, trap_bci, reason); 624 } 625 } 626 } 627 628 // Assume the compiler is (or will be) interested in this event. 629 // If necessary, create an MDO to hold the information, and record it. 630 void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) { 631 assert(ProfileTraps, "call me only if profiling"); 632 LastFrameAccessor last_frame(thread); 633 methodHandle trap_method(thread, last_frame.method()); 634 int trap_bci = trap_method->bci_from(last_frame.bcp()); 635 note_trap_inner(thread, reason, trap_method, trap_bci, THREAD); 636 } 637 638 #ifdef CC_INTERP 639 // As legacy note_trap, but we have more arguments. 640 JRT_ENTRY(void, InterpreterRuntime::note_trap(JavaThread* thread, int reason, Method *method, int trap_bci)) 641 methodHandle trap_method(method); 642 note_trap_inner(thread, reason, trap_method, trap_bci, THREAD); 643 JRT_END 644 645 // Class Deoptimization is not visible in BytecodeInterpreter, so we need a wrapper 646 // for each exception. 647 void InterpreterRuntime::note_nullCheck_trap(JavaThread* thread, Method *method, int trap_bci) 648 { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_null_check, method, trap_bci); } 649 void InterpreterRuntime::note_div0Check_trap(JavaThread* thread, Method *method, int trap_bci) 650 { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_div0_check, method, trap_bci); } 651 void InterpreterRuntime::note_rangeCheck_trap(JavaThread* thread, Method *method, int trap_bci) 652 { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_range_check, method, trap_bci); } 653 void InterpreterRuntime::note_classCheck_trap(JavaThread* thread, Method *method, int trap_bci) 654 { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_class_check, method, trap_bci); } 655 void InterpreterRuntime::note_arrayCheck_trap(JavaThread* thread, Method *method, int trap_bci) 656 { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_array_check, method, trap_bci); } 657 #endif // CC_INTERP 658 659 660 static Handle get_preinitialized_exception(Klass* k, TRAPS) { 661 // get klass 662 InstanceKlass* klass = InstanceKlass::cast(k); 663 assert(klass->is_initialized(), 664 "this klass should have been initialized during VM initialization"); 665 // create instance - do not call constructor since we may have no 666 // (java) stack space left (should assert constructor is empty) 667 Handle exception; 668 oop exception_oop = klass->allocate_instance(CHECK_(exception)); 669 exception = Handle(THREAD, exception_oop); 670 if (StackTraceInThrowable) { 671 java_lang_Throwable::fill_in_stack_trace(exception); 672 } 673 return exception; 674 } 675 676 // Special handling for stack overflow: since we don't have any (java) stack 677 // space left we use the pre-allocated & pre-initialized StackOverflowError 678 // klass to create an stack overflow error instance. We do not call its 679 // constructor for the same reason (it is empty, anyway). 
680 JRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* thread)) 681 Handle exception = get_preinitialized_exception( 682 SystemDictionary::StackOverflowError_klass(), 683 CHECK); 684 // Increment counter for hs_err file reporting 685 Atomic::inc(&Exceptions::_stack_overflow_errors); 686 THROW_HANDLE(exception); 687 JRT_END 688 689 JRT_ENTRY(void, InterpreterRuntime::throw_delayed_StackOverflowError(JavaThread* thread)) 690 Handle exception = get_preinitialized_exception( 691 SystemDictionary::StackOverflowError_klass(), 692 CHECK); 693 java_lang_Throwable::set_message(exception(), 694 Universe::delayed_stack_overflow_error_message()); 695 // Increment counter for hs_err file reporting 696 Atomic::inc(&Exceptions::_stack_overflow_errors); 697 THROW_HANDLE(exception); 698 JRT_END 699 700 JRT_ENTRY(void, InterpreterRuntime::create_exception(JavaThread* thread, char* name, char* message)) 701 // lookup exception klass 702 TempNewSymbol s = SymbolTable::new_symbol(name); 703 if (ProfileTraps) { 704 if (s == vmSymbols::java_lang_ArithmeticException()) { 705 note_trap(thread, Deoptimization::Reason_div0_check, CHECK); 706 } else if (s == vmSymbols::java_lang_NullPointerException()) { 707 note_trap(thread, Deoptimization::Reason_null_check, CHECK); 708 } 709 } 710 // create exception 711 Handle exception = Exceptions::new_exception(thread, s, message); 712 thread->set_vm_result(exception()); 713 JRT_END 714 715 716 JRT_ENTRY(void, InterpreterRuntime::create_klass_exception(JavaThread* thread, char* name, oopDesc* obj)) 717 // Produce the error message first because note_trap can safepoint 718 ResourceMark rm(thread); 719 const char* klass_name = obj->klass()->external_name(); 720 // lookup exception klass 721 TempNewSymbol s = SymbolTable::new_symbol(name); 722 if (ProfileTraps) { 723 note_trap(thread, Deoptimization::Reason_class_check, CHECK); 724 } 725 // create exception, with klass name as detail message 726 Handle exception = Exceptions::new_exception(thread, s, klass_name); 727 thread->set_vm_result(exception()); 728 JRT_END 729 730 JRT_ENTRY(void, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException(JavaThread* thread, arrayOopDesc* a, jint index)) 731 // Produce the error message first because note_trap can safepoint 732 ResourceMark rm(thread); 733 stringStream ss; 734 ss.print("Index %d out of bounds for length %d", index, a->length()); 735 736 if (ProfileTraps) { 737 note_trap(thread, Deoptimization::Reason_range_check, CHECK); 738 } 739 740 THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string()); 741 JRT_END 742 743 JRT_ENTRY(void, InterpreterRuntime::throw_ClassCastException( 744 JavaThread* thread, oopDesc* obj)) 745 746 // Produce the error message first because note_trap can safepoint 747 ResourceMark rm(thread); 748 char* message = SharedRuntime::generate_class_cast_message( 749 thread, obj->klass()); 750 751 if (ProfileTraps) { 752 note_trap(thread, Deoptimization::Reason_class_check, CHECK); 753 } 754 755 // create exception 756 THROW_MSG(vmSymbols::java_lang_ClassCastException(), message); 757 JRT_END 758 759 // exception_handler_for_exception(...) returns the continuation address, 760 // the exception oop (via TLS) and sets the bci/bcp for the continuation. 761 // The exception oop is returned to make sure it is preserved over GC (it 762 // is only on the stack if the exception was thrown explicitly via athrow). 
763 // During this operation, the expression stack contains the values for the 764 // bci where the exception happened. If the exception was propagated back 765 // from a call, the expression stack contains the values for the bci at the 766 // invoke w/o arguments (i.e., as if one were inside the call). 767 JRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThread* thread, oopDesc* exception)) 768 769 LastFrameAccessor last_frame(thread); 770 Handle h_exception(thread, exception); 771 methodHandle h_method (thread, last_frame.method()); 772 constantPoolHandle h_constants(thread, h_method->constants()); 773 bool should_repeat; 774 int handler_bci; 775 int current_bci = last_frame.bci(); 776 777 if (thread->frames_to_pop_failed_realloc() > 0) { 778 // Allocation of scalar replaced object used in this frame 779 // failed. Unconditionally pop the frame. 780 thread->dec_frames_to_pop_failed_realloc(); 781 thread->set_vm_result(h_exception()); 782 // If the method is synchronized we already unlocked the monitor 783 // during deoptimization so the interpreter needs to skip it when 784 // the frame is popped. 785 thread->set_do_not_unlock_if_synchronized(true); 786 #ifdef CC_INTERP 787 return (address) -1; 788 #else 789 return Interpreter::remove_activation_entry(); 790 #endif 791 } 792 793 // Need to do this check first since when _do_not_unlock_if_synchronized 794 // is set, we don't want to trigger any classloading which may make calls 795 // into java, or surprisingly find a matching exception handler for bci 0 796 // since at this moment the method hasn't been "officially" entered yet. 797 if (thread->do_not_unlock_if_synchronized()) { 798 ResourceMark rm; 799 assert(current_bci == 0, "bci isn't zero for do_not_unlock_if_synchronized"); 800 thread->set_vm_result(exception); 801 #ifdef CC_INTERP 802 return (address) -1; 803 #else 804 return Interpreter::remove_activation_entry(); 805 #endif 806 } 807 808 do { 809 should_repeat = false; 810 811 // assertions 812 #ifdef ASSERT 813 assert(h_exception.not_null(), "NULL exceptions should be handled by athrow"); 814 // Check that exception is a subclass of Throwable, otherwise we have a VerifyError 815 if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) { 816 if (ExitVMOnVerifyError) vm_exit(-1); 817 ShouldNotReachHere(); 818 } 819 #endif 820 821 // tracing 822 if (log_is_enabled(Info, exceptions)) { 823 ResourceMark rm(thread); 824 stringStream tempst; 825 tempst.print("interpreter method <%s>\n" 826 " at bci %d for thread " INTPTR_FORMAT " (%s)", 827 h_method->print_value_string(), current_bci, p2i(thread), thread->name()); 828 Exceptions::log_exception(h_exception, tempst.as_string()); 829 } 830 // Don't go paging in something which won't be used. 831 // else if (extable->length() == 0) { 832 // // disabled for now - interpreter is not using shortcut yet 833 // // (shortcut is not to call runtime if we have no exception handlers) 834 // // warning("performance bug: should not call runtime if method has no exception handlers"); 835 // } 836 // for AbortVMOnException flag 837 Exceptions::debug_check_abort(h_exception); 838 839 // exception handler lookup 840 Klass* klass = h_exception->klass(); 841 handler_bci = Method::fast_exception_handler_bci_for(h_method, klass, current_bci, THREAD); 842 if (HAS_PENDING_EXCEPTION) { 843 // We threw an exception while trying to find the exception handler. 
844 // Transfer the new exception to the exception handle which will 845 // be set into thread local storage, and do another lookup for an 846 // exception handler for this exception, this time starting at the 847 // BCI of the exception handler which caused the exception to be 848 // thrown (bug 4307310). 849 h_exception = Handle(THREAD, PENDING_EXCEPTION); 850 CLEAR_PENDING_EXCEPTION; 851 if (handler_bci >= 0) { 852 current_bci = handler_bci; 853 should_repeat = true; 854 } 855 } 856 } while (should_repeat == true); 857 858 #if INCLUDE_JVMCI 859 if (EnableJVMCI && h_method->method_data() != NULL) { 860 ResourceMark rm(thread); 861 ProfileData* pdata = h_method->method_data()->allocate_bci_to_data(current_bci, NULL); 862 if (pdata != NULL && pdata->is_BitData()) { 863 BitData* bit_data = (BitData*) pdata; 864 bit_data->set_exception_seen(); 865 } 866 } 867 #endif 868 869 // notify JVMTI of an exception throw; JVMTI will detect if this is a first 870 // time throw or a stack unwinding throw and accordingly notify the debugger 871 if (JvmtiExport::can_post_on_exceptions()) { 872 JvmtiExport::post_exception_throw(thread, h_method(), last_frame.bcp(), h_exception()); 873 } 874 875 #ifdef CC_INTERP 876 address continuation = (address)(intptr_t) handler_bci; 877 #else 878 address continuation = NULL; 879 #endif 880 address handler_pc = NULL; 881 if (handler_bci < 0 || !thread->reguard_stack((address) &continuation)) { 882 // Forward exception to callee (leaving bci/bcp untouched) because (a) no 883 // handler in this method, or (b) after a stack overflow there is not yet 884 // enough stack space available to reprotect the stack. 885 #ifndef CC_INTERP 886 continuation = Interpreter::remove_activation_entry(); 887 #endif 888 #if COMPILER2_OR_JVMCI 889 // Count this for compilation purposes 890 h_method->interpreter_throwout_increment(THREAD); 891 #endif 892 } else { 893 // handler in this method => change bci/bcp to handler bci/bcp and continue there 894 handler_pc = h_method->code_base() + handler_bci; 895 #ifndef CC_INTERP 896 set_bcp_and_mdp(handler_pc, thread); 897 continuation = Interpreter::dispatch_table(vtos)[*handler_pc]; 898 #endif 899 } 900 // notify debugger of an exception catch 901 // (this is good for exceptions caught in native methods as well) 902 if (JvmtiExport::can_post_on_exceptions()) { 903 JvmtiExport::notice_unwind_due_to_exception(thread, h_method(), handler_pc, h_exception(), (handler_pc != NULL)); 904 } 905 906 thread->set_vm_result(h_exception()); 907 return continuation; 908 JRT_END 909 910 911 JRT_ENTRY(void, InterpreterRuntime::throw_pending_exception(JavaThread* thread)) 912 assert(thread->has_pending_exception(), "must only ne called if there's an exception pending"); 913 // nothing to do - eventually we should remove this code entirely (see comments @ call sites) 914 JRT_END 915 916 917 JRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodError(JavaThread* thread)) 918 THROW(vmSymbols::java_lang_AbstractMethodError()); 919 JRT_END 920 921 // This method is called from the "abstract_entry" of the interpreter. 922 // At that point, the arguments have already been removed from the stack 923 // and therefore we don't have the receiver object at our fingertips. (Though, 924 // on some platforms the receiver still resides in a register...). Thus, 925 // we have no choice but print an error message not containing the receiver 926 // type. 
927 JRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodErrorWithMethod(JavaThread* thread, 928 Method* missingMethod)) 929 ResourceMark rm(thread); 930 assert(missingMethod != NULL, "sanity"); 931 methodHandle m(thread, missingMethod); 932 LinkResolver::throw_abstract_method_error(m, THREAD); 933 JRT_END 934 935 JRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodErrorVerbose(JavaThread* thread, 936 Klass* recvKlass, 937 Method* missingMethod)) 938 ResourceMark rm(thread); 939 methodHandle mh = methodHandle(thread, missingMethod); 940 LinkResolver::throw_abstract_method_error(mh, recvKlass, THREAD); 941 JRT_END 942 943 944 JRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeError(JavaThread* thread)) 945 THROW(vmSymbols::java_lang_IncompatibleClassChangeError()); 946 JRT_END 947 948 JRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose(JavaThread* thread, 949 Klass* recvKlass, 950 Klass* interfaceKlass)) 951 ResourceMark rm(thread); 952 char buf[1000]; 953 buf[0] = '\0'; 954 jio_snprintf(buf, sizeof(buf), 955 "Class %s does not implement the requested interface %s", 956 recvKlass ? recvKlass->external_name() : "NULL", 957 interfaceKlass ? interfaceKlass->external_name() : "NULL"); 958 THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); 959 JRT_END 960 961 //------------------------------------------------------------------------------------------------------------------------ 962 // Fields 963 // 964 965 void InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode) { 966 Thread* THREAD = thread; 967 // resolve field 968 fieldDescriptor info; 969 LastFrameAccessor last_frame(thread); 970 constantPoolHandle pool(thread, last_frame.method()->constants()); 971 methodHandle m(thread, last_frame.method()); 972 bool is_put = (bytecode == Bytecodes::_putfield || bytecode == Bytecodes::_nofast_putfield || 973 bytecode == Bytecodes::_putstatic || bytecode == Bytecodes::_withfield); 974 bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic); 975 bool is_value = bytecode == Bytecodes::_withfield; 976 977 { 978 JvmtiHideSingleStepping jhss(thread); 979 LinkResolver::resolve_field_access(info, pool, last_frame.get_index_u2_cpcache(bytecode), 980 m, bytecode, CHECK); 981 } // end JvmtiHideSingleStepping 982 983 // check if link resolution caused cpCache to be updated 984 ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry(); 985 if (cp_cache_entry->is_resolved(bytecode)) return; 986 987 // compute auxiliary field attributes 988 TosState state = as_TosState(info.field_type()); 989 990 // Resolution of put instructions on final fields is delayed. That is required so that 991 // exceptions are thrown at the correct place (when the instruction is actually invoked). 992 // If we do not resolve an instruction in the current pass, leaving the put_code 993 // set to zero will cause the next put instruction to the same field to reresolve. 994 995 // Resolution of put instructions to final instance fields with invalid updates (i.e., 996 // to final instance fields with updates originating from a method different than <init>) 997 // is inhibited. A putfield instruction targeting an instance final field must throw 998 // an IllegalAccessError if the instruction is not in an instance 999 // initializer method <init>. If resolution were not inhibited, a putfield 1000 // in an initializer method could be resolved in the initializer. 
Subsequent 1001 // putfield instructions to the same field would then use cached information. 1002 // As a result, those instructions would not pass through the VM. That is, 1003 // checks in resolve_field_access() would not be executed for those instructions 1004 // and the required IllegalAccessError would not be thrown. 1005 // 1006 // Also, we need to delay resolving getstatic and putstatic instructions until the 1007 // class is initialized. This is required so that access to the static 1008 // field will call the initialization function every time until the class 1009 // is completely initialized ala. in 2.17.5 in JVM Specification. 1010 InstanceKlass* klass = info.field_holder(); 1011 bool uninitialized_static = is_static && !klass->is_initialized(); 1012 bool has_initialized_final_update = info.field_holder()->major_version() >= 53 && 1013 info.has_initialized_final_update(); 1014 assert(!(has_initialized_final_update && !info.access_flags().is_final()), "Fields with initialized final updates must be final"); 1015 1016 Bytecodes::Code get_code = (Bytecodes::Code)0; 1017 Bytecodes::Code put_code = (Bytecodes::Code)0; 1018 if (!uninitialized_static) { 1019 if (is_static) { 1020 get_code = Bytecodes::_getstatic; 1021 } else { 1022 get_code = Bytecodes::_getfield; 1023 } 1024 if (is_put && is_value) { 1025 put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_withfield); 1026 } else if ((is_put && !has_initialized_final_update) || !info.access_flags().is_final()) { 1027 put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield); 1028 } 1029 } 1030 1031 cp_cache_entry->set_field( 1032 get_code, 1033 put_code, 1034 info.field_holder(), 1035 info.index(), 1036 info.offset(), 1037 state, 1038 info.access_flags().is_final(), 1039 info.access_flags().is_volatile(), 1040 info.is_flattened(), 1041 info.is_flattenable(), 1042 pool->pool_holder() 1043 ); 1044 } 1045 1046 1047 //------------------------------------------------------------------------------------------------------------------------ 1048 // Synchronization 1049 // 1050 // The interpreter's synchronization code is factored out so that it can 1051 // be shared by method invocation and synchronized blocks. 
1052 //%note synchronization_3 1053 1054 //%note monitor_1 1055 JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* thread, BasicObjectLock* elem)) 1056 #ifdef ASSERT 1057 thread->last_frame().interpreter_frame_verify_monitor(elem); 1058 #endif 1059 if (PrintBiasedLockingStatistics) { 1060 Atomic::inc(BiasedLocking::slow_path_entry_count_addr()); 1061 } 1062 Handle h_obj(thread, elem->obj()); 1063 assert(Universe::heap()->is_in_or_null(h_obj()), 1064 "must be NULL or an object"); 1065 ObjectSynchronizer::enter(h_obj, elem->lock(), CHECK); 1066 assert(Universe::heap()->is_in_or_null(elem->obj()), 1067 "must be NULL or an object"); 1068 #ifdef ASSERT 1069 thread->last_frame().interpreter_frame_verify_monitor(elem); 1070 #endif 1071 JRT_END 1072 1073 1074 //%note monitor_1 1075 JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorexit(JavaThread* thread, BasicObjectLock* elem)) 1076 #ifdef ASSERT 1077 thread->last_frame().interpreter_frame_verify_monitor(elem); 1078 #endif 1079 Handle h_obj(thread, elem->obj()); 1080 assert(Universe::heap()->is_in_or_null(h_obj()), 1081 "must be NULL or an object"); 1082 if (elem == NULL || h_obj()->is_unlocked()) { 1083 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); 1084 } 1085 ObjectSynchronizer::exit(h_obj(), elem->lock(), thread); 1086 // Free entry. This must be done here, since a pending exception might be installed on 1087 // exit. If it is not cleared, the exception handling code will try to unlock the monitor again. 1088 elem->set_obj(NULL); 1089 #ifdef ASSERT 1090 thread->last_frame().interpreter_frame_verify_monitor(elem); 1091 #endif 1092 JRT_END 1093 1094 1095 JRT_ENTRY(void, InterpreterRuntime::throw_illegal_monitor_state_exception(JavaThread* thread)) 1096 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); 1097 JRT_END 1098 1099 1100 JRT_ENTRY(void, InterpreterRuntime::new_illegal_monitor_state_exception(JavaThread* thread)) 1101 // Returns an illegal exception to install into the current thread. The 1102 // pending_exception flag is cleared so normal exception handling does not 1103 // trigger. Any current installed exception will be overwritten. This 1104 // method will be called during an exception unwind. 
1105 1106 assert(!HAS_PENDING_EXCEPTION, "no pending exception"); 1107 Handle exception(thread, thread->vm_result()); 1108 assert(exception() != NULL, "vm result should be set"); 1109 thread->set_vm_result(NULL); // clear vm result before continuing (may cause memory leaks and assert failures) 1110 if (!exception->is_a(SystemDictionary::ThreadDeath_klass())) { 1111 exception = get_preinitialized_exception( 1112 SystemDictionary::IllegalMonitorStateException_klass(), 1113 CATCH); 1114 } 1115 thread->set_vm_result(exception()); 1116 JRT_END 1117 1118 1119 //------------------------------------------------------------------------------------------------------------------------ 1120 // Invokes 1121 1122 JRT_ENTRY(Bytecodes::Code, InterpreterRuntime::get_original_bytecode_at(JavaThread* thread, Method* method, address bcp)) 1123 return method->orig_bytecode_at(method->bci_from(bcp)); 1124 JRT_END 1125 1126 JRT_ENTRY(void, InterpreterRuntime::set_original_bytecode_at(JavaThread* thread, Method* method, address bcp, Bytecodes::Code new_code)) 1127 method->set_orig_bytecode_at(method->bci_from(bcp), new_code); 1128 JRT_END 1129 1130 JRT_ENTRY(void, InterpreterRuntime::_breakpoint(JavaThread* thread, Method* method, address bcp)) 1131 JvmtiExport::post_raw_breakpoint(thread, method, bcp); 1132 JRT_END 1133 1134 void InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode) { 1135 Thread* THREAD = thread; 1136 LastFrameAccessor last_frame(thread); 1137 // extract receiver from the outgoing argument list if necessary 1138 Handle receiver(thread, NULL); 1139 if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface || 1140 bytecode == Bytecodes::_invokespecial) { 1141 ResourceMark rm(thread); 1142 methodHandle m (thread, last_frame.method()); 1143 Bytecode_invoke call(m, last_frame.bci()); 1144 Symbol* signature = call.signature(); 1145 receiver = Handle(thread, last_frame.callee_receiver(signature)); 1146 1147 assert(Universe::heap()->is_in_or_null(receiver()), 1148 "sanity check"); 1149 assert(receiver.is_null() || 1150 !Universe::heap()->is_in(receiver->klass()), 1151 "sanity check"); 1152 } 1153 1154 // resolve method 1155 CallInfo info; 1156 constantPoolHandle pool(thread, last_frame.method()->constants()); 1157 1158 { 1159 JvmtiHideSingleStepping jhss(thread); 1160 LinkResolver::resolve_invoke(info, receiver, pool, 1161 last_frame.get_index_u2_cpcache(bytecode), bytecode, 1162 CHECK); 1163 if (JvmtiExport::can_hotswap_or_post_breakpoint()) { 1164 int retry_count = 0; 1165 while (info.resolved_method()->is_old()) { 1166 // It is very unlikely that method is redefined more than 100 times 1167 // in the middle of resolve. If it is looping here more than 100 times 1168 // means then there could be a bug here. 1169 guarantee((retry_count++ < 100), 1170 "Could not resolve to latest version of redefined method"); 1171 // method is redefined in the middle of resolve so re-try. 
1172 LinkResolver::resolve_invoke(info, receiver, pool, 1173 last_frame.get_index_u2_cpcache(bytecode), bytecode, 1174 CHECK); 1175 } 1176 } 1177 } // end JvmtiHideSingleStepping 1178 1179 // check if link resolution caused cpCache to be updated 1180 ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry(); 1181 if (cp_cache_entry->is_resolved(bytecode)) return; 1182 1183 #ifdef ASSERT 1184 if (bytecode == Bytecodes::_invokeinterface) { 1185 if (info.resolved_method()->method_holder() == 1186 SystemDictionary::Object_klass()) { 1187 // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec 1188 // (see also CallInfo::set_interface for details) 1189 assert(info.call_kind() == CallInfo::vtable_call || 1190 info.call_kind() == CallInfo::direct_call, ""); 1191 methodHandle rm = info.resolved_method(); 1192 assert(rm->is_final() || info.has_vtable_index(), 1193 "should have been set already"); 1194 } else if (!info.resolved_method()->has_itable_index()) { 1195 // Resolved something like CharSequence.toString. Use vtable not itable. 1196 assert(info.call_kind() != CallInfo::itable_call, ""); 1197 } else { 1198 // Setup itable entry 1199 assert(info.call_kind() == CallInfo::itable_call, ""); 1200 int index = info.resolved_method()->itable_index(); 1201 assert(info.itable_index() == index, ""); 1202 } 1203 } else if (bytecode == Bytecodes::_invokespecial) { 1204 assert(info.call_kind() == CallInfo::direct_call, "must be direct call"); 1205 } else { 1206 assert(info.call_kind() == CallInfo::direct_call || 1207 info.call_kind() == CallInfo::vtable_call, ""); 1208 } 1209 #endif 1210 // Get sender or sender's unsafe_anonymous_host, and only set cpCache entry to resolved if 1211 // it is not an interface. The receiver for invokespecial calls within interface 1212 // methods must be checked for every call. 1213 InstanceKlass* sender = pool->pool_holder(); 1214 sender = sender->is_unsafe_anonymous() ? sender->unsafe_anonymous_host() : sender; 1215 1216 switch (info.call_kind()) { 1217 case CallInfo::direct_call: 1218 cp_cache_entry->set_direct_call( 1219 bytecode, 1220 info.resolved_method(), 1221 sender->is_interface()); 1222 break; 1223 case CallInfo::vtable_call: 1224 cp_cache_entry->set_vtable_call( 1225 bytecode, 1226 info.resolved_method(), 1227 info.vtable_index()); 1228 break; 1229 case CallInfo::itable_call: 1230 cp_cache_entry->set_itable_call( 1231 bytecode, 1232 info.resolved_klass(), 1233 info.resolved_method(), 1234 info.itable_index()); 1235 break; 1236 default: ShouldNotReachHere(); 1237 } 1238 } 1239 1240 1241 // First time execution: Resolve symbols, create a permanent MethodType object. 1242 void InterpreterRuntime::resolve_invokehandle(JavaThread* thread) { 1243 Thread* THREAD = thread; 1244 const Bytecodes::Code bytecode = Bytecodes::_invokehandle; 1245 LastFrameAccessor last_frame(thread); 1246 1247 // resolve method 1248 CallInfo info; 1249 constantPoolHandle pool(thread, last_frame.method()->constants()); 1250 { 1251 JvmtiHideSingleStepping jhss(thread); 1252 LinkResolver::resolve_invoke(info, Handle(), pool, 1253 last_frame.get_index_u2_cpcache(bytecode), bytecode, 1254 CHECK); 1255 } // end JvmtiHideSingleStepping 1256 1257 ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry(); 1258 cp_cache_entry->set_method_handle(pool, info); 1259 } 1260 1261 // First time execution: Resolve symbols, create a permanent CallSite object. 
1262 void InterpreterRuntime::resolve_invokedynamic(JavaThread* thread) { 1263 Thread* THREAD = thread; 1264 LastFrameAccessor last_frame(thread); 1265 const Bytecodes::Code bytecode = Bytecodes::_invokedynamic; 1266 1267 // resolve method 1268 CallInfo info; 1269 constantPoolHandle pool(thread, last_frame.method()->constants()); 1270 int index = last_frame.get_index_u4(bytecode); 1271 { 1272 JvmtiHideSingleStepping jhss(thread); 1273 LinkResolver::resolve_invoke(info, Handle(), pool, 1274 index, bytecode, CHECK); 1275 } // end JvmtiHideSingleStepping 1276 1277 ConstantPoolCacheEntry* cp_cache_entry = pool->invokedynamic_cp_cache_entry_at(index); 1278 cp_cache_entry->set_dynamic_call(pool, info); 1279 } 1280 1281 // This function is the interface to the assembly code. It returns the resolved 1282 // cpCache entry. This doesn't safepoint, but the helper routines safepoint. 1283 // This function will check for redefinition! 1284 JRT_ENTRY(void, InterpreterRuntime::resolve_from_cache(JavaThread* thread, Bytecodes::Code bytecode)) { 1285 switch (bytecode) { 1286 case Bytecodes::_getstatic: 1287 case Bytecodes::_putstatic: 1288 case Bytecodes::_getfield: 1289 case Bytecodes::_putfield: 1290 case Bytecodes::_withfield: 1291 resolve_get_put(thread, bytecode); 1292 break; 1293 case Bytecodes::_invokevirtual: 1294 case Bytecodes::_invokespecial: 1295 case Bytecodes::_invokestatic: 1296 case Bytecodes::_invokeinterface: 1297 resolve_invoke(thread, bytecode); 1298 break; 1299 case Bytecodes::_invokehandle: 1300 resolve_invokehandle(thread); 1301 break; 1302 case Bytecodes::_invokedynamic: 1303 resolve_invokedynamic(thread); 1304 break; 1305 default: 1306 fatal("unexpected bytecode: %s", Bytecodes::name(bytecode)); 1307 break; 1308 } 1309 } 1310 JRT_END 1311 1312 //------------------------------------------------------------------------------------------------------------------------ 1313 // Miscellaneous 1314 1315 1316 nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) { 1317 nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp); 1318 assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests"); 1319 if (branch_bcp != NULL && nm != NULL) { 1320 // This was a successful request for an OSR nmethod. Because 1321 // frequency_counter_overflow_inner ends with a safepoint check, 1322 // nm could have been unloaded so look it up again. It's unsafe 1323 // to examine nm directly since it might have been freed and used 1324 // for something else. 1325 LastFrameAccessor last_frame(thread); 1326 Method* method = last_frame.method(); 1327 int bci = method->bci_from(last_frame.bcp()); 1328 nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false); 1329 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); 1330 if (nm != NULL && bs_nm != NULL) { 1331 // in case the transition passed a safepoint we need to barrier this again 1332 if (!bs_nm->nmethod_osr_entry_barrier(nm)) { 1333 nm = NULL; 1334 } 1335 } 1336 } 1337 if (nm != NULL && thread->is_interp_only_mode()) { 1338 // Normally we never get an nm if is_interp_only_mode() is true, because 1339 // policy()->event has a check for this and won't compile the method when 1340 // true. However, it's possible for is_interp_only_mode() to become true 1341 // during the compilation. We don't want to return the nm in that case 1342 // because we want to continue to execute interpreted. 
    nm = NULL;
  }
#ifndef PRODUCT
  if (TraceOnStackReplacement) {
    if (nm != NULL) {
      tty->print("OSR entry @ pc: " INTPTR_FORMAT ": ", p2i(nm->osr_entry()));
      nm->print();
    }
  }
#endif
  return nm;
}

JRT_ENTRY(nmethod*,
          InterpreterRuntime::frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(thread);

  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(thread, last_frame.method());
  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
  const int bci = branch_bcp != NULL ? method->bci_from(last_frame.bcp()) : InvocationEntryBci;

  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread);
  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");

  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (osr_nm != NULL && bs_nm != NULL) {
    if (!bs_nm->nmethod_osr_entry_barrier(osr_nm)) {
      osr_nm = NULL;
    }
  }

  if (osr_nm != NULL) {
    // We may need to do on-stack replacement which requires that no
    // monitors in the activation are biased because their
    // BasicObjectLocks will need to migrate during OSR. Force
    // unbiasing of all monitors in the activation now (even though
    // the OSR nmethod might be invalidated) because we don't have a
    // safepoint opportunity later once the migration begins.
    if (UseBiasedLocking) {
      ResourceMark rm;
      GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
      for (BasicObjectLock* kptr = last_frame.monitor_end();
           kptr < last_frame.monitor_begin();
           kptr = last_frame.next_monitor(kptr)) {
        if (kptr->obj() != NULL) {
          objects_to_revoke->append(Handle(THREAD, kptr->obj()));
        }
      }
      BiasedLocking::revoke(objects_to_revoke, thread);
    }
  }
  return osr_nm;
JRT_END

JRT_LEAF(jint, InterpreterRuntime::bcp_to_di(Method* method, address cur_bcp))
  assert(ProfileInterpreter, "must be profiling interpreter");
  int bci = method->bci_from(cur_bcp);
  MethodData* mdo = method->method_data();
  if (mdo == NULL) return 0;
  return mdo->bci_to_di(bci);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::profile_method(JavaThread* thread))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(thread);

  assert(ProfileInterpreter, "must be profiling interpreter");
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(thread, last_frame.method());
  Method::build_interpreter_method_data(method, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
    CLEAR_PENDING_EXCEPTION;
    // and fall through...
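    // Falling through leaves method_data() as NULL, so the interpreter simply
    // continues executing this method without profiling it.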
  }
JRT_END


#ifdef ASSERT
JRT_LEAF(void, InterpreterRuntime::verify_mdp(Method* method, address bcp, address mdp))
  assert(ProfileInterpreter, "must be profiling interpreter");

  MethodData* mdo = method->method_data();
  assert(mdo != NULL, "must not be null");

  int bci = method->bci_from(bcp);

  address mdp2 = mdo->bci_to_dp(bci);
  if (mdp != mdp2) {
    ResourceMark rm;
    ResetNoHandleMark rnm; // In a LEAF entry.
    HandleMark hm;
    tty->print_cr("FAILED verify : actual mdp %p expected mdp %p @ bci %d", mdp, mdp2, bci);
    int current_di = mdo->dp_to_di(mdp);
    int expected_di = mdo->dp_to_di(mdp2);
    tty->print_cr(" actual di %d expected di %d", current_di, expected_di);
    int expected_approx_bci = mdo->data_at(expected_di)->bci();
    int approx_bci = -1;
    if (current_di >= 0) {
      approx_bci = mdo->data_at(current_di)->bci();
    }
    tty->print_cr(" actual bci is %d expected bci %d", approx_bci, expected_approx_bci);
    mdo->print_on(tty);
    method->print_codes();
  }
  assert(mdp == mdp2, "wrong mdp");
JRT_END
#endif // ASSERT

JRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* thread, int return_bci))
  assert(ProfileInterpreter, "must be profiling interpreter");
  ResourceMark rm(thread);
  HandleMark hm(thread);
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  MethodData* h_mdo = last_frame.method()->method_data();

  // Grab a lock to ensure atomic access to setting the return bci and
  // the displacement. This can block and GC, invalidating all naked oops.
  MutexLocker ml(RetData_lock);

  // ProfileData is essentially a wrapper around a derived oop, so we
  // need to take the lock before making any ProfileData structures.
  ProfileData* data = h_mdo->data_at(h_mdo->dp_to_di(last_frame.mdp()));
  guarantee(data != NULL, "profile data must be valid");
  RetData* rdata = data->as_RetData();
  address new_mdp = rdata->fixup_ret(return_bci, h_mdo);
  last_frame.set_mdp(new_mdp);
JRT_END

JRT_ENTRY(MethodCounters*, InterpreterRuntime::build_method_counters(JavaThread* thread, Method* m))
  MethodCounters* mcs = Method::build_method_counters(m, thread);
  if (HAS_PENDING_EXCEPTION) {
    assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
    CLEAR_PENDING_EXCEPTION;
  }
  return mcs;
JRT_END


JRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* thread))
  // We used to need an explicit preserve_arguments here for invoke bytecodes. However,
  // stack traversal automatically takes care of preserving arguments for invoke, so
  // this is no longer needed.

  // JRT_END does an implicit safepoint check, hence we are guaranteed to block
  // if this is called during a safepoint

  if (JvmtiExport::should_post_single_step()) {
    // We are called during regular safepoints and when the VM is
    // single stepping. If any thread is marked for single stepping,
    // then we may have JVMTI work to do.
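    // Note: posting the event needs the current method and bcp, which we read
    // from the caller's interpreted frame via LastFrameAccessor.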
    LastFrameAccessor last_frame(thread);
    JvmtiExport::at_single_stepping_point(thread, last_frame.method(), last_frame.bcp());
  }
JRT_END

JRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread *thread, oopDesc* obj,
                                                      ConstantPoolCacheEntry *cp_entry))

  // check the access_flags for the field in the klass

  InstanceKlass* ik = InstanceKlass::cast(cp_entry->f1_as_klass());
  int index = cp_entry->field_index();
  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return;

  bool is_static = (obj == NULL);
  HandleMark hm(thread);

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(thread, obj);
  }
  InstanceKlass* cp_entry_f1 = InstanceKlass::cast(cp_entry->f1_as_klass());
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(cp_entry_f1, cp_entry->f2_as_index(), is_static);
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_field_access(thread, last_frame.method(), last_frame.bcp(), cp_entry_f1, h_obj, fid);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread,
                                                            oopDesc* obj, ConstantPoolCacheEntry *cp_entry, jvalue *value))

  Klass* k = cp_entry->f1_as_klass();

  // check the access_flags for the field in the klass
  InstanceKlass* ik = InstanceKlass::cast(k);
  int index = cp_entry->field_index();
  // bail out if field modifications are not watched
  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_MODIFICATION_WATCHED) == 0) return;

  char sig_type = '\0';

  switch(cp_entry->flag_state()) {
  case btos: sig_type = 'B'; break;
  case ztos: sig_type = 'Z'; break;
  case ctos: sig_type = 'C'; break;
  case stos: sig_type = 'S'; break;
  case itos: sig_type = 'I'; break;
  case ftos: sig_type = 'F'; break;
  case atos: sig_type = 'L'; break;
  case ltos: sig_type = 'J'; break;
  case dtos: sig_type = 'D'; break;
  default:  ShouldNotReachHere(); return;
  }

  // Both Q-signatures and L-signatures are mapped to atos
  if (cp_entry->flag_state() == atos && ik->field_signature(index)->is_Q_signature()) {
    sig_type = 'Q';
  }

  bool is_static = (obj == NULL);

  HandleMark hm(thread);
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(ik, cp_entry->f2_as_index(), is_static);
  jvalue fvalue;
#ifdef _LP64
  fvalue = *value;
#else
  // Long/double values are stored unaligned and also noncontiguously with
  // tagged stacks. We can't just do a simple assignment even in the non-
  // J/D cases because a C++ compiler is allowed to assume that a jvalue is
  // 8-byte aligned, and interpreter stack slots are only 4-byte aligned.
  // We assume that the two halves of longs/doubles are stored in interpreter
  // stack slots in platform-endian order.
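  // Illustration (32-bit VM only, since this is the !_LP64 branch): the low and
  // high 32-bit words sit in adjacent interpreter stack slots, possibly separated
  // by a tag slot, so we reassemble the 64-bit value through jlong_accessor
  // instead of dereferencing *value as a jlong.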
  jlong_accessor u;
  jint* newval = (jint*)value;
  u.words[0] = newval[0];
  u.words[1] = newval[Interpreter::stackElementWords]; // skip if tag
  fvalue.j = u.long_value;
#endif // _LP64

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(thread, obj);
  }

  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_raw_field_modification(thread, last_frame.method(), last_frame.bcp(), ik, h_obj,
                                           fid, sig_type, &fvalue);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::post_method_entry(JavaThread *thread))
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_method_entry(thread, last_frame.method(), last_frame.get_frame());
JRT_END


JRT_ENTRY(void, InterpreterRuntime::post_method_exit(JavaThread *thread))
  LastFrameAccessor last_frame(thread);
  JvmtiExport::post_method_exit(thread, last_frame.method(), last_frame.get_frame());
JRT_END

JRT_LEAF(int, InterpreterRuntime::interpreter_contains(address pc))
{
  return (Interpreter::contains(pc) ? 1 : 0);
}
JRT_END


// Implementation of SignatureHandlerLibrary

#ifndef SHARING_FAST_NATIVE_FINGERPRINTS
// Dummy definition (else normalization method is defined in CPU
// dependent code)
uint64_t InterpreterRuntime::normalize_fast_native_fingerprint(uint64_t fingerprint) {
  return fingerprint;
}
#endif

address SignatureHandlerLibrary::set_handler_blob() {
  BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
  if (handler_blob == NULL) {
    return NULL;
  }
  address handler = handler_blob->code_begin();
  _handler_blob = handler_blob;
  _handler = handler;
  return handler;
}

void SignatureHandlerLibrary::initialize() {
  if (_fingerprints != NULL) {
    return;
  }
  if (set_handler_blob() == NULL) {
    vm_exit_out_of_memory(blob_size, OOM_MALLOC_ERROR, "native signature handlers");
  }

  BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer",
                                      SignatureHandlerLibrary::buffer_size);
  _buffer = bb->code_begin();

  _fingerprints = new(ResourceObj::C_HEAP, mtCode)GrowableArray<uint64_t>(32, true);
  _handlers = new(ResourceObj::C_HEAP, mtCode)GrowableArray<address>(32, true);
}

address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
  address handler = _handler;
  int insts_size = buffer->pure_insts_size();
  if (handler + insts_size > _handler_blob->code_end()) {
    // get a new handler blob
    handler = set_handler_blob();
  }
  if (handler != NULL) {
    memcpy(handler, buffer->insts_begin(), insts_size);
    pd_set_handler(handler);
    ICache::invalidate_range(handler, insts_size);
    _handler = handler + insts_size;
  }
  return handler;
}

void SignatureHandlerLibrary::add(const methodHandle& method) {
  if (method->signature_handler() == NULL) {
    // use slow signature handler if we can't do better
    int handler_index = -1;
    // check if we can use customized (fast) signature handler
    if (UseFastSignatureHandlers && method->size_of_parameters() <= Fingerprinter::max_size_of_parameters) {
      // use customized signature handler
      MutexLocker mu(SignatureHandlerLibrary_lock);
      // make sure data structure is initialized
      initialize();
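      // Note: the 64-bit fingerprint computed below encodes the method's parameter
      // and result types; it serves purely as the lookup key into _fingerprints /
      // _handlers, so natives with identical signatures share one generated handler.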
      // lookup the method signature's fingerprint
      uint64_t fingerprint = Fingerprinter(method).fingerprint();
      // allow CPU dependent code to optimize the fingerprints for the fast handler
      fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
      handler_index = _fingerprints->find(fingerprint);
      // create handler if necessary
      if (handler_index < 0) {
        ResourceMark rm;
        ptrdiff_t align_offset = align_up(_buffer, CodeEntryAlignment) - (address)_buffer;
        CodeBuffer buffer((address)(_buffer + align_offset),
                          SignatureHandlerLibrary::buffer_size - align_offset);
        InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
        // copy into code heap
        address handler = set_handler(&buffer);
        if (handler == NULL) {
          // use slow signature handler (without memorizing it in the fingerprints)
        } else {
          // debugging support
          if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
            ttyLocker ttyl;
            tty->cr();
            tty->print_cr("argument handler #%d for: %s %s (fingerprint = " UINT64_FORMAT ", %d bytes generated)",
                          _handlers->length(),
                          (method->is_static() ? "static" : "receiver"),
                          method->name_and_sig_as_C_string(),
                          fingerprint,
                          buffer.insts_size());
            if (buffer.insts_size() > 0) {
              Disassembler::decode(handler, handler + buffer.insts_size());
            }
#ifndef PRODUCT
            address rh_begin = Interpreter::result_handler(method()->result_type());
            if (CodeCache::contains(rh_begin)) {
              // else it might be special platform dependent values
              tty->print_cr(" --- associated result handler ---");
              address rh_end = rh_begin;
              while (*(int*)rh_end != 0) {
                rh_end += sizeof(int);
              }
              Disassembler::decode(rh_begin, rh_end);
            } else {
              tty->print_cr(" associated result handler: " PTR_FORMAT, p2i(rh_begin));
            }
#endif
          }
          // add handler to library
          _fingerprints->append(fingerprint);
          _handlers->append(handler);
          // set handler index
          assert(_fingerprints->length() == _handlers->length(), "sanity check");
          handler_index = _fingerprints->length() - 1;
        }
      }
      // Set handler under SignatureHandlerLibrary_lock
      if (handler_index < 0) {
        // use generic signature handler
        method->set_signature_handler(Interpreter::slow_signature_handler());
      } else {
        // set handler
        method->set_signature_handler(_handlers->at(handler_index));
      }
    } else {
      DEBUG_ONLY(Thread::current()->check_possible_safepoint());
      // use generic signature handler
      method->set_signature_handler(Interpreter::slow_signature_handler());
    }
  }
#ifdef ASSERT
  int handler_index = -1;
  int fingerprint_index = -2;
  {
    // '_handlers' and '_fingerprints' are 'GrowableArray's and are NOT synchronized
    // in any way if accessed from multiple threads. To avoid races with another
    // thread which may change the arrays in the above, mutex protected block, we
    // have to protect this read access here with the same mutex as well!
    MutexLocker mu(SignatureHandlerLibrary_lock);
    if (_handlers != NULL) {
      handler_index = _handlers->find(method->signature_handler());
      uint64_t fingerprint = Fingerprinter(method).fingerprint();
      fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
      fingerprint_index = _fingerprints->find(fingerprint);
    }
  }
  assert(method->signature_handler() == Interpreter::slow_signature_handler() ||
         handler_index == fingerprint_index, "sanity check");
#endif // ASSERT
}

void SignatureHandlerLibrary::add(uint64_t fingerprint, address handler) {
  int handler_index = -1;
  // use customized signature handler
  MutexLocker mu(SignatureHandlerLibrary_lock);
  // make sure data structure is initialized
  initialize();
  fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
  handler_index = _fingerprints->find(fingerprint);
  // create handler if necessary
  if (handler_index < 0) {
    if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
      tty->cr();
      tty->print_cr("argument handler #%d at " PTR_FORMAT " for fingerprint " UINT64_FORMAT,
                    _handlers->length(),
                    p2i(handler),
                    fingerprint);
    }
    _fingerprints->append(fingerprint);
    _handlers->append(handler);
  } else {
    if (PrintSignatureHandlers) {
      tty->cr();
      tty->print_cr("duplicate argument handler #%d for fingerprint " UINT64_FORMAT "(old: " PTR_FORMAT ", new : " PTR_FORMAT ")",
                    _handlers->length(),
                    fingerprint,
                    p2i(_handlers->at(handler_index)),
                    p2i(handler));
    }
  }
}


BufferBlob* SignatureHandlerLibrary::_handler_blob = NULL;
address SignatureHandlerLibrary::_handler = NULL;
GrowableArray<uint64_t>* SignatureHandlerLibrary::_fingerprints = NULL;
GrowableArray<address>* SignatureHandlerLibrary::_handlers = NULL;
address SignatureHandlerLibrary::_buffer = NULL;


JRT_ENTRY(void, InterpreterRuntime::prepare_native_call(JavaThread* thread, Method* method))
  methodHandle m(thread, method);
  assert(m->is_native(), "sanity check");
  // lookup native function entry point if it doesn't exist
  bool in_base_library;
  if (!m->has_native_function()) {
    NativeLookup::lookup(m, in_base_library, CHECK);
  }
  // make sure signature handler is installed
  SignatureHandlerLibrary::add(m);
  // The interpreter entry point checks the signature handler first,
  // before trying to fetch the native entry point and klass mirror.
  // We must set the signature handler last, so that multiple processors
  // preparing the same method will be sure to see non-null entry & mirror.
JRT_END

#if defined(IA32) || defined(AMD64) || defined(ARM)
JRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address))
  if (src_address == dest_address) {
    return;
  }
  ResetNoHandleMark rnm; // In a LEAF entry.
  HandleMark hm;
  ResourceMark rm;
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "");
  jint bci = last_frame.bci();
  methodHandle mh(thread, last_frame.method());
  Bytecode_invoke invoke(mh, bci);
  ArgumentSizeComputer asc(invoke.signature());
  int size_of_arguments = (asc.size() + (invoke.has_receiver() ? 1 : 0)); // receiver
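  // size_of_arguments is in interpreter stack slots; the copy below scales it by
  // Interpreter::stackElementSize to obtain the byte count to move.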
  Copy::conjoint_jbytes(src_address, dest_address,
                        size_of_arguments * Interpreter::stackElementSize);
JRT_END
#endif

#if INCLUDE_JVMTI
// This supports the JVMTI PopFrame interface.
// If the current bytecode is an invokestatic of a polymorphic intrinsic that has a member_name
// argument, return the member_name as a vm_result so that it can be reloaded into the list of
// invokestatic parameters.
// The member_name argument is a saved reference (in local#0) to the member_name.
// For backward compatibility with some JDK versions (7, 8) it can also be a direct method handle.
// FIXME: remove DMH case after j.l.i.InvokerBytecodeGenerator code shape is updated.
JRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address member_name,
                                                            Method* method, address bcp))
  Bytecodes::Code code = Bytecodes::code_at(method, bcp);
  if (code != Bytecodes::_invokestatic) {
    return;
  }
  ConstantPool* cpool = method->constants();
  int cp_index = Bytes::get_native_u2(bcp + 1) + ConstantPool::CPCACHE_INDEX_TAG;
  Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index));
  Symbol* mname = cpool->name_ref_at(cp_index);

  if (MethodHandles::has_member_arg(cname, mname)) {
    oop member_name_oop = (oop) member_name;
    if (java_lang_invoke_DirectMethodHandle::is_instance(member_name_oop)) {
      // FIXME: remove after j.l.i.InvokerBytecodeGenerator code shape is updated.
      member_name_oop = java_lang_invoke_DirectMethodHandle::member(member_name_oop);
    }
    thread->set_vm_result(member_name_oop);
  } else {
    thread->set_vm_result(NULL);
  }
JRT_END
#endif // INCLUDE_JVMTI

#ifndef PRODUCT
// This must be a JRT_LEAF function because the interpreter must save registers on x86 to
// call this, which changes rsp and makes the interpreter's expression stack not walkable.
// The generated code still uses call_VM because that will set up the frame pointer for
// bcp and method.
JRT_LEAF(intptr_t, InterpreterRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  LastFrameAccessor last_frame(thread);
  assert(last_frame.is_interpreted_frame(), "must be an interpreted frame");
  methodHandle mh(thread, last_frame.method());
  BytecodeTracer::trace(mh, last_frame.bcp(), tos, tos2);
  return preserve_this_value;
JRT_END
#endif // !PRODUCT