/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to Deoptimize as soon as we execute
  assert(field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous.  It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It is also desirable to allow compilation of methods called from <clinit>
  // but this generated code will need to be made safe for execution by
  // other threads, or the transition from interpreted to compiled code would
  // need to be guarded.
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside initializer
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
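        // (Illustrative example: the putstatic in
        //    class A { static int COUNT; A() { COUNT++; } }
        //  compiled as part of A.<init> is accepted by this check.)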
        access_OK = true;
      }
    }
  }

  return access_OK;

}


void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  // Deoptimize on putfield writes to call site target field.
  if (!is_get && field->is_call_site_target()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }

  assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");

  // Note:  We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detect of null-exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}

void Parse::do_vgetfield() {
  // fixme null/top check?
  bool will_link;
  ciField* field = iter().get_field(will_link);
  BasicType bt = field->layout_type();
  ValueTypeNode* vt = pop()->as_ValueType();
  Node* value = vt->field_value_by_offset(field->offset());
  push_node(bt, value);
}

void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  BasicType bt = field->layout_type();
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant()) {
    // final or stable field
    const Type* con_type = Type::make_constant(field, obj);
    if (con_type != NULL) {
      Node* con = makecon(con_type);
      if (bt == T_VALUETYPE) {
        // Load value type from constant oop
        con = ValueTypeNode::make(gvn(), map()->memory(), con);
      }
      push_node(con_type->basic_type(), con);
      return;
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if (bt == T_OBJECT || bt == T_VALUETYPE) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }

  // Build the load.
  //
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
  Node* ld = NULL;
  if (bt == T_VALUETYPE && !field->is_static()) {
    // Load flattened value type from non-static field
    ld = ValueTypeNode::make(_gvn, field_klass->as_value_klass(), map()->memory(), obj, obj, field->holder(), offset);
  } else {
    ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
  }

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
  if (is_vol)  insert_mem_bar(Op_MemBarRelease);

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE)  val = dstore_rounding(val);

  // Conservatively release stores of object references.
  const MemNode::MemOrd mo =
    is_vol ?
    // Volatile fields need releasing stores.
    MemNode::release :
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    StoreNode::release_if_reference(bt);

  // Store the value.
  if (bt == T_OBJECT || bt == T_VALUETYPE) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    if (bt == T_VALUETYPE && !field->is_static()) {
      // Store flattened value type to non-static field
      val->as_ValueType()->store(this, obj, obj, field->holder(), offset);
    } else {
      store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
    }
  } else {
    bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
    store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
  }

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // If not multiple copy atomic, we do the MemBarVolatile before the load.
    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
    }
    // Remember we wrote a volatile field.
    // For not multiple copy atomic cpu (ppc64) a barrier should be issued
    // in constructors which have such stores. See do_exits() in parse1.cpp.
    if (is_field) {
      set_wrote_volatile(true);
    }
  }

  if (is_field) {
    set_wrote_fields(true);
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  // Any method can write a @Stable field; insert memory barriers after those also.
  if (is_field && (field->is_final() || field->is_stable())) {
    if (field->is_final()) {
      set_wrote_final(true);
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }

    // Preserve allocation ptr to create precedent edge to it in membar
    // generated on exit from constructor.
    // Can't bind stable with its allocation, only record allocation for final field.
    if (field->is_final() && AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
      set_alloc_with_final(obj);
    }
  }
}

//=============================================================================

void Parse::do_newarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon Trap when the class the array contains is not loaded;
  // we need the loaded class for the rest of the graph; do not
  // initialize the container class (see Java spec)!!!
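  // (Illustrative: 'new Foo[n]' requires Foo to be loaded but, unlike
  //  'new Foo()', must not trigger Foo's static initializer.)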
  assert(will_link, "newarray: typeflow responsibility");

  ciArrayKlass* array_klass = ciArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  } else if (array_klass->element_klass() != NULL &&
             array_klass->element_klass()->is_valuetype() &&
             !array_klass->element_klass()->as_value_klass()->is_initialized()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  NULL);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node* eaddr = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, MemNode::unordered);
    }
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
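  // (Illustrative count: for 'new T[2][3][n]' the loop below yields
  //  expand_count = 1 + 2 + 2*3 = 9, i.e. the outer array, its two
  //  sub-arrays, and six leaf arrays of length n.)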
  const jint expand_limit = MIN2((jint)MultiArrayExpandLimit, 100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can use multianewarray instead of [a]newarray if only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one dimension multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } // original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  };
  Node* c = NULL;

  if (fun != NULL) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : NULL,
                          (ndimensions > 3) ? length[4] : NULL);
  } else {
    // Create a java array for dimension sizes
    Node* dims = NULL;
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);

      // Fill it in with the dimension values
      for (j = 0; j < ndimensions; j++) {
        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
      }
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          dims);
  }
  make_slow_call_ex(c, env()->Throwable_klass(), false);

  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.
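  // (Illustrative: for 'new String[m][n]' with non-constant m, the cast below
  //  asserts a non-null, exact String[][]; the nested elements are still only
  //  known to be String[] of unknown length.)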

  Node* cast = _gvn.transform( new CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}

void Parse::do_vbox() {
  // Obtain target type (from bytecodes)
  bool will_link;
  ciKlass* target_klass = iter().get_klass(will_link);
  guarantee(will_link, "vbox: Value-capable class must be loaded");
  guarantee(target_klass->is_instance_klass(), "vbox: Target class must be an instance type");

  // Obtain source type
  const TypeValueType* source_type = gvn().type(peek())->isa_valuetype();
  guarantee(source_type != NULL && source_type->value_klass() != NULL && source_type->value_klass()->is_loaded(),
            "vbox: Source class must be a value type and must be loaded");

  ciInstanceKlass* target_vcc_klass = target_klass->as_instance_klass();
  guarantee(target_vcc_klass->has_derive_value_type(), "vbox: Target class must have a derived value type class linked");
  ciValueKlass* target_dvt_klass = target_vcc_klass->derive_value_type()->as_value_klass();

  kill_dead_locals();

  ValueTypeNode* vt = peek()->as_ValueType();
  if (gvn().type(vt->get_oop())->higher_equal(TypePtr::NULL_PTR)) {
    // The type of both the value type instance and of the target is known.
    // The value type that is about to be boxed has no oop associated
    // (i.e., the value type has not been allocated yet). To be able to
    // allocate the value type, the compiler must have saved type information
    // into the value type. The bytecode instruction gives the exact type of
    // the target.
    if (!source_type->value_klass()->derive_value_type()->is_subclass_of(target_vcc_klass)) {
      builtin_throw(Deoptimization::Reason_class_check);
      guarantee(stopped(), "A ClassCastException must be always thrown on this path");
      return;
    }
  } else {
    // The value type has an associated oop, so the compiler generates
    // a typecheck for that oop (i.e., the class of the oop is supposed to
    // be a subclass of the value type derived from the target).
    gen_checkcast(vt->get_oop(), makecon(TypeKlassPtr::make(target_dvt_klass)));
  }

  pop();

  // Create new object
  Node* kls = makecon(TypeKlassPtr::make(target_vcc_klass));
  Node* obj = new_instance(kls);

  // Store all field values to the newly created object.
  // The code below relies on the assumption that the VCC has the
  // same memory layout as the derived value type.
  // TODO: Once the layout of the two is not the same, update code below.
  vt->as_ValueType()->store_values(this, obj, obj, target_vcc_klass);

  // Push the new object onto the stack
  push(obj);
}

void Parse::do_vunbox() {
  // Check if the VCC instance is null.
  Node* obj = null_check(peek());

  // Value determined to be null at compile time
  if (stopped()) {
    return;
  }

  // Obtain target type (from bytecodes)
  bool will_link;
  ciKlass* target_klass = iter().get_klass(will_link);
  guarantee(will_link, "vunbox: Derived value type must be loaded");
  guarantee(target_klass->is_valuetype(), "vunbox: Target class must be a value type");

  // Obtain source type
  const TypeOopPtr* source_type = gvn().type(obj)->isa_oopptr();
  guarantee(source_type != NULL && source_type->klass() != NULL &&
            source_type->klass()->is_instance_klass() && source_type->klass()->is_loaded(),
            "vunbox: Source class must be an instance type and must be loaded");

  ciValueKlass* target_dvt_klass = target_klass->as_value_klass();
  guarantee(target_dvt_klass->has_derive_value_type(), "vunbox: Target class must have a value-capable class linked");
  ciInstanceKlass* target_vcc_klass = target_dvt_klass->derive_value_type()->as_instance_klass();

  // Check if the class of the source is a subclass of the value-capable class
  // corresponding to the target.
  // TODO: Investigate if gen_checkcast can be used to perform the null-check above.
  // A good occasion to do that seems to be when also implementing profiling of vunbox bytecodes.
  gen_checkcast(obj, makecon(TypeKlassPtr::make(target_vcc_klass)));

  // Remove object from the top of the stack
  pop();

  // Create a value type node with the corresponding type
  Node* vt = ValueTypeNode::make(gvn(), target_dvt_klass, map()->memory(), obj, obj, target_vcc_klass, target_dvt_klass->first_field_offset());

  // Push the value type onto the stack
  push(vt);
}