 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to Deoptimize as soon as we execute.
  assert(field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous. It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // ...

      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
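  // On a CPU that is not multiple-copy-atomic (e.g. PPC64), a volatile load
  // must be preceded by a StoreLoad barrier so that independent reads of
  // independent writes (IRIW) stay globally consistent.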
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
    insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
  }

  // Build the load.
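  // A volatile load needs acquire semantics so that later memory operations
  // cannot float above it; non-volatile loads can remain unordered.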
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
  Node* ld = NULL;
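  // A flattened value type field is embedded directly in its holder rather
  // than referenced through an oop, so it cannot be fetched with a single
  // load; instead a ValueTypeNode is assembled from the individual field
  // values found in the holder's memory.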
  if (bt == T_VALUETYPE && !field->is_static()) {
    // Load flattened value type from non-static field
    ld = ValueTypeNode::make(_gvn, field_klass->as_value_klass(), map()->memory(), obj, obj, field->holder(), offset);
  } else {
    ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
  }

  // Adjust the Java stack: one-slot values push a single slot,
  // two-slot values (long, double) push a pair.
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here. It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever. Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations. (Or, we might load a class
    // which should not be loaded.) If we ever see a non-null
    // value, we will then trap and recompile. (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
  // ...

  // Conservatively release stores of object references.
  const MemNode::MemOrd mo =
    is_vol ?
    // Volatile fields need releasing stores.
    MemNode::release :
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    StoreNode::release_if_reference(bt);

  // Store the value.
  if (bt == T_OBJECT || bt == T_VALUETYPE) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
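    // Mirror of the flattened load above: a flattened value type is written
    // by storing each of its field values into the holder, not by storing an oop.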
    if (bt == T_VALUETYPE && !field->is_static()) {
      // Store flattened value type to non-static field
      val->as_ValueType()->store_to_field(this, obj, obj, field->holder(), offset);
    } else {
      store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
    }
  } else {
    bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
    store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
  }

  // If the reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // If not multiple copy atomic, we do the MemBarVolatile before the load.
    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
    }
    // Remember that we wrote a volatile field.
    // For a CPU that is not multiple-copy-atomic (ppc64), a barrier should be
    // issued in constructors which have such stores. See do_exits() in parse1.cpp.
    if (is_field) {
      set_wrote_volatile(true);
  // ...
  // out of the constructor.
  // Any method can write a @Stable field; insert memory barriers after those also.
  if (is_field && (field->is_final() || field->is_stable())) {
    if (field->is_final()) {
      set_wrote_final(true);
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }

    // Preserve the allocation ptr to create a precedent edge to it in the
    // membar generated on exit from the constructor.
    // A @Stable field can't be bound to its allocation; only record the
    // allocation for a final field.
    if (field->is_final() && AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
      set_alloc_with_final(obj);
    }
  }
}

//=============================================================================

void Parse::do_newarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Take an uncommon trap when the element class of the array is not loaded:
  // we need the loaded class for the rest of the graph, and we must not
  // initialize the container class (see the Java spec)!
  assert(will_link, "newarray: typeflow responsibility");

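  // ciArrayKlass::make selects the array klass matching the element type
  // (presumably a value array klass for value klasses, an object array klass
  // otherwise, given the valueArrayKlass.hpp include above).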
  ciArrayKlass* array_klass = ciArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();
// ...

  bool will_link;
  ciValueKlass* dvt_klass = gvn().type(vt)->is_valuetype()->value_klass();
  ciInstanceKlass* vcc_klass = iter().get_klass(will_link)->as_instance_klass();
  guarantee(will_link, "value-capable class must be loaded");

  kill_dead_locals();

  // TODO: Generate all (or some) of the following checks:
  // (1) if the target is not a value-capable instance, throw ClassCastException
  // (2) if the source is not a value type instance, throw ClassCastException
  // (3) if the target type is not a value type derived from the source

  // Create the new object.
  Node* kls = makecon(TypeKlassPtr::make(vcc_klass));
  Node* obj = new_instance(kls);

  // Store all field values to the newly created object.
  // The code below relies on the assumption that the VCC has the
  // same memory layout as the derived value type.
  // TODO: Once the layouts of the two diverge, update the code below.
  vt->store_values(this, obj, obj, vcc_klass);

  // Push the new object onto the stack.
  push(obj);
}

void Parse::do_vunbox() {
  // Obtain the object from the top of the stack.
  Node* obj = pop();

  // Obtain the types.
  bool will_link;
  ciInstanceKlass* vcc_klass = gvn().type(obj)->is_oopptr()->klass()->as_instance_klass();
  ciValueKlass* dvt_klass = iter().get_klass(will_link)->as_value_klass();
  guarantee(will_link, "derived value type must be loaded");

  // TODO: Generate all the checks. Similar to vbox.

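  // Unboxing reads the field values out of the heap-allocated VCC instance,
  // starting at the value type's first field offset, and materializes them
  // as a ValueTypeNode.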
  // Create a value type node with the corresponding type.
  Node* vt = ValueTypeNode::make(gvn(), dvt_klass, map()->memory(), obj, obj, vcc_klass, dvt_klass->first_field_offset());

  // Push the value type onto the stack.
  push(vt);
}