1 /*
2 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_Instruction.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_LIRGenerator.hpp"
31 #include "c1/c1_ValueStack.hpp"
32 #include "ci/ciArrayKlass.hpp"
33 #include "ci/ciInstance.hpp"
34 #include "ci/ciObjArray.hpp"
35 #include "runtime/arguments.hpp"
36 #include "runtime/sharedRuntime.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "runtime/vm_version.hpp"
39 #include "utilities/bitMap.inline.hpp"
40 #include "utilities/macros.hpp"
41 #if INCLUDE_ALL_GCS
42 #include "gc_implementation/g1/heapRegion.hpp"
43 #endif // INCLUDE_ALL_GCS
44
45 #ifdef ASSERT
46 #define __ gen()->lir(__FILE__, __LINE__)->
47 #else
48 #define __ gen()->lir()->
49 #endif
50
51 // TODO: ARM - Use some recognizable constant which still fits architectural constraints
52 #ifdef ARM
53 #define PATCHED_ADDR (204)
54 #else
55 #define PATCHED_ADDR (max_jint)
56 #endif
57
58 void PhiResolverState::reset(int max_vregs) {
59 // Initialize array sizes
60 _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
61 _virtual_operands.trunc_to(0);
62 _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
63 _other_operands.trunc_to(0);
64 _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
65 _vreg_table.trunc_to(0);
66 }
67
68
69
70 //--------------------------------------------------------------
71 // PhiResolver
72
73 // Resolves cycles:
74 //
75 // r1 := r2 becomes temp := r1
76 // r2 := r1 r1 := r2
77 // r2 := temp
78 // and orders moves:
79 //
80 // r2 := r3 becomes r1 := r2
81 // r1 := r2 r2 := r3
82
83 PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
84 : _gen(gen)
85 , _state(gen->resolver_state())
86 , _temp(LIR_OprFact::illegalOpr)
87 {
88 // reinitialize the shared state arrays
89 _state.reset(max_vregs);
90 }
91
92
93 void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
94 assert(src->is_valid(), "");
95 assert(dest->is_valid(), "");
96 __ move(src, dest);
97 }
98
99
100 void PhiResolver::move_temp_to(LIR_Opr dest) {
101 assert(_temp->is_valid(), "");
102 emit_move(_temp, dest);
103 NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
104 }
105
106
107 void PhiResolver::move_to_temp(LIR_Opr src) {
108 assert(_temp->is_illegal(), "");
109 _temp = _gen->new_register(src->type());
110 emit_move(src, _temp);
111 }
112
113
114 // Traverse assignment graph in depth first order and generate moves in post order
115 // ie. two assignments: b := c, a := b start with node c:
116 // Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
117 // Generates moves in this order: move b to a and move c to b
118 // ie. cycle a := b, b := a start with node a
119 // Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
120 // Generates moves in this order: move b to temp, move a to b, move temp to a
121 void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
122 if (!dest->visited()) {
123 dest->set_visited();
124 for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
125 move(dest, dest->destination_at(i));
126 }
127 } else if (!dest->start_node()) {
128 // cylce in graph detected
129 assert(_loop == NULL, "only one loop valid!");
130 _loop = dest;
131 move_to_temp(src->operand());
132 return;
133 } // else dest is a start node
134
135 if (!dest->assigned()) {
136 if (_loop == dest) {
137 move_temp_to(dest->operand());
138 dest->set_assigned();
139 } else if (src != NULL) {
140 emit_move(src->operand(), dest->operand());
141 dest->set_assigned();
142 }
143 }
144 }
145
146
147 PhiResolver::~PhiResolver() {
148 int i;
149 // resolve any cycles in moves from and to virtual registers
150 for (i = virtual_operands().length() - 1; i >= 0; i --) {
151 ResolveNode* node = virtual_operands()[i];
152 if (!node->visited()) {
153 _loop = NULL;
154 move(NULL, node);
155 node->set_start_node();
156 assert(_temp->is_illegal(), "move_temp_to() call missing");
157 }
158 }
159
160 // generate move for move from non virtual register to abitrary destination
161 for (i = other_operands().length() - 1; i >= 0; i --) {
162 ResolveNode* node = other_operands()[i];
163 for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
164 emit_move(node->operand(), node->destination_at(j)->operand());
165 }
166 }
167 }
168
169
170 ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
171 ResolveNode* node;
172 if (opr->is_virtual()) {
173 int vreg_num = opr->vreg_number();
174 node = vreg_table().at_grow(vreg_num, NULL);
175 assert(node == NULL || node->operand() == opr, "");
176 if (node == NULL) {
177 node = new ResolveNode(opr);
178 vreg_table()[vreg_num] = node;
179 }
180 // Make sure that all virtual operands show up in the list when
181 // they are used as the source of a move.
182 if (source && !virtual_operands().contains(node)) {
183 virtual_operands().append(node);
184 }
185 } else {
186 assert(source, "");
187 node = new ResolveNode(opr);
188 other_operands().append(node);
189 }
190 return node;
191 }
192
193
194 void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
195 assert(dest->is_virtual(), "");
196 // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
197 assert(src->is_valid(), "");
198 assert(dest->is_valid(), "");
199 ResolveNode* source = source_node(src);
200 source->append(destination_node(dest));
201 }
202
203
204 //--------------------------------------------------------------
205 // LIRItem
206
207 void LIRItem::set_result(LIR_Opr opr) {
208 assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
209 value()->set_operand(opr);
210
211 if (opr->is_virtual()) {
212 _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
213 }
214
215 _result = opr;
216 }
217
218 void LIRItem::load_item() {
219 if (result()->is_illegal()) {
220 // update the items result
221 _result = value()->operand();
222 }
223 if (!result()->is_register()) {
224 LIR_Opr reg = _gen->new_register(value()->type());
225 __ move(result(), reg);
226 if (result()->is_constant()) {
227 _result = reg;
228 } else {
229 set_result(reg);
230 }
231 }
232 }
233
234
235 void LIRItem::load_for_store(BasicType type) {
236 if (_gen->can_store_as_constant(value(), type)) {
237 _result = value()->operand();
238 if (!_result->is_constant()) {
239 _result = LIR_OprFact::value_type(value()->type());
240 }
241 } else if (type == T_BYTE || type == T_BOOLEAN) {
242 load_byte_item();
243 } else {
244 load_item();
245 }
246 }
247
248 void LIRItem::load_item_force(LIR_Opr reg) {
249 LIR_Opr r = result();
250 if (r != reg) {
251 #if !defined(ARM) && !defined(E500V2)
252 if (r->type() != reg->type()) {
253 // moves between different types need an intervening spill slot
254 r = _gen->force_to_spill(r, reg->type());
255 }
256 #endif
257 __ move(r, reg);
258 _result = reg;
259 }
260 }
261
262 ciObject* LIRItem::get_jobject_constant() const {
263 ObjectType* oc = type()->as_ObjectType();
264 if (oc) {
265 return oc->constant_value();
266 }
267 return NULL;
268 }
269
270
271 jint LIRItem::get_jint_constant() const {
272 assert(is_constant() && value() != NULL, "");
273 assert(type()->as_IntConstant() != NULL, "type check");
274 return type()->as_IntConstant()->value();
275 }
276
277
278 jint LIRItem::get_address_constant() const {
279 assert(is_constant() && value() != NULL, "");
280 assert(type()->as_AddressConstant() != NULL, "type check");
281 return type()->as_AddressConstant()->value();
282 }
283
284
285 jfloat LIRItem::get_jfloat_constant() const {
286 assert(is_constant() && value() != NULL, "");
287 assert(type()->as_FloatConstant() != NULL, "type check");
288 return type()->as_FloatConstant()->value();
289 }
290
291
292 jdouble LIRItem::get_jdouble_constant() const {
293 assert(is_constant() && value() != NULL, "");
294 assert(type()->as_DoubleConstant() != NULL, "type check");
295 return type()->as_DoubleConstant()->value();
296 }
297
298
299 jlong LIRItem::get_jlong_constant() const {
300 assert(is_constant() && value() != NULL, "");
301 assert(type()->as_LongConstant() != NULL, "type check");
302 return type()->as_LongConstant()->value();
303 }
304
305
306
307 //--------------------------------------------------------------
308
309
310 void LIRGenerator::init() {
311 _bs = Universe::heap()->barrier_set();
312 }
313
314
315 void LIRGenerator::block_do_prolog(BlockBegin* block) {
316 #ifndef PRODUCT
317 if (PrintIRWithLIR) {
318 block->print();
319 }
320 #endif
321
322 // set up the list of LIR instructions
323 assert(block->lir() == NULL, "LIR list already computed for this block");
324 _lir = new LIR_List(compilation(), block);
325 block->set_lir(_lir);
326
327 __ branch_destination(block->label());
328
329 if (LIRTraceExecution &&
330 Compilation::current()->hir()->start()->block_id() != block->block_id() &&
331 !block->is_set(BlockBegin::exception_entry_flag)) {
332 assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
333 trace_block_entry(block);
334 }
335 }
336
337
338 void LIRGenerator::block_do_epilog(BlockBegin* block) {
339 #ifndef PRODUCT
340 if (PrintIRWithLIR) {
341 tty->cr();
342 }
343 #endif
344
345 // LIR_Opr for unpinned constants shouldn't be referenced by other
346 // blocks so clear them out after processing the block.
347 for (int i = 0; i < _unpinned_constants.length(); i++) {
348 _unpinned_constants.at(i)->clear_operand();
349 }
350 _unpinned_constants.trunc_to(0);
351
352 // clear our any registers for other local constants
353 _constants.trunc_to(0);
354 _reg_for_constants.trunc_to(0);
355 }
356
357
358 void LIRGenerator::block_do(BlockBegin* block) {
359 CHECK_BAILOUT();
360
361 block_do_prolog(block);
362 set_block(block);
363
364 for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
365 if (instr->is_pinned()) do_root(instr);
366 }
367
368 set_block(NULL);
369 block_do_epilog(block);
370 }
371
372
373 //-------------------------LIRGenerator-----------------------------
374
375 // This is where the tree-walk starts; instr must be root;
376 void LIRGenerator::do_root(Value instr) {
377 CHECK_BAILOUT();
378
379 InstructionMark im(compilation(), instr);
380
381 assert(instr->is_pinned(), "use only with roots");
382 assert(instr->subst() == instr, "shouldn't have missed substitution");
383
384 instr->visit(this);
385
386 assert(!instr->has_uses() || instr->operand()->is_valid() ||
387 instr->as_Constant() != NULL || bailed_out(), "invalid item set");
388 }
389
390
391 // This is called for each node in tree; the walk stops if a root is reached
392 void LIRGenerator::walk(Value instr) {
393 InstructionMark im(compilation(), instr);
394 //stop walk when encounter a root
395 if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
396 assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
397 } else {
398 assert(instr->subst() == instr, "shouldn't have missed substitution");
399 instr->visit(this);
400 // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
401 }
402 }
403
404
405 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
406 assert(state != NULL, "state must be defined");
407
408 #ifndef PRODUCT
409 state->verify();
410 #endif
411
412 ValueStack* s = state;
413 for_each_state(s) {
414 if (s->kind() == ValueStack::EmptyExceptionState) {
415 assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
416 continue;
417 }
418
419 int index;
420 Value value;
421 for_each_stack_value(s, index, value) {
422 assert(value->subst() == value, "missed substitution");
423 if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
424 walk(value);
425 assert(value->operand()->is_valid(), "must be evaluated now");
426 }
427 }
428
429 int bci = s->bci();
430 IRScope* scope = s->scope();
431 ciMethod* method = scope->method();
432
433 MethodLivenessResult liveness = method->liveness_at_bci(bci);
434 if (bci == SynchronizationEntryBCI) {
435 if (x->as_ExceptionObject() || x->as_Throw()) {
436 // all locals are dead on exit from the synthetic unlocker
437 liveness.clear();
438 } else {
439 assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
440 }
441 }
442 if (!liveness.is_valid()) {
443 // Degenerate or breakpointed method.
444 bailout("Degenerate or breakpointed method");
445 } else {
446 assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
447 for_each_local_value(s, index, value) {
448 assert(value->subst() == value, "missed substition");
449 if (liveness.at(index) && !value->type()->is_illegal()) {
450 if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
451 walk(value);
452 assert(value->operand()->is_valid(), "must be evaluated now");
453 }
454 } else {
455 // NULL out this local so that linear scan can assume that all non-NULL values are live.
456 s->invalidate_local(index);
457 }
458 }
459 }
460 }
461
462 return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
463 }
464
465
466 CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
467 return state_for(x, x->exception_state());
468 }
469
470
471 void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
472 /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
473 * is active and the class hasn't yet been resolved we need to emit a patch that resolves
474 * the class. */
475 if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
476 assert(info != NULL, "info must be set if class is not loaded");
477 __ klass2reg_patch(NULL, r, info);
478 } else {
479 // no patching needed
480 __ metadata2reg(obj->constant_encoding(), r);
481 }
482 }
483
484
485 void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
486 CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
487 CodeStub* stub = new RangeCheckStub(range_check_info, index);
488 if (index->is_constant()) {
489 cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
490 index->as_jint(), null_check_info);
491 __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
492 } else {
493 cmp_reg_mem(lir_cond_aboveEqual, index, array,
494 arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
495 __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
496 }
497 }
498
499
500 void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
501 CodeStub* stub = new RangeCheckStub(info, index, true);
502 if (index->is_constant()) {
503 cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
504 __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
505 } else {
506 cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
507 java_nio_Buffer::limit_offset(), T_INT, info);
508 __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
509 }
510 __ move(index, result);
511 }
512
513
514
515 void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
516 LIR_Opr result_op = result;
517 LIR_Opr left_op = left;
518 LIR_Opr right_op = right;
519
520 if (TwoOperandLIRForm && left_op != result_op) {
521 assert(right_op != result_op, "malformed");
522 __ move(left_op, result_op);
523 left_op = result_op;
524 }
525
526 switch(code) {
527 case Bytecodes::_dadd:
528 case Bytecodes::_fadd:
529 case Bytecodes::_ladd:
530 case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break;
531 case Bytecodes::_fmul:
532 case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;
533
534 case Bytecodes::_dmul:
535 {
536 if (is_strictfp) {
537 __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
538 } else {
539 __ mul(left_op, right_op, result_op); break;
540 }
541 }
542 break;
543
544 case Bytecodes::_imul:
545 {
546 bool did_strength_reduce = false;
547
548 if (right->is_constant()) {
549 int c = right->as_jint();
550 if (is_power_of_2(c)) {
551 // do not need tmp here
552 __ shift_left(left_op, exact_log2(c), result_op);
553 did_strength_reduce = true;
554 } else {
555 did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
556 }
557 }
558 // we couldn't strength reduce so just emit the multiply
559 if (!did_strength_reduce) {
560 __ mul(left_op, right_op, result_op);
561 }
562 }
563 break;
564
565 case Bytecodes::_dsub:
566 case Bytecodes::_fsub:
567 case Bytecodes::_lsub:
568 case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;
569
570 case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
571 // ldiv and lrem are implemented with a direct runtime call
572
573 case Bytecodes::_ddiv:
574 {
575 if (is_strictfp) {
576 __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
577 } else {
578 __ div (left_op, right_op, result_op); break;
579 }
580 }
581 break;
582
583 case Bytecodes::_drem:
584 case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;
585
586 default: ShouldNotReachHere();
587 }
588 }
589
590
591 void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
592 arithmetic_op(code, result, left, right, false, tmp);
593 }
594
595
596 void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
597 arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
598 }
599
600
601 void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
602 arithmetic_op(code, result, left, right, is_strictfp, tmp);
603 }
604
605
606 void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
607 if (TwoOperandLIRForm && value != result_op) {
608 assert(count != result_op, "malformed");
609 __ move(value, result_op);
610 value = result_op;
611 }
612
613 assert(count->is_constant() || count->is_register(), "must be");
614 switch(code) {
615 case Bytecodes::_ishl:
616 case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
617 case Bytecodes::_ishr:
618 case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
619 case Bytecodes::_iushr:
620 case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
621 default: ShouldNotReachHere();
622 }
623 }
624
625
626 void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
627 if (TwoOperandLIRForm && left_op != result_op) {
628 assert(right_op != result_op, "malformed");
629 __ move(left_op, result_op);
630 left_op = result_op;
631 }
632
633 switch(code) {
634 case Bytecodes::_iand:
635 case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;
636
637 case Bytecodes::_ior:
638 case Bytecodes::_lor: __ logical_or(left_op, right_op, result_op); break;
639
640 case Bytecodes::_ixor:
641 case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;
642
643 default: ShouldNotReachHere();
644 }
645 }
646
647
648 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
649 if (!GenerateSynchronizationCode) return;
650 // for slow path, use debug info for state after successful locking
651 CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
652 __ load_stack_address_monitor(monitor_no, lock);
653 // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
654 __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
655 }
656
657
658 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
659 if (!GenerateSynchronizationCode) return;
660 // setup registers
661 LIR_Opr hdr = lock;
662 lock = new_hdr;
663 CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
664 __ load_stack_address_monitor(monitor_no, lock);
665 __ unlock_object(hdr, object, lock, scratch, slow_path);
666 }
667
668 #ifndef PRODUCT
669 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
670 if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
671 tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
672 } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
673 tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
674 }
675 }
676 #endif
677
678 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
679 klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
680 // If klass is not loaded we do not know if the klass has finalizers:
681 if (UseFastNewInstance && klass->is_loaded()
682 && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
683
684 Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
685
686 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
687
688 assert(klass->is_loaded(), "must be loaded");
689 // allocate space for instance
690 assert(klass->size_helper() >= 0, "illegal instance size");
691 const int instance_size = align_object_size(klass->size_helper());
692 __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
693 oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
694 } else {
695 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
696 __ branch(lir_cond_always, T_ILLEGAL, slow_path);
697 __ branch_destination(slow_path->continuation());
698 }
699 }
700
701
702 static bool is_constant_zero(Instruction* inst) {
703 IntConstant* c = inst->type()->as_IntConstant();
704 if (c) {
705 return (c->value() == 0);
706 }
707 return false;
708 }
709
710
711 static bool positive_constant(Instruction* inst) {
712 IntConstant* c = inst->type()->as_IntConstant();
713 if (c) {
714 return (c->value() >= 0);
715 }
716 return false;
717 }
718
719
720 static ciArrayKlass* as_array_klass(ciType* type) {
721 if (type != NULL && type->is_array_klass() && type->is_loaded()) {
722 return (ciArrayKlass*)type;
723 } else {
724 return NULL;
725 }
726 }
727
728 static ciType* phi_declared_type(Phi* phi) {
729 ciType* t = phi->operand_at(0)->declared_type();
730 if (t == NULL) {
731 return NULL;
732 }
733 for(int i = 1; i < phi->operand_count(); i++) {
734 if (t != phi->operand_at(i)->declared_type()) {
735 return NULL;
736 }
737 }
738 return t;
739 }
740
741 void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
742 Instruction* src = x->argument_at(0);
743 Instruction* src_pos = x->argument_at(1);
744 Instruction* dst = x->argument_at(2);
745 Instruction* dst_pos = x->argument_at(3);
746 Instruction* length = x->argument_at(4);
747
748 // first try to identify the likely type of the arrays involved
749 ciArrayKlass* expected_type = NULL;
750 bool is_exact = false, src_objarray = false, dst_objarray = false;
751 {
752 ciArrayKlass* src_exact_type = as_array_klass(src->exact_type());
753 ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
754 Phi* phi;
755 if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
756 src_declared_type = as_array_klass(phi_declared_type(phi));
757 }
758 ciArrayKlass* dst_exact_type = as_array_klass(dst->exact_type());
759 ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
760 if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
761 dst_declared_type = as_array_klass(phi_declared_type(phi));
762 }
763
764 if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
765 // the types exactly match so the type is fully known
766 is_exact = true;
767 expected_type = src_exact_type;
768 } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
769 ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
770 ciArrayKlass* src_type = NULL;
771 if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
772 src_type = (ciArrayKlass*) src_exact_type;
773 } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
774 src_type = (ciArrayKlass*) src_declared_type;
775 }
776 if (src_type != NULL) {
777 if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
778 is_exact = true;
779 expected_type = dst_type;
780 }
781 }
782 }
783 // at least pass along a good guess
784 if (expected_type == NULL) expected_type = dst_exact_type;
785 if (expected_type == NULL) expected_type = src_declared_type;
786 if (expected_type == NULL) expected_type = dst_declared_type;
787
788 src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
789 dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
790 }
791
792 // if a probable array type has been identified, figure out if any
793 // of the required checks for a fast case can be elided.
794 int flags = LIR_OpArrayCopy::all_flags;
795
796 if (!src_objarray)
797 flags &= ~LIR_OpArrayCopy::src_objarray;
798 if (!dst_objarray)
799 flags &= ~LIR_OpArrayCopy::dst_objarray;
800
801 if (!x->arg_needs_null_check(0))
802 flags &= ~LIR_OpArrayCopy::src_null_check;
803 if (!x->arg_needs_null_check(2))
804 flags &= ~LIR_OpArrayCopy::dst_null_check;
805
806
807 if (expected_type != NULL) {
808 Value length_limit = NULL;
809
810 IfOp* ifop = length->as_IfOp();
811 if (ifop != NULL) {
812 // look for expressions like min(v, a.length) which ends up as
813 // x > y ? y : x or x >= y ? y : x
814 if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
815 ifop->x() == ifop->fval() &&
816 ifop->y() == ifop->tval()) {
817 length_limit = ifop->y();
818 }
819 }
820
821 // try to skip null checks and range checks
822 NewArray* src_array = src->as_NewArray();
823 if (src_array != NULL) {
824 flags &= ~LIR_OpArrayCopy::src_null_check;
825 if (length_limit != NULL &&
826 src_array->length() == length_limit &&
827 is_constant_zero(src_pos)) {
828 flags &= ~LIR_OpArrayCopy::src_range_check;
829 }
830 }
831
832 NewArray* dst_array = dst->as_NewArray();
833 if (dst_array != NULL) {
834 flags &= ~LIR_OpArrayCopy::dst_null_check;
835 if (length_limit != NULL &&
836 dst_array->length() == length_limit &&
837 is_constant_zero(dst_pos)) {
838 flags &= ~LIR_OpArrayCopy::dst_range_check;
839 }
840 }
841
842 // check from incoming constant values
843 if (positive_constant(src_pos))
844 flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
845 if (positive_constant(dst_pos))
846 flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
847 if (positive_constant(length))
848 flags &= ~LIR_OpArrayCopy::length_positive_check;
849
850 // see if the range check can be elided, which might also imply
851 // that src or dst is non-null.
852 ArrayLength* al = length->as_ArrayLength();
853 if (al != NULL) {
854 if (al->array() == src) {
855 // it's the length of the source array
856 flags &= ~LIR_OpArrayCopy::length_positive_check;
857 flags &= ~LIR_OpArrayCopy::src_null_check;
858 if (is_constant_zero(src_pos))
859 flags &= ~LIR_OpArrayCopy::src_range_check;
860 }
861 if (al->array() == dst) {
862 // it's the length of the destination array
863 flags &= ~LIR_OpArrayCopy::length_positive_check;
864 flags &= ~LIR_OpArrayCopy::dst_null_check;
865 if (is_constant_zero(dst_pos))
866 flags &= ~LIR_OpArrayCopy::dst_range_check;
867 }
868 }
869 if (is_exact) {
870 flags &= ~LIR_OpArrayCopy::type_check;
871 }
872 }
873
874 IntConstant* src_int = src_pos->type()->as_IntConstant();
875 IntConstant* dst_int = dst_pos->type()->as_IntConstant();
876 if (src_int && dst_int) {
877 int s_offs = src_int->value();
878 int d_offs = dst_int->value();
879 if (src_int->value() >= dst_int->value()) {
880 flags &= ~LIR_OpArrayCopy::overlapping;
881 }
882 if (expected_type != NULL) {
883 BasicType t = expected_type->element_type()->basic_type();
884 int element_size = type2aelembytes(t);
885 if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
886 ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
887 flags &= ~LIR_OpArrayCopy::unaligned;
888 }
889 }
890 } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
891 // src and dest positions are the same, or dst is zero so assume
892 // nonoverlapping copy.
893 flags &= ~LIR_OpArrayCopy::overlapping;
894 }
895
896 if (src == dst) {
897 // moving within a single array so no type checks are needed
898 if (flags & LIR_OpArrayCopy::type_check) {
899 flags &= ~LIR_OpArrayCopy::type_check;
900 }
901 }
902 *flagsp = flags;
903 *expected_typep = (ciArrayKlass*)expected_type;
904 }
905
906
907 LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
908 assert(opr->is_register(), "why spill if item is not register?");
909
910 if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
911 LIR_Opr result = new_register(T_FLOAT);
912 set_vreg_flag(result, must_start_in_memory);
913 assert(opr->is_register(), "only a register can be spilled");
914 assert(opr->value_type()->is_float(), "rounding only for floats available");
915 __ roundfp(opr, LIR_OprFact::illegalOpr, result);
916 return result;
917 }
918 return opr;
919 }
920
921
922 LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
923 assert(type2size[t] == type2size[value->type()],
924 err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
925 if (!value->is_register()) {
926 // force into a register
927 LIR_Opr r = new_register(value->type());
928 __ move(value, r);
929 value = r;
930 }
931
932 // create a spill location
933 LIR_Opr tmp = new_register(t);
934 set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);
935
936 // move from register to spill
937 __ move(value, tmp);
938 return tmp;
939 }
940
941 void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
942 if (if_instr->should_profile()) {
943 ciMethod* method = if_instr->profiled_method();
944 assert(method != NULL, "method should be set if branch is profiled");
945 ciMethodData* md = method->method_data_or_null();
946 assert(md != NULL, "Sanity");
947 ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
948 assert(data != NULL, "must have profiling data");
949 assert(data->is_BranchData(), "need BranchData for two-way branches");
950 int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
951 int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
952 if (if_instr->is_swapped()) {
953 int t = taken_count_offset;
954 taken_count_offset = not_taken_count_offset;
955 not_taken_count_offset = t;
956 }
957
958 LIR_Opr md_reg = new_register(T_METADATA);
959 __ metadata2reg(md->constant_encoding(), md_reg);
960
961 LIR_Opr data_offset_reg = new_pointer_register();
962 __ cmove(lir_cond(cond),
963 LIR_OprFact::intptrConst(taken_count_offset),
964 LIR_OprFact::intptrConst(not_taken_count_offset),
965 data_offset_reg, as_BasicType(if_instr->x()->type()));
966
967 // MDO cells are intptr_t, so the data_reg width is arch-dependent.
968 LIR_Opr data_reg = new_pointer_register();
969 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
970 __ move(data_addr, data_reg);
971 // Use leal instead of add to avoid destroying condition codes on x86
972 LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
973 __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
974 __ move(data_reg, data_addr);
975 }
976 }
977
978 // Phi technique:
979 // This is about passing live values from one basic block to the other.
980 // In code generated with Java it is rather rare that more than one
981 // value is on the stack from one basic block to the other.
982 // We optimize our technique for efficient passing of one value
983 // (of type long, int, double..) but it can be extended.
984 // When entering or leaving a basic block, all registers and all spill
985 // slots are release and empty. We use the released registers
986 // and spill slots to pass the live values from one block
987 // to the other. The topmost value, i.e., the value on TOS of expression
988 // stack is passed in registers. All other values are stored in spilling
989 // area. Every Phi has an index which designates its spill slot
990 // At exit of a basic block, we fill the register(s) and spill slots.
991 // At entry of a basic block, the block_prolog sets up the content of phi nodes
992 // and locks necessary registers and spilling slots.
993
994
995 // move current value to referenced phi function
996 void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
997 Phi* phi = sux_val->as_Phi();
998 // cur_val can be null without phi being null in conjunction with inlining
999 if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
1000 LIR_Opr operand = cur_val->operand();
1001 if (cur_val->operand()->is_illegal()) {
1002 assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
1003 "these can be produced lazily");
1004 operand = operand_for_instruction(cur_val);
1005 }
1006 resolver->move(operand, operand_for_instruction(phi));
1007 }
1008 }
1009
1010
1011 // Moves all stack values into their PHI position
1012 void LIRGenerator::move_to_phi(ValueStack* cur_state) {
1013 BlockBegin* bb = block();
1014 if (bb->number_of_sux() == 1) {
1015 BlockBegin* sux = bb->sux_at(0);
1016 assert(sux->number_of_preds() > 0, "invalid CFG");
1017
1018 // a block with only one predecessor never has phi functions
1019 if (sux->number_of_preds() > 1) {
1020 int max_phis = cur_state->stack_size() + cur_state->locals_size();
1021 PhiResolver resolver(this, _virtual_register_number + max_phis * 2);
1022
1023 ValueStack* sux_state = sux->state();
1024 Value sux_value;
1025 int index;
1026
1027 assert(cur_state->scope() == sux_state->scope(), "not matching");
1028 assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
1029 assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");
1030
1031 for_each_stack_value(sux_state, index, sux_value) {
1032 move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
1033 }
1034
1035 for_each_local_value(sux_state, index, sux_value) {
1036 move_to_phi(&resolver, cur_state->local_at(index), sux_value);
1037 }
1038
1039 assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
1040 }
1041 }
1042 }
1043
1044
1045 LIR_Opr LIRGenerator::new_register(BasicType type) {
1046 int vreg = _virtual_register_number;
1047 // add a little fudge factor for the bailout, since the bailout is
1048 // only checked periodically. This gives a few extra registers to
1049 // hand out before we really run out, which helps us keep from
1050 // tripping over assertions.
1051 if (vreg + 20 >= LIR_OprDesc::vreg_max) {
1052 bailout("out of virtual registers");
1053 if (vreg + 2 >= LIR_OprDesc::vreg_max) {
1054 // wrap it around
1055 _virtual_register_number = LIR_OprDesc::vreg_base;
1056 }
1057 }
1058 _virtual_register_number += 1;
1059 return LIR_OprFact::virtual_register(vreg, type);
1060 }
1061
1062
1063 // Try to lock using register in hint
1064 LIR_Opr LIRGenerator::rlock(Value instr) {
1065 return new_register(instr->type());
1066 }
1067
1068
1069 // does an rlock and sets result
1070 LIR_Opr LIRGenerator::rlock_result(Value x) {
1071 LIR_Opr reg = rlock(x);
1072 set_result(x, reg);
1073 return reg;
1074 }
1075
1076
1077 // does an rlock and sets result
1078 LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
1079 LIR_Opr reg;
1080 switch (type) {
1081 case T_BYTE:
1082 case T_BOOLEAN:
1083 reg = rlock_byte(type);
1084 break;
1085 default:
1086 reg = rlock(x);
1087 break;
1088 }
1089
1090 set_result(x, reg);
1091 return reg;
1092 }
1093
1094
1095 //---------------------------------------------------------------------
1096 ciObject* LIRGenerator::get_jobject_constant(Value value) {
1097 ObjectType* oc = value->type()->as_ObjectType();
1098 if (oc) {
1099 return oc->constant_value();
1100 }
1101 return NULL;
1102 }
1103
1104
1105 void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
1106 assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
1107 assert(block()->next() == x, "ExceptionObject must be first instruction of block");
1108
1109 // no moves are created for phi functions at the begin of exception
1110 // handlers, so assign operands manually here
1111 for_each_phi_fun(block(), phi,
1112 operand_for_instruction(phi));
1113
1114 LIR_Opr thread_reg = getThreadPointer();
1115 __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
1116 exceptionOopOpr());
1117 __ move_wide(LIR_OprFact::oopConst(NULL),
1118 new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
1119 __ move_wide(LIR_OprFact::oopConst(NULL),
1120 new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
1121
1122 LIR_Opr result = new_register(T_OBJECT);
1123 __ move(exceptionOopOpr(), result);
1124 set_result(x, result);
1125 }
1126
1127
1128 //----------------------------------------------------------------------
1129 //----------------------------------------------------------------------
1130 //----------------------------------------------------------------------
1131 //----------------------------------------------------------------------
1132 // visitor functions
1133 //----------------------------------------------------------------------
1134 //----------------------------------------------------------------------
1135 //----------------------------------------------------------------------
1136 //----------------------------------------------------------------------
1137
1138 void LIRGenerator::do_Phi(Phi* x) {
1139 // phi functions are never visited directly
1140 ShouldNotReachHere();
1141 }
1142
1143
1144 // Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
1145 void LIRGenerator::do_Constant(Constant* x) {
1146 if (x->state_before() != NULL) {
1147 // Any constant with a ValueStack requires patching so emit the patch here
1148 LIR_Opr reg = rlock_result(x);
1149 CodeEmitInfo* info = state_for(x, x->state_before());
1150 __ oop2reg_patch(NULL, reg, info);
1151 } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
1152 if (!x->is_pinned()) {
1153 // unpinned constants are handled specially so that they can be
1154 // put into registers when they are used multiple times within a
1155 // block. After the block completes their operand will be
1156 // cleared so that other blocks can't refer to that register.
1157 set_result(x, load_constant(x));
1158 } else {
1159 LIR_Opr res = x->operand();
1160 if (!res->is_valid()) {
1161 res = LIR_OprFact::value_type(x->type());
1162 }
1163 if (res->is_constant()) {
1164 LIR_Opr reg = rlock_result(x);
1165 __ move(res, reg);
1166 } else {
1167 set_result(x, res);
1168 }
1169 }
1170 } else {
1171 set_result(x, LIR_OprFact::value_type(x->type()));
1172 }
1173 }
1174
1175
1176 void LIRGenerator::do_Local(Local* x) {
1177 // operand_for_instruction has the side effect of setting the result
1178 // so there's no need to do it here.
1179 operand_for_instruction(x);
1180 }
1181
1182
1183 void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
1184 Unimplemented();
1185 }
1186
1187
1188 void LIRGenerator::do_Return(Return* x) {
1189 if (compilation()->env()->dtrace_method_probes()) {
1190 BasicTypeList signature;
1191 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
1192 signature.append(T_METADATA); // Method*
1193 LIR_OprList* args = new LIR_OprList();
1194 args->append(getThreadPointer());
1195 LIR_Opr meth = new_register(T_METADATA);
1196 __ metadata2reg(method()->constant_encoding(), meth);
1197 args->append(meth);
1198 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
1199 }
1200
1201 if (x->type()->is_void()) {
1202 __ return_op(LIR_OprFact::illegalOpr);
1203 } else {
1204 LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
1205 LIRItem result(x->result(), this);
1206
1207 result.load_item_force(reg);
1208 __ return_op(result.result());
1209 }
1210 set_no_result(x);
1211 }
1212
1213 // Examble: ref.get()
1214 // Combination of LoadField and g1 pre-write barrier
1215 void LIRGenerator::do_Reference_get(Intrinsic* x) {
1216
1217 const int referent_offset = java_lang_ref_Reference::referent_offset;
1218 guarantee(referent_offset > 0, "referent offset not initialized");
1219
1220 assert(x->number_of_arguments() == 1, "wrong type");
1221
1222 LIRItem reference(x->argument_at(0), this);
1223 reference.load_item();
1224
1225 // need to perform the null check on the reference objecy
1226 CodeEmitInfo* info = NULL;
1227 if (x->needs_null_check()) {
1228 info = state_for(x);
1229 }
1230
1231 LIR_Address* referent_field_adr =
1232 new LIR_Address(reference.result(), referent_offset, T_OBJECT);
1233
1234 LIR_Opr result = rlock_result(x);
1235
1236 __ load(referent_field_adr, result, info);
1237
1238 // Register the value in the referent field with the pre-barrier
1239 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
1240 result /* pre_val */,
1241 false /* do_load */,
1242 false /* patch */,
1243 NULL /* info */);
1244 }
1245
1246 // Example: clazz.isInstance(object)
1247 void LIRGenerator::do_isInstance(Intrinsic* x) {
1248 assert(x->number_of_arguments() == 2, "wrong type");
1249
1250 // TODO could try to substitute this node with an equivalent InstanceOf
1251 // if clazz is known to be a constant Class. This will pick up newly found
1252 // constants after HIR construction. I'll leave this to a future change.
1253
1254 // as a first cut, make a simple leaf call to runtime to stay platform independent.
1255 // could follow the aastore example in a future change.
1256
1257 LIRItem clazz(x->argument_at(0), this);
1258 LIRItem object(x->argument_at(1), this);
1259 clazz.load_item();
1260 object.load_item();
1261 LIR_Opr result = rlock_result(x);
1262
1263 // need to perform null check on clazz
1264 if (x->needs_null_check()) {
1265 CodeEmitInfo* info = state_for(x);
1266 __ null_check(clazz.result(), info);
1267 }
1268
1269 LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
1270 CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
1271 x->type(),
1272 NULL); // NULL CodeEmitInfo results in a leaf call
1273 __ move(call_result, result);
1274 }
1275
1276 // Example: object.getClass ()
1277 void LIRGenerator::do_getClass(Intrinsic* x) {
1278 assert(x->number_of_arguments() == 1, "wrong type");
1279
1280 LIRItem rcvr(x->argument_at(0), this);
1281 rcvr.load_item();
1282 LIR_Opr temp = new_register(T_METADATA);
1283 LIR_Opr result = rlock_result(x);
1284
1285 // need to perform the null check on the rcvr
1286 CodeEmitInfo* info = NULL;
1287 if (x->needs_null_check()) {
1288 info = state_for(x);
1289 }
1290
1291 // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
1292 // meaning of these two is mixed up (see JDK-8026837).
1293 __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
1294 __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
1295 }
1296
1297
1298 // Example: Thread.currentThread()
1299 void LIRGenerator::do_currentThread(Intrinsic* x) {
1300 assert(x->number_of_arguments() == 0, "wrong type");
1301 LIR_Opr reg = rlock_result(x);
1302 __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
1303 }
1304
1305
1306 void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
1307 assert(x->number_of_arguments() == 1, "wrong type");
1308 LIRItem receiver(x->argument_at(0), this);
1309
1310 receiver.load_item();
1311 BasicTypeList signature;
1312 signature.append(T_OBJECT); // receiver
1313 LIR_OprList* args = new LIR_OprList();
1314 args->append(receiver.result());
1315 CodeEmitInfo* info = state_for(x, x->state());
1316 call_runtime(&signature, args,
1317 CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
1318 voidType, info);
1319
1320 set_no_result(x);
1321 }
1322
1323
1324 //------------------------local access--------------------------------------
1325
1326 LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
1327 if (x->operand()->is_illegal()) {
1328 Constant* c = x->as_Constant();
1329 if (c != NULL) {
1330 x->set_operand(LIR_OprFact::value_type(c->type()));
1331 } else {
1332 assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
1333 // allocate a virtual register for this local or phi
1334 x->set_operand(rlock(x));
1335 _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
1336 }
1337 }
1338 return x->operand();
1339 }
1340
1341
1342 Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
1343 if (opr->is_virtual()) {
1344 return instruction_for_vreg(opr->vreg_number());
1345 }
1346 return NULL;
1347 }
1348
1349
1350 Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
1351 if (reg_num < _instruction_for_operand.length()) {
1352 return _instruction_for_operand.at(reg_num);
1353 }
1354 return NULL;
1355 }
1356
1357
1358 void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
1359 if (_vreg_flags.size_in_bits() == 0) {
1360 BitMap2D temp(100, num_vreg_flags);
1361 temp.clear();
1362 _vreg_flags = temp;
1363 }
1364 _vreg_flags.at_put_grow(vreg_num, f, true);
1365 }
1366
1367 bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1368 if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1369 return false;
1370 }
1371 return _vreg_flags.at(vreg_num, f);
1372 }
1373
1374
1375 // Block local constant handling. This code is useful for keeping
1376 // unpinned constants and constants which aren't exposed in the IR in
1377 // registers. Unpinned Constant instructions have their operands
1378 // cleared when the block is finished so that other blocks can't end
1379 // up referring to their registers.
1380
1381 LIR_Opr LIRGenerator::load_constant(Constant* x) {
1382 assert(!x->is_pinned(), "only for unpinned constants");
1383 _unpinned_constants.append(x);
1384 return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1385 }
1386
1387
1388 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1389 BasicType t = c->type();
1390 for (int i = 0; i < _constants.length(); i++) {
1391 LIR_Const* other = _constants.at(i);
1392 if (t == other->type()) {
1393 switch (t) {
1394 case T_INT:
1395 case T_FLOAT:
1396 if (c->as_jint_bits() != other->as_jint_bits()) continue;
1397 break;
1398 case T_LONG:
1399 case T_DOUBLE:
1400 if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1401 if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1402 break;
1403 case T_OBJECT:
1404 if (c->as_jobject() != other->as_jobject()) continue;
1405 break;
1406 }
1407 return _reg_for_constants.at(i);
1408 }
1409 }
1410
1411 LIR_Opr result = new_register(t);
1412 __ move((LIR_Opr)c, result);
1413 _constants.append(c);
1414 _reg_for_constants.append(result);
1415 return result;
1416 }
1417
1418 // Various barriers
1419
1420 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1421 bool do_load, bool patch, CodeEmitInfo* info) {
1422 // Do the pre-write barrier, if any.
1423 switch (_bs->kind()) {
1424 #if INCLUDE_ALL_GCS
1425 case BarrierSet::G1SATBCT:
1426 case BarrierSet::G1SATBCTLogging:
1427 G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1428 break;
1429 #endif // INCLUDE_ALL_GCS
1430 case BarrierSet::CardTableModRef:
1431 case BarrierSet::CardTableExtension:
1432 // No pre barriers
1433 break;
1434 case BarrierSet::ModRef:
1435 // No pre barriers
1436 break;
1437 default :
1438 ShouldNotReachHere();
1439
1440 }
1441 }
1442
1443 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1444 switch (_bs->kind()) {
1445 #if INCLUDE_ALL_GCS
1446 case BarrierSet::G1SATBCT:
1447 case BarrierSet::G1SATBCTLogging:
1448 G1SATBCardTableModRef_post_barrier(addr, new_val);
1449 break;
1450 #endif // INCLUDE_ALL_GCS
1451 case BarrierSet::CardTableModRef:
1452 case BarrierSet::CardTableExtension:
1453 CardTableModRef_post_barrier(addr, new_val);
1454 break;
1455 case BarrierSet::ModRef:
1456 // No post barriers
1457 break;
1458 default :
1459 ShouldNotReachHere();
1460 }
1461 }
1462
1463 ////////////////////////////////////////////////////////////////////////
1464 #if INCLUDE_ALL_GCS
1465
1466 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1467 bool do_load, bool patch, CodeEmitInfo* info) {
1468 // First we test whether marking is in progress.
1469 BasicType flag_type;
1470 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
1471 flag_type = T_INT;
1472 } else {
1473 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
1474 "Assumption");
1475 flag_type = T_BYTE;
1476 }
1477 LIR_Opr thrd = getThreadPointer();
1478 LIR_Address* mark_active_flag_addr =
1479 new LIR_Address(thrd,
1480 in_bytes(JavaThread::satb_mark_queue_offset() +
1481 PtrQueue::byte_offset_of_active()),
1482 flag_type);
1483 // Read the marking-in-progress flag.
1484 LIR_Opr flag_val = new_register(T_INT);
1485 __ load(mark_active_flag_addr, flag_val);
1486 __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
1487
1488 LIR_PatchCode pre_val_patch_code = lir_patch_none;
1489
1490 CodeStub* slow;
1491
1492 if (do_load) {
1493 assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
1494 assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
1495
1496 if (patch)
1497 pre_val_patch_code = lir_patch_normal;
1498
1499 pre_val = new_register(T_OBJECT);
1500
1501 if (!addr_opr->is_address()) {
1502 assert(addr_opr->is_register(), "must be");
1503 addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
1504 }
1505 slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
1506 } else {
1507 assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
1508 assert(pre_val->is_register(), "must be");
1509 assert(pre_val->type() == T_OBJECT, "must be an object");
1510 assert(info == NULL, "sanity");
1511
1512 slow = new G1PreBarrierStub(pre_val);
1513 }
1514
1515 __ branch(lir_cond_notEqual, T_INT, slow);
1516 __ branch_destination(slow->continuation());
1517 }
1518
1519 void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1520 // If the "new_val" is a constant NULL, no barrier is necessary.
1521 if (new_val->is_constant() &&
1522 new_val->as_constant_ptr()->as_jobject() == NULL) return;
1523
1524 if (!new_val->is_register()) {
1525 LIR_Opr new_val_reg = new_register(T_OBJECT);
1526 if (new_val->is_constant()) {
1527 __ move(new_val, new_val_reg);
1528 } else {
1529 __ leal(new_val, new_val_reg);
1530 }
1531 new_val = new_val_reg;
1532 }
1533 assert(new_val->is_register(), "must be a register at this point");
1534
1535 if (addr->is_address()) {
1536 LIR_Address* address = addr->as_address_ptr();
1537 LIR_Opr ptr = new_pointer_register();
1538 if (!address->index()->is_valid() && address->disp() == 0) {
1539 __ move(address->base(), ptr);
1540 } else {
1541 assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1542 __ leal(addr, ptr);
1543 }
1544 addr = ptr;
1545 }
1546 assert(addr->is_register(), "must be a register at this point");
1547
1548 LIR_Opr xor_res = new_pointer_register();
1549 LIR_Opr xor_shift_res = new_pointer_register();
1550 if (TwoOperandLIRForm ) {
1551 __ move(addr, xor_res);
1552 __ logical_xor(xor_res, new_val, xor_res);
1553 __ move(xor_res, xor_shift_res);
1554 __ unsigned_shift_right(xor_shift_res,
1555 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1556 xor_shift_res,
1557 LIR_OprDesc::illegalOpr());
1558 } else {
1559 __ logical_xor(addr, new_val, xor_res);
1560 __ unsigned_shift_right(xor_res,
1561 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1562 xor_shift_res,
1563 LIR_OprDesc::illegalOpr());
1564 }
1565
1566 if (!new_val->is_register()) {
1567 LIR_Opr new_val_reg = new_register(T_OBJECT);
1568 __ leal(new_val, new_val_reg);
1569 new_val = new_val_reg;
1570 }
1571 assert(new_val->is_register(), "must be a register at this point");
1572
1573 __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
1574
1575 CodeStub* slow = new G1PostBarrierStub(addr, new_val);
1576 __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
1577 __ branch_destination(slow->continuation());
1578 }
1579
1580 #endif // INCLUDE_ALL_GCS
1581 ////////////////////////////////////////////////////////////////////////
1582
1583 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1584
1585 assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
1586 LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
1587 if (addr->is_address()) {
1588 LIR_Address* address = addr->as_address_ptr();
1589 // ptr cannot be an object because we use this barrier for array card marks
1590 // and addr can point in the middle of an array.
1591 LIR_Opr ptr = new_pointer_register();
1592 if (!address->index()->is_valid() && address->disp() == 0) {
1593 __ move(address->base(), ptr);
1594 } else {
1595 assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1596 __ leal(addr, ptr);
1597 }
1598 addr = ptr;
1599 }
1600 assert(addr->is_register(), "must be a register at this point");
1601
1602 #ifdef ARM
1603 // TODO: ARM - move to platform-dependent code
1604 LIR_Opr tmp = FrameMap::R14_opr;
1605 if (VM_Version::supports_movw()) {
1606 __ move((LIR_Opr)card_table_base, tmp);
1607 } else {
1608 __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
1609 }
1610
1611 CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
1612 LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
1613 if(((int)ct->byte_map_base & 0xff) == 0) {
1614 __ move(tmp, card_addr);
1615 } else {
1616 LIR_Opr tmp_zero = new_register(T_INT);
1617 __ move(LIR_OprFact::intConst(0), tmp_zero);
1618 __ move(tmp_zero, card_addr);
1619 }
1620 #else // ARM
1621 LIR_Opr tmp = new_pointer_register();
1622 if (TwoOperandLIRForm) {
1623 __ move(addr, tmp);
1624 __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1625 } else {
1626 __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1627 }
1628 if (can_inline_as_constant(card_table_base)) {
1629 __ move(LIR_OprFact::intConst(0),
1630 new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1631 } else {
1632 __ move(LIR_OprFact::intConst(0),
1633 new LIR_Address(tmp, load_constant(card_table_base),
1634 T_BYTE));
1635 }
1636 #endif // ARM
1637 }
1638
1639
1640 //------------------------field access--------------------------------------
1641
1642 // Comment copied form templateTable_i486.cpp
1643 // ----------------------------------------------------------------------------
1644 // Volatile variables demand their effects be made known to all CPU's in
1645 // order. Store buffers on most chips allow reads & writes to reorder; the
1646 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1647 // memory barrier (i.e., it's not sufficient that the interpreter does not
1648 // reorder volatile references, the hardware also must not reorder them).
1649 //
1650 // According to the new Java Memory Model (JMM):
1651 // (1) All volatiles are serialized wrt to each other.
1652 // ALSO reads & writes act as aquire & release, so:
1653 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1654 // the read float up to before the read. It's OK for non-volatile memory refs
1655 // that happen before the volatile read to float down below it.
1656 // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs
1657 // that happen BEFORE the write float down to after the write. It's OK for
1658 // non-volatile memory refs that happen after the volatile write to float up
1659 // before it.
1660 //
1661 // We only put in barriers around volatile refs (they are expensive), not
1662 // _between_ memory refs (that would require us to track the flavor of the
1663 // previous memory refs). Requirements (2) and (3) require some barriers
1664 // before volatile stores and after volatile loads. These nearly cover
1665 // requirement (1) but miss the volatile-store-volatile-load case. This final
1666 // case is placed after volatile-stores although it could just as well go
1667 // before volatile-loads.
1668
1669
1670 void LIRGenerator::do_StoreField(StoreField* x) {
1671 bool needs_patching = x->needs_patching();
1672 bool is_volatile = x->field()->is_volatile();
1673 BasicType field_type = x->field_type();
1674 bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1675
1676 CodeEmitInfo* info = NULL;
1677 if (needs_patching) {
1678 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1679 info = state_for(x, x->state_before());
1680 } else if (x->needs_null_check()) {
1681 NullCheck* nc = x->explicit_null_check();
1682 if (nc == NULL) {
1683 info = state_for(x);
1684 } else {
1685 info = state_for(nc);
1686 }
1687 }
1688
1689
1690 LIRItem object(x->obj(), this);
1691 LIRItem value(x->value(), this);
1692
1693 object.load_item();
1694
1695 if (is_volatile || needs_patching) {
1696 // load item if field is volatile (fewer special cases for volatiles)
1697 // load item if field not initialized
1698 // load item if field not constant
1699 // because of code patching we cannot inline constants
1700 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1701 value.load_byte_item();
1702 } else {
1703 value.load_item();
1704 }
1705 } else {
1706 value.load_for_store(field_type);
1707 }
1708
1709 set_no_result(x);
1710
1711 #ifndef PRODUCT
1712 if (PrintNotLoaded && needs_patching) {
1713 tty->print_cr(" ###class not loaded at store_%s bci %d",
1714 x->is_static() ? "static" : "field", x->printable_bci());
1715 }
1716 #endif
1717
1718 if (x->needs_null_check() &&
1719 (needs_patching ||
1720 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1721 // emit an explicit null check because the offset is too large
1722 __ null_check(object.result(), new CodeEmitInfo(info));
1723 }
1724
1725 LIR_Address* address;
1726 if (needs_patching) {
1727 // we need to patch the offset in the instruction so don't allow
1728 // generate_address to try to be smart about emitting the -1.
1729 // Otherwise the patching code won't know how to find the
1730 // instruction to patch.
1731 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1732 } else {
1733 address = generate_address(object.result(), x->offset(), field_type);
1734 }
1735
1736 if (is_volatile && os::is_MP()) {
1737 __ membar_release();
1738 }
1739
1740 if (is_oop) {
1741 // Do the pre-write barrier, if any.
1742 pre_barrier(LIR_OprFact::address(address),
1743 LIR_OprFact::illegalOpr /* pre_val */,
1744 true /* do_load*/,
1745 needs_patching,
1746 (info ? new CodeEmitInfo(info) : NULL));
1747 }
1748
1749 bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1750 if (needs_atomic_access && !needs_patching) {
1751 volatile_field_store(value.result(), address, info);
1752 } else {
1753 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1754 __ store(value.result(), address, info, patch_code);
1755 }
1756
1757 if (is_oop) {
1758 // Store to object so mark the card of the header
1759 post_barrier(object.result(), value.result());
1760 }
1761
1762 if (is_volatile && os::is_MP()) {
1763 __ membar();
1764 }
1765 }
1766
1767
1768 void LIRGenerator::do_LoadField(LoadField* x) {
1769 bool needs_patching = x->needs_patching();
1770 bool is_volatile = x->field()->is_volatile();
1771 BasicType field_type = x->field_type();
1772
1773 CodeEmitInfo* info = NULL;
1774 if (needs_patching) {
1775 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1776 info = state_for(x, x->state_before());
1777 } else if (x->needs_null_check()) {
1778 NullCheck* nc = x->explicit_null_check();
1779 if (nc == NULL) {
1780 info = state_for(x);
1781 } else {
1782 info = state_for(nc);
1783 }
1784 }
1785
1786 LIRItem object(x->obj(), this);
1787
1788 object.load_item();
1789
1790 #ifndef PRODUCT
1791 if (PrintNotLoaded && needs_patching) {
1792 tty->print_cr(" ###class not loaded at load_%s bci %d",
1793 x->is_static() ? "static" : "field", x->printable_bci());
1794 }
1795 #endif
1796
1797 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1798 if (x->needs_null_check() &&
1799 (needs_patching ||
1800 MacroAssembler::needs_explicit_null_check(x->offset()) ||
1801 stress_deopt)) {
1802 LIR_Opr obj = object.result();
1803 if (stress_deopt) {
1804 obj = new_register(T_OBJECT);
1805 __ move(LIR_OprFact::oopConst(NULL), obj);
1806 }
1807 // emit an explicit null check because the offset is too large
1808 __ null_check(obj, new CodeEmitInfo(info));
1809 }
1810
1811 LIR_Opr reg = rlock_result(x, field_type);
1812 LIR_Address* address;
1813 if (needs_patching) {
1814 // we need to patch the offset in the instruction so don't allow
1815 // generate_address to try to be smart about emitting the -1.
1816 // Otherwise the patching code won't know how to find the
1817 // instruction to patch.
1818 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1819 } else {
1820 address = generate_address(object.result(), x->offset(), field_type);
1821 }
1822
1823 bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
1824 if (needs_atomic_access && !needs_patching) {
1825 volatile_field_load(address, reg, info);
1826 } else {
1827 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1828 __ load(address, reg, info, patch_code);
1829 }
1830
1831 if (is_volatile && os::is_MP()) {
1832 __ membar_acquire();
1833 }
1834 }
1835
1836
1837 //------------------------java.nio.Buffer.checkIndex------------------------
1838
1839 // int java.nio.Buffer.checkIndex(int)
1840 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1841 // NOTE: by the time we are in checkIndex() we are guaranteed that
1842 // the buffer is non-null (because checkIndex is package-private and
1843 // only called from within other methods in the buffer).
1844 assert(x->number_of_arguments() == 2, "wrong type");
1845 LIRItem buf (x->argument_at(0), this);
1846 LIRItem index(x->argument_at(1), this);
1847 buf.load_item();
1848 index.load_item();
1849
1850 LIR_Opr result = rlock_result(x);
1851 if (GenerateRangeChecks) {
1852 CodeEmitInfo* info = state_for(x);
1853 CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1854 if (index.result()->is_constant()) {
1855 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1856 __ branch(lir_cond_belowEqual, T_INT, stub);
1857 } else {
1858 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1859 java_nio_Buffer::limit_offset(), T_INT, info);
1860 __ branch(lir_cond_aboveEqual, T_INT, stub);
1861 }
1862 __ move(index.result(), result);
1863 } else {
1864 // Just load the index into the result register
1865 __ move(index.result(), result);
1866 }
1867 }
1868
1869
1870 //------------------------array access--------------------------------------
1871
1872
1873 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1874 LIRItem array(x->array(), this);
1875 array.load_item();
1876 LIR_Opr reg = rlock_result(x);
1877
1878 CodeEmitInfo* info = NULL;
1879 if (x->needs_null_check()) {
1880 NullCheck* nc = x->explicit_null_check();
1881 if (nc == NULL) {
1882 info = state_for(x);
1883 } else {
1884 info = state_for(nc);
1885 }
1886 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
1887 LIR_Opr obj = new_register(T_OBJECT);
1888 __ move(LIR_OprFact::oopConst(NULL), obj);
1889 __ null_check(obj, new CodeEmitInfo(info));
1890 }
1891 }
1892 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1893 }
1894
1895
1896 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1897 bool use_length = x->length() != NULL;
1898 LIRItem array(x->array(), this);
1899 LIRItem index(x->index(), this);
1900 LIRItem length(this);
1901 bool needs_range_check = x->compute_needs_range_check();
1902
1903 if (use_length && needs_range_check) {
1904 length.set_instruction(x->length());
1905 length.load_item();
1906 }
1907
1908 array.load_item();
1909 if (index.is_constant() && can_inline_as_constant(x->index())) {
1910 // let it be a constant
1911 index.dont_load_item();
1912 } else {
1913 index.load_item();
1914 }
1915
1916 CodeEmitInfo* range_check_info = state_for(x);
1917 CodeEmitInfo* null_check_info = NULL;
1918 if (x->needs_null_check()) {
1919 NullCheck* nc = x->explicit_null_check();
1920 if (nc != NULL) {
1921 null_check_info = state_for(nc);
1922 } else {
1923 null_check_info = range_check_info;
1924 }
1925 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1926 LIR_Opr obj = new_register(T_OBJECT);
1927 __ move(LIR_OprFact::oopConst(NULL), obj);
1928 __ null_check(obj, new CodeEmitInfo(null_check_info));
1929 }
1930 }
1931
1932 // emit array address setup early so it schedules better
1933 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1934
1935 if (GenerateRangeChecks && needs_range_check) {
1936 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1937 __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1938 } else if (use_length) {
1939 // TODO: use a (modified) version of array_range_check that does not require a
1940 // constant length to be loaded to a register
1941 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1942 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1943 } else {
1944 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1945 // The range check performs the null check, so clear it out for the load
1946 null_check_info = NULL;
1947 }
1948 }
1949
1950 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1951 }
1952
1953
1954 void LIRGenerator::do_NullCheck(NullCheck* x) {
1955 if (x->can_trap()) {
1956 LIRItem value(x->obj(), this);
1957 value.load_item();
1958 CodeEmitInfo* info = state_for(x);
1959 __ null_check(value.result(), info);
1960 }
1961 }
1962
1963
1964 void LIRGenerator::do_TypeCast(TypeCast* x) {
1965 LIRItem value(x->obj(), this);
1966 value.load_item();
1967 // the result is the same as from the node we are casting
1968 set_result(x, value.result());
1969 }
1970
1971
1972 void LIRGenerator::do_Throw(Throw* x) {
1973 LIRItem exception(x->exception(), this);
1974 exception.load_item();
1975 set_no_result(x);
1976 LIR_Opr exception_opr = exception.result();
1977 CodeEmitInfo* info = state_for(x, x->state());
1978
1979 #ifndef PRODUCT
1980 if (PrintC1Statistics) {
1981 increment_counter(Runtime1::throw_count_address(), T_INT);
1982 }
1983 #endif
1984
1985 // check if the instruction has an xhandler in any of the nested scopes
1986 bool unwind = false;
1987 if (info->exception_handlers()->length() == 0) {
1988 // this throw is not inside an xhandler
1989 unwind = true;
1990 } else {
1991 // get some idea of the throw type
1992 bool type_is_exact = true;
1993 ciType* throw_type = x->exception()->exact_type();
1994 if (throw_type == NULL) {
1995 type_is_exact = false;
1996 throw_type = x->exception()->declared_type();
1997 }
1998 if (throw_type != NULL && throw_type->is_instance_klass()) {
1999 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
2000 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
2001 }
2002 }
2003
2004 // do null check before moving exception oop into fixed register
2005 // to avoid a fixed interval with an oop during the null check.
2006 // Use a copy of the CodeEmitInfo because debug information is
2007 // different for null_check and throw.
2008 if (GenerateCompilerNullChecks &&
2009 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
2010 // if the exception object wasn't created using new then it might be null.
2011 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2012 }
2013
2014 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2015 // we need to go through the exception lookup path to get JVMTI
2016 // notification done
2017 unwind = false;
2018 }
2019
2020 // move exception oop into fixed register
2021 __ move(exception_opr, exceptionOopOpr());
2022
2023 if (unwind) {
2024 __ unwind_exception(exceptionOopOpr());
2025 } else {
2026 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2027 }
2028 }
2029
2030
2031 void LIRGenerator::do_RoundFP(RoundFP* x) {
2032 LIRItem input(x->input(), this);
2033 input.load_item();
2034 LIR_Opr input_opr = input.result();
2035 assert(input_opr->is_register(), "why round if value is not in a register?");
2036 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2037 if (input_opr->is_single_fpu()) {
2038 set_result(x, round_item(input_opr)); // This code path not currently taken
2039 } else {
2040 LIR_Opr result = new_register(T_DOUBLE);
2041 set_vreg_flag(result, must_start_in_memory);
2042 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2043 set_result(x, result);
2044 }
2045 }
2046
2047 // Here UnsafeGetRaw may have x->base() and x->index() be int or long
2048 // on both 64 and 32 bits. Expecting x->base() to be always long on 64bit.
2049 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2050 LIRItem base(x->base(), this);
2051 LIRItem idx(this);
2052
2053 base.load_item();
2054 if (x->has_index()) {
2055 idx.set_instruction(x->index());
2056 idx.load_nonconstant();
2057 }
2058
2059 LIR_Opr reg = rlock_result(x, x->basic_type());
2060
2061 int log2_scale = 0;
2062 if (x->has_index()) {
2063 log2_scale = x->log2_scale();
2064 }
2065
2066 assert(!x->has_index() || idx.value() == x->index(), "should match");
2067
2068 LIR_Opr base_op = base.result();
2069 LIR_Opr index_op = idx.result();
2070 #ifndef _LP64
2071 if (base_op->type() == T_LONG) {
2072 base_op = new_register(T_INT);
2073 __ convert(Bytecodes::_l2i, base.result(), base_op);
2074 }
2075 if (x->has_index()) {
2076 if (index_op->type() == T_LONG) {
2077 LIR_Opr long_index_op = index_op;
2078 if (index_op->is_constant()) {
2079 long_index_op = new_register(T_LONG);
2080 __ move(index_op, long_index_op);
2081 }
2082 index_op = new_register(T_INT);
2083 __ convert(Bytecodes::_l2i, long_index_op, index_op);
2084 } else {
2085 assert(x->index()->type()->tag() == intTag, "must be");
2086 }
2087 }
2088 // At this point base and index should be all ints.
2089 assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
2090 assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
2091 #else
2092 if (x->has_index()) {
2093 if (index_op->type() == T_INT) {
2094 if (!index_op->is_constant()) {
2095 index_op = new_register(T_LONG);
2096 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2097 }
2098 } else {
2099 assert(index_op->type() == T_LONG, "must be");
2100 if (index_op->is_constant()) {
2101 index_op = new_register(T_LONG);
2102 __ move(idx.result(), index_op);
2103 }
2104 }
2105 }
2106 // At this point base is a long non-constant
2107 // Index is a long register or a int constant.
2108 // We allow the constant to stay an int because that would allow us a more compact encoding by
2109 // embedding an immediate offset in the address expression. If we have a long constant, we have to
2110 // move it into a register first.
2111 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2112 assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2113 (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2114 #endif
2115
2116 BasicType dst_type = x->basic_type();
2117
2118 LIR_Address* addr;
2119 if (index_op->is_constant()) {
2120 assert(log2_scale == 0, "must not have a scale");
2121 assert(index_op->type() == T_INT, "only int constants supported");
2122 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2123 } else {
2124 #ifdef X86
2125 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2126 #elif defined(ARM)
2127 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2128 #else
2129 if (index_op->is_illegal() || log2_scale == 0) {
2130 addr = new LIR_Address(base_op, index_op, dst_type);
2131 } else {
2132 LIR_Opr tmp = new_pointer_register();
2133 __ shift_left(index_op, log2_scale, tmp);
2134 addr = new LIR_Address(base_op, tmp, dst_type);
2135 }
2136 #endif
2137 }
2138
2139 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2140 __ unaligned_move(addr, reg);
2141 } else {
2142 if (dst_type == T_OBJECT && x->is_wide()) {
2143 __ move_wide(addr, reg);
2144 } else {
2145 __ move(addr, reg);
2146 }
2147 }
2148 }
2149
2150
2151 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2152 int log2_scale = 0;
2153 BasicType type = x->basic_type();
2154
2155 if (x->has_index()) {
2156 log2_scale = x->log2_scale();
2157 }
2158
2159 LIRItem base(x->base(), this);
2160 LIRItem value(x->value(), this);
2161 LIRItem idx(this);
2162
2163 base.load_item();
2164 if (x->has_index()) {
2165 idx.set_instruction(x->index());
2166 idx.load_item();
2167 }
2168
2169 if (type == T_BYTE || type == T_BOOLEAN) {
2170 value.load_byte_item();
2171 } else {
2172 value.load_item();
2173 }
2174
2175 set_no_result(x);
2176
2177 LIR_Opr base_op = base.result();
2178 LIR_Opr index_op = idx.result();
2179
2180 #ifndef _LP64
2181 if (base_op->type() == T_LONG) {
2182 base_op = new_register(T_INT);
2183 __ convert(Bytecodes::_l2i, base.result(), base_op);
2184 }
2185 if (x->has_index()) {
2186 if (index_op->type() == T_LONG) {
2187 index_op = new_register(T_INT);
2188 __ convert(Bytecodes::_l2i, idx.result(), index_op);
2189 }
2190 }
2191 // At this point base and index should be all ints and not constants
2192 assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
2193 assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
2194 #else
2195 if (x->has_index()) {
2196 if (index_op->type() == T_INT) {
2197 index_op = new_register(T_LONG);
2198 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2199 }
2200 }
2201 // At this point base and index are long and non-constant
2202 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2203 assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2204 #endif
2205
2206 if (log2_scale != 0) {
2207 // temporary fix (platform dependent code without shift on Intel would be better)
2208 // TODO: ARM also allows embedded shift in the address
2209 __ shift_left(index_op, log2_scale, index_op);
2210 }
2211
2212 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2213 __ move(value.result(), addr);
2214 }
2215
2216
2217 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2218 BasicType type = x->basic_type();
2219 LIRItem src(x->object(), this);
2220 LIRItem off(x->offset(), this);
2221
2222 off.load_item();
2223 src.load_item();
2224
2225 LIR_Opr value = rlock_result(x, x->basic_type());
2226
2227 get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2228
2229 #if INCLUDE_ALL_GCS
2230 // We might be reading the value of the referent field of a
2231 // Reference object in order to attach it back to the live
2232 // object graph. If G1 is enabled then we need to record
2233 // the value that is being returned in an SATB log buffer.
2234 //
2235 // We need to generate code similar to the following...
2236 //
2237 // if (offset == java_lang_ref_Reference::referent_offset) {
2238 // if (src != NULL) {
2239 // if (klass(src)->reference_type() != REF_NONE) {
2240 // pre_barrier(..., value, ...);
2241 // }
2242 // }
2243 // }
2244
2245 if (UseG1GC && type == T_OBJECT) {
2246 bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
2247 bool gen_offset_check = true; // Assume we need to generate the offset guard.
2248 bool gen_source_check = true; // Assume we need to check the src object for null.
2249 bool gen_type_check = true; // Assume we need to check the reference_type.
2250
2251 if (off.is_constant()) {
2252 jlong off_con = (off.type()->is_int() ?
2253 (jlong) off.get_jint_constant() :
2254 off.get_jlong_constant());
2255
2256
2257 if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2258 // The constant offset is something other than referent_offset.
2259 // We can skip generating/checking the remaining guards and
2260 // skip generation of the code stub.
2261 gen_pre_barrier = false;
2262 } else {
2263 // The constant offset is the same as referent_offset -
2264 // we do not need to generate a runtime offset check.
2265 gen_offset_check = false;
2266 }
2267 }
2268
2269 // We don't need to generate stub if the source object is an array
2270 if (gen_pre_barrier && src.type()->is_array()) {
2271 gen_pre_barrier = false;
2272 }
2273
2274 if (gen_pre_barrier) {
2275 // We still need to continue with the checks.
2276 if (src.is_constant()) {
2277 ciObject* src_con = src.get_jobject_constant();
2278 guarantee(src_con != NULL, "no source constant");
2279
2280 if (src_con->is_null_object()) {
2281 // The constant src object is null - We can skip
2282 // generating the code stub.
2283 gen_pre_barrier = false;
2284 } else {
2285 // Non-null constant source object. We still have to generate
2286 // the slow stub - but we don't need to generate the runtime
2287 // null object check.
2288 gen_source_check = false;
2289 }
2290 }
2291 }
2292 if (gen_pre_barrier && !PatchALot) {
2293 // Can the klass of object be statically determined to be
2294 // a sub-class of Reference?
2295 ciType* type = src.value()->declared_type();
2296 if ((type != NULL) && type->is_loaded()) {
2297 if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2298 gen_type_check = false;
2299 } else if (type->is_klass() &&
2300 !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2301 // Not Reference and not Object klass.
2302 gen_pre_barrier = false;
2303 }
2304 }
2305 }
2306
2307 if (gen_pre_barrier) {
2308 LabelObj* Lcont = new LabelObj();
2309
2310 // We can have generate one runtime check here. Let's start with
2311 // the offset check.
2312 if (gen_offset_check) {
2313 // if (offset != referent_offset) -> continue
2314 // If offset is an int then we can do the comparison with the
2315 // referent_offset constant; otherwise we need to move
2316 // referent_offset into a temporary register and generate
2317 // a reg-reg compare.
2318
2319 LIR_Opr referent_off;
2320
2321 if (off.type()->is_int()) {
2322 referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2323 } else {
2324 assert(off.type()->is_long(), "what else?");
2325 referent_off = new_register(T_LONG);
2326 __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2327 }
2328 __ cmp(lir_cond_notEqual, off.result(), referent_off);
2329 __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2330 }
2331 if (gen_source_check) {
2332 // offset is a const and equals referent offset
2333 // if (source == null) -> continue
2334 __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2335 __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2336 }
2337 LIR_Opr src_klass = new_register(T_OBJECT);
2338 if (gen_type_check) {
2339 // We have determined that offset == referent_offset && src != null.
2340 // if (src->_klass->_reference_type == REF_NONE) -> continue
2341 __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
2342 LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
2343 LIR_Opr reference_type = new_register(T_INT);
2344 __ move(reference_type_addr, reference_type);
2345 __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2346 __ branch(lir_cond_equal, T_INT, Lcont->label());
2347 }
2348 {
2349 // We have determined that src->_klass->_reference_type != REF_NONE
2350 // so register the value in the referent field with the pre-barrier.
2351 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2352 value /* pre_val */,
2353 false /* do_load */,
2354 false /* patch */,
2355 NULL /* info */);
2356 }
2357 __ branch_destination(Lcont->label());
2358 }
2359 }
2360 #endif // INCLUDE_ALL_GCS
2361
2362 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2363 }
2364
2365
2366 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2367 BasicType type = x->basic_type();
2368 LIRItem src(x->object(), this);
2369 LIRItem off(x->offset(), this);
2370 LIRItem data(x->value(), this);
2371
2372 src.load_item();
2373 if (type == T_BOOLEAN || type == T_BYTE) {
2374 data.load_byte_item();
2375 } else {
2376 data.load_item();
2377 }
2378 off.load_item();
2379
2380 set_no_result(x);
2381
2382 if (x->is_volatile() && os::is_MP()) __ membar_release();
2383 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2384 if (x->is_volatile() && os::is_MP()) __ membar();
2385 }
2386
2387
2388 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2389 LIRItem src(x->object(), this);
2390 LIRItem off(x->offset(), this);
2391
2392 src.load_item();
2393 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2394 // let it be a constant
2395 off.dont_load_item();
2396 } else {
2397 off.load_item();
2398 }
2399
2400 set_no_result(x);
2401
2402 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2403 __ prefetch(addr, is_store);
2404 }
2405
2406
2407 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2408 do_UnsafePrefetch(x, false);
2409 }
2410
2411
2412 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2413 do_UnsafePrefetch(x, true);
2414 }
2415
2416
2417 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2418 int lng = x->length();
2419
2420 for (int i = 0; i < lng; i++) {
2421 SwitchRange* one_range = x->at(i);
2422 int low_key = one_range->low_key();
2423 int high_key = one_range->high_key();
2424 BlockBegin* dest = one_range->sux();
2425 if (low_key == high_key) {
2426 __ cmp(lir_cond_equal, value, low_key);
2427 __ branch(lir_cond_equal, T_INT, dest);
2428 } else if (high_key - low_key == 1) {
2429 __ cmp(lir_cond_equal, value, low_key);
2430 __ branch(lir_cond_equal, T_INT, dest);
2431 __ cmp(lir_cond_equal, value, high_key);
2432 __ branch(lir_cond_equal, T_INT, dest);
2433 } else {
2434 LabelObj* L = new LabelObj();
2435 __ cmp(lir_cond_less, value, low_key);
2436 __ branch(lir_cond_less, T_INT, L->label());
2437 __ cmp(lir_cond_lessEqual, value, high_key);
2438 __ branch(lir_cond_lessEqual, T_INT, dest);
2439 __ branch_destination(L->label());
2440 }
2441 }
2442 __ jump(default_sux);
2443 }
2444
2445
2446 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2447 SwitchRangeList* res = new SwitchRangeList();
2448 int len = x->length();
2449 if (len > 0) {
2450 BlockBegin* sux = x->sux_at(0);
2451 int key = x->lo_key();
2452 BlockBegin* default_sux = x->default_sux();
2453 SwitchRange* range = new SwitchRange(key, sux);
2454 for (int i = 0; i < len; i++, key++) {
2455 BlockBegin* new_sux = x->sux_at(i);
2456 if (sux == new_sux) {
2457 // still in same range
2458 range->set_high_key(key);
2459 } else {
2460 // skip tests which explicitly dispatch to the default
2461 if (sux != default_sux) {
2462 res->append(range);
2463 }
2464 range = new SwitchRange(key, new_sux);
2465 }
2466 sux = new_sux;
2467 }
2468 if (res->length() == 0 || res->last() != range) res->append(range);
2469 }
2470 return res;
2471 }
2472
2473
2474 // we expect the keys to be sorted by increasing value
2475 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2476 SwitchRangeList* res = new SwitchRangeList();
2477 int len = x->length();
2478 if (len > 0) {
2479 BlockBegin* default_sux = x->default_sux();
2480 int key = x->key_at(0);
2481 BlockBegin* sux = x->sux_at(0);
2482 SwitchRange* range = new SwitchRange(key, sux);
2483 for (int i = 1; i < len; i++) {
2484 int new_key = x->key_at(i);
2485 BlockBegin* new_sux = x->sux_at(i);
2486 if (key+1 == new_key && sux == new_sux) {
2487 // still in same range
2488 range->set_high_key(new_key);
2489 } else {
2490 // skip tests which explicitly dispatch to the default
2491 if (range->sux() != default_sux) {
2492 res->append(range);
2493 }
2494 range = new SwitchRange(new_key, new_sux);
2495 }
2496 key = new_key;
2497 sux = new_sux;
2498 }
2499 if (res->length() == 0 || res->last() != range) res->append(range);
2500 }
2501 return res;
2502 }
2503
2504
2505 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2506 LIRItem tag(x->tag(), this);
2507 tag.load_item();
2508 set_no_result(x);
2509
2510 if (x->is_safepoint()) {
2511 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2512 }
2513
2514 // move values into phi locations
2515 move_to_phi(x->state());
2516
2517 int lo_key = x->lo_key();
2518 int hi_key = x->hi_key();
2519 int len = x->length();
2520 LIR_Opr value = tag.result();
2521 if (UseTableRanges) {
2522 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2523 } else {
2524 for (int i = 0; i < len; i++) {
2525 __ cmp(lir_cond_equal, value, i + lo_key);
2526 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2527 }
2528 __ jump(x->default_sux());
2529 }
2530 }
2531
2532
2533 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2534 LIRItem tag(x->tag(), this);
2535 tag.load_item();
2536 set_no_result(x);
2537
2538 if (x->is_safepoint()) {
2539 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2540 }
2541
2542 // move values into phi locations
2543 move_to_phi(x->state());
2544
2545 LIR_Opr value = tag.result();
2546 if (UseTableRanges) {
2547 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2548 } else {
2549 int len = x->length();
2550 for (int i = 0; i < len; i++) {
2551 __ cmp(lir_cond_equal, value, x->key_at(i));
2552 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2553 }
2554 __ jump(x->default_sux());
2555 }
2556 }
2557
2558
2559 void LIRGenerator::do_Goto(Goto* x) {
2560 set_no_result(x);
2561
2562 if (block()->next()->as_OsrEntry()) {
2563 // need to free up storage used for OSR entry point
2564 LIR_Opr osrBuffer = block()->next()->operand();
2565 BasicTypeList signature;
2566 signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2567 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2568 __ move(osrBuffer, cc->args()->at(0));
2569 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2570 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2571 }
2572
2573 if (x->is_safepoint()) {
2574 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2575
2576 // increment backedge counter if needed
2577 CodeEmitInfo* info = state_for(x, state);
2578 increment_backedge_counter(info, x->profiled_bci());
2579 CodeEmitInfo* safepoint_info = state_for(x, state);
2580 __ safepoint(safepoint_poll_register(), safepoint_info);
2581 }
2582
2583 // Gotos can be folded Ifs, handle this case.
2584 if (x->should_profile()) {
2585 ciMethod* method = x->profiled_method();
2586 assert(method != NULL, "method should be set if branch is profiled");
2587 ciMethodData* md = method->method_data_or_null();
2588 assert(md != NULL, "Sanity");
2589 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2590 assert(data != NULL, "must have profiling data");
2591 int offset;
2592 if (x->direction() == Goto::taken) {
2593 assert(data->is_BranchData(), "need BranchData for two-way branches");
2594 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2595 } else if (x->direction() == Goto::not_taken) {
2596 assert(data->is_BranchData(), "need BranchData for two-way branches");
2597 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2598 } else {
2599 assert(data->is_JumpData(), "need JumpData for branches");
2600 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2601 }
2602 LIR_Opr md_reg = new_register(T_METADATA);
2603 __ metadata2reg(md->constant_encoding(), md_reg);
2604
2605 increment_counter(new LIR_Address(md_reg, offset,
2606 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2607 }
2608
2609 // emit phi-instruction move after safepoint since this simplifies
2610 // describing the state as the safepoint.
2611 move_to_phi(x->state());
2612
2613 __ jump(x->default_sux());
2614 }
2615
2616 /**
2617 * Emit profiling code if needed for arguments, parameters, return value types
2618 *
2619 * @param md MDO the code will update at runtime
2620 * @param md_base_offset common offset in the MDO for this profile and subsequent ones
2621 * @param md_offset offset in the MDO (on top of md_base_offset) for this profile
2622 * @param profiled_k current profile
2623 * @param obj IR node for the object to be profiled
2624 * @param mdp register to hold the pointer inside the MDO (md + md_base_offset).
2625 * Set once we find an update to make and use for next ones.
2626 * @param not_null true if we know obj cannot be null
2627 * @param signature_at_call_k signature at call for obj
2628 * @param callee_signature_k signature of callee for obj
2629 * at call and callee signatures differ at method handle call
2630 * @return the only klass we know will ever be seen at this profile point
2631 */
2632 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2633 Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2634 ciKlass* callee_signature_k) {
2635 ciKlass* result = NULL;
2636 bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2637 bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2638 // known not to be null or null bit already set and already set to
2639 // unknown: nothing we can do to improve profiling
2640 if (!do_null && !do_update) {
2641 return result;
2642 }
2643
2644 ciKlass* exact_klass = NULL;
2645 Compilation* comp = Compilation::current();
2646 if (do_update) {
2647 // try to find exact type, using CHA if possible, so that loading
2648 // the klass from the object can be avoided
2649 ciType* type = obj->exact_type();
2650 if (type == NULL) {
2651 type = obj->declared_type();
2652 type = comp->cha_exact_type(type);
2653 }
2654 assert(type == NULL || type->is_klass(), "type should be class");
2655 exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2656
2657 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2658 }
2659
2660 if (!do_null && !do_update) {
2661 return result;
2662 }
2663
2664 ciKlass* exact_signature_k = NULL;
2665 if (do_update) {
2666 // Is the type from the signature exact (the only one possible)?
2667 exact_signature_k = signature_at_call_k->exact_klass();
2668 if (exact_signature_k == NULL) {
2669 exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2670 } else {
2671 result = exact_signature_k;
2672 // Known statically. No need to emit any code: prevent
2673 // LIR_Assembler::emit_profile_type() from emitting useless code
2674 profiled_k = ciTypeEntries::with_status(result, profiled_k);
2675 }
2676 // exact_klass and exact_signature_k can be both non NULL but
2677 // different if exact_klass is loaded after the ciObject for
2678 // exact_signature_k is created.
2679 if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2680 // sometimes the type of the signature is better than the best type
2681 // the compiler has
2682 exact_klass = exact_signature_k;
2683 }
2684 if (callee_signature_k != NULL &&
2685 callee_signature_k != signature_at_call_k) {
2686 ciKlass* improved_klass = callee_signature_k->exact_klass();
2687 if (improved_klass == NULL) {
2688 improved_klass = comp->cha_exact_type(callee_signature_k);
2689 }
2690 if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2691 exact_klass = exact_signature_k;
2692 }
2693 }
2694 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2695 }
2696
2697 if (!do_null && !do_update) {
2698 return result;
2699 }
2700
2701 if (mdp == LIR_OprFact::illegalOpr) {
2702 mdp = new_register(T_METADATA);
2703 __ metadata2reg(md->constant_encoding(), mdp);
2704 if (md_base_offset != 0) {
2705 LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2706 mdp = new_pointer_register();
2707 __ leal(LIR_OprFact::address(base_type_address), mdp);
2708 }
2709 }
2710 LIRItem value(obj, this);
2711 value.load_item();
2712 __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2713 value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2714 return result;
2715 }
2716
2717 // profile parameters on entry to the root of the compilation
2718 void LIRGenerator::profile_parameters(Base* x) {
2719 if (compilation()->profile_parameters()) {
2720 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2721 ciMethodData* md = scope()->method()->method_data_or_null();
2722 assert(md != NULL, "Sanity");
2723
2724 if (md->parameters_type_data() != NULL) {
2725 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2726 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
2727 LIR_Opr mdp = LIR_OprFact::illegalOpr;
2728 for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2729 LIR_Opr src = args->at(i);
2730 assert(!src->is_illegal(), "check");
2731 BasicType t = src->type();
2732 if (t == T_OBJECT || t == T_ARRAY) {
2733 intptr_t profiled_k = parameters->type(j);
2734 Local* local = x->state()->local_at(java_index)->as_Local();
2735 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2736 in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2737 profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2738 // If the profile is known statically set it once for all and do not emit any code
2739 if (exact != NULL) {
2740 md->set_parameter_type(j, exact);
2741 }
2742 j++;
2743 }
2744 java_index += type2size[t];
2745 }
2746 }
2747 }
2748 }
2749
2750 void LIRGenerator::do_Base(Base* x) {
2751 __ std_entry(LIR_OprFact::illegalOpr);
2752 // Emit moves from physical registers / stack slots to virtual registers
2753 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2754 IRScope* irScope = compilation()->hir()->top_scope();
2755 int java_index = 0;
2756 for (int i = 0; i < args->length(); i++) {
2757 LIR_Opr src = args->at(i);
2758 assert(!src->is_illegal(), "check");
2759 BasicType t = src->type();
2760
2761 // Types which are smaller than int are passed as int, so
2762 // correct the type which passed.
2763 switch (t) {
2764 case T_BYTE:
2765 case T_BOOLEAN:
2766 case T_SHORT:
2767 case T_CHAR:
2768 t = T_INT;
2769 break;
2770 }
2771
2772 LIR_Opr dest = new_register(t);
2773 __ move(src, dest);
2774
2775 // Assign new location to Local instruction for this local
2776 Local* local = x->state()->local_at(java_index)->as_Local();
2777 assert(local != NULL, "Locals for incoming arguments must have been created");
2778 #ifndef __SOFTFP__
2779 // The java calling convention passes double as long and float as int.
2780 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2781 #endif // __SOFTFP__
2782 local->set_operand(dest);
2783 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2784 java_index += type2size[t];
2785 }
2786
2787 if (compilation()->env()->dtrace_method_probes()) {
2788 BasicTypeList signature;
2789 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
2790 signature.append(T_METADATA); // Method*
2791 LIR_OprList* args = new LIR_OprList();
2792 args->append(getThreadPointer());
2793 LIR_Opr meth = new_register(T_METADATA);
2794 __ metadata2reg(method()->constant_encoding(), meth);
2795 args->append(meth);
2796 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2797 }
2798
2799 if (method()->is_synchronized()) {
2800 LIR_Opr obj;
2801 if (method()->is_static()) {
2802 obj = new_register(T_OBJECT);
2803 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2804 } else {
2805 Local* receiver = x->state()->local_at(0)->as_Local();
2806 assert(receiver != NULL, "must already exist");
2807 obj = receiver->operand();
2808 }
2809 assert(obj->is_valid(), "must be valid");
2810
2811 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2812 LIR_Opr lock = new_register(T_INT);
2813 __ load_stack_address_monitor(0, lock);
2814
2815 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2816 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2817
2818 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2819 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2820 }
2821 }
2822 if (compilation()->age_code()) {
2823 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
2824 decrement_age(info);
2825 }
2826 // increment invocation counters if needed
2827 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2828 profile_parameters(x);
2829 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2830 increment_invocation_counter(info);
2831 }
2832
2833 // all blocks with a successor must end with an unconditional jump
2834 // to the successor even if they are consecutive
2835 __ jump(x->default_sux());
2836 }
2837
2838
2839 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2840 // construct our frame and model the production of incoming pointer
2841 // to the OSR buffer.
2842 __ osr_entry(LIR_Assembler::osrBufferPointer());
2843 LIR_Opr result = rlock_result(x);
2844 __ move(LIR_Assembler::osrBufferPointer(), result);
2845 }
2846
2847
2848 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2849 assert(args->length() == arg_list->length(),
2850 err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
2851 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2852 LIRItem* param = args->at(i);
2853 LIR_Opr loc = arg_list->at(i);
2854 if (loc->is_register()) {
2855 param->load_item_force(loc);
2856 } else {
2857 LIR_Address* addr = loc->as_address_ptr();
2858 param->load_for_store(addr->type());
2859 if (addr->type() == T_OBJECT) {
2860 __ move_wide(param->result(), addr);
2861 } else
2862 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2863 __ unaligned_move(param->result(), addr);
2864 } else {
2865 __ move(param->result(), addr);
2866 }
2867 }
2868 }
2869
2870 if (x->has_receiver()) {
2871 LIRItem* receiver = args->at(0);
2872 LIR_Opr loc = arg_list->at(0);
2873 if (loc->is_register()) {
2874 receiver->load_item_force(loc);
2875 } else {
2876 assert(loc->is_address(), "just checking");
2877 receiver->load_for_store(T_OBJECT);
2878 __ move_wide(receiver->result(), loc->as_address_ptr());
2879 }
2880 }
2881 }
2882
2883
2884 // Visits all arguments, returns appropriate items without loading them
2885 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2886 LIRItemList* argument_items = new LIRItemList();
2887 if (x->has_receiver()) {
2888 LIRItem* receiver = new LIRItem(x->receiver(), this);
2889 argument_items->append(receiver);
2890 }
2891 for (int i = 0; i < x->number_of_arguments(); i++) {
2892 LIRItem* param = new LIRItem(x->argument_at(i), this);
2893 argument_items->append(param);
2894 }
2895 return argument_items;
2896 }
2897
2898
2899 // The invoke with receiver has following phases:
2900 // a) traverse and load/lock receiver;
2901 // b) traverse all arguments -> item-array (invoke_visit_argument)
2902 // c) push receiver on stack
2903 // d) load each of the items and push on stack
2904 // e) unlock receiver
2905 // f) move receiver into receiver-register %o0
2906 // g) lock result registers and emit call operation
2907 //
2908 // Before issuing a call, we must spill-save all values on stack
2909 // that are in caller-save register. "spill-save" moves thos registers
2910 // either in a free callee-save register or spills them if no free
2911 // callee save register is available.
2912 //
2913 // The problem is where to invoke spill-save.
2914 // - if invoked between e) and f), we may lock callee save
2915 // register in "spill-save" that destroys the receiver register
2916 // before f) is executed
2917 // - if we rearange the f) to be earlier, by loading %o0, it
2918 // may destroy a value on the stack that is currently in %o0
2919 // and is waiting to be spilled
2920 // - if we keep the receiver locked while doing spill-save,
2921 // we cannot spill it as it is spill-locked
2922 //
2923 void LIRGenerator::do_Invoke(Invoke* x) {
2924 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2925
2926 LIR_OprList* arg_list = cc->args();
2927 LIRItemList* args = invoke_visit_arguments(x);
2928 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2929
2930 // setup result register
2931 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2932 if (x->type() != voidType) {
2933 result_register = result_register_for(x->type());
2934 }
2935
2936 CodeEmitInfo* info = state_for(x, x->state());
2937
2938 invoke_load_arguments(x, args, arg_list);
2939
2940 if (x->has_receiver()) {
2941 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2942 receiver = args->at(0)->result();
2943 }
2944
2945 // emit invoke code
2946 bool optimized = x->target_is_loaded() && x->target_is_final();
2947 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2948
2949 // JSR 292
2950 // Preserve the SP over MethodHandle call sites.
2951 ciMethod* target = x->target();
2952 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2953 target->is_method_handle_intrinsic() ||
2954 target->is_compiled_lambda_form());
2955 if (is_method_handle_invoke) {
2956 info->set_is_method_handle_invoke(true);
2957 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2958 }
2959
2960 switch (x->code()) {
2961 case Bytecodes::_invokestatic:
2962 __ call_static(target, result_register,
2963 SharedRuntime::get_resolve_static_call_stub(),
2964 arg_list, info);
2965 break;
2966 case Bytecodes::_invokespecial:
2967 case Bytecodes::_invokevirtual:
2968 case Bytecodes::_invokeinterface:
2969 // for final target we still produce an inline cache, in order
2970 // to be able to call mixed mode
2971 if (x->code() == Bytecodes::_invokespecial || optimized) {
2972 __ call_opt_virtual(target, receiver, result_register,
2973 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2974 arg_list, info);
2975 } else if (x->vtable_index() < 0) {
2976 __ call_icvirtual(target, receiver, result_register,
2977 SharedRuntime::get_resolve_virtual_call_stub(),
2978 arg_list, info);
2979 } else {
2980 int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2981 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2982 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2983 }
2984 break;
2985 case Bytecodes::_invokedynamic: {
2986 __ call_dynamic(target, receiver, result_register,
2987 SharedRuntime::get_resolve_static_call_stub(),
2988 arg_list, info);
2989 break;
2990 }
2991 default:
2992 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2993 break;
2994 }
2995
2996 // JSR 292
2997 // Restore the SP after MethodHandle call sites.
2998 if (is_method_handle_invoke) {
2999 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
3000 }
3001
3002 if (x->type()->is_float() || x->type()->is_double()) {
3003 // Force rounding of results from non-strictfp when in strictfp
3004 // scope (or when we don't know the strictness of the callee, to
3005 // be safe.)
3006 if (method()->is_strict()) {
3007 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
3008 result_register = round_item(result_register);
3009 }
3010 }
3011 }
3012
3013 if (result_register->is_valid()) {
3014 LIR_Opr result = rlock_result(x);
3015 __ move(result_register, result);
3016 }
3017 }
3018
3019
3020 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3021 assert(x->number_of_arguments() == 1, "wrong type");
3022 LIRItem value (x->argument_at(0), this);
3023 LIR_Opr reg = rlock_result(x);
3024 value.load_item();
3025 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3026 __ move(tmp, reg);
3027 }
3028
3029
3030
3031 // Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3032 void LIRGenerator::do_IfOp(IfOp* x) {
3033 #ifdef ASSERT
3034 {
3035 ValueTag xtag = x->x()->type()->tag();
3036 ValueTag ttag = x->tval()->type()->tag();
3037 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3038 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3039 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3040 }
3041 #endif
3042
3043 LIRItem left(x->x(), this);
3044 LIRItem right(x->y(), this);
3045 left.load_item();
3046 if (can_inline_as_constant(right.value())) {
3047 right.dont_load_item();
3048 } else {
3049 right.load_item();
3050 }
3051
3052 LIRItem t_val(x->tval(), this);
3053 LIRItem f_val(x->fval(), this);
3054 t_val.dont_load_item();
3055 f_val.dont_load_item();
3056 LIR_Opr reg = rlock_result(x);
3057
3058 __ cmp(lir_cond(x->cond()), left.result(), right.result());
3059 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3060 }
3061
3062 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
3063 assert(x->number_of_arguments() == expected_arguments, "wrong type");
3064 LIR_Opr reg = result_register_for(x->type());
3065 __ call_runtime_leaf(routine, getThreadTemp(),
3066 reg, new LIR_OprList());
3067 LIR_Opr result = rlock_result(x);
3068 __ move(reg, result);
3069 }
3070
3071 #ifdef TRACE_HAVE_INTRINSICS
3072 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
3073 LIR_Opr thread = getThreadPointer();
3074 LIR_Opr osthread = new_pointer_register();
3075 __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
3076 size_t thread_id_size = OSThread::thread_id_size();
3077 if (thread_id_size == (size_t) BytesPerLong) {
3078 LIR_Opr id = new_register(T_LONG);
3079 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
3080 __ convert(Bytecodes::_l2i, id, rlock_result(x));
3081 } else if (thread_id_size == (size_t) BytesPerInt) {
3082 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
3083 } else {
3084 ShouldNotReachHere();
3085 }
3086 }
3087
3088 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
3089 CodeEmitInfo* info = state_for(x);
3090 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
3091 BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
3092 assert(info != NULL, "must have info");
3093 LIRItem arg(x->argument_at(1), this);
3094 arg.load_item();
3095 LIR_Opr klass = new_pointer_register();
3096 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
3097 LIR_Opr id = new_register(T_LONG);
3098 ByteSize offset = TRACE_ID_OFFSET;
3099 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
3100 __ move(trace_id_addr, id);
3101 __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
3102 __ store(id, trace_id_addr);
3103 __ logical_and(id, LIR_OprFact::longConst(~0x3l), id);
3104 __ move(id, rlock_result(x));
3105 }
3106 #endif
3107
3108 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3109 switch (x->id()) {
3110 case vmIntrinsics::_intBitsToFloat :
3111 case vmIntrinsics::_doubleToRawLongBits :
3112 case vmIntrinsics::_longBitsToDouble :
3113 case vmIntrinsics::_floatToRawIntBits : {
3114 do_FPIntrinsics(x);
3115 break;
3116 }
3117
3118 #ifdef TRACE_HAVE_INTRINSICS
3119 case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
3120 case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
3121 case vmIntrinsics::_counterTime:
3122 do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
3123 break;
3124 #endif
3125
3126 case vmIntrinsics::_currentTimeMillis:
3127 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
3128 break;
3129
3130 case vmIntrinsics::_nanoTime:
3131 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
3132 break;
3133
3134 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
3135 case vmIntrinsics::_isInstance: do_isInstance(x); break;
3136 case vmIntrinsics::_getClass: do_getClass(x); break;
3137 case vmIntrinsics::_currentThread: do_currentThread(x); break;
3138
3139 case vmIntrinsics::_dlog: // fall through
3140 case vmIntrinsics::_dlog10: // fall through
3141 case vmIntrinsics::_dabs: // fall through
3142 case vmIntrinsics::_dsqrt: // fall through
3143 case vmIntrinsics::_dtan: // fall through
3144 case vmIntrinsics::_dsin : // fall through
3145 case vmIntrinsics::_dcos : // fall through
3146 case vmIntrinsics::_dexp : // fall through
3147 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break;
3148 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
3149
3150 // java.nio.Buffer.checkIndex
3151 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
3152
3153 case vmIntrinsics::_compareAndSwapObject:
3154 do_CompareAndSwap(x, objectType);
3155 break;
3156 case vmIntrinsics::_compareAndSwapInt:
3157 do_CompareAndSwap(x, intType);
3158 break;
3159 case vmIntrinsics::_compareAndSwapLong:
3160 do_CompareAndSwap(x, longType);
3161 break;
3162
3163 case vmIntrinsics::_loadFence :
3164 if (os::is_MP()) __ membar_acquire();
3165 break;
3166 case vmIntrinsics::_storeFence:
3167 if (os::is_MP()) __ membar_release();
3168 break;
3169 case vmIntrinsics::_fullFence :
3170 if (os::is_MP()) __ membar();
3171 break;
3172
3173 case vmIntrinsics::_Reference_get:
3174 do_Reference_get(x);
3175 break;
3176
3177 case vmIntrinsics::_updateCRC32:
3178 case vmIntrinsics::_updateBytesCRC32:
3179 case vmIntrinsics::_updateByteBufferCRC32:
3180 do_update_CRC32(x);
3181 break;
3182
3183 default: ShouldNotReachHere(); break;
3184 }
3185 }
3186
3187 void LIRGenerator::profile_arguments(ProfileCall* x) {
3188 if (compilation()->profile_arguments()) {
3189 int bci = x->bci_of_invoke();
3190 ciMethodData* md = x->method()->method_data_or_null();
3191 ciProfileData* data = md->bci_to_data(bci);
3192 if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3193 (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3194 ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3195 int base_offset = md->byte_offset_of_slot(data, extra);
3196 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3197 ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3198
3199 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3200 int start = 0;
3201 int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3202 if (x->inlined() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3203 // first argument is not profiled at call (method handle invoke)
3204 assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3205 start = 1;
3206 }
3207 ciSignature* callee_signature = x->callee()->signature();
3208 // method handle call to virtual method
3209 bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3210 ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
3211
3212 bool ignored_will_link;
3213 ciSignature* signature_at_call = NULL;
3214 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3215 ciSignatureStream signature_at_call_stream(signature_at_call);
3216
3217 // if called through method handle invoke, some arguments may have been popped
3218 for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3219 int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3220 ciKlass* exact = profile_type(md, base_offset, off,
3221 args->type(i), x->profiled_arg_at(i+start), mdp,
3222 !x->arg_needs_null_check(i+start),
3223 signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3224 if (exact != NULL) {
3225 md->set_argument_type(bci, i, exact);
3226 }
3227 }
3228 } else {
3229 #ifdef ASSERT
3230 Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3231 int n = x->nb_profiled_args();
3232 assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3233 (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3234 "only at JSR292 bytecodes");
3235 #endif
3236 }
3237 }
3238 }
3239
3240 // profile parameters on entry to an inlined method
3241 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3242 if (compilation()->profile_parameters() && x->inlined()) {
3243 ciMethodData* md = x->callee()->method_data_or_null();
3244 if (md != NULL) {
3245 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3246 if (parameters_type_data != NULL) {
3247 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
3248 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3249 bool has_receiver = !x->callee()->is_static();
3250 ciSignature* sig = x->callee()->signature();
3251 ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
3252 int i = 0; // to iterate on the Instructions
3253 Value arg = x->recv();
3254 bool not_null = false;
3255 int bci = x->bci_of_invoke();
3256 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3257 // The first parameter is the receiver so that's what we start
3258 // with if it exists. One exception is method handle call to
3259 // virtual method: the receiver is in the args list
3260 if (arg == NULL || !Bytecodes::has_receiver(bc)) {
3261 i = 1;
3262 arg = x->profiled_arg_at(0);
3263 not_null = !x->arg_needs_null_check(0);
3264 }
3265 int k = 0; // to iterate on the profile data
3266 for (;;) {
3267 intptr_t profiled_k = parameters->type(k);
3268 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3269 in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3270 profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
3271 // If the profile is known statically set it once for all and do not emit any code
3272 if (exact != NULL) {
3273 md->set_parameter_type(k, exact);
3274 }
3275 k++;
3276 if (k >= parameters_type_data->number_of_parameters()) {
3277 #ifdef ASSERT
3278 int extra = 0;
3279 if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3280 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3281 x->recv() != NULL && Bytecodes::has_receiver(bc)) {
3282 extra += 1;
3283 }
3284 assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3285 #endif
3286 break;
3287 }
3288 arg = x->profiled_arg_at(i);
3289 not_null = !x->arg_needs_null_check(i);
3290 i++;
3291 }
3292 }
3293 }
3294 }
3295 }
3296
3297 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3298 // Need recv in a temporary register so it interferes with the other temporaries
3299 LIR_Opr recv = LIR_OprFact::illegalOpr;
3300 LIR_Opr mdo = new_register(T_OBJECT);
3301 // tmp is used to hold the counters on SPARC
3302 LIR_Opr tmp = new_pointer_register();
3303
3304 if (x->nb_profiled_args() > 0) {
3305 profile_arguments(x);
3306 }
3307
3308 // profile parameters on inlined method entry including receiver
3309 if (x->recv() != NULL || x->nb_profiled_args() > 0) {
3310 profile_parameters_at_call(x);
3311 }
3312
3313 if (x->recv() != NULL) {
3314 LIRItem value(x->recv(), this);
3315 value.load_item();
3316 recv = new_register(T_OBJECT);
3317 __ move(value.result(), recv);
3318 }
3319 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3320 }
3321
3322 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3323 int bci = x->bci_of_invoke();
3324 ciMethodData* md = x->method()->method_data_or_null();
3325 ciProfileData* data = md->bci_to_data(bci);
3326 assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3327 ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3328 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3329
3330 bool ignored_will_link;
3331 ciSignature* signature_at_call = NULL;
3332 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3333
3334 // The offset within the MDO of the entry to update may be too large
3335 // to be used in load/store instructions on some platforms. So have
3336 // profile_type() compute the address of the profile in a register.
3337 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3338 ret->type(), x->ret(), mdp,
3339 !x->needs_null_check(),
3340 signature_at_call->return_type()->as_klass(),
3341 x->callee()->signature()->return_type()->as_klass());
3342 if (exact != NULL) {
3343 md->set_return_type(bci, exact);
3344 }
3345 }
3346
3347 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3348 // We can safely ignore accessors here, since c2 will inline them anyway,
3349 // accessors are also always mature.
3350 if (!x->inlinee()->is_accessor()) {
3351 CodeEmitInfo* info = state_for(x, x->state(), true);
3352 // Notify the runtime very infrequently only to take care of counter overflows
3353 int freq_log = Tier23InlineeNotifyFreqLog;
3354 double scale;
3355 if (_method->has_option_value("CompileThresholdScaling", scale)) {
3356 freq_log = Arguments::scaled_freq_log(freq_log, scale);
3357 }
3358 increment_event_counter_impl(info, x->inlinee(), right_n_bits(freq_log), InvocationEntryBci, false, true);
3359 }
3360 }
3361
3362 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3363 int freq_log;
3364 int level = compilation()->env()->comp_level();
3365 if (level == CompLevel_limited_profile) {
3366 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3367 } else if (level == CompLevel_full_profile) {
3368 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3369 } else {
3370 ShouldNotReachHere();
3371 }
3372 // Increment the appropriate invocation/backedge counter and notify the runtime.
3373 double scale;
3374 if (_method->has_option_value("CompileThresholdScaling", scale)) {
3375 freq_log = Arguments::scaled_freq_log(freq_log, scale);
3376 }
3377 increment_event_counter_impl(info, info->scope()->method(), right_n_bits(freq_log), bci, backedge, true);
3378 }
3379
3380 void LIRGenerator::decrement_age(CodeEmitInfo* info) {
3381 ciMethod* method = info->scope()->method();
3382 MethodCounters* mc_adr = method->ensure_method_counters();
3383 if (mc_adr != NULL) {
3384 LIR_Opr mc = new_pointer_register();
3385 __ move(LIR_OprFact::intptrConst(mc_adr), mc);
3386 int offset = in_bytes(MethodCounters::nmethod_age_offset());
3387 LIR_Address* counter = new LIR_Address(mc, offset, T_INT);
3388 LIR_Opr result = new_register(T_INT);
3389 __ load(counter, result);
3390 __ sub(result, LIR_OprFact::intConst(1), result);
3391 __ store(result, counter);
3392 // DeoptimizeStub will reexecute from the current state in code info.
3393 CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured,
3394 Deoptimization::Action_make_not_entrant);
3395 __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0));
3396 __ branch(lir_cond_lessEqual, T_INT, deopt);
3397 }
3398 }
3399
3400
3401 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3402 ciMethod *method, int frequency,
3403 int bci, bool backedge, bool notify) {
3404 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be x^2 - 1 or 0");
3405 int level = _compilation->env()->comp_level();
3406 assert(level > CompLevel_simple, "Shouldn't be here");
3407
3408 int offset = -1;
3409 LIR_Opr counter_holder;
3410 if (level == CompLevel_limited_profile) {
3411 MethodCounters* counters_adr = method->ensure_method_counters();
3412 if (counters_adr == NULL) {
3413 bailout("method counters allocation failed");
3414 return;
3415 }
3416 counter_holder = new_pointer_register();
3417 __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3418 offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3419 MethodCounters::invocation_counter_offset());
3420 } else if (level == CompLevel_full_profile) {
3421 counter_holder = new_register(T_METADATA);
3422 offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3423 MethodData::invocation_counter_offset());
3424 ciMethodData* md = method->method_data_or_null();
3425 assert(md != NULL, "Sanity");
3426 __ metadata2reg(md->constant_encoding(), counter_holder);
3427 } else {
3428 ShouldNotReachHere();
3429 }
3430 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3431 LIR_Opr result = new_register(T_INT);
3432 __ load(counter, result);
3433 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3434 __ store(result, counter);
3435 if (notify) {
3436 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3437 LIR_Opr meth = new_register(T_METADATA);
3438 __ metadata2reg(method->constant_encoding(), meth);
3439 __ logical_and(result, mask, result);
3440 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3441 // The bci for info can point to cmp for if's we want the if bci
3442 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3443 __ branch(lir_cond_equal, T_INT, overflow);
3444 __ branch_destination(overflow->continuation());
3445 }
3446 }
3447
3448 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3449 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3450 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3451
3452 if (x->pass_thread()) {
3453 signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
3454 args->append(getThreadPointer());
3455 }
3456
3457 for (int i = 0; i < x->number_of_arguments(); i++) {
3458 Value a = x->argument_at(i);
3459 LIRItem* item = new LIRItem(a, this);
3460 item->load_item();
3461 args->append(item->result());
3462 signature->append(as_BasicType(a->type()));
3463 }
3464
3465 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3466 if (x->type() == voidType) {
3467 set_no_result(x);
3468 } else {
3469 __ move(result, rlock_result(x));
3470 }
3471 }
3472
3473 #ifdef ASSERT
3474 void LIRGenerator::do_Assert(Assert *x) {
3475 ValueTag tag = x->x()->type()->tag();
3476 If::Condition cond = x->cond();
3477
3478 LIRItem xitem(x->x(), this);
3479 LIRItem yitem(x->y(), this);
3480 LIRItem* xin = &xitem;
3481 LIRItem* yin = &yitem;
3482
3483 assert(tag == intTag, "Only integer assertions are valid!");
3484
3485 xin->load_item();
3486 yin->dont_load_item();
3487
3488 set_no_result(x);
3489
3490 LIR_Opr left = xin->result();
3491 LIR_Opr right = yin->result();
3492
3493 __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3494 }
3495 #endif
3496
3497 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3498
3499
3500 Instruction *a = x->x();
3501 Instruction *b = x->y();
3502 if (!a || StressRangeCheckElimination) {
3503 assert(!b || StressRangeCheckElimination, "B must also be null");
3504
3505 CodeEmitInfo *info = state_for(x, x->state());
3506 CodeStub* stub = new PredicateFailedStub(info);
3507
3508 __ jump(stub);
3509 } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3510 int a_int = a->type()->as_IntConstant()->value();
3511 int b_int = b->type()->as_IntConstant()->value();
3512
3513 bool ok = false;
3514
3515 switch(x->cond()) {
3516 case Instruction::eql: ok = (a_int == b_int); break;
3517 case Instruction::neq: ok = (a_int != b_int); break;
3518 case Instruction::lss: ok = (a_int < b_int); break;
3519 case Instruction::leq: ok = (a_int <= b_int); break;
3520 case Instruction::gtr: ok = (a_int > b_int); break;
3521 case Instruction::geq: ok = (a_int >= b_int); break;
3522 case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3523 case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3524 default: ShouldNotReachHere();
3525 }
3526
3527 if (ok) {
3528
3529 CodeEmitInfo *info = state_for(x, x->state());
3530 CodeStub* stub = new PredicateFailedStub(info);
3531
3532 __ jump(stub);
3533 }
3534 } else {
3535
3536 ValueTag tag = x->x()->type()->tag();
3537 If::Condition cond = x->cond();
3538 LIRItem xitem(x->x(), this);
3539 LIRItem yitem(x->y(), this);
3540 LIRItem* xin = &xitem;
3541 LIRItem* yin = &yitem;
3542
3543 assert(tag == intTag, "Only integer deoptimizations are valid!");
3544
3545 xin->load_item();
3546 yin->dont_load_item();
3547 set_no_result(x);
3548
3549 LIR_Opr left = xin->result();
3550 LIR_Opr right = yin->result();
3551
3552 CodeEmitInfo *info = state_for(x, x->state());
3553 CodeStub* stub = new PredicateFailedStub(info);
3554
3555 __ cmp(lir_cond(cond), left, right);
3556 __ branch(lir_cond(cond), right->type(), stub);
3557 }
3558 }
3559
3560
3561 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3562 LIRItemList args(1);
3563 LIRItem value(arg1, this);
3564 args.append(&value);
3565 BasicTypeList signature;
3566 signature.append(as_BasicType(arg1->type()));
3567
3568 return call_runtime(&signature, &args, entry, result_type, info);
3569 }
3570
3571
3572 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3573 LIRItemList args(2);
3574 LIRItem value1(arg1, this);
3575 LIRItem value2(arg2, this);
3576 args.append(&value1);
3577 args.append(&value2);
3578 BasicTypeList signature;
3579 signature.append(as_BasicType(arg1->type()));
3580 signature.append(as_BasicType(arg2->type()));
3581
3582 return call_runtime(&signature, &args, entry, result_type, info);
3583 }
3584
3585
3586 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3587 address entry, ValueType* result_type, CodeEmitInfo* info) {
3588 // get a result register
3589 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3590 LIR_Opr result = LIR_OprFact::illegalOpr;
3591 if (result_type->tag() != voidTag) {
3592 result = new_register(result_type);
3593 phys_reg = result_register_for(result_type);
3594 }
3595
3596 // move the arguments into the correct location
3597 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3598 assert(cc->length() == args->length(), "argument mismatch");
3599 for (int i = 0; i < args->length(); i++) {
3600 LIR_Opr arg = args->at(i);
3601 LIR_Opr loc = cc->at(i);
3602 if (loc->is_register()) {
3603 __ move(arg, loc);
3604 } else {
3605 LIR_Address* addr = loc->as_address_ptr();
3606 // if (!can_store_as_constant(arg)) {
3607 // LIR_Opr tmp = new_register(arg->type());
3608 // __ move(arg, tmp);
3609 // arg = tmp;
3610 // }
3611 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3612 __ unaligned_move(arg, addr);
3613 } else {
3614 __ move(arg, addr);
3615 }
3616 }
3617 }
3618
3619 if (info) {
3620 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3621 } else {
3622 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3623 }
3624 if (result->is_valid()) {
3625 __ move(phys_reg, result);
3626 }
3627 return result;
3628 }
3629
3630
3631 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3632 address entry, ValueType* result_type, CodeEmitInfo* info) {
3633 // get a result register
3634 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3635 LIR_Opr result = LIR_OprFact::illegalOpr;
3636 if (result_type->tag() != voidTag) {
3637 result = new_register(result_type);
3638 phys_reg = result_register_for(result_type);
3639 }
3640
3641 // move the arguments into the correct location
3642 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3643
3644 assert(cc->length() == args->length(), "argument mismatch");
3645 for (int i = 0; i < args->length(); i++) {
3646 LIRItem* arg = args->at(i);
3647 LIR_Opr loc = cc->at(i);
3648 if (loc->is_register()) {
3649 arg->load_item_force(loc);
3650 } else {
3651 LIR_Address* addr = loc->as_address_ptr();
3652 arg->load_for_store(addr->type());
3653 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3654 __ unaligned_move(arg->result(), addr);
3655 } else {
3656 __ move(arg->result(), addr);
3657 }
3658 }
3659 }
3660
3661 if (info) {
3662 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3663 } else {
3664 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3665 }
3666 if (result->is_valid()) {
3667 __ move(phys_reg, result);
3668 }
3669 return result;
3670 }
3671
3672 void LIRGenerator::do_MemBar(MemBar* x) {
3673 if (os::is_MP()) {
3674 LIR_Code code = x->code();
3675 switch(code) {
3676 case lir_membar_acquire : __ membar_acquire(); break;
3677 case lir_membar_release : __ membar_release(); break;
3678 case lir_membar : __ membar(); break;
3679 case lir_membar_loadload : __ membar_loadload(); break;
3680 case lir_membar_storestore: __ membar_storestore(); break;
3681 case lir_membar_loadstore : __ membar_loadstore(); break;
3682 case lir_membar_storeload : __ membar_storeload(); break;
3683 default : ShouldNotReachHere(); break;
3684 }
3685 }
3686 }
--- EOF ---