1 /*
2 * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
464 __ movptr(STATE(_locals), locals); // state->_locals = locals()
465 __ movptr(STATE(_self_link), state); // point to self
466 __ movptr(STATE(_prev_link), rax); // state->_link = state on entry (NULL or previous state)
467 __ movptr(STATE(_sender_sp), sender_sp); // state->_sender_sp = sender_sp
468 #ifdef _LP64
469 __ movptr(STATE(_thread), r15_thread); // state->_thread = current JavaThread*
470 #else
471 __ get_thread(rax); // get vm's javathread*
472 __ movptr(STATE(_thread), rax); // state->_thread = current JavaThread*
473 #endif // _LP64
474 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); // get constMethodOop
475 __ lea(rdx, Address(rdx, constMethodOopDesc::codes_offset())); // get code base
476 if (native) {
477 __ movptr(STATE(_bcp), (int32_t)NULL_WORD); // state->_bcp = NULL
478 } else {
479 __ movptr(STATE(_bcp), rdx); // state->_bcp = codes()
480 }
481 __ xorptr(rdx, rdx);
482 __ movptr(STATE(_oop_temp), rdx); // state->_oop_temp = NULL (only really needed for native)
483 __ movptr(STATE(_mdx), rdx); // state->_mdx = NULL
484 __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
485 __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
486 __ movptr(STATE(_constants), rdx); // state->_constants = constants()
487
488 __ movptr(STATE(_method), rbx); // state->_method = method()
489 __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry); // state->_msg = initial method entry
490 __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD); // state->_result._to_call._callee = NULL
491
492
493 __ movptr(STATE(_monitor_base), rsp); // set monitor block bottom (grows down) this would point to entry [0]
494 // entries run from -1..x where &monitor[x] ==
495
496 {
497 // Must not attempt to lock method until we enter interpreter as gc won't be able to find the
498 // initial frame. However we allocate a free monitor so we don't have to shuffle the expression stack
499 // immediately.
500
501 // synchronize method
502 const Address access_flags (rbx, methodOopDesc::access_flags_offset());
503 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
504 Label not_synced;
505
506 __ movl(rax, access_flags);
507 __ testl(rax, JVM_ACC_SYNCHRONIZED);
508 __ jcc(Assembler::zero, not_synced);
509
510 // Allocate initial monitor and pre initialize it
511 // get synchronization object
512
513 Label done;
514 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
515 __ movl(rax, access_flags);
516 __ testl(rax, JVM_ACC_STATIC);
517 __ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case)
518 __ jcc(Assembler::zero, done);
519 __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
520 __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
521 __ movptr(rax, Address(rax, mirror_offset));
522 __ bind(done);
523 // add space for monitor & lock
524 __ subptr(rsp, entry_size); // add space for a monitor entry
525 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
526 __ bind(not_synced);
527 }
528
529 __ movptr(STATE(_stack_base), rsp); // set expression stack base ( == &monitors[-count])
530 if (native) {
531 __ movptr(STATE(_stack), rsp); // set current expression stack tos
532 __ movptr(STATE(_stack_limit), rsp);
533 } else {
534 __ subptr(rsp, wordSize); // pre-push stack
535 __ movptr(STATE(_stack), rsp); // set current expression stack tos
536
537 // compute full expression stack limit
538
539 const Address size_of_stack (rbx, methodOopDesc::max_stack_offset());
752 __ movptr(monitor, STATE(_monitor_base)); // get monitor bottom limit
753 __ subptr(monitor, entry_size); // point to initial monitor
754
755 #ifdef ASSERT
756 { Label L;
757 __ movl(rax, access_flags);
758 __ testl(rax, JVM_ACC_SYNCHRONIZED);
759 __ jcc(Assembler::notZero, L);
760 __ stop("method doesn't need synchronization");
761 __ bind(L);
762 }
763 #endif // ASSERT
764 // get synchronization object
765 { Label done;
766 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
767 __ movl(rax, access_flags);
768 __ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case)
769 __ testl(rax, JVM_ACC_STATIC);
770 __ movptr(rax, Address(rdi, 0)); // get receiver (assume this is frequent case)
771 __ jcc(Assembler::zero, done);
772 __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
773 __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
774 __ movptr(rax, Address(rax, mirror_offset));
775 __ bind(done);
776 }
777 #ifdef ASSERT
778 { Label L;
779 __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes())); // correct object?
780 __ jcc(Assembler::equal, L);
781 __ stop("wrong synchronization lobject"); // TODO: typo in stop message ("lobject" -> "object"); runtime string deliberately left unchanged here
782 __ bind(L);
783 }
784 #endif // ASSERT
785 // can destroy rax, rdx|c_rarg1, rcx, and (via call_VM) rdi!
786 __ lock_object(monitor);
787 }
788
789 // Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
790
791 address InterpreterGenerator::generate_accessor_entry(void) {
792
804 Label slow_path;
805 // If we need a safepoint check, generate full interpreter entry.
806 ExternalAddress state(SafepointSynchronize::address_of_state()); // NOTE(review): this local appears unused -- the cmp32 below constructs its own ExternalAddress; confirm and remove
807 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
808 SafepointSynchronize::_not_synchronized);
809
810 __ jcc(Assembler::notEqual, slow_path);
811 // ASM/C++ Interpreter
812 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
813 // Note: We can only use this code if the getfield has been resolved
814 // and if we don't have a null-pointer exception => check for
815 // these conditions first and use slow path if necessary.
816 // rbx,: method
817 // rcx: receiver
818 __ movptr(rax, Address(rsp, wordSize));
819
820 // check if local 0 != NULL and read field
821 __ testptr(rax, rax);
822 __ jcc(Assembler::zero, slow_path);
823
824 __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
825 // read first instruction word and extract bytecode @ 1 and index @ 2
826 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
827 __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
828 // Shift codes right to get the index on the right.
829 // The bytecode fetched looks like <index><0xb4><0x2a>
830 __ shrl(rdx, 2*BitsPerByte);
831 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
832 __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
833
834 // rax,: local 0
835 // rbx,: method
836 // rcx: receiver - do not destroy since it is needed for slow path!
837 // rcx: scratch
838 // rdx: constant pool cache index
839 // rdi: constant pool cache
840 // rsi/r13: sender sp
841
842 // check if getfield has been resolved and read constant pool cache entry
843 // check the validity of the cache entry by testing whether _indices field
844 // contains Bytecode::_getfield in b1 byte.
845 assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
846 __ movl(rcx,
1168
1169 // get native function entry point
1170 { Label L;
1171 __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
1172 __ testptr(rax, rax);
1173 __ jcc(Assembler::notZero, L);
1174 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
1175 __ movptr(method, STATE(_method));
1176 __ verify_oop(method);
1177 __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
1178 __ bind(L);
1179 }
1180
1181 // pass mirror handle if static call
1182 { Label L;
1183 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
1184 __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
1185 __ testl(t, JVM_ACC_STATIC);
1186 __ jcc(Assembler::zero, L);
1187 // get mirror
1188 __ movptr(t, Address(method, methodOopDesc:: constants_offset()));
1189 __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
1190 __ movptr(t, Address(t, mirror_offset));
1191 // copy mirror into activation object
1192 __ movptr(STATE(_oop_temp), t);
1193 // pass handle to mirror
1194 #ifdef _LP64
1195 __ lea(c_rarg1, STATE(_oop_temp));
1196 #else
1197 __ lea(t, STATE(_oop_temp));
1198 __ movptr(Address(rsp, wordSize), t);
1199 #endif // _LP64
1200 __ bind(L);
1201 }
1202 #ifdef ASSERT
1203 {
1204 Label L;
1205 __ push(t);
1206 __ get_thread(t); // get vm's javathread*
1207 __ cmpptr(t, STATE(_thread));
1208 __ jcc(Assembler::equal, L);
|
1 /*
2 * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
464 __ movptr(STATE(_locals), locals); // state->_locals = locals()
465 __ movptr(STATE(_self_link), state); // point to self
466 __ movptr(STATE(_prev_link), rax); // state->_link = state on entry (NULL or previous state)
467 __ movptr(STATE(_sender_sp), sender_sp); // state->_sender_sp = sender_sp
468 #ifdef _LP64
469 __ movptr(STATE(_thread), r15_thread); // state->_thread = current JavaThread*
470 #else
471 __ get_thread(rax); // get vm's javathread*
472 __ movptr(STATE(_thread), rax); // state->_thread = current JavaThread*
473 #endif // _LP64
474 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); // get constMethodOop
475 __ lea(rdx, Address(rdx, constMethodOopDesc::codes_offset())); // get code base
476 if (native) {
477 __ movptr(STATE(_bcp), (int32_t)NULL_WORD); // state->_bcp = NULL
478 } else {
479 __ movptr(STATE(_bcp), rdx); // state->_bcp = codes()
480 }
481 __ xorptr(rdx, rdx);
482 __ movptr(STATE(_oop_temp), rdx); // state->_oop_temp = NULL (only really needed for native)
483 __ movptr(STATE(_mdx), rdx); // state->_mdx = NULL
484 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
485 __ movptr(rdx, Address(rdx, constMethodOopDesc::constants_offset()));
486 __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
487 __ movptr(STATE(_constants), rdx); // state->_constants = constants()
488
489 __ movptr(STATE(_method), rbx); // state->_method = method()
490 __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry); // state->_msg = initial method entry
491 __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD); // state->_result._to_call._callee = NULL
492
493
494 __ movptr(STATE(_monitor_base), rsp); // set monitor block bottom (grows down) this would point to entry [0]
495 // entries run from -1..x where &monitor[x] ==
496
497 {
498 // Must not attempt to lock method until we enter interpreter as gc won't be able to find the
499 // initial frame. However we allocate a free monitor so we don't have to shuffle the expression stack
500 // immediately.
501
502 // synchronize method
503 const Address access_flags (rbx, methodOopDesc::access_flags_offset());
504 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
505 Label not_synced;
506
507 __ movl(rax, access_flags);
508 __ testl(rax, JVM_ACC_SYNCHRONIZED);
509 __ jcc(Assembler::zero, not_synced);
510
511 // Allocate initial monitor and pre initialize it
512 // get synchronization object
513
514 Label done;
515 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
516 __ movl(rax, access_flags);
517 __ testl(rax, JVM_ACC_STATIC);
518 __ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case)
519 __ jcc(Assembler::zero, done);
520 __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
521 __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
522 __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
523 __ movptr(rax, Address(rax, mirror_offset));
524 __ bind(done);
525 // add space for monitor & lock
526 __ subptr(rsp, entry_size); // add space for a monitor entry
527 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
528 __ bind(not_synced);
529 }
530
531 __ movptr(STATE(_stack_base), rsp); // set expression stack base ( == &monitors[-count])
532 if (native) {
533 __ movptr(STATE(_stack), rsp); // set current expression stack tos
534 __ movptr(STATE(_stack_limit), rsp);
535 } else {
536 __ subptr(rsp, wordSize); // pre-push stack
537 __ movptr(STATE(_stack), rsp); // set current expression stack tos
538
539 // compute full expression stack limit
540
541 const Address size_of_stack (rbx, methodOopDesc::max_stack_offset());
754 __ movptr(monitor, STATE(_monitor_base)); // get monitor bottom limit
755 __ subptr(monitor, entry_size); // point to initial monitor
756
757 #ifdef ASSERT
758 { Label L;
759 __ movl(rax, access_flags);
760 __ testl(rax, JVM_ACC_SYNCHRONIZED);
761 __ jcc(Assembler::notZero, L);
762 __ stop("method doesn't need synchronization");
763 __ bind(L);
764 }
765 #endif // ASSERT
766 // get synchronization object
767 { Label done;
768 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
769 __ movl(rax, access_flags);
770 __ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case)
771 __ testl(rax, JVM_ACC_STATIC);
772 __ movptr(rax, Address(rdi, 0)); // get receiver (assume this is frequent case)
773 __ jcc(Assembler::zero, done);
774 __ movptr(rax, Address(rbx, methodOopDesc::const_offset()));
775 __ movptr(rax, Address(rax, constMethodOopDesc::constants_offset()));
776 __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
777 __ movptr(rax, Address(rax, mirror_offset));
778 __ bind(done);
779 }
780 #ifdef ASSERT
781 { Label L;
782 __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes())); // correct object?
783 __ jcc(Assembler::equal, L);
784 __ stop("wrong synchronization lobject"); // TODO: typo in stop message ("lobject" -> "object"); runtime string deliberately left unchanged here
785 __ bind(L);
786 }
787 #endif // ASSERT
788 // can destroy rax, rdx|c_rarg1, rcx, and (via call_VM) rdi!
789 __ lock_object(monitor);
790 }
791
792 // Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
793
794 address InterpreterGenerator::generate_accessor_entry(void) {
795
807 Label slow_path;
808 // If we need a safepoint check, generate full interpreter entry.
809 ExternalAddress state(SafepointSynchronize::address_of_state()); // NOTE(review): this local appears unused -- the cmp32 below constructs its own ExternalAddress; confirm and remove
810 __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
811 SafepointSynchronize::_not_synchronized);
812
813 __ jcc(Assembler::notEqual, slow_path);
814 // ASM/C++ Interpreter
815 // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
816 // Note: We can only use this code if the getfield has been resolved
817 // and if we don't have a null-pointer exception => check for
818 // these conditions first and use slow path if necessary.
819 // rbx,: method
820 // rcx: receiver
821 __ movptr(rax, Address(rsp, wordSize));
822
823 // check if local 0 != NULL and read field
824 __ testptr(rax, rax);
825 __ jcc(Assembler::zero, slow_path);
826
827 // read first instruction word and extract bytecode @ 1 and index @ 2
828 __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
829 __ movptr(rdi, Address(rdx, constMethodOopDesc::constants_offset()));
830 __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
831 // Shift codes right to get the index on the right.
832 // The bytecode fetched looks like <index><0xb4><0x2a>
833 __ shrl(rdx, 2*BitsPerByte);
834 __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
835 __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
836
837 // rax,: local 0
838 // rbx,: method
839 // rcx: receiver - do not destroy since it is needed for slow path!
840 // rcx: scratch
841 // rdx: constant pool cache index
842 // rdi: constant pool cache
843 // rsi/r13: sender sp
844
845 // check if getfield has been resolved and read constant pool cache entry
846 // check the validity of the cache entry by testing whether _indices field
847 // contains Bytecode::_getfield in b1 byte.
848 assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
849 __ movl(rcx,
1171
1172 // get native function entry point
1173 { Label L;
1174 __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
1175 __ testptr(rax, rax);
1176 __ jcc(Assembler::notZero, L);
1177 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
1178 __ movptr(method, STATE(_method));
1179 __ verify_oop(method);
1180 __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
1181 __ bind(L);
1182 }
1183
1184 // pass mirror handle if static call
1185 { Label L;
1186 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
1187 __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
1188 __ testl(t, JVM_ACC_STATIC);
1189 __ jcc(Assembler::zero, L);
1190 // get mirror
1191 __ movptr(t, Address(method, methodOopDesc:: const_offset()));
1192 __ movptr(t, Address(t, constMethodOopDesc::constants_offset()));
1193 __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
1194 __ movptr(t, Address(t, mirror_offset));
1195 // copy mirror into activation object
1196 __ movptr(STATE(_oop_temp), t);
1197 // pass handle to mirror
1198 #ifdef _LP64
1199 __ lea(c_rarg1, STATE(_oop_temp));
1200 #else
1201 __ lea(t, STATE(_oop_temp));
1202 __ movptr(Address(rsp, wordSize), t);
1203 #endif // _LP64
1204 __ bind(L);
1205 }
1206 #ifdef ASSERT
1207 {
1208 Label L;
1209 __ push(t);
1210 __ get_thread(t); // get vm's javathread*
1211 __ cmpptr(t, STATE(_thread));
1212 __ jcc(Assembler::equal, L);
|