< prev index next >
src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
Print this page
@@ -32,10 +32,11 @@
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
+#include "ci/ciValueKlass.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_aarch64.hpp"
@@ -503,10 +504,25 @@
void LIR_Assembler::return_op(LIR_Opr result) {
assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
ciMethod* method = compilation()->method();
+
+ if (ValueTypeReturnedAsFields && method->signature()->returns_never_null()) {
+ ciType* return_type = method->return_type();
+ if (return_type->is_valuetype()) {
+ ciValueKlass* vk = return_type->as_value_klass();
+ if (vk->can_be_returned_as_fields()) {
+ address unpack_handler = vk->unpack_handler();
+ assert(unpack_handler != NULL, "must be");
+ __ far_call(RuntimeAddress(unpack_handler));
+ // At this point, r0 points to the value object (for interpreter or C1 caller).
+ // The fields of the object are copied into registers (for C2 caller).
+ }
+ }
+ }
+
// Pop the stack before the safepoint code
__ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
__ reserved_stack_check();
@@ -515,12 +531,12 @@
address polling_page(os::get_polling_page());
__ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
__ ret(lr);
}
-void LIR_Assembler::store_value_type_fields_to_buf(ciValueKlass* vk) {
- __ store_value_type_fields_to_buf(vk);
+int LIR_Assembler::store_value_type_fields_to_buf(ciValueKlass* vk) {
+ return (__ store_value_type_fields_to_buf(vk, false));
}
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
address polling_page(os::get_polling_page());
guarantee(info != NULL, "Shouldn't be NULL");
@@ -680,13 +696,15 @@
break;
case T_INT:
assert(c->as_jint() == 0, "should be");
insn = &Assembler::strw;
break;
- case T_VALUETYPE: // DMS CHECK: the code is significantly differ from x86
+ case T_VALUETYPE:
case T_OBJECT:
case T_ARRAY:
+ // Unlike x86, the non-null constant case is not handled on aarch64.
+ // FIXME: determine whether it needs to be handled here as well.
assert(c->as_jobject() == 0, "should be");
if (UseCompressedOops && !wide) {
insn = &Assembler::strw;
} else {
insn = &Assembler::str;
@@ -1638,14 +1656,20 @@
__ b(*op->stub()->entry());
} else {
Register left_klass_op = op->left_klass_op()->as_register();
Register right_klass_op = op->right_klass_op()->as_register();
- // DMS CHECK, likely x86 bug, make aarch64 implementation correct
- __ load_klass(left_klass_op, left);
- __ load_klass(right_klass_op, right);
+ if (UseCompressedOops) {
+ __ ldrw(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
+ __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
+ __ cmpw(left_klass_op, right_klass_op);
+ } else {
+ __ ldr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
+ __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
__ cmp(left_klass_op, right_klass_op);
+ }
+
__ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
// fall through to L_oops_not_equal
}
__ bind(L_oops_not_equal);
< prev index next >