src/share/vm/c1/c1_LIRGenerator.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File 8068945-8u-patched Sdiff src/share/vm/c1

src/share/vm/c1/c1_LIRGenerator.cpp

Print this page
rev 7386 : 8068945: Use RBP register as proper frame pointer in JIT compiled code on x86
Summary: Introduce the PreserveFramePointer flag to control if RBP is used as the frame pointer or as a general purpose register.
Reviewed-by: kvn, roland, dlong, enevill, shade


2870     argument_items->append(receiver);
2871   }
2872   for (int i = 0; i < x->number_of_arguments(); i++) {
2873     LIRItem* param = new LIRItem(x->argument_at(i), this);
2874     argument_items->append(param);
2875   }
2876   return argument_items;
2877 }
2878 
2879 
2880 // The invoke with receiver has following phases:
2881 //   a) traverse and load/lock receiver;
2882 //   b) traverse all arguments -> item-array (invoke_visit_argument)
2883 //   c) push receiver on stack
2884 //   d) load each of the items and push on stack
2885 //   e) unlock receiver
2886 //   f) move receiver into receiver-register %o0
2887 //   g) lock result registers and emit call operation
2888 //
2889 // Before issuing a call, we must spill-save all values on stack
2890 // that are in caller-save register. "spill-save" moves those registers
2891 // either in a free callee-save register or spills them if no free
2892 // callee save register is available.
2893 //
2894 // The problem is where to invoke spill-save.
2895 // - if invoked between e) and f), we may lock callee save
2896 //   register in "spill-save" that destroys the receiver register
2897 //   before f) is executed
2898 // - if we rearrange the f) to be earlier, by loading %o0, it
2899 //   may destroy a value on the stack that is currently in %o0
2900 //   and is waiting to be spilled
2901 // - if we keep the receiver locked while doing spill-save,
2902 //   we cannot spill it as it is spill-locked
2903 //
2904 void LIRGenerator::do_Invoke(Invoke* x) {
2905   CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2906 
2907   LIR_OprList* arg_list = cc->args();
2908   LIRItemList* args = invoke_visit_arguments(x);
2909   LIR_Opr receiver = LIR_OprFact::illegalOpr;
2910 
2911   // setup result register
2912   LIR_Opr result_register = LIR_OprFact::illegalOpr;
2913   if (x->type() != voidType) {
2914     result_register = result_register_for(x->type());
2915   }
2916 
2917   CodeEmitInfo* info = state_for(x, x->state());
2918 
2919   invoke_load_arguments(x, args, arg_list);
2920 
2921   if (x->has_receiver()) {
2922     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2923     receiver = args->at(0)->result();
2924   }
2925 
2926   // emit invoke code
2927   bool optimized = x->target_is_loaded() && x->target_is_final();
2928   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2929 
2930   // JSR 292
2931   // Preserve the SP over MethodHandle call sites.
2932   ciMethod* target = x->target();
2933   bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2934                                   target->is_method_handle_intrinsic() ||
2935                                   target->is_compiled_lambda_form());
2936   if (is_method_handle_invoke) {
2937     info->set_is_method_handle_invoke(true);

2938     __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2939   }

2940 
2941   switch (x->code()) {
2942     case Bytecodes::_invokestatic:
2943       __ call_static(target, result_register,
2944                      SharedRuntime::get_resolve_static_call_stub(),
2945                      arg_list, info);
2946       break;
2947     case Bytecodes::_invokespecial:
2948     case Bytecodes::_invokevirtual:
2949     case Bytecodes::_invokeinterface:
2950       // for final target we still produce an inline cache, in order
2951       // to be able to call mixed mode
2952       if (x->code() == Bytecodes::_invokespecial || optimized) {
2953         __ call_opt_virtual(target, receiver, result_register,
2954                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
2955                             arg_list, info);
2956       } else if (x->vtable_index() < 0) {
2957         __ call_icvirtual(target, receiver, result_register,
2958                           SharedRuntime::get_resolve_virtual_call_stub(),
2959                           arg_list, info);
2960       } else {
2961         int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2962         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2963         __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2964       }
2965       break;
2966     case Bytecodes::_invokedynamic: {
2967       __ call_dynamic(target, receiver, result_register,
2968                       SharedRuntime::get_resolve_static_call_stub(),
2969                       arg_list, info);
2970       break;
2971     }
2972     default:
2973       fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2974       break;
2975   }
2976 
2977   // JSR 292
2978   // Restore the SP after MethodHandle call sites.
2979   if (is_method_handle_invoke) {

2980     __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2981   }
2982 
2983   if (x->type()->is_float() || x->type()->is_double()) {
2984     // Force rounding of results from non-strictfp when in strictfp
2985     // scope (or when we don't know the strictness of the callee, to
2986     // be safe.)
2987     if (method()->is_strict()) {
2988       if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2989         result_register = round_item(result_register);
2990       }
2991     }
2992   }
2993 
2994   if (result_register->is_valid()) {
2995     LIR_Opr result = rlock_result(x);
2996     __ move(result_register, result);
2997   }
2998 }
2999 




2870     argument_items->append(receiver);
2871   }
2872   for (int i = 0; i < x->number_of_arguments(); i++) {
2873     LIRItem* param = new LIRItem(x->argument_at(i), this);
2874     argument_items->append(param);
2875   }
2876   return argument_items;
2877 }
2878 
2879 
2880 // The invoke with receiver has following phases:
2881 //   a) traverse and load/lock receiver;
2882 //   b) traverse all arguments -> item-array (invoke_visit_argument)
2883 //   c) push receiver on stack
2884 //   d) load each of the items and push on stack
2885 //   e) unlock receiver
2886 //   f) move receiver into receiver-register %o0
2887 //   g) lock result registers and emit call operation
2888 //
2889 // Before issuing a call, we must spill-save all values on stack
2890 // that are in caller-save register. "spill-save" moves those registers
2891 // either in a free callee-save register or spills them if no free
2892 // callee save register is available.
2893 //
2894 // The problem is where to invoke spill-save.
2895 // - if invoked between e) and f), we may lock callee save
2896 //   register in "spill-save" that destroys the receiver register
2897 //   before f) is executed
2898 // - if we rearrange f) to be earlier (by loading %o0) it
2899 //   may destroy a value on the stack that is currently in %o0
2900 //   and is waiting to be spilled
2901 // - if we keep the receiver locked while doing spill-save,
2902 //   we cannot spill it as it is spill-locked
2903 //
2904 void LIRGenerator::do_Invoke(Invoke* x) {
2905   CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2906 
2907   LIR_OprList* arg_list = cc->args();
2908   LIRItemList* args = invoke_visit_arguments(x);
2909   LIR_Opr receiver = LIR_OprFact::illegalOpr;
2910 
2911   // setup result register
2912   LIR_Opr result_register = LIR_OprFact::illegalOpr;
2913   if (x->type() != voidType) {
2914     result_register = result_register_for(x->type());
2915   }
2916 
2917   CodeEmitInfo* info = state_for(x, x->state());
2918 
2919   invoke_load_arguments(x, args, arg_list);
2920 
2921   if (x->has_receiver()) {
2922     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2923     receiver = args->at(0)->result();
2924   }
2925 
2926   // emit invoke code
2927   bool optimized = x->target_is_loaded() && x->target_is_final();
2928   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2929 
2930   // JSR 292
2931   // Preserve the SP over MethodHandle call sites, if needed.
2932   ciMethod* target = x->target();
2933   bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2934                                   target->is_method_handle_intrinsic() ||
2935                                   target->is_compiled_lambda_form());
2936   if (is_method_handle_invoke) {
2937     info->set_is_method_handle_invoke(true);
2938     if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2939       __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2940     }
2941   }
2942 
2943   switch (x->code()) {
2944     case Bytecodes::_invokestatic:
2945       __ call_static(target, result_register,
2946                      SharedRuntime::get_resolve_static_call_stub(),
2947                      arg_list, info);
2948       break;
2949     case Bytecodes::_invokespecial:
2950     case Bytecodes::_invokevirtual:
2951     case Bytecodes::_invokeinterface:
2952       // for final target we still produce an inline cache, in order
2953       // to be able to call mixed mode
2954       if (x->code() == Bytecodes::_invokespecial || optimized) {
2955         __ call_opt_virtual(target, receiver, result_register,
2956                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
2957                             arg_list, info);
2958       } else if (x->vtable_index() < 0) {
2959         __ call_icvirtual(target, receiver, result_register,
2960                           SharedRuntime::get_resolve_virtual_call_stub(),
2961                           arg_list, info);
2962       } else {
2963         int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2964         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2965         __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2966       }
2967       break;
2968     case Bytecodes::_invokedynamic: {
2969       __ call_dynamic(target, receiver, result_register,
2970                       SharedRuntime::get_resolve_static_call_stub(),
2971                       arg_list, info);
2972       break;
2973     }
2974     default:
2975       fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2976       break;
2977   }
2978 
2979   // JSR 292
2980   // Restore the SP after MethodHandle call sites, if needed.
2981   if (is_method_handle_invoke
2982       && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2983     __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2984   }
2985 
2986   if (x->type()->is_float() || x->type()->is_double()) {
2987     // Force rounding of results from non-strictfp when in strictfp
2988     // scope (or when we don't know the strictness of the callee, to
2989     // be safe.)
2990     if (method()->is_strict()) {
2991       if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2992         result_register = round_item(result_register);
2993       }
2994     }
2995   }
2996 
2997   if (result_register->is_valid()) {
2998     LIR_Opr result = rlock_result(x);
2999     __ move(result_register, result);
3000   }
3001 }
3002 


src/share/vm/c1/c1_LIRGenerator.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File