void do_isInstance(Intrinsic* x);
void do_isPrimitive(Intrinsic* x);
void do_getClass(Intrinsic* x);
void do_currentThread(Intrinsic* x);
void do_FmaIntrinsic(Intrinsic* x);
void do_MathIntrinsic(Intrinsic* x);
void do_LibmIntrinsic(Intrinsic* x);
void do_ArrayCopy(Intrinsic* x);
void do_CompareAndSwap(Intrinsic* x, ValueType* type);
void do_NIOCheckIndex(Intrinsic* x);
void do_FPIntrinsics(Intrinsic* x);
void do_Reference_get(Intrinsic* x);
void do_update_CRC32(Intrinsic* x);
void do_update_CRC32C(Intrinsic* x);
void do_vectorizedMismatch(Intrinsic* x);

Value flattenable_load_field_prolog(LoadField* x, CodeEmitInfo* info);
void access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item);
bool needs_flattened_array_store_check(StoreIndexed* x);
void check_flattened_array(LIRItem& array, CodeStub* slow_path);
void substituability_check(IfOp* x, LIRItem& left, LIRItem& right, LIRItem& t_val, LIRItem& f_val);

public:
LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info);

// convenience functions
LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info);
LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info);
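//
// A minimal usage sketch (illustrative only, not part of this interface):
// lowering a drem-style operation through the two-argument convenience
// overload. The entry point shown is just an example target.
//
//   LIR_Opr result = call_runtime(x->x(), x->y(),
//                                 CAST_FROM_FN_PTR(address, SharedRuntime::drem),
//                                 x->type(), NULL);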

// Access API

private:
BarrierSetC1 *_barrier_set;

public:
void access_store_at(DecoratorSet decorators, BasicType type,
                     LIRItem& base, LIR_Opr offset, LIR_Opr value,
                     CodeEmitInfo* patch_info = NULL, CodeEmitInfo* store_emit_info = NULL);

void access_load_at(DecoratorSet decorators, BasicType type,
                    LIRItem& base, LIR_Opr offset, LIR_Opr result,
                    CodeEmitInfo* patch_info = NULL, CodeEmitInfo* load_emit_info = NULL);
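//
// A minimal usage sketch (illustrative only; `object`, `value`, `field_type`,
// `decorators` and `info` are hypothetical operands from a field-store call
// site): the decorator set tells the barrier set how to wrap the raw access.
//
//   access_store_at(decorators, field_type,
//                   object, LIR_OprFact::intConst(x->offset()),
//                   value.result(), NULL /* patch_info */, info);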
// These need to guarantee JMM volatile semantics are preserved on each platform
// and require one implementation per architecture.
LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
LIR_Opr atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& new_value);
LIR_Opr atomic_add(BasicType type, LIR_Opr addr, LIRItem& new_value);
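//
// A minimal usage sketch (illustrative only): a caller such as an
// Unsafe getAndAdd intrinsic resolves the target address first, then
// delegates to the platform primitive. `resolved_addr` is hypothetical.
//
//   LIRItem delta(x->argument_at(2), this);
//   delta.load_item();
//   LIR_Opr old_value = atomic_add(T_INT, resolved_addr, delta);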

#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
virtual void CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
#endif

// specific implementations
void array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci);
void flattened_array_store_check(LIR_Opr value, ciKlass* element_klass, CodeEmitInfo* store_check_info);

static LIR_Opr result_register_for(ValueType* type, bool callee = false);

ciObject* get_jobject_constant(Value value);

LIRItemList* invoke_visit_arguments(Invoke* x);
void invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list);
void invoke_load_one_argument(LIRItem* param, LIR_Opr loc);
void trace_block_entry(BlockBegin* block);

// volatile field operations are never patchable because a klass
// must be loaded to know it's volatile, which means that the offset
// is always known as well.
void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
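//
// A minimal sketch of the reasoning above (illustrative only; `obj`, `value`,
// `field_type` and `info` are hypothetical operands): because the field offset
// is a compile-time constant, the address can be formed directly and no
// patching stub is needed.
//
//   LIR_Address* addr = new LIR_Address(obj, x->offset(), field_type);
//   volatile_field_store(value, addr, info);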

void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);

void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);

void increment_counter(address counter, BasicType type, int step = 1);
void increment_counter(LIR_Address* addr, int step = 1);

// is_strictfp is only needed for mul and div (and only generates different code on i486)
void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL);
// machine dependent. returns true if it emitted code for the multiply
bool strength_reduce_multiply(LIR_Opr left, jint constant, LIR_Opr result, LIR_Opr tmp);
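//
// A minimal sketch of what a backend typically does here (illustrative
// only, assuming the usual `__` shorthand for the LIR list): a
// power-of-two constant becomes a shift instead of a multiply.
//
//   if (is_power_of_2(constant)) {
//     __ shift_left(left, exact_log2(constant), result);
//     return true;
//   }
//   return false;  // emit a real multiply instead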