/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/idealKit.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/unsafe.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

class LibraryIntrinsic : public InlineCallGenerator {
  // Extend the set of intrinsics known to the runtime:
 public:
 private:
  bool             _is_virtual;
  bool             _does_virtual_dispatch;
  int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  int8_t           _last_predicate;    // Last generated predicate
  vmIntrinsics::ID _intrinsic_id;

 public:
  LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
    : InlineCallGenerator(m),
      _is_virtual(is_virtual),
      _does_virtual_dispatch(does_virtual_dispatch),
      _predicates_count((int8_t)predicates_count),
      _last_predicate((int8_t)-1),
      _intrinsic_id(id)
  {
  }
  virtual bool is_intrinsic() const { return true; }
  virtual bool is_virtual()   const { return _is_virtual; }
  virtual bool is_predicated() const { return _predicates_count > 0; }
  virtual int  predicates_count() const { return _predicates_count; }
  virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; }
  virtual JVMState* generate(JVMState* jvms);
  virtual Node* generate_predicate(JVMState* jvms, int predicate);
  vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
};
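
// Note on predicated intrinsics: for an intrinsic with _predicates_count > 0,
// the call generator emits one guarded copy of the intrinsic per predicate.
// generate_predicate() returns the control path on which a predicate fails;
// on that path compilation falls through to the next predicate or, after the
// last one, to a regular call of the Java method.  Roughly:
//
//   if (predicate_0)      intrinsic version 0
//   else if (predicate_1) intrinsic version 1
//   else                  plain Java call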

// Local helper class for LibraryIntrinsic:
class LibraryCallKit : public GraphKit {
 private:
  LibraryIntrinsic* _intrinsic;     // the library intrinsic being called
  Node*             _result;        // the result node, if any
  int               _reexecute_sp;  // the stack pointer when bytecode needs to be reexecuted

  const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr* adr_type);

 public:
  LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
    : GraphKit(jvms),
      _intrinsic(intrinsic),
      _result(NULL)
  {
    // Check if this is a root compile.  In that case we don't have a caller.
    if (!jvms->has_method()) {
      _reexecute_sp = sp();
    } else {
      // Find out how many arguments the interpreter needs when deoptimizing
      // and save the stack pointer value so it can be used by uncommon_trap.
      // We find the argument count by looking at the declared signature.
      bool ignored_will_link;
      ciSignature* declared_signature = NULL;
      ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci()));
      _reexecute_sp = sp() + nargs;  // "push" arguments back on stack
    }
  }
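
  // For example, for an invokestatic of a two-argument intrinsic, nargs is 2
  // and _reexecute_sp is sp() + 2.  An uncommon_trap taken inside the
  // intrinsic can then use reexecute_sp() so that deoptimization re-executes
  // the invoke bytecode in the interpreter with its arguments back on the
  // expression stack.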

  virtual LibraryCallKit* is_LibraryCallKit() const { return (LibraryCallKit*)this; }

  ciMethod*         caller()    const    { return jvms()->method(); }
  int               bci()       const    { return jvms()->bci(); }
  LibraryIntrinsic* intrinsic() const    { return _intrinsic; }
  vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
  ciMethod*         callee()    const    { return _intrinsic->method(); }

  bool  try_to_inline(int predicate);
  Node* try_to_predicate(int predicate);

  void push_result() {
    // Push the result onto the stack.
    if (!stopped() && result() != NULL) {
      BasicType bt = result()->bottom_type()->basic_type();
      push_node(bt, result());
    }
  }
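  // (push_node() dispatches on the basic type, so a long or double result
  // occupies two stack slots, matching what the original bytecode would
  // have left on the interpreter's expression stack.)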

 private:
  void fatal_unexpected_iid(vmIntrinsics::ID iid) {
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
  }

  void  set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
  void  set_result(RegionNode* region, PhiNode* value);
  Node* result() { return _result; }

  virtual int reexecute_sp() { return _reexecute_sp; }

  // Helper functions to inline natives
  Node* generate_guard(Node* test, RegionNode* region, float true_prob);
  Node* generate_slow_guard(Node* test, RegionNode* region);
  Node* generate_fair_guard(Node* test, RegionNode* region);
  Node* generate_negative_guard(Node* index, RegionNode* region,
                                // resulting CastII of index:
                                Node** pos_index = NULL);
  Node* generate_limit_guard(Node* offset, Node* subseq_length,
                             Node* array_length,
                             RegionNode* region);
  void  generate_string_range_check(Node* array, Node* offset,
                                    Node* length, bool char_count);
  Node* generate_current_thread(Node*& tls_output);
  Node* load_mirror_from_klass(Node* klass);
  Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                      RegionNode* region, int null_path,
                                      int offset);
  Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
                               RegionNode* region, int null_path) {
    int offset = java_lang_Class::klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
                                     RegionNode* region, int null_path) {
    int offset = java_lang_Class::array_klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* generate_access_flags_guard(Node* kls,
                                    int modifier_mask, int modifier_bits,
                                    RegionNode* region);
  Node* generate_interface_guard(Node* kls, RegionNode* region);
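  // The four wrappers below differ only in the (obj_array, not_array)
  // flags passed to generate_array_guard_common:
  //   (false, false)  any array klass         (generate_array_guard)
  //   (false, true)   not an array klass      (generate_non_array_guard)
  //   (true,  false)  an object-array klass   (generate_objArray_guard)
  //   (true,  true)   not an object-array     (generate_non_objArray_guard)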
  Node* generate_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, false);
  }
  Node* generate_non_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, true);
  }
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, false);
  }
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, true);
  }
  Node* generate_array_guard_common(Node* kls, RegionNode* region,
                                    bool obj_array, bool not_array);
  Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
  CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
                                     bool is_virtual = false, bool is_static = false);
  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, false, true);
  }
  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, true, false);
  }
  Node* load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass* fromKls);
  Node* field_address_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass* fromKls);

  Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
  bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOf(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae);
  Node* make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
                          RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOfChar();
  bool inline_string_equals(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_toBytesU();
  bool inline_string_getCharsU();
  bool inline_string_copy(bool compress);
  bool inline_string_char_access(bool is_store);
  Node* round_double_node(Node* n);
  bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_math_native(vmIntrinsics::ID id);
  bool inline_math(vmIntrinsics::ID id);
  bool inline_double_math(vmIntrinsics::ID id);
  template <typename OverflowOp>
  bool inline_math_overflow(Node* arg1, Node* arg2);
  void inline_math_mathExact(Node* math, Node* test);
  bool inline_math_addExactI(bool is_increment);
  bool inline_math_addExactL(bool is_increment);
  bool inline_math_multiplyExactI();
  bool inline_math_multiplyExactL();
  bool inline_math_multiplyHigh();
  bool inline_math_negateExactI();
  bool inline_math_negateExactL();
  bool inline_math_subtractExactI(bool is_decrement);
  bool inline_math_subtractExactL(bool is_decrement);
  bool inline_min_max(vmIntrinsics::ID id);
  bool inline_notify(vmIntrinsics::ID id);
  Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
  // This returns Type::AnyPtr, RawPtr, or OopPtr.
  int classify_unsafe_addr(Node*& base, Node*& offset, BasicType type);
  Node* make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type = T_ILLEGAL, bool can_cast = false);

  typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
  DecoratorSet mo_decorator_for_access_kind(AccessKind kind);
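  // mo_decorator_for_access_kind() maps these to the shared access
  // decorators, roughly: Relaxed -> MO_UNORDERED, Opaque -> MO_RELAXED,
  // Acquire -> MO_ACQUIRE, Release -> MO_RELEASE, Volatile -> MO_SEQ_CST.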
  bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
  static bool klass_needs_init_guard(Node* kls);
  bool inline_unsafe_allocate();
  bool inline_unsafe_newArray(bool uninitialized);
  bool inline_unsafe_writeback0();
  bool inline_unsafe_writebackSync0(bool is_pre);
  bool inline_unsafe_copyMemory();
  bool inline_native_currentThread();

  bool inline_native_time_funcs(address method, const char* funcName);
#ifdef JFR_HAVE_INTRINSICS
  bool inline_native_classID();
  bool inline_native_getEventWriter();
#endif
  bool inline_native_Class_query(vmIntrinsics::ID id);
  bool inline_native_subtype_check();
  bool inline_native_getLength();
  bool inline_array_copyOf(bool is_copyOfRange);
  bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
  bool inline_preconditions_checkIndex();
  void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array);
  bool inline_native_clone(bool is_virtual);
  bool inline_native_Reflection_getCallerClass();
  // Helper function for inlining native object hash method
  bool inline_native_hashcode(bool is_virtual, bool is_static);
  bool inline_native_getClass();

  // Helper functions for inlining arraycopy
  bool inline_arraycopy();
  AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                RegionNode* slow_region);
  JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
  void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp,
                                      uint new_idx);

  typedef enum { LS_get_add, LS_get_set, LS_cmp_swap, LS_cmp_swap_weak, LS_cmp_exchange } LoadStoreKind;
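  // These correspond to the jdk.internal.misc.Unsafe operation families:
  // LS_get_add (getAndAdd*), LS_get_set (getAndSet*), LS_cmp_swap
  // (compareAndSet*), LS_cmp_swap_weak (weakCompareAndSet*) and
  // LS_cmp_exchange (compareAndExchange*).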
  bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind, AccessKind access_kind);
  bool inline_unsafe_fence(vmIntrinsics::ID id);
  bool inline_onspinwait();
  bool inline_fp_conversions(vmIntrinsics::ID id);
  bool inline_number_methods(vmIntrinsics::ID id);
  bool inline_reference_get();
  bool inline_Class_cast();
  bool inline_aescrypt_Block(vmIntrinsics::ID id);
  bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
  bool inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id);
  bool inline_counterMode_AESCrypt(vmIntrinsics::ID id);
  Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
  Node* inline_electronicCodeBook_AESCrypt_predicate(bool decrypting);
  Node* inline_counterMode_AESCrypt_predicate();
  Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
  Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
  bool inline_ghash_processBlocks();
  bool inline_base64_encodeBlock();
  bool inline_sha_implCompress(vmIntrinsics::ID id);
  bool inline_digestBase_implCompressMB(int predicate);
  bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
                                 bool long_state, address stubAddr, const char* stubName,
                                 Node* src_start, Node* ofs, Node* limit);
  Node* get_state_from_sha_object(Node* sha_object);
  Node* get_state_from_sha5_object(Node* sha_object);
  Node* inline_digestBase_implCompressMB_predicate(int predicate);
  bool inline_encodeISOArray();
  bool inline_updateCRC32();
  bool inline_updateBytesCRC32();
  bool inline_updateByteBufferCRC32();
  Node* get_table_from_crc32c_class(ciInstanceKlass* crc32c_class);
  bool inline_updateBytesCRC32C();
  bool inline_updateDirectByteBufferCRC32C();
  bool inline_updateBytesAdler32();
  bool inline_updateByteBufferAdler32();
  bool inline_multiplyToLen();
  bool inline_hasNegatives();
  bool inline_squareToLen();
  bool inline_mulAdd();
  bool inline_montgomeryMultiply();
  bool inline_montgomerySquare();
  bool inline_bigIntegerShift(bool isRightShift);
  bool inline_vectorizedMismatch();
  bool inline_fma(vmIntrinsics::ID id);
  bool inline_character_compare(vmIntrinsics::ID id);
  bool inline_fp_min_max(vmIntrinsics::ID id);

  bool inline_profileBoolean();
  bool inline_isCompileConstant();
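  // On x86, mixing 256-bit AVX instructions with legacy SSE code can incur
  // expensive AVX-SSE transition penalties unless the upper halves of the
  // ymm registers are cleared (vzeroupper).  Intrinsics that may emit wide
  // vector code record that fact here so the backend knows to emit
  // vzeroupper where needed.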
  void clear_upper_avx() {
#ifdef X86
    if (UseAVX >= 2) {
      C->set_clear_upper_avx(true);
    }
#endif
  }
};

//---------------------------make_vm_intrinsic----------------------------
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsics::ID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (!m->is_loaded()) {
    // Do not attempt to inline unloaded methods.
    return NULL;
  }

  C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  bool is_available = false;

  {
    // For calling is_intrinsic_supported and is_intrinsic_disabled_by_flag
    // the compiler must transition to '_thread_in_vm' state because both
    // methods access VM-internal data.
    VM_ENTRY_MARK;
    methodHandle mh(THREAD, m->get_Method());
    is_available = compiler != NULL && compiler->is_intrinsic_supported(mh, is_virtual) &&

// ... (elided: the remainder of make_vm_intrinsic and the start of
// LibraryIntrinsic::generate; the excerpt resumes in generate's
// intrinsic-failure reporting path) ...
    }
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, msg);
    }
  } else {
    // Root compile
    ResourceMark rm;
    stringStream msg_stream;
    msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
                     vmIntrinsics::name_at(intrinsic_id()),
                     is_virtual() ? " (virtual)" : "", bci);
    const char* msg = msg_stream.as_string();
    log_debug(jit, inlining)("%s", msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      tty->print("%s", msg);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  C->print_inlining_update(this);
  return NULL;
}

Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
  _last_predicate = predicate;
#ifndef PRODUCT
  assert(is_predicated() && predicate < predicates_count(), "sanity");
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Predicate for intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();

  Node* slow_ctl = kit.try_to_predicate(predicate);

  // ... (elided: success and failure handling for the generated predicate) ...
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return NULL;
}

bool LibraryCallKit::try_to_inline(int predicate) {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store       = true;
  const bool is_compress    = true;
  const bool is_static      = true;
  const bool is_volatile    = true;
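  // (These exist purely to make call sites self-documenting: e.g.
  // inline_native_hashcode(..., !is_static) below reads as "not static"
  // rather than a bare 'false'.)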

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode:          return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode:  return inline_native_hashcode(/*!virtual*/ false,        is_static);
  case vmIntrinsics::_getClass:          return inline_native_getClass();

  case vmIntrinsics::_ceil:
  case vmIntrinsics::_floor:
  case vmIntrinsics::_rint:
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_fabs:
  case vmIntrinsics::_iabs:
  case vmIntrinsics::_labs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:

  // ... (elided: the dispatch for these math cases and many further
  // intrinsic cases) ...

  case vmIntrinsics::_hasNegatives:
    return inline_hasNegatives();

  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
    return inline_fma(intrinsic_id());

  case vmIntrinsics::_isDigit:
  case vmIntrinsics::_isLowerCase:
  case vmIntrinsics::_isUpperCase:
  case vmIntrinsics::_isWhitespace:
    return inline_character_compare(intrinsic_id());

  case vmIntrinsics::_maxF:
  case vmIntrinsics::_minF:
  case vmIntrinsics::_maxD:
  case vmIntrinsics::_minD:
    return inline_fp_min_max(intrinsic_id());

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}

Node* LibraryCallKit::try_to_predicate(int predicate) {
  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }

// ... (elided: the predicate dispatch and the individual intrinsic
// implementations; the excerpt resumes inside
// LibraryCallKit::classify_unsafe_addr) ...
    // Offset is small => always a heap address.
    const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
    if (offset_type != NULL &&
        base_type->offset() == 0 &&  // (should always be?)
        offset_type->_lo >= 0 &&
        !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
      return Type::OopPtr;
    } else if (type == T_OBJECT) {
      // An off-heap access to an oop makes no sense; it has to be on-heap.
      return Type::OopPtr;
    }
    // Otherwise, it might be either oop+off or NULL+addr.
    return Type::AnyPtr;
  } else {
    // No information:
    return Type::AnyPtr;
  }
}
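
// For example, an access at a small constant non-negative offset from a
// non-null oop base classifies as Type::OopPtr, a NULL base (pure address
// arithmetic) classifies as Type::RawPtr, and anything ambiguous falls back
// to Type::AnyPtr, to be handled conservatively by make_unsafe_address
// below.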

inline Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type, bool can_cast) {
  Node* uncasted_base = base;
  int kind = classify_unsafe_addr(uncasted_base, offset, type);
  if (kind == Type::RawPtr) {
    return basic_plus_adr(top(), uncasted_base, offset);
  } else if (kind == Type::AnyPtr) {
    assert(base == uncasted_base, "unexpected base change");
    if (can_cast) {
      if (!_gvn.type(base)->speculative_maybe_null() &&
          !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
        // According to profiling, this access is always on heap.  Casting
        // the base to not-null and thus avoiding membars around the access
        // should allow better optimizations.
        Node* null_ctl = top();
        base = null_check_oop(base, &null_ctl, true, true, true);
        assert(null_ctl->is_top(), "no null control here");
        return basic_plus_adr(base, offset);
      } else if (_gvn.type(base)->speculative_always_null() &&
                 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
        // According to profiling, this access is always off heap.
        // ...