/*
 * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/vm_version.hpp"

// ARM 32-bit CPU feature detection for HotSpot.
//
// Features that cannot be queried portably (VFP, VFPv3-D32, SIMD/NEON,
// Multiprocessing Extensions) are detected by generating a tiny stub that
// executes a representative instruction. If the CPU lacks the feature, the
// instruction raises SIGILL; the platform signal handler recognizes the
// faulting address (recorded in the check_*_fault_instr globals below) and
// resumes execution past the probe so the stub returns "unsupported".

// PC-store offset measured by the get_cpu_info stub (ARM stores PC+8 or
// PC+12 depending on the implementation); 4 is only the pre-probe default.
int VM_Version::_stored_pc_adjustment = 4;
// Conservative default architecture level until get_os_cpu_info() runs.
int VM_Version::_arm_arch = 5;
bool VM_Version::_is_initialized = false;
int VM_Version::_kuser_helper_version = 0;

// Signatures of the generated probe stubs; extern "C" so they use the
// plain C calling convention the hand-written assembly implements.
extern "C" {
  typedef int (*get_cpu_info_t)();
  typedef bool (*check_vfp_t)(double *d);
  typedef bool (*check_simd_t)();
  typedef bool (*check_mp_ext_t)(int *addr);
}

#define __ _masm->

// Generates the feature-probe stubs described above. Each generator returns
// the entry address of its stub; the first instruction of a probe stub is
// the one that may fault.
class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  // Measures the PC storage offset: R0 = PC as read by a data instruction,
  // R1 = PC as stored to memory by push; returns (stored - read) in R0.
  address generate_get_cpu_info() {
    StubCodeMark mark(this, "VM_Version", "get_cpu_info");
    address start = __ pc();

    __ mov(R0, PC);
    __ push(PC);
    __ pop(R1);
    __ sub(R0, R1, R0);
    // return the result in R0
    __ bx(LR);

    return start;
  };

  // Probe for basic VFP: fstd on D0 faults with SIGILL if VFP is absent.
  address generate_check_vfp() {
    StubCodeMark mark(this, "VM_Version", "check_vfp");
    address start = __ pc();

    __ fstd(D0, Address(R0));
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  // Probe for VFPv3 with 32 double registers: D16 only exists in the
  // 32-register VFP variant.
  address generate_check_vfp3_32() {
    StubCodeMark mark(this, "VM_Version", "check_vfp3_32");
    address start = __ pc();

    __ fstd(D16, Address(R0));
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  // Probe for SIMD/NEON: vcnt is a NEON-only instruction.
  address generate_check_simd() {
    StubCodeMark mark(this, "VM_Version", "check_simd");
    address start = __ pc();

    __ vcnt(Stemp, Stemp);
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  address generate_check_mp_ext() {
    StubCodeMark mark(this, "VM_Version", "check_mp_ext");
    address start = __ pc();

    // PLDW is available with Multiprocessing Extensions only
    __ pldw(Address(R0));
    // Return true if instruction caused no signals
    __ mov(R0, 1);
    // JVM_handle_linux_signal moves PC here if SIGILL happens
    __ bx(LR);

    return start;
  };
};

#undef __


// Addresses of the probe instructions, consumed by the platform signal
// handler to distinguish expected probe SIGILLs from real crashes.
extern "C" address check_vfp3_32_fault_instr;
extern "C" address check_vfp_fault_instr;
extern "C" address check_simd_fault_instr;
extern "C" address check_mp_ext_fault_instr;

// Early, assembler-free initialization: establishes the architecture level
// and cx8 (64-bit compare-and-exchange) support before full initialize().
void VM_Version::early_initialize() {

  // Make sure that _arm_arch is initialized so that any calls to OrderAccess will
  // use proper dmb instruction
  get_os_cpu_info();

  // Kernel helper page version (KUSER_HELPER_VERSION_ADDR is a fixed
  // kernel-provided address on Linux/ARM).
  _kuser_helper_version = *(int*)KUSER_HELPER_VERSION_ADDR;
  // armv7 has the ldrexd instruction that can be used to implement cx8
  // armv5 with linux >= 3.1 can use kernel helper routine
  _supports_cx8 = (supports_ldrexd() || supports_kuser_cmpxchg64());
}

// Full feature detection and VM flag adjustment. Generates the probe stubs,
// runs them to populate _features, then disables every intrinsic/option the
// detected hardware cannot support and sets platform-appropriate defaults.
void VM_Version::initialize() {
  ResourceMark rm;

  // Making this stub must be FIRST use of assembler
  const int stub_size = 128;
  BufferBlob* stub_blob = BufferBlob::create("get_cpu_info", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info stub");
  }

  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  address get_cpu_info_pc = g.generate_get_cpu_info();
  get_cpu_info_t get_cpu_info = CAST_TO_FN_PTR(get_cpu_info_t, get_cpu_info_pc);

  int pc_adjustment = get_cpu_info();

  VM_Version::_stored_pc_adjustment = pc_adjustment;

#ifndef __SOFTFP__
  // Probe VFP; the fault-instr global must be set BEFORE the stub runs so
  // the signal handler can recognize an expected SIGILL.
  address check_vfp_pc = g.generate_check_vfp();
  check_vfp_t check_vfp = CAST_TO_FN_PTR(check_vfp_t, check_vfp_pc);

  check_vfp_fault_instr = (address)check_vfp;
  double dummy;
  if (check_vfp(&dummy)) {
    _features |= vfp_m;
  }

#ifdef COMPILER2
  if (has_vfp()) {
    // VFPv3-D32 probe only makes sense once basic VFP is known present.
    address check_vfp3_32_pc = g.generate_check_vfp3_32();
    check_vfp_t check_vfp3_32 = CAST_TO_FN_PTR(check_vfp_t, check_vfp3_32_pc);

    check_vfp3_32_fault_instr = (address)check_vfp3_32;
    double dummy;
    if (check_vfp3_32(&dummy)) {
      _features |= vfp3_32_m;
    }

    address check_simd_pc = g.generate_check_simd();
    check_simd_t check_simd = CAST_TO_FN_PTR(check_simd_t, check_simd_pc);
    check_simd_fault_instr = (address)check_simd;
    if (check_simd()) {
      _features |= simd_m;
    }
  }
#endif
#endif

  // Probe Multiprocessing Extensions (PLDW).
  address check_mp_ext_pc = g.generate_check_mp_ext();
  check_mp_ext_t check_mp_ext = CAST_TO_FN_PTR(check_mp_ext_t, check_mp_ext_pc);
  check_mp_ext_fault_instr = (address)check_mp_ext;
  int dummy_local_variable;
  if (check_mp_ext(&dummy_local_variable)) {
    _features |= mp_ext_m;
  }

  // The intrinsics below are unconditionally unavailable on this port:
  // warn only when the user explicitly requested them, then force them off.
  if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
    warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

  if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
    warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (UseMD5Intrinsics) {
    warning("MD5 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseMD5Intrinsics, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("vectorizedMismatch intrinsic is not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }

#ifdef COMPILER2
  // C2 is only supported on v7+ VFP at this time
  if (_arm_arch < 7 || !has_vfp()) {
    vm_exit_during_initialization("Server VM is only supported on ARMv7+ VFP");
  }
#endif

  // ARM doesn't have special instructions for these but ldrex/ldrexd
  // enable shorter instruction sequences than the ones based on cas.
  _supports_atomic_getset4 = supports_ldrex();
  _supports_atomic_getadd4 = supports_ldrex();
  _supports_atomic_getset8 = supports_ldrexd();
  _supports_atomic_getadd8 = supports_ldrexd();

#ifdef COMPILER2
  assert(_supports_cx8 && _supports_atomic_getset4 && _supports_atomic_getadd4
         && _supports_atomic_getset8 && _supports_atomic_getadd8, "C2: atomic operations must be supported");
#endif
  char buf[512];
  jio_snprintf(buf, sizeof(buf), "(ARMv%d)%s%s%s%s",
               _arm_arch,
               (has_vfp() ? ", vfp" : ""),
               (has_vfp3_32() ? ", vfp3-32" : ""),
               (has_simd() ? ", simd" : ""),
               (has_multiprocessing_extensions() ? ", mp_ext" : ""));

  // buf starts with "(ARMvN)"; each detected feature is appended as ", name".
  _features_string = os::strdup(buf);

  // vcnt (population count) is a NEON instruction, so the popcount
  // intrinsic is only useful with SIMD.
  if (has_simd()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      FLAG_SET_DEFAULT(UsePopCountInstruction, true);
    }
  } else {
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
    FLAG_SET_DEFAULT(AllocatePrefetchDistance, 128);
  }

#ifdef COMPILER2
  FLAG_SET_DEFAULT(UseFPUForSpilling, true);

  if (FLAG_IS_DEFAULT(MaxVectorSize)) {
    // FLAG_SET_DEFAULT(MaxVectorSize, has_simd() ? 16 : 8);
    // SIMD/NEON can use 16, but default is 8 because currently
    // larger than 8 will disable instruction scheduling
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  } else {
    int max_vector_size = has_simd() ? 16 : 8;
    if (MaxVectorSize > max_vector_size) {
      warning("MaxVectorSize must be at most %i on this platform", max_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
    }
  }
#endif

  // Platform-tuned tiered compilation thresholds (only if not user-set).
  if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
    Tier4CompileThreshold = 10000;
  }
  if (FLAG_IS_DEFAULT(Tier3InvocationThreshold)) {
    Tier3InvocationThreshold = 1000;
  }
  if (FLAG_IS_DEFAULT(Tier3CompileThreshold)) {
    Tier3CompileThreshold = 5000;
  }
  if (FLAG_IS_DEFAULT(Tier3MinInvocationThreshold)) {
    Tier3MinInvocationThreshold = 500;
  }

  UNSUPPORTED_OPTION(TypeProfileLevel);
  UNSUPPORTED_OPTION(CriticalJNINatives);

  FLAG_SET_DEFAULT(TypeProfileLevel, 0); // unsupported

  // This machine does not allow unaligned memory accesses
  if (UseUnalignedAccesses) {
    if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
      warning("Unaligned memory access is not available on this CPU");
    FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
  }

  _is_initialized = true;
}

// Decides whether biased locking pays off on this hardware. May be called
// before initialize(), hence the explicit get_os_cpu_info() call.
bool VM_Version::use_biased_locking() {
  get_os_cpu_info();
  // The cost of CAS on uniprocessor ARM v6 and later is low compared to the
  // overhead related to slightly longer Biased Locking execution path.
  // Testing shows no improvement when running with Biased Locking enabled
  // on an ARMv6 and higher uniprocessor systems. The situation is different on
  // ARMv5 and MP systems.
  //
  // Therefore the Biased Locking is enabled on ARMv5 and ARM MP only.
  //
  return (!os::is_MP() && (arm_arch() > 5)) ? false : true;
}