// Move a constant pointer into r. In AArch64 mode the virtual
// address space is 48 bits in size, so we only need three
// instructions to create a patchable instruction sequence that can
// reach anywhere.
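//
// For example (illustrative only), movptr(r0, 0x0000123456789abcUL)
// would emit:
//   movz r0, #0x9abc
//   movk r0, #0x5678, lsl #16
//   movk r0, #0x1234, lsl #32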
void MacroAssembler::movptr(Register r, uintptr_t imm64) {
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
    block_comment(buffer);
  }
#endif
  assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
  movz(r, imm64 & 0xffff);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 16);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 32);
}

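// Move an immediate, replicated across the lanes selected by arrangement T,
// into SIMD/FP register Vd. The value is built one non-zero byte at a time
// with MOVI or MVNI followed by ORR or BIC (immediate); whichever of imm32
// and ~imm32 has fewer non-zero bytes gives the shorter sequence. For
// example (illustrative only), mov(v0, T4S, 0xff00ff00) would emit
//   movi v0.4s, #0xff, lsl #8
//   orr  v0.4s, #0xff, lsl #24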
void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
  assert(T != T1D && T != T2D, "invalid arrangement");
  if (T == T8B || T == T16B) {
    movi(Vd, T, imm32 & 0xff, 0);
    return;
  }
  u_int32_t nimm32 = ~imm32;
  if (T == T4H || T == T8H) { imm32 &= 0xffff; nimm32 &= 0xffff; }
  u_int32_t x = imm32;
  int movi_cnt = 0;
  int movn_cnt = 0;
  while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
  x = nimm32;
  while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
  if (movn_cnt < movi_cnt) imm32 = nimm32;
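  // If the complemented value has fewer non-zero bytes, build it instead:
  // start with MVNI and clear the remaining bytes with BIC rather than
  // setting them with ORR.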
  unsigned lsl = 0;
  while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
  if (movn_cnt < movi_cnt)
    mvni(Vd, T, imm32 & 0xff, lsl);
  else
    movi(Vd, T, imm32 & 0xff, lsl);
  imm32 >>= 8; lsl += 8;
  while (imm32) {
    while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
    if (movn_cnt < movi_cnt)
      bici(Vd, T, imm32 & 0xff, lsl);
    else
      orri(Vd, T, imm32 & 0xff, lsl);
    lsl += 8; imm32 >>= 8;
  }
}

void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
{
#ifndef PRODUCT
  {
    char buffer[64];
    snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
    block_comment(buffer);
  }
#endif
  if (operand_valid_for_logical_immediate(false, imm64)) {
    orr(dst, zr, imm64);
  } else {
    // we can use a combination of MOVZ or MOVN with
    // MOVK to build up the constant
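    // Split the value into four 16-bit halfwords; counting the all-zero
    // and all-one halfwords lets the shorter of a MOVZ-based or a
    // MOVN-based sequence be chosen.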
    u_int64_t imm_h[4];
    int zero_count = 0;
    int neg_count = 0;
    int i;
    for (i = 0; i < 4; i++) {
      imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);