//
// Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

source %{

#include "gc/z/zBarrierSetAssembler.hpp"

static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address src, bool weak) {
  assert(dst != rsp, "Invalid register");
  assert(dst != r15, "Invalid register");

  const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
                            : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
  __ lea(dst, src);
  __ call(RuntimeAddress(stub));
}

%}

// For XMM and YMM enabled processors
instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
                                      rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                      rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                      rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                      rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{

  match(Set dst (LoadBarrierSlowReg src));
  predicate(UseAVX <= 2);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15);

  format %{ "zLoadBarrierSlowRegXmmAndYmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  %}

  ins_pipe(pipe_slow);
%}

// For ZMM enabled processors
instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
                                rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
                                rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
                                rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
                                rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{

  match(Set dst (LoadBarrierSlowReg src));
  predicate(UseAVX == 3);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15,
         KILL x16, KILL x17, KILL x18, KILL x19,
         KILL x20, KILL x21, KILL x22, KILL x23,
         KILL x24, KILL x25, KILL x26, KILL x27,
         KILL x28, KILL x29, KILL x30, KILL x31);

  format %{ "zLoadBarrierSlowRegZmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  %}

  ins_pipe(pipe_slow);
%}
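// Note: the weak variants below mirror the two instructions above. The only
// differences are the matched ideal node (LoadBarrierWeakSlowReg instead of
// LoadBarrierSlowReg) and the weak = true argument, which makes
// z_load_barrier_slow_reg() select the per-register weak slow-path stub.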
// For XMM and YMM enabled processors
instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
                                          rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                          rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                          rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                          rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{

  match(Set dst (LoadBarrierWeakSlowReg src));
  predicate(UseAVX <= 2);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15);

  format %{ "zLoadBarrierWeakSlowRegXmmAndYmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
  %}

  ins_pipe(pipe_slow);
%}

// For ZMM enabled processors
instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
                                    rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                    rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                    rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                    rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
                                    rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
                                    rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
                                    rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                    rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{

  match(Set dst (LoadBarrierWeakSlowReg src));
  predicate(UseAVX == 3);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15,
         KILL x16, KILL x17, KILL x18, KILL x19,
         KILL x20, KILL x21, KILL x22, KILL x23,
         KILL x24, KILL x25, KILL x26, KILL x27,
         KILL x28, KILL x29, KILL x30, KILL x31);

  format %{ "zLoadBarrierWeakSlowRegZmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
  %}

  ins_pipe(pipe_slow);
%}