//
// Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

source %{

#include "gc/z/zBarrierSetAssembler.hpp"

// Call the per-register slow-path stub for a ZGC load barrier. The address of
// the loaded field is passed in dst, and the stub returns the healed oop in
// dst. dst must not be one of the reserved registers (r12/heapbase, r15/thread)
// or the stack pointer.
static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address src, bool weak) {
  assert(dst != r12, "Invalid register");
  assert(dst != r15, "Invalid register");
  assert(dst != rsp, "Invalid register");

  // Select the strong or weak slow-path stub specialized for dst
  const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
                            : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
  __ lea(dst, src);
  __ call(RuntimeAddress(stub));
}

%}
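
// The instructs below match the LoadBarrierSlowReg/LoadBarrierWeakSlowReg
// nodes that C2 emits for ZGC load barriers. All vector registers are
// declared KILLed, i.e. treated as clobbered by the slow-path call, which
// forces the register allocator to spill any live vector values around it.
// Separate XMM/YMM and ZMM variants are needed because xmm16-xmm31 only
// exist on AVX-512 capable processors (UseAVX == 3).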

// For XMM and YMM enabled processors
instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
                                      rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                      rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                      rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                      rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{

  match(Set dst (LoadBarrierSlowReg src));
  predicate(UseAVX <= 2);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15);

  format %{ "zLoadBarrierSlowRegXmmAndYmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  %}

  ins_pipe(pipe_slow);
%}

// For ZMM enabled processors
instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
                                rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
                                rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
                                rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
                                rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{

  match(Set dst (LoadBarrierSlowReg src));
  predicate(UseAVX == 3);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15,
         KILL x16, KILL x17, KILL x18, KILL x19,
         KILL x20, KILL x21, KILL x22, KILL x23,
         KILL x24, KILL x25, KILL x26, KILL x27,
         KILL x28, KILL x29, KILL x30, KILL x31);

  format %{ "zLoadBarrierSlowRegZmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
  %}

  ins_pipe(pipe_slow);
%}

// For XMM and YMM enabled processors
instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
                                          rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                          rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                          rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                          rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{

  match(Set dst (LoadBarrierWeakSlowReg src));
  predicate(UseAVX <= 2);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15);

  format %{ "zLoadBarrierWeakSlowRegXmmAndYmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
  %}

  ins_pipe(pipe_slow);
%}

// For ZMM enabled processors
instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
                                    rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                    rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                    rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                    rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
                                    rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
                                    rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
                                    rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                    rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{

  match(Set dst (LoadBarrierWeakSlowReg src));
  predicate(UseAVX == 3);

  effect(DEF dst, KILL cr,
         KILL x0, KILL x1, KILL x2, KILL x3,
         KILL x4, KILL x5, KILL x6, KILL x7,
         KILL x8, KILL x9, KILL x10, KILL x11,
         KILL x12, KILL x13, KILL x14, KILL x15,
         KILL x16, KILL x17, KILL x18, KILL x19,
         KILL x20, KILL x21, KILL x22, KILL x23,
         KILL x24, KILL x25, KILL x26, KILL x27,
         KILL x28, KILL x29, KILL x30, KILL x31);

  format %{ "zLoadBarrierWeakSlowRegZmm $dst, $src" %}

  ins_encode %{
    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
  %}

  ins_pipe(pipe_slow);
%}