/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_BARRIERSET_HPP
#define SHARE_VM_MEMORY_BARRIERSET_HPP

#include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"

// This class provides the interface between a barrier implementation and
// the rest of the system.

class BarrierSet: public CHeapObj<mtGC> {
  friend class VMStructs;
public:
  enum Name {
    ModRef,
    CardTableModRef,
    CardTableExtension,
    G1SATBCT,
    G1SATBCTLogging,
    Other
  };

  enum Flags {
    None                = 0,
    TargetUninitialized = 1
  };
protected:
  // Some barrier sets create tables whose elements correspond to parts of
  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
  // normally reserve space for such tables, and commit parts of the table
  // "covering" parts of the heap that are committed.  At most one covered
  // region per generation is needed.
  static const int _max_covered_regions = 2;
  Name _kind;

  BarrierSet(Name kind) : _kind(kind) { }
  ~BarrierSet() { }

public:

  // To get around prohibition on RTTI.
  BarrierSet::Name kind() { return _kind; }
  virtual bool is_a(BarrierSet::Name bsn) = 0;

  // These operations indicate what kind of barriers the BarrierSet has.
  virtual bool has_read_ref_barrier() = 0;
  virtual bool has_read_prim_barrier() = 0;
  virtual bool has_write_ref_barrier() = 0;
  virtual bool has_write_ref_pre_barrier() = 0;
  virtual bool has_write_prim_barrier() = 0;

  // These functions indicate whether a particular access of the given
  // kinds requires a barrier.
  virtual bool read_ref_needs_barrier(void* field) = 0;
  virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
  virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
                                        juint val1, juint val2) = 0;

  // The first four operations provide a direct implementation of the
  // barrier set.  An interpreter loop, for example, could call these
  // directly, as appropriate.

  // Invoke the barrier, if any, necessary when reading the given ref field.
  virtual void read_ref_field(void* field) = 0;

  // Invoke the barrier, if any, necessary when reading the given primitive
  // "field" of "bytes" bytes in "obj".
  virtual void read_prim_field(HeapWord* field, size_t bytes) = 0;
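
  // A minimal usage sketch (illustrative only, not part of this interface) of
  // how an interpreter-style caller might drive the read barrier entry points
  // above.  "bs" is assumed to be the active BarrierSet, and "field" is a
  // hypothetical address of a reference field being read:
  //
  //   void* field = /* address of the ref field in the object */;
  //   if (bs->read_ref_needs_barrier(field)) {
  //     bs->read_ref_field(field);    // read barrier, if this set has one
  //   }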

  // Invoke the barrier, if any, necessary when writing "new_val" into the
  // ref field at "offset" in "obj".
  // (For efficiency reasons, this operation is specialized for certain
  // barrier types.  Semantically, it should be thought of as a call to the
  // virtual "_work" function below, which must implement the barrier.)
  // First the pre-write versions...
  template <class T> inline void write_ref_field_pre(T* field, oop new_val);
private:
  // Keep this private so as to catch violations at build time.
  virtual void write_ref_field_pre_work(     void* field, oop new_val) { guarantee(false, "Not needed"); };
protected:
  virtual void write_ref_field_pre_work(      oop* field, oop new_val) {};
  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
public:

  // ...then the post-write version.
  inline void write_ref_field(void* field, oop new_val, bool release = false);
protected:
  virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
public:
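
  // A minimal usage sketch (illustrative only, not part of this interface) of
  // the pre-/post-write pairing above for a single reference store performed
  // from runtime code.  "bs", "field" and "new_val" are hypothetical locals,
  // and the raw store is shown only for illustration:
  //
  //   bs->write_ref_field_pre(field, new_val);   // pre-write barrier (e.g. SATB enqueue)
  //   *field = new_val;                          // the reference store itself
  //   bs->write_ref_field(field, new_val);       // post-write barrier (e.g. card dirtying)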

  // Invoke the barrier, if any, necessary when writing the "bytes"-byte
  // value(s) "val1" (and "val2") into the primitive "field".
  virtual void write_prim_field(HeapWord* field, size_t bytes,
                                juint val1, juint val2) = 0;

  // Operations on arrays, or general regions (e.g., for "clone") may be
  // optimized by some barriers.

  // The first six operations tell whether such an optimization exists for
  // the particular barrier.
  virtual bool has_read_ref_array_opt() = 0;
  virtual bool has_read_prim_array_opt() = 0;
  virtual bool has_write_ref_array_pre_opt() { return true; }
  virtual bool has_write_ref_array_opt() = 0;
  virtual bool has_write_prim_array_opt() = 0;

  virtual bool has_read_region_opt() = 0;
  virtual bool has_write_region_opt() = 0;

  // These operations should assert false unless the corresponding operation
  // above returns true.  Otherwise, they should perform an appropriate
  // barrier for an array whose elements are all in the given memory region.
  virtual void read_ref_array(MemRegion mr) = 0;
  virtual void read_prim_array(MemRegion mr) = 0;

  // Below, "length" is the # array elements being written.
  virtual void write_ref_array_pre(oop* dst, int length,
                                   bool dest_uninitialized = false) {}
  virtual void write_ref_array_pre(narrowOop* dst, int length,
                                   bool dest_uninitialized = false) {}
  // Below, "count" is the # array elements being written, starting
  // at the address "start", which may not necessarily be HeapWord-aligned.
  inline void write_ref_array(HeapWord* start, size_t count);

  // Static versions, suitable for calling from generated code;
  // count is # array elements being written, starting with "start",
  // which may not necessarily be HeapWord-aligned.
  static void static_write_ref_array_pre(HeapWord* start, size_t count);
  static void static_write_ref_array_post(HeapWord* start, size_t count);

protected:
  virtual void write_ref_array_work(MemRegion mr) = 0;
public:
  virtual void write_prim_array(MemRegion mr) = 0;

  virtual void read_region(MemRegion mr) = 0;

  // (For efficiency reasons, this operation is specialized for certain
  // barrier types.  Semantically, it should be thought of as a call to the
  // virtual "_work" function below, which must implement the barrier.)
  inline void write_region(MemRegion mr);
protected:
  virtual void write_region_work(MemRegion mr) = 0;
public:
  // Inform the BarrierSet that the covered heap region that starts
  // with "base" has been changed to have the given size (possibly from 0,
  // for initialization.)
  virtual void resize_covered_region(MemRegion new_region) = 0;

  // If the barrier set imposes any alignment restrictions on boundaries
  // within the heap, this function tells whether they are met.
  virtual bool is_aligned(HeapWord* addr) = 0;

  // Print a description of the memory for the barrier set.
  virtual void print_on(outputStream* st) const = 0;
};

#endif // SHARE_VM_MEMORY_BARRIERSET_HPP