/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_BARRIERSET_HPP
#define SHARE_VM_MEMORY_BARRIERSET_HPP

#include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"

// This class provides the interface between a barrier implementation and
// the rest of the system.

class BarrierSet: public CHeapObj<mtGC> {
  friend class VMStructs;
public:
  enum Name {
    ModRef,
    CardTableModRef,
    CardTableExtension,
    G1SATBCT,
    G1SATBCTLogging
  };

  enum Flags {
    None                = 0,
    TargetUninitialized = 1
  };
protected:
  // Some barrier sets create tables whose elements correspond to parts of
  // the heap; the CardTableModRefBS is an example. Such barrier sets will
  // normally reserve space for such tables, and commit parts of the table
  // "covering" parts of the heap that are committed. At most one covered
  // region per generation is needed.
  static const int _max_covered_regions = 2;
  Name _kind;

  BarrierSet(Name kind) : _kind(kind) { }
  ~BarrierSet() { }

public:

  // To get around prohibition on RTTI.
  BarrierSet::Name kind() { return _kind; }
  virtual bool is_a(BarrierSet::Name bsn) = 0;
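  // Illustrative sketch, not part of this interface: since RTTI is off
  // limits, a caller that needs a concrete barrier set is expected to test
  // the Name tag with is_a() before casting. The caller-side code below is
  // an assumption shown for illustration only, not a declaration in this
  // class:
  //
  //   BarrierSet* bs = Universe::heap()->barrier_set();
  //   if (bs->is_a(BarrierSet::CardTableModRef)) {
  //     CardTableModRefBS* ct = (CardTableModRefBS*) bs;
  //     // ... card-table-specific work ...
  //   }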

  // These operations indicate what kind of barriers the BarrierSet has.
  virtual bool has_read_ref_barrier() = 0;
  virtual bool has_read_prim_barrier() = 0;
  virtual bool has_write_ref_barrier() = 0;
  virtual bool has_write_ref_pre_barrier() = 0;
  virtual bool has_write_prim_barrier() = 0;

  // These functions indicate whether a particular access of the given
  // kinds requires a barrier.
  virtual bool read_ref_needs_barrier(void* field) = 0;
  virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
  virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
                                        juint val1, juint val2) = 0;

  // The first four operations provide a direct implementation of the
  // barrier set. An interpreter loop, for example, could call these
  // directly, as appropriate.

  // Invoke the barrier, if any, necessary when reading the given ref field.
  virtual void read_ref_field(void* field) = 0;

  // Invoke the barrier, if any, necessary when reading the given primitive
  // "field" of "bytes" bytes in "obj".
  virtual void read_prim_field(HeapWord* field, size_t bytes) = 0;

  // Invoke the barrier, if any, necessary when writing "new_val" into the
  // ref field at "offset" in "obj".
  // (For efficiency reasons, this operation is specialized for certain
  // barrier types. Semantically, it should be thought of as a call to the
  // virtual "_work" function below, which must implement the barrier.)
  // First the pre-write versions...
  template <class T> inline void write_ref_field_pre(T* field, oop new_val);
private:
  // Keep this private so as to catch violations at build time.
  virtual void write_ref_field_pre_work(void* field, oop new_val) { guarantee(false, "Not needed"); }
protected:
  virtual void write_ref_field_pre_work(oop* field, oop new_val) {}
  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {}
public:

  // ...then the post-write version.
  inline void write_ref_field(void* field, oop new_val, bool release = false);
protected:
  virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
public:

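  // Illustrative sketch, not part of this interface: a runtime store of an
  // oop into a heap field is expected to bracket the raw write with the
  // pre- and post-write barriers declared above. "barriered_store" is a
  // hypothetical helper shown only to make the ordering concrete:
  //
  //   template <class T> void barriered_store(T* field, oop new_val) {
  //     BarrierSet* bs = Universe::heap()->barrier_set();
  //     bs->write_ref_field_pre(field, new_val);  // e.g. SATB pre-barrier
  //     oopDesc::encode_store_heap_oop(field, new_val);
  //     bs->write_ref_field(field, new_val);      // e.g. card dirtying
  //   }
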
  // Invoke the barrier, if any, necessary when writing the "bytes"-byte
  // value(s) "val1" (and "val2") into the primitive "field".
  virtual void write_prim_field(HeapWord* field, size_t bytes,
                                juint val1, juint val2) = 0;

  // Operations on arrays, or general regions (e.g., for "clone") may be
  // optimized by some barriers.

  // The first six operations tell whether such an optimization exists for
  // the particular barrier.
  virtual bool has_read_ref_array_opt() = 0;
  virtual bool has_read_prim_array_opt() = 0;
  virtual bool has_write_ref_array_pre_opt() { return true; }
  virtual bool has_write_ref_array_opt() = 0;
  virtual bool has_write_prim_array_opt() = 0;

  virtual bool has_read_region_opt() = 0;
  virtual bool has_write_region_opt() = 0;

  // These operations should assert false unless the corresponding operation
  // above returns true. Otherwise, they should perform an appropriate
  // barrier for an array whose elements are all in the given memory region.
  virtual void read_ref_array(MemRegion mr) = 0;
  virtual void read_prim_array(MemRegion mr) = 0;

  // Below, "length" is the number of array elements being written.
  virtual void write_ref_array_pre(oop* dst, int length,
                                   bool dest_uninitialized = false) {}
  virtual void write_ref_array_pre(narrowOop* dst, int length,
                                   bool dest_uninitialized = false) {}
  // Below, "count" is the number of array elements being written, starting
  // at the address "start", which may not necessarily be HeapWord-aligned.
  inline void write_ref_array(HeapWord* start, size_t count);

  // Static versions, suitable for calling from generated code;
  // "count" is the number of array elements being written, starting with
  // "start", which may not necessarily be HeapWord-aligned.
  static void static_write_ref_array_pre(HeapWord* start, size_t count);
  static void static_write_ref_array_post(HeapWord* start, size_t count);

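  // Illustrative sketch, not part of this interface: a bulk copy of oop
  // array elements would typically issue the pre-barrier over the
  // destination, perform the copy, and then issue the post-barrier over the
  // same elements; generated stubs call the static versions above instead.
  // "barriered_arraycopy" is a hypothetical helper shown for illustration:
  //
  //   void barriered_arraycopy(oop* src, oop* dst, int length) {
  //     BarrierSet* bs = Universe::heap()->barrier_set();
  //     bs->write_ref_array_pre(dst, length);
  //     Copy::conjoint_oops_atomic(src, dst, (size_t) length);
  //     bs->write_ref_array((HeapWord*) dst, (size_t) length);
  //   }
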
protected:
  virtual void write_ref_array_work(MemRegion mr) = 0;
public:
  virtual void write_prim_array(MemRegion mr) = 0;

  virtual void read_region(MemRegion mr) = 0;

  // (For efficiency reasons, this operation is specialized for certain
  // barrier types. Semantically, it should be thought of as a call to the
  // virtual "_work" function below, which must implement the barrier.)
  inline void write_region(MemRegion mr);
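  // Illustrative sketch, not part of this interface: after a clone-style
  // bulk copy, the whole destination is handed to the barrier set in one
  // call; "new_obj" and "size_in_words" are illustrative locals naming the
  // freshly copied object and its size in HeapWords:
  //
  //   Universe::heap()->barrier_set()->
  //     write_region(MemRegion((HeapWord*) new_obj, size_in_words));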
protected:
  virtual void write_region_work(MemRegion mr) = 0;
public:
  // Inform the BarrierSet that the covered heap region has been resized to
  // "new_region" (possibly from a zero-sized region, for initialization).
  virtual void resize_covered_region(MemRegion new_region) = 0;

  // If the barrier set imposes any alignment restrictions on boundaries
  // within the heap, this function tells whether they are met.
  virtual bool is_aligned(HeapWord* addr) = 0;

  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const = 0;
};

#endif // SHARE_VM_MEMORY_BARRIERSET_HPP
--- EOF ---