 public:
  // Operations on arrays, or general regions (e.g., for "clone") may be
  // optimized by some barriers.

  // Below length is the # array elements being written.
  // Default is a no-op ({}); barriers needing a pre-write action override.
  virtual void write_ref_array_pre(oop* dst, int length,
                                   bool dest_uninitialized = false) {}
  virtual void write_ref_array_pre(narrowOop* dst, int length,
                                   bool dest_uninitialized = false) {}
  // Below count is the # array elements being written, starting
  // at the address "start", which may not necessarily be HeapWord-aligned.
  inline void write_ref_array(HeapWord* start, size_t count);

  // Static versions, suitable for calling from generated code;
  // count is # array elements being written, starting with "start",
  // which may not necessarily be HeapWord-aligned.
  static void static_write_ref_array_pre(HeapWord* start, size_t count);
  static void static_write_ref_array_post(HeapWord* start, size_t count);

  // Hooks invoked around stores of oop references embedded in an nmethod.
  // No-op by default; presumably overridden by barriers that must track
  // such references — NOTE(review): confirm against subclasses.
  virtual void write_ref_nmethod_pre(oop* dst, nmethod* nm) {}
  virtual void write_ref_nmethod_post(oop* dst, nmethod* nm) {}

 protected:
  // Implements the array write barrier over the region "mr";
  // called on behalf of write_ref_array above.
  virtual void write_ref_array_work(MemRegion mr) = 0;

 public:
  // (For efficiency reasons, this operation is specialized for certain
  // barrier types. Semantically, it should be thought of as a call to the
  // virtual "_work" function below, which must implement the barrier.)
  void write_region(MemRegion mr);

 protected:
  // Barrier implementation for write_region; must be supplied by subclasses.
  virtual void write_region_work(MemRegion mr) = 0;

 public:
  // Inform the BarrierSet that the covered heap region that starts
  // with "base" has been changed to have the given size (possibly from 0,
  // for initialization.)
  virtual void resize_covered_region(MemRegion new_region) = 0;

  // If the barrier set imposes any alignment restrictions on boundaries
  // within the heap, this function tells whether they are met.