ZGC heap iterator: visits all reachable objects, starting from both the stop-the-world and concurrent root sets.

0 /*
1  * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3  *
4  * This code is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License version 2 only, as
6  * published by the Free Software Foundation.
7  *
8  * This code is distributed in the hope that it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
11  * version 2 for more details (a copy is included in the LICENSE file that
12  * accompanied this code).
13  *
14  * You should have received a copy of the GNU General Public License version
15  * 2 along with this work; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19  * or visit www.oracle.com if you need additional information or have any
20  * questions.
21  */
22 
23 #include "precompiled.hpp"
24 #include "gc/z/zAddressRangeMap.inline.hpp"
25 #include "gc/z/zBarrier.inline.hpp"
26 #include "gc/z/zGlobals.hpp"
27 #include "gc/z/zHeapIterator.hpp"
28 #include "gc/z/zOop.inline.hpp"
29 #include "gc/z/zRootsIterator.hpp"
30 #include "memory/iterator.inline.hpp"
31 #include "oops/oop.inline.hpp"
32 #include "utilities/bitMap.inline.hpp"
33 #include "utilities/stack.inline.hpp"
34 
35 class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
36 private:
37   CHeapBitMap _map;
38 
39 public:
40   ZHeapIteratorBitMap(size_t size_in_bits) :
41       _map(size_in_bits) {}
42 
43   bool try_set_bit(size_t index) {
44     if (_map.at(index)) {
45       return false;
46     }
47 
48     _map.set_bit(index);
49     return true;
50   }
51 };
52 
53 class ZHeapIteratorRootOopClosure : public OopClosure {
54 private:
55   ZHeapIterator* const _iter;
56   ObjectClosure* const _cl;
57 
58 public:
59   ZHeapIteratorRootOopClosure(ZHeapIterator* iter, ObjectClosure* cl) :
60       _iter(iter),
61       _cl(cl) {}
62 
63   virtual void do_oop(oop* p) {
64     // Load barrier needed here for the same reason we
65     // need fixup_partial_loads() in ZHeap::mark_end()
66     const oop obj = ZBarrier::load_barrier_on_oop_field(p);
67     _iter->push(obj);
68     _iter->drain(_cl);
69   }
70 
71   virtual void do_oop(narrowOop* p) {
72     ShouldNotReachHere();
73   }
74 };
75 
76 class ZHeapIteratorPushOopClosure : public BasicOopIterateClosure {
77 private:
78   ZHeapIterator* const _iter;
79   const oop            _base;
80   const bool           _visit_referents;
81 
82 public:
83   ZHeapIteratorPushOopClosure(ZHeapIterator* iter, oop base) :
84       _iter(iter),
85       _base(base),
86       _visit_referents(iter->visit_referents()) {}
87 
88   oop load_oop(oop* p) {
89     if (_visit_referents) {
90       return HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
91     } else {
92       return HeapAccess<>::oop_load(p);
93     }
94   }
95 
96   virtual ReferenceIterationMode reference_iteration_mode() {
97     return _visit_referents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
98   }
99 
100   virtual void do_oop(oop* p) {
101     const oop obj = load_oop(p);
102     _iter->push(obj);
103   }
104 
105   virtual void do_oop(narrowOop* p) {
106     ShouldNotReachHere();
107   }
108 
109 #ifdef ASSERT
110   virtual bool should_verify_oops() {
111     return false;
112   }
113 #endif
114 };
115 
// visit_referents - if true, Reference referent fields are visited as
// ordinary fields during object iteration (see DO_FIELDS in the push
// closure); if false, referents are skipped (DO_FIELDS_EXCEPT_REFERENT).
ZHeapIterator::ZHeapIterator(bool visit_referents) :
    _visit_stack(),
    _visit_map(),
    _visit_referents(visit_referents) {}
120 
121 ZHeapIterator::~ZHeapIterator() {
122   ZVisitMapIterator iter(&_visit_map);
123   for (ZHeapIteratorBitMap* map; iter.next(&map);) {
124     delete map;
125   }
126 }
127 
// Maximum number of distinct object start positions within a page of
// the smallest page size, assuming the smallest object alignment. This
// bounds the size (in bits) of each per-page visit bitmap.
size_t ZHeapIterator::object_index_max() const {
  return ZPageSizeMin >> ZObjectAlignmentSmallShift;
}
131 
132 size_t ZHeapIterator::object_index(oop obj) const {
133   const uintptr_t addr = ZOop::to_address(obj);
134   const uintptr_t offset = ZAddress::offset(addr);
135   const uintptr_t mask = (1 << ZPageSizeMinShift) - 1;
136   return (offset & mask) >> ZObjectAlignmentSmallShift;
137 }
138 
139 ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {
140   const uintptr_t addr = ZOop::to_address(obj);
141   ZHeapIteratorBitMap* map = _visit_map.get(addr);
142   if (map == NULL) {
143     map = new ZHeapIteratorBitMap(object_index_max());
144     _visit_map.put(addr, map);
145   }
146 
147   return map;
148 }
149 
150 void ZHeapIterator::push(oop obj) {
151   if (obj == NULL) {
152     // Ignore
153     return;
154   }
155 
156   ZHeapIteratorBitMap* const map = object_map(obj);
157   const size_t index = object_index(obj);
158   if (!map->try_set_bit(index)) {
159     // Already pushed
160     return;
161   }
162 
163   // Push
164   _visit_stack.push(obj);
165 }
166 
167 void ZHeapIterator::drain(ObjectClosure* cl) {
168   while (!_visit_stack.is_empty()) {
169     const oop obj = _visit_stack.pop();
170 
171     // Visit
172     cl->do_object(obj);
173 
174     // Push members to visit
175     ZHeapIteratorPushOopClosure push_cl(this, obj);
176     obj->oop_iterate(&push_cl);
177   }
178 }
179 
// Returns true if Reference referent fields are visited as ordinary
// fields during object iteration (set at construction time).
bool ZHeapIterator::visit_referents() const {
  return _visit_referents;
}
183 
// Visits every object reachable from the roots, calling cl->do_object()
// exactly once per reachable object.
// NOTE(review): ZRootsIterator/ZConcurrentRootsIterator constructors and
// destructors presumably perform setup/teardown work — preserve the
// declaration order below; confirm against zRootsIterator.hpp.
void ZHeapIterator::objects_do(ObjectClosure* cl) {
  ZHeapIteratorRootOopClosure root_cl(this, cl);
  ZRootsIterator roots;
  ZConcurrentRootsIterator concurrent_roots;

  // Follow roots. Note that we also visit the JVMTI weak tag map
  // as if they were strong roots to make sure we visit all tagged
  // objects, even those that might now have become unreachable.
  // If we didn't do this the user would have expected to see
  // ObjectFree events for unreachable objects in the tag map.
  roots.oops_do(&root_cl, true /* visit_jvmti_weak_export */);
  concurrent_roots.oops_do(&root_cl);
}
--- EOF ---