/*
 * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP

#include "memory/genOopClosures.hpp"

/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;

// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN                                          \
  void do_oop(oop obj);                                           \
  template <class T> inline void do_oop_work(T* p) {              \
    T heap_oop = oopDesc::load_heap_oop(p);                       \
    if (!oopDesc::is_null(heap_oop)) {                            \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);      \
      do_oop(obj);                                                \
    }                                                             \
  }

// Applies the given oop closure to all oops in all klasses visited.
class CMKlassClosure : public KlassClosure {
  friend class CMSOopClosure;
  friend class CMSOopsInGenClosure;

  OopClosure* _oop_closure;

  // Used when _oop_closure couldn't be set in an initialization list.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }
 public:
  CMKlassClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) { }

  void do_klass(Klass* k);
};

// The base class for all CMS marking closures.
// It's used to proxy through the metadata to the oops defined in them.
class CMSOopClosure: public ExtendedOopClosure {
  CMKlassClosure _klass_closure;
 public:
  CMSOopClosure() : ExtendedOopClosure() {
    _klass_closure.initialize(this);
  }
  CMSOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
    _klass_closure.initialize(this);
  }

  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};

// TODO: This duplication of the CMSOopClosure class is only needed because
// some CMS OopClosures derive from OopsInGenClosure. It would be good
// to get rid of them completely.
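
// Both CMSOopClosure above and CMSOopsInGenClosure below pair with the
// DO_OOP_WORK_DEFN macro in the same way: the macro supplies do_oop_work(T*),
// which loads a (possibly narrow) oop, decodes it, and forwards the non-NULL
// result to do_oop(oop); the subclass's virtual do_oop(oop*)/do_oop(narrowOop*)
// overrides and the inline do_oop_nv variants simply delegate to do_oop_work.
// A minimal sketch of that wiring, using a hypothetical ExampleMarkClosure
// (illustrative only, not part of the collector):
//
//   class ExampleMarkClosure: public CMSOopClosure {
//    protected:
//     DO_OOP_WORK_DEFN   // declares do_oop(oop) and defines do_oop_work(T*)
//    public:
//     virtual void do_oop(oop* p)       { ExampleMarkClosure::do_oop_work(p); }
//     virtual void do_oop(narrowOop* p) { ExampleMarkClosure::do_oop_work(p); }
//     // do_oop(oop obj), declared by the macro, carries the marking logic
//     // and is defined out of line.
//   };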
class CMSOopsInGenClosure: public OopsInGenClosure {
  CMKlassClosure _klass_closure;
 public:
  CMSOopsInGenClosure() {
    _klass_closure.initialize(this);
  }

  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};

class MarkRefsIntoClosure: public CMSOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _verification_bm;
  CMSBitMap*      _cms_bm;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSBitMap*    _mod_union_table;
  CMSMarkStack* _mark_stack;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// In the parallel case, the bit map and the
// reference processor are currently both shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
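// As a rough sketch of what that means for the closure that follows (the
// authoritative logic is Par_PushAndMarkClosure::do_oop in the .cpp file;
// helper names such as par_mark and par_push_on_overflow_list are assumed
// here, and details like overflow simulation are omitted):
//
//   HeapWord* addr = (HeapWord*)obj;
//   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
//     if (_bit_map->par_mark(addr)) {        // CAS into the shared bit map
//       // This thread won the race to mark the object; make it grey by
//       // queueing it for later scanning.
//       if (!_work_queue->push(obj)) {
//         _collector->par_push_on_overflow_list(obj);  // queue full
//       }
//     } // else some other thread claimed the object first; nothing to do.
//   }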
class Par_PushAndMarkClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
 private:
  MemRegion          _span;
  CMSBitMap*         _bit_map;
  CMSMarkStack*      _mark_stack;
  PushAndMarkClosure _pushAndMarkClosure;
  CMSCollector*      _collector;
  Mutex*             _freelistLock;
  bool               _yield;
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};

// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
 private:
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
  const uint    _low_water_mark;
  Par_PushAndMarkClosure _par_pushAndMarkClosure;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }

  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void trim_queue(uint size);
};

// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
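// The decision it makes for each reference can be sketched roughly as
// follows (illustrative only; the real logic is PushOrMarkClosure::do_oop
// in the .cpp file, which also handles yielding and overflow testing):
//
//   HeapWord* addr = (HeapWord*)obj;
//   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
//     _bitMap->mark(addr);                   // object becomes grey
//     if (addr < _finger) {
//       // The marking iteration has already passed this address, so the
//       // object must be remembered explicitly on the mark stack.
//       if (!_markStack->push(obj)) {
//         handle_stack_overflow(addr);
//       }
//     }
//     // else: the iteration driven by the parent MarkFromRootsClosure will
//     // reach the object when the finger advances, so marking suffices.
//   }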
class PushOrMarkClosure: public CMSOopClosure {
 private:
  CMSCollector*   _collector;
  MemRegion       _span;
  CMSBitMap*      _bitMap;
  CMSMarkStack*   _markStack;
  HeapWord* const _finger;
  MarkFromRootsClosure* const
                  _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public CMSOopClosure {
 private:
  CMSCollector*    _collector;
  MemRegion        _whole_span;
  MemRegion        _span;        // local chunk
  CMSBitMap*       _bit_map;
  OopTaskQueue*    _work_queue;
  CMSMarkStack*    _overflow_stack;
  HeapWord*  const _finger;
  HeapWord** const _global_finger_addr;
  Par_MarkFromRootsClosure* const
                   _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
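// In broad strokes (a hedged sketch: the actual call site is in the
// collector's reference-processing code, and the variable names below are
// illustrative), the closure is handed to the ReferenceProcessor as its
// "keep alive" closure, roughly:
//
//   CMSKeepAliveClosure keep_alive(collector, span, bit_map, mark_stack,
//                                  false /* not precleaning */);
//   rp->process_discovered_references(&is_alive, &keep_alive,
//                                     &drain_mark_stack /* complete_gc */,
//                                     ...);
//
// Each referent kept alive is marked and pushed on the mark stack, and the
// subsequent drain of that stack traces it transitively.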
class CMSKeepAliveClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  const MemRegion _span;
  CMSMarkStack* _mark_stack;
  CMSBitMap*    _bit_map;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      bool cpc);
  bool concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};

// Marks objects in the bit map and pushes them on a work queue; serves as
// the _mark_and_push helper of CMSParKeepAliveClosure below.
class CMSInnerParMarkAndPushClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};

// A parallel (MT) version of CMSKeepAliveClosure above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public CMSOopClosure {
 private:
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
  CMSInnerParMarkAndPushClosure
                _mark_and_push;
  const uint    _low_water_mark;
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP