/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP

#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessorStats.hpp"
#include "memory/referenceType.hpp"
#include "oops/instanceRefKlass.hpp"

class GCTimer;

// ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally. Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _oop_head(NULL), _compressed_head(0), _len(0) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool   is_empty() const       { return head() == NULL; }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len; }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
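
// A minimal usage sketch (illustrative only; `ref` stands for some
// discovered Reference oop). Callers always go through the accessors,
// which hide whether the head is stored compressed or uncompressed:
//
//   DiscoveredList list;
//   list.set_head(ref);        // encoded as a narrowOop under UseCompressedOops
//   list.inc_length(1);
//   if (!list.is_empty()) {
//     oop head = list.head();  // decoded back to a full oop
//   }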

// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;

  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;
  bool               _discovered_list_needs_post_barrier;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive,
                                bool               discovered_list_needs_post_barrier = false):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive),
    _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier)
  { }

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list
  void remove();

  // Make the Reference object active again.
  void make_active();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
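
// A minimal sketch of the iteration pattern this class supports (the
// process_phase* methods in referenceProcessor.cpp follow this shape):
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // Referent is reachable after all: unlink the Reference and
//       // update the referent pointer in case the referent was moved.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }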

class ReferenceProcessor : public CHeapObj<mtGC> {

 private:
  size_t total_count(DiscoveredList lists[]);

 protected:
  // Compatibility with pre-4965777 JDKs
  static bool        _pending_list_uses_discovered_field;

  // The SoftReference master timestamp clock
  static jlong       _soft_ref_timestamp_clock;

  MemRegion   _span;                 // (right-open) interval of heap
                                     // subject to wkref discovery

  bool        _discovering_refs;     // true when discovery enabled
  bool        _discovery_is_atomic;  // if discovery is atomic wrt
                                     // other collectors in configuration
  bool        _discovery_is_mt;      // true if reference discovery is MT.

  // If true, setting the "next" field of a discovered refs list requires
  // a write post barrier. (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_post_barrier;

  bool        _enqueuing_is_done;    // true if all weak references enqueued
  bool        _processing_is_mt;     // true during phases when
                                     // reference processing is MT.
  uint        _next_id;              // round-robin mod _num_q counter in
                                     // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint            _num_q;
  // The maximum MT'ness degree of the queues below
  uint            _max_num_q;

  // Master array of discovered oops
  DiscoveredList* _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  uint num_q()                      { return _num_q; }
  uint max_num_q()                  { return _max_num_q; }
  void set_active_mt_degree(uint v) { _num_q = v; }

  DiscoveredList* discovered_refs() { return _discovered_refs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
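
  // A minimal usage sketch for setup_policy (the clear_all flag here is
  // illustrative; collectors derive it from their own heuristics, such as
  // an explicit GC request or an imminent out-of-memory condition):
  //
  //   bool clear_all = ...;
  //   ReferencePolicy* policy = rp->setup_policy(clear_all);
  //   // The snapshotted policy is later consulted during phase 1 to
  //   // decide which soft referents must be kept alive.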

  // Process references with a certain reachability level.
  size_t process_discovered_reflist(DiscoveredList               refs_lists[],
                                    ReferencePolicy*             policy,
                                    bool                         clear_referent,
                                    BoolObjectClosure*           is_alive,
                                    OopClosure*                  keep_alive,
                                    VoidClosure*                 complete_gc,
                                    AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&    refs_list,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                     BoolObjectClosure* is_alive,
                                     OopClosure*        keep_alive,
                                     VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      GCTimer*           gc_timer);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);
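
  // A simplified sketch of how the phases compose, per discovered list,
  // inside process_discovered_reflist (serial case; the MT case runs
  // each phase through a ProcessTask instead):
  //
  //   if (policy != NULL) {   // only SoftReferences carry a policy
  //     process_phase1(list, policy, is_alive, keep_alive, complete_gc);
  //   }
  //   process_phase2(list, is_alive, keep_alive, complete_gc);
  //   process_phase3(list, clear_referent, is_alive, keep_alive, complete_gc);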

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // Set the 'discovered' field of the given reference to
  // the given value - emitting post barriers depending upon
  // the value of _discovered_list_needs_post_barrier.
  void set_discovered(oop ref, oop value);

  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  uint next_id() {
    uint id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL,
                     bool discovered_list_needs_post_barrier = false);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs);
  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled() { return _discovering_refs; }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }
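
  // A hedged sketch of the discovery lifecycle a collector typically
  // drives (the closure and flag names are the collector's own):
  //
  //   rp->enable_discovery(true /* verify_disabled */, false /* check_no_refs */);
  //   // ... trace the heap; discover_reference() is called as Reference
  //   //     objects are encountered during marking ...
  //   rp->setup_policy(clear_all_soft_refs);
  //   ReferenceProcessorStats stats =
  //       rp->process_discovered_references(&is_alive, &keep_alive,
  //                                         &complete_gc, NULL, gc_timer);
  //   rp->enqueue_discovered_references();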

  // whether the JDK in which we are embedded is a pre-4965777 JDK,
  // and thus whether or not it uses the discovered field to chain
  // the entries in the pending list.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueueing of weak references is complete
  bool enqueuing_is_done()           { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);  // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();
  void verify_list(DiscoveredList& ref_list);

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  ReferenceProcessorStats
  process_discovered_references(BoolObjectClosure*           is_alive,
                                OopClosure*                  keep_alive,
                                VoidClosure*                 complete_gc,
                                AbstractRefProcTaskExecutor* task_executor,
                                GCTimer*                     gc_timer);

  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    }
  }
};


// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};
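
// A minimal usage sketch for the scoped helpers above and below
// (identifiers in the body are illustrative):
//
//   {
//     NoRefDiscovery no_discovery(rp);       // discovery off for this scope
//     ReferenceProcessorSpanMutator guard(rp, young_gen_span);
//     // ... operate with the temporary settings ...
//   }  // destructors restore the previous discovery state and span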

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};


// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution for the
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { }
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};
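
// A minimal sketch of a concrete ProcessTask (a hypothetical subclass in
// the spirit of the phase tasks defined in referenceProcessor.cpp):
//
//   class Phase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
//   public:
//     Phase2Task(ReferenceProcessor& rp, DiscoveredList refs_lists[])
//       : ProcessTask(rp, refs_lists, false /* marks_oops_alive */) { }
//     virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
//                       OopClosure& keep_alive, VoidClosure& complete_gc) {
//       // Each worker processes the discovered list in its own slot.
//       _ref_processor.process_phase2(_refs_lists[work_id],
//                                     &is_alive, &keep_alive, &complete_gc);
//     }
//   };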

// Abstract reference enqueueing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP