src/share/vm/memory/referenceProcessor.hpp
rev 6446 : [mq]: ref-write-new-fix


  82   // Set value depending on UseCompressedOops. This could be a template class
  83   // but then we have to fix all the instantiations and declarations that use this class.
  84   oop       _oop_head;
  85   narrowOop _compressed_head;
  86   size_t _len;
  87 };
  88 
  89 // Iterator for the list of discovered references.
  90 class DiscoveredListIterator {
  91 private:
  92   DiscoveredList&    _refs_list;
  93   HeapWord*          _prev_next;
  94   oop                _prev;
  95   oop                _ref;
  96   HeapWord*          _discovered_addr;
  97   oop                _next;
  98   HeapWord*          _referent_addr;
  99   oop                _referent;
 100   OopClosure*        _keep_alive;
 101   BoolObjectClosure* _is_alive;
 102   bool               _discovered_list_needs_post_barrier;
 103 
 104   DEBUG_ONLY(
 105   oop                _first_seen; // cyclic linked list check
 106   )
 107 
 108   NOT_PRODUCT(
 109   size_t             _processed;
 110   size_t             _removed;
 111   )
 112 
 113 public:
 114   inline DiscoveredListIterator(DiscoveredList&    refs_list,
 115                                 OopClosure*        keep_alive,
 116                                 BoolObjectClosure* is_alive,
 117                                 bool               discovered_list_needs_post_barrier = false):
 118     _refs_list(refs_list),
 119     _prev_next(refs_list.adr_head()),
 120     _prev(NULL),
 121     _ref(refs_list.head()),
 122 #ifdef ASSERT
 123     _first_seen(refs_list.head()),
 124 #endif
 125 #ifndef PRODUCT
 126     _processed(0),
 127     _removed(0),
 128 #endif
 129     _next(NULL),
 130     _keep_alive(keep_alive),
 131     _is_alive(is_alive),
 132     _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier)
 133 { }
 134 
 135   // End Of List.
 136   inline bool has_next() const { return _ref != NULL; }
 137 
 138   // Get oop to the Reference object.
 139   inline oop obj() const { return _ref; }
 140 
 141   // Get oop to the referent object.
 142   inline oop referent() const { return _referent; }
 143 
 144   // Returns true if referent is alive.
 145   inline bool is_referent_alive() const {
 146     return _is_alive->do_object_b(_referent);
 147   }
 148 
 149   // Loads data for the current reference.
 150   // The "allow_null_referent" argument tells us to allow for the possibility
 151   // of a NULL referent in the discovered Reference object. This typically
 152   // happens in the case of concurrent collectors that may have done the

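For orientation, a minimal sketch of how a collector might walk one DiscoveredList with this iterator follows. It is not code from this changeset: prune_list() is a hypothetical helper, and load_ptrs(), remove(), move_to_next() and next() are iterator members declared outside the hunk shown above.

    // Sketch only: drop entries whose referent has been cleared or is still
    // strongly reachable, keeping the rest on the discovered list.
    void prune_list(DiscoveredList&    refs_list,
                    BoolObjectClosure* is_alive,
                    OopClosure*        keep_alive,
                    bool               needs_post_barrier) {
      DiscoveredListIterator iter(refs_list, keep_alive, is_alive, needs_post_barrier);
      while (iter.has_next()) {
        iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
        if (iter.referent() == NULL || iter.is_referent_alive()) {
          iter.remove();          // unlink the current entry
          iter.move_to_next();
        } else {
          iter.next();            // keep this entry and advance
        }
      }
    }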

 213 class ReferenceProcessor : public CHeapObj<mtGC> {
 214 
 215  private:
 216   size_t total_count(DiscoveredList lists[]);
 217 
 218  protected:
 219   // Compatibility with pre-4965777 JDKs
 220   static bool _pending_list_uses_discovered_field;
 221 
 222   // The SoftReference master timestamp clock
 223   static jlong _soft_ref_timestamp_clock;
 224 
 225   MemRegion   _span;                    // (right-open) interval of heap
 226                                         // subject to wkref discovery
 227 
 228   bool        _discovering_refs;        // true when discovery enabled
 229   bool        _discovery_is_atomic;     // if discovery is atomic wrt
 230                                         // other collectors in configuration
 231   bool        _discovery_is_mt;         // true if reference discovery is MT.
 232 
 233   // If true, setting "next" field of a discovered refs list requires
 234   // write post barrier.  (Must be true if used in a collector in which
 235   // elements of a discovered list may be moved during discovery: for
 236   // example, a collector like Garbage-First that moves objects during a
 237   // long-term concurrent marking phase that does weak reference
 238   // discovery.)
 239   bool        _discovered_list_needs_post_barrier;
 240 
 241   bool        _enqueuing_is_done;       // true if all weak references enqueued
 242   bool        _processing_is_mt;        // true during phases when
 243                                         // reference processing is MT.
 244   uint        _next_id;                 // round-robin mod _num_q counter in
 245                                         // support of work distribution
 246 
 247   // For collectors that do not keep GC liveness information
 248   // in the object header, this field holds a closure that
 249   // helps the reference processor determine the reachability
 250   // of an oop. It is currently initialized to NULL for all
 251   // collectors except for CMS and G1.
 252   BoolObjectClosure* _is_alive_non_header;
 253 
 254   // Soft ref clearing policies
 255   // . the default policy
 256   static ReferencePolicy*   _default_soft_ref_policy;
 257   // . the "clear all" policy
 258   static ReferencePolicy*   _always_clear_soft_ref_policy;
 259   // . the current policy below is either one of the above
 260   ReferencePolicy*          _current_soft_ref_policy;

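The comment above explains why a collector that moves objects during concurrent marking (G1 is the example given) needs a post barrier when discovered-list links are written. As an illustration only (the real code lives in referenceProcessor.cpp and may differ), a flag-guarded store could look like the sketch below, using the set_discovered_raw()/discovered_addr() accessors from javaClasses.hpp and the oopDesc::bs() barrier-set hook; set_discovered() itself is declared in the protected section further down in this file.

    // Illustration: raw store into Reference.discovered, followed by a post
    // barrier only when the discovered lists may be scanned or moved concurrently.
    void ReferenceProcessor::set_discovered(oop ref, oop value) {
      java_lang_ref_Reference::set_discovered_raw(ref, value);
      if (_discovered_list_needs_post_barrier) {
        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(ref), value);
      }
    }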

 365   // Delete entries in the discovered lists that have
 366   // either a null referent or are not active. Such
 367   // Reference objects can result from the clearing
 368   // or enqueueing of Reference objects concurrent
 369   // with their discovery by a (concurrent) collector.
 370   // For a definition of "active" see java.lang.ref.Reference;
 371   // Refs are born active, become inactive when enqueued,
 372   // and never become active again. The state of being
 373   // active is encoded as follows: A Ref is active
 374   // if and only if its "next" field is NULL.
 375   void clean_up_discovered_references();
 376   void clean_up_discovered_reflist(DiscoveredList& refs_list);
 377 
 378   // Returns the name of the discovered reference list
 379   // occupying the i / _num_q slot.
 380   const char* list_name(uint i);
 381 
 382   void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
 383 
 384  protected:
 385   // Set the 'discovered' field of the given reference to
 386   // the given value - emitting post barriers depending upon
 387   // the value of _discovered_list_needs_post_barrier.
 388   void set_discovered(oop ref, oop value);
 389 
 390   // "Preclean" the given discovered reference list
 391   // by removing references with strongly reachable referents.
 392   // Currently used in support of CMS only.
 393   void preclean_discovered_reflist(DiscoveredList&    refs_list,
 394                                    BoolObjectClosure* is_alive,
 395                                    OopClosure*        keep_alive,
 396                                    VoidClosure*       complete_gc,
 397                                    YieldClosure*      yield);
 398 
 399   // round-robin mod _num_q (note: _not_ mod _max_num_q)
 400   uint next_id() {
 401     uint id = _next_id;
 402     if (++_next_id == _num_q) {
 403       _next_id = 0;
 404     }
 405     return id;
 406   }
 407   DiscoveredList* get_discovered_list(ReferenceType rt);
 408   inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
 409                                         HeapWord* discovered_addr);
 410   void verify_ok_to_handle_reflists() PRODUCT_RETURN;
 411 
 412   void clear_discovered_references(DiscoveredList& refs_list);
 413   void abandon_partial_discovered_list(DiscoveredList& refs_list);
 414 
 415   // Calculate the number of jni handles.
 416   unsigned int count_jni_refs();
 417 
 418   // Balances reference queues.
 419   void balance_queues(DiscoveredList ref_lists[]);
 420 
 421   // Update (advance) the soft ref master clock field.
 422   void update_soft_ref_master_clock();
 423 
 424  public:
 425   // Default parameters give you a vanilla reference processor.
 426   ReferenceProcessor(MemRegion span,
 427                      bool mt_processing = false, uint mt_processing_degree = 1,
 428                      bool mt_discovery  = false, uint mt_discovery_degree  = 1,
 429                      bool atomic_discovery = true,
 430                      BoolObjectClosure* is_alive_non_header = NULL,
 431                      bool discovered_list_needs_post_barrier = false);
 432 
 433   // RefDiscoveryPolicy values
 434   enum DiscoveryPolicy {
 435     ReferenceBasedDiscovery = 0,
 436     ReferentBasedDiscovery  = 1,
 437     DiscoveryPolicyMin      = ReferenceBasedDiscovery,
 438     DiscoveryPolicyMax      = ReferentBasedDiscovery
 439   };
 440 
 441   static void init_statics();
 442 
 443  public:
 444   // get and set "is_alive_non_header" field
 445   BoolObjectClosure* is_alive_non_header() {
 446     return _is_alive_non_header;
 447   }
 448   void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
 449     _is_alive_non_header = is_alive_non_header;
 450   }
 451 

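The constructor above defaults to a single-threaded, atomic-discovery processor. A hedged example of how a concurrent moving collector might construct one against this version of the interface (heap_span and g1_is_alive are hypothetical stand-ins; the casts are shown because ParallelGCThreads is a uintx flag):

    // Sketch: MT discovery and processing, non-atomic discovery, and post
    // barriers on discovered-list updates, as a G1-like collector would want.
    ReferenceProcessor* rp =
        new ReferenceProcessor(heap_span,                         // discovery span
                               true, (uint) ParallelGCThreads,    // MT processing
                               true, (uint) ParallelGCThreads,    // MT discovery
                               false,                             // non-atomic discovery
                               &g1_is_alive,                      // liveness closure
                               true);                             // lists need post barrier

The listing that follows is the other side of this sdiff: the same header, except that the _discovered_list_needs_post_barrier fields, their explanatory comment, the extra constructor arguments and the set_discovered() declaration are absent.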



  82   // Set value depending on UseCompressedOops. This could be a template class
  83   // but then we have to fix all the instantiations and declarations that use this class.
  84   oop       _oop_head;
  85   narrowOop _compressed_head;
  86   size_t _len;
  87 };
  88 
  89 // Iterator for the list of discovered references.
  90 class DiscoveredListIterator {
  91 private:
  92   DiscoveredList&    _refs_list;
  93   HeapWord*          _prev_next;
  94   oop                _prev;
  95   oop                _ref;
  96   HeapWord*          _discovered_addr;
  97   oop                _next;
  98   HeapWord*          _referent_addr;
  99   oop                _referent;
 100   OopClosure*        _keep_alive;
 101   BoolObjectClosure* _is_alive;

 102 
 103   DEBUG_ONLY(
 104   oop                _first_seen; // cyclic linked list check
 105   )
 106 
 107   NOT_PRODUCT(
 108   size_t             _processed;
 109   size_t             _removed;
 110   )
 111 
 112 public:
 113   inline DiscoveredListIterator(DiscoveredList&    refs_list,
 114                                 OopClosure*        keep_alive,
 115                                 BoolObjectClosure* is_alive):

 116     _refs_list(refs_list),
 117     _prev_next(refs_list.adr_head()),
 118     _prev(NULL),
 119     _ref(refs_list.head()),
 120 #ifdef ASSERT
 121     _first_seen(refs_list.head()),
 122 #endif
 123 #ifndef PRODUCT
 124     _processed(0),
 125     _removed(0),
 126 #endif
 127     _next(NULL),
 128     _keep_alive(keep_alive),
 129     _is_alive(is_alive)

 130 { }
 131 
 132   // End Of List.
 133   inline bool has_next() const { return _ref != NULL; }
 134 
 135   // Get oop to the Reference object.
 136   inline oop obj() const { return _ref; }
 137 
 138   // Get oop to the referent object.
 139   inline oop referent() const { return _referent; }
 140 
 141   // Returns true if referent is alive.
 142   inline bool is_referent_alive() const {
 143     return _is_alive->do_object_b(_referent);
 144   }
 145 
 146   // Loads data for the current reference.
 147   // The "allow_null_referent" argument tells us to allow for the possibility
 148   // of a NULL referent in the discovered Reference object. This typically
 149   // happens in the case of concurrent collectors that may have done the

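In this pane the iterator constructor takes no post-barrier flag, so construction is simply the three-argument form below (keep_alive and is_alive stand for the caller's closures); otherwise the iteration sketch shown after the first listing applies unchanged.

    DiscoveredListIterator iter(refs_list, keep_alive, is_alive);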

 210 class ReferenceProcessor : public CHeapObj<mtGC> {
 211 
 212  private:
 213   size_t total_count(DiscoveredList lists[]);
 214 
 215  protected:
 216   // Compatibility with pre-4965777 JDKs
 217   static bool _pending_list_uses_discovered_field;
 218 
 219   // The SoftReference master timestamp clock
 220   static jlong _soft_ref_timestamp_clock;
 221 
 222   MemRegion   _span;                    // (right-open) interval of heap
 223                                         // subject to wkref discovery
 224 
 225   bool        _discovering_refs;        // true when discovery enabled
 226   bool        _discovery_is_atomic;     // if discovery is atomic wrt
 227                                         // other collectors in configuration
 228   bool        _discovery_is_mt;         // true if reference discovery is MT.
 229 








 230   bool        _enqueuing_is_done;       // true if all weak references enqueued
 231   bool        _processing_is_mt;        // true during phases when
 232                                         // reference processing is MT.
 233   uint        _next_id;                 // round-robin mod _num_q counter in
 234                                         // support of work distribution
 235 
 236   // For collectors that do not keep GC liveness information
 237   // in the object header, this field holds a closure that
 238   // helps the reference processor determine the reachability
 239   // of an oop. It is currently initialized to NULL for all
 240   // collectors except for CMS and G1.
 241   BoolObjectClosure* _is_alive_non_header;
 242 
 243   // Soft ref clearing policies
 244   // . the default policy
 245   static ReferencePolicy*   _default_soft_ref_policy;
 246   // . the "clear all" policy
 247   static ReferencePolicy*   _always_clear_soft_ref_policy;
 248   // . the current policy below is either one of the above
 249   ReferencePolicy*          _current_soft_ref_policy;

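The _is_alive_non_header comment above describes the liveness closure that collectors such as CMS and G1 register because they do not keep liveness information in the object header. A minimal sketch of such a closure follows; is_marked_live() is a hypothetical stand-in for the collector's real query against its marking data.

    bool is_marked_live(oop p);   // hypothetical: consults the collector's mark data

    class ExampleIsAliveClosure : public BoolObjectClosure {
     public:
      virtual bool do_object_b(oop p) {
        return p != NULL && is_marked_live(p);
      }
    };

    // Registered before reference processing starts, e.g.:
    //   rp->set_is_alive_non_header(&example_is_alive_instance);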

 354   // Delete entries in the discovered lists that have
 355   // either a null referent or are not active. Such
 356   // Reference objects can result from the clearing
 357   // or enqueueing of Reference objects concurrent
 358   // with their discovery by a (concurrent) collector.
 359   // For a definition of "active" see java.lang.ref.Reference;
 360   // Refs are born active, become inactive when enqueued,
 361   // and never become active again. The state of being
 362   // active is encoded as follows: A Ref is active
 363   // if and only if its "next" field is NULL.
 364   void clean_up_discovered_references();
 365   void clean_up_discovered_reflist(DiscoveredList& refs_list);
 366 
 367   // Returns the name of the discovered reference list
 368   // occupying the i / _num_q slot.
 369   const char* list_name(uint i);
 370 
 371   void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
 372 
 373  protected:





 374   // "Preclean" the given discovered reference list
 375   // by removing references with strongly reachable referents.
 376   // Currently used in support of CMS only.
 377   void preclean_discovered_reflist(DiscoveredList&    refs_list,
 378                                    BoolObjectClosure* is_alive,
 379                                    OopClosure*        keep_alive,
 380                                    VoidClosure*       complete_gc,
 381                                    YieldClosure*      yield);
 382 
 383   // round-robin mod _num_q (note: _not_ mod _max_num_q)
 384   uint next_id() {
 385     uint id = _next_id;
 386     if (++_next_id == _num_q) {
 387       _next_id = 0;
 388     }
 389     return id;
 390   }
 391   DiscoveredList* get_discovered_list(ReferenceType rt);
 392   inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
 393                                         HeapWord* discovered_addr);
 394   void verify_ok_to_handle_reflists() PRODUCT_RETURN;
 395 
 396   void clear_discovered_references(DiscoveredList& refs_list);
 397   void abandon_partial_discovered_list(DiscoveredList& refs_list);
 398 
 399   // Calculate the number of jni handles.
 400   unsigned int count_jni_refs();
 401 
 402   // Balances reference queues.
 403   void balance_queues(DiscoveredList ref_lists[]);
 404 
 405   // Update (advance) the soft ref master clock field.
 406   void update_soft_ref_master_clock();
 407 
 408  public:
 409   // Default parameters give you a vanilla reference processor.
 410   ReferenceProcessor(MemRegion span,
 411                      bool mt_processing = false, uint mt_processing_degree = 1,
 412                      bool mt_discovery  = false, uint mt_discovery_degree  = 1,
 413                      bool atomic_discovery = true,
 414                      BoolObjectClosure* is_alive_non_header = NULL);

 415 
 416   // RefDiscoveryPolicy values
 417   enum DiscoveryPolicy {
 418     ReferenceBasedDiscovery = 0,
 419     ReferentBasedDiscovery  = 1,
 420     DiscoveryPolicyMin      = ReferenceBasedDiscovery,
 421     DiscoveryPolicyMax      = ReferentBasedDiscovery
 422   };
 423 
 424   static void init_statics();
 425 
 426  public:
 427   // get and set "is_alive_non_header" field
 428   BoolObjectClosure* is_alive_non_header() {
 429     return _is_alive_non_header;
 430   }
 431   void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
 432     _is_alive_non_header = is_alive_non_header;
 433   }
 434 

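For illustration of next_id() above: with _num_q == 4, successive calls return 0, 1, 2, 3, 0, 1, ... so single-threaded discovery can spread Reference objects evenly across the _num_q lists of a per-type array. A hedged one-liner (lists[] stands for one of the per-type DiscoveredList arrays, which live in a part of the header not shown here):

    DiscoveredList& refs_list = lists[next_id()];   // round-robin list selection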
