src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp

rev 57716 : [mq]: remove_cbl_mon

--- old/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp
   1 /*
   2  * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
  26 #define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
  27 
  28 #include "gc/g1/g1BufferNodeList.hpp"
  29 #include "gc/g1/g1FreeIdSet.hpp"
  30 #include "gc/shared/ptrQueue.hpp"
  31 #include "memory/allocation.hpp"
  32 
  33 class G1DirtyCardQueueSet;
  34 class G1RedirtyCardsQueueSet;
  35 class Thread;
  36 class Monitor;
  37 
  38 // A PtrQueue whose elements are pointers to dirty cards in the card table.
  39 class G1DirtyCardQueue: public PtrQueue {
  40 protected:
  41   virtual void handle_completed_buffer();
  42 
  43 public:
  44   G1DirtyCardQueue(G1DirtyCardQueueSet* qset);
  45 
  46   // Flush before destroying; queue may be used to capture pending work while
  47   // doing something else, with auto-flush on completion.
  48   ~G1DirtyCardQueue();
  49 
  50   // Process queue entries and release resources.
  51   void flush() { flush_impl(); }
  52 
  53   inline G1DirtyCardQueueSet* dirty_card_qset() const;
  54 
  55   // Compiler support.
  56   static ByteSize byte_offset_of_index() {
  57     return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
  58   }
  59   using PtrQueue::byte_width_of_index;
  60 
  61   static ByteSize byte_offset_of_buf() {
  62     return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
  63   }
  64   using PtrQueue::byte_width_of_buf;
  65 
  66 };
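
The flush-before-destroy contract above (a queue can capture pending work and then auto-flush when it goes out of scope) is essentially an RAII pattern. A minimal standalone sketch of that pattern, assuming nothing of the real PtrQueue machinery (ExampleQueueSet and ExampleScopedQueue are invented for the illustration):

#include <utility>
#include <vector>

// Stand-in for a queue set that collects completed work.
struct ExampleQueueSet {
  std::vector<void*> completed;
  void enqueue_completed(std::vector<void*> entries) {
    completed.insert(completed.end(), entries.begin(), entries.end());
  }
};

// Captures pending entries while in scope and flushes them on destruction,
// mirroring the "auto-flush on completion" usage described above.
class ExampleScopedQueue {
  ExampleQueueSet* _qset;
  std::vector<void*> _pending;
public:
  explicit ExampleScopedQueue(ExampleQueueSet* qset) : _qset(qset) {}
  void enqueue(void* entry) { _pending.push_back(entry); }
  void flush() {
    if (!_pending.empty()) {
      _qset->enqueue_completed(std::move(_pending));
      _pending.clear();
    }
  }
  ~ExampleScopedQueue() { flush(); }  // auto-flush any remaining work
};

A caller creates the scoped queue on the stack, enqueues into it while doing other work, and relies on the destructor to hand anything left over back to the set.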
  67 
  68 class G1DirtyCardQueueSet: public PtrQueueSet {
  69   Monitor* _cbl_mon;  // Protects the list and count members.
  70   BufferNode* _completed_buffers_head;
  71   BufferNode* _completed_buffers_tail;
  72 
  73   // Number of actual cards in the list of completed buffers.
  74   volatile size_t _num_cards;
  75 
  76   size_t _process_cards_threshold;
  77   volatile bool _process_completed_buffers;
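
_cbl_mon serializes access to the completed-buffer list and the counters around it. As a rough standalone illustration of that locking discipline, with std::mutex and std::condition_variable standing in for HotSpot's Monitor (every name here is invented for the sketch):

#include <condition_variable>
#include <cstddef>
#include <mutex>

struct ExampleNode { ExampleNode* next = nullptr; };

// Monitor-protected singly-linked list of completed buffers: every append
// takes the lock, links the node at the tail, bumps the card count, and
// wakes a waiting refinement thread (compare notify_if_necessary below).
class ExampleLockedBufferList {
  std::mutex _mon;                 // stand-in for _cbl_mon
  std::condition_variable _cv;
  ExampleNode* _head = nullptr;
  ExampleNode* _tail = nullptr;
  size_t _num_cards = 0;
public:
  void enqueue(ExampleNode* node, size_t cards) {
    std::lock_guard<std::mutex> lock(_mon);
    node->next = nullptr;
    if (_tail == nullptr) { _head = node; } else { _tail->next = node; }
    _tail = node;
    _num_cards += cards;
    _cv.notify_one();
  }
  // Detach the whole list, as take_all/abandon-style operations do.
  ExampleNode* take_all() {
    std::lock_guard<std::mutex> lock(_mon);
    ExampleNode* result = _head;
    _head = _tail = nullptr;
    _num_cards = 0;
    return result;
  }
};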
  78 
  79   void abandon_completed_buffers();
  80 
  81   // Refine the cards in "node" from its index to buffer_size.
  82   // Stops processing if SuspendibleThreadSet::should_yield() is true.
  83   // Returns true if the entire buffer was processed, false if there
  84   // is a pending yield request.  The node's index is updated to exclude
  85   // the processed elements, e.g. up to the element before processing
  86   // stopped, or one past the last element if the entire buffer was
  87   // processed. Increments *total_refined_cards by the number of cards
  88   // processed and removed from the buffer.
  89   bool refine_buffer(BufferNode* node, uint worker_id, size_t* total_refined_cards);
  90 
  91   bool mut_process_buffer(BufferNode* node);
  92 
  93   // If the queue contains more cards than configured here, the
  94   // mutator must start doing some of the concurrent refinement work.
  95   size_t _max_cards;
  96   size_t _max_cards_padding;
  97   static const size_t MaxCardsUnlimited = SIZE_MAX;
  98 
  99   G1FreeIdSet _free_ids;
 100 
 101   // Array of cumulative counts of dirty cards refined by mutator threads.
 102   // Array has an entry per id in _free_ids.
 103   size_t* _mutator_refined_cards_counters;
 104 
 105 public:
 106   G1DirtyCardQueueSet(Monitor* cbl_mon, BufferNode::Allocator* allocator);
 107   ~G1DirtyCardQueueSet();
 108 
 109   // The number of parallel ids that can be claimed to allow collector or
 110   // mutator threads to do card-processing work.
 111   static uint num_par_ids();
 112 
 113   static void handle_zero_index_for_thread(Thread* t);
 114 
 115   // Either process the entire buffer and return true, or enqueue the
 116   // buffer and return false.  If the buffer is completely processed,
 117   // it can be reused in place.
 118   bool process_or_enqueue_completed_buffer(BufferNode* node);
 119 
 120   virtual void enqueue_completed_buffer(BufferNode* node);
 121 
 122   // If the number of cards in the completed buffers is > stop_at, then
 123   // remove and return a completed buffer from the list.  Otherwise, return NULL.
 124   BufferNode* get_completed_buffer(size_t stop_at = 0);
 125 
 126   // The number of cards in completed buffers. Read without synchronization.
 127   size_t num_cards() const { return _num_cards; }
 128 
 129   // Verify that _num_cards is equal to the sum of actual cards
 130   // in the completed buffers.
 131   void verify_num_cards() const NOT_DEBUG_RETURN;
 132 
 133   bool process_completed_buffers() { return _process_completed_buffers; }
 134   void set_process_completed_buffers(bool x) { _process_completed_buffers = x; }
 135 
 136   // Get/Set the number of cards that triggers log processing.
 137   // Log processing should be done when the number of cards exceeds the
 138   // threshold.
 139   void set_process_cards_threshold(size_t sz) {
 140     _process_cards_threshold = sz;
 141   }
 142   size_t process_cards_threshold() const {
 143     return _process_cards_threshold;
 144   }
 145   static const size_t ProcessCardsThresholdNever = SIZE_MAX;
 146 
 147   // Notify the consumer if the number of cards crossed the threshold.
 148   void notify_if_necessary();
 149 
 150   void merge_bufferlists(G1RedirtyCardsQueueSet* src);
 151 
 152   G1BufferNodeList take_all_completed_buffers();
 153 
 154   // If there are more than stop_at cards in the completed buffers, pop

+++ new/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp

   1 /*
   2  * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
  26 #define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
  27 
  28 #include "gc/g1/g1BufferNodeList.hpp"
  29 #include "gc/g1/g1FreeIdSet.hpp"
  30 #include "gc/shared/ptrQueue.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/padded.hpp"
  33 
  34 class G1ConcurrentRefineThread;
  35 class G1DirtyCardQueueSet;
  36 class G1RedirtyCardsQueueSet;
  37 class Thread;
  38 
  39 // A PtrQueue whose elements are pointers to dirty cards in the card table.
  40 class G1DirtyCardQueue: public PtrQueue {
  41 protected:
  42   virtual void handle_completed_buffer();
  43 
  44 public:
  45   G1DirtyCardQueue(G1DirtyCardQueueSet* qset);
  46 
  47   // Flush before destroying; queue may be used to capture pending work while
  48   // doing something else, with auto-flush on completion.
  49   ~G1DirtyCardQueue();
  50 
  51   // Process queue entries and release resources.
  52   void flush() { flush_impl(); }
  53 
  54   inline G1DirtyCardQueueSet* dirty_card_qset() const;
  55 
  56   // Compiler support.
  57   static ByteSize byte_offset_of_index() {
  58     return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
  59   }
  60   using PtrQueue::byte_width_of_index;
  61 
  62   static ByteSize byte_offset_of_buf() {
  63     return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
  64   }
  65   using PtrQueue::byte_width_of_buf;
  66 
  67 };
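
The compiler-support accessors above publish the byte offsets of the queue's buffer and index fields so that generated code can update them in place without calling into the runtime. A standalone sketch of the same offset-based access pattern, using plain offsetof rather than the real PtrQueue layout (ExampleQueue, its fields, and the byte-counting index are assumptions of the sketch):

#include <cstddef>

// Illustrative queue: a buffer plus a byte index that starts at the
// buffer's capacity in bytes and counts down as entries are added.
struct ExampleQueue {
  void** _buf;
  size_t _index;
  static size_t byte_offset_of_buf()   { return offsetof(ExampleQueue, _buf); }
  static size_t byte_offset_of_index() { return offsetof(ExampleQueue, _index); }
};

// Offset-based access: compute field addresses from the object base plus
// the published byte offsets, the way compiled barrier code would.
inline bool offset_based_enqueue(ExampleQueue* q, void* entry) {
  char* base = reinterpret_cast<char*>(q);
  size_t* index = reinterpret_cast<size_t*>(base + ExampleQueue::byte_offset_of_index());
  void*** buf   = reinterpret_cast<void***>(base + ExampleQueue::byte_offset_of_buf());
  if (*index == 0) {
    return false;                           // buffer full: the runtime must handle it
  }
  *index -= sizeof(void*);                  // index is in bytes and counts down
  (*buf)[*index / sizeof(void*)] = entry;   // store at the newly claimed slot
  return true;
}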
  68 
  69 class G1DirtyCardQueueSet: public PtrQueueSet {
  70   G1ConcurrentRefineThread* _primary_refinement_thread;
  71   // Add padding for improved performance for shared access.  There's only
  72   // one instance of this class, so using a little extra space is fine.
  73   // _completed_buffers_{head,tail} and _num_cards are isolated to their
  74   // own cache lines.  Other members are not, even if shared access, because
  75   // they aren't as critical to performance.
  76   DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(G1ConcurrentRefineThread*));
  77   BufferNode* volatile _completed_buffers_head;
  78   DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(BufferNode* volatile));
  79   BufferNode* volatile _completed_buffers_tail;
  80   DEFINE_PAD_MINUS_SIZE(3, DEFAULT_CACHE_LINE_SIZE, sizeof(BufferNode* volatile));
  81   volatile size_t _num_cards;
  82   DEFINE_PAD_MINUS_SIZE(4, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
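
A minimal standalone sketch of the cache-line isolation described above, with explicit pad arrays in place of DEFINE_PAD_MINUS_SIZE (the 64-byte line size and the field names are assumptions of the sketch):

#include <atomic>
#include <cstddef>

static const size_t kCacheLineSize = 64;   // assumed cache line size

// Each frequently written field is followed by padding that fills out the
// rest of its cache line, so producers updating the head do not invalidate
// the line holding the tail or the card count, and vice versa.
struct ExamplePaddedListState {
  std::atomic<void*> head{nullptr};
  char pad1[kCacheLineSize - sizeof(std::atomic<void*>)];
  std::atomic<void*> tail{nullptr};
  char pad2[kCacheLineSize - sizeof(std::atomic<void*>)];
  std::atomic<size_t> num_cards{0};
  char pad3[kCacheLineSize - sizeof(std::atomic<size_t>)];
};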
  83 
  84   DEBUG_ONLY(mutable volatile int _concurrency;)
  85   class ConcurrentVerifier;
  86   class NonconcurrentVerifier;
  87 
  88   size_t append_buffers(BufferNode* first, BufferNode* last, size_t card_count);
  89   // Verify _num_cards == sum of cards in the completed queue.
  90   void verify_num_cards() const NOT_DEBUG_RETURN;
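
With the monitor gone, appends to the completed-buffer list and updates of the card count have to be atomic. A simplified standalone sketch of a lock-free append in that spirit, using std::atomic and a LIFO push for brevity (the real change keeps FIFO order and also has to cope with concurrent consumers and the paused buffers below; all names are invented):

#include <atomic>
#include <cstddef>

struct ExampleBufferNode { ExampleBufferNode* next = nullptr; };

// Producer side only: push a completed buffer with compare-exchange instead
// of taking a monitor, and maintain the card count with fetch_add.
class ExampleLockFreeCompletedList {
  std::atomic<ExampleBufferNode*> _head{nullptr};
  std::atomic<size_t> _num_cards{0};
public:
  void enqueue(ExampleBufferNode* node, size_t cards) {
    ExampleBufferNode* old_head = _head.load(std::memory_order_relaxed);
    do {
      node->next = old_head;  // candidate link; retried if _head moved
    } while (!_head.compare_exchange_weak(old_head, node,
                                          std::memory_order_release,
                                          std::memory_order_relaxed));
    _num_cards.fetch_add(cards, std::memory_order_relaxed);
  }
  size_t num_cards() const { return _num_cards.load(std::memory_order_relaxed); }
};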
  91 
  92   struct PauseList {
  93     BufferNode* volatile _head;
  94     BufferNode* _tail;
  95     PauseList() : _head(NULL), _tail(NULL) {}
  96   };
  97   PauseList _paused[2];
  98 
  99   void record_paused_buffer(BufferNode* node);
 100   void enqueue_paused_buffers_aux(size_t index);
 101   void enqueue_previous_paused_buffers();
 102   void enqueue_all_paused_buffers();
 103 
 104   void clear_completed_buffers();
 105   void abandon_completed_buffers();
 106 
 107   size_t _process_cards_threshold;
 108 
 109   // Refine the cards in "node" from its index to buffer_size.
 110   // Stops processing if SuspendibleThreadSet::should_yield() is true.
 111   // Returns true if the entire buffer was processed, false if there
 112   // is a pending yield request.  The node's index is updated to exclude
 113   // the processed elements, e.g. up to the element before processing
 114   // stopped, or one past the last element if the entire buffer was
 115   // processed. Increments *total_refined_cards by the number of cards
 116   // processed and removed from the buffer.
 117   bool refine_buffer(BufferNode* node, uint worker_id, size_t* total_refined_cards);
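
The refine_buffer contract described above (process entries from the current index toward buffer_size, stop early on a yield request, leave the index at the first unprocessed element, and count what was refined) can be pictured with a small standalone loop; the should-yield check and per-card refinement below are stand-ins, not HotSpot APIs:

#include <cstddef>

static bool example_should_yield() { return false; }   // stand-in for SuspendibleThreadSet::should_yield()
static void example_refine_card(void* /*card*/) {}     // stand-in for refining one card

// Processes buf[*index .. buffer_size); returns true if the whole buffer was
// handled. On a yield request it stops early, leaving *index at the first
// unprocessed element so the buffer can be re-enqueued and resumed later.
static bool example_refine_buffer(void** buf, size_t buffer_size,
                                  size_t* index, size_t* total_refined_cards) {
  size_t i = *index;
  while (i < buffer_size && !example_should_yield()) {
    example_refine_card(buf[i]);
    ++i;
  }
  *total_refined_cards += i - *index;  // cards processed in this call
  *index = i;                          // == buffer_size when fully processed
  return i == buffer_size;
}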
 118 
 119   bool mut_process_buffer(BufferNode* node);
 120 
 121   // If the queue contains more cards than configured here, the
 122   // mutator must start doing some of the concurrent refinement work.
 123   size_t _max_cards;
 124   size_t _max_cards_padding;
 125   static const size_t MaxCardsUnlimited = SIZE_MAX;
 126 
 127   G1FreeIdSet _free_ids;
 128 
 129   // Array of cumulative counts of dirty cards refined by mutator threads.
 130   // Array has an entry per id in _free_ids.
 131   size_t* _mutator_refined_cards_counters;
 132 
 133 public:
 134   G1DirtyCardQueueSet(BufferNode::Allocator* allocator);
 135   ~G1DirtyCardQueueSet();
 136 
 137   void set_primary_refinement_thread(G1ConcurrentRefineThread* thread) {
 138     _primary_refinement_thread = thread;
 139   }
 140 
 141   // The number of parallel ids that can be claimed to allow collector or
 142   // mutator threads to do card-processing work.
 143   static uint num_par_ids();
 144 
 145   static void handle_zero_index_for_thread(Thread* t);
 146 
 147   // Either process the entire buffer and return true, or enqueue the
 148   // buffer and return false.  If the buffer is completely processed,
 149   // it can be reused in place.
 150   bool process_or_enqueue_completed_buffer(BufferNode* node);
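
Together with _max_cards and _max_cards_padding above, process_or_enqueue_completed_buffer is the mutator throttle: once the pending card count runs too far ahead of concurrent refinement, a Java thread refines its own buffer instead of enqueuing it. A rough standalone sketch of just that decision (the threshold rule and all names are illustrative, not the exact HotSpot policy):

#include <cstddef>

// Returns true if the mutator should refine its buffer itself rather than
// enqueue it for the concurrent refinement threads.
inline bool example_mutator_should_refine(size_t num_pending_cards,
                                          size_t max_cards,
                                          size_t max_cards_padding,
                                          bool is_java_thread) {
  if (!is_java_thread) {
    return false;  // only mutator (Java) threads are throttled this way
  }
  return num_pending_cards > max_cards + max_cards_padding;
}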
 151 
 152   virtual void enqueue_completed_buffer(BufferNode* node);
 153 
 154   // If the number of cards in the completed buffers is > stop_at, then
 155   // remove and return a completed buffer from the list.  Otherwise, return NULL.
 156   BufferNode* get_completed_buffer(size_t stop_at = 0);
 157 
 158   // The number of cards in completed buffers. Read without synchronization.
 159   size_t num_cards() const { return _num_cards; }
 160 
 161   // Get/Set the number of cards that triggers log processing.
 162   // Log processing should be done when the number of cards exceeds the
 163   // threshold.
 164   void set_process_cards_threshold(size_t sz) {
 165     _process_cards_threshold = sz;
 166   }
 167   size_t process_cards_threshold() const {
 168     return _process_cards_threshold;
 169   }
 170   static const size_t ProcessCardsThresholdNever = SIZE_MAX;
 171 
 173   // Notify the consumer if the number of cards crossed the threshold.
 173   void notify_if_necessary();
 174 
 175   void merge_bufferlists(G1RedirtyCardsQueueSet* src);
 176 
 177   G1BufferNodeList take_all_completed_buffers();
 178 
 179   // If there are more than stop_at cards in the completed buffers, pop