/*
 * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_HPP
#define SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_HPP

#include "gc/parallel/psPromotionLAB.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/padded.hpp"
#include "utilities/globalDefinitions.hpp"

//
// PSPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread-local data only.
//
// NOTE! Be careful when allocating the stacks on the C heap. If you are
// going to use a promotion manager in more than one thread, the stacks
// MUST be allocated on the C heap. This can lead to memory leaks, though,
// as they are not automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
//
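// A typical per-thread lifecycle (a sketch only; `worker_id` and `gc_tracer`
// stand for values the caller supplies, as PSScavenge does):
//
//   PSPromotionManager::pre_scavenge();
//   PSPromotionManager* pm =
//     PSPromotionManager::gc_thread_promotion_manager(worker_id);
//   // ... push work with pm->claim_or_forward_depth(p) ...
//   pm->drain_stacks(true);                        // the user must drain
//   PSPromotionManager::post_scavenge(gc_tracer);  // flushes the LABs
//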

class MutableSpace;
class PSOldGen;
class ParCompactionManager;

class PSPromotionManager {
  friend class PSScavenge;
  friend class ScavengeRootsTask;
  friend class PSRefProcTaskExecutor;
  friend class PSRefProcTask;

 private:
  typedef OverflowTaskQueue<StarTask, mtGC>           OopStarTaskQueue;
  typedef GenericTaskQueueSet<OopStarTaskQueue, mtGC> OopStarTaskQueueSet;

  static PaddedEnd<PSPromotionManager>* _manager_array;
  static OopStarTaskQueueSet*           _stack_array_depth;
  static PreservedMarksSet*             _preserved_marks_set;
  static PSOldGen*                      _old_gen;
  static MutableSpace*                  _young_space;

#if TASKQUEUE_STATS
  size_t                              _masked_pushes;
  size_t                              _masked_steals;
  size_t                              _arrays_chunked;
  size_t                              _array_chunks_processed;

  void print_local_stats(outputStream* const out, uint i) const;
  static void print_taskqueue_stats();

  void reset_stats();
#endif // TASKQUEUE_STATS

  PSYoungPromotionLAB                 _young_lab;
  PSOldPromotionLAB                   _old_lab;
  bool                                _young_gen_is_full;
  bool                                _old_gen_is_full;

  OopStarTaskQueue                    _claimed_stack_depth;
  OverflowTaskQueue<oop, mtGC>        _claimed_stack_breadth;

  bool                                _totally_drain;
  uint                                _target_stack_size;

  uint                                _array_chunk_size;
  uint                                _min_array_size_for_chunking;

  PreservedMarks*                     _preserved_marks;
  PromotionFailedInfo                 _promotion_failed_info;

  // Accessors
  static PSOldGen* old_gen()         { return _old_gen; }
  static MutableSpace* young_space() { return _young_space; }

  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push the oop of
  // the from-space image of the array, and the length field of that
  // from-space image indicates how many entries of the array we still
  // need to scan). To be able to distinguish between reference
  // locations and partially-scanned array oops we simply mask the
  // latter oops with PS_CHUNKED_ARRAY_OOP_MASK (0x2, defined below).
  // The next three methods do the masking, unmasking, and checking
  // whether the oop is masked or not; an example round trip is
  // sketched after unmask_chunked_array_oop(). Notice that the
  // signature of the mask and unmask methods looks a bit strange, as
  // they accept and return different types (oop and oop*). This is
  // because of the difference in types between what the task queue
  // holds (oop*) and oops to partially-scanned arrays (oop). We do
  // all the necessary casting in the mask / unmask methods to avoid
  // sprinkling the rest of the code with more casts.

  // The mask value is 0x2 so that PS_CHUNKED_ARRAY_OOP_MASK (or any
  // future masks) added to the taskqueue can't conflict with
  // COMPRESSED_OOP_MASK, which StarTask already uses to tag narrow oops.
#define PS_CHUNKED_ARRAY_OOP_MASK  0x2

  bool is_oop_masked(StarTask p) {
    // If something is marked chunked, it is always treated like a wide oop*.
    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
                                  PS_CHUNKED_ARRAY_OOP_MASK;
  }

  oop* mask_chunked_array_oop(oop obj) {
    assert(!is_oop_masked(cast_from_oop<oop*>(obj)), "invariant");
    oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
    assert(is_oop_masked(ret), "invariant");
    return ret;
  }

  oop unmask_chunked_array_oop(StarTask p) {
    assert(is_oop_masked(p), "invariant");
    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
    oop *chunk = (oop*)p;  // cast p to oop* (uses StarTask's conversion operator)
    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
    assert(!is_oop_masked(cast_from_oop<oop*>(ret)), "invariant");
    return ret;
  }

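  // Example round trip (a sketch only; `obj` stands for the from-space
  // image of a partially-scanned array, `task` for an entry later popped
  // from the queue):
  //
  //   oop* masked = mask_chunked_array_oop(obj);       // tag with 0x2
  //   push_depth(masked);                              // queue as a StarTask
  //   ...
  //   if (is_oop_masked(task)) {
  //     oop restored = unmask_chunked_array_oop(task); // clear the tag
  //     process_array_chunk(restored);
  //   }
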
  template <class T> void  process_array_chunk_work(oop obj,
                                                    int start, int end);
  void process_array_chunk(oop old);

  template <class T> void push_depth(T* p);

  inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size,
                                    uint age, bool tenured,
                                    const PSPromotionLAB* lab);
 protected:
  static OopStarTaskQueueSet* stack_array_depth()   { return _stack_array_depth; }
 public:
  // Static
  static void initialize();

  static void pre_scavenge();
  static bool post_scavenge(YoungGCTracer& gc_tracer);

  static PSPromotionManager* gc_thread_promotion_manager(uint index);
  static PSPromotionManager* vm_thread_promotion_manager();

  static bool steal_depth(int queue_num, StarTask& t);

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }

  bool young_gen_is_full()             { return _young_gen_is_full; }

  bool old_gen_is_full()               { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  template<bool promote_immediately> oop copy_to_survivor_space(oop o);
  oop oop_promotion_failed(oop obj, markWord obj_mark);

  void reset();
  void register_preserved_marks(PreservedMarks* preserved_marks);
  static void restore_preserved_marks();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    drain_stacks_depth(totally_drain);
  }
  void drain_stacks_cond_depth() {
    if (claimed_stack_depth()->size() > _target_stack_size) {
      drain_stacks_depth(false);
    }
  }
  void drain_stacks_depth(bool totally_drain);

  bool stacks_empty() {
    return claimed_stack_depth()->is_empty();
  }

  inline void process_popped_location_depth(StarTask p);

  static bool should_scavenge(oop* p, bool check_to_space = false);
  static bool should_scavenge(narrowOop* p, bool check_to_space = false);

  template <class T, bool promote_immediately>
  void copy_and_push_safe_barrier(T* p);

  template <class T> inline void claim_or_forward_depth(T* p);

  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)

  void push_contents(oop obj);
};
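
// A typical use from an oop-visiting closure (a sketch only; the concrete
// callers live in the Parallel GC scavenge code, and `scavenge_location`
// is a hypothetical name):
//
//   template <class T>
//   void scavenge_location(PSPromotionManager* pm, T* p) {
//     if (PSPromotionManager::should_scavenge(p)) {
//       // Copy the referent to a survivor space (or promote it) and push
//       // newly discovered work onto pm's depth-first stack.
//       pm->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
//     }
//   }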

#endif // SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_HPP