1 /* 2 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#ifndef SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_HPP
#define SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_HPP

#include "gc/parallel/psParallelCompact.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/allocation.hpp"
#include "utilities/stack.hpp"

class MutableSpace;
class PSOldGen;
class ParCompactionManager;
class ObjectStartArray;
class ParallelCompactData;
class ParMarkBitMap;

// Per-GC-worker state for the parallel full-GC (PSParallelCompact):
// each worker owns a marking stack, an objArray chunking stack and a
// region stack, plus a cache for the last mark-bitmap query.  Static
// members hold the queue sets shared by all workers for work stealing.
class ParCompactionManager : public CHeapObj<mtGC> {
  friend class ParMarkBitMap;
  friend class PSParallelCompact;
  friend class CompactionWithStealingTask;
  friend class UpdateAndFillClosure;
  friend class RefProcTaskExecutor;
  friend class PCRefProcTask;
  friend class MarkFromRootsTask;
  friend class UpdateDensePrefixAndCompactionTask;

 public:

// ------------------------  Don't putback if not needed
  // Actions that the compaction manager should take.
  enum Action {
    Update,
    Copy,
    UpdateAndCopy,
    CopyAndUpdate,
    NotValid
  };
// ------------------------  End don't putback if not needed

 private:
  // Per-worker objArray task queue capacity.
  // 32-bit: 4K * 8 = 32KiB; 64-bit: 8K * 16 = 128KiB
  #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
  typedef OverflowTaskQueue<ObjArrayTask, mtGC, QUEUE_SIZE> ObjArrayTaskQueue;
  typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC> ObjArrayTaskQueueSet;
  #undef QUEUE_SIZE

  // One manager per worker thread; see manager_array() below for the
  // indexing contract (index <= ParallelGCThreads).
  static ParCompactionManager** _manager_array;
  // Queue sets used as the steal targets for the three task kinds.
  static OopTaskQueueSet* _stack_array;
  static ObjArrayTaskQueueSet* _objarray_queues;
  static ObjectStartArray* _start_array;
  static RegionTaskQueueSet* _region_array;
  static PSOldGen* _old_gen;

 private:
  // This worker's queues: oops to mark, objArray chunks, and regions
  // to fill during the compaction phase.
  OverflowTaskQueue<oop, mtGC> _marking_stack;
  ObjArrayTaskQueue _objarray_stack;
  // Index of the next shadow region this worker will try to claim;
  // advanced in strides of the worker count (see move_next_shadow_region_by).
  size_t _next_shadow_region;

  // Is there a way to reuse the _marking_stack for the
  // saving empty regions?  For now just create a different
  // type of TaskQueue.
  RegionTaskQueue _region_stack;

  static ParMarkBitMap* _mark_bitmap;

  // Contains currently free shadow regions. We use it in
  // a LIFO fashion for better data locality and utilization.
  static GrowableArray<size_t>* _shadow_region_array;

  // Provides mutual exclusive access of _shadow_region_array.
  // See pop/push_shadow_region_mt_safe() below
  static Monitor* _shadow_region_monitor;

  Action _action;

  // Cache of the most recent mark-bitmap query (begin address, the
  // object found, and the returned size) to avoid repeated bitmap
  // searches over the same range; reset via reset_bitmap_query_cache().
  HeapWord* _last_query_beg;
  oop _last_query_obj;
  size_t _last_query_ret;

  static PSOldGen* old_gen() { return _old_gen; }
  static ObjectStartArray* start_array() { return _start_array; }
  static OopTaskQueueSet* stack_array() { return _stack_array; }

  // One-time setup of the static queue sets and manager array;
  // defined in the .cpp.
  static void initialize(ParMarkBitMap* mbm);

 protected:
  // Array of task queues.  Needed by the task terminator.
  static RegionTaskQueueSet* region_array() { return _region_array; }
  OverflowTaskQueue<oop, mtGC>* marking_stack() { return &_marking_stack; }

  // Pushes onto the marking stack.  If the marking stack is full,
  // pushes onto the overflow stack.
  void stack_push(oop obj);
  // Do not implement an equivalent stack_pop.  Deal with the
  // marking stack and overflow stack directly.

 public:
  // Sentinel returned when no shadow region is available
  // (all bits set; ~0 converts to SIZE_MAX).
  static const size_t InvalidShadow = ~0;
  static size_t pop_shadow_region_mt_safe(PSParallelCompact::RegionData* region_ptr);
  static void push_shadow_region_mt_safe(size_t shadow_region);
  static void push_shadow_region(size_t shadow_region);
  static void remove_all_shadow_regions();

  inline size_t next_shadow_region() { return _next_shadow_region; }
  inline void set_next_shadow_region(size_t record) { _next_shadow_region = record; }
  // Advance the next-shadow-region cursor by the number of workers
  // (each worker visits every workers-th region) and return the new value.
  inline size_t move_next_shadow_region_by(size_t workers) {
    _next_shadow_region += workers;
    return next_shadow_region();
  }

  // Invalidate the cached bitmap query result for this worker.
  void reset_bitmap_query_cache() {
    _last_query_beg = NULL;
    _last_query_obj = NULL;
    _last_query_ret = 0;
  }

  Action action() { return _action; }
  void set_action(Action v) { _action = v; }

  // Bitmap query support, cache last query and result
  HeapWord* last_query_begin() { return _last_query_beg; }
  oop last_query_object() { return _last_query_obj; }
  size_t last_query_return() { return _last_query_ret; }

  void set_last_query_begin(HeapWord *new_beg) { _last_query_beg = new_beg; }
  void set_last_query_object(oop new_obj) { _last_query_obj = new_obj; }
  void set_last_query_return(size_t new_ret) { _last_query_ret = new_ret; }

  // Reset the bitmap query cache of every manager in _manager_array.
  static void reset_all_bitmap_query_caches();

  RegionTaskQueue* region_stack() { return &_region_stack; }

  inline static ParCompactionManager* manager_array(uint index);

  ParCompactionManager();

  // Assert that the region stack of the worker at the given index
  // is empty; defined in the .cpp.
  static void verify_region_list_empty(uint stack_index);
  ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

  // void drain_stacks();

  // Predicates on _action; defined in the .cpp.
  bool should_update();
  bool should_copy();

  // Save for later processing.  Must not fail.
  inline void push(oop obj);
  inline void push_objarray(oop objarray, size_t index);
  inline void push_region(size_t index);

  // Check mark and maybe push on marking stack.
  template <typename T> inline void mark_and_push(T* p);

  inline void follow_klass(Klass* klass);

  void follow_class_loader(ClassLoaderData* klass);

  // Access function for compaction managers
  static ParCompactionManager* gc_thread_compaction_manager(uint index);

  // Work stealing: try to take a task from another worker's queue of
  // the matching kind (oop, objArray chunk, region).  Return false
  // when nothing could be stolen.
  static bool steal(int queue_num, oop& t);
  static bool steal_objarray(int queue_num, ObjArrayTask& t);
  static bool steal(int queue_num, size_t& region);

  // Process tasks remaining on any marking stack
  void follow_marking_stacks();
  inline bool marking_stacks_empty() const;

  // Process tasks remaining on any stack
  void drain_region_stacks();

  void follow_contents(oop obj);
  void follow_array(objArrayOop array, int index);

  void update_contents(oop obj);

  // VoidClosure adapter that drains the given manager's marking
  // stacks when invoked (do_void() is defined in the .cpp).
  class FollowStackClosure: public VoidClosure {
   private:
    ParCompactionManager* _compaction_manager;
   public:
    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_void();
  };
};

// Note the inclusive upper bound: index == ParallelGCThreads is valid
// (presumably a slot reserved for the VM thread — confirm in the .cpp).
inline ParCompactionManager* ParCompactionManager::manager_array(uint index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index <= ParallelGCThreads, "out of range manager_array access");
  return _manager_array[index];
}

// Empty iff both the oop marking stack and the objArray chunk stack
// (including their overflow portions) are empty.
bool ParCompactionManager::marking_stacks_empty() const {
  return _marking_stack.is_empty() && _objarray_stack.is_empty();
}

#endif // SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_HPP