rev 7555 : imported patch 8067469-g1-ignores-alwayspretouch
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/shared/mutableSpace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#endif // INCLUDE_ALL_GCS

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

MutableSpace::MutableSpace(size_t alignment): ImmutableSpace(), _top(NULL), _alignment(alignment) {
  // alignment is a size_t, so a ">= 0" check would be vacuously true;
  // only page alignment needs asserting.
  assert(MutableSpace::alignment() % os::vm_page_size() == 0,
         "Space should be aligned");
  _mangler = new MutableSpaceMangler(this);
}

MutableSpace::~MutableSpace() {
  delete _mangler;
}

void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
  if (!mr.is_empty()) {
    size_t page_size = UseLargePages ? alignment() : os::vm_page_size();
    HeapWord *start = (HeapWord*)round_to((intptr_t) mr.start(), page_size);
    HeapWord *end = (HeapWord*)round_down((intptr_t) mr.end(), page_size);
    if (end > start) {
      size_t size = pointer_delta(end, start, sizeof(char));
      if (clear_space) {
        // Prefer page reallocation to migration.
        os::free_memory((char*)start, size, page_size);
      }
      os::numa_make_global((char*)start, size);
    }
  }
}

void MutableSpace::pretouch_pages(MemRegion mr) {
  // Touch each page with a read followed by a write-back so the OS
  // commits backing memory; the volatile pointer keeps the compiler
  // from optimizing the accesses away.
  for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
    char t = *p;
    *p = t;
  }
}

void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {

  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");

  if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Is there an intersection with the address space?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
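      // Only the deltas around the intersection need page setup: head is
      // the part of mr below the region set up last time, tail the part
      // above it (both possibly scaled down by NUMASpaceResizeRate below).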
      size_t head_size = 0, tail_size = 0;
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if (intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary.
      if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
        const size_t change_size = head_size + tail_size;
        const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
        head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                         head_size);
        tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                         tail_size);
      }
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
    assert(mr.contains(head) && mr.contains(tail), "Sanity");

    if (UseNUMA) {
      numa_setup_pages(head, clear_space);
      numa_setup_pages(tail, clear_space);
    }

    if (AlwaysPreTouch) {
      pretouch_pages(head);
      pretouch_pages(tail);
    }

    // Remember where we stopped so that we can continue later.
    set_last_setup_region(MemRegion(head.start(), tail.end()));
  }

  set_bottom(mr.start());
  set_end(mr.end());

  if (clear_space) {
    clear(mangle_space);
  }
}

void MutableSpace::clear(bool mangle_space) {
  set_top(bottom());
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

#ifndef PRODUCT
void MutableSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void MutableSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void MutableSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}

void MutableSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}

void MutableSpace::mangle_region(MemRegion mr) {
  SpaceMangler::mangle_region(mr);
}

void MutableSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}

void MutableSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
#endif

// This version requires locking.
HeapWord* MutableSpace::allocate(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
           "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
HeapWord* MutableSpace::cas_allocate(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the current top value, meaning the exchange failed
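      // On failure the loop simply retries, re-reading the updated top()
      // rather than reusing 'result'.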
      if (result != obj) {
        continue; // another thread beat us to the allocation, try again
      }
      assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
             "checking alignment");
      return obj;
    } else {
      return NULL;
    }
  } while (true);
}

// Try to deallocate previous allocation. Returns true upon success.
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
  HeapWord* expected_top = obj + size;
  return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
}

void MutableSpace::oop_iterate(ExtendedOopClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate, but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(cl);
  }
}

void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate, but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate_no_header(cl);
  }
}

void MutableSpace::object_iterate(ObjectClosure* cl) {
  HeapWord* p = bottom();
  while (p < top()) {
    cl->do_object(oop(p));
    p += oop(p)->size();
  }
}

void MutableSpace::print_short() const { print_short_on(tty); }
void MutableSpace::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
            (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
}

void MutableSpace::print() const { print_on(tty); }
void MutableSpace::print_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT "," INTPTR_FORMAT "," INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void MutableSpace::verify() {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
}
--- EOF ---