/*
 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1PageBasedVirtualSpace.hpp"
#include "gc/shared/workgroup.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"

G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
  _low_boundary(NULL), _high_boundary(NULL), _committed(mtGC), _page_size(0), _special(false),
  _dirty(mtGC), _executable(false) {
  initialize_with_page_size(rs, used_size, page_size);
}

void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
  guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");

  vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
  vmassert(page_size > 0, "Page size must be non-zero.");

  guarantee(is_aligned(rs.base(), page_size),
            "Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size);
  guarantee(is_aligned(used_size, os::vm_page_size()),
            "Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
  guarantee(used_size <= rs.size(),
            "Used size of reserved space " SIZE_FORMAT " bytes is larger than the reservation of " SIZE_FORMAT " bytes", used_size, rs.size());
  guarantee(is_aligned(rs.size(), page_size),
            "Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size);

  _low_boundary  = rs.base();
  _high_boundary = _low_boundary + used_size;

  _special = rs.special();
  _executable = rs.executable();

  _page_size = page_size;

  vmassert(_committed.size() == 0, "virtual space initialized more than once");
  BitMap::idx_t size_in_pages = rs.size() / page_size;
  _committed.initialize(size_in_pages);
  if (_special) {
    _dirty.initialize(size_in_pages);
  }

  _tail_size = used_size % _page_size;
}
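
// Illustrative sizing example (numbers are hypothetical, not taken from the VM):
// with rs.size() == 8M, used_size == 6M + 4K and page_size == 2M, the space manages
// rs.size() / page_size == 4 pages, _high_boundary points 4K into the last page,
// and _tail_size == used_size % page_size == 4K, so the last page can only ever be
// committed partially (see commit_tail() below).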

G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary  = NULL;
  _high_boundary = NULL;
  _special       = false;
  _executable    = false;
  _page_size     = 0;
  _tail_size     = 0;
}
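
// Worked example for committed_size() below, using the same hypothetical numbers as
// above: if all 4 pages of a 2M-page space with a 4K tail are marked committed, the
// raw count gives 4 * 2M = 8M, and the correction for the partially used last page
// subtracts 2M - 4K, so 6M + 4K of actually committed memory is reported.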
size_t G1PageBasedVirtualSpace::committed_size() const {
  size_t result = _committed.count_one_bits() * _page_size;
  // The last page might not be committed in full.
  if (is_last_page_partial() && _committed.at(_committed.size() - 1)) {
    result -= _page_size - _tail_size;
  }
  return result;
}

size_t G1PageBasedVirtualSpace::reserved_size() const {
  return pointer_delta(_high_boundary, _low_boundary, sizeof(char));
}

size_t G1PageBasedVirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
  return (addr - _low_boundary) / _page_size;
}

bool G1PageBasedVirtualSpace::is_area_committed(size_t start_page, size_t size_in_pages) const {
  size_t end_page = start_page + size_in_pages;
  return _committed.get_next_zero_offset(start_page, end_page) >= end_page;
}

bool G1PageBasedVirtualSpace::is_area_uncommitted(size_t start_page, size_t size_in_pages) const {
  size_t end_page = start_page + size_in_pages;
  return _committed.get_next_one_offset(start_page, end_page) >= end_page;
}

char* G1PageBasedVirtualSpace::page_start(size_t index) const {
  return _low_boundary + index * _page_size;
}

bool G1PageBasedVirtualSpace::is_after_last_page(size_t index) const {
  guarantee(index <= _committed.size(),
            "Given boundary page " SIZE_FORMAT " is beyond managed page count " SIZE_FORMAT, index, _committed.size());
  return index == _committed.size();
}

void G1PageBasedVirtualSpace::commit_preferred_pages(size_t start, size_t num_pages) {
  vmassert(num_pages > 0, "No full pages to commit");
  vmassert(start + num_pages <= _committed.size(),
           "Tried to commit area from page " SIZE_FORMAT " to page " SIZE_FORMAT " "
           "that is outside of managed space of " SIZE_FORMAT " pages",
           start, start + num_pages, _committed.size());

  char* start_addr = page_start(start);
  size_t size = num_pages * _page_size;

  os::commit_memory_or_exit(start_addr, size, _page_size, _executable,
                            err_msg("Failed to commit area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
                                    p2i(start_addr), p2i(start_addr + size), size));
}

void G1PageBasedVirtualSpace::commit_tail() {
  vmassert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here");

  char* const aligned_end_address = align_down(_high_boundary, _page_size);
  os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), _executable,
                            err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
                                    p2i(aligned_end_address), p2i(_high_boundary), _tail_size));
}

void G1PageBasedVirtualSpace::commit_internal(size_t start_page, size_t end_page) {
  guarantee(start_page < end_page,
            "Given start page " SIZE_FORMAT " is larger than or equal to end page " SIZE_FORMAT, start_page, end_page);
  guarantee(end_page <= _committed.size(),
            "Given end page " SIZE_FORMAT " is beyond the end of the managed page count of " SIZE_FORMAT, end_page, _committed.size());

  size_t pages = end_page - start_page;
  bool need_to_commit_tail = is_after_last_page(end_page) && is_last_page_partial();

  // If we have to commit some (partial) tail area, decrease the number of pages to avoid
  // committing that in the full-page commit code.
  if (need_to_commit_tail) {
    pages--;
  }

  if (pages > 0) {
    commit_preferred_pages(start_page, pages);
  }

  if (need_to_commit_tail) {
    commit_tail();
  }
}

char* G1PageBasedVirtualSpace::bounded_end_addr(size_t end_page) const {
  return MIN2(_high_boundary, page_start(end_page));
}

void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_page) {
  guarantee(start_page < end_page,
            "Given start page " SIZE_FORMAT " is larger than or equal to end page " SIZE_FORMAT, start_page, end_page);

  os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page), _page_size);
}

bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
  // We need to make sure to commit all pages covered by the given area.
  guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted");

  bool zero_filled = true;
  size_t end_page = start_page + size_in_pages;

  if (_special) {
    // Check for dirty pages and update zero_filled if any found.
    if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
      zero_filled = false;
      _dirty.clear_range(start_page, end_page);
    }
  } else {
    commit_internal(start_page, end_page);
  }
  _committed.set_range(start_page, end_page);

  return zero_filled;
}

void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
  guarantee(start_page < end_page,
            "Given start page " SIZE_FORMAT " is larger than or equal to end page " SIZE_FORMAT, start_page, end_page);

  char* start_addr = page_start(start_page);
  os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
}

void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
  guarantee(is_area_committed(start_page, size_in_pages), "checking");

  size_t end_page = start_page + size_in_pages;
  if (_special) {
    // Mark that memory is dirty. If committed again the memory might
    // need to be cleared explicitly.
    _dirty.set_range(start_page, end_page);
  } else {
    uncommit_internal(start_page, end_page);
  }

  _committed.clear_range(start_page, end_page);
}
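
// G1PretouchTask pre-touches a memory range in parallel: each worker repeatedly
// claims the next chunk of MAX2(PreTouchParallelChunkSize, page size) bytes by
// atomically advancing _cur_addr, then touches the claimed chunk, until the
// claimed address falls outside the range.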
class G1PretouchTask : public AbstractGangTask {
private:
  char* volatile _cur_addr;
  char* const _start_addr;
  char* const _end_addr;
  size_t const _page_size;
public:
  G1PretouchTask(char* start_address, char* end_address, size_t page_size) :
    AbstractGangTask("G1 PreTouch",
                     Universe::is_fully_initialized() &&
                     Thread::current()->is_Named_thread() ? GCId::current_raw() :
                                                            // During VM initialization there is
                                                            // no GC cycle that this task can be
                                                            // associated with.
                                                            GCId::undefined()),
    _cur_addr(start_address),
    _start_addr(start_address),
    _end_addr(end_address),
    _page_size(page_size) {
  }

  virtual void work(uint worker_id) {
    size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
    while (true) {
      char* touch_addr = Atomic::add(actual_chunk_size, &_cur_addr) - actual_chunk_size;
      if (touch_addr < _start_addr || touch_addr >= _end_addr) {
        break;
      }
      char* end_addr = touch_addr + MIN2(actual_chunk_size, pointer_delta(_end_addr, touch_addr, sizeof(char)));
      os::pretouch_memory(touch_addr, end_addr, _page_size);
    }
  }

  static size_t chunk_size() { return PreTouchParallelChunkSize; }
};

void G1PageBasedVirtualSpace::pretouch(size_t start_page, size_t size_in_pages, WorkGang* pretouch_gang) {
  G1PretouchTask cl(page_start(start_page), bounded_end_addr(start_page + size_in_pages), _page_size);

  if (pretouch_gang != NULL) {
    size_t num_chunks = MAX2((size_t)1, size_in_pages * _page_size / MAX2(G1PretouchTask::chunk_size(), _page_size));

    uint num_workers = MIN2((uint)num_chunks, pretouch_gang->active_workers());
    log_debug(gc, heap)("Running %s with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT "B.",
                        cl.name(), num_workers, num_chunks, size_in_pages * _page_size);
    pretouch_gang->run_task(&cl, num_workers);
  } else {
    log_debug(gc, heap)("Running %s pre-touching " SIZE_FORMAT "B.",
                        cl.name(), size_in_pages * _page_size);
    cl.work(0);
  }
}

bool G1PageBasedVirtualSpace::contains(const void* p) const {
  return _low_boundary <= (const char*) p && (const char*) p < _high_boundary;
}

#ifndef PRODUCT
void G1PageBasedVirtualSpace::print_on(outputStream* out) {
  out->print("Virtual space:");
  if (_special) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - preferred page size: " SIZE_FORMAT, _page_size);
  out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
}

void G1PageBasedVirtualSpace::print() {
  print_on(tty);
}
#endif
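
// Usage sketch (illustrative only; the calling code is not part of this file):
// a client such as a region-to-space mapper typically commits a page range,
// optionally pre-touches it with a work gang, and later uncommits it:
//
//   G1PageBasedVirtualSpace vs(rs, used_size, page_size);
//   bool zero_filled = vs.commit(start_page, num_pages);
//   vs.pretouch(start_page, num_pages, pretouch_workers);  // workers may be NULL
//   ...
//   vs.uncommit(start_page, num_pages);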