/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
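  // Worked example (illustrative values, not from any particular platform):
  // with addr = 0x25000, prefix_size = 0x1000 and suffix_align = 0x10000,
  // beg_ofs = (0x25000 + 0x1000) & 0xffff = 0x6000 and beg_delta =
  // 0x10000 - 0x6000 = 0xa000, so the returned region starts at 0x2f000
  // and its suffix begins at the 64K-aligned address 0x30000.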

  if (len < beg_delta + required_size) {
    return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // The OS ignored the requested address.  Release the memory so the
    // caller can try a different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

ReservedSpace::ReservedSpace(const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");

  // Assert that if noaccess_prefix is used, it is the same as suffix_align.
  // Add in noaccess_prefix to prefix
  const size_t adjusted_prefix_size = noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    suffix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, suffix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored the requested address.  Try a different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, suffix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment.
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
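    // Illustrative sizing (assumed values): if suffix_align is 64K and the
    // first attempt landed 0x6000 bytes past an alignment boundary
    // (ofs = 0x6000), then extra = MAX2(0x6000, 0xa000) = 0xa000.  Whether
    // the OS returns the same address again or one shifted up by the size
    // increase, size + extra leaves enough room to carve out an aligned
    // region.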
    addr = reserve_and_align(size + extra, adjusted_prefix_size, suffix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               suffix_align, suffix_size, suffix_align);
    }

    if (requested_address != 0 &&
        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // As a result of the alignment constraints, the allocated addr differs
      // from the requested address.  Return to the caller, who can take
      // remedial action (such as trying again without a requested address).
      assert(_base == NULL, "should be");
      return;
    }
  }

  _base = addr;
  _size = size;
  _alignment = suffix_align;
  _noaccess_prefix = noaccess_prefix;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored the requested address.  Try a different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored the requested address.  Try a different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints.
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned; release it and retry with room for manual alignment.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve a size large enough to allow manual alignment, after
      // rounding size up to a multiple of the desired alignment.
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
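      // Releasing extra_base and re-reserving at the aligned address below
      // is inherently racy (another thread could map the freed range in
      // between), hence the retry loop.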
      do {
        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
        if (extra_base == NULL) return;
        // Do manual alignment.
        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
        assert(base >= extra_base, "just checking");
        // Re-reserve the region at the aligned base address.
        os::release_memory(extra_base, extra_size);
        base = os::reserve_memory(size, base);
      } while (base == NULL);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address.  Return to the caller, who can take
        // remedial action (such as trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done.
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (Universe::narrow_oop_base() != NULL) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");
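
  // Note: protecting this prefix is what lets compressed-oops code rely on
  // implicit null checks with a non-zero heap base; a null narrow oop
  // decodes to the heap base itself, so a load through it faults in this
  // inaccessible page rather than reading valid heap data.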

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
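  // The noaccess prefix is sized as lcm(page size, alignment) so that it
  // spans a whole number of pages and keeps the heap start aligned (e.g.,
  // with an assumed 4K page size and 4K alignment, the prefix is one page).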
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t heap_space_size,
                                     const size_t alignment,
                                     char* requested_address) :
  ReservedSpace(heap_space_size, alignment,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }
  protect_noaccess_prefix(heap_space_size);
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are the codecache
  // and the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not already so aligned.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region.
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();
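
  // A sketch of the resulting three regions (assuming the boundaries are
  // not already aligned to middle_alignment()):
  //
  //   low_boundary()   lower_high_boundary()   middle_high_boundary()   high_boundary()
  //        |----- lower -----|------- middle -------|----- upper -----|
  //          small pages           large pages          small pages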

  // High address of each region.
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // Commit the initial size.
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine whether a particular virtual space is using
   large pages.  This is done in the initialize function; only virtual
   spaces that are larger than LargePageSizeInBytes use large pages.  Once
   we have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
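//
// For example (assumed sizes): with 4K small pages and a 4M large page
// size, a request that expands high() by 100K entirely inside the lower
// region commits only 4K pages; once growth crosses lower_high_boundary(),
// the middle region is committed in whole 4M chunks.  high() itself still
// advances by exactly the requested number of bytes.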
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned,
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By taking the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell whether
  // it is an intra- or inter-region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions.
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that page can be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate the new unaligned addresses.
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align each address to its region's alignment.
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink.
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit.
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif