diff a/src/hotspot/share/memory/virtualspace.cpp b/src/hotspot/share/memory/virtualspace.cpp --- a/src/hotspot/share/memory/virtualspace.cpp +++ b/src/hotspot/share/memory/virtualspace.cpp @@ -36,14 +36,14 @@ // ReservedSpace // Dummy constructor ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0), - _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) { + _alignment(0), _special(false), _fd(-1), _executable(false) { } -ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) { +ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd(-1) { bool has_preferred_page_size = preferred_page_size != 0; // Want to use large pages where possible and pad with small pages. size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1); bool large_pages = page_size != (size_t)os::vm_page_size(); size_t alignment; @@ -60,16 +60,16 @@ initialize(size, alignment, large_pages, NULL, false); } ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large, - char* requested_address) : _fd_for_heap(-1) { + char* requested_address) : _fd(-1) { initialize(size, alignment, large, requested_address, false); } ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, - bool special, bool executable) : _fd_for_heap(-1) { + bool special, bool executable) : _fd(-1) { assert((size % os::vm_allocation_granularity()) == 0, "size not allocation aligned"); _base = base; _size = size; _alignment = alignment; @@ -134,25 +134,13 @@ _noaccess_prefix = 0; if (size == 0) { return; } - // If OS doesn't support demand paging for large page memory, we need - // to use reserve_memory_special() to reserve and pin the entire region. - // If there is a backing file directory for this space then whether - // large pages are allocated is up to the filesystem of the backing file. - // So we ignore the UseLargePages flag in this case. 
- bool special = large && !os::can_commit_large_page_memory(); - if (special && _fd_for_heap != -1) { - special = false; - if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) || - !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { - log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap"); - } - } - char* base = NULL; + bool special = large && !os::can_commit_large_page_memory(); + assert(!special || _fd == -1, "large pages and heap backing file are mutually exclusive"); + char* base = NULL; if (special) { base = os::reserve_memory_special(size, alignment, requested_address, executable); @@ -184,32 +172,32 @@ // If the memory was requested at a particular address, use // os::attempt_reserve_memory_at() to avoid over mapping something // important. If available space is not detected, return NULL. if (requested_address != 0) { - base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap); - if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) { + base = os::attempt_reserve_memory_at(size, requested_address, _fd); + if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd != -1)) { // OS ignored requested address. Try different address. 
base = NULL; } } else { - base = os::reserve_memory(size, NULL, alignment, _fd_for_heap); + base = os::reserve_memory(size, NULL, alignment, _fd); } if (base == NULL) return; // Check alignment constraints if ((((size_t)base) & (alignment - 1)) != 0) { // Base not aligned, retry - unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/); + unmap_or_release_memory(base, size, _fd != -1 /*is_file_mapped*/); // Make sure that size is aligned size = align_up(size, alignment); - base = os::reserve_memory_aligned(size, alignment, _fd_for_heap); + base = os::reserve_memory_aligned(size, alignment, _fd); if (requested_address != 0 && - failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) { + failed_to_reserve_as_requested(base, requested_address, size, false, _fd != -1)) { // As a result of the alignment constraints, the allocated base differs // from the requested address. Return back to the caller who can // take remedial action (like try again without a requested address). assert(_base == NULL, "should be"); return; @@ -219,11 +207,11 @@ // Done _base = base; _size = size; _alignment = alignment; // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true - if (_fd_for_heap != -1) { + if (_fd != -1) { _special = true; } } ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment, bool split) { @@ -264,11 +252,11 @@ void ReservedSpace::release() { if (is_reserved()) { char *real_base = _base - _noaccess_prefix; const size_t real_size = _size + _noaccess_prefix; if (special()) { - if (_fd_for_heap != -1) { + if (_fd != -1) { os::unmap_memory(real_base, real_size); } else { os::release_memory_special(real_base, real_size); } } else{ @@ -327,24 +315,13 @@ if (_base != NULL) { // We tried before, but we didn't like the address delivered. 
release(); } - // If OS doesn't support demand paging for large page memory, we need - // to use reserve_memory_special() to reserve and pin the entire region. - // If there is a backing file directory for this space then whether - // large pages are allocated is up to the filesystem of the backing file. - // So we ignore the UseLargePages flag in this case. - bool special = large && !os::can_commit_large_page_memory(); - if (special && _fd_for_heap != -1) { - special = false; - if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) || - !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { - log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set."); - } - } char* base = NULL; + bool special = large && !os::can_commit_large_page_memory(); + assert(!special || _fd == -1, "large pages and heap backing file are mutually exclusive"); log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT " heap of size " SIZE_FORMAT_HEX, p2i(requested_address), size); @@ -376,24 +353,24 @@ // If the memory was requested at a particular address, use // os::attempt_reserve_memory_at() to avoid over mapping something // important. If available space is not detected, return NULL. if (requested_address != 0) { - base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap); + base = os::attempt_reserve_memory_at(size, requested_address, _fd); } else { - base = os::reserve_memory(size, NULL, alignment, _fd_for_heap); + base = os::reserve_memory(size, NULL, alignment, _fd); } } if (base == NULL) { return; } // Done _base = base; _size = size; _alignment = alignment; // If heap is reserved with a backing file, the entire space has been committed. 
So set the _special flag to true - if (_fd_for_heap != -1) { + if (_fd != -1) { _special = true; } // Check alignment constraints if ((((size_t)base) & (alignment - 1)) != 0) { @@ -593,12 +570,12 @@ if (size == 0) { return; } if (heap_allocation_directory != NULL) { - _fd_for_heap = os::create_file_for_heap(heap_allocation_directory); - if (_fd_for_heap == -1) { + _fd = os::create_file_for_heap(heap_allocation_directory); + if (_fd == -1) { vm_exit_during_initialization( err_msg("Could not create file for Heap at location %s", heap_allocation_directory)); } } @@ -624,12 +601,12 @@ if (base() != NULL) { MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap); } - if (_fd_for_heap != -1) { - os::close(_fd_for_heap); + if (_fd != -1) { + os::close(_fd); } } MemRegion ReservedHeapSpace::region() const { return MemRegion((HeapWord*)base(), (HeapWord*)end());