--- old/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	2014-12-17 15:24:33.922051374 +0100
+++ new/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	2014-12-17 15:24:33.858049532 +0100
@@ -131,6 +131,9 @@
   _committed.set_range(start, start + size_in_pages);
 
   MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
+  if (AlwaysPreTouch) {
+    os::pretouch_memory((char*)result.start(), (char*)result.end());
+  }
   return result;
 }
 
--- old/src/share/vm/gc_implementation/shared/mutableSpace.cpp	2014-12-17 15:24:34.277061590 +0100
+++ new/src/share/vm/gc_implementation/shared/mutableSpace.cpp	2014-12-17 15:24:34.214059777 +0100
@@ -63,9 +63,7 @@
 }
 
 void MutableSpace::pretouch_pages(MemRegion mr) {
-  for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
-    char t = *p; *p = t;
-  }
+  os::pretouch_memory((char*)mr.start(), (char*)mr.end());
 }
 
 void MutableSpace::initialize(MemRegion mr,
--- old/src/share/vm/runtime/os.cpp	2014-12-17 15:24:34.637071950 +0100
+++ new/src/share/vm/runtime/os.cpp	2014-12-17 15:24:34.572070079 +0100
@@ -1588,6 +1588,11 @@
 
   return res;
 }
 
+void os::pretouch_memory(char* start, char* end) {
+  for (volatile char *p = start; p < end; p += os::vm_page_size()) {
+    *p = 0;
+  }
+}
 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                      char *addr, size_t bytes, bool read_only,
--- old/src/share/vm/runtime/os.hpp	2014-12-17 15:24:35.027083173 +0100
+++ new/src/share/vm/runtime/os.hpp	2014-12-17 15:24:34.953081044 +0100
@@ -311,6 +311,12 @@
   static bool uncommit_memory(char* addr, size_t bytes);
   static bool release_memory(char* addr, size_t bytes);
 
+  // Touch memory pages that cover the memory range from start to end (exclusive)
+  // to make the OS back the memory range with actual memory.
+  // Current implementation may not touch the last page if unaligned addresses
+  // are passed.
+  static void pretouch_memory(char* start, char* end);
+
   enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
   static bool protect_memory(char* addr, size_t bytes, ProtType prot,
                              bool is_committed = true);
--- old/src/share/vm/runtime/virtualspace.cpp	2014-12-17 15:24:35.461095663 +0100
+++ new/src/share/vm/runtime/virtualspace.cpp	2014-12-17 15:24:35.383093419 +0100
@@ -615,19 +615,7 @@
   }
 
   if (pre_touch || AlwaysPreTouch) {
-    int vm_ps = os::vm_page_size();
-    for (char* curr = previous_high;
-         curr < unaligned_new_high;
-         curr += vm_ps) {
-      // Note the use of a write here; originally we tried just a read, but
-      // since the value read was unused, the optimizer removed the read.
-      // If we ever have a concurrent touchahead thread, we'll want to use
-      // a read, to avoid the potential of overwriting data (if a mutator
-      // thread beats the touchahead thread to a page). There are various
-      // ways of making sure this read is not optimized away: for example,
-      // generating the code for a read procedure at runtime.
-      *curr = 0;
-    }
+    os::pretouch_memory(previous_high, unaligned_new_high);
   }
 
   _high += bytes;