src/hotspot/share/memory/metaspaceShared.cpp

@@ -39,18 +39,19 @@
 #include "gc/shared/softRefPolicy.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "logging/log.hpp"
 #include "logging/logMessage.hpp"
+#include "memory/archiveUtils.hpp"
+#include "memory/dynamicArchive.hpp"
 #include "memory/filemap.hpp"
 #include "memory/heapShared.inline.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
-#include "memory/dynamicArchive.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/methodData.hpp"

@@ -65,11 +66,11 @@
 #include "runtime/signature.hpp"
 #include "runtime/timerTrace.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vmOperations.hpp"
 #include "utilities/align.hpp"
-#include "utilities/bitMap.hpp"
+#include "utilities/bitMap.inline.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/hashtable.inline.hpp"
 #if INCLUDE_G1GC
 #include "gc/g1/g1CollectedHeap.hpp"
 #endif

@@ -80,12 +81,12 @@
 bool MetaspaceShared::_has_error_classes;
 bool MetaspaceShared::_archive_loading_failed = false;
 bool MetaspaceShared::_remapped_readwrite = false;
 address MetaspaceShared::_i2i_entry_code_buffers = NULL;
 size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
-size_t MetaspaceShared::_core_spaces_size = 0;
 void* MetaspaceShared::_shared_metaspace_static_top = NULL;
+intx MetaspaceShared::_mapping_delta;
 
 // The CDS archive is divided into the following regions:
 //     mc  - misc code (the method entry trampolines)
 //     rw  - read-write metadata
 //     ro  - read-only metadata and read-only tables

@@ -145,13 +146,25 @@
   expand_top_to(newtop);
   memset(p, 0, newtop - p);
   return p;
 }
 
+void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
+  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
+  intptr_t *p = (intptr_t*)_top;
+  char* newtop = _top + sizeof(intptr_t);
+  expand_top_to(newtop);
+  *p = n;
+  if (need_to_mark) {
+    ArchivePtrMarker::mark_pointer(p);
+  }
+}
+
 void DumpRegion::print(size_t total_bytes) const {
   tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
-                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
+                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
+                p2i(_base + MetaspaceShared::final_delta()));
 }
 
 void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
   tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
              _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));

@@ -170,18 +183,18 @@
     next->_base = next->_top = this->_end;
     next->_end = MetaspaceShared::shared_rs()->end();
   }
 }
 
-DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
-size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
+static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
+static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
 
 void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
   // Start with 0 committed bytes. The memory will be committed as needed by
   // MetaspaceShared::commit_shared_space_to().
   if (!_shared_vs.initialize(_shared_rs, 0)) {
-    vm_exit_during_initialization("Unable to allocate memory for shared space");
+    fatal("Unable to allocate memory for shared space");
   }
   first_space->init(&_shared_rs, (char*)first_space_bottom);
 }
 
 DumpRegion* MetaspaceShared::misc_code_dump_space() {

@@ -207,77 +220,36 @@
 
 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
   return _ro_region.allocate(num_bytes);
 }
 
-void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
-  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
-
-  // If using shared space, open the file that contains the shared space
-  // and map in the memory before initializing the rest of metaspace (so
-  // the addresses don't conflict)
-  FileMapInfo* mapinfo = new FileMapInfo(true);
-
-  // Open the shared archive file, read and validate the header. If
-  // initialization fails, shared spaces [UseSharedSpaces] are
-  // disabled and the file is closed.
-  // Map in spaces now also
-  if (mapinfo->initialize(true) && map_shared_spaces(mapinfo)) {
-    size_t cds_total = core_spaces_size();
-    address cds_address = (address)mapinfo->region_addr(0);
-    char* cds_end = (char *)align_up(cds_address + cds_total,
-                                     Metaspace::reserve_alignment());
-
-    // Mapping the dynamic archive before allocating the class space
-    cds_end = initialize_dynamic_runtime_shared_spaces((char*)cds_address, cds_end);
-
-#ifdef _LP64
-    if (Metaspace::using_class_space()) {
-      // If UseCompressedClassPointers is set then allocate the metaspace area
-      // above the heap and above the CDS area (if it exists).
-      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
-      // map_heap_regions() compares the current narrow oop and klass encodings
-      // with the archived ones, so it must be done after all encodings are determined.
-      mapinfo->map_heap_regions();
-    }
-    CompressedKlassPointers::set_range(CompressedClassSpaceSize);
-#endif // _LP64
-  } else {
-    assert(!mapinfo->is_open() && !UseSharedSpaces,
-           "archive file not closed or shared spaces not disabled.");
-  }
-}
-
-char* MetaspaceShared::initialize_dynamic_runtime_shared_spaces(
-        char* static_start, char* static_end) {
-  assert(UseSharedSpaces, "must be runtime");
-  char* cds_end = static_end;
-  if (!DynamicDumpSharedSpaces) {
-    address dynamic_top = DynamicArchive::map();
-    if (dynamic_top != NULL) {
-      assert(dynamic_top > (address)static_start, "Unexpected layout");
-      MetaspaceObj::expand_shared_metaspace_range(dynamic_top);
-      cds_end = (char *)align_up(dynamic_top, Metaspace::reserve_alignment());
-    }
+// When reserving an address range using ReservedSpace, we need an alignment that satisfies both:
+// os::vm_allocation_granularity() -- so that we can sub-divide this range into multiple mmap regions,
+//                                    while keeping the first range at offset 0 of this range.
+// Metaspace::reserve_alignment()  -- so we can pass the region to
+//                                    Metaspace::allocate_metaspace_compressed_klass_ptrs.
+size_t MetaspaceShared::reserved_space_alignment() {
+  size_t os_align = os::vm_allocation_granularity();
+  size_t ms_align = Metaspace::reserve_alignment();
+  if (os_align >= ms_align) {
+    assert(os_align % ms_align == 0, "must be a multiple");
+    return os_align;
+  } else {
+    assert(ms_align % os_align == 0, "must be a multiple");
+    return ms_align;
   }
-  return cds_end;
 }
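
A quick worked example of the selection rule above (the sizes are illustrative, not platform facts): if os::vm_allocation_granularity() were 64K and Metaspace::reserve_alignment() were 4M, reserved_space_alignment() would return 4M; with the two values swapped it would again return 4M. In both cases the result is the larger of the two alignments, and the asserts only require that the larger one is an exact multiple of the smaller.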
 
-ReservedSpace* MetaspaceShared::reserve_shared_rs(size_t size, size_t alignment,
-                                                  bool large, char* requested_address) {
-  if (requested_address != NULL) {
-    _shared_rs = ReservedSpace(size, alignment, large, requested_address);
-  } else {
-    _shared_rs = ReservedSpace(size, alignment, large);
-  }
-  return &_shared_rs;
+ReservedSpace MetaspaceShared::reserve_shared_space(size_t size, char* requested_address) {
+  bool large_pages = false; // Don't use large pages for the CDS archive.
+  assert(is_aligned(requested_address, reserved_space_alignment()), "must be");
+  return ReservedSpace(size, reserved_space_alignment(), large_pages, requested_address);
 }
 
 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
   assert(DumpSharedSpaces, "should be called for dump time only");
-  const size_t reserve_alignment = Metaspace::reserve_alignment();
-  bool large_pages = false; // No large pages when dumping the CDS archive.
+  const size_t reserve_alignment = reserved_space_alignment();
   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 
 #ifdef _LP64
   // On 64-bit VM, the heap and class space layout will be the same as if
   // you're running in -Xshare:on mode:

@@ -294,19 +266,27 @@
 #else
   // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
   size_t cds_total = align_down(256*M, reserve_alignment);
 #endif
 
+  bool use_requested_base = true;
+  DEBUG_ONLY(
+    if (SharedBaseAddress == 0) {
+      log_info(cds)("SharedBaseAddress == 0: always allocate class space at an alternative address");
+      use_requested_base = false;
+    })
+
   // First try to reserve the space at the specified SharedBaseAddress.
-  //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
-  reserve_shared_rs(cds_total, reserve_alignment, large_pages, shared_base);
+  assert(!_shared_rs.is_reserved(), "must be");
+  if (use_requested_base) {
+    _shared_rs = reserve_shared_space(cds_total, shared_base);
+  }
   if (_shared_rs.is_reserved()) {
     assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
   } else {
     // Get a mmap region anywhere if the SharedBaseAddress fails.
-    //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
-    reserve_shared_rs(cds_total, reserve_alignment, large_pages, NULL);
+    _shared_rs = reserve_shared_space(cds_total);
   }
   if (!_shared_rs.is_reserved()) {
     vm_exit_during_initialization("Unable to reserve memory for shared space",
                                   err_msg(SIZE_FORMAT " bytes.", cds_total));
   }

@@ -440,19 +420,25 @@
   size_t commit = MAX2(min_bytes, preferred_bytes);
   commit = MIN2(commit, uncommitted);
   assert(commit <= uncommitted, "sanity");
 
   bool result = _shared_vs.expand_by(commit, false);
+  ArchivePtrMarker::expand_ptr_end((address*)_shared_vs.high());
+
   if (!result) {
     vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                           need_committed_size));
   }
 
   log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9)  " bytes ending at %p]",
                 commit, _shared_vs.actual_committed_size(), _shared_vs.high());
 }
 
+void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
+  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
+}
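
For context on the new marking calls: initialize_ptr_marker() hands ArchivePtrMarker a bitmap covering the dump-time virtual space, and append_intptr_t() / mark_pointer() set one bit per pointer-sized slot that holds an address needing relocation. A minimal sketch of that bookkeeping, written against a hypothetical simplified marker rather than the real ArchivePtrMarker API:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for ArchivePtrMarker: one bit per pointer-sized slot,
    // counted from the bottom of the dump buffer.
    class SimplePtrMarker {
      intptr_t*         _base;   // bottom of the dump buffer
      std::vector<bool> _bits;   // _bits[i] == true  =>  slot i holds a pointer to patch
    public:
      SimplePtrMarker(intptr_t* base, size_t num_slots)
        : _base(base), _bits(num_slots, false) {}

      void mark_pointer(intptr_t* p) {
        size_t slot = (size_t)(p - _base);   // index of this pointer field
        assert(slot < _bits.size());
        _bits[slot] = true;
      }

      bool is_marked(size_t slot) const { return _bits[slot]; }
    };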
+
 // Read/write a data stream for restoring/preserving metadata pointers and
 // miscellaneous data from/to the shared archive file.
 
 void MetaspaceShared::serialize(SerializeClosure* soc) {
   int tag = 0;

@@ -467,10 +453,11 @@
   soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
   soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
   soc->do_tag(sizeof(Symbol));
 
   // Dump/restore miscellaneous metadata.
+  JavaClasses::serialize_offsets(soc);
   Universe::serialize(soc);
   soc->do_tag(--tag);
 
   // Dump/restore references to commonly used names and signatures.
   vmSymbols::serialize(soc);

@@ -480,11 +467,10 @@
   SymbolTable::serialize_shared_table_header(soc);
   StringTable::serialize_shared_table_header(soc);
   HeapShared::serialize_subgraph_info_table_header(soc);
   SystemDictionaryShared::serialize_dictionary_headers(soc);
 
-  JavaClasses::serialize_offsets(soc);
   InstanceMirrorKlass::serialize_offsets(soc);
   soc->do_tag(--tag);
 
   serialize_cloned_cpp_vtptrs(soc);
   soc->do_tag(--tag);

@@ -703,11 +689,13 @@
   }
 
   // Switch the vtable pointer to point to the cloned vtable.
   static void patch(Metadata* obj) {
     assert(DumpSharedSpaces, "dump-time only");
+    assert(MetaspaceShared::is_in_output_space(obj), "must be");
     *(void**)obj = (void*)(_info->cloned_vtable());
+    ArchivePtrMarker::mark_pointer(obj);
   }
 
   static bool is_valid_shared_object(const T* obj) {
     intptr_t* vptr = *(intptr_t**)obj;
     return vptr == _info->cloned_vtable();

@@ -797,11 +785,12 @@
 
   return vtable_len;
 }
 
 #define ALLOC_CPP_VTABLE_CLONE(c) \
-  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c);
+  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \
+  ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]);
 
 #define CLONE_CPP_VTABLE(c) \
   p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);
 
 #define ZERO_CPP_VTABLE(c) \

@@ -963,11 +952,11 @@
 void WriteClosure::do_region(u_char* start, size_t size) {
   assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
   assert(size % sizeof(intptr_t) == 0, "bad size");
   do_tag((int)size);
   while (size > 0) {
-    _dump_region->append_intptr_t(*(intptr_t*)start);
+    _dump_region->append_intptr_t(*(intptr_t*)start, true);
     start += sizeof(intptr_t);
     size -= sizeof(intptr_t);
   }
 }
 

@@ -1127,13 +1116,17 @@
   void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
   void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                  GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
   void dump_symbols();
   char* dump_read_only_tables();
+  void print_class_stats();
   void print_region_stats();
+  void print_bitmap_region_stats(size_t size, size_t total_size);
   void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
-                               const char *name, const size_t total_size);
+                               const char *name, size_t total_size);
+  void relocate_to_default_base_address(CHeapBitMap* ptrmap);
+
 public:
 
   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
   void doit();   // outline because gdb sucks
   static void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only,  bool allow_exec) {

@@ -1235,10 +1228,11 @@
       }
       p = _rw_region.allocate(bytes, alignment);
       newtop = _rw_region.top();
     }
     memcpy(p, obj, bytes);
+
     assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
     _new_loc_table->add(obj, (address)p);
     log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
     if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
       log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());

@@ -1274,18 +1268,28 @@
       address new_loc = get_new_loc(ref);
       RefRelocator refer;
       ref->metaspace_pointers_do_at(&refer, new_loc);
       return true; // recurse into ref.obj()
     }
+    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
+      assert(type == _method_entry_ref, "only special type allowed for now");
+      address obj = ref->obj();
+      address new_obj = get_new_loc(ref);
+      size_t offset = pointer_delta(p, obj,  sizeof(u1));
+      intptr_t* new_p = (intptr_t*)(new_obj + offset);
+      assert(*p == *new_p, "must be a copy");
+      ArchivePtrMarker::mark_pointer((address*)new_p);
+    }
   };
 
   // Relocate a reference to point to its shallow copy
   class RefRelocator: public MetaspaceClosure {
   public:
     virtual bool do_ref(Ref* ref, bool read_only) {
       if (ref->not_null()) {
         ref->update(get_new_loc(ref));
+        ArchivePtrMarker::mark_pointer(ref->addr());
       }
       return false; // Do not recurse.
     }
   };
 

@@ -1348,11 +1352,10 @@
       IsRefInArchiveChecker checker;
       iterate_roots(&checker);
     }
 #endif
 
-
     // cleanup
     _ssc = NULL;
   }
 
   // We must relocate the System::_well_known_klasses only after we have copied the

@@ -1438,11 +1441,66 @@
   dump_archive_heap_oopmaps();
 
   return start;
 }
 
+void VM_PopulateDumpSharedSpace::print_class_stats() {
+  tty->print_cr("Number of classes %d", _global_klass_objects->length());
+  {
+    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
+    for (int i = 0; i < _global_klass_objects->length(); i++) {
+      Klass* k = _global_klass_objects->at(i);
+      if (k->is_instance_klass()) {
+        num_inst ++;
+      } else if (k->is_objArray_klass()) {
+        num_obj_array ++;
+      } else {
+        assert(k->is_typeArray_klass(), "sanity");
+        num_type_array ++;
+      }
+    }
+    tty->print_cr("    instance classes   = %5d", num_inst);
+    tty->print_cr("    obj array classes  = %5d", num_obj_array);
+    tty->print_cr("    type array classes = %5d", num_type_array);
+  }
+}
+
+void VM_PopulateDumpSharedSpace::relocate_to_default_base_address(CHeapBitMap* ptrmap) {
+  intx addr_delta = MetaspaceShared::final_delta();
+  if (addr_delta == 0) {
+    return;
+  }
+
+  // Patch all pointers that are marked by ptrmap within this region,
+  // where we have just dumped all the metaspace data.
+  address patch_base = (address)SharedBaseAddress;
+  address patch_end  = (address)_md_region.top();
+  size_t size = patch_end - patch_base;
+
+  // debug only -- the current value of the pointers to be patched must be within this
+  // range (i.e., must point to valid metaspace objects)
+  address valid_old_base = patch_base;
+  address valid_old_end  = patch_end;
+
+  // debug only -- after patching, the pointers must point inside this range
+  // (the requested location of the archive, as mapped at runtime).
+  address valid_new_base = (address)Arguments::default_SharedBaseAddress();
+  address valid_new_end  = valid_new_base + size;
+
+  log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
+                 "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end),
+                 p2i(valid_new_base), p2i(valid_new_end));
+
+  SharedDataRelocator patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
+                              valid_new_base, valid_new_end, addr_delta);
+  ptrmap->iterate(&patcher);
+}
+
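relocate_to_default_base_address() then walks exactly those marked slots and adds addr_delta to each, so the written-out archive already contains pointers that are valid at Arguments::default_SharedBaseAddress(). A rough sketch of that patching pass, paired with the simplified marker sketched earlier (this is not the real SharedDataRelocator):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Add 'delta' to every slot recorded in the pointer bitmap. The range check
    // mirrors the valid_new_base / valid_new_end sanity checks in the function above.
    // Skipping null slots is an assumption of this sketch.
    static void patch_marked_pointers(intptr_t* base, const std::vector<bool>& marked,
                                      intptr_t delta,
                                      intptr_t valid_new_base, intptr_t valid_new_end) {
      for (size_t i = 0; i < marked.size(); i++) {
        if (!marked[i]) continue;               // slot does not hold an archived pointer
        intptr_t old_value = base[i];
        if (old_value == 0) continue;           // leave nulls alone
        intptr_t new_value = old_value + delta;
        assert(valid_new_base <= new_value && new_value < valid_new_end);
        base[i] = new_value;
      }
    }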
 void VM_PopulateDumpSharedSpace::doit() {
+  CHeapBitMap ptrmap;
+  MetaspaceShared::initialize_ptr_marker(&ptrmap);
+
   // We should no longer allocate anything from the metaspace, so that:
   //
   // (1) Metaspace::allocate might trigger GC if we have run out of
   //     committed metaspace, but we can't GC because we're running
   //     in the VM thread.

@@ -1470,28 +1528,11 @@
   SystemDictionaryShared::check_excluded_classes();
   _global_klass_objects = new GrowableArray<Klass*>(1000);
   CollectClassesClosure collect_classes;
   ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
 
-  tty->print_cr("Number of classes %d", _global_klass_objects->length());
-  {
-    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
-    for (int i = 0; i < _global_klass_objects->length(); i++) {
-      Klass* k = _global_klass_objects->at(i);
-      if (k->is_instance_klass()) {
-        num_inst ++;
-      } else if (k->is_objArray_klass()) {
-        num_obj_array ++;
-      } else {
-        assert(k->is_typeArray_klass(), "sanity");
-        num_type_array ++;
-      }
-    }
-    tty->print_cr("    instance classes   = %5d", num_inst);
-    tty->print_cr("    obj array classes  = %5d", num_obj_array);
-    tty->print_cr("    type array classes = %5d", num_type_array);
-  }
+  print_class_stats();
 
   // Ensure the ConstMethods won't be modified at run-time
   tty->print("Updating ConstMethods ... ");
   rewrite_nofast_bytecodes_and_calculate_fingerprints();
   tty->print_cr("done. ");

@@ -1518,42 +1559,42 @@
 
   char* vtbl_list = _md_region.top();
   MetaspaceShared::allocate_cpp_vtable_clones();
   _md_region.pack();
 
-  // The 4 core spaces are allocated consecutively mc->rw->ro->md, so there total size
-  // is just the spaces between the two ends.
-  size_t core_spaces_size = _md_region.end() - _mc_region.base();
-  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
-         "should already be aligned");
-
   // During patching, some virtual methods may be called, so at this point
   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
   MetaspaceShared::patch_cpp_vtable_pointers();
 
   // The vtable clones contain addresses of the current process.
   // We don't want to write these addresses into the archive.
   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
 
+  // Relocate the data so that it can be mapped at Arguments::default_SharedBaseAddress()
+  // without runtime relocation.
+  ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_md_region.top());
+  relocate_to_default_base_address(&ptrmap);
+
   // Create and write the archive file that maps the shared spaces.
 
   FileMapInfo* mapinfo = new FileMapInfo(true);
   mapinfo->populate_header(os::vm_allocation_granularity());
   mapinfo->set_serialized_data_start(serialized_data_start);
   mapinfo->set_misc_data_patching_start(vtbl_list);
   mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(),
                                       MetaspaceShared::i2i_entry_code_buffers_size());
-  mapinfo->set_core_spaces_size(core_spaces_size);
   mapinfo->open_for_write();
 
   // NOTE: md contains the trampoline code for method entries, which are patched at run time,
   // so it needs to be read/write.
   write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
   write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
   write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
   write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
 
+  mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap());
+
   _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
                                         _closed_archive_heap_regions,
                                         _closed_archive_heap_oopmaps,
                                         MetaspaceShared::first_closed_archive_heap_region,
                                         MetaspaceShared::max_closed_archive_heap_region);

@@ -1561,10 +1602,11 @@
                                         _open_archive_heap_regions,
                                         _open_archive_heap_oopmaps,
                                         MetaspaceShared::first_open_archive_heap_region,
                                         MetaspaceShared::max_open_archive_heap_region);
 
+  mapinfo->set_final_requested_base((char*)Arguments::default_SharedBaseAddress());
   mapinfo->set_header_crc(mapinfo->compute_header_crc());
   mapinfo->write_header();
   mapinfo->close();
 
   // Restore the vtable in case we invoke any virtual methods.

@@ -1592,33 +1634,43 @@
   vm_direct_exit(0);
 }
 
 void VM_PopulateDumpSharedSpace::print_region_stats() {
   // Print statistics of all the regions
+  const size_t bitmap_used = ArchivePtrMarker::ptrmap()->size_in_bytes();
+  const size_t bitmap_reserved = align_up(bitmap_used, Metaspace::reserve_alignment());
   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
                                 _mc_region.reserved()  + _md_region.reserved() +
+                                bitmap_reserved +
                                 _total_closed_archive_region_size +
                                 _total_open_archive_region_size;
   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
                              _mc_region.used()  + _md_region.used() +
+                             bitmap_used +
                              _total_closed_archive_region_size +
                              _total_open_archive_region_size;
   const double total_u_perc = percent_of(total_bytes, total_reserved);
 
   _mc_region.print(total_reserved);
   _rw_region.print(total_reserved);
   _ro_region.print(total_reserved);
   _md_region.print(total_reserved);
+  print_bitmap_region_stats(bitmap_reserved, total_reserved);
   print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
   print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
 
   tty->print_cr("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                  total_bytes, total_reserved, total_u_perc);
 }
 
+void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
+  tty->print_cr("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
+                size, size/double(total_size)*100.0, size, p2i(NULL));
+}
+
 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
-                                                         const char *name, const size_t total_size) {
+                                                         const char *name, size_t total_size) {
   int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
   for (int i = 0; i < arr_len; i++) {
       char* start = (char*)heap_mem->at(i).start();
       size_t size = heap_mem->at(i).byte_size();
       char* top = start + size;

@@ -1634,13 +1686,17 @@
   assert(DumpSharedSpaces, "sanity");
   Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
   o->set_klass(k);
 }
 
-Klass* MetaspaceShared::get_relocated_klass(Klass *k) {
+Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) {
   assert(DumpSharedSpaces, "sanity");
-  return ArchiveCompactor::get_relocated_klass(k);
+  k = ArchiveCompactor::get_relocated_klass(k);
+  if (is_final) {
+    k = (Klass*)(address(k) + final_delta());
+  }
+  return k;
 }
 
 class LinkSharedClassesClosure : public KlassClosure {
   Thread* THREAD;
   bool    _made_progress;

@@ -1934,10 +1990,18 @@
            "Open archive heap region is not mapped");
     *p = HeapShared::decode_from_archive(o);
   }
 }
 
+void ReadClosure::do_mirror_oop(oop *p) {
+  do_oop(p);
+  oop mirror = *p;
+  if (mirror != NULL) {
+    java_lang_Class::update_archived_mirror_native_pointers(mirror);
+  }
+}
+
 void ReadClosure::do_region(u_char* start, size_t size) {
   assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
   assert(size % sizeof(intptr_t) == 0, "bad size");
   do_tag((int)size);
   while (size > 0) {

@@ -1945,12 +2009,13 @@
     start += sizeof(intptr_t);
     size -= sizeof(intptr_t);
   }
 }
 
-void MetaspaceShared::set_shared_metaspace_range(void* base, void* top) {
-  _shared_metaspace_static_top = top;
+void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
+  assert(base <= static_top && static_top <= top, "must be");
+  _shared_metaspace_static_top = static_top;
   MetaspaceObj::set_shared_metaspace_range(base, top);
 }
 
 // Return true if given address is in the misc data region
 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {

@@ -1971,89 +2036,345 @@
   } else {
     return false;
   }
 }
 
-// Map shared spaces at requested addresses and return if succeeded.
-bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
-  size_t image_alignment = mapinfo->alignment();
-
-#ifndef _WINDOWS
-  // Map in the shared memory and then map the regions on top of it.
-  // On Windows, don't map the memory here because it will cause the
-  // mappings of the regions to fail.
-  ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
-  if (!shared_rs.is_reserved()) return false;
-#endif
+void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
+  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
+  MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
+  FileMapInfo* static_mapinfo = open_static_archive();
+  FileMapInfo* dynamic_mapinfo = NULL;
 
-  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
+  if (static_mapinfo != NULL) {
+    dynamic_mapinfo = open_dynamic_archive();
 
-  // Map each shared region
-  int regions[] = {mc, rw, ro, md};
-  size_t len = sizeof(regions)/sizeof(int);
-  char* saved_base[] = {NULL, NULL, NULL, NULL};
-  char* top = mapinfo->map_regions(regions, saved_base, len );
-
-  if (top != NULL &&
-      (image_alignment == (size_t)os::vm_allocation_granularity()) &&
-      mapinfo->validate_shared_path_table()) {
-    // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
-    // fast checking in MetaspaceShared::is_in_shared_metaspace() and
-    // MetaspaceObj::is_shared().
-    _core_spaces_size = mapinfo->core_spaces_size();
-    set_shared_metaspace_range((void*)saved_base[0], (void*)top);
-    return true;
+    // First try to map at the requested address
+    result = map_archives(static_mapinfo, dynamic_mapinfo, true);
+    if (result == MAP_ARCHIVE_MMAP_FAILURE) {
+      // Mapping has failed (probably due to ASLR). Let's map at an address chosen
+      // by the OS.
+      result = map_archives(static_mapinfo, dynamic_mapinfo, false);
+    }
+  }
+
+  if (result == MAP_ARCHIVE_SUCCESS) {
+    bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped());
+    char* cds_base = static_mapinfo->mapped_base();
+    char* cds_end =  dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end();
+    set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end);
+    _mapping_delta = static_mapinfo->mapping_delta();
+    if (dynamic_mapped) {
+      FileMapInfo::set_shared_path_table(dynamic_mapinfo);
   } else {
-    mapinfo->unmap_regions(regions, saved_base, len);
-#ifndef _WINDOWS
-    // Release the entire mapped region
-    shared_rs.release();
-#endif
-    // If -Xshare:on is specified, print out the error message and exit VM,
-    // otherwise, set UseSharedSpaces to false and continue.
-    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
-      vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
+      FileMapInfo::set_shared_path_table(static_mapinfo);
+    }
     } else {
-      FLAG_SET_DEFAULT(UseSharedSpaces, false);
+    set_shared_metaspace_range(NULL, NULL, NULL);
+    UseSharedSpaces = false;
+    FileMapInfo::fail_continue("Unable to map shared spaces");
+    if (PrintSharedArchiveAndExit) {
+      vm_exit_during_initialization("Unable to use shared archive.");
+    }
+  }
+
+  if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) {
+    delete static_mapinfo;
+  }
+  if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) {
+    delete dynamic_mapinfo;
+  }
+}
+
+FileMapInfo* MetaspaceShared::open_static_archive() {
+  FileMapInfo* mapinfo = new FileMapInfo(true);
+  if (!mapinfo->initialize()) {
+    delete(mapinfo);
+    return NULL;
+  }
+  return mapinfo;
+}
+
+FileMapInfo* MetaspaceShared::open_dynamic_archive() {
+  if (DynamicDumpSharedSpaces) {
+    return NULL;
+  }
+  if (Arguments::GetSharedDynamicArchivePath() == NULL) {
+    return NULL;
+  }
+
+  FileMapInfo* mapinfo = new FileMapInfo(false);
+  if (!mapinfo->initialize()) {
+    delete(mapinfo);
+    return NULL;
+  }
+  return mapinfo;
+}
+
+MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
+                                               bool use_requested_addr) {
+  // Uncomment the next line to benchmark mapping at alternative locations.
+  // if (SharedBaseAddress == 0 && use_requested_addr) { return MAP_ARCHIVE_MMAP_FAILURE; }
+
+  if (dynamic_mapinfo != NULL) {
+    // Ensure that the OS won't be able to allocate new memory spaces between the two
+    // archives, or else it would mess up the simple comparison in MetaspaceObj::is_shared().
+    assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
+  }
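
The "no gap" assertion matters because, as the comment above notes, MetaspaceObj::is_shared() is a simple pointer range comparison from the bottom of the static archive to the top of the dynamic archive; if foreign memory could be allocated between the two, objects in it would wrongly test as shared. A simplified illustration of that kind of check (placeholder names, not the actual MetaspaceObj code):

    #include <cstddef>

    // Simplified [base, top) membership test in the style of MetaspaceObj::is_shared().
    static char* g_shared_base = nullptr;   // bottom of the static archive
    static char* g_shared_top  = nullptr;   // top of the dynamic archive (or static, if none)

    static inline bool is_in_shared_range(const void* p) {
      // Only sound if the static and dynamic archives are contiguous; a gap between
      // them would let unrelated allocations satisfy this test.
      return (const char*)p >= g_shared_base && (const char*)p < g_shared_top;
    }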
+
+  ReservedSpace main_rs, archive_space_rs, class_space_rs;
+  MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
+  char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo,
+                                                                 use_requested_addr, main_rs, archive_space_rs,
+                                                                 class_space_rs);
+  if (mapped_base_address == NULL) {
+    result = MAP_ARCHIVE_MMAP_FAILURE;
+  } else {
+    log_debug(cds)("Reserved archive_space_rs     [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
+                   p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
+    log_debug(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
+                   p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
+    MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
+    MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
+                                     map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;
+
+    if (static_result == MAP_ARCHIVE_SUCCESS) {
+      if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
+        result = MAP_ARCHIVE_SUCCESS;
+      } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) {
+        assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed");
+        // No need to retry mapping the dynamic archive again, as it will never succeed
+        // (bad file, etc) -- just keep the base archive.
+        log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s",
+                                  dynamic_mapinfo->full_path());
+        result = MAP_ARCHIVE_SUCCESS;
+        // FIXME: reduce archive space end ....
+      } else {
+        result = MAP_ARCHIVE_MMAP_FAILURE;
+      }
+    } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) {
+      result = MAP_ARCHIVE_OTHER_FAILURE;
+    } else {
+      result = MAP_ARCHIVE_MMAP_FAILURE;
+    }
+  }
+
+  if (result == MAP_ARCHIVE_SUCCESS) {
+    if (!main_rs.is_reserved() && class_space_rs.is_reserved()) {
+      MemTracker::record_virtual_memory_type((address)class_space_rs.base(), mtClass);
+    }
+    SharedBaseAddress = (size_t)mapped_base_address;
+    LP64_ONLY({
+        if (Metaspace::using_class_space()) {
+          assert(class_space_rs.is_reserved(), "must be");
+          char* cds_base = static_mapinfo->mapped_base();
+          Metaspace::allocate_metaspace_compressed_klass_ptrs(class_space_rs, NULL, (address)cds_base);
+          // map_heap_regions() compares the current narrow oop and klass encodings
+          // with the archived ones, so it must be done after all encodings are determined.
+          static_mapinfo->map_heap_regions();
+        }
+        CompressedKlassPointers::set_range(CompressedClassSpaceSize);
+      });
+  } else {
+    unmap_archive(static_mapinfo);
+    unmap_archive(dynamic_mapinfo);
+    release_reserved_spaces(main_rs, archive_space_rs, class_space_rs);
+  }
+
+  return result;
+}
+
+char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
+                                                          FileMapInfo* dynamic_mapinfo,
+                                                          bool use_requested_addr,
+                                                          ReservedSpace& main_rs,
+                                                          ReservedSpace& archive_space_rs,
+                                                          ReservedSpace& class_space_rs) {
+  const bool use_klass_space = NOT_LP64(false) LP64_ONLY(Metaspace::using_class_space());
+  const size_t class_space_size = NOT_LP64(0) LP64_ONLY(Metaspace::compressed_class_space_size());
+
+  if (use_klass_space) {
+    assert(class_space_size > 0, "CompressedClassSpaceSize must have been validated");
+  }
+  if (use_requested_addr && !is_aligned(static_mapinfo->requested_base_address(), reserved_space_alignment())) {
+    return NULL;
+  }
+
+  // Size and requested location of the archive_space_rs (for both static and dynamic archives)
+  size_t base_offset = static_mapinfo->mapping_base_offset();
+  size_t end_offset  = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
+  assert(base_offset == 0, "must be");
+  assert(is_aligned(end_offset,  os::vm_allocation_granularity()), "must be");
+  assert(is_aligned(base_offset, os::vm_allocation_granularity()), "must be");
+
+  // In case reserved_space_alignment() != os::vm_allocation_granularity()
+  assert((size_t)os::vm_allocation_granularity() <= reserved_space_alignment(), "must be");
+  end_offset = align_up(end_offset, reserved_space_alignment()); 
+
+  size_t archive_space_size = end_offset - base_offset;
+
+  // Special handling for Windows because it cannot mmap into a reserved space:
+  //    use_requested_addr: We just map each region individually, and give up if any one of them fails.
+  //   !use_requested_addr: We reserve the space first, and then os::read in all the regions (instead of mmap).
+  //                        We're going to patch all the pointers anyway so there's no benefit for mmap.
+
+  if (use_requested_addr) {
+    char* archive_space_base = static_mapinfo->requested_base_address() + base_offset;
+    char* archive_space_end  = archive_space_base + archive_space_size;
+    if (!MetaspaceShared::use_windows_memory_mapping()) {
+      archive_space_rs = reserve_shared_space(archive_space_size, archive_space_base);
+      if (!archive_space_rs.is_reserved()) {
+        return NULL;
+      }
+    }
+    if (use_klass_space) {
+      // Make sure we can map the klass space immediately following the archive space
+      char* class_space_base = archive_space_end;
+      class_space_rs = reserve_shared_space(class_space_size, class_space_base);
+      if (!class_space_rs.is_reserved()) {
+        return NULL;
+      }
+    }
+    return static_mapinfo->requested_base_address();
+  } else {
+    if (use_klass_space) {
+      main_rs = reserve_shared_space(archive_space_size + class_space_size);
+      if (main_rs.is_reserved()) {
+        archive_space_rs = main_rs.first_part(archive_space_size, reserved_space_alignment(), /*split=*/true);
+        class_space_rs = main_rs.last_part(archive_space_size);
+      }
+    } else {
+      main_rs = reserve_shared_space(archive_space_size);
+      archive_space_rs = main_rs;
+    }
+    if (archive_space_rs.is_reserved()) {
+      return archive_space_rs.base();
+    } else {
+      return NULL;
+    }
+  }
+}
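
For orientation, the reservation above produces roughly the following layout (a sketch only; the dynamic archive and the class space may each be absent, and sizes are not to scale):

    mapped_base_address
    |<----------------- archive_space_rs ----------------->|<---- class_space_rs ---->|
    |  static archive (mc rw ro md)  |  dynamic archive    |  compressed class space  |

With use_requested_addr the whole range sits at requested_base_address(); on Windows the archive part is not pre-reserved because each region is mapped individually, as the comment above explains.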
+
+void MetaspaceShared::release_reserved_spaces(ReservedSpace& main_rs,
+                                              ReservedSpace& archive_space_rs,
+                                              ReservedSpace& class_space_rs) {
+  if (main_rs.is_reserved()) {
+    assert(main_rs.contains(archive_space_rs.base()), "must be");
+    assert(main_rs.contains(class_space_rs.base()), "must be");
+    log_debug(cds)("Released shared space (archive+classes) " INTPTR_FORMAT, p2i(main_rs.base()));
+    main_rs.release();
+  } else {
+    if (archive_space_rs.is_reserved()) {
+      log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
+      archive_space_rs.release();
+    }
+    if (class_space_rs.is_reserved()) {
+      log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
+      class_space_rs.release();
     }
-    return false;
+  }
+}
+
+static int static_regions[]  = {MetaspaceShared::mc,
+                                MetaspaceShared::rw,
+                                MetaspaceShared::ro,
+                                MetaspaceShared::md};
+static int dynamic_regions[] = {MetaspaceShared::rw,
+                                MetaspaceShared::ro,
+                                MetaspaceShared::mc};
+static int static_regions_count  = 4;
+static int dynamic_regions_count = 3;
+
+MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) {
+  assert(UseSharedSpaces, "must be runtime");
+  if (mapinfo == NULL) {
+    return MAP_ARCHIVE_SUCCESS; // no error has happened -- trivially succeeded.
+  }
+
+  mapinfo->set_is_mapped(false);
+
+  if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) { // FIXME
+    // FIXME log
+    return MAP_ARCHIVE_OTHER_FAILURE;
+  }
+
+  MapArchiveResult result = mapinfo->is_static() ?
+    mapinfo->map_regions(static_regions, static_regions_count, mapped_base_address, rs) :
+    mapinfo->map_regions(dynamic_regions, dynamic_regions_count, mapped_base_address, rs);
+
+  if (result != MAP_ARCHIVE_SUCCESS) {
+    unmap_archive(mapinfo);
+    return result;
+  }
+
+  if (mapinfo->is_static()) {
+    if (!mapinfo->validate_shared_path_table()) {
+      unmap_archive(mapinfo);
+      return MAP_ARCHIVE_OTHER_FAILURE;
+    }
+  } else {
+    if (!DynamicArchive::validate(mapinfo)) {
+      unmap_archive(mapinfo);
+      return MAP_ARCHIVE_OTHER_FAILURE;
+    }
+  }
+
+  mapinfo->set_is_mapped(true);
+  return MAP_ARCHIVE_SUCCESS;
+}
+
+void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) {
+  assert(UseSharedSpaces, "must be runtime");
+  if (mapinfo != NULL) {
+    if (mapinfo->is_static()) {
+      mapinfo->unmap_regions(static_regions, static_regions_count);
+    } else {
+      mapinfo->unmap_regions(dynamic_regions, dynamic_regions_count);
+    }
+    mapinfo->set_is_mapped(false);
   }
 }
 
 // Read the miscellaneous data from the shared file, and
 // serialize it out to its various destinations.
 
 void MetaspaceShared::initialize_shared_spaces() {
-  FileMapInfo *mapinfo = FileMapInfo::current_info();
-  _i2i_entry_code_buffers = mapinfo->i2i_entry_code_buffers();
-  _i2i_entry_code_buffers_size = mapinfo->i2i_entry_code_buffers_size();
-  // _core_spaces_size is loaded from the shared archive immediatelly after mapping
-  assert(_core_spaces_size == mapinfo->core_spaces_size(), "sanity");
-  char* buffer = mapinfo->misc_data_patching_start();
+  FileMapInfo *static_mapinfo = FileMapInfo::current_info();
+  _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers();
+  _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size();
+  char* buffer = static_mapinfo->misc_data_patching_start();
   clone_cpp_vtables((intptr_t*)buffer);
 
   // Verify various attributes of the archive, plus initialize the
   // shared string/symbol tables
-  buffer = mapinfo->serialized_data_start();
+  buffer = static_mapinfo->serialized_data_start();
   intptr_t* array = (intptr_t*)buffer;
   ReadClosure rc(&array);
   serialize(&rc);
 
   // Initialize the run-time symbol table.
   SymbolTable::create_table();
 
-  mapinfo->patch_archived_heap_embedded_pointers();
+  static_mapinfo->patch_archived_heap_embedded_pointers();
 
   // Close the mapinfo file
-  mapinfo->close();
+  static_mapinfo->close();
+
+  FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info();
+  if (dynamic_mapinfo != NULL) {
+    intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data_start();
+    ReadClosure rc(&buffer);
+    SymbolTable::serialize_shared_table_header(&rc, false);
+    SystemDictionaryShared::serialize_dictionary_headers(&rc, false);
+    dynamic_mapinfo->close();
+  }
 
   if (PrintSharedArchiveAndExit) {
     if (PrintSharedDictionary) {
       tty->print_cr("\nShared classes:\n");
       SystemDictionaryShared::print_on(tty);
     }
-    if (_archive_loading_failed) {
+    if (FileMapInfo::current_info() == NULL || _archive_loading_failed) {
       tty->print_cr("archive is invalid");
       vm_exit(1);
     } else {
       tty->print_cr("archive is valid");
       vm_exit(0);

@@ -2092,5 +2413,10 @@
   _md_region.print_out_of_space_msg(name, needed_bytes);
 
   vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                 "Please reduce the number of shared classes.");
 }
+
+intx MetaspaceShared::final_delta() { // FIXME rename
+  return intx(Arguments::default_SharedBaseAddress())  // We want the archive to be mapped here at runtime
+       - intx(SharedBaseAddress);                       // .. but it is mapped here at dump time
+}
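
A purely illustrative example of the delta (the addresses are made up): if the dump runs with SharedBaseAddress at 0x810000000 while Arguments::default_SharedBaseAddress() is 0x800000000, final_delta() is -0x10000000, so a metadata pointer recorded as 0x810001000 in the dump buffer is written to the archive as 0x800001000 and needs no patching when the archive is later mapped at the default base.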