1 /*
2 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "jvm.h"
27 #include "classfile/classListParser.hpp"
28 #include "classfile/classLoaderExt.hpp"
29 #include "classfile/dictionary.hpp"
30 #include "classfile/loaderConstraints.hpp"
31 #include "classfile/placeholders.hpp"
32 #include "classfile/symbolTable.hpp"
33 #include "classfile/stringTable.hpp"
34 #include "classfile/systemDictionary.hpp"
35 #include "classfile/systemDictionaryShared.hpp"
36 #include "code/codeCache.hpp"
37 #include "interpreter/bytecodeStream.hpp"
38 #include "interpreter/bytecodes.hpp"
39 #include "logging/log.hpp"
40 #include "logging/logMessage.hpp"
41 #include "memory/filemap.hpp"
42 #include "memory/heapShared.hpp"
43 #include "memory/metaspace.hpp"
44 #include "memory/metaspaceClosure.hpp"
45 #include "memory/metaspaceShared.hpp"
46 #include "memory/resourceArea.hpp"
47 #include "oops/compressedOops.inline.hpp"
48 #include "oops/instanceClassLoaderKlass.hpp"
49 #include "oops/instanceMirrorKlass.hpp"
50 #include "oops/instanceRefKlass.hpp"
51 #include "oops/objArrayKlass.hpp"
52 #include "oops/objArrayOop.hpp"
53 #include "oops/oop.inline.hpp"
54 #include "oops/typeArrayKlass.hpp"
55 #include "prims/jvmtiRedefineClasses.hpp"
56 #include "runtime/handles.inline.hpp"
57 #include "runtime/os.hpp"
58 #include "runtime/safepointVerifiers.hpp"
59 #include "runtime/signature.hpp"
60 #include "runtime/timerTrace.hpp"
61 #include "runtime/vmThread.hpp"
62 #include "runtime/vm_operations.hpp"
63 #include "utilities/align.hpp"
64 #include "utilities/defaultStream.hpp"
65 #include "utilities/hashtable.inline.hpp"
66 #if INCLUDE_G1GC
67 #include "gc/g1/g1Allocator.inline.hpp"
68 #include "gc/g1/g1CollectedHeap.hpp"
69 #endif
70
71 ReservedSpace MetaspaceShared::_shared_rs;
72 VirtualSpace MetaspaceShared::_shared_vs;
73 MetaspaceSharedStats MetaspaceShared::_stats;
74 bool MetaspaceShared::_has_error_classes;
75 bool MetaspaceShared::_archive_loading_failed = false;
76 bool MetaspaceShared::_remapped_readwrite = false;
77 bool MetaspaceShared::_open_archive_heap_region_mapped = false;
78 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
79 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
80 size_t MetaspaceShared::_core_spaces_size = 0;
81
82 // The CDS archive is divided into the following regions:
83 // mc - misc code (the method entry trampolines)
84 // rw - read-write metadata
85 // ro - read-only metadata and read-only tables
86 // md - misc data (the c++ vtables)
87 // od - optional data (original class files)
88 //
// s0  - shared strings (closed archive heap space) #0
// s1  - shared strings (closed archive heap space) #1 (may be empty)
91 // oa0 - open archive heap space #0
92 // oa1 - open archive heap space #1 (may be empty)
93 //
94 // The mc, rw, ro, md and od regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md->od. The sizes of these 5 regions
// are page-aligned, and there's no gap between any consecutive regions.
97 //
98 // These 5 regions are populated in the following steps:
99 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
100 // temporarily allocated outside of the shared regions. Only the method entry
101 // trampolines are written into the mc region.
102 // [2] ArchiveCompactor copies RW metadata into the rw region.
103 // [3] ArchiveCompactor copies RO metadata into the ro region.
104 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
105 // are copied into the ro region as read-only tables.
106 // [5] C++ vtables are copied into the md region.
107 // [6] Original class files are copied into the od region.
108 //
// The s0/s1 and oa0/oa1 regions are populated inside VM_PopulateDumpSharedSpace::dump_java_heap_objects.
110 // Their layout is independent of the other 5 regions.
111
112 class DumpRegion {
113 private:
114 const char* _name;
115 char* _base;
116 char* _top;
117 char* _end;
118 bool _is_packed;
119
120 char* expand_top_to(char* newtop) {
121 assert(is_allocatable(), "must be initialized and not packed");
122 assert(newtop >= _top, "must not grow backwards");
123 if (newtop > _end) {
124 MetaspaceShared::report_out_of_space(_name, newtop - _top);
125 ShouldNotReachHere();
126 }
127 MetaspaceShared::commit_shared_space_to(newtop);
128 _top = newtop;
129 return _top;
130 }
131
132 public:
133 DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
134
135 char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
136 char* p = (char*)align_up(_top, alignment);
137 char* newtop = p + align_up(num_bytes, alignment);
138 expand_top_to(newtop);
139 memset(p, 0, newtop - p);
140 return p;
141 }
142
143 void append_intptr_t(intptr_t n) {
144 assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
145 intptr_t *p = (intptr_t*)_top;
146 char* newtop = _top + sizeof(intptr_t);
147 expand_top_to(newtop);
148 *p = n;
149 }
150
151 char* base() const { return _base; }
152 char* top() const { return _top; }
153 char* end() const { return _end; }
154 size_t reserved() const { return _end - _base; }
155 size_t used() const { return _top - _base; }
156 bool is_packed() const { return _is_packed; }
157 bool is_allocatable() const {
158 return !is_packed() && _base != NULL;
159 }
160
161 void print(size_t total_bytes) const {
162 tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
163 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
164 }
165 void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
166 tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
167 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
168 if (strcmp(_name, failing_region) == 0) {
169 tty->print_cr(" required = %d", int(needed_bytes));
170 } else {
171 tty->cr();
172 }
173 }
174
175 void init(const ReservedSpace* rs) {
176 _base = _top = rs->base();
177 _end = rs->end();
178 }
179 void init(char* b, char* t, char* e) {
180 _base = b;
181 _top = t;
182 _end = e;
183 }
184
185 void pack(DumpRegion* next = NULL) {
186 assert(!is_packed(), "sanity");
187 _end = (char*)align_up(_top, Metaspace::reserve_alignment());
188 _is_packed = true;
189 if (next != NULL) {
190 next->_base = next->_top = this->_end;
191 next->_end = MetaspaceShared::shared_rs()->end();
192 }
193 }
194 bool contains(char* p) {
195 return base() <= p && p < top();
196 }
197 };
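
// Typical dump-time lifecycle of the core regions (an illustrative sketch; the
// actual calls are spread across this file):
//
//   _mc_region.init(&_shared_rs);        // mc starts at the base of _shared_rs
//   char* p = _mc_region.allocate(128);  // bump-allocate; commits memory as needed
//   _mc_region.pack(&_rw_region);        // align mc's end; rw starts right there
//   _rw_region.pack(&_ro_region);        // and so on: mc->rw->ro->md->od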
198
199
200 DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
201 size_t _total_string_region_size = 0, _total_open_archive_region_size = 0;
202
203 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
204 return _mc_region.allocate(num_bytes);
205 }
206
207 char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
208 return _ro_region.allocate(num_bytes);
209 }
210
211 char* MetaspaceShared::read_only_space_top() {
212 return _ro_region.top();
213 }
214
215 void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
216 assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
217
218 // If using shared space, open the file that contains the shared space
219 // and map in the memory before initializing the rest of metaspace (so
220 // the addresses don't conflict)
221 address cds_address = NULL;
222 FileMapInfo* mapinfo = new FileMapInfo();
223
224 // Open the shared archive file, read and validate the header. If
225 // initialization fails, shared spaces [UseSharedSpaces] are
226 // disabled and the file is closed.
// The regions are also mapped in at this time.
228 if (mapinfo->initialize() && map_shared_spaces(mapinfo)) {
229 size_t cds_total = core_spaces_size();
230 cds_address = (address)mapinfo->header()->region_addr(0);
231 #ifdef _LP64
232 if (Metaspace::using_class_space()) {
233 char* cds_end = (char*)(cds_address + cds_total);
234 cds_end = (char *)align_up(cds_end, Metaspace::reserve_alignment());
235 // If UseCompressedClassPointers is set then allocate the metaspace area
236 // above the heap and above the CDS area (if it exists).
237 Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
238 // map_heap_regions() compares the current narrow oop and klass encodings
239 // with the archived ones, so it must be done after all encodings are determined.
240 mapinfo->map_heap_regions();
241 }
242 Universe::set_narrow_klass_range(CompressedClassSpaceSize);
243 #endif // _LP64
244 } else {
245 assert(!mapinfo->is_open() && !UseSharedSpaces,
246 "archive file not closed or shared spaces not disabled.");
247 }
248 }
249
250 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
251 assert(DumpSharedSpaces, "should be called for dump time only");
252 const size_t reserve_alignment = Metaspace::reserve_alignment();
253 bool large_pages = false; // No large pages when dumping the CDS archive.
254 char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
255
256 #ifdef _LP64
257 // On 64-bit VM, the heap and class space layout will be the same as if
258 // you're running in -Xshare:on mode:
259 //
260 // +-- SharedBaseAddress (default = 0x800000000)
261 // v
262 // +-..---------+---------+ ... +----+----+----+----+----+---------------+
263 // | Heap | Archive | | MC | RW | RO | MD | OD | class space |
264 // +-..---------+---------+ ... +----+----+----+----+----+---------------+
265 // |<-- MaxHeapSize -->| |<-- UnscaledClassSpaceMax = 4GB ------->|
266 //
267 const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
268 const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
269 #else
270 // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
271 size_t cds_total = align_down(256*M, reserve_alignment);
272 #endif
273
274 // First try to reserve the space at the specified SharedBaseAddress.
275 _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
276 if (_shared_rs.is_reserved()) {
277 assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
278 } else {
279 // Get a mmap region anywhere if the SharedBaseAddress fails.
280 _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
281 }
282 if (!_shared_rs.is_reserved()) {
283 vm_exit_during_initialization("Unable to reserve memory for shared space",
284 err_msg(SIZE_FORMAT " bytes.", cds_total));
285 }
286
287 #ifdef _LP64
288 // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
289 // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
290 // will store Klasses into this space.
291 // + The lower 3 GB is used for the archive -- when preload_classes() is done,
292 // ArchiveCompactor will copy the class metadata into this space, first the RW parts,
293 // then the RO parts.
294
295 assert(UseCompressedOops && UseCompressedClassPointers,
296 "UseCompressedOops and UseCompressedClassPointers must be set");
297
298 size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
299 ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
300 CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
301 _shared_rs = _shared_rs.first_part(max_archive_size);
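  // For example, with the default SharedBaseAddress of 0x800000000 this leaves
  // (ignoring alignment adjustments):
  //   _shared_rs      = [0x800000000, 0x8c0000000)  -- 3 GB for the archive
  //   tmp_class_space = [0x8c0000000, 0x900000000)  -- 1 GB temporary class space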
302
  // Set up compressed class pointers.
304 Universe::set_narrow_klass_base((address)_shared_rs.base());
305 // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
306 // with AOT.
307 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
308 // Set the range of klass addresses to 4GB.
309 Universe::set_narrow_klass_range(cds_total);
310
311 Metaspace::initialize_class_space(tmp_class_space);
312 tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
313 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
314
315 tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
316 CompressedClassSpaceSize, p2i(tmp_class_space.base()));
317 #endif
318
319 // Start with 0 committed bytes. The memory will be committed as needed by
320 // MetaspaceShared::commit_shared_space_to().
321 if (!_shared_vs.initialize(_shared_rs, 0)) {
322 vm_exit_during_initialization("Unable to allocate memory for shared space");
323 }
324
325 _mc_region.init(&_shared_rs);
326 tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
327 _shared_rs.size(), p2i(_shared_rs.base()));
328 }
329
330 // Called by universe_post_init()
331 void MetaspaceShared::post_initialize(TRAPS) {
332 if (UseSharedSpaces) {
333 int size = FileMapInfo::get_number_of_shared_paths();
334 if (size > 0) {
335 SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
336 FileMapInfo::FileMapHeader* header = FileMapInfo::current_info()->header();
337 ClassLoaderExt::init_paths_start_index(header->_app_class_paths_start_index);
338 ClassLoaderExt::init_app_module_paths_start_index(header->_app_module_paths_start_index);
339 }
340 }
341
342 if (DumpSharedSpaces) {
343 if (SharedArchiveConfigFile) {
344 read_extra_data(SharedArchiveConfigFile, THREAD);
345 }
346 }
347 }
348
349 void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
350 HashtableTextDump reader(filename);
351 reader.check_version("VERSION: 1.0");
352
353 while (reader.remain() > 0) {
354 int utf8_length;
355 int prefix_type = reader.scan_prefix(&utf8_length);
356 ResourceMark rm(THREAD);
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, utf8_length + 1); // +1 for the '\0' appended below for the string case
358 reader.get_utf8(utf8_buffer, utf8_length);
359
360 if (prefix_type == HashtableTextDump::SymbolPrefix) {
361 SymbolTable::new_symbol(utf8_buffer, utf8_length, THREAD);
    } else {
363 assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
364 utf8_buffer[utf8_length] = '\0';
365 oop s = StringTable::intern(utf8_buffer, THREAD);
366 }
367 }
368 }
369
370 void MetaspaceShared::commit_shared_space_to(char* newtop) {
371 assert(DumpSharedSpaces, "dump-time only");
372 char* base = _shared_rs.base();
373 size_t need_committed_size = newtop - base;
374 size_t has_committed_size = _shared_vs.committed_size();
375 if (need_committed_size < has_committed_size) {
376 return;
377 }
378
379 size_t min_bytes = need_committed_size - has_committed_size;
380 size_t preferred_bytes = 1 * M;
381 size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;
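  // Commit at least min_bytes, but round small requests up to 1 MB so we don't
  // call expand_by() for every few bytes of metadata. For example, with 3.0 MB
  // committed and newtop requiring 3.5 MB, we commit MAX2(0.5 MB, 1 MB) = 1 MB.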
382
383 size_t commit = MAX2(min_bytes, preferred_bytes);
384 assert(commit <= uncommitted, "sanity");
385
386 bool result = _shared_vs.expand_by(commit, false);
387 if (!result) {
388 vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
389 need_committed_size));
390 }
391
392 log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
393 commit, _shared_vs.actual_committed_size(), _shared_vs.high());
394 }
395
396 // Read/write a data stream for restoring/preserving metadata pointers and
397 // miscellaneous data from/to the shared archive file.
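//
// At dump time the closure is a WriteClosure (below), which appends each value
// to the ro region; at run time a read closure consumes the stream in the same
// order, and each do_tag() call checks that the two sides are still in sync.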
398
399 void MetaspaceShared::serialize(SerializeClosure* soc) {
400 int tag = 0;
401 soc->do_tag(--tag);
402
403 // Verify the sizes of various metadata in the system.
404 soc->do_tag(sizeof(Method));
405 soc->do_tag(sizeof(ConstMethod));
406 soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
407 soc->do_tag(sizeof(ConstantPool));
408 soc->do_tag(sizeof(ConstantPoolCache));
409 soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
410 soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
411 soc->do_tag(sizeof(Symbol));
412
413 // Dump/restore miscellaneous metadata.
414 Universe::serialize(soc, true);
415 soc->do_tag(--tag);
416
417 // Dump/restore references to commonly used names and signatures.
418 vmSymbols::serialize(soc);
419 soc->do_tag(--tag);
420
421 // Dump/restore the symbol and string tables
422 SymbolTable::serialize(soc);
423 StringTable::serialize(soc);
424 soc->do_tag(--tag);
425
426 serialize_well_known_classes(soc);
427 soc->do_tag(--tag);
428
429 soc->do_tag(666);
430 }
431
432 void MetaspaceShared::serialize_well_known_classes(SerializeClosure* soc) {
433 java_lang_Class::serialize(soc);
434 java_lang_String::serialize(soc);
435 java_lang_System::serialize(soc);
436 java_lang_ClassLoader::serialize(soc);
437 java_lang_Throwable::serialize(soc);
438 java_lang_Thread::serialize(soc);
439 java_lang_ThreadGroup::serialize(soc);
440 java_lang_AssertionStatusDirectives::serialize(soc);
441 java_lang_ref_SoftReference::serialize(soc);
442 java_lang_invoke_MethodHandle::serialize(soc);
443 java_lang_invoke_DirectMethodHandle::serialize(soc);
444 java_lang_invoke_MemberName::serialize(soc);
445 java_lang_invoke_ResolvedMethodName::serialize(soc);
446 java_lang_invoke_LambdaForm::serialize(soc);
447 java_lang_invoke_MethodType::serialize(soc);
448 java_lang_invoke_CallSite::serialize(soc);
449 java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize(soc);
450 java_security_AccessControlContext::serialize(soc);
451 java_lang_reflect_AccessibleObject::serialize(soc);
452 java_lang_reflect_Method::serialize(soc);
453 java_lang_reflect_Constructor::serialize(soc);
454 java_lang_reflect_Field::serialize(soc);
455 java_nio_Buffer::serialize(soc);
456 reflect_ConstantPool::serialize(soc);
457 reflect_UnsafeStaticFieldAccessorImpl::serialize(soc);
458 java_lang_reflect_Parameter::serialize(soc);
459 java_lang_Module::serialize(soc);
460 java_lang_StackTraceElement::serialize(soc);
461 java_lang_StackFrameInfo::serialize(soc);
462 java_lang_LiveStackFrameInfo::serialize(soc);
463 java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize(soc);
464 jdk_internal_module_ArchivedModuleGraph::serialize(soc);
465 }
466
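// Returns the buffer for the i2i entry trampoline code: allocated in the mc
// region at dump time (on the first call), already initialized from the mapped
// archive at run time, and NULL when CDS is not in use.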
467 address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
468 if (DumpSharedSpaces) {
469 if (_cds_i2i_entry_code_buffers == NULL) {
470 _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
471 _cds_i2i_entry_code_buffers_size = total_size;
472 }
473 } else if (UseSharedSpaces) {
    assert(_cds_i2i_entry_code_buffers != NULL, "must already be initialized");
475 } else {
476 return NULL;
477 }
478
479 assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
480 return _cds_i2i_entry_code_buffers;
481 }
482
483 // CDS code for dumping shared archive.
484
485 // Global object for holding classes that have been loaded. Since this
486 // is run at a safepoint just before exit, this is the entire set of classes.
487 static GrowableArray<Klass*>* _global_klass_objects;
488
489 static void collect_array_classes(Klass* k) {
490 _global_klass_objects->append_if_missing(k);
491 if (k->is_array_klass()) {
492 // Add in the array classes too
493 ArrayKlass* ak = ArrayKlass::cast(k);
494 Klass* h = ak->higher_dimension();
495 if (h != NULL) {
496 h->array_klasses_do(collect_array_classes);
497 }
498 }
499 }
500
501 class CollectClassesClosure : public KlassClosure {
502 void do_klass(Klass* k) {
503 if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) {
504 if (k->is_instance_klass() && InstanceKlass::cast(k)->signers() != NULL) {
505 // Mark any class with signers and don't add to the _global_klass_objects
506 k->set_has_signer_and_not_archived();
507 } else {
508 _global_klass_objects->append_if_missing(k);
509 }
510 }
511 if (k->is_array_klass()) {
512 // Add in the array classes too
513 ArrayKlass* ak = ArrayKlass::cast(k);
514 Klass* h = ak->higher_dimension();
515 if (h != NULL) {
516 h->array_klasses_do(collect_array_classes);
517 }
518 }
519 }
520 };
521
522 static void remove_unshareable_in_classes() {
523 for (int i = 0; i < _global_klass_objects->length(); i++) {
524 Klass* k = _global_klass_objects->at(i);
525 if (!k->is_objArray_klass()) {
526 // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
527 // on their array classes.
528 assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
529 k->remove_unshareable_info();
530 }
531 }
532 }
533
534 static void remove_java_mirror_in_classes() {
535 for (int i = 0; i < _global_klass_objects->length(); i++) {
536 Klass* k = _global_klass_objects->at(i);
537 if (!k->is_objArray_klass()) {
538 // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
539 // on their array classes.
540 assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
541 k->remove_java_mirror();
542 }
543 }
544 }
545
546 static void clear_basic_type_mirrors() {
547 assert(!MetaspaceShared::is_heap_object_archiving_allowed(), "Sanity");
548 Universe::set_int_mirror(NULL);
549 Universe::set_float_mirror(NULL);
550 Universe::set_double_mirror(NULL);
551 Universe::set_byte_mirror(NULL);
552 Universe::set_bool_mirror(NULL);
553 Universe::set_char_mirror(NULL);
554 Universe::set_long_mirror(NULL);
555 Universe::set_short_mirror(NULL);
556 Universe::set_void_mirror(NULL);
557 }
558
559 static void rewrite_nofast_bytecode(Method* method) {
560 BytecodeStream bcs(method);
561 while (!bcs.is_last_bytecode()) {
562 Bytecodes::Code opcode = bcs.next();
563 switch (opcode) {
564 case Bytecodes::_getfield: *bcs.bcp() = Bytecodes::_nofast_getfield; break;
565 case Bytecodes::_putfield: *bcs.bcp() = Bytecodes::_nofast_putfield; break;
566 case Bytecodes::_aload_0: *bcs.bcp() = Bytecodes::_nofast_aload_0; break;
567 case Bytecodes::_iload: {
568 if (!bcs.is_wide()) {
569 *bcs.bcp() = Bytecodes::_nofast_iload;
570 }
571 break;
572 }
573 default: break;
574 }
575 }
576 }
577
578 // Walk all methods in the class list to ensure that they won't be modified at
579 // run time. This includes:
580 // [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
581 // at run time by RewriteBytecodes/RewriteFrequentPairs
582 // [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
583 static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
584 for (int i = 0; i < _global_klass_objects->length(); i++) {
585 Klass* k = _global_klass_objects->at(i);
586 if (k->is_instance_klass()) {
587 InstanceKlass* ik = InstanceKlass::cast(k);
588 for (int i = 0; i < ik->methods()->length(); i++) {
589 Method* m = ik->methods()->at(i);
590 rewrite_nofast_bytecode(m);
591 Fingerprinter fp(m);
592 // The side effect of this call sets method's fingerprint field.
593 fp.fingerprint();
594 }
595 }
596 }
597 }
598
599 static void relocate_cached_class_file() {
600 for (int i = 0; i < _global_klass_objects->length(); i++) {
601 Klass* k = _global_klass_objects->at(i);
602 if (k->is_instance_klass()) {
603 InstanceKlass* ik = InstanceKlass::cast(k);
604 JvmtiCachedClassFileData* p = ik->get_archived_class_data();
605 if (p != NULL) {
606 int size = offset_of(JvmtiCachedClassFileData, data) + p->length;
607 JvmtiCachedClassFileData* q = (JvmtiCachedClassFileData*)_od_region.allocate(size);
608 q->length = p->length;
609 memcpy(q->data, p->data, p->length);
610 ik->set_archived_class_data(q);
611 }
612 }
613 }
614 }
615
616 NOT_PRODUCT(
617 static void assert_not_anonymous_class(InstanceKlass* k) {
618 assert(!(k->is_anonymous()), "cannot archive anonymous classes");
619 }
620
621 // Anonymous classes are not stored inside any dictionaries. They are created by
622 // SystemDictionary::parse_stream() with a non-null host_klass.
static void assert_no_anonymous_classes_in_dictionaries() {
624 ClassLoaderDataGraph::dictionary_classes_do(assert_not_anonymous_class);
625 })
626
627 // Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
628 // (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
629 //
630 // Addresses of the vtables and the methods may be different across JVM runs,
631 // if libjvm.so is dynamically loaded at a different base address.
632 //
633 // To ensure that the Metadata objects in the CDS archive always have the correct vtable:
634 //
635 // + at dump time: we redirect the _vptr to point to our own vtables inside
636 // the CDS image
637 // + at run time: we clone the actual contents of the vtables from libjvm.so
638 // into our own tables.
639
// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
641 #define CPP_VTABLE_PATCH_TYPES_DO(f) \
642 f(ConstantPool) \
643 f(InstanceKlass) \
644 f(InstanceClassLoaderKlass) \
645 f(InstanceMirrorKlass) \
646 f(InstanceRefKlass) \
647 f(Method) \
648 f(ObjArrayKlass) \
649 f(TypeArrayKlass)
650
651 class CppVtableInfo {
652 intptr_t _vtable_size;
653 intptr_t _cloned_vtable[1];
654 public:
655 static int num_slots(int vtable_size) {
    return 1 + vtable_size; // add one slot for the _vtable_size field
657 }
658 int vtable_size() { return int(uintx(_vtable_size)); }
659 void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
660 intptr_t* cloned_vtable() { return &_cloned_vtable[0]; }
661 void zero() { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
662 // Returns the address of the next CppVtableInfo that can be placed immediately after this CppVtableInfo
663 static size_t byte_size(int vtable_size) {
664 CppVtableInfo i;
665 return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
666 }
667 };
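
// For example, on a 64-bit VM a vtable with 5 entries needs
// byte_size(5) = (1 + 5) * sizeof(intptr_t) = 48 bytes: one slot for
// _vtable_size followed by 5 cloned vtable slots.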
668
669 template <class T> class CppVtableCloner : public T {
670 static intptr_t* vtable_of(Metadata& m) {
671 return *((intptr_t**)&m);
672 }
673 static CppVtableInfo* _info;
674
675 static int get_vtable_length(const char* name);
676
677 public:
  // Allocate a clone of T's vtable from the md region and initialize it.
679 static intptr_t* allocate(const char* name);
680
  // Clone T's vtable into info->_cloned_vtable; returns the address just past the copied entries.
682 static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);
683
684 static void zero_vtable_clone() {
685 assert(DumpSharedSpaces, "dump-time only");
686 _info->zero();
687 }
688
689 // Switch the vtable pointer to point to the cloned vtable.
690 static void patch(Metadata* obj) {
691 assert(DumpSharedSpaces, "dump-time only");
692 *(void**)obj = (void*)(_info->cloned_vtable());
693 }
694
695 static bool is_valid_shared_object(const T* obj) {
696 intptr_t* vptr = *(intptr_t**)obj;
697 return vptr == _info->cloned_vtable();
698 }
699 };
700
701 template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;
702
703 template <class T>
704 intptr_t* CppVtableCloner<T>::allocate(const char* name) {
705 assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
706 int n = get_vtable_length(name);
707 _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
708 _info->set_vtable_size(n);
709
710 intptr_t* p = clone_vtable(name, _info);
711 assert((char*)p == _md_region.top(), "must be");
712
713 return p;
714 }
715
716 template <class T>
717 intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
718 if (!DumpSharedSpaces) {
719 assert(_info == 0, "_info is initialized only at dump time");
720 _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
721 }
722 T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
723 int n = info->vtable_size();
724 intptr_t* srcvtable = vtable_of(tmp);
725 intptr_t* dstvtable = info->cloned_vtable();
726
727 // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
728 // safe to do memcpy.
729 log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
730 memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
731 return dstvtable + n;
732 }
733
734 // To determine the size of the vtable for each type, we use the following
735 // trick by declaring 2 subclasses:
736 //
// class CppVtableTesterA: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
// class CppVtableTesterB: public InstanceKlass {virtual int last_virtual_method() {return 1;} };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
743 // - Their last entry is different.
744 //
745 // So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
746 // and find the first entry that's different.
747 //
748 // This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
749 // esoteric compilers.
750
751 template <class T> class CppVtableTesterB: public T {
752 public:
753 virtual int last_virtual_method() {return 1;}
754 };
755
756 template <class T> class CppVtableTesterA : public T {
757 public:
758 virtual void* last_virtual_method() {
759 // Make this different than CppVtableTesterB::last_virtual_method so the C++
760 // compiler/linker won't alias the two functions.
761 return NULL;
762 }
763 };
764
765 template <class T>
766 int CppVtableCloner<T>::get_vtable_length(const char* name) {
767 CppVtableTesterA<T> a;
768 CppVtableTesterB<T> b;
769
770 intptr_t* avtable = vtable_of(a);
771 intptr_t* bvtable = vtable_of(b);
772
773 // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
774 int vtable_len = 1;
775 for (; ; vtable_len++) {
776 if (avtable[vtable_len] != bvtable[vtable_len]) {
777 break;
778 }
779 }
780 log_debug(cds, vtables)("Found %3d vtable entries for %s", vtable_len, name);
781
782 return vtable_len;
783 }
784
785 #define ALLOC_CPP_VTABLE_CLONE(c) \
786 CppVtableCloner<c>::allocate(#c);
787
788 #define CLONE_CPP_VTABLE(c) \
789 p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);
790
791 #define ZERO_CPP_VTABLE(c) \
792 CppVtableCloner<c>::zero_vtable_clone();
793
794 // This can be called at both dump time and run time.
795 intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
796 assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
797 CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
798 return p;
799 }
800
801 void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
802 assert(DumpSharedSpaces, "dump-time only");
803 CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
804 }
805
// Allocate and initialize the C++ vtable clones in the md region.
807 void MetaspaceShared::allocate_cpp_vtable_clones() {
808 assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
810 // [number of slots in the first vtable = n1]
811 // [ <n1> slots for the first vtable]
  // [number of slots in the second vtable = n2]
813 // [ <n2> slots for the second vtable]
814 // ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
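  // For example, if the first two cloned types had vtables of 5 and 7 entries,
  // the md region would begin with: [5][5 slots][7][7 slots]...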
816 CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
817 }
818
// Switch the vtable pointer to point to the cloned vtable. We assume the
// vtable pointer is in the first slot of the object.
821 void MetaspaceShared::patch_cpp_vtable_pointers() {
822 int n = _global_klass_objects->length();
823 for (int i = 0; i < n; i++) {
824 Klass* obj = _global_klass_objects->at(i);
825 if (obj->is_instance_klass()) {
826 InstanceKlass* ik = InstanceKlass::cast(obj);
827 if (ik->is_class_loader_instance_klass()) {
828 CppVtableCloner<InstanceClassLoaderKlass>::patch(ik);
829 } else if (ik->is_reference_instance_klass()) {
830 CppVtableCloner<InstanceRefKlass>::patch(ik);
831 } else if (ik->is_mirror_instance_klass()) {
832 CppVtableCloner<InstanceMirrorKlass>::patch(ik);
833 } else {
834 CppVtableCloner<InstanceKlass>::patch(ik);
835 }
836 ConstantPool* cp = ik->constants();
837 CppVtableCloner<ConstantPool>::patch(cp);
838 for (int j = 0; j < ik->methods()->length(); j++) {
839 Method* m = ik->methods()->at(j);
840 CppVtableCloner<Method>::patch(m);
841 assert(CppVtableCloner<Method>::is_valid_shared_object(m), "must be");
842 }
843 } else if (obj->is_objArray_klass()) {
844 CppVtableCloner<ObjArrayKlass>::patch(obj);
845 } else {
846 assert(obj->is_typeArray_klass(), "sanity");
847 CppVtableCloner<TypeArrayKlass>::patch(obj);
848 }
849 }
850 }
851
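// A shared Method is valid only if its _vptr still points to the cloned vtable
// inside the archive; see CppVtableCloner<Method>::is_valid_shared_object above.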
852 bool MetaspaceShared::is_valid_shared_method(const Method* m) {
853 assert(is_in_shared_metaspace(m), "must be");
854 return CppVtableCloner<Method>::is_valid_shared_object(m);
855 }
856
857 // Closure for serializing initialization data out to a data area to be
858 // written to the shared file.
859
860 class WriteClosure : public SerializeClosure {
861 private:
862 DumpRegion* _dump_region;
863
864 public:
865 WriteClosure(DumpRegion* r) {
866 _dump_region = r;
867 }
868
869 void do_ptr(void** p) {
870 _dump_region->append_intptr_t((intptr_t)*p);
871 }
872
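  // A u4 value is widened to a full pointer-sized slot so that every element
  // in the serialized stream has the same size.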
873 void do_u4(u4* p) {
874 void* ptr = (void*)(uintx(*p));
875 do_ptr(&ptr);
876 }
877
878 void do_tag(int tag) {
879 _dump_region->append_intptr_t((intptr_t)tag);
880 }
881
882 void do_oop(oop* o) {
883 if (*o == NULL) {
884 _dump_region->append_intptr_t(0);
885 } else {
886 assert(MetaspaceShared::is_heap_object_archiving_allowed(),
887 "Archiving heap object is not allowed");
888 _dump_region->append_intptr_t(
889 (intptr_t)CompressedOops::encode_not_null(*o));
890 }
891 }
892
893 void do_region(u_char* start, size_t size) {
894 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
895 assert(size % sizeof(intptr_t) == 0, "bad size");
896 do_tag((int)size);
897 while (size > 0) {
898 _dump_region->append_intptr_t(*(intptr_t*)start);
899 start += sizeof(intptr_t);
900 size -= sizeof(intptr_t);
901 }
902 }
903
904 bool reading() const { return false; }
905 };
906
907 // This is for dumping detailed statistics for the allocations
908 // in the shared spaces.
909 class DumpAllocStats : public ResourceObj {
910 public:
911
912 // Here's poor man's enum inheritance
913 #define SHAREDSPACE_OBJ_TYPES_DO(f) \
914 METASPACE_OBJ_TYPES_DO(f) \
915 f(SymbolHashentry) \
916 f(SymbolBucket) \
917 f(StringHashentry) \
918 f(StringBucket) \
919 f(Other)
920
921 enum Type {
922 // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
923 SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
924 _number_of_types
925 };
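
  // For example, METASPACE_OBJ_TYPE_DECLARE expands each name above to
  // <name>Type, yielding ClassType, SymbolType, ... from METASPACE_OBJ_TYPES_DO,
  // followed by SymbolHashentryType, SymbolBucketType, StringHashentryType,
  // StringBucketType and OtherType.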
926
927 static const char * type_name(Type type) {
928 switch(type) {
929 SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
930 default:
931 ShouldNotReachHere();
932 return NULL;
933 }
934 }
935
936 public:
937 enum { RO = 0, RW = 1 };
938
939 int _counts[2][_number_of_types];
940 int _bytes [2][_number_of_types];
941
942 DumpAllocStats() {
943 memset(_counts, 0, sizeof(_counts));
944 memset(_bytes, 0, sizeof(_bytes));
  }
946
947 void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
948 assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
949 int which = (read_only) ? RO : RW;
950 _counts[which][type] ++;
951 _bytes [which][type] += byte_size;
952 }
953
954 void record_other_type(int byte_size, bool read_only) {
955 int which = (read_only) ? RO : RW;
956 _bytes [which][OtherType] += byte_size;
957 }
958 void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
959 };
960
961 void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
962 // Calculate size of data that was not allocated by Metaspace::allocate()
963 MetaspaceSharedStats *stats = MetaspaceShared::stats();
964
965 // symbols
966 _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
967 _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;
968
969 _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
970 _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;
971
972 // strings
973 _counts[RO][StringHashentryType] = stats->string.hashentry_count;
974 _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;
975
976 _counts[RO][StringBucketType] = stats->string.bucket_count;
977 _bytes [RO][StringBucketType] = stats->string.bucket_bytes;
978
979 // TODO: count things like dictionary, vtable, etc
980 _bytes[RW][OtherType] += mc_all + md_all;
981 rw_all += mc_all + md_all; // mc/md are mapped Read/Write
982
983 // prevent divide-by-zero
984 if (ro_all < 1) {
985 ro_all = 1;
986 }
987 if (rw_all < 1) {
988 rw_all = 1;
989 }
990
991 int all_ro_count = 0;
992 int all_ro_bytes = 0;
993 int all_rw_count = 0;
994 int all_rw_bytes = 0;
995
996 // To make fmt_stats be a syntactic constant (for format warnings), use #define.
997 #define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
998 const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
999 const char *hdr = " ro_cnt ro_bytes % | rw_cnt rw_bytes % | all_cnt all_bytes %";
1000
1001 LogMessage(cds) msg;
1002
1003 msg.info("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
1004 msg.info("%s", hdr);
1005 msg.info("%s", sep);
1006 for (int type = 0; type < int(_number_of_types); type ++) {
1007 const char *name = type_name((Type)type);
1008 int ro_count = _counts[RO][type];
1009 int ro_bytes = _bytes [RO][type];
1010 int rw_count = _counts[RW][type];
1011 int rw_bytes = _bytes [RW][type];
1012 int count = ro_count + rw_count;
1013 int bytes = ro_bytes + rw_bytes;
1014
1015 double ro_perc = percent_of(ro_bytes, ro_all);
1016 double rw_perc = percent_of(rw_bytes, rw_all);
1017 double perc = percent_of(bytes, ro_all + rw_all);
1018
1019 msg.info(fmt_stats, name,
1020 ro_count, ro_bytes, ro_perc,
1021 rw_count, rw_bytes, rw_perc,
1022 count, bytes, perc);
1023
1024 all_ro_count += ro_count;
1025 all_ro_bytes += ro_bytes;
1026 all_rw_count += rw_count;
1027 all_rw_bytes += rw_bytes;
1028 }
1029
1030 int all_count = all_ro_count + all_rw_count;
1031 int all_bytes = all_ro_bytes + all_rw_bytes;
1032
1033 double all_ro_perc = percent_of(all_ro_bytes, ro_all);
1034 double all_rw_perc = percent_of(all_rw_bytes, rw_all);
1035 double all_perc = percent_of(all_bytes, ro_all + rw_all);
1036
1037 msg.info("%s", sep);
1038 msg.info(fmt_stats, "Total",
1039 all_ro_count, all_ro_bytes, all_ro_perc,
1040 all_rw_count, all_rw_bytes, all_rw_perc,
1041 all_count, all_bytes, all_perc);
1042
1043 assert(all_ro_bytes == ro_all, "everything should have been counted");
1044 assert(all_rw_bytes == rw_all, "everything should have been counted");
1045
1046 #undef fmt_stats
1047 }
1048
1049 // Populate the shared space.
1050
1051 class VM_PopulateDumpSharedSpace: public VM_Operation {
1052 private:
1053 GrowableArray<MemRegion> *_closed_archive_heap_regions;
1054 GrowableArray<MemRegion> *_open_archive_heap_regions;
1055
1056 void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
1057 void dump_symbols();
1058 char* dump_read_only_tables();
1059 void print_region_stats();
1060 void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
1061 const char *name, const size_t total_size);
1062 public:
1063
1064 VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
1065 void doit(); // outline because gdb sucks
1066 static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
1067 bool allow_nested_vm_operations() const { return true; }
1068 }; // class VM_PopulateDumpSharedSpace
1069
1070 class SortedSymbolClosure: public SymbolClosure {
1071 GrowableArray<Symbol*> _symbols;
1072 virtual void do_symbol(Symbol** sym) {
1073 assert((*sym)->is_permanent(), "archived symbols must be permanent");
1074 _symbols.append(*sym);
1075 }
1076 static int compare_symbols_by_address(Symbol** a, Symbol** b) {
1077 if (a[0] < b[0]) {
1078 return -1;
1079 } else if (a[0] == b[0]) {
1080 return 0;
1081 } else {
1082 return 1;
1083 }
1084 }
1085
1086 public:
1087 SortedSymbolClosure() {
1088 SymbolTable::symbols_do(this);
1089 _symbols.sort(compare_symbols_by_address);
1090 }
1091 GrowableArray<Symbol*>* get_sorted_symbols() {
1092 return &_symbols;
1093 }
1094 };
1095
1096 // ArchiveCompactor --
1097 //
1098 // This class is the central piece of shared archive compaction -- all metaspace data are
1099 // initially allocated outside of the shared regions. ArchiveCompactor copies the
1100 // metaspace data into their final location in the shared regions.
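//
// Compaction makes four passes over the same set of roots (see
// copy_and_compact() below):
//   [1] ShallowCopier(RW)  -- copy each read-write object into the rw region
//   [2] ShallowCopier(RO)  -- copy each read-only object into the ro region
//   [3] ShallowCopyEmbeddedRefRelocator -- fix pointers embedded in the copies
//   [4] RefRelocator -- update the external roots to point to the copies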
1101
1102 class ArchiveCompactor : AllStatic {
1103 static DumpAllocStats* _alloc_stats;
1104 static SortedSymbolClosure* _ssc;
1105
1106 static unsigned my_hash(const address& a) {
1107 return primitive_hash<address>(a);
1108 }
1109 static bool my_equals(const address& a0, const address& a1) {
1110 return primitive_equals<address>(a0, a1);
1111 }
1112 typedef ResourceHashtable<
1113 address, address,
1114 ArchiveCompactor::my_hash, // solaris compiler doesn't like: primitive_hash<address>
1115 ArchiveCompactor::my_equals, // solaris compiler doesn't like: primitive_equals<address>
1116 16384, ResourceObj::C_HEAP> RelocationTable;
1117 static RelocationTable* _new_loc_table;
1118
1119 public:
1120 static void initialize() {
1121 _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats;
1122 _new_loc_table = new(ResourceObj::C_HEAP, mtInternal)RelocationTable;
1123 }
1124 static DumpAllocStats* alloc_stats() {
1125 return _alloc_stats;
1126 }
1127
1128 static void allocate(MetaspaceClosure::Ref* ref, bool read_only) {
1129 address obj = ref->obj();
1130 int bytes = ref->size() * BytesPerWord;
1131 char* p;
1132 size_t alignment = BytesPerWord;
1133 char* oldtop;
1134 char* newtop;
1135
1136 if (read_only) {
1137 oldtop = _ro_region.top();
1138 p = _ro_region.allocate(bytes, alignment);
1139 newtop = _ro_region.top();
1140 } else {
1141 oldtop = _rw_region.top();
1142 p = _rw_region.allocate(bytes, alignment);
1143 newtop = _rw_region.top();
1144 }
1145 memcpy(p, obj, bytes);
1146 bool isnew = _new_loc_table->put(obj, (address)p);
1147 log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
1148 assert(isnew, "must be");
1149
1150 _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
1151 if (ref->msotype() == MetaspaceObj::SymbolType) {
1152 uintx delta = MetaspaceShared::object_delta(p);
1153 if (delta > MAX_SHARED_DELTA) {
        // This is just a sanity check and should not happen in any real-world usage:
        // it would require allocating more than 2GB of Symbols, i.e., millions of
        // shared classes.
1157 vm_exit_during_initialization("Too many Symbols in the CDS archive",
1158 "Please reduce the number of shared classes.");
1159 }
1160 }
1161 }
1162
1163 static address get_new_loc(MetaspaceClosure::Ref* ref) {
1164 address* pp = _new_loc_table->get(ref->obj());
1165 assert(pp != NULL, "must be");
1166 return *pp;
1167 }
1168
1169 private:
1170 // Makes a shallow copy of visited MetaspaceObj's
1171 class ShallowCopier: public UniqueMetaspaceClosure {
1172 bool _read_only;
1173 public:
1174 ShallowCopier(bool read_only) : _read_only(read_only) {}
1175
1176 virtual void do_unique_ref(Ref* ref, bool read_only) {
1177 if (read_only == _read_only) {
1178 allocate(ref, read_only);
1179 }
1180 }
1181 };
1182
1183 // Relocate embedded pointers within a MetaspaceObj's shallow copy
1184 class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
1185 public:
1186 virtual void do_unique_ref(Ref* ref, bool read_only) {
1187 address new_loc = get_new_loc(ref);
1188 RefRelocator refer;
1189 ref->metaspace_pointers_do_at(&refer, new_loc);
1190 }
1191 };
1192
1193 // Relocate a reference to point to its shallow copy
1194 class RefRelocator: public MetaspaceClosure {
1195 public:
1196 virtual bool do_ref(Ref* ref, bool read_only) {
1197 if (ref->not_null()) {
1198 ref->update(get_new_loc(ref));
1199 }
1200 return false; // Do not recurse.
1201 }
1202 };
1203
1204 #ifdef ASSERT
1205 class IsRefInArchiveChecker: public MetaspaceClosure {
1206 public:
1207 virtual bool do_ref(Ref* ref, bool read_only) {
1208 if (ref->not_null()) {
1209 char* obj = (char*)ref->obj();
1210 assert(_ro_region.contains(obj) || _rw_region.contains(obj),
1211 "must be relocated to point to CDS archive");
1212 }
1213 return false; // Do not recurse.
1214 }
1215 };
1216 #endif
1217
1218 public:
1219 static void copy_and_compact() {
1220 // We should no longer allocate anything from the metaspace, so that
1221 // we can have a stable set of MetaspaceObjs to work with.
1222 Metaspace::freeze();
1223
1224 ResourceMark rm;
1225 SortedSymbolClosure the_ssc; // StackObj
1226 _ssc = &the_ssc;
1227
1228 tty->print_cr("Scanning all metaspace objects ... ");
1229 {
1230 // allocate and shallow-copy RW objects, immediately following the MC region
1231 tty->print_cr("Allocating RW objects ... ");
1232 _mc_region.pack(&_rw_region);
1233
1234 ResourceMark rm;
1235 ShallowCopier rw_copier(false);
1236 iterate_roots(&rw_copier);
1237 }
1238 {
      // allocate and shallow-copy RO objects, immediately following the RW region
1240 tty->print_cr("Allocating RO objects ... ");
1241 _rw_region.pack(&_ro_region);
1242
1243 ResourceMark rm;
1244 ShallowCopier ro_copier(true);
1245 iterate_roots(&ro_copier);
1246 }
1247 {
1248 tty->print_cr("Relocating embedded pointers ... ");
1249 ResourceMark rm;
1250 ShallowCopyEmbeddedRefRelocator emb_reloc;
1251 iterate_roots(&emb_reloc);
1252 }
1253 {
1254 tty->print_cr("Relocating external roots ... ");
1255 ResourceMark rm;
1256 RefRelocator ext_reloc;
1257 iterate_roots(&ext_reloc);
1258 }
1259
1260 #ifdef ASSERT
1261 {
1262 tty->print_cr("Verifying external roots ... ");
1263 ResourceMark rm;
1264 IsRefInArchiveChecker checker;
1265 iterate_roots(&checker);
1266 }
1267 #endif
1268
1269
1270 // cleanup
1271 _ssc = NULL;
1272 }
1273
  // We must relocate SystemDictionary::_well_known_klasses only after we have copied the
  // java objects during dump_java_heap_objects(): the object copy operates on the
  // old objects, which assert that their klass is the original (unrelocated) klass.
1277 static void relocate_well_known_klasses() {
1278 {
1279 tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... ");
1280 ResourceMark rm;
1281 RefRelocator ext_reloc;
1282 SystemDictionary::well_known_klasses_do(&ext_reloc);
1283 }
1284 // NOTE: after this point, we shouldn't have any globals that can reach the old
1285 // objects.
1286
1287 // We cannot use any of the objects in the heap anymore (except for the objects
1288 // in the CDS shared string regions) because their headers no longer point to
1289 // valid Klasses.
1290 }
1291
1292 static void iterate_roots(MetaspaceClosure* it) {
1293 GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
1294 for (int i=0; i<symbols->length(); i++) {
1295 it->push(symbols->adr_at(i));
1296 }
1297 if (_global_klass_objects != NULL) {
1298 // Need to fix up the pointers
1299 for (int i = 0; i < _global_klass_objects->length(); i++) {
1300 // NOTE -- this requires that the vtable is NOT yet patched, or else we are hosed.
1301 it->push(_global_klass_objects->adr_at(i));
1302 }
1303 }
1304 FileMapInfo::metaspace_pointers_do(it);
1305 SystemDictionary::classes_do(it);
1306 Universe::metaspace_pointers_do(it);
1307 SymbolTable::metaspace_pointers_do(it);
1308 vmSymbols::metaspace_pointers_do(it);
1309 }
1310
1311 static Klass* get_relocated_klass(Klass* orig_klass) {
1312 assert(DumpSharedSpaces, "dump time only");
1313 address* pp = _new_loc_table->get((address)orig_klass);
1314 assert(pp != NULL, "must be");
1315 Klass* klass = (Klass*)(*pp);
1316 assert(klass->is_klass(), "must be");
1317 return klass;
1318 }
1319 };
1320
1321 DumpAllocStats* ArchiveCompactor::_alloc_stats;
1322 SortedSymbolClosure* ArchiveCompactor::_ssc;
1323 ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table;
1324
1325 void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx,
1326 DumpRegion* dump_region, bool read_only, bool allow_exec) {
1327 mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
1328 }
1329
1330 void VM_PopulateDumpSharedSpace::dump_symbols() {
1331 tty->print_cr("Dumping symbol table ...");
1332
1333 NOT_PRODUCT(SymbolTable::verify());
1334 SymbolTable::write_to_archive();
1335 }
1336
1337 char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
1338 char* oldtop = _ro_region.top();
1339 // Reorder the system dictionary. Moving the symbols affects
1340 // how the hash table indices are calculated.
1341 SystemDictionary::reorder_dictionary_for_sharing();
1342
1343 tty->print("Removing java_mirror ... ");
1344 if (!MetaspaceShared::is_heap_object_archiving_allowed()) {
1345 clear_basic_type_mirrors();
1346 }
1347 remove_java_mirror_in_classes();
1348 tty->print_cr("done. ");
1349 NOT_PRODUCT(SystemDictionary::verify();)
1350
1351 size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
1352 char* buckets_top = _ro_region.allocate(buckets_bytes, sizeof(intptr_t));
1353 SystemDictionary::copy_buckets(buckets_top, _ro_region.top());
1354
1355 size_t table_bytes = SystemDictionary::count_bytes_for_table();
1356 char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
1357 SystemDictionary::copy_table(table_top, _ro_region.top());
1358
1359 // Write the archived object sub-graph infos. For each klass with sub-graphs,
1360 // the info includes the static fields (sub-graph entry points) and Klasses
1361 // of objects included in the sub-graph.
1362 HeapShared::write_archived_subgraph_infos();
1363
1364 // Write the other data to the output array.
1365 WriteClosure wc(&_ro_region);
1366 MetaspaceShared::serialize(&wc);
1367
1368 char* newtop = _ro_region.top();
1369 ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - oldtop), true);
1370 return buckets_top;
1371 }
1372
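// The dump proceeds, at a high level, as follows:
//  [1] Collect all loaded classes into _global_klass_objects.
//  [2] Rewrite bytecodes and compute fingerprints, so the ConstMethods can be read-only.
//  [3] Combine the platform/system dictionaries into the boot dictionary.
//  [4] Remove unshareable info, then copy and compact with ArchiveCompactor.
//  [5] Dump the symbol table, java heap objects, and read-only tables.
//  [6] Allocate and patch the C++ vtable clones, then write the archive file.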
1373 void VM_PopulateDumpSharedSpace::doit() {
1374 Thread* THREAD = VMThread::vm_thread();
1375
1376 FileMapInfo::check_nonempty_dir_in_shared_path_table();
1377
1378 NOT_PRODUCT(SystemDictionary::verify();)
1379 // The following guarantee is meant to ensure that no loader constraints
1380 // exist yet, since the constraints table is not shared. This becomes
1381 // more important now that we don't re-initialize vtables/itables for
1382 // shared classes at runtime, where constraints were previously created.
1383 guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
1384 "loader constraints are not saved");
1385 guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
1386 "placeholders are not saved");
1387 // Revisit and implement this if we prelink method handle call sites:
1388 guarantee(SystemDictionary::invoke_method_table() == NULL ||
1389 SystemDictionary::invoke_method_table()->number_of_entries() == 0,
1390 "invoke method table is not saved");
1391
1392 // At this point, many classes have been loaded.
  // Gather the SystemDictionary classes into a global array and operate on that,
  // so we don't have to walk the SystemDictionary again.
1395 _global_klass_objects = new GrowableArray<Klass*>(1000);
1396 CollectClassesClosure collect_classes;
1397 ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
1398
1399 tty->print_cr("Number of classes %d", _global_klass_objects->length());
1400 {
1401 int num_type_array = 0, num_obj_array = 0, num_inst = 0;
1402 for (int i = 0; i < _global_klass_objects->length(); i++) {
1403 Klass* k = _global_klass_objects->at(i);
1404 if (k->is_instance_klass()) {
1405 num_inst ++;
1406 } else if (k->is_objArray_klass()) {
1407 num_obj_array ++;
1408 } else {
1409 assert(k->is_typeArray_klass(), "sanity");
1410 num_type_array ++;
1411 }
1412 }
1413 tty->print_cr(" instance classes = %5d", num_inst);
1414 tty->print_cr(" obj array classes = %5d", num_obj_array);
1415 tty->print_cr(" type array classes = %5d", num_type_array);
1416 }
1417
1418 // Ensure the ConstMethods won't be modified at run-time
1419 tty->print("Updating ConstMethods ... ");
1420 rewrite_nofast_bytecodes_and_calculate_fingerprints();
1421 tty->print_cr("done. ");
1422
1423 // Move classes from platform/system dictionaries into the boot dictionary
1424 SystemDictionary::combine_shared_dictionaries();
1425
1426 // Make sure all classes have a correct loader type.
1427 ClassLoaderData::the_null_class_loader_data()->dictionary()->classes_do(MetaspaceShared::check_shared_class_loader_type);
1428
1429 // Remove all references outside the metadata
1430 tty->print("Removing unshareable information ... ");
1431 remove_unshareable_in_classes();
1432 tty->print_cr("done. ");
1433
  // We don't support archiving anonymous classes. Verify that none are stored in
  // any of the dictionaries.
  NOT_PRODUCT(assert_no_anonymous_classes_in_dictionaries());
1437
1438 SystemDictionaryShared::finalize_verification_constraints();
1439
1440 ArchiveCompactor::initialize();
1441 ArchiveCompactor::copy_and_compact();
1442
1443 dump_symbols();
1444
1445 // Dump supported java heap objects
1446 _closed_archive_heap_regions = NULL;
1447 _open_archive_heap_regions = NULL;
1448 dump_java_heap_objects();
1449
1450 ArchiveCompactor::relocate_well_known_klasses();
1451
1452 char* read_only_tables_start = dump_read_only_tables();
1453 _ro_region.pack(&_md_region);
1454
1455 char* vtbl_list = _md_region.top();
1456 MetaspaceShared::allocate_cpp_vtable_clones();
1457 _md_region.pack(&_od_region);
1458
1459 // Relocate the archived class file data into the od region
1460 relocate_cached_class_file();
1461 _od_region.pack();
1462
  // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so their total
  // size is just the span between the two ends.
1465 size_t core_spaces_size = _od_region.end() - _mc_region.base();
1466 assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
1467 "should already be aligned");
1468
1469 // During patching, some virtual methods may be called, so at this point
1470 // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
1471 MetaspaceShared::patch_cpp_vtable_pointers();
1472
1473 // The vtable clones contain addresses of the current process.
1474 // We don't want to write these addresses into the archive.
1475 MetaspaceShared::zero_cpp_vtable_clones_for_writing();
1476
1477 // Create and write the archive file that maps the shared spaces.
1478
1479 FileMapInfo* mapinfo = new FileMapInfo();
1480 mapinfo->populate_header(os::vm_allocation_granularity());
1481 mapinfo->set_read_only_tables_start(read_only_tables_start);
1482 mapinfo->set_misc_data_patching_start(vtbl_list);
1483 mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
1484 mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
1485 mapinfo->set_core_spaces_size(core_spaces_size);
1486
1487 for (int pass=1; pass<=2; pass++) {
1488 if (pass == 1) {
1489 // The first pass doesn't actually write the data to disk. All it
1490 // does is to update the fields in the mapinfo->_header.
1491 } else {
1492 // After the first pass, the contents of mapinfo->_header are finalized,
1493 // so we can compute the header's CRC, and write the contents of the header
1494 // and the regions into disk.
1495 mapinfo->open_for_write();
1496 mapinfo->set_header_crc(mapinfo->compute_header_crc());
1497 }
1498 mapinfo->write_header();
1499
    // NOTE: mc contains the trampoline code for method entries, which is patched at
    // run time, so it needs to be read/write and executable; md contains the C++
    // vtable clones, which are also patched at run time, so it needs to be read/write.
1502 write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true);
1503 write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
1504 write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
1505 write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
1506 write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
1507
1508 _total_string_region_size = mapinfo->write_archive_heap_regions(
1509 _closed_archive_heap_regions,
1510 MetaspaceShared::first_string,
1511 MetaspaceShared::max_strings);
1512 _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
1513 _open_archive_heap_regions,
1514 MetaspaceShared::first_open_archive_heap_region,
1515 MetaspaceShared::max_open_archive_heap_region);
1516 }
1517
1518 mapinfo->close();
1519
1520 // Restore the vtable in case we invoke any virtual methods.
1521 MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list);
1522
1523 print_region_stats();
1524
1525 if (log_is_enabled(Info, cds)) {
1526 ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
1527 int(_mc_region.used()), int(_md_region.used()));
1528 }
1529
1530 if (PrintSystemDictionaryAtExit) {
1531 SystemDictionary::print();
1532 }
// There may be other pending VM operations that operate on the InstanceKlasses,
// which will fail because InstanceKlass::remove_unshareable_info()
// has been called. Forget these operations and exit the VM directly.
1536 vm_direct_exit(0);
1537 }
1538
1539 void VM_PopulateDumpSharedSpace::print_region_stats() {
1540 // Print statistics of all the regions
1541 const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
1542 _mc_region.reserved() + _md_region.reserved() +
1543 _od_region.reserved() +
1544 _total_string_region_size +
1545 _total_open_archive_region_size;
1546 const size_t total_bytes = _ro_region.used() + _rw_region.used() +
1547 _mc_region.used() + _md_region.used() +
1548 _od_region.used() +
1549 _total_string_region_size +
1550 _total_open_archive_region_size;
1551 const double total_u_perc = percent_of(total_bytes, total_reserved);
1552
1553 _mc_region.print(total_reserved);
1554 _rw_region.print(total_reserved);
1555 _ro_region.print(total_reserved);
1556 _md_region.print(total_reserved);
1557 _od_region.print(total_reserved);
1558 print_heap_region_stats(_closed_archive_heap_regions, "st", total_reserved);
1559 print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
1560
1561 tty->print_cr("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
1562 total_bytes, total_reserved, total_u_perc);
1563 }
1564
1565 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
1566 const char *name, const size_t total_size) {
1567 int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
1568 for (int i = 0; i < arr_len; i++) {
1569 char* start = (char*)heap_mem->at(i).start();
1570 size_t size = heap_mem->at(i).byte_size();
1571 char* top = start + size;
    tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                  name, i, size, size/double(total_size)*100.0, size, p2i(start));
  }
1576 }
1577
// Update a Java object to point its Klass* to the new location after
// the shared archive has been compacted.
1580 void MetaspaceShared::relocate_klass_ptr(oop o) {
1581 assert(DumpSharedSpaces, "sanity");
1582 Klass* k = ArchiveCompactor::get_relocated_klass(o->klass());
1583 o->set_klass(k);
1584 }
1585
1586 Klass* MetaspaceShared::get_relocated_klass(Klass *k) {
1587 assert(DumpSharedSpaces, "sanity");
1588 return ArchiveCompactor::get_relocated_klass(k);
1589 }
1590
1591 class LinkSharedClassesClosure : public KlassClosure {
1592 Thread* THREAD;
1593 bool _made_progress;
1594 public:
1595 LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {}
1596
1597 void reset() { _made_progress = false; }
1598 bool made_progress() const { return _made_progress; }
1599
1600 void do_klass(Klass* k) {
1601 if (k->is_instance_klass()) {
1602 InstanceKlass* ik = InstanceKlass::cast(k);
      // Link the class to cause the bytecodes to be rewritten and the
      // cpcache to be created. Class verification is done according
      // to the -Xverify setting.
1606 _made_progress |= MetaspaceShared::try_link_class(ik, THREAD);
1607 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
1608
1609 ik->constants()->resolve_class_constants(THREAD);
1610 }
1611 }
1612 };
1613
1614 class CheckSharedClassesClosure : public KlassClosure {
1615 bool _made_progress;
1616 public:
1617 CheckSharedClassesClosure() : _made_progress(false) {}
1618
1619 void reset() { _made_progress = false; }
1620 bool made_progress() const { return _made_progress; }
1621 void do_klass(Klass* k) {
1622 if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) {
1623 _made_progress = true;
1624 }
1625 }
1626 };
1627
1628 void MetaspaceShared::check_shared_class_loader_type(InstanceKlass* ik) {
1629 ResourceMark rm;
1630 if (ik->shared_classpath_index() == UNREGISTERED_INDEX) {
1631 guarantee(ik->loader_type() == 0,
1632 "Class loader type must not be set for this class %s", ik->name()->as_C_string());
1633 } else {
1634 guarantee(ik->loader_type() != 0,
1635 "Class loader type must be set for this class %s", ik->name()->as_C_string());
1636 }
1637 }
1638
1639 void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
1640 // We need to iterate because verification may cause additional classes
1641 // to be loaded.
1642 LinkSharedClassesClosure link_closure(THREAD);
1643 do {
1644 link_closure.reset();
1645 ClassLoaderDataGraph::loaded_classes_do(&link_closure);
1646 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
1647 } while (link_closure.made_progress());
1648
1649 if (_has_error_classes) {
1650 // Mark all classes whose super class or interfaces failed verification.
1651 CheckSharedClassesClosure check_closure;
1652 do {
1653 // Not completely sure if we need to do this iteratively. Anyway,
1654 // we should come here only if there are unverifiable classes, which
1655 // shouldn't happen in normal cases. So better safe than sorry.
1656 check_closure.reset();
1657 ClassLoaderDataGraph::loaded_classes_do(&check_closure);
1658 } while (check_closure.made_progress());
1659
1660 if (IgnoreUnverifiableClassesDuringDump) {
1661 // This is useful when running JCK or SQE tests. You should not
1662 // enable this when running real apps.
1663 SystemDictionary::remove_classes_in_error_state();
1664 } else {
1665 tty->print_cr("Please remove the unverifiable classes from your class list and try again");
1666 exit(1);
1667 }
1668 }
1669 }
1670
1671 void MetaspaceShared::prepare_for_dumping() {
1672 Arguments::check_unsupported_dumping_properties();
1673 ClassLoader::initialize_shared_path();
1674 }
1675
1676 // Preload classes from a list, populate the shared spaces and dump to a
1677 // file.
1678 void MetaspaceShared::preload_and_dump(TRAPS) {
1679 { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
1680 ResourceMark rm;
1681 char class_list_path_str[JVM_MAXPATHLEN];
1682 // Preload classes to be shared.
    // TODO: should use an os:: method rather than fopen() here.
1684 const char* class_list_path;
1685 if (SharedClassListFile == NULL) {
      // Construct the default class list path (<java home>/lib/classlist):
      // walk up two directories from the location of the VM and
      // optionally tack on "lib" (depending on platform).
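      // For example, assuming a typical Linux layout where the VM is at
      //   <java home>/lib/server/libjvm.so
      // the three strrchr() iterations below strip the file name and two
      // directories, leaving <java home>; "lib" and "classlist" are then
      // appended to form the final path.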
1689 os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
1690 for (int i = 0; i < 3; i++) {
1691 char *end = strrchr(class_list_path_str, *os::file_separator());
1692 if (end != NULL) *end = '\0';
1693 }
1694 int class_list_path_len = (int)strlen(class_list_path_str);
1695 if (class_list_path_len >= 3) {
1696 if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
1697 if (class_list_path_len < JVM_MAXPATHLEN - 4) {
1698 jio_snprintf(class_list_path_str + class_list_path_len,
1699 sizeof(class_list_path_str) - class_list_path_len,
1700 "%slib", os::file_separator());
1701 class_list_path_len += 4;
1702 }
1703 }
1704 }
1705 if (class_list_path_len < JVM_MAXPATHLEN - 10) {
1706 jio_snprintf(class_list_path_str + class_list_path_len,
1707 sizeof(class_list_path_str) - class_list_path_len,
1708 "%sclasslist", os::file_separator());
1709 }
1710 class_list_path = class_list_path_str;
1711 } else {
1712 class_list_path = SharedClassListFile;
1713 }
1714
1715 tty->print_cr("Loading classes to share ...");
1716 _has_error_classes = false;
1717 int class_count = preload_classes(class_list_path, THREAD);
1718 if (ExtraSharedClassListFile) {
1719 class_count += preload_classes(ExtraSharedClassListFile, THREAD);
1720 }
1721 tty->print_cr("Loading classes to share: done.");
1722
1723 log_info(cds)("Shared spaces: preloaded %d classes", class_count);
1724
1725 // Rewrite and link classes
1726 tty->print_cr("Rewriting and linking classes ...");
1727
1728 // Link any classes which got missed. This would happen if we have loaded classes that
1729 // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
1730 // fails verification, all other interfaces that were not specified in the classlist but
1731 // are implemented by K are not verified.
1732 link_and_cleanup_shared_classes(CATCH);
1733 tty->print_cr("Rewriting and linking classes: done");
1734
1735 SystemDictionary::clear_invoke_method_table();
1736
1737 VM_PopulateDumpSharedSpace op;
1738 VMThread::execute(&op);
1739 }
1740 }
1741
1742
1743 int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) {
1744 ClassListParser parser(class_list_path);
1745 int class_count = 0;
1746
1747 while (parser.parse_one_line()) {
1748 Klass* klass = ClassLoaderExt::load_one_class(&parser, THREAD);
1749 if (HAS_PENDING_EXCEPTION) {
1750 if (klass == NULL &&
1751 (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
1752 // print a warning only when the pending exception is class not found
1753 tty->print_cr("Preload Warning: Cannot find %s", parser.current_class_name());
1754 }
1755 CLEAR_PENDING_EXCEPTION;
1756 }
1757 if (klass != NULL) {
1758 if (log_is_enabled(Trace, cds)) {
1759 ResourceMark rm;
1760 log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
1761 }
1762
1763 if (klass->is_instance_klass()) {
1764 InstanceKlass* ik = InstanceKlass::cast(klass);
1765
1766 // Link the class to cause the bytecodes to be rewritten and the
1767 // cpcache to be created. The linking is done as soon as classes
1768 // are loaded in order that the related data structures (klass and
1769 // cpCache) are located together.
1770 try_link_class(ik, THREAD);
1771 guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
1772 }
1773
1774 class_count++;
1775 }
1776 }
1777
1778 return class_count;
1779 }
1780
1781 // Returns true if the class's status has changed
1782 bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
1783 assert(DumpSharedSpaces, "should only be called during dumping");
1784 if (ik->init_state() < InstanceKlass::linked) {
1785 bool saved = BytecodeVerificationLocal;
1786 if (ik->loader_type() == 0 && ik->class_loader() == NULL) {
      // The verification decision is based on BytecodeVerificationRemote
      // for non-system classes. Since we are using the NULL class loader
      // to load non-system classes for customized class loaders during dumping,
      // we need to temporarily change BytecodeVerificationLocal to be the same as
      // BytecodeVerificationRemote. Note that this can cause the parent system
      // classes to be verified as well. The extra overhead is acceptable during
      // dumping.
1794 BytecodeVerificationLocal = BytecodeVerificationRemote;
1795 }
1796 ik->link_class(THREAD);
1797 if (HAS_PENDING_EXCEPTION) {
1798 ResourceMark rm;
1799 tty->print_cr("Preload Warning: Verification failed for %s",
1800 ik->external_name());
1801 CLEAR_PENDING_EXCEPTION;
1802 ik->set_in_error_state();
1803 _has_error_classes = true;
1804 }
1805 BytecodeVerificationLocal = saved;
1806 return true;
1807 } else {
1808 return false;
1809 }
1810 }
1811
1812 #if INCLUDE_CDS_JAVA_HEAP
1813 void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
1814 if (!MetaspaceShared::is_heap_object_archiving_allowed()) {
1815 if (log_is_enabled(Info, cds)) {
    log_info(cds)(
      "Archived java heap is not supported: UseG1GC, "
      "UseCompressedOops and UseCompressedClassPointers are required. "
      "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
      BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
      BOOL_TO_STR(UseCompressedClassPointers));
1822 }
1823 return;
1824 }
1825
1826 {
1827 NoSafepointVerifier nsv;
1828
1829 // Cache for recording where the archived objects are copied to
1830 MetaspaceShared::create_archive_object_cache();
1831
1832 tty->print_cr("Dumping objects to closed archive heap region ...");
1833 NOT_PRODUCT(StringTable::verify());
    // The closed space has at most two regions. See FileMapInfo::write_archive_heap_regions() for details.
1835 _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
1836 MetaspaceShared::dump_closed_archive_heap_objects(_closed_archive_heap_regions);
1837
1838 tty->print_cr("Dumping objects to open archive heap region ...");
1839 _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
1840 MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);
1841
1842 MetaspaceShared::destroy_archive_object_cache();
1843 }
1844
1845 G1HeapVerifier::verify_archive_regions();
1846 }
1847
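// Currently only the interned string objects are dumped into the closed
// archive heap region(s), via StringTable::write_to_archive() below.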
1848 void MetaspaceShared::dump_closed_archive_heap_objects(
1849 GrowableArray<MemRegion> * closed_archive) {
1850 assert(is_heap_object_archiving_allowed(), "Cannot dump java heap objects");
1851
1852 Thread* THREAD = Thread::current();
1853 G1CollectedHeap::heap()->begin_archive_alloc_range();
1854
1855 // Archive interned string objects
1856 StringTable::write_to_archive();
1857
1858 G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
1859 os::vm_allocation_granularity());
1860 }
1861
1862 void MetaspaceShared::dump_open_archive_heap_objects(
1863 GrowableArray<MemRegion> * open_archive) {
1864 assert(UseG1GC, "Only support G1 GC");
1865 assert(UseCompressedOops && UseCompressedClassPointers,
1866 "Only support UseCompressedOops and UseCompressedClassPointers enabled");
1867
1868 Thread* THREAD = Thread::current();
1869 G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
1870
1871 java_lang_Class::archive_basic_type_mirrors(THREAD);
1872
1873 MetaspaceShared::archive_klass_objects(THREAD);
1874
1875 HeapShared::archive_module_graph_objects(THREAD);
1876
1877 G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
1878 os::vm_allocation_granularity());
1879 }
1880
unsigned MetaspaceShared::obj_hash(oop const& p) {
  assert(!p->mark()->has_bias_pattern(),
         "this object should never have been locked"); // so identity_hash() won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}
1887
1888 MetaspaceShared::ArchivedObjectCache* MetaspaceShared::_archive_object_cache = NULL;
1889 oop MetaspaceShared::find_archived_heap_object(oop obj) {
1890 assert(DumpSharedSpaces, "dump-time only");
1891 ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
1892 oop* p = cache->get(obj);
1893 if (p != NULL) {
1894 return *p;
1895 } else {
1896 return NULL;
1897 }
1898 }
1899
1900 oop MetaspaceShared::archive_heap_object(oop obj, Thread* THREAD) {
1901 assert(DumpSharedSpaces, "dump-time only");
1902
1903 oop ao = find_archived_heap_object(obj);
1904 if (ao != NULL) {
1905 // already archived
1906 return ao;
1907 }
1908
1909 int len = obj->size();
1910 if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
1911 return NULL;
1912 }
1913
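  // Calling identity_hash() here is for its side effect: it computes the hash
  // and records it in obj's header before the object is copied below, so the
  // archived copy reports the same identity hash at runtime. The returned
  // value itself is not otherwise used.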
1914 int hash = obj->identity_hash();
1915 oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
1916 if (archived_oop != NULL) {
1917 Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
1918 relocate_klass_ptr(archived_oop);
1919 ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
1920 cache->put(obj, archived_oop);
1921 }
1922 log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
1923 p2i(obj), p2i(archived_oop));
1924 return archived_oop;
1925 }
1926
1927 oop MetaspaceShared::materialize_archived_object(oop obj) {
1928 if (obj != NULL) {
1929 return G1CollectedHeap::heap()->materialize_archived_object(obj);
1930 }
1931 return NULL;
1932 }
1933
1934 void MetaspaceShared::archive_klass_objects(Thread* THREAD) {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
1937 Klass* k = _global_klass_objects->at(i);
1938
1939 // archive mirror object
1940 java_lang_Class::archive_mirror(k, CHECK);
1941
    // archive the resolved_references array
1943 if (k->is_instance_klass()) {
1944 InstanceKlass* ik = InstanceKlass::cast(k);
1945 ik->constants()->archive_resolved_references(THREAD);
1946 }
1947 }
1948 }
1949
1950 bool MetaspaceShared::is_archive_object(oop p) {
1951 return (p == NULL) ? false : G1ArchiveAllocator::is_archive_object(p);
1952 }
1953
1954 void MetaspaceShared::fixup_mapped_heap_regions() {
1955 FileMapInfo *mapinfo = FileMapInfo::current_info();
1956 mapinfo->fixup_mapped_heap_regions();
1957 }
1958 #endif // INCLUDE_CDS_JAVA_HEAP
1959
// Closure for deserializing initialization data from a data area
// (ptr_array) read from the shared file.
1962
1963 class ReadClosure : public SerializeClosure {
1964 private:
1965 intptr_t** _ptr_array;
1966
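  // Return the next word from the serialized data area and advance the cursor.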
1967 inline intptr_t nextPtr() {
1968 return *(*_ptr_array)++;
1969 }
1970
1971 public:
1972 ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }
1973
1974 void do_ptr(void** p) {
    assert(*p == NULL, "initializing a previously initialized pointer.");
    intptr_t obj = nextPtr();
    assert(obj >= 0 || obj < -100,
           "hit tag while initializing ptrs.");
1979 *p = (void*)obj;
1980 }
1981
1982 void do_u4(u4* p) {
1983 intptr_t obj = nextPtr();
1984 *p = (u4)(uintx(obj));
1985 }
1986
1987 void do_tag(int tag) {
    int old_tag = (int)(intptr_t)nextPtr();
1991 assert(tag == old_tag, "old tag doesn't match");
1992 FileMapInfo::assert_mark(tag == old_tag);
1993 }
1994
1995 void do_oop(oop *p) {
1996 narrowOop o = (narrowOop)nextPtr();
1997 if (o == 0 || !MetaspaceShared::open_archive_heap_region_mapped()) {
      *p = NULL;
1999 } else {
2000 assert(MetaspaceShared::is_heap_object_archiving_allowed(),
2001 "Archived heap object is not allowed");
2002 assert(MetaspaceShared::open_archive_heap_region_mapped(),
2003 "Open archive heap region is not mapped");
2004 *p = CompressedOops::decode_not_null(o);
2005 }
2006 }
2007
2008 void do_region(u_char* start, size_t size) {
2009 assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
2010 assert(size % sizeof(intptr_t) == 0, "bad size");
2011 do_tag((int)size);
2012 while (size > 0) {
2013 *(intptr_t*)start = nextPtr();
2014 start += sizeof(intptr_t);
2015 size -= sizeof(intptr_t);
2016 }
2017 }
2018
2019 bool reading() const { return true; }
2020 };
2021
// Return true if the given address is within the shared region identified by idx.
2023 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
2024 return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
2025 }
2026
bool MetaspaceShared::is_in_trampoline_frame(address addr) {
  return UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc);
}
2033
2034 void MetaspaceShared::print_shared_spaces() {
2035 if (UseSharedSpaces) {
2036 FileMapInfo::current_info()->print_shared_spaces();
2037 }
2038 }
2039
2040
2041 // Map shared spaces at requested addresses and return if succeeded.
2042 bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
2043 size_t image_alignment = mapinfo->alignment();
2044
2045 #ifndef _WINDOWS
2046 // Map in the shared memory and then map the regions on top of it.
2047 // On Windows, don't map the memory here because it will cause the
2048 // mappings of the regions to fail.
2049 ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
2050 if (!shared_rs.is_reserved()) return false;
2051 #endif
2052
2053 assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
2054
2055 char* ro_base = NULL; char* ro_top;
2056 char* rw_base = NULL; char* rw_top;
2057 char* mc_base = NULL; char* mc_top;
2058 char* md_base = NULL; char* md_top;
2059 char* od_base = NULL; char* od_top;
2060
2061 // Map each shared region
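  // The && chain below short-circuits at the first region that fails to map,
  // leaving the remaining *_base pointers NULL, so the failure path only
  // unmaps the regions that were actually mapped.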
2062 if ((mc_base = mapinfo->map_region(mc, &mc_top)) != NULL &&
2063 (rw_base = mapinfo->map_region(rw, &rw_top)) != NULL &&
2064 (ro_base = mapinfo->map_region(ro, &ro_top)) != NULL &&
2065 (md_base = mapinfo->map_region(md, &md_top)) != NULL &&
2066 (od_base = mapinfo->map_region(od, &od_top)) != NULL &&
2067 (image_alignment == (size_t)os::vm_allocation_granularity()) &&
2068 mapinfo->validate_shared_path_table()) {
2069 // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
2070 // fast checking in MetaspaceShared::is_in_shared_metaspace() and
2071 // MetaspaceObj::is_shared().
2072 //
    // We require mc->rw->ro->md->od to be laid out consecutively, with no
    // gaps between them. That way, we can ensure that the OS won't be able to
    // allocate any new memory inside _shared_metaspace_{base,top}, which
    // would mess up the simple comparison in MetaspaceShared::is_in_shared_metaspace().
2077 assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base && mc_base < od_base, "must be");
2078 assert(od_top > ro_top && od_top > rw_top && od_top > md_top && od_top > mc_top , "must be");
2079 assert(mc_top == rw_base, "must be");
2080 assert(rw_top == ro_base, "must be");
2081 assert(ro_top == md_base, "must be");
2082 assert(md_top == od_base, "must be");
2083
2084 MetaspaceObj::_shared_metaspace_base = (void*)mc_base;
2085 MetaspaceObj::_shared_metaspace_top = (void*)od_top;
2086 return true;
2087 } else {
2088 // If there was a failure in mapping any of the spaces, unmap the ones
2089 // that succeeded
2090 if (ro_base != NULL) mapinfo->unmap_region(ro);
2091 if (rw_base != NULL) mapinfo->unmap_region(rw);
2092 if (mc_base != NULL) mapinfo->unmap_region(mc);
2093 if (md_base != NULL) mapinfo->unmap_region(md);
2094 if (od_base != NULL) mapinfo->unmap_region(od);
2095 #ifndef _WINDOWS
2096 // Release the entire mapped region
2097 shared_rs.release();
2098 #endif
2099 // If -Xshare:on is specified, print out the error message and exit VM,
2100 // otherwise, set UseSharedSpaces to false and continue.
2101 if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
2102 vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
2103 } else {
2104 FLAG_SET_DEFAULT(UseSharedSpaces, false);
2105 }
2106 return false;
2107 }
2108 }
2109
// Read the miscellaneous data from the shared file, and
// deserialize it to its various destinations.
2112
2113 void MetaspaceShared::initialize_shared_spaces() {
2114 FileMapInfo *mapinfo = FileMapInfo::current_info();
2115 _cds_i2i_entry_code_buffers = mapinfo->cds_i2i_entry_code_buffers();
2116 _cds_i2i_entry_code_buffers_size = mapinfo->cds_i2i_entry_code_buffers_size();
2117 _core_spaces_size = mapinfo->core_spaces_size();
2118 char* buffer = mapinfo->misc_data_patching_start();
2119 clone_cpp_vtables((intptr_t*)buffer);
2120
  // The rest of the data is stored in the RO region
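  // Layout of the read-only tables, mirroring the reads below:
  //   [dictionary bucket table size in bytes][number of entries][bucket data ...]
  //   [entry block size in bytes][HashtableEntry data ...]
  //   [archived subgraph info table][serialized misc data ...]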
2122 buffer = mapinfo->read_only_tables_start();
2123 int sharedDictionaryLen = *(intptr_t*)buffer;
2124 buffer += sizeof(intptr_t);
2125 int number_of_entries = *(intptr_t*)buffer;
2126 buffer += sizeof(intptr_t);
2127 SystemDictionary::set_shared_dictionary((HashtableBucket<mtClass>*)buffer,
2128 sharedDictionaryLen,
2129 number_of_entries);
2130 buffer += sharedDictionaryLen;
2131
2132 // The following data are the linked list elements
2133 // (HashtableEntry objects) for the shared dictionary table.
2134
2135 int len = *(intptr_t*)buffer; // skip over shared dictionary entries
2136 buffer += sizeof(intptr_t);
2137 buffer += len;
2138
2139 // The table of archived java heap object sub-graph infos
2140 buffer = HeapShared::read_archived_subgraph_infos(buffer);
2141
2142 // Verify various attributes of the archive, plus initialize the
2143 // shared string/symbol tables
2144 intptr_t* array = (intptr_t*)buffer;
2145 ReadClosure rc(&array);
2146 serialize(&rc);
2147
2148 // Initialize the run-time symbol table.
2149 SymbolTable::create_table();
2150
2151 // Close the mapinfo file
2152 mapinfo->close();
2153
2154 if (PrintSharedArchiveAndExit) {
2155 if (PrintSharedDictionary) {
2156 tty->print_cr("\nShared classes:\n");
2157 SystemDictionary::print_shared(tty);
2158 }
2159 if (_archive_loading_failed) {
2160 tty->print_cr("archive is invalid");
2161 vm_exit(1);
2162 } else {
2163 tty->print_cr("archive is valid");
2164 vm_exit(0);
2165 }
2166 }
2167 }
2168
2169 // JVM/TI RedefineClasses() support:
2170 bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
2171 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2172
2173 if (UseSharedSpaces) {
2174 // remap the shared readonly space to shared readwrite, private
2175 FileMapInfo* mapinfo = FileMapInfo::current_info();
2176 if (!mapinfo->remap_shared_readonly_as_readwrite()) {
2177 return false;
2178 }
2179 _remapped_readwrite = true;
2180 }
2181 return true;
2182 }
2183
2184 void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) {
2185 // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
2186 // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
2187 // or so.
2188 _mc_region.print_out_of_space_msg(name, needed_bytes);
2189 _rw_region.print_out_of_space_msg(name, needed_bytes);
2190 _ro_region.print_out_of_space_msg(name, needed_bytes);
2191 _md_region.print_out_of_space_msg(name, needed_bytes);
2192 _od_region.print_out_of_space_msg(name, needed_bytes);
2193
2194 vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
2195 "Please reduce the number of shared classes.");
2196 }