/*
 * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};
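
// Note: there is one more in-use list than free list because free humongous
// chunks are not kept on a linked free list; the ChunkManager below stores
// them in a ChunkTreeDictionary instead.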

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};
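
// For a rough sense of scale (illustrative, assuming a 64-bit VM where
// BytesPerWord == 8): SpecializedChunk is 128 words == 1K bytes, SmallChunk
// is 512 words == 4K bytes, and MediumChunk is 8*K words == 64K bytes; the
// Class* variants used for the compressed class space are smaller.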

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bounds");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free lists of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  // Free humongous chunks (kept in a dictionary rather than a linked list)
  ChunkTreeDictionary _humongous_dictionary;

  // Running totals of the word size and count of all free chunks
  // managed by this ChunkManager.
  size_t _free_chunks_total;
  size_t _free_chunks_count;
  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
           "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Remove (allocate) a chunk of the given word size from the global
  // freelist; chunks are added back via return_chunks() below.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);
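  // Illustrative mapping for the non-class space (word sizes, assuming the
  // standard chunk sizes above): 128 -> SpecializedIndex, 512 -> SmallIndex,
  // 8*K -> MediumIndex, and any other size -> HumongousIndex.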

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                        \
  assert(index == SpecializedIndex ||                                    \
         index == SmallIndex ||                                          \
         index == MediumIndex ||                                         \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* const _dictionary;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() { return dictionary()->total_size(); }

  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // The whole region reserved for this node
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  // Address of the next available space in _virtual_space
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace.
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment)      \
  assert(is_ptr_aligned(ptr, alignment),           \
         err_msg(PTR_FORMAT " is not aligned to "  \
                 SIZE_FORMAT, p2i(ptr), alignment))

#define assert_is_size_aligned(size, alignment)    \
  assert(is_size_aligned(size, alignment),         \
         err_msg(SIZE_FORMAT " is not aligned to " \
                 SIZE_FORMAT, size, alignment))


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// bytes is the size of the associated VirtualSpace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve at
  // a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Sum of the space used by blocks allocated out of the chunks in use
  size_t _allocated_blocks_words;

  // Sum of the capacities of all allocated chunks, and their count
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t)(is_class() ? ClassSpecializedChunk : SpecializedChunk); }
  size_t small_chunk_size()       { return (size_t)(is_class() ? ClassSmallChunk : SmallChunk); }
  size_t medium_chunk_size()      { return (size_t)(is_class() ? ClassMediumChunk : MediumChunk); }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size()    { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums attributable to this SpaceManager.
  // That is, the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // compute the chunk size to request (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
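
  // Illustrative sizing note (assuming a 64-bit VM, BytesPerWord == 8):
  // a request for a single word is padded up to at least sizeof(Metablock)
  // and then aligned to Metachunk::object_alignment().  Every block is
  // therefore large enough to be re-used as a Metablock when it is later
  // freed onto the block freelist (see BlockFreelist::return_block()).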
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  DEBUG_ONLY(verify_container_count();)
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " UINTX_FORMAT
                 " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}

BlockFreelist::~BlockFreelist() {
  if (Verbose && TraceMetadataChunkAllocation) {
    dictionary()->print_free_lists(gclog_or_tty);
  }
  delete _dictionary;
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}
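
// Illustrative walk-through of get_block() (hypothetical numbers): for a
// 100-word request, a best-fit block of 500 words exceeds
// WasteMultiplier * 100 = 400 words, so it is put back and NULL is returned
// rather than wasting most of the block.  A 300-word block is accepted, and
// the 200 unused trailing words are returned as a new free block, provided
// they meet the TreeChunk minimum size.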

void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment().  If both base and size are
  // aligned, only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed.  Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           err_msg("Reserved start was not set properly " PTR_FORMAT
                   " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base())));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           err_msg("Reserved size was not set properly " SIZE_FORMAT
                   " != " SIZE_FORMAT, reserved()->word_size(),
                   _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(vs), capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               p2i(bottom()), p2i(top()), p2i(end()),
               p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                                  \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,           \
         err_msg("Too much committed memory. Committed: " SIZE_FORMAT  \
                 " limit (MaxMetaspaceSize): " SIZE_FORMAT,             \
                 MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except
// at unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      DEBUG_ONLY(verify_container_count();)
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
      DEBUG_ONLY(verify_container_count();)
    }
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}
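
// Illustrative example of retire() for the non-class space (hypothetical
// numbers): with 8704 committed-but-unused words, the loop first carves one
// 8*K-word MediumChunk (leaving 512 words), then one 512-word SmallChunk
// (leaving 0).  Because every chunk size is a multiple of the 128-word
// SpecializedChunk, the remainder always reaches exactly zero, which is what
// the final assert relies on.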

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it"
             " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With the perm gen, increases were bounded by
// MinMetaspaceExpansion and MaxMetaspaceExpansion; the metaspace policy uses
// those flags as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}
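
// Worked example (hypothetical flag values, MinMetaspaceExpansion = 256K and
// MaxMetaspaceExpansion = 4M): a 100K request yields the 256K minimum step;
// a 1M request yields the 4M maximum step; a 10M request yields roughly
// 10M + 256K.  Every result is aligned up to Metaspace::commit_alignment().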

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}
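
// inc_capacity_until_GC() is lock-free: it returns false when the cmpxchg
// above loses a race with a concurrent update.  A caller that must succeed
// would retry, along these lines (illustrative sketch only, not code from
// this file; recompute_delta() is a hypothetical helper):
//
//   size_t new_cap;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, &new_cap)) {
//     delta = recompute_delta();
//   }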

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
                 capacity_until_gc, committed_bytes));

  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC   = capacity_until_gc - committed_bytes;
  size_t left_to_commit  = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}
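
// Worked example for allowed_expansion() (hypothetical numbers): with 80M
// committed, capacity_until_GC() == 100M and MaxMetaspaceSize == 90M,
// left_until_GC is 20M but left_until_max is only 10M, so at most
// 10M / BytesPerWord words may be committed before a GC is required.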

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary.  Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f"
                           "  maximum_used_percentage: %6.2f",
                           minimum_free_percentage,
                           maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                           "   used_after_gc       : %6.1fKB",
                           used_after_gc / (double) K);
  }


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("    expanding:"
                               "  minimum_desired_capacity: %6.1fKB"
                               "  expand_bytes: %6.1fKB"
                               "  MinMetaspaceExpansion: %6.1fKB"
                               "  new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
      }
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         err_msg(SIZE_FORMAT " >= " SIZE_FORMAT,
                 capacity_until_GC, minimum_desired_capacity));
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  minimum_desired_capacity: %6.1fKB"
                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
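      // Illustrative progression (hypothetical numbers): with a constant 40M
      // excess over maximum_desired_capacity, successive calls shrink by 0,
      // 4M, 16M, and then the full 40M, as _shrink_factor steps through
      // 0 -> 10 -> 40 -> 100 below.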
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
                     shrink_bytes, max_shrink_bytes));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("  "
                               "  shrinking:"
                               "  initSize: %.1fK"
                               "  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K,
                               maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                               "  shrink_bytes: %.1fK"
                               "  current_shrink_factor: %d"
                               "  new shrink factor: %d"
                               "  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K,
                               current_shrink_factor,
                               _shrink_factor,
                               MinMetaspaceExpansion / (double) K);
      }
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}
1624
1625 // Metadebug methods
1626
1627 void Metadebug::init_allocation_fail_alot_count() {
1628 if (MetadataAllocationFailALot) {
1629 _allocation_fail_alot_count =
1630 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
1631 }
1632 }
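// Illustrative example (interval value assumed): os::random() yields a
// non-negative value below max_jint + 1, so the expression above maps it
// to a count uniformly distributed in [1, MetadataAllocationFailALotInterval].
// With an interval of 1000, a metadata allocation is forced to fail after
// roughly 500 allocations on average in debug runs.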
1633
1634 #ifdef ASSERT
1635 bool Metadebug::test_metadata_failure() {
1636 if (MetadataAllocationFailALot &&
1637 Threads::is_vm_complete()) {
1638 if (_allocation_fail_alot_count > 0) {
1639 _allocation_fail_alot_count--;
1640 } else {
1641 if (TraceMetadataChunkAllocation && Verbose) {
1642 gclog_or_tty->print_cr("Metadata allocation failing for "
1643 "MetadataAllocationFailALot");
1644 }
1645 init_allocation_fail_alot_count();
1646 return true;
1647 }
1648 }
1649 return false;
1650 }
1651 #endif
1652
1653 // ChunkManager methods
1654
1655 size_t ChunkManager::free_chunks_total_words() {
1656 return _free_chunks_total;
1657 }
1658
1659 size_t ChunkManager::free_chunks_total_bytes() {
1660 return free_chunks_total_words() * BytesPerWord;
1661 }
1662
1663 size_t ChunkManager::free_chunks_count() {
1664 #ifdef ASSERT
1665 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
1666 MutexLockerEx cl(SpaceManager::expand_lock(),
1667 Mutex::_no_safepoint_check_flag);
1668 // This lock is only needed in debug because the verification
1669 // of the _free_chunks_count walks the list of free chunks
1670 slow_locked_verify_free_chunks_count();
1671 }
1672 #endif
1673 return _free_chunks_count;
1674 }
1675
1676 void ChunkManager::locked_verify_free_chunks_total() {
1677 assert_lock_strong(SpaceManager::expand_lock());
1678 assert(sum_free_chunks() == _free_chunks_total,
1679 err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
1680 " same as sum " SIZE_FORMAT, _free_chunks_total,
1681 sum_free_chunks()));
1682 }
1683
1684 void ChunkManager::verify_free_chunks_total() {
1685 MutexLockerEx cl(SpaceManager::expand_lock(),
1686 Mutex::_no_safepoint_check_flag);
1687 locked_verify_free_chunks_total();
1688 }
1689
1690 void ChunkManager::locked_verify_free_chunks_count() {
1691 assert_lock_strong(SpaceManager::expand_lock());
1692 assert(sum_free_chunks_count() == _free_chunks_count,
1693 err_msg("_free_chunks_count " SIZE_FORMAT " is not the"
1694 " same as sum " SIZE_FORMAT, _free_chunks_count,
1695 sum_free_chunks_count()));
1696 }
1697
1698 void ChunkManager::verify_free_chunks_count() {
1699 #ifdef ASSERT
1700 MutexLockerEx cl(SpaceManager::expand_lock(),
1701 Mutex::_no_safepoint_check_flag);
1702 locked_verify_free_chunks_count();
1703 #endif
1704 }
1705
1706 void ChunkManager::verify() {
1707 MutexLockerEx cl(SpaceManager::expand_lock(),
1708 Mutex::_no_safepoint_check_flag);
1709 locked_verify();
1710 }
1711
1712 void ChunkManager::locked_verify() {
1713 locked_verify_free_chunks_count();
1714 locked_verify_free_chunks_total();
1715 }
1716
1717 void ChunkManager::locked_print_free_chunks(outputStream* st) {
1718 assert_lock_strong(SpaceManager::expand_lock());
1719 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1720 _free_chunks_total, _free_chunks_count);
1721 }
1722
1723 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) {
1724 assert_lock_strong(SpaceManager::expand_lock());
1725 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT,
1726 sum_free_chunks(), sum_free_chunks_count());
1727 }

1728 ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
1729 return &_free_chunks[index];
1730 }
1731
1732 // These methods that sum the free chunk lists are used in printing
1733 // methods that are used in product builds.
1734 size_t ChunkManager::sum_free_chunks() {
1735 assert_lock_strong(SpaceManager::expand_lock());
1736 size_t result = 0;
1737 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1738 ChunkList* list = free_chunks(i);
1739
1740 if (list == NULL) {
1741 continue;
1742 }
1743
1744 result = result + list->count() * list->size();
1745 }
1746 result = result + humongous_dictionary()->total_size();
1747 return result;
1748 }
1749
1750 size_t ChunkManager::sum_free_chunks_count() {
1751 assert_lock_strong(SpaceManager::expand_lock());
1752 size_t count = 0;
1753 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
1754 ChunkList* list = free_chunks(i);
1755 if (list == NULL) {
1756 continue;
1757 }
1758 count = count + list->count();
1759 }
1760 count = count + humongous_dictionary()->total_free_blocks();
1761 return count;
1762 }
1763
1764 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
1765 ChunkIndex index = list_index(word_size);
1766 assert(index < HumongousIndex, "No humongous list");
1767 return free_chunks(index);
1768 }
1769
1770 Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
1771 assert_lock_strong(SpaceManager::expand_lock());
1772
1773 slow_locked_verify();
1774
1775 Metachunk* chunk = NULL;
1776 if (list_index(word_size) != HumongousIndex) {
1777 ChunkList* free_list = find_free_chunks_list(word_size);
1778 assert(free_list != NULL, "Sanity check");
1779
1780 chunk = free_list->head();
1781
1782 if (chunk == NULL) {
1783 return NULL;
1784 }
1785
1786 // Remove the chunk as the head of the list.
1787 free_list->remove_chunk(chunk);
1788
1789 if (TraceMetadataChunkAllocation && Verbose) {
1790 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1791 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1792 p2i(free_list), p2i(chunk), chunk->word_size());
1793 }
1794 } else {
1795 chunk = humongous_dictionary()->get_chunk(
1796 word_size,
1797 FreeBlockDictionary<Metachunk>::atLeast);
1798
1799 if (chunk == NULL) {
1800 return NULL;
1801 }
1802
1803 if (TraceMetadataHumongousAllocation) {
1804 size_t waste = chunk->word_size() - word_size;
1805 gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1806 SIZE_FORMAT " for requested size " SIZE_FORMAT
1807 " waste " SIZE_FORMAT,
1808 chunk->word_size(), word_size, waste);
1809 }
1810 }
1811
1812 // Chunk is being removed from the chunks free list.
1813 dec_free_chunks_total(chunk->word_size());
1814
1815 // Remove it from the links to this freelist
1816 chunk->set_next(NULL);
1817 chunk->set_prev(NULL);
1818 #ifdef ASSERT
1819 // Chunk is no longer on any freelist. Setting this to false makes
1820 // container_count_slow() work.
1821 chunk->set_is_tagged_free(false);
1822 #endif
1823 chunk->container()->inc_container_count();
1824
1825 slow_locked_verify();
1826 return chunk;
1827 }
1828
1829 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1830 assert_lock_strong(SpaceManager::expand_lock());
1831 slow_locked_verify();
1832
1833 // Take from the beginning of the list
1834 Metachunk* chunk = free_chunks_get(word_size);
1835 if (chunk == NULL) {
1836 return NULL;
1837 }
1838
1839 assert((word_size <= chunk->word_size()) ||
1840 (list_index(chunk->word_size()) == HumongousIndex),
1841 "Non-humongous variable sized chunk");
1842 if (TraceMetadataChunkAllocation) {
1843 size_t list_count;
1844 if (list_index(word_size) < HumongousIndex) {
1845 ChunkList* list = find_free_chunks_list(word_size);
1846 list_count = list->count();
1847 } else {
1848 list_count = humongous_dictionary()->total_count();
1849 }
1850 gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1851 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1852 p2i(this), p2i(chunk), chunk->word_size(), list_count);
1853 locked_print_free_chunks(gclog_or_tty);
1854 }
1855
1856 return chunk;
1857 }
1858
1859 void ChunkManager::print_on(outputStream* out) const {
1860 if (PrintFLSStatistics != 0) {
1861 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1862 }
1863 }
1864
1865 // SpaceManager methods
1866
1867 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1868 size_t* chunk_word_size,
1869 size_t* class_chunk_word_size) {
1870 switch (type) {
1871 case Metaspace::BootMetaspaceType:
1872 *chunk_word_size = Metaspace::first_chunk_word_size();
1873 *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1874 break;
1875 case Metaspace::ROMetaspaceType:
1876 *chunk_word_size = SharedReadOnlySize / wordSize;
1877 *class_chunk_word_size = ClassSpecializedChunk;
1878 break;
1879 case Metaspace::ReadWriteMetaspaceType:
1880 *chunk_word_size = SharedReadWriteSize / wordSize;
1881 *class_chunk_word_size = ClassSpecializedChunk;
1882 break;
1883 case Metaspace::AnonymousMetaspaceType:
1884 case Metaspace::ReflectionMetaspaceType:
1885 *chunk_word_size = SpecializedChunk;
1886 *class_chunk_word_size = ClassSpecializedChunk;
1887 break;
1888 default:
1889 *chunk_word_size = SmallChunk;
1890 *class_chunk_word_size = ClassSmallChunk;
1891 break;
1892 }
1893 assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1894 err_msg("Initial chunks sizes bad: data " SIZE_FORMAT
1895 " class " SIZE_FORMAT,
1896 *chunk_word_size, *class_chunk_word_size));
1897 }
1898
1899 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1900 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1901 size_t free = 0;
1902 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1903 Metachunk* chunk = chunks_in_use(i);
1904 while (chunk != NULL) {
1905 free += chunk->free_word_size();
1906 chunk = chunk->next();
1907 }
1908 }
1909 return free;
1910 }
1911
1912 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1913 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1914 size_t result = 0;
1915 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1916 result += sum_waste_in_chunks_in_use(i);
1917 }
1918
1919 return result;
1920 }
1921
1922 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1923 size_t result = 0;
1924 Metachunk* chunk = chunks_in_use(index);
1925 // Count the free space in all the chunks but not the
1926 // current chunk, from which allocations are still being done.
1927 while (chunk != NULL) {
1928 if (chunk != current_chunk()) {
1929 result += chunk->free_word_size();
1930 }
1931 chunk = chunk->next();
1932 }
1933 return result;
1934 }
1935
1936 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1937 // For CMS use "allocated_chunks_words()", which does not need the
1938 // Metaspace lock. For the other collectors, sum over the chunk
1939 // lists. Walking the lists (sum_capacity_in_chunks_in_use()) is
1940 // the definitive answer but is too expensive to use in product
1941 // builds, so the running total allocated_chunks_words() should be
1942 // used instead. Keeping both paths allows checking that
1943 // allocated_chunks_words() returns the same value as
1944 // sum_capacity_in_chunks_in_use().
1945 if (UseConcMarkSweepGC) {
1946 return allocated_chunks_words();
1947 } else {
1948 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1949 size_t sum = 0;
1950 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1951 Metachunk* chunk = chunks_in_use(i);
1952 while (chunk != NULL) {
1953 sum += chunk->word_size();
1954 chunk = chunk->next();
1955 }
1956 }
1957 return sum;
1958 }
1959 }
1960
1961 size_t SpaceManager::sum_count_in_chunks_in_use() {
1962 size_t count = 0;
1963 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1964 count = count + sum_count_in_chunks_in_use(i);
1965 }
1966
1967 return count;
1968 }
1969
1970 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
1971 size_t count = 0;
1972 Metachunk* chunk = chunks_in_use(i);
1973 while (chunk != NULL) {
1974 count++;
1975 chunk = chunk->next();
1976 }
1977 return count;
1978 }
1979
1980
1981 size_t SpaceManager::sum_used_in_chunks_in_use() const {
1982 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1983 size_t used = 0;
1984 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1985 Metachunk* chunk = chunks_in_use(i);
1986 while (chunk != NULL) {
1987 used += chunk->used_word_size();
1988 chunk = chunk->next();
1989 }
1990 }
1991 return used;
1992 }
1993
1994 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
1995
1996 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1997 Metachunk* chunk = chunks_in_use(i);
1998 st->print("SpaceManager: %s " PTR_FORMAT,
1999 chunk_size_name(i), p2i(chunk));
2000 if (chunk != NULL) {
2001 st->print_cr(" free " SIZE_FORMAT,
2002 chunk->free_word_size());
2003 } else {
2004 st->cr();
2005 }
2006 }
2007
2008 chunk_manager()->locked_print_free_chunks(st);
2009 chunk_manager()->locked_print_sum_free_chunks(st);
2010 }
2011
2012 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2013
2014 // Decide between a small chunk and a medium chunk. Up to
2015 // _small_chunk_limit small chunks can be allocated.
2016 // After that a medium chunk is preferred.
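// A sketch of the resulting policy (chunk sizes from the ChunkSizes
// enum; the value of _small_chunk_limit is defined elsewhere in this
// file): a data SpaceManager hands out 512-word SmallChunks until
// _small_chunk_limit of them are in use, then switches to 8K-word
// MediumChunks, unless the request plus Metachunk::overhead() does not
// fit in a small chunk to begin with.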
2017 size_t chunk_word_size;
2018 if (chunks_in_use(MediumIndex) == NULL &&
2019 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2020 chunk_word_size = (size_t) small_chunk_size();
2021 if (word_size + Metachunk::overhead() > small_chunk_size()) {
2022 chunk_word_size = medium_chunk_size();
2023 }
2024 } else {
2025 chunk_word_size = medium_chunk_size();
2026 }
2027
2028 // Might still need a humongous chunk. Enforce
2029 // humongous allocation sizes to be aligned up to
2030 // the smallest chunk size.
2031 size_t if_humongous_sized_chunk =
2032 align_size_up(word_size + Metachunk::overhead(),
2033 smallest_chunk_size());
2034 chunk_word_size =
2035 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
2036
2037 assert(!SpaceManager::is_humongous(word_size) ||
2038 chunk_word_size == if_humongous_sized_chunk,
2039 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT
2040 " chunk_word_size " SIZE_FORMAT,
2041 word_size, chunk_word_size));
2042 if (TraceMetadataHumongousAllocation &&
2043 SpaceManager::is_humongous(word_size)) {
2044 gclog_or_tty->print_cr("Metadata humongous allocation:");
2045 gclog_or_tty->print_cr(" word_size " SIZE_FORMAT, word_size);
2046 gclog_or_tty->print_cr(" chunk_word_size " SIZE_FORMAT,
2047 chunk_word_size);
2048 gclog_or_tty->print_cr(" chunk overhead " SIZE_FORMAT,
2049 Metachunk::overhead());
2050 }
2051 return chunk_word_size;
2052 }
2053
2054 void SpaceManager::track_metaspace_memory_usage() {
2055 if (is_init_completed()) {
2056 if (is_class()) {
2057 MemoryService::track_compressed_class_memory_usage();
2058 }
2059 MemoryService::track_metaspace_memory_usage();
2060 }
2061 }
2062
2063 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
2064 assert(vs_list()->current_virtual_space() != NULL,
2065 "Should have been set");
2066 assert(current_chunk() == NULL ||
2067 current_chunk()->allocate(word_size) == NULL,
2068 "Don't need to expand");
2069 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
2070
2071 if (TraceMetadataChunkAllocation && Verbose) {
2072 size_t words_left = 0;
2073 size_t words_used = 0;
2074 if (current_chunk() != NULL) {
2075 words_left = current_chunk()->free_word_size();
2076 words_used = current_chunk()->used_word_size();
2077 }
2078 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT
2079 " words " SIZE_FORMAT " words used " SIZE_FORMAT
2080 " words left",
2081 word_size, words_used, words_left);
2082 }
2083
2084 // Get another chunk
2085 size_t grow_chunks_by_words = calc_chunk_size(word_size);
2086 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
2087
2088 MetaWord* mem = NULL;
2089
2090 // If a chunk was available, add it to the in-use chunk list
2091 // and do an allocation from it.
2092 if (next != NULL) {
2093 // Add to this manager's list of chunks in use.
2094 add_chunk(next, false);
2095 mem = next->allocate(word_size);
2096 }
2097
2098 // Track metaspace memory usage statistic.
2099 track_metaspace_memory_usage();
2100
2101 return mem;
2102 }
2103
2104 void SpaceManager::print_on(outputStream* st) const {
2105
2106 for (ChunkIndex i = ZeroIndex;
2107 i < NumberOfInUseLists ;
2108 i = next_chunk_index(i) ) {
2109 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT,
2110 p2i(chunks_in_use(i)),
2111 chunks_in_use(i) == NULL ? 0 : chunks_in_use(i)->word_size());
2112 }
2113 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT
2114 " Humongous " SIZE_FORMAT,
2115 sum_waste_in_chunks_in_use(SmallIndex),
2116 sum_waste_in_chunks_in_use(MediumIndex),
2117 sum_waste_in_chunks_in_use(HumongousIndex));
2118 // block free lists
2119 if (block_freelists() != NULL) {
2120 st->print_cr("total in block free lists " SIZE_FORMAT,
2121 block_freelists()->total_size());
2122 }
2123 }
2124
2125 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
2126 Mutex* lock) :
2127 _mdtype(mdtype),
2128 _allocated_blocks_words(0),
2129 _allocated_chunks_words(0),
2130 _allocated_chunks_count(0),
2131 _lock(lock)
2132 {
2133 initialize();
2134 }
2135
2136 void SpaceManager::inc_size_metrics(size_t words) {
2137 assert_lock_strong(SpaceManager::expand_lock());
2138 // Total of allocated Metachunks and allocated Metachunks count
2139 // for each SpaceManager
2140 _allocated_chunks_words = _allocated_chunks_words + words;
2141 _allocated_chunks_count++;
2142 // Global total of capacity in allocated Metachunks
2143 MetaspaceAux::inc_capacity(mdtype(), words);
2144 // Global total of allocated Metablocks.
2145 // used_words_slow() includes the overhead in each
2146 // Metachunk, so include it in the used total when the
2147 // Metachunk is first added (so it is only added once per
2148 // Metachunk).
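// Illustrative example (assumed sizes, not VM logic): adding one
// 512-word small chunk raises _allocated_chunks_words and the global
// capacity by 512 words, but raises the global used total only by
// Metachunk::overhead() here; the payload words are added to "used"
// later, allocation by allocation, through inc_used_metrics().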
2149 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
2150 }
2151
2152 void SpaceManager::inc_used_metrics(size_t words) {
2153 // Add to the per SpaceManager total
2154 Atomic::add_ptr(words, &_allocated_blocks_words);
2155 // Add to the global total
2156 MetaspaceAux::inc_used(mdtype(), words);
2157 }
2158
2159 void SpaceManager::dec_total_from_size_metrics() {
2160 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
2161 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
2162 // Also deduct the overhead per Metachunk
2163 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
2164 }
2165
2166 void SpaceManager::initialize() {
2167 Metadebug::init_allocation_fail_alot_count();
2168 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2169 _chunks_in_use[i] = NULL;
2170 }
2171 _current_chunk = NULL;
2172 if (TraceMetadataChunkAllocation && Verbose) {
2173 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, p2i(this));
2174 }
2175 }
2176
2177 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
2178 if (chunks == NULL) {
2179 return;
2180 }
2181 ChunkList* list = free_chunks(index);
2182 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
2183 assert_lock_strong(SpaceManager::expand_lock());
2184 Metachunk* cur = chunks;
2185
2186 // This returns chunks one at a time. If a new
2187 // class List can be created that is a base class
2188 // of FreeList then something like FreeList::prepend()
2189 // can be used in place of this loop
2190 while (cur != NULL) {
2191 assert(cur->container() != NULL, "Container should have been set");
2192 cur->container()->dec_container_count();
2193 // Capture the next link before it is changed
2194 // by the call to return_chunk_at_head();
2195 Metachunk* next = cur->next();
2196 DEBUG_ONLY(cur->set_is_tagged_free(true);)
2197 list->return_chunk_at_head(cur);
2198 cur = next;
2199 }
2200 }
2201
2202 SpaceManager::~SpaceManager() {
2203 // This call locks this->_lock, which can't be done while holding the expand_lock().
2204 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2205 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2206 " allocated_chunks_words() " SIZE_FORMAT,
2207 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2208
2209 MutexLockerEx fcl(SpaceManager::expand_lock(),
2210 Mutex::_no_safepoint_check_flag);
2211
2212 chunk_manager()->slow_locked_verify();
2213
2214 dec_total_from_size_metrics();
2215
2216 if (TraceMetadataChunkAllocation && Verbose) {
2217 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, p2i(this));
2218 locked_print_chunks_in_use_on(gclog_or_tty);
2219 }
2220
2221 // Do not mangle freed Metachunks. The chunk size inside Metachunks
2222 // is still needed during the freeing of VirtualSpaceNodes.
2223
2224 // Have to update before the chunks_in_use lists are emptied
2225 // below.
2226 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2227 sum_count_in_chunks_in_use());
2228
2229 // Add all the chunks in use by this space manager
2230 // to the global list of free chunks.
2231
2232 // Follow each list of chunks-in-use and add them to the
2233 // free lists. Each list is NULL terminated.
2234
2235 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2236 if (TraceMetadataChunkAllocation && Verbose) {
2237 gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s chunks to freelist",
2238 sum_count_in_chunks_in_use(i),
2239 chunk_size_name(i));
2240 }
2241 Metachunk* chunks = chunks_in_use(i);
2242 chunk_manager()->return_chunks(i, chunks);
2243 set_chunks_in_use(i, NULL);
2244 if (TraceMetadataChunkAllocation && Verbose) {
2245 gclog_or_tty->print_cr("updated freelist count " SSIZE_FORMAT " %s",
2246 chunk_manager()->free_chunks(i)->count(),
2247 chunk_size_name(i));
2248 }
2249 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2250 }
2251
2252 // The medium chunk case may be optimized by passing the head and
2253 // tail of the medium chunk list to add_at_head(). The tail is often
2254 // the current chunk but there are probably exceptions.
2255
2256 // Humongous chunks
2257 if (TraceMetadataChunkAllocation && Verbose) {
2258 gclog_or_tty->print_cr("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2259 sum_count_in_chunks_in_use(HumongousIndex),
2260 chunk_size_name(HumongousIndex));
2261 gclog_or_tty->print("Humongous chunk dictionary: ");
2262 }
2263 // Humongous chunks are never the current chunk.
2264 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2265
2266 while (humongous_chunks != NULL) {
2267 #ifdef ASSERT
2268 humongous_chunks->set_is_tagged_free(true);
2269 #endif
2270 if (TraceMetadataChunkAllocation && Verbose) {
2271 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
2272 p2i(humongous_chunks),
2273 humongous_chunks->word_size());
2274 }
2275 assert(humongous_chunks->word_size() == (size_t)
2276 align_size_up(humongous_chunks->word_size(),
2277 smallest_chunk_size()),
2278 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
2279 " granularity " SIZE_FORMAT,
2280 humongous_chunks->word_size(), smallest_chunk_size()));
2281 Metachunk* next_humongous_chunks = humongous_chunks->next();
2282 humongous_chunks->container()->dec_container_count();
2283 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2284 humongous_chunks = next_humongous_chunks;
2285 }
2286 if (TraceMetadataChunkAllocation && Verbose) {
2287 gclog_or_tty->cr();
2288 gclog_or_tty->print_cr("updated dictionary count " SIZE_FORMAT " %s",
2289 chunk_manager()->humongous_dictionary()->total_count(),
2290 chunk_size_name(HumongousIndex));
2291 }
2292 chunk_manager()->slow_locked_verify();
2293 }
2294
2295 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2296 switch (index) {
2297 case SpecializedIndex:
2298 return "Specialized";
2299 case SmallIndex:
2300 return "Small";
2301 case MediumIndex:
2302 return "Medium";
2303 case HumongousIndex:
2304 return "Humongous";
2305 default:
2306 return NULL;
2307 }
2308 }
2309
2310 ChunkIndex ChunkManager::list_index(size_t size) {
2311 switch (size) {
2312 case SpecializedChunk:
2313 assert(SpecializedChunk == ClassSpecializedChunk,
2314 "Need branch for ClassSpecializedChunk");
2315 return SpecializedIndex;
2316 case SmallChunk:
2317 case ClassSmallChunk:
2318 return SmallIndex;
2319 case MediumChunk:
2320 case ClassMediumChunk:
2321 return MediumIndex;
2322 default:
2323 assert(size > MediumChunk || size > ClassMediumChunk,
2324 "Not a humongous chunk");
2325 return HumongousIndex;
2326 }
2327 }
2328
2329 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2330 assert_lock_strong(_lock);
2331 size_t raw_word_size = get_raw_word_size(word_size);
2332 size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
2333 assert(raw_word_size >= min_size,
2334 err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
2335 block_freelists()->return_block(p, raw_word_size);
2336 }
2337
2338 // Adds a chunk to the list of chunks in use.
2339 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2340
2341 assert(new_chunk != NULL, "Should not be NULL");
2342 assert(new_chunk->next() == NULL, "Should not be on a list");
2343
2344 new_chunk->reset_empty();
2345
2346 // Find the correct list and set the current
2347 // chunk for that list.
2348 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2349
2350 if (index != HumongousIndex) {
2351 retire_current_chunk();
2352 set_current_chunk(new_chunk);
2353 new_chunk->set_next(chunks_in_use(index));
2354 set_chunks_in_use(index, new_chunk);
2355 } else {
2356 // For null class loader data and DumpSharedSpaces, the first chunk isn't
2357 // small, so the small chunk list will be empty. Link this first chunk
2358 // as the current chunk.
2359 if (make_current) {
2360 // Set as the current chunk but otherwise treat as a humongous chunk.
2361 set_current_chunk(new_chunk);
2362 }
2363 // Link at head. The _current_chunk only points to a humongous chunk for
2364 // the null class loader metaspace (class and data virtual space managers);
2365 // because humongous chunks are linked at the head, it will not point to
2366 // the tail of the humongous chunks list.
2367 new_chunk->set_next(chunks_in_use(HumongousIndex));
2368 set_chunks_in_use(HumongousIndex, new_chunk);
2369
2370 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2371 }
2372
2373 // Add to the running sum of capacity
2374 inc_size_metrics(new_chunk->word_size());
2375
2376 assert(new_chunk->is_empty(), "Not ready for reuse");
2377 if (TraceMetadataChunkAllocation && Verbose) {
2378 gclog_or_tty->print("SpaceManager::add_chunk: " SIZE_FORMAT ") ",
2379 sum_count_in_chunks_in_use());
2380 new_chunk->print_on(gclog_or_tty);
2381 chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2382 }
2383 }
2384
2385 void SpaceManager::retire_current_chunk() {
2386 if (current_chunk() != NULL) {
2387 size_t remaining_words = current_chunk()->free_word_size();
2388 if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2389 block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2390 inc_used_metrics(remaining_words);
2391 }
2392 }
2393 }
2394
2395 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2396 size_t grow_chunks_by_words) {
2397 // Get a chunk from the chunk freelist
2398 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2399
2400 if (next == NULL) {
2401 next = vs_list()->get_new_chunk(word_size,
2402 grow_chunks_by_words,
2403 medium_chunk_bunch());
2404 }
2405
2406 if (TraceMetadataHumongousAllocation && next != NULL &&
2407 SpaceManager::is_humongous(next->word_size())) {
2408 gclog_or_tty->print_cr(" new humongous chunk word size "
2409 SIZE_FORMAT, next->word_size());
2410 }
2411
2412 return next;
2413 }
2414
2415 /*
2416 * The policy is to allocate up to _small_chunk_limit small chunks
2417 * after which only medium chunks are allocated. This is done to
2418 * reduce fragmentation. In some cases, this can result in a lot
2419 * of small chunks being allocated to the point where it's not
2420 * possible to expand. If this happens, there may be no medium chunks
2421 * available and OOME would be thrown. Instead of doing that,
2422 * if the allocation request size fits in a small chunk, an attempt
2423 * will be made to allocate a small chunk.
2424 */
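// Hypothetical usage sketch (the failure-handling caller is assumed,
// not shown in this excerpt): after a normal allocation fails, a caller
// can retry with
//   MetaWord* p = vsm->get_small_chunk_and_allocate(word_size);
// which returns NULL immediately when word_size plus overhead does not
// fit in a small chunk, and otherwise tries the small chunk free list.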
2425 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2426 if (word_size + Metachunk::overhead() > small_chunk_size()) {
2427 return NULL;
2428 }
2429
2430 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2431 MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2432
2433 Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2434
2435 MetaWord* mem = NULL;
2436
2437 if (chunk != NULL) {
2438 // Add chunk to the in-use chunk list and do an allocation from it.
2439 // Add to this manager's list of chunks in use.
2440 add_chunk(chunk, false);
2441 mem = chunk->allocate(word_size);
2442
2443 inc_used_metrics(word_size);
2444
2445 // Track metaspace memory usage statistic.
2446 track_metaspace_memory_usage();
2447 }
2448
2449 return mem;
2450 }
2451
2452 MetaWord* SpaceManager::allocate(size_t word_size) {
2453 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2454
2455 size_t raw_word_size = get_raw_word_size(word_size);
2456 BlockFreelist* fl = block_freelists();
2457 MetaWord* p = NULL;
2458 // Allocation from the dictionary is expensive in the sense that
2459 // the dictionary has to be searched for a size. Don't allocate
2460 // from the dictionary until it starts to get fat. Is this
2461 // a reasonable policy? Maybe a skinny dictionary is fast enough
2462 // for allocations. Do some profiling. JJJ
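// Illustrative numbers (using the allocation_from_dictionary_limit of
// 4 * K declared earlier in this file): while the block freelist's
// total_size() is at or below that limit, every request goes straight
// to allocate_work(); once the freelist grows past the limit,
// get_block() is consulted first so freed metadata is reused.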
2463 if (fl->total_size() > allocation_from_dictionary_limit) {
2464 p = fl->get_block(raw_word_size);
2465 }
2466 if (p == NULL) {
2467 p = allocate_work(raw_word_size);
2468 }
2469
2470 return p;
2471 }
2472
2473 // Returns the address of space allocated for "word_size".
2474 // This method does not know about blocks (Metablocks).
2475 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2476 assert_lock_strong(_lock);
2477 #ifdef ASSERT
2478 if (Metadebug::test_metadata_failure()) {
2479 return NULL;
2480 }
2481 #endif
2482 // Is there space in the current chunk?
2483 MetaWord* result = NULL;
2484
2485 // For DumpSharedSpaces, only allocate out of the current chunk which is
2486 // never null because we gave it the size we wanted. Caller reports out
2487 // of memory if this returns null.
2488 if (DumpSharedSpaces) {
2489 assert(current_chunk() != NULL, "should never happen");
2490 inc_used_metrics(word_size);
2491 return current_chunk()->allocate(word_size); // caller handles null result
2492 }
2493
2494 if (current_chunk() != NULL) {
2495 result = current_chunk()->allocate(word_size);
2496 }
2497
2498 if (result == NULL) {
2499 result = grow_and_allocate(word_size);
2500 }
2501
2502 if (result != NULL) {
2503 inc_used_metrics(word_size);
2504 assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2505 "Head of the list is being allocated");
2506 }
2507
2508 return result;
2509 }
2510
2511 void SpaceManager::verify() {
2512 // If there are blocks in the dictionary, then
2513 // verification of chunks does not work since
2514 // being in the dictionary alters a chunk.
2515 if (block_freelists()->total_size() == 0) {
2516 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2517 Metachunk* curr = chunks_in_use(i);
2518 while (curr != NULL) {
2519 curr->verify();
2520 verify_chunk_size(curr);
2521 curr = curr->next();
2522 }
2523 }
2524 }
2525 }
2526
2527 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2528 assert(is_humongous(chunk->word_size()) ||
2529 chunk->word_size() == medium_chunk_size() ||
2530 chunk->word_size() == small_chunk_size() ||
2531 chunk->word_size() == specialized_chunk_size(),
2532 "Chunk size is wrong");
2533 return;
2534 }
2535
2536 #ifdef ASSERT
2537 void SpaceManager::verify_allocated_blocks_words() {
2538 // Verification is only guaranteed at a safepoint.
2539 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2540 "Verification can fail if the applications is running");
2541 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2542 err_msg("allocation total is not consistent " SIZE_FORMAT
2543 " vs " SIZE_FORMAT,
2544 allocated_blocks_words(), sum_used_in_chunks_in_use()));
2545 }
2546
2547 #endif
2548
2549 void SpaceManager::dump(outputStream* const out) const {
2550 size_t curr_total = 0;
2551 size_t waste = 0;
2552 uint i = 0;
2553 size_t used = 0;
2554 size_t capacity = 0;
2555
2556 // Add up statistics for all chunks in this SpaceManager.
2557 for (ChunkIndex index = ZeroIndex;
2558 index < NumberOfInUseLists;
2559 index = next_chunk_index(index)) {
2560 for (Metachunk* curr = chunks_in_use(index);
2561 curr != NULL;
2562 curr = curr->next()) {
2563 out->print("%d) ", i++);
2564 curr->print_on(out);
2565 curr_total += curr->word_size();
2566 used += curr->used_word_size();
2567 capacity += curr->word_size();
2568 waste += curr->free_word_size() + curr->overhead();
2569 }
2570 }
2571
2572 if (TraceMetadataChunkAllocation && Verbose) {
2573 block_freelists()->print_on(out);
2574 }
2575
2576 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2577 // Free space isn't wasted.
2578 waste -= free;
2579
2580 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2581 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2582 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2583 }
2584
2585 #ifndef PRODUCT
2586 void SpaceManager::mangle_freed_chunks() {
2587 for (ChunkIndex index = ZeroIndex;
2588 index < NumberOfInUseLists;
2589 index = next_chunk_index(index)) {
2590 for (Metachunk* curr = chunks_in_use(index);
2591 curr != NULL;
2592 curr = curr->next()) {
2593 curr->mangle();
2594 }
2595 }
2596 }
2597 #endif // PRODUCT
2598
2599 // MetaspaceAux
2600
2601
2602 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2603 size_t MetaspaceAux::_used_words[] = {0, 0};
2604
2605 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2606 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2607 return list == NULL ? 0 : list->free_bytes();
2608 }
2609
2610 size_t MetaspaceAux::free_bytes() {
2611 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2612 }
2613
2614 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2615 assert_lock_strong(SpaceManager::expand_lock());
2616 assert(words <= capacity_words(mdtype),
2617 err_msg("About to decrement below 0: words " SIZE_FORMAT
2618 " is greater than _capacity_words[%u] " SIZE_FORMAT,
2619 words, mdtype, capacity_words(mdtype)));
2620 _capacity_words[mdtype] -= words;
2621 }
2622
2623 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2624 assert_lock_strong(SpaceManager::expand_lock());
2625 // Needs to be atomic
2626 _capacity_words[mdtype] += words;
2627 }
2628
2629 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2630 assert(words <= used_words(mdtype),
2631 err_msg("About to decrement below 0: words " SIZE_FORMAT
2632 " is greater than _used_words[%u] " SIZE_FORMAT,
2633 words, mdtype, used_words(mdtype)));
2634 // For CMS deallocation of the Metaspaces occurs during the
2635 // sweep which is a concurrent phase. Protection by the expand_lock()
2636 // is not enough since allocation is on a per Metaspace basis
2637 // and protected by the Metaspace lock.
2638 jlong minus_words = -(jlong)words;
2639 Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2640 }
2641
2642 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2643 // _used_words tracks allocations for
2644 // each piece of metadata. Those allocations are
2645 // generally done concurrently by different application
2646 // threads so must be done atomically.
2647 Atomic::add_ptr(words, &_used_words[mdtype]);
2648 }
2649
2650 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
2651 size_t used = 0;
2652 ClassLoaderDataGraphMetaspaceIterator iter;
2653 while (iter.repeat()) {
2654 Metaspace* msp = iter.get_next();
2655 // Sum allocated_blocks_words for each metaspace
2656 if (msp != NULL) {
2657 used += msp->used_words_slow(mdtype);
2658 }
2659 }
2660 return used * BytesPerWord;
2661 }
2662
2663 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
2664 size_t free = 0;
2665 ClassLoaderDataGraphMetaspaceIterator iter;
2666 while (iter.repeat()) {
2667 Metaspace* msp = iter.get_next();
2668 if (msp != NULL) {
2669 free += msp->free_words_slow(mdtype);
2670 }
2671 }
2672 return free * BytesPerWord;
2673 }
2674
2675 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
2676 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
2677 return 0;
2678 }
2679 // Don't count the space in the freelists. That space will be
2680 // added to the capacity calculation as needed.
2681 size_t capacity = 0;
2682 ClassLoaderDataGraphMetaspaceIterator iter;
2683 while (iter.repeat()) {
2684 Metaspace* msp = iter.get_next();
2685 if (msp != NULL) {
2686 capacity += msp->capacity_words_slow(mdtype);
2687 }
2688 }
2689 return capacity * BytesPerWord;
2690 }
2691
2692 size_t MetaspaceAux::capacity_bytes_slow() {
2693 #ifdef PRODUCT
2694 // Use capacity_bytes() in PRODUCT instead of this function.
2695 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
2696 #endif
2697 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
2698 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
2699 assert(capacity_bytes() == class_capacity + non_class_capacity,
2700 err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
2701 " class_capacity + non_class_capacity " SIZE_FORMAT
2702 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
2703 capacity_bytes(), class_capacity + non_class_capacity,
2704 class_capacity, non_class_capacity));
2705
2706 return class_capacity + non_class_capacity;
2707 }
2708
2709 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
2710 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2711 return list == NULL ? 0 : list->reserved_bytes();
2712 }
2713
2714 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
2715 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2716 return list == NULL ? 0 : list->committed_bytes();
2717 }
2718
2719 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
2720
2721 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
2722 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
2723 if (chunk_manager == NULL) {
2724 return 0;
2725 }
2726 chunk_manager->slow_verify();
2727 return chunk_manager->free_chunks_total_words();
2728 }
2729
2730 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
2731 return free_chunks_total_words(mdtype) * BytesPerWord;
2732 }
2733
2734 size_t MetaspaceAux::free_chunks_total_words() {
2735 return free_chunks_total_words(Metaspace::ClassType) +
2736 free_chunks_total_words(Metaspace::NonClassType);
2737 }
2738
2739 size_t MetaspaceAux::free_chunks_total_bytes() {
2740 return free_chunks_total_words() * BytesPerWord;
2741 }
2742
2743 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
2744 return Metaspace::get_chunk_manager(mdtype) != NULL;
2745 }
2746
2747 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
2748 if (!has_chunk_free_list(mdtype)) {
2749 return MetaspaceChunkFreeListSummary();
2750 }
2751
2752 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
2753 return cm->chunk_free_list_summary();
2754 }
2755
2756 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
2757 gclog_or_tty->print(", [Metaspace:");
2758 if (PrintGCDetails && Verbose) {
2759 gclog_or_tty->print(" " SIZE_FORMAT
2760 "->" SIZE_FORMAT
2761 "(" SIZE_FORMAT ")",
2762 prev_metadata_used,
2763 used_bytes(),
2764 reserved_bytes());
2765 } else {
2766 gclog_or_tty->print(" " SIZE_FORMAT "K"
2767 "->" SIZE_FORMAT "K"
2768 "(" SIZE_FORMAT "K)",
2769 prev_metadata_used/K,
2770 used_bytes()/K,
2771 reserved_bytes()/K);
2772 }
2773
2774 gclog_or_tty->print("]");
2775 }
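// Illustrative output (sizes assumed): with PrintGCDetails alone, the
// format strings above produce something like
//   , [Metaspace: 1024K->980K(4096K)]
// i.e. previous used -> current used (reserved).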
2776
2777 // This is printed when PrintGCDetails is enabled.
2778 void MetaspaceAux::print_on(outputStream* out) {
2779 Metaspace::MetadataType nct = Metaspace::NonClassType;
2780
2781 out->print_cr(" Metaspace "
2782 "used " SIZE_FORMAT "K, "
2783 "capacity " SIZE_FORMAT "K, "
2784 "committed " SIZE_FORMAT "K, "
2785 "reserved " SIZE_FORMAT "K",
2786 used_bytes()/K,
2787 capacity_bytes()/K,
2788 committed_bytes()/K,
2789 reserved_bytes()/K);
2790
2791 if (Metaspace::using_class_space()) {
2792 Metaspace::MetadataType ct = Metaspace::ClassType;
2793 out->print_cr(" class space "
2794 "used " SIZE_FORMAT "K, "
2795 "capacity " SIZE_FORMAT "K, "
2796 "committed " SIZE_FORMAT "K, "
2797 "reserved " SIZE_FORMAT "K",
2798 used_bytes(ct)/K,
2799 capacity_bytes(ct)/K,
2800 committed_bytes(ct)/K,
2801 reserved_bytes(ct)/K);
2802 }
2803 }
2804
2805 // Print information for class space and data space separately.
2806 // This is almost the same as above.
2807 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
2808 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
2809 size_t capacity_bytes = capacity_bytes_slow(mdtype);
2810 size_t used_bytes = used_bytes_slow(mdtype);
2811 size_t free_bytes = free_bytes_slow(mdtype);
2812 size_t used_and_free = used_bytes + free_bytes +
2813 free_chunks_capacity_bytes;
2814 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT
2815 "K + unused in chunks " SIZE_FORMAT "K + "
2816 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT
2817 "K capacity in allocated chunks " SIZE_FORMAT "K",
2818 used_bytes / K,
2819 free_bytes / K,
2820 free_chunks_capacity_bytes / K,
2821 used_and_free / K,
2822 capacity_bytes / K);
2823 // Accounting can only be correct if we got the values during a safepoint
2824 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
2825 }
2826
2827 // Print total fragmentation for class metaspaces
2828 void MetaspaceAux::print_class_waste(outputStream* out) {
2829 assert(Metaspace::using_class_space(), "class metaspace not used");
2830 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
2831 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
2832 ClassLoaderDataGraphMetaspaceIterator iter;
2833 while (iter.repeat()) {
2834 Metaspace* msp = iter.get_next();
2835 if (msp != NULL) {
2836 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2837 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2838 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2839 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
2840 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2841 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
2842 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2843 }
2844 }
2845 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2846 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2847 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2848 "large count " SIZE_FORMAT,
2849 cls_specialized_count, cls_specialized_waste,
2850 cls_small_count, cls_small_waste,
2851 cls_medium_count, cls_medium_waste, cls_humongous_count);
2852 }
2853
2854 // Print total fragmentation for data and class metaspaces separately
2855 void MetaspaceAux::print_waste(outputStream* out) {
2856 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
2857 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
2858
2859 ClassLoaderDataGraphMetaspaceIterator iter;
2860 while (iter.repeat()) {
2861 Metaspace* msp = iter.get_next();
2862 if (msp != NULL) {
2863 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
2864 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
2865 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex);
2866 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex);
2867 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
2868 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
2869 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
2870 }
2871 }
2872 out->print_cr("Total fragmentation waste (words) doesn't count free space");
2873 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
2874 SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
2875 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
2876 "large count " SIZE_FORMAT,
2877 specialized_count, specialized_waste, small_count,
2878 small_waste, medium_count, medium_waste, humongous_count);
2879 if (Metaspace::using_class_space()) {
2880 print_class_waste(out);
2881 }
2882 }
2883
2884 // Dump global metaspace things from the end of ClassLoaderDataGraph
2885 void MetaspaceAux::dump(outputStream* out) {
2886 out->print_cr("All Metaspace:");
2887 out->print("data space: "); print_on(out, Metaspace::NonClassType);
2888 out->print("class space: "); print_on(out, Metaspace::ClassType);
2889 print_waste(out);
2890 }
2891
2892 void MetaspaceAux::verify_free_chunks() {
2893 Metaspace::chunk_manager_metadata()->verify();
2894 if (Metaspace::using_class_space()) {
2895 Metaspace::chunk_manager_class()->verify();
2896 }
2897 }
2898
2899 void MetaspaceAux::verify_capacity() {
2900 #ifdef ASSERT
2901 size_t running_sum_capacity_bytes = capacity_bytes();
2902 // Check the running sum of capacity against the slow, definitive sum.
2903 size_t capacity_in_use_bytes = capacity_bytes_slow();
2904 assert(running_sum_capacity_bytes == capacity_in_use_bytes,
2905 err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
2906 " capacity_bytes_slow()" SIZE_FORMAT,
2907 running_sum_capacity_bytes, capacity_in_use_bytes));
2908 for (Metaspace::MetadataType i = Metaspace::ClassType;
2909 i < Metaspace::MetadataTypeCount;
2910 i = (Metaspace::MetadataType)(i + 1)) {
2911 size_t capacity_in_use_bytes = capacity_bytes_slow(i);
2912 assert(capacity_bytes(i) == capacity_in_use_bytes,
2913 err_msg("capacity_bytes(%u) " SIZE_FORMAT
2914 " capacity_bytes_slow(%u)" SIZE_FORMAT,
2915 i, capacity_bytes(i), i, capacity_in_use_bytes));
2916 }
2917 #endif
2918 }
2919
2920 void MetaspaceAux::verify_used() {
2921 #ifdef ASSERT
2922 size_t running_sum_used_bytes = used_bytes();
2923 // Check the running sum of used against the slow, definitive sum.
2924 size_t used_in_use_bytes = used_bytes_slow();
2925 assert(running_sum_used_bytes == used_in_use_bytes,
2926 err_msg("used_bytes() " SIZE_FORMAT
2927 " used_bytes_slow() " SIZE_FORMAT,
2928 running_sum_used_bytes, used_in_use_bytes));
2929 for (Metaspace::MetadataType i = Metaspace::ClassType;
2930 i < Metaspace::MetadataTypeCount;
2931 i = (Metaspace::MetadataType)(i + 1)) {
2932 size_t used_in_use_bytes = used_bytes_slow(i);
2933 assert(used_bytes(i) == used_in_use_bytes,
2934 err_msg("used_bytes(%u) " SIZE_FORMAT
2935 " used_bytes_slow(%u)" SIZE_FORMAT,
2936 i, used_bytes(i), i, used_in_use_bytes));
2937 }
2938 #endif
2939 }
2940
2941 void MetaspaceAux::verify_metrics() {
2942 verify_capacity();
2943 verify_used();
2944 }
2945
2946
2947 // Metaspace methods
2948
2949 size_t Metaspace::_first_chunk_word_size = 0;
2950 size_t Metaspace::_first_class_chunk_word_size = 0;
2951
2952 size_t Metaspace::_commit_alignment = 0;
2953 size_t Metaspace::_reserve_alignment = 0;
2954
2955 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
2956 initialize(lock, type);
2957 }
2958
2959 Metaspace::~Metaspace() {
2960 delete _vsm;
2961 if (using_class_space()) {
2962 delete _class_vsm;
2963 }
2964 }
2965
2966 VirtualSpaceList* Metaspace::_space_list = NULL;
2967 VirtualSpaceList* Metaspace::_class_space_list = NULL;
2968
2969 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
2970 ChunkManager* Metaspace::_chunk_manager_class = NULL;
2971
2972 #define VIRTUALSPACEMULTIPLIER 2
2973
2974 #ifdef _LP64
2975 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
2976
2977 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
2978 // Figure out the narrow_klass_base and the narrow_klass_shift. The
2979 // narrow_klass_base is the lower of the metaspace base and the cds base
2980 // (if cds is enabled). The narrow_klass_shift depends on the distance
2981 // between the lower base and higher address.
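// Worked example (illustrative addresses, CDS off, assuming the usual
// LogKlassAlignmentInBytes of 3, i.e. a 32G encoding range): a 1G class
// space reserved at 2G ends at 3G <= UnscaledClassSpaceMax (4G), so
// base = 0 and shift = 0 (unscaled). Reserved at 28G it ends at
// 29G <= 32G, so base = 0 but shift = LogKlassAlignmentInBytes.
// Reserved at 40G it ends above 32G, so base = metaspace_base and,
// because the 1G span fits in 4G, shift = 0.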
2982 address lower_base;
2983 address higher_address;
2984 #if INCLUDE_CDS
2985 if (UseSharedSpaces) {
2986 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
2987 (address)(metaspace_base + compressed_class_space_size()));
2988 lower_base = MIN2(metaspace_base, cds_base);
2989 } else
2990 #endif
2991 {
2992 higher_address = metaspace_base + compressed_class_space_size();
2993 lower_base = metaspace_base;
2994
2995 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
2996 // If compressed class space fits in lower 32G, we don't need a base.
2997 if (higher_address <= (address)klass_encoding_max) {
2998 lower_base = 0; // Effectively lower base is zero.
2999 }
3000 }
3001
3002 Universe::set_narrow_klass_base(lower_base);
3003
3004 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
3005 Universe::set_narrow_klass_shift(0);
3006 } else {
3007 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
3008 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
3009 }
3010 }
3011
3012 #if INCLUDE_CDS
3013 // Return TRUE if the specified metaspace_base and cds_base are close enough
3014 // to work with compressed klass pointers.
3015 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
3016 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
3017 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3018 address lower_base = MIN2((address)metaspace_base, cds_base);
3019 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
3020 (address)(metaspace_base + compressed_class_space_size()));
3021 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
3022 }
3023 #endif
3024
3025 // Try to allocate the metaspace at the requested addr.
3026 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
3027 assert(using_class_space(), "called improperly");
3028 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
3029 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
3030 "Metaspace size is too big");
3031 assert_is_ptr_aligned(requested_addr, _reserve_alignment);
3032 assert_is_ptr_aligned(cds_base, _reserve_alignment);
3033 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);
3034
3035 // Don't use large pages for the class space.
3036 bool large_pages = false;
3037
3038 #ifndef AARCH64
3039 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
3040 _reserve_alignment,
3041 large_pages,
3042 requested_addr);
3043 #else // AARCH64
3044 ReservedSpace metaspace_rs;
3045
3046 // Our compressed klass pointers may fit nicely into the lower 32
3047 // bits.
3048 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
3049 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3050 _reserve_alignment,
3051 large_pages,
3052 requested_addr);
3053 }
3054
3055 if (!metaspace_rs.is_reserved()) {
3056 // Try to align metaspace so that we can decode a compressed klass
3057 // with a single MOVK instruction. We can do this iff the
3058 // compressed class base is a multiple of 4G.
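// Illustrative AArch64 decode (assumed shapes, shift 0): with the base
// at N * 4G its low 32 bits are zero, so a narrow klass loaded into w0
// can be widened with a single
//   movk x0, #(base >> 32), lsl #32
// instead of a full add of the base register.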
3059 for (char *a = (char*)align_ptr_up(requested_addr, 4*G);
3060 a < (char*)(1024*G);
3061 a += 4*G) {
3062
3063 #if INCLUDE_CDS
3064 if (UseSharedSpaces
3065 && !can_use_cds_with_metaspace_addr(a, cds_base)) {
3066 // We failed to find an aligned base that will reach. Fall
3067 // back to using our requested addr.
3068 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3069 _reserve_alignment,
3070 large_pages,
3071 requested_addr);
3072 break;
3073 }
3074 #endif
3075
3076 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3077 _reserve_alignment,
3078 large_pages,
3079 a);
3080 if (metaspace_rs.is_reserved())
3081 break;
3082 }
3083 }
3084
3085 #endif // AARCH64
3086
3087 if (!metaspace_rs.is_reserved()) {
3088 #if INCLUDE_CDS
3089 if (UseSharedSpaces) {
3090 size_t increment = align_size_up(1*G, _reserve_alignment);
3091
3092 // Keep trying to allocate the metaspace, increasing the requested_addr
3093 // by 1GB each time, until we reach an address that will no longer allow
3094 // use of CDS with compressed klass pointers.
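// Illustrative walk (addresses assumed): starting from a requested_addr
// of 4G with a 1G increment, the loop retries at 5G, 6G, ... and stops
// as soon as the next candidate would put the class space out of
// compressed-klass reach of the shared archive.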
3095 char *addr = requested_addr;
3096 while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
3097 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
3098 addr = addr + increment;
3099 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3100 _reserve_alignment, large_pages, addr);
3101 }
3102 }
3103 #endif
3104 // If no successful allocation then try to allocate the space anywhere. If
3105 // that fails then OOM doom. At this point we cannot try allocating the
3106 // metaspace as if UseCompressedClassPointers is off because too much
3107 // initialization has happened that depends on UseCompressedClassPointers.
3108 // So, UseCompressedClassPointers cannot be turned off at this point.
3109 if (!metaspace_rs.is_reserved()) {
3110 metaspace_rs = ReservedSpace(compressed_class_space_size(),
3111 _reserve_alignment, large_pages);
3112 if (!metaspace_rs.is_reserved()) {
3113 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
3114 compressed_class_space_size()));
3115 }
3116 }
3117 }
3118
3119 // If we got here then the metaspace got allocated.
3120 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3121
3122 #if INCLUDE_CDS
3123 // Verify that we can use shared spaces. Otherwise, turn off CDS.
3124 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3125 FileMapInfo::stop_sharing_and_unmap(
3126 "Could not allocate metaspace at a compatible address");
3127 }
3128 #endif
3129 set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3130 UseSharedSpaces ? (address)cds_base : 0);
3131
3132 initialize_class_space(metaspace_rs);
3133
3134 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
3135 print_compressed_class_space(gclog_or_tty, requested_addr);
3136 }
3137 }
3138
3139 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3140 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3141 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3142 if (_class_space_list != NULL) {
3143 address base = (address)_class_space_list->current_virtual_space()->bottom();
3144 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3145 compressed_class_space_size(), p2i(base));
if (requested_addr != NULL) {
3147 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3148 }
3149 st->cr();
3150 }
3151 }
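// Example output of print_compressed_class_space() above (values are
// illustrative only):
//   Narrow klass base: 0x00000007c0000000, Narrow klass shift: 0
//   Compressed class space size: 1073741824 Address: 0x00000007c0000000 Req Addr: 0x00000007c0000000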
3152
3153 // For UseCompressedClassPointers the class space is reserved above the top of
3154 // the Java heap. The argument passed in is at the base of the compressed space.
3155 void Metaspace::initialize_class_space(ReservedSpace rs) {
// The reserved space size may be bigger because of alignment, especially
// with UseLargePages.
assert(rs.size() >= CompressedClassSpaceSize,
err_msg(SIZE_FORMAT " < " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize));
3159 assert(using_class_space(), "Must be using class space");
3160 _class_space_list = new VirtualSpaceList(rs);
3161 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
3162
3163 if (!_class_space_list->initialization_succeeded()) {
3164 vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
3165 }
3166 }
3167
3168 #endif
3169
3170 void Metaspace::ergo_initialize() {
3171 if (DumpSharedSpaces) {
3172 // Using large pages when dumping the shared archive is currently not implemented.
3173 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
3174 }
3175
3176 size_t page_size = os::vm_page_size();
3177 if (UseLargePages && UseLargePagesInMetaspace) {
3178 page_size = os::large_page_size();
3179 }
3180
3181 _commit_alignment = page_size;
3182 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
3183
// Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so would
// discard the information of whether MaxMetaspaceSize was set on the command
// line or not. This information is needed later to conform to the
// specification of the java.lang.management.MemoryUsage API.
3188 //
3189 // Ideally, we would be able to set the default value of MaxMetaspaceSize in
3190 // globals.hpp to the aligned value, but this is not possible, since the
3191 // alignment depends on other flags being parsed.
3192 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
3193
3194 if (MetaspaceSize > MaxMetaspaceSize) {
3195 MetaspaceSize = MaxMetaspaceSize;
3196 }
3197
3198 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
3199
3200 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
3201
3202 if (MetaspaceSize < 256*K) {
3203 vm_exit_during_initialization("Too small initial Metaspace size");
3204 }
3205
3206 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
3207 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
3208
3209 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
3210 set_compressed_class_space_size(CompressedClassSpaceSize);
3211 }
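// Worked example for ergo_initialize() above (hypothetical platform values):
// with a 4K page size, a 64K allocation granularity and large pages disabled,
// _commit_alignment == 4K and _reserve_alignment == MAX2(4K, 64K) == 64K.
// MaxMetaspaceSize and CompressedClassSpaceSize are then rounded down to 64K
// multiples, while MetaspaceSize and the expansion sizes are rounded down to
// 4K multiples.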
3212
3213 void Metaspace::global_initialize() {
3214 MetaspaceGC::initialize();
3215
3216 // Initialize the alignment for shared spaces.
3217 int max_alignment = os::vm_allocation_granularity();
3218 size_t cds_total = 0;
3219
3220 MetaspaceShared::set_max_alignment(max_alignment);
3221
3222 if (DumpSharedSpaces) {
3223 #if INCLUDE_CDS
3224 MetaspaceShared::estimate_regions_size();
3225
3226 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
3227 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
3228 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
3229 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);
3230
// Make sure SharedReadOnlySize and SharedReadWriteSize are not less than
// the minimum values.
if (SharedReadOnlySize < MetaspaceShared::min_ro_size) {
report_out_of_shared_space(SharedReadOnly);
}

if (SharedReadWriteSize < MetaspaceShared::min_rw_size) {
report_out_of_shared_space(SharedReadWrite);
}
3240
// The min_misc_data_size and min_misc_code_size estimates are based on
// MetaspaceShared::generate_vtable_methods().
// The minimum size only accounts for the vtable methods. Any size less than
// the minimum required size would cause a VM crash when allocating the
// vtable methods.
uintx min_misc_data_size = align_size_up(
3246 MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size * sizeof(void*), max_alignment);
3247
3248 if (SharedMiscDataSize < min_misc_data_size) {
3249 report_out_of_shared_space(SharedMiscData);
3250 }
3251
3252 uintx min_misc_code_size = align_size_up(
3253 (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
3254 (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
3255 max_alignment);
3256
3257 if (SharedMiscCodeSize < min_misc_code_size) {
3258 report_out_of_shared_space(SharedMiscCode);
3259 }
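// Illustrative arithmetic (assuming num_virtuals == 200 and vtbl_list_size
// == 17 on a 64-bit VM; the authoritative values live in MetaspaceShared and
// may differ): min_misc_data_size is about 200 * 17 * 8 = 27200 bytes before
// alignment, and min_misc_code_size is about
// 200 * 17 * (8 + vtbl_method_size) + vtbl_common_code_size. Both are
// rounded up to max_alignment before the comparisons above.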
3260
// Initialize with the sum of the shared space sizes. The read-only
// and read-write metaspace chunks will be allocated out of this and the
// remainder is the misc code and data chunks.
3264 cds_total = FileMapInfo::shared_spaces_size();
3265 cds_total = align_size_up(cds_total, _reserve_alignment);
3266 _space_list = new VirtualSpaceList(cds_total/wordSize);
3267 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3268
3269 if (!_space_list->initialization_succeeded()) {
3270 vm_exit_during_initialization("Unable to dump shared archive.", NULL);
3271 }
3272
3273 #ifdef _LP64
3274 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
3275 vm_exit_during_initialization("Unable to dump shared archive.",
3276 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
3277 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
3278 "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(),
3279 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
3280 }
3281
3282 // Set the compressed klass pointer base so that decoding of these pointers works
3283 // properly when creating the shared archive.
3284 assert(UseCompressedOops && UseCompressedClassPointers,
3285 "UseCompressedOops and UseCompressedClassPointers must be set");
3286 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
3287 if (TraceMetavirtualspaceAllocation && Verbose) {
3288 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
3289 p2i(_space_list->current_virtual_space()->bottom()));
3290 }
3291
3292 Universe::set_narrow_klass_shift(0);
3293 #endif // _LP64
3294 #endif // INCLUDE_CDS
3295 } else {
3296 #if INCLUDE_CDS
3297 // If using shared space, open the file that contains the shared space
3298 // and map in the memory before initializing the rest of metaspace (so
3299 // the addresses don't conflict)
3300 address cds_address = NULL;
3301 if (UseSharedSpaces) {
3302 FileMapInfo* mapinfo = new FileMapInfo();
3303
// Open the shared archive file, then read and validate the header. If
// initialization fails, shared spaces (UseSharedSpaces) are
// disabled and the file is closed.
// The shared spaces are also mapped in at this point.
3308 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3309 cds_total = FileMapInfo::shared_spaces_size();
3310 cds_address = (address)mapinfo->region_base(0);
3311 } else {
3312 assert(!mapinfo->is_open() && !UseSharedSpaces,
3313 "archive file not closed or shared spaces not disabled.");
3314 }
3315 }
3316 #endif // INCLUDE_CDS
3317 #ifdef _LP64
3318 // If UseCompressedClassPointers is set then allocate the metaspace area
3319 // above the heap and above the CDS area (if it exists).
3320 if (using_class_space()) {
3321 if (UseSharedSpaces) {
3322 #if INCLUDE_CDS
3323 char* cds_end = (char*)(cds_address + cds_total);
3324 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3325 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3326 #endif
3327 } else {
3328 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3329 allocate_metaspace_compressed_klass_ptrs(base, 0);
3330 }
3331 }
3332 #endif // _LP64
3333
3334 // Initialize these before initializing the VirtualSpaceList
3335 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3336 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
// Make the first class chunk bigger than a medium chunk so it's not put
// on the medium chunk list. The next chunk will be small and progress
// from there. This size was determined empirically by running -version.
3340 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3341 (CompressedClassSpaceSize/BytesPerWord)*2);
3342 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
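// For example, assuming the default 1G CompressedClassSpaceSize on a 64-bit
// VM (where MediumChunk is 8K words): MIN2(8K * 6, (1G / 8) * 2) ==
// MIN2(48K, 256M) == 48K words, i.e. the size of six medium chunks, which
// align_word_size_up() then rounds up to the allocation granularity.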
3343 // Arbitrarily set the initial virtual space to a multiple
3344 // of the boot class loader size.
3345 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3346 word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3347
3348 // Initialize the list of virtual spaces.
3349 _space_list = new VirtualSpaceList(word_size);
3350 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
3351
3352 if (!_space_list->initialization_succeeded()) {
3353 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
3354 }
3355 }
3356
3357 _tracer = new MetaspaceTracer();
3358 }
3359
3360 void Metaspace::post_initialize() {
3361 MetaspaceGC::post_initialize();
3362 }
3363
3364 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
3365 size_t chunk_word_size,
3366 size_t chunk_bunch) {
3367 // Get a chunk from the chunk freelist
3368 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
3369 if (chunk != NULL) {
3370 return chunk;
3371 }
3372
3373 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
3374 }
3375
3376 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
3377
3378 assert(space_list() != NULL,
3379 "Metadata VirtualSpaceList has not been initialized");
3380 assert(chunk_manager_metadata() != NULL,
3381 "Metadata ChunkManager has not been initialized");
3382
3383 _vsm = new SpaceManager(NonClassType, lock);
3384 if (_vsm == NULL) {
3385 return;
3386 }
3387 size_t word_size;
3388 size_t class_word_size;
3389 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
3390
3391 if (using_class_space()) {
3392 assert(class_space_list() != NULL,
3393 "Class VirtualSpaceList has not been initialized");
3394 assert(chunk_manager_class() != NULL,
3395 "Class ChunkManager has not been initialized");
3396
3397 // Allocate SpaceManager for classes.
3398 _class_vsm = new SpaceManager(ClassType, lock);
3399 if (_class_vsm == NULL) {
3400 return;
3401 }
3402 }
3403
3404 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3405
3406 // Allocate chunk for metadata objects
3407 Metachunk* new_chunk = get_initialization_chunk(NonClassType,
3408 word_size,
3409 vsm()->medium_chunk_bunch());
// When dumping the shared archive, report an error if the allocation fails.
3411 if (DumpSharedSpaces && new_chunk == NULL) {
3412 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + word_size * BytesPerWord);
3413 }
3414 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
3415 if (new_chunk != NULL) {
3416 // Add to this manager's list of chunks in use and current_chunk().
3417 vsm()->add_chunk(new_chunk, true);
3418 }
3419
3420 // Allocate chunk for class metadata objects
3421 if (using_class_space()) {
3422 Metachunk* class_chunk = get_initialization_chunk(ClassType,
3423 class_word_size,
3424 class_vsm()->medium_chunk_bunch());
3425 if (class_chunk != NULL) {
3426 class_vsm()->add_chunk(class_chunk, true);
3427 } else {
// When dumping the shared archive, report an error if the allocation fails.
3429 if (DumpSharedSpaces) {
3430 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + class_word_size * BytesPerWord);
3431 }
3432 }
3433 }
3434
3435 _alloc_record_head = NULL;
3436 _alloc_record_tail = NULL;
3437 }
3438
3439 size_t Metaspace::align_word_size_up(size_t word_size) {
3440 size_t byte_size = word_size * wordSize;
3441 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
3442 }
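// Example for align_word_size_up() above (assuming a 64-bit VM and a 4K
// allocation alignment inside ReservedSpace): word_size == 100 gives
// 800 bytes, which is aligned up to 4096 bytes and returned as 512 words.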
3443
3444 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
3445 // DumpSharedSpaces doesn't use class metadata area (yet)
3446 // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
3447 if (is_class_space_allocation(mdtype)) {
3448 return class_vsm()->allocate(word_size);
3449 } else {
3450 return vsm()->allocate(word_size);
3451 }
3452 }
3453
3454 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
3455 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
assert(delta_bytes > 0, "Must be positive");
3457
3458 size_t before = 0;
3459 size_t after = 0;
3460 MetaWord* res;
3461 bool incremented;
3462
3463 // Each thread increments the HWM at most once. Even if the thread fails to increment
3464 // the HWM, an allocation is still attempted. This is because another thread must then
3465 // have incremented the HWM and therefore the allocation might still succeed.
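// Illustrative interleaving with two hypothetical threads T1 and T2, both of
// which have just failed an allocation:
//   T1: inc_capacity_until_GC() succeeds -> allocate() succeeds -> loop exits.
//   T2: inc_capacity_until_GC() fails (T1 already raised the HWM), but its
//       retried allocate() can succeed in the capacity T1 made available.
// The loop below therefore exits once this thread has incremented the HWM or
// an allocation has succeeded.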
3466 do {
3467 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
3468 res = allocate(word_size, mdtype);
3469 } while (!incremented && res == NULL);
3470
3471 if (incremented) {
3472 tracer()->report_gc_threshold(before, after,
3473 MetaspaceGCThresholdUpdater::ExpandAndAllocate);
3474 if (PrintGCDetails && Verbose) {
3475 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
3476 " to " SIZE_FORMAT, before, after);
3477 }
3478 }
3479
3480 return res;
3481 }
3482
// Lowest address of the space allocated in this Metaspace. Only valid when
// dumping shared spaces, where the allocated metadata is contiguous.
3485 char* Metaspace::bottom() const {
3486 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces");
3487 return (char*)vsm()->current_chunk()->bottom();
3488 }
3489
3490 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
3491 if (mdtype == ClassType) {
3492 return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
3493 } else {
3494 return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
3495 }
3496 }
3497
3498 size_t Metaspace::free_words_slow(MetadataType mdtype) const {
3499 if (mdtype == ClassType) {
3500 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
3501 } else {
3502 return vsm()->sum_free_in_chunks_in_use();
3503 }
3504 }
3505
// Space capacity in the Metaspace. It includes
// space in the lists of chunks from which allocations
// have been made. It does not include space in the global chunk freelists,
// nor the free space in each chunk's block dictionary, since
// that is already counted in some chunk.
3511 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
3512 if (mdtype == ClassType) {
3513 return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
3514 } else {
3515 return vsm()->sum_capacity_in_chunks_in_use();
3516 }
3517 }
3518
3519 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
3520 return used_words_slow(mdtype) * BytesPerWord;
3521 }
3522
3523 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
3524 return capacity_words_slow(mdtype) * BytesPerWord;
3525 }
3526
3527 size_t Metaspace::allocated_blocks_bytes() const {
3528 return vsm()->allocated_blocks_bytes() +
3529 (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
3530 }
3531
3532 size_t Metaspace::allocated_chunks_bytes() const {
3533 return vsm()->allocated_chunks_bytes() +
3534 (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
3535 }
3536
3537 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
3538 assert(!SafepointSynchronize::is_at_safepoint()
3539 || Thread::current()->is_VM_thread(), "should be the VM thread");
3540
3541 if (DumpSharedSpaces && PrintSharedSpaces) {
3542 record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
3543 }
3544
3545 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
3546
3547 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
// Dark matter: too small to be tracked by the block dictionary, so the
// space is abandoned (and pattern-filled in debug builds).
3549 #ifdef ASSERT
3550 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
3551 #endif
3552 return;
3553 }
3554 if (is_class && using_class_space()) {
3555 class_vsm()->deallocate(ptr, word_size);
3556 } else {
3557 vsm()->deallocate(ptr, word_size);
3558 }
3559 }
3560
3561
3562 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
3563 bool read_only, MetaspaceObj::Type type, TRAPS) {
3564 if (HAS_PENDING_EXCEPTION) {
3565 assert(false, "Should not allocate with exception pending");
3566 return NULL; // caller does a CHECK_NULL too
3567 }
3568
3569 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
3570 "ClassLoaderData::the_null_class_loader_data() should have been used.");
3571
// Allocate in metaspaces without taking out a lock, because taking one
// deadlocks with the SymbolTable_lock. Dumping is single-threaded for now.
// We'll have to revisit this for application class data sharing.
3575 if (DumpSharedSpaces) {
3576 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
3577 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
3578 MetaWord* result = space->allocate(word_size, NonClassType);
3579 if (result == NULL) {
3580 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
3581 }
3582 if (PrintSharedSpaces) {
3583 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
3584 }
3585
3586 // Zero initialize.
3587 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3588
3589 return result;
3590 }
3591
3592 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
3593
3594 // Try to allocate metadata.
3595 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
3596
3597 if (result == NULL) {
3598 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
3599
3600 // Allocation failed.
3601 if (is_init_completed()) {
3602 // Only start a GC if the bootstrapping has completed.
3603
3604 // Try to clean out some memory and retry.
3605 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
3606 loader_data, word_size, mdtype);
3607 }
3608 }
3609
3610 if (result == NULL) {
3611 SpaceManager* sm;
3612 if (is_class_space_allocation(mdtype)) {
3613 sm = loader_data->metaspace_non_null()->class_vsm();
3614 } else {
3615 sm = loader_data->metaspace_non_null()->vsm();
3616 }
3617
3618 result = sm->get_small_chunk_and_allocate(word_size);
3619
3620 if (result == NULL) {
3621 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
3622 }
3623 }
3624
3625 // Zero initialize.
3626 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
3627
3628 return result;
3629 }
3630
3631 size_t Metaspace::class_chunk_size(size_t word_size) {
3632 assert(using_class_space(), "Has to use class space");
3633 return class_vsm()->calc_chunk_size(word_size);
3634 }
3635
3636 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3637 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3638
3639 // If result is still null, we are out of memory.
3640 if (Verbose && TraceMetadataChunkAllocation) {
3641 gclog_or_tty->print_cr("Metaspace allocation failed for size "
3642 SIZE_FORMAT, word_size);
3643 if (loader_data->metaspace_or_null() != NULL) {
3644 loader_data->dump(gclog_or_tty);
3645 }
3646 MetaspaceAux::dump(gclog_or_tty);
3647 }
3648
3649 bool out_of_compressed_class_space = false;
3650 if (is_class_space_allocation(mdtype)) {
3651 Metaspace* metaspace = loader_data->metaspace_non_null();
3652 out_of_compressed_class_space =
3653 MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3654 (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3655 CompressedClassSpaceSize;
3656 }
3657
3658 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3659 const char* space_string = out_of_compressed_class_space ?
3660 "Compressed class space" : "Metaspace";
3661
3662 report_java_out_of_memory(space_string);
3663
3664 if (JvmtiExport::should_post_resource_exhausted()) {
3665 JvmtiExport::post_resource_exhausted(
3666 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
3667 space_string);
3668 }
3669
3670 if (!is_init_completed()) {
3671 vm_exit_during_initialization("OutOfMemoryError", space_string);
3672 }
3673
3674 if (out_of_compressed_class_space) {
3675 THROW_OOP(Universe::out_of_memory_error_class_metaspace());
3676 } else {
3677 THROW_OOP(Universe::out_of_memory_error_metaspace());
3678 }
3679 }
3680
3681 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
3682 switch (mdtype) {
3683 case Metaspace::ClassType: return "Class";
3684 case Metaspace::NonClassType: return "Metadata";
3685 default:
3686 assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
3687 return NULL;
3688 }
3689 }
3690
3691 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
3692 assert(DumpSharedSpaces, "sanity");
3693
3694 int byte_size = (int)word_size * HeapWordSize;
3695 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
3696
3697 if (_alloc_record_head == NULL) {
3698 _alloc_record_head = _alloc_record_tail = rec;
3699 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
3700 _alloc_record_tail->_next = rec;
3701 _alloc_record_tail = rec;
3702 } else {
// Slow linear search, but this doesn't happen often, and only when dumping.
3704 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
3705 if (old->_ptr == ptr) {
3706 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
3707 int remain_bytes = old->_byte_size - byte_size;
3708 assert(remain_bytes >= 0, "sanity");
3709 old->_type = type;
3710
3711 if (remain_bytes == 0) {
3712 delete(rec);
3713 } else {
3714 address remain_ptr = address(ptr) + byte_size;
3715 rec->_ptr = remain_ptr;
3716 rec->_byte_size = remain_bytes;
3717 rec->_type = MetaspaceObj::DeallocatedType;
3718 rec->_next = old->_next;
3719 old->_byte_size = byte_size;
3720 old->_next = rec;
3721 }
3722 return;
3723 }
3724 }
3725 assert(0, "reallocating a freed pointer that was not recorded");
3726 }
3727 }
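// Illustrative record_allocation() reuse case (hypothetical sizes): if a
// 64-byte block at address p was freed earlier, its record [p, 64) has type
// DeallocatedType. A later 48-byte allocation at p splits that record in
// place: [p, 48) takes the new type, and a trailing record [p + 48, 16)
// keeps type DeallocatedType, preserving the address-ordered list.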
3728
3729 void Metaspace::record_deallocation(void* ptr, size_t word_size) {
3730 assert(DumpSharedSpaces, "sanity");
3731
3732 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3733 if (rec->_ptr == ptr) {
3734 assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
3735 rec->_type = MetaspaceObj::DeallocatedType;
3736 return;
3737 }
3738 }
3739
3740 assert(0, "deallocating a pointer that was not recorded");
3741 }
3742
3743 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
3744 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
3745
3746 address last_addr = (address)bottom();
3747
3748 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
3749 address ptr = rec->_ptr;
3750 if (last_addr < ptr) {
3751 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
3752 }
3753 closure->doit(ptr, rec->_type, rec->_byte_size);
3754 last_addr = ptr + rec->_byte_size;
3755 }
3756
3757 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
3758 if (last_addr < top) {
3759 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
3760 }
3761 }
3762
3763 void Metaspace::purge(MetadataType mdtype) {
3764 get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
3765 }
3766
3767 void Metaspace::purge() {
3768 MutexLockerEx cl(SpaceManager::expand_lock(),
3769 Mutex::_no_safepoint_check_flag);
3770 purge(NonClassType);
3771 if (using_class_space()) {
3772 purge(ClassType);
3773 }
3774 }
3775
3776 void Metaspace::print_on(outputStream* out) const {
3777 // Print both class virtual space counts and metaspace.
3778 if (Verbose) {
3779 vsm()->print_on(out);
3780 if (using_class_space()) {
3781 class_vsm()->print_on(out);
3782 }
3783 }
3784 }
3785
3786 bool Metaspace::contains(const void* ptr) {
3787 if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
3788 return true;
3789 }
3790
3791 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
3792 return true;
3793 }
3794
3795 return get_space_list(NonClassType)->contains(ptr);
3796 }
3797
3798 void Metaspace::verify() {
3799 vsm()->verify();
3800 if (using_class_space()) {
3801 class_vsm()->verify();
3802 }
3803 }
3804
3805 void Metaspace::dump(outputStream* const out) const {
3806 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
3807 vsm()->dump(out);
3808 if (using_class_space()) {
3809 out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
3810 class_vsm()->dump(out);
3811 }
3812 }
3813
3814 /////////////// Unit tests ///////////////
3815
3816 #ifndef PRODUCT
3817
3818 class TestMetaspaceAuxTest : AllStatic {
3819 public:
3820 static void test_reserved() {
3821 size_t reserved = MetaspaceAux::reserved_bytes();
3822
3823 assert(reserved > 0, "assert");
3824
3825 size_t committed = MetaspaceAux::committed_bytes();
3826 assert(committed <= reserved, "assert");
3827
3828 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
3829 assert(reserved_metadata > 0, "assert");
3830 assert(reserved_metadata <= reserved, "assert");
3831
3832 if (UseCompressedClassPointers) {
3833 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
3834 assert(reserved_class > 0, "assert");
3835 assert(reserved_class < reserved, "assert");
3836 }
3837 }
3838
3839 static void test_committed() {
3840 size_t committed = MetaspaceAux::committed_bytes();
3841
3842 assert(committed > 0, "assert");
3843
3844 size_t reserved = MetaspaceAux::reserved_bytes();
3845 assert(committed <= reserved, "assert");
3846
3847 size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
3848 assert(committed_metadata > 0, "assert");
3849 assert(committed_metadata <= committed, "assert");
3850
3851 if (UseCompressedClassPointers) {
3852 size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
3853 assert(committed_class > 0, "assert");
3854 assert(committed_class < committed, "assert");
3855 }
3856 }
3857
3858 static void test_virtual_space_list_large_chunk() {
3859 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
3860 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
// A size larger than VirtualSpaceSize (256K) with extra page-sized padding
// added to make it _not_ vm_allocation_granularity aligned on Windows.
size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
large_size += (os::vm_page_size()/BytesPerWord);
3865 vs_list->get_new_chunk(large_size, large_size, 0);
3866 }
3867
3868 static void test() {
3869 test_reserved();
3870 test_committed();
3871 test_virtual_space_list_large_chunk();
3872 }
3873 };
3874
3875 void TestMetaspaceAux_test() {
3876 TestMetaspaceAuxTest::test();
3877 }
3878
3879 class TestVirtualSpaceNodeTest {
3880 static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3881 size_t& num_small_chunks,
3882 size_t& num_specialized_chunks) {
3883 num_medium_chunks = words_left / MediumChunk;
3884 words_left = words_left % MediumChunk;
3885
3886 num_small_chunks = words_left / SmallChunk;
3887 words_left = words_left % SmallChunk;
3888 // how many specialized chunks can we get?
3889 num_specialized_chunks = words_left / SpecializedChunk;
3890 assert(words_left % SpecializedChunk == 0, "should be nothing left");
3891 }
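// For example, with the non-class chunk sizes (MediumChunk == 8192,
// SmallChunk == 512, SpecializedChunk == 128 words), words_left == 9216
// chunks up as 1 medium chunk (8192 words), 2 small chunks (1024 words)
// and 0 specialized chunks, leaving nothing over.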
3892
3893 public:
3894 static void test() {
3895 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3896 const size_t vsn_test_size_words = MediumChunk * 4;
3897 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3898
// The chunk sizes must be multiples of each other, or this will fail
3900 STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3901 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
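// With the current values this holds as 8192 % 512 == 0 and 512 % 128 == 0
// (in words), so any remainder after carving out medium and small chunks is
// an exact number of specialized chunks.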
3902
3903 { // No committed memory in VSN
3904 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3905 VirtualSpaceNode vsn(vsn_test_size_bytes);
3906 vsn.initialize();
3907 vsn.retire(&cm);
3908 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3909 }
3910
3911 { // All of VSN is committed, half is used by chunks
3912 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3913 VirtualSpaceNode vsn(vsn_test_size_bytes);
3914 vsn.initialize();
3915 vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3916 vsn.get_chunk_vs(MediumChunk);
3917 vsn.get_chunk_vs(MediumChunk);
3918 vsn.retire(&cm);
assert(cm.sum_free_chunks_count() == 2, "there should be memory left for 2 medium chunks");
3920 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3921 }
3922
3923 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
// This doesn't work for systems with vm_page_size >= 16K, where four pages
// already reach MediumChunk.
3925 if (page_chunks < MediumChunk) {
3926 // 4 pages of VSN is committed, some is used by chunks
3927 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3928 VirtualSpaceNode vsn(vsn_test_size_bytes);
3929
3930 vsn.initialize();
3931 vsn.expand_by(page_chunks, page_chunks);
3932 vsn.get_chunk_vs(SmallChunk);
3933 vsn.get_chunk_vs(SpecializedChunk);
3934 vsn.retire(&cm);
3935
3936 // committed - used = words left to retire
3937 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3938
3939 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3940 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3941
3942 assert(num_medium_chunks == 0, "should not get any medium chunks");
3943 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3944 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3945 }
3946
3947 { // Half of VSN is committed, a humongous chunk is used
3948 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3949 VirtualSpaceNode vsn(vsn_test_size_bytes);
3950 vsn.initialize();
3951 vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
3952 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
3953 vsn.retire(&cm);
3954
3955 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
3956 size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3957 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3958
3959 assert(num_medium_chunks == 0, "should not get any medium chunks");
3960 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3961 assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3962 }
3963
3964 }
3965
3966 #define assert_is_available_positive(word_size) \
3967 assert(vsn.is_available(word_size), \
3968 err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
3969 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3970 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end())));
3971
3972 #define assert_is_available_negative(word_size) \
3973 assert(!vsn.is_available(word_size), \
3974 err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
3975 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
3976 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end())));
3977
3978 static void test_is_available_positive() {
3979 // Reserve some memory.
3980 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3981 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
3982
3983 // Commit some memory.
3984 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
3985 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
3986 assert(expanded, "Failed to commit");
3987
3988 // Check that is_available accepts the committed size.
3989 assert_is_available_positive(commit_word_size);
3990
3991 // Check that is_available accepts half the committed size.
3992 size_t expand_word_size = commit_word_size / 2;
3993 assert_is_available_positive(expand_word_size);
3994 }
3995
3996 static void test_is_available_negative() {
3997 // Reserve some memory.
3998 VirtualSpaceNode vsn(os::vm_allocation_granularity());
3999 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4000
4001 // Commit some memory.
4002 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4003 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4004 assert(expanded, "Failed to commit");
4005
4006 // Check that is_available doesn't accept a too large size.
4007 size_t two_times_commit_word_size = commit_word_size * 2;
4008 assert_is_available_negative(two_times_commit_word_size);
4009 }
4010
4011 static void test_is_available_overflow() {
4012 // Reserve some memory.
4013 VirtualSpaceNode vsn(os::vm_allocation_granularity());
4014 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
4015
4016 // Commit some memory.
4017 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
4018 bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
4019 assert(expanded, "Failed to commit");
4020
4021 // Calculate a size that will overflow the virtual space size.
4022 void* virtual_space_max = (void*)(uintptr_t)-1;
4023 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
4024 size_t overflow_size = bottom_to_max + BytesPerWord;
4025 size_t overflow_word_size = overflow_size / BytesPerWord;
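// Note: overflow_word_size describes a range that wraps past the end of the
// address space, so a naive bottom() + word_size * BytesPerWord comparison
// would overflow and wrongly appear to fit; is_available() must reject it.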
4026
4027 // Check that is_available can handle the overflow.
4028 assert_is_available_negative(overflow_word_size);
4029 }
4030
4031 static void test_is_available() {
4032 TestVirtualSpaceNodeTest::test_is_available_positive();
4033 TestVirtualSpaceNodeTest::test_is_available_negative();
4034 TestVirtualSpaceNodeTest::test_is_available_overflow();
4035 }
4036 };
4037
4038 void TestVirtualSpaceNode_test() {
4039 TestVirtualSpaceNodeTest::test();
4040 TestVirtualSpaceNodeTest::test_is_available();
4041 }
4042 #endif