/*
 * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
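// A chunk's word size maps it to one of the free lists below via
// list_index(); anything larger than the medium chunk size is treated
// as humongous and kept in _humongous_dictionary instead.
// Illustrative use (a sketch, not an actual call site): a SpaceManager
// needing a chunk would first try the global freelist, e.g.
//   Metachunk* c = chunk_manager->chunk_freelist_allocate(word_size);
// and fall back to carving a new chunk out of a VirtualSpaceNode only
// if this returns NULL.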
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Total words and count of the free chunks in all lists of this ChunkManager
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
           _free_chunks_total > 0,
           "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Add or delete (return) a chunk to/from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks lists
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks lists
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, "Bad index: %d", (int) index)

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

class SmallBlocks : public CHeapObj<mtClass> {
  const static uint _small_block_max_size = sizeof(TreeChunk<Metablock, FreeList<Metablock> >)/HeapWordSize;
  const static uint _small_block_min_size = sizeof(Metablock)/HeapWordSize;

 private:
  FreeList<Metablock> _small_lists[_small_block_max_size - _small_block_min_size];

  FreeList<Metablock>& list_at(size_t word_size) {
    assert(word_size >= _small_block_min_size, "There are no metaspace objects less than %u words", _small_block_min_size);
    return _small_lists[word_size - _small_block_min_size];
  }

 public:
  SmallBlocks() {
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      _small_lists[k].set_size(i);
    }
  }

  size_t total_size() const {
    size_t result = 0;
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      result = result + _small_lists[k].count() * _small_lists[k].size();
    }
    return result;
  }

  static uint small_block_max_size() { return _small_block_max_size; }
  static uint small_block_min_size() { return _small_block_min_size; }

  MetaWord* get_block(size_t word_size) {
    if (list_at(word_size).count() > 0) {
      MetaWord* new_block = (MetaWord*) list_at(word_size).get_chunk_at_head();
      return new_block;
    } else {
      return NULL;
    }
  }
  void return_block(Metablock* free_chunk, size_t word_size) {
    list_at(word_size).return_chunk_at_head(free_chunk, false);
    assert(list_at(word_size).count() > 0, "Should have a chunk");
  }

  void print_on(outputStream* st) const {
    st->print_cr("SmallBlocks:");
    for (uint i = _small_block_min_size; i < _small_block_max_size; i++) {
      uint k = i - _small_block_min_size;
      st->print_cr("small_lists size " SIZE_FORMAT " count " SIZE_FORMAT, _small_lists[k].size(), _small_lists[k].count());
    }
  }
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
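// Blocks returned to this freelist that are smaller than
// SmallBlocks::small_block_max_size() words are kept on the per-size
// small lists; larger blocks go into the tree dictionary (see
// BlockFreelist::return_block() below).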
class BlockFreelist : public CHeapObj<mtClass> {
  BlockTreeDictionary* const _dictionary;
  SmallBlocks* _small_blocks;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }
  SmallBlocks* small_blocks() {
    if (_small_blocks == NULL) {
      _small_blocks = new SmallBlocks();
    }
    return _small_blocks;
  }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() const {
    size_t result = dictionary()->total_size();
    if (_small_blocks != NULL) {
      result = result + _small_blocks->total_size();
    }
    return result;
  }

  static size_t min_dictionary_size() { return TreeChunk<Metablock, FreeList<Metablock> >::min_size(); }
  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to Virtualspace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
         PTR_FORMAT " is not aligned to "     \
         SIZE_FORMAT, p2i(ptr), alignment)

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
         SIZE_FORMAT " is not aligned to "      \
         SIZE_FORMAT, size, alignment)


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    if (!_rs.is_reserved()) {
      vm_exit_during_initialization("Unable to allocate memory for shared space",
        err_msg(SIZE_FORMAT " bytes.", bytes));
    }
    MetaspaceShared::initialize_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist* _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const { return _block_freelists; }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
  size_t small_chunk_size()       { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  size_t medium_chunk_size()      { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size()    { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);
  // Allocates a block from a small chunk
  MetaWord* get_small_chunk_and_allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // compute the size of the chunk to get (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  // This adjusts the size given to be greater than the minimum allocation size in
  // words for data in metaspace.  Essentially the minimum size is currently 3 words.
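  // For example (a sketch, assuming a 64-bit VM with 8-byte words and a
  // 3-word Metablock): a 1-word request gives byte_size = 8, which is
  // raised to sizeof(Metablock) = 24 bytes and returned as 3 words.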
  size_t get_allocation_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         "Inconsistency in container_count _container_count " UINTX_FORMAT
         " container_count_slow() " UINTX_FORMAT, _container_count, container_count_slow());
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()), _small_blocks(NULL) {}

BlockFreelist::~BlockFreelist() {
  delete _dictionary;
  if (_small_blocks != NULL) {
    delete _small_blocks;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never return dark matter");

  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (word_size < SmallBlocks::small_block_max_size()) {
    small_blocks()->return_block(free_chunk, word_size);
  } else {
    dictionary()->return_chunk(free_chunk);
  }
  log_trace(gc, metaspace, freelist, blocks)("returning block at " INTPTR_FORMAT " size = "
            SIZE_FORMAT, p2i(free_chunk), word_size);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  assert(word_size >= SmallBlocks::small_block_min_size(), "never get dark matter");

  // Try small_blocks first.
  if (word_size < SmallBlocks::small_block_max_size()) {
    // Don't create small_blocks() until needed.  small_blocks() allocates the small block list for
    // this space manager.
    MetaWord* new_block = (MetaWord*) small_blocks()->get_block(word_size);
    if (new_block != NULL) {
      log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
                p2i(new_block), word_size);
      return new_block;
    }
  }

  if (word_size < BlockFreelist::min_dictionary_size()) {
    // If allocation in small blocks fails, this is Dark Matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= SmallBlocks::small_block_min_size()) {
    return_block(new_block + word_size, unused);
  }

  log_trace(gc, metaspace, freelist, blocks)("getting block at " INTPTR_FORMAT " size = " SIZE_FORMAT,
            p2i(new_block), word_size);
  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  dictionary()->print_free_lists(st);
  if (_small_blocks != NULL) {
    _small_blocks->print_on(st);
  }
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    Log(gc, metaspace, freelist) log;
    log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
    // Dump some information about the virtual space that is nearly full
    ResourceMark rm;
    print_on(log.debug_stream());
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           "Reserved start was not set properly " PTR_FORMAT
           " != " PTR_FORMAT, p2i(reserved()->start()), p2i(_rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           "Reserved size was not set properly " SIZE_FORMAT
           " != " SIZE_FORMAT, reserved()->word_size(),
           _rs.size() / BytesPerWord);
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, " SIZE_FORMAT_W(3) "%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(vs), capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               p2i(bottom()), p2i(top()), p2i(end()),
               p2i(vs->high_boundary()));
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                        \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
         MetaspaceAux::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  DEBUG_ONLY(verify_container_count();)
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
    }
    DEBUG_ONLY(verify_container_count();)
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
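// Note that this path is never taken for the compressed class space, which
// reserves its single VirtualSpaceNode up front via the
// VirtualSpaceList(ReservedSpace) constructor above.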
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (log_is_enabled(Trace, gc, metaspace)) {
    Log(gc, metaspace) log;
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(log.trace_stream());
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
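  // Grow by at least VirtualSpaceSize (256K words) so that a small request
  // does not leave us with a correspondingly small VirtualSpaceNode.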
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it"
             " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st);
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink below the initial metaspace size (MetaspaceSize)
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("    used_after_gc          : %6.1fKB", used_after_gc / (double) K);


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
1609     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
1610     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
1611     // Don't expand unless it's significant
1612     if (expand_bytes >= MinMetaspaceExpansion) {
1613       size_t new_capacity_until_GC = 0;
1614       bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
1615       assert(succeeded, "Should always successfully increment HWM when at safepoint");
1616
1617       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
1618                                                new_capacity_until_GC,
1619                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
1620       log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
1621                                minimum_desired_capacity / (double) K,
1622                                expand_bytes / (double) K,
1623                                MinMetaspaceExpansion / (double) K,
1624                                new_capacity_until_GC / (double) K);
1625     }
1626     return;
1627   }
1628
1629   // No expansion, now see if we want to shrink
1630   // We would never want to shrink more than this
1631   assert(capacity_until_GC >= minimum_desired_capacity,
1632          SIZE_FORMAT " >= " SIZE_FORMAT,
1633          capacity_until_GC, minimum_desired_capacity);
1634   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
1635
1636   // Should shrinking be considered?
1637   if (MaxMetaspaceFreeRatio < 100) {
1638     const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
1639     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1640     const double max_tmp = used_after_gc / minimum_used_percentage;
1641     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
1642     maximum_desired_capacity = MAX2(maximum_desired_capacity,
1643                                     MetaspaceSize);
1644     log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
1645                              maximum_free_percentage, minimum_used_percentage);
1646     log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
1647                              minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
1648
1649     assert(minimum_desired_capacity <= maximum_desired_capacity,
1650            "sanity check");
1651
1652     if (capacity_until_GC > maximum_desired_capacity) {
1653       // Capacity too large, compute shrinking size
1654       shrink_bytes = capacity_until_GC - maximum_desired_capacity;
1655       // We don't want to shrink all the way back to initSize if people call
1656       // System.gc(), because some programs do that between "phases" and then
1657       // we'd just have to grow the heap up again for the next phase. So we
1658       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
1659       // on the third call, and 100% by the fourth call. But if we recompute
1660       // size without shrinking, it goes back to 0%.
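      // Illustrative walk-through of the damping below (editorial note):
      // with a steady 100M surplus, successive calls shrink by 0M, then
      // 10M, then 40M, then the full remaining amount, as _shrink_factor
      // steps 0 -> 10 -> 40 -> 100 (factor * 4, capped at 100).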
1661 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1662 1663 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); 1664 1665 assert(shrink_bytes <= max_shrink_bytes, 1666 "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1667 shrink_bytes, max_shrink_bytes); 1668 if (current_shrink_factor == 0) { 1669 _shrink_factor = 10; 1670 } else { 1671 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); 1672 } 1673 log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK", 1674 MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); 1675 log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK", 1676 shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); 1677 } 1678 } 1679 1680 // Don't shrink unless it's significant 1681 if (shrink_bytes >= MinMetaspaceExpansion && 1682 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1683 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 1684 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1685 new_capacity_until_GC, 1686 MetaspaceGCThresholdUpdater::ComputeNewSize); 1687 } 1688 } 1689 1690 // Metadebug methods 1691 1692 void Metadebug::init_allocation_fail_alot_count() { 1693 if (MetadataAllocationFailALot) { 1694 _allocation_fail_alot_count = 1695 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1696 } 1697 } 1698 1699 #ifdef ASSERT 1700 bool Metadebug::test_metadata_failure() { 1701 if (MetadataAllocationFailALot && 1702 Threads::is_vm_complete()) { 1703 if (_allocation_fail_alot_count > 0) { 1704 _allocation_fail_alot_count--; 1705 } else { 1706 log_trace(gc, metaspace, freelist)("Metadata allocation failing for MetadataAllocationFailALot"); 1707 init_allocation_fail_alot_count(); 1708 return true; 1709 } 1710 } 1711 return false; 1712 } 1713 #endif 1714 1715 // ChunkManager methods 1716 1717 size_t ChunkManager::free_chunks_total_words() { 1718 return _free_chunks_total; 1719 } 1720 1721 size_t ChunkManager::free_chunks_total_bytes() { 1722 return free_chunks_total_words() * BytesPerWord; 1723 } 1724 1725 size_t ChunkManager::free_chunks_count() { 1726 #ifdef ASSERT 1727 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1728 MutexLockerEx cl(SpaceManager::expand_lock(), 1729 Mutex::_no_safepoint_check_flag); 1730 // This lock is only needed in debug because the verification 1731 // of the _free_chunks_totals walks the list of free chunks 1732 slow_locked_verify_free_chunks_count(); 1733 } 1734 #endif 1735 return _free_chunks_count; 1736 } 1737 1738 void ChunkManager::locked_verify_free_chunks_total() { 1739 assert_lock_strong(SpaceManager::expand_lock()); 1740 assert(sum_free_chunks() == _free_chunks_total, 1741 "_free_chunks_total " SIZE_FORMAT " is not the" 1742 " same as sum " SIZE_FORMAT, _free_chunks_total, 1743 sum_free_chunks()); 1744 } 1745 1746 void ChunkManager::verify_free_chunks_total() { 1747 MutexLockerEx cl(SpaceManager::expand_lock(), 1748 Mutex::_no_safepoint_check_flag); 1749 locked_verify_free_chunks_total(); 1750 } 1751 1752 void ChunkManager::locked_verify_free_chunks_count() { 1753 assert_lock_strong(SpaceManager::expand_lock()); 1754 assert(sum_free_chunks_count() == _free_chunks_count, 1755 "_free_chunks_count " SIZE_FORMAT " is not the" 1756 " same as sum " SIZE_FORMAT, _free_chunks_count, 1757 sum_free_chunks_count()); 
1758 } 1759 1760 void ChunkManager::verify_free_chunks_count() { 1761 #ifdef ASSERT 1762 MutexLockerEx cl(SpaceManager::expand_lock(), 1763 Mutex::_no_safepoint_check_flag); 1764 locked_verify_free_chunks_count(); 1765 #endif 1766 } 1767 1768 void ChunkManager::verify() { 1769 MutexLockerEx cl(SpaceManager::expand_lock(), 1770 Mutex::_no_safepoint_check_flag); 1771 locked_verify(); 1772 } 1773 1774 void ChunkManager::locked_verify() { 1775 locked_verify_free_chunks_count(); 1776 locked_verify_free_chunks_total(); 1777 } 1778 1779 void ChunkManager::locked_print_free_chunks(outputStream* st) { 1780 assert_lock_strong(SpaceManager::expand_lock()); 1781 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1782 _free_chunks_total, _free_chunks_count); 1783 } 1784 1785 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) { 1786 assert_lock_strong(SpaceManager::expand_lock()); 1787 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1788 sum_free_chunks(), sum_free_chunks_count()); 1789 } 1790 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { 1791 return &_free_chunks[index]; 1792 } 1793 1794 // These methods that sum the free chunk lists are used in printing 1795 // methods that are used in product builds. 1796 size_t ChunkManager::sum_free_chunks() { 1797 assert_lock_strong(SpaceManager::expand_lock()); 1798 size_t result = 0; 1799 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1800 ChunkList* list = free_chunks(i); 1801 1802 if (list == NULL) { 1803 continue; 1804 } 1805 1806 result = result + list->count() * list->size(); 1807 } 1808 result = result + humongous_dictionary()->total_size(); 1809 return result; 1810 } 1811 1812 size_t ChunkManager::sum_free_chunks_count() { 1813 assert_lock_strong(SpaceManager::expand_lock()); 1814 size_t count = 0; 1815 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1816 ChunkList* list = free_chunks(i); 1817 if (list == NULL) { 1818 continue; 1819 } 1820 count = count + list->count(); 1821 } 1822 count = count + humongous_dictionary()->total_free_blocks(); 1823 return count; 1824 } 1825 1826 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) { 1827 ChunkIndex index = list_index(word_size); 1828 assert(index < HumongousIndex, "No humongous list"); 1829 return free_chunks(index); 1830 } 1831 1832 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { 1833 assert_lock_strong(SpaceManager::expand_lock()); 1834 1835 slow_locked_verify(); 1836 1837 Metachunk* chunk = NULL; 1838 if (list_index(word_size) != HumongousIndex) { 1839 ChunkList* free_list = find_free_chunks_list(word_size); 1840 assert(free_list != NULL, "Sanity check"); 1841 1842 chunk = free_list->head(); 1843 1844 if (chunk == NULL) { 1845 return NULL; 1846 } 1847 1848 // Remove the chunk as the head of the list. 
1849     free_list->remove_chunk(chunk);
1850
1851     log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1852                                        p2i(free_list), p2i(chunk), chunk->word_size());
1853   } else {
1854     chunk = humongous_dictionary()->get_chunk(
1855       word_size,
1856       FreeBlockDictionary<Metachunk>::atLeast);
1857
1858     if (chunk == NULL) {
1859       return NULL;
1860     }
1861
1862     log_debug(gc, metaspace, alloc)("Free list allocate humongous chunk size " SIZE_FORMAT " for requested size " SIZE_FORMAT " waste " SIZE_FORMAT,
1863                                     chunk->word_size(), word_size, chunk->word_size() - word_size);
1864   }
1865
1866   // Chunk is being removed from the chunks free list.
1867   dec_free_chunks_total(chunk->word_size());
1868
1869   // Remove it from the links to this freelist
1870   chunk->set_next(NULL);
1871   chunk->set_prev(NULL);
1872 #ifdef ASSERT
1873   // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1874   // work.
1875   chunk->set_is_tagged_free(false);
1876 #endif
1877   chunk->container()->inc_container_count();
1878
1879   slow_locked_verify();
1880   return chunk;
1881 }
1882
1883 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1884   assert_lock_strong(SpaceManager::expand_lock());
1885   slow_locked_verify();
1886
1887   // Take from the beginning of the list
1888   Metachunk* chunk = free_chunks_get(word_size);
1889   if (chunk == NULL) {
1890     return NULL;
1891   }
1892
1893   assert((word_size <= chunk->word_size()) ||
1894          (list_index(chunk->word_size()) == HumongousIndex),
1895          "Non-humongous variable sized chunk");
1896   Log(gc, metaspace, freelist) log;
1897   if (log.is_debug()) {
1898     size_t list_count;
1899     if (list_index(word_size) < HumongousIndex) {
1900       ChunkList* list = find_free_chunks_list(word_size);
1901       list_count = list->count();
1902     } else {
1903       list_count = humongous_dictionary()->total_count();
1904     }
1905     log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1906               p2i(this), p2i(chunk), chunk->word_size(), list_count);
1907     ResourceMark rm;
1908     locked_print_free_chunks(log.debug_stream());
1909   }
1910
1911   return chunk;
1912 }
1913
1914 void ChunkManager::print_on(outputStream* out) const {
1915   const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics(out);
1916 }
1917
1918 // SpaceManager methods
1919
1920 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1921                                            size_t* chunk_word_size,
1922                                            size_t* class_chunk_word_size) {
1923   switch (type) {
1924   case Metaspace::BootMetaspaceType:
1925     *chunk_word_size = Metaspace::first_chunk_word_size();
1926     *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1927     break;
1928   case Metaspace::ROMetaspaceType:
1929     *chunk_word_size = SharedReadOnlySize / wordSize;
1930     *class_chunk_word_size = ClassSpecializedChunk;
1931     break;
1932   case Metaspace::ReadWriteMetaspaceType:
1933     *chunk_word_size = SharedReadWriteSize / wordSize;
1934     *class_chunk_word_size = ClassSpecializedChunk;
1935     break;
1936   case Metaspace::AnonymousMetaspaceType:
1937   case Metaspace::ReflectionMetaspaceType:
1938     *chunk_word_size = SpecializedChunk;
1939     *class_chunk_word_size = ClassSpecializedChunk;
1940     break;
1941   default:
1942     *chunk_word_size = SmallChunk;
1943     *class_chunk_word_size = ClassSmallChunk;
1944     break;
1945   }
1946   assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1947          "Initial chunk sizes bad: data " SIZE_FORMAT
1948          " class " SIZE_FORMAT,
1949          *chunk_word_size, *class_chunk_word_size);
1950 }
1951
1952 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1953   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1954   size_t free = 0;
1955   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1956     Metachunk* chunk = chunks_in_use(i);
1957     while (chunk != NULL) {
1958       free += chunk->free_word_size();
1959       chunk = chunk->next();
1960     }
1961   }
1962   return free;
1963 }
1964
1965 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1966   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1967   size_t result = 0;
1968   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1969     result += sum_waste_in_chunks_in_use(i);
1970   }
1971
1972   return result;
1973 }
1974
1975 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1976   size_t result = 0;
1977   Metachunk* chunk = chunks_in_use(index);
1978   // Count the free space in all the chunks but not the
1979   // current chunk from which allocations are still being done.
1980   while (chunk != NULL) {
1981     if (chunk != current_chunk()) {
1982       result += chunk->free_word_size();
1983     }
1984     chunk = chunk->next();
1985   }
1986   return result;
1987 }
1988
1989 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1990   // For CMS use "allocated_chunks_words()" which does not need the
1991   // Metaspace lock. For the other collectors sum over the
1992   // lists. Use both methods as a check that "allocated_chunks_words()"
1993   // is correct. That is, sum_capacity_in_chunks_in_use() is too expensive
1994   // to use in the product and allocated_chunks_words() should be used
1995   // but allow for checking that allocated_chunks_words() returns the same
1996   // value as sum_capacity_in_chunks_in_use() which is the definitive
1997   // answer.
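  // (Editorial note: the expensive summation is indeed still cross-checked
  // against allocated_chunks_words() in debug paths; see the assert at the
  // top of ~SpaceManager() later in this file.)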
1998   if (UseConcMarkSweepGC) {
1999     return allocated_chunks_words();
2000   } else {
2001     MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2002     size_t sum = 0;
2003     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2004       Metachunk* chunk = chunks_in_use(i);
2005       while (chunk != NULL) {
2006         sum += chunk->word_size();
2007         chunk = chunk->next();
2008       }
2009     }
2010     return sum;
2011   }
2012 }
2013
2014 size_t SpaceManager::sum_count_in_chunks_in_use() {
2015   size_t count = 0;
2016   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2017     count = count + sum_count_in_chunks_in_use(i);
2018   }
2019
2020   return count;
2021 }
2022
2023 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
2024   size_t count = 0;
2025   Metachunk* chunk = chunks_in_use(i);
2026   while (chunk != NULL) {
2027     count++;
2028     chunk = chunk->next();
2029   }
2030   return count;
2031 }
2032
2033
2034 size_t SpaceManager::sum_used_in_chunks_in_use() const {
2035   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2036   size_t used = 0;
2037   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2038     Metachunk* chunk = chunks_in_use(i);
2039     while (chunk != NULL) {
2040       used += chunk->used_word_size();
2041       chunk = chunk->next();
2042     }
2043   }
2044   return used;
2045 }
2046
2047 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
2048
2049   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2050     Metachunk* chunk = chunks_in_use(i);
2051     st->print("SpaceManager: %s " PTR_FORMAT,
2052               chunk_size_name(i), p2i(chunk));
2053     if (chunk != NULL) {
2054       st->print_cr(" free " SIZE_FORMAT,
2055                    chunk->free_word_size());
2056     } else {
2057       st->cr();
2058     }
2059   }
2060
2061   chunk_manager()->locked_print_free_chunks(st);
2062   chunk_manager()->locked_print_sum_free_chunks(st);
2063 }
2064
2065 size_t SpaceManager::calc_chunk_size(size_t word_size) {
2066
2067   // Decide between a small chunk and a medium chunk. Up to
2068   // _small_chunk_limit small chunks can be allocated.
2069   // After that a medium chunk is preferred.
2070   size_t chunk_word_size;
2071   if (chunks_in_use(MediumIndex) == NULL &&
2072       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
2073     chunk_word_size = (size_t) small_chunk_size();
2074     if (word_size + Metachunk::overhead() > small_chunk_size()) {
2075       chunk_word_size = medium_chunk_size();
2076     }
2077   } else {
2078     chunk_word_size = medium_chunk_size();
2079   }
2080
2081   // Might still need a humongous chunk. Enforce
2082   // humongous allocation sizes to be aligned up to
2083   // the smallest chunk size.
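  // Worked example (editorial, illustrative; assumes smallest_chunk_size()
  // is SpecializedChunk == 128 words): a humongous request of 10000 words
  // becomes align_size_up(10000 + Metachunk::overhead(), 128), i.e. the
  // next multiple of 128 words at or above the padded size.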
2084 size_t if_humongous_sized_chunk = 2085 align_size_up(word_size + Metachunk::overhead(), 2086 smallest_chunk_size()); 2087 chunk_word_size = 2088 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2089 2090 assert(!SpaceManager::is_humongous(word_size) || 2091 chunk_word_size == if_humongous_sized_chunk, 2092 "Size calculation is wrong, word_size " SIZE_FORMAT 2093 " chunk_word_size " SIZE_FORMAT, 2094 word_size, chunk_word_size); 2095 Log(gc, metaspace, alloc) log; 2096 if (log.is_debug() && SpaceManager::is_humongous(word_size)) { 2097 log.debug("Metadata humongous allocation:"); 2098 log.debug(" word_size " PTR_FORMAT, word_size); 2099 log.debug(" chunk_word_size " PTR_FORMAT, chunk_word_size); 2100 log.debug(" chunk overhead " PTR_FORMAT, Metachunk::overhead()); 2101 } 2102 return chunk_word_size; 2103 } 2104 2105 void SpaceManager::track_metaspace_memory_usage() { 2106 if (is_init_completed()) { 2107 if (is_class()) { 2108 MemoryService::track_compressed_class_memory_usage(); 2109 } 2110 MemoryService::track_metaspace_memory_usage(); 2111 } 2112 } 2113 2114 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2115 assert(vs_list()->current_virtual_space() != NULL, 2116 "Should have been set"); 2117 assert(current_chunk() == NULL || 2118 current_chunk()->allocate(word_size) == NULL, 2119 "Don't need to expand"); 2120 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2121 2122 if (log_is_enabled(Trace, gc, metaspace, freelist)) { 2123 size_t words_left = 0; 2124 size_t words_used = 0; 2125 if (current_chunk() != NULL) { 2126 words_left = current_chunk()->free_word_size(); 2127 words_used = current_chunk()->used_word_size(); 2128 } 2129 log_trace(gc, metaspace, freelist)("SpaceManager::grow_and_allocate for " SIZE_FORMAT " words " SIZE_FORMAT " words used " SIZE_FORMAT " words left", 2130 word_size, words_used, words_left); 2131 } 2132 2133 // Get another chunk 2134 size_t grow_chunks_by_words = calc_chunk_size(word_size); 2135 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 2136 2137 MetaWord* mem = NULL; 2138 2139 // If a chunk was available, add it to the in-use chunk list 2140 // and do an allocation from it. 2141 if (next != NULL) { 2142 // Add to this manager's list of chunks in use. 2143 add_chunk(next, false); 2144 mem = next->allocate(word_size); 2145 } 2146 2147 // Track metaspace memory usage statistic. 2148 track_metaspace_memory_usage(); 2149 2150 return mem; 2151 } 2152 2153 void SpaceManager::print_on(outputStream* st) const { 2154 2155 for (ChunkIndex i = ZeroIndex; 2156 i < NumberOfInUseLists ; 2157 i = next_chunk_index(i) ) { 2158 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " SIZE_FORMAT, 2159 p2i(chunks_in_use(i)), 2160 chunks_in_use(i) == NULL ? 
0 : chunks_in_use(i)->word_size()); 2161 } 2162 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT 2163 " Humongous " SIZE_FORMAT, 2164 sum_waste_in_chunks_in_use(SmallIndex), 2165 sum_waste_in_chunks_in_use(MediumIndex), 2166 sum_waste_in_chunks_in_use(HumongousIndex)); 2167 // block free lists 2168 if (block_freelists() != NULL) { 2169 st->print_cr("total in block free lists " SIZE_FORMAT, 2170 block_freelists()->total_size()); 2171 } 2172 } 2173 2174 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, 2175 Mutex* lock) : 2176 _mdtype(mdtype), 2177 _allocated_blocks_words(0), 2178 _allocated_chunks_words(0), 2179 _allocated_chunks_count(0), 2180 _block_freelists(NULL), 2181 _lock(lock) 2182 { 2183 initialize(); 2184 } 2185 2186 void SpaceManager::inc_size_metrics(size_t words) { 2187 assert_lock_strong(SpaceManager::expand_lock()); 2188 // Total of allocated Metachunks and allocated Metachunks count 2189 // for each SpaceManager 2190 _allocated_chunks_words = _allocated_chunks_words + words; 2191 _allocated_chunks_count++; 2192 // Global total of capacity in allocated Metachunks 2193 MetaspaceAux::inc_capacity(mdtype(), words); 2194 // Global total of allocated Metablocks. 2195 // used_words_slow() includes the overhead in each 2196 // Metachunk so include it in the used when the 2197 // Metachunk is first added (so only added once per 2198 // Metachunk). 2199 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); 2200 } 2201 2202 void SpaceManager::inc_used_metrics(size_t words) { 2203 // Add to the per SpaceManager total 2204 Atomic::add_ptr(words, &_allocated_blocks_words); 2205 // Add to the global total 2206 MetaspaceAux::inc_used(mdtype(), words); 2207 } 2208 2209 void SpaceManager::dec_total_from_size_metrics() { 2210 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); 2211 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); 2212 // Also deduct the overhead per Metachunk 2213 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); 2214 } 2215 2216 void SpaceManager::initialize() { 2217 Metadebug::init_allocation_fail_alot_count(); 2218 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2219 _chunks_in_use[i] = NULL; 2220 } 2221 _current_chunk = NULL; 2222 log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this)); 2223 } 2224 2225 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { 2226 if (chunks == NULL) { 2227 return; 2228 } 2229 ChunkList* list = free_chunks(index); 2230 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); 2231 assert_lock_strong(SpaceManager::expand_lock()); 2232 Metachunk* cur = chunks; 2233 2234 // This returns chunks one at a time. 
If a new
2235 // class List can be created that is a base class
2236 // of FreeList then something like FreeList::prepend()
2237 // can be used in place of this loop
2238   while (cur != NULL) {
2239     assert(cur->container() != NULL, "Container should have been set");
2240     cur->container()->dec_container_count();
2241     // Capture the next link before it is changed
2242     // by the call to return_chunk_at_head();
2243     Metachunk* next = cur->next();
2244     DEBUG_ONLY(cur->set_is_tagged_free(true);)
2245     NOT_PRODUCT(cur->mangle(badMetaWordVal);)
2246     list->return_chunk_at_head(cur);
2247     cur = next;
2248   }
2249 }
2250
2251 SpaceManager::~SpaceManager() {
2252   // This acquires this->_lock, which can't be done while holding expand_lock()
2253   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2254          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2255          " allocated_chunks_words() " SIZE_FORMAT,
2256          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2257
2258   MutexLockerEx fcl(SpaceManager::expand_lock(),
2259                     Mutex::_no_safepoint_check_flag);
2260
2261   chunk_manager()->slow_locked_verify();
2262
2263   dec_total_from_size_metrics();
2264
2265   Log(gc, metaspace, freelist) log;
2266   if (log.is_trace()) {
2267     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2268     ResourceMark rm;
2269     locked_print_chunks_in_use_on(log.trace_stream());
2270     if (block_freelists() != NULL) {
2271       block_freelists()->print_on(log.trace_stream());
2272     }
2273   }
2274
2275   // Have to update before the chunks_in_use lists are emptied
2276   // below.
2277   chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2278                                          sum_count_in_chunks_in_use());
2279
2280   // Add all the chunks in use by this space manager
2281   // to the global list of free chunks.
2282
2283   // Follow each list of chunks-in-use and add them to the
2284   // free lists. Each list is NULL terminated.
2285
2286   for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2287     log.trace("returned " SIZE_FORMAT " %s chunks to freelist", sum_count_in_chunks_in_use(i), chunk_size_name(i));
2288     Metachunk* chunks = chunks_in_use(i);
2289     chunk_manager()->return_chunks(i, chunks);
2290     set_chunks_in_use(i, NULL);
2291     log.trace("updated freelist count " SSIZE_FORMAT " %s", chunk_manager()->free_chunks(i)->count(), chunk_size_name(i));
2292     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2293   }
2294
2295   // The medium chunk case may be optimized by passing the head and
2296   // tail of the medium chunk list to add_at_head(). The tail is often
2297   // the current chunk but there are probably exceptions.
2298
2299   // Humongous chunks
2300   log.trace("returned " SIZE_FORMAT " %s humongous chunks to dictionary",
2301             sum_count_in_chunks_in_use(HumongousIndex), chunk_size_name(HumongousIndex));
2302   log.trace("Humongous chunk dictionary: ");
2303   // Humongous chunks are never the current chunk.
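  // (Editorial note: humongous chunks go back to the size-keyed
  // _humongous_dictionary rather than to one of the fixed-size freelists,
  // since their sizes vary; free_chunks_get() takes the same split.)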
2304   Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
2305
2306   while (humongous_chunks != NULL) {
2307     DEBUG_ONLY(humongous_chunks->set_is_tagged_free(true);)
2308     NOT_PRODUCT(humongous_chunks->mangle(badMetaWordVal);)
2309     log.trace(PTR_FORMAT " (" SIZE_FORMAT ") ", p2i(humongous_chunks), humongous_chunks->word_size());
2310     assert(humongous_chunks->word_size() == (size_t)
2311            align_size_up(humongous_chunks->word_size(),
2312                          smallest_chunk_size()),
2313            "Humongous chunk size is wrong: word size " SIZE_FORMAT
2314            " granularity " SIZE_FORMAT,
2315            humongous_chunks->word_size(), smallest_chunk_size());
2316     Metachunk* next_humongous_chunks = humongous_chunks->next();
2317     humongous_chunks->container()->dec_container_count();
2318     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
2319     humongous_chunks = next_humongous_chunks;
2320   }
2321   log.trace("updated dictionary count " SIZE_FORMAT " %s", chunk_manager()->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex));
2322   chunk_manager()->slow_locked_verify();
2323
2324   if (_block_freelists != NULL) {
2325     delete _block_freelists;
2326   }
2327 }
2328
2329 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
2330   switch (index) {
2331     case SpecializedIndex:
2332       return "Specialized";
2333     case SmallIndex:
2334       return "Small";
2335     case MediumIndex:
2336       return "Medium";
2337     case HumongousIndex:
2338       return "Humongous";
2339     default:
2340       return NULL;
2341   }
2342 }
2343
2344 ChunkIndex ChunkManager::list_index(size_t size) {
2345   switch (size) {
2346     case SpecializedChunk:
2347       assert(SpecializedChunk == ClassSpecializedChunk,
2348              "Need branch for ClassSpecializedChunk");
2349       return SpecializedIndex;
2350     case SmallChunk:
2351     case ClassSmallChunk:
2352       return SmallIndex;
2353     case MediumChunk:
2354     case ClassMediumChunk:
2355       return MediumIndex;
2356     default:
2357       assert(size > MediumChunk || size > ClassMediumChunk,
2358              "Not a humongous chunk");
2359       return HumongousIndex;
2360   }
2361 }
2362
2363 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
2364   assert_lock_strong(_lock);
2365   // Allocations and deallocations are in raw_word_size
2366   size_t raw_word_size = get_allocation_word_size(word_size);
2367   // Lazily create a block_freelist
2368   if (block_freelists() == NULL) {
2369     _block_freelists = new BlockFreelist();
2370   }
2371   block_freelists()->return_block(p, raw_word_size);
2372 }
2373
2374 // Adds a chunk to the list of chunks in use.
2375 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
2376
2377   assert(new_chunk != NULL, "Should not be NULL");
2378   assert(new_chunk->next() == NULL, "Should not be on a list");
2379
2380   new_chunk->reset_empty();
2381
2382   // Find the correct list and set the current
2383   // chunk for that list.
2384   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
2385
2386   if (index != HumongousIndex) {
2387     retire_current_chunk();
2388     set_current_chunk(new_chunk);
2389     new_chunk->set_next(chunks_in_use(index));
2390     set_chunks_in_use(index, new_chunk);
2391   } else {
2392     // For null class loader data and DumpSharedSpaces, the first chunk isn't
2393     // small, so small will be null. Link this first chunk as the current
2394     // chunk.
2395     if (make_current) {
2396       // Set as the current chunk but otherwise treat as a humongous chunk.
2397       set_current_chunk(new_chunk);
2398     }
2399     // Link at head. The _current_chunk only points to a humongous chunk for
2400     // the null class loader metaspace (class and data virtual space managers),
2401     // and even then it will not point to the tail
2402     // of the humongous chunks list.
2403     new_chunk->set_next(chunks_in_use(HumongousIndex));
2404     set_chunks_in_use(HumongousIndex, new_chunk);
2405
2406     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2407   }
2408
2409   // Add to the running sum of capacity
2410   inc_size_metrics(new_chunk->word_size());
2411
2412   assert(new_chunk->is_empty(), "Not ready for reuse");
2413   Log(gc, metaspace, freelist) log;
2414   if (log.is_trace()) {
2415     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT ") ", sum_count_in_chunks_in_use());
2416     ResourceMark rm;
2417     outputStream* out = log.trace_stream();
2418     new_chunk->print_on(out);
2419     chunk_manager()->locked_print_free_chunks(out);
2420   }
2421 }
2422
2423 void SpaceManager::retire_current_chunk() {
2424   if (current_chunk() != NULL) {
2425     size_t remaining_words = current_chunk()->free_word_size();
2426     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2427       MetaWord* ptr = current_chunk()->allocate(remaining_words);
2428       deallocate(ptr, remaining_words);
2429       inc_used_metrics(remaining_words);
2430     }
2431   }
2432 }
2433
2434 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2435                                        size_t grow_chunks_by_words) {
2436   // Get a chunk from the chunk freelist
2437   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2438
2439   if (next == NULL) {
2440     next = vs_list()->get_new_chunk(word_size,
2441                                     grow_chunks_by_words,
2442                                     medium_chunk_bunch());
2443   }
2444
2445   Log(gc, metaspace, alloc) log;
2446   if (log.is_debug() && next != NULL &&
2447       SpaceManager::is_humongous(next->word_size())) {
2448     log.debug(" new humongous chunk word size " PTR_FORMAT, next->word_size());
2449   }
2450
2451   return next;
2452 }
2453
2454 /*
2455  * The policy is to allocate up to _small_chunk_limit small chunks
2456  * after which only medium chunks are allocated. This is done to
2457  * reduce fragmentation. In some cases, this can result in a lot
2458  * of small chunks being allocated to the point where it's not
2459  * possible to expand. If this happens, there may be no medium chunks
2460  * available and OOME would be thrown. Instead of doing that,
2461  * if the allocation request size fits in a small chunk, an attempt
2462  * will be made to allocate a small chunk.
2463  */
2464 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2465   size_t raw_word_size = get_allocation_word_size(word_size);
2466
2467   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
2468     return NULL;
2469   }
2470
2471   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2472   MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2473
2474   Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2475
2476   MetaWord* mem = NULL;
2477
2478   if (chunk != NULL) {
2479     // Add chunk to the in-use chunk list and do an allocation from it.
2480     // Add to this manager's list of chunks in use.
2481     add_chunk(chunk, false);
2482     mem = chunk->allocate(raw_word_size);
2483
2484     inc_used_metrics(raw_word_size);
2485
2486     // Track metaspace memory usage statistic.
2487     track_metaspace_memory_usage();
2488   }
2489
2490   return mem;
2491 }
2492
2493 MetaWord* SpaceManager::allocate(size_t word_size) {
2494   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2495   size_t raw_word_size = get_allocation_word_size(word_size);
2496   BlockFreelist* fl = block_freelists();
2497   MetaWord* p = NULL;
2498   // Allocation from the dictionary is expensive in the sense that
2499   // the dictionary has to be searched for a size. Don't allocate
2500   // from the dictionary until it starts to get fat. Is this
2501   // a reasonable policy? Maybe a skinny dictionary is fast enough
2502   // for allocations. Do some profiling. JJJ
2503   if (fl != NULL && fl->total_size() > allocation_from_dictionary_limit) {
2504     p = fl->get_block(raw_word_size);
2505   }
2506   if (p == NULL) {
2507     p = allocate_work(raw_word_size);
2508   }
2509
2510   return p;
2511 }
2512
2513 // Returns the address of space allocated for "word_size".
2514 // This method does not know about blocks (Metablocks)
2515 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2516   assert_lock_strong(_lock);
2517 #ifdef ASSERT
2518   if (Metadebug::test_metadata_failure()) {
2519     return NULL;
2520   }
2521 #endif
2522   // Is there space in the current chunk?
2523   MetaWord* result = NULL;
2524
2525   // For DumpSharedSpaces, only allocate out of the current chunk which is
2526   // never null because we gave it the size we wanted. Caller reports out
2527   // of memory if this returns null.
2528   if (DumpSharedSpaces) {
2529     assert(current_chunk() != NULL, "should never happen");
2530     inc_used_metrics(word_size);
2531     return current_chunk()->allocate(word_size); // caller handles null result
2532   }
2533
2534   if (current_chunk() != NULL) {
2535     result = current_chunk()->allocate(word_size);
2536   }
2537
2538   if (result == NULL) {
2539     result = grow_and_allocate(word_size);
2540   }
2541
2542   if (result != NULL) {
2543     inc_used_metrics(word_size);
2544     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
2545            "Head of the list is being allocated");
2546   }
2547
2548   return result;
2549 }
2550
2551 void SpaceManager::verify() {
2552   // If there are blocks in the dictionary, then
2553   // verification of chunks does not work since
2554   // being in the dictionary alters a chunk.
2555   if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
2556     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
2557       Metachunk* curr = chunks_in_use(i);
2558       while (curr != NULL) {
2559         curr->verify();
2560         verify_chunk_size(curr);
2561         curr = curr->next();
2562       }
2563     }
2564   }
2565 }
2566
2567 void SpaceManager::verify_chunk_size(Metachunk* chunk) {
2568   assert(is_humongous(chunk->word_size()) ||
2569          chunk->word_size() == medium_chunk_size() ||
2570          chunk->word_size() == small_chunk_size() ||
2571          chunk->word_size() == specialized_chunk_size(),
2572          "Chunk size is wrong");
2573   return;
2574 }
2575
2576 #ifdef ASSERT
2577 void SpaceManager::verify_allocated_blocks_words() {
2578   // Verification is only guaranteed at a safepoint.
2579   assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
2580          "Verification can fail if the application is running");
2581   assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
2582          "allocation total is not consistent " SIZE_FORMAT
2583          " vs " SIZE_FORMAT,
2584          allocated_blocks_words(), sum_used_in_chunks_in_use());
2585 }
2586
2587 #endif
2588
2589 void SpaceManager::dump(outputStream* const out) const {
2590   size_t curr_total = 0;
2591   size_t waste = 0;
2592   uint i = 0;
2593   size_t used = 0;
2594   size_t capacity = 0;
2595
2596   // Add up statistics for all chunks in this SpaceManager.
2597   for (ChunkIndex index = ZeroIndex;
2598        index < NumberOfInUseLists;
2599        index = next_chunk_index(index)) {
2600     for (Metachunk* curr = chunks_in_use(index);
2601          curr != NULL;
2602          curr = curr->next()) {
2603       out->print("%d) ", i++);
2604       curr->print_on(out);
2605       curr_total += curr->word_size();
2606       used += curr->used_word_size();
2607       capacity += curr->word_size();
2608       waste += curr->free_word_size() + curr->overhead();
2609     }
2610   }
2611
2612   if (log_is_enabled(Trace, gc, metaspace, freelist)) {
2613     if (block_freelists() != NULL) block_freelists()->print_on(out);
2614   }
2615
2616   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
2617   // Free space isn't wasted.
2618   waste -= free;
2619
2620   out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT
2621                 " free " SIZE_FORMAT " capacity " SIZE_FORMAT
2622                 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
2623 }
2624
2625 // MetaspaceAux
2626
2627
2628 size_t MetaspaceAux::_capacity_words[] = {0, 0};
2629 size_t MetaspaceAux::_used_words[] = {0, 0};
2630
2631 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
2632   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
2633   return list == NULL ? 0 : list->free_bytes();
2634 }
2635
2636 size_t MetaspaceAux::free_bytes() {
2637   return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
2638 }
2639
2640 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
2641   assert_lock_strong(SpaceManager::expand_lock());
2642   assert(words <= capacity_words(mdtype),
2643          "About to decrement below 0: words " SIZE_FORMAT
2644          " is greater than _capacity_words[%u] " SIZE_FORMAT,
2645          words, mdtype, capacity_words(mdtype));
2646   _capacity_words[mdtype] -= words;
2647 }
2648
2649 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2650   assert_lock_strong(SpaceManager::expand_lock());
2651   // Needs to be atomic
2652   _capacity_words[mdtype] += words;
2653 }
2654
2655 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
2656   assert(words <= used_words(mdtype),
2657          "About to decrement below 0: words " SIZE_FORMAT
2658          " is greater than _used_words[%u] " SIZE_FORMAT,
2659          words, mdtype, used_words(mdtype));
2660   // For CMS, deallocation of the Metaspaces occurs during the
2661   // sweep which is a concurrent phase. Protection by the expand_lock()
2662   // is not enough since allocation is on a per Metaspace basis
2663   // and protected by the Metaspace lock.
2664   jlong minus_words = (jlong) - (jlong) words;
2665   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
2666 }
2667
2668 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
2669   // _used_words tracks allocations for
2670   // each piece of metadata.
Those allocations are 2671 // generally done concurrently by different application 2672 // threads so must be done atomically. 2673 Atomic::add_ptr(words, &_used_words[mdtype]); 2674 } 2675 2676 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2677 size_t used = 0; 2678 ClassLoaderDataGraphMetaspaceIterator iter; 2679 while (iter.repeat()) { 2680 Metaspace* msp = iter.get_next(); 2681 // Sum allocated_blocks_words for each metaspace 2682 if (msp != NULL) { 2683 used += msp->used_words_slow(mdtype); 2684 } 2685 } 2686 return used * BytesPerWord; 2687 } 2688 2689 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2690 size_t free = 0; 2691 ClassLoaderDataGraphMetaspaceIterator iter; 2692 while (iter.repeat()) { 2693 Metaspace* msp = iter.get_next(); 2694 if (msp != NULL) { 2695 free += msp->free_words_slow(mdtype); 2696 } 2697 } 2698 return free * BytesPerWord; 2699 } 2700 2701 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2702 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2703 return 0; 2704 } 2705 // Don't count the space in the freelists. That space will be 2706 // added to the capacity calculation as needed. 2707 size_t capacity = 0; 2708 ClassLoaderDataGraphMetaspaceIterator iter; 2709 while (iter.repeat()) { 2710 Metaspace* msp = iter.get_next(); 2711 if (msp != NULL) { 2712 capacity += msp->capacity_words_slow(mdtype); 2713 } 2714 } 2715 return capacity * BytesPerWord; 2716 } 2717 2718 size_t MetaspaceAux::capacity_bytes_slow() { 2719 #ifdef PRODUCT 2720 // Use capacity_bytes() in PRODUCT instead of this function. 2721 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2722 #endif 2723 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2724 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2725 assert(capacity_bytes() == class_capacity + non_class_capacity, 2726 "bad accounting: capacity_bytes() " SIZE_FORMAT 2727 " class_capacity + non_class_capacity " SIZE_FORMAT 2728 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2729 capacity_bytes(), class_capacity + non_class_capacity, 2730 class_capacity, non_class_capacity); 2731 2732 return class_capacity + non_class_capacity; 2733 } 2734 2735 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2736 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2737 return list == NULL ? 0 : list->reserved_bytes(); 2738 } 2739 2740 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2741 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2742 return list == NULL ? 
0 : list->committed_bytes(); 2743 } 2744 2745 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2746 2747 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2748 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2749 if (chunk_manager == NULL) { 2750 return 0; 2751 } 2752 chunk_manager->slow_verify(); 2753 return chunk_manager->free_chunks_total_words(); 2754 } 2755 2756 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2757 return free_chunks_total_words(mdtype) * BytesPerWord; 2758 } 2759 2760 size_t MetaspaceAux::free_chunks_total_words() { 2761 return free_chunks_total_words(Metaspace::ClassType) + 2762 free_chunks_total_words(Metaspace::NonClassType); 2763 } 2764 2765 size_t MetaspaceAux::free_chunks_total_bytes() { 2766 return free_chunks_total_words() * BytesPerWord; 2767 } 2768 2769 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 2770 return Metaspace::get_chunk_manager(mdtype) != NULL; 2771 } 2772 2773 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 2774 if (!has_chunk_free_list(mdtype)) { 2775 return MetaspaceChunkFreeListSummary(); 2776 } 2777 2778 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 2779 return cm->chunk_free_list_summary(); 2780 } 2781 2782 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2783 log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", 2784 prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K); 2785 } 2786 2787 void MetaspaceAux::print_on(outputStream* out) { 2788 Metaspace::MetadataType nct = Metaspace::NonClassType; 2789 2790 out->print_cr(" Metaspace " 2791 "used " SIZE_FORMAT "K, " 2792 "capacity " SIZE_FORMAT "K, " 2793 "committed " SIZE_FORMAT "K, " 2794 "reserved " SIZE_FORMAT "K", 2795 used_bytes()/K, 2796 capacity_bytes()/K, 2797 committed_bytes()/K, 2798 reserved_bytes()/K); 2799 2800 if (Metaspace::using_class_space()) { 2801 Metaspace::MetadataType ct = Metaspace::ClassType; 2802 out->print_cr(" class space " 2803 "used " SIZE_FORMAT "K, " 2804 "capacity " SIZE_FORMAT "K, " 2805 "committed " SIZE_FORMAT "K, " 2806 "reserved " SIZE_FORMAT "K", 2807 used_bytes(ct)/K, 2808 capacity_bytes(ct)/K, 2809 committed_bytes(ct)/K, 2810 reserved_bytes(ct)/K); 2811 } 2812 } 2813 2814 // Print information for class space and data space separately. 2815 // This is almost the same as above. 
2816 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2817 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2818 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2819 size_t used_bytes = used_bytes_slow(mdtype); 2820 size_t free_bytes = free_bytes_slow(mdtype); 2821 size_t used_and_free = used_bytes + free_bytes + 2822 free_chunks_capacity_bytes; 2823 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2824 "K + unused in chunks " SIZE_FORMAT "K + " 2825 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2826 "K capacity in allocated chunks " SIZE_FORMAT "K", 2827 used_bytes / K, 2828 free_bytes / K, 2829 free_chunks_capacity_bytes / K, 2830 used_and_free / K, 2831 capacity_bytes / K); 2832 // Accounting can only be correct if we got the values during a safepoint 2833 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2834 } 2835 2836 // Print total fragmentation for class metaspaces 2837 void MetaspaceAux::print_class_waste(outputStream* out) { 2838 assert(Metaspace::using_class_space(), "class metaspace not used"); 2839 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2840 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2841 ClassLoaderDataGraphMetaspaceIterator iter; 2842 while (iter.repeat()) { 2843 Metaspace* msp = iter.get_next(); 2844 if (msp != NULL) { 2845 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2846 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2847 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2848 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2849 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2850 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2851 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2852 } 2853 } 2854 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2855 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2856 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2857 "large count " SIZE_FORMAT, 2858 cls_specialized_count, cls_specialized_waste, 2859 cls_small_count, cls_small_waste, 2860 cls_medium_count, cls_medium_waste, cls_humongous_count); 2861 } 2862 2863 // Print total fragmentation for data and class metaspaces separately 2864 void MetaspaceAux::print_waste(outputStream* out) { 2865 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2866 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2867 2868 ClassLoaderDataGraphMetaspaceIterator iter; 2869 while (iter.repeat()) { 2870 Metaspace* msp = iter.get_next(); 2871 if (msp != NULL) { 2872 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2873 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2874 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2875 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2876 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2877 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2878 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2879 } 2880 } 2881 out->print_cr("Total fragmentation waste (words) doesn't count 
free space"); 2882 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2883 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2884 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2885 "large count " SIZE_FORMAT, 2886 specialized_count, specialized_waste, small_count, 2887 small_waste, medium_count, medium_waste, humongous_count); 2888 if (Metaspace::using_class_space()) { 2889 print_class_waste(out); 2890 } 2891 } 2892 2893 // Dump global metaspace things from the end of ClassLoaderDataGraph 2894 void MetaspaceAux::dump(outputStream* out) { 2895 out->print_cr("All Metaspace:"); 2896 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2897 out->print("class space: "); print_on(out, Metaspace::ClassType); 2898 print_waste(out); 2899 } 2900 2901 void MetaspaceAux::verify_free_chunks() { 2902 Metaspace::chunk_manager_metadata()->verify(); 2903 if (Metaspace::using_class_space()) { 2904 Metaspace::chunk_manager_class()->verify(); 2905 } 2906 } 2907 2908 void MetaspaceAux::verify_capacity() { 2909 #ifdef ASSERT 2910 size_t running_sum_capacity_bytes = capacity_bytes(); 2911 // For purposes of the running sum of capacity, verify against capacity 2912 size_t capacity_in_use_bytes = capacity_bytes_slow(); 2913 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 2914 "capacity_words() * BytesPerWord " SIZE_FORMAT 2915 " capacity_bytes_slow()" SIZE_FORMAT, 2916 running_sum_capacity_bytes, capacity_in_use_bytes); 2917 for (Metaspace::MetadataType i = Metaspace::ClassType; 2918 i < Metaspace:: MetadataTypeCount; 2919 i = (Metaspace::MetadataType)(i + 1)) { 2920 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 2921 assert(capacity_bytes(i) == capacity_in_use_bytes, 2922 "capacity_bytes(%u) " SIZE_FORMAT 2923 " capacity_bytes_slow(%u)" SIZE_FORMAT, 2924 i, capacity_bytes(i), i, capacity_in_use_bytes); 2925 } 2926 #endif 2927 } 2928 2929 void MetaspaceAux::verify_used() { 2930 #ifdef ASSERT 2931 size_t running_sum_used_bytes = used_bytes(); 2932 // For purposes of the running sum of used, verify against used 2933 size_t used_in_use_bytes = used_bytes_slow(); 2934 assert(used_bytes() == used_in_use_bytes, 2935 "used_bytes() " SIZE_FORMAT 2936 " used_bytes_slow()" SIZE_FORMAT, 2937 used_bytes(), used_in_use_bytes); 2938 for (Metaspace::MetadataType i = Metaspace::ClassType; 2939 i < Metaspace:: MetadataTypeCount; 2940 i = (Metaspace::MetadataType)(i + 1)) { 2941 size_t used_in_use_bytes = used_bytes_slow(i); 2942 assert(used_bytes(i) == used_in_use_bytes, 2943 "used_bytes(%u) " SIZE_FORMAT 2944 " used_bytes_slow(%u)" SIZE_FORMAT, 2945 i, used_bytes(i), i, used_in_use_bytes); 2946 } 2947 #endif 2948 } 2949 2950 void MetaspaceAux::verify_metrics() { 2951 verify_capacity(); 2952 verify_used(); 2953 } 2954 2955 2956 // Metaspace methods 2957 2958 size_t Metaspace::_first_chunk_word_size = 0; 2959 size_t Metaspace::_first_class_chunk_word_size = 0; 2960 2961 size_t Metaspace::_commit_alignment = 0; 2962 size_t Metaspace::_reserve_alignment = 0; 2963 2964 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 2965 initialize(lock, type); 2966 } 2967 2968 Metaspace::~Metaspace() { 2969 delete _vsm; 2970 if (using_class_space()) { 2971 delete _class_vsm; 2972 } 2973 } 2974 2975 VirtualSpaceList* Metaspace::_space_list = NULL; 2976 VirtualSpaceList* Metaspace::_class_space_list = NULL; 2977 2978 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 2979 ChunkManager* Metaspace::_chunk_manager_class = NULL; 2980 2981 #define VIRTUALSPACEMULTIPLIER 2 2982 2983 #ifdef _LP64 2984 
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 2985 2986 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 2987 // Figure out the narrow_klass_base and the narrow_klass_shift. The 2988 // narrow_klass_base is the lower of the metaspace base and the cds base 2989 // (if cds is enabled). The narrow_klass_shift depends on the distance 2990 // between the lower base and higher address. 2991 address lower_base; 2992 address higher_address; 2993 #if INCLUDE_CDS 2994 if (UseSharedSpaces) { 2995 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2996 (address)(metaspace_base + compressed_class_space_size())); 2997 lower_base = MIN2(metaspace_base, cds_base); 2998 } else 2999 #endif 3000 { 3001 higher_address = metaspace_base + compressed_class_space_size(); 3002 lower_base = metaspace_base; 3003 3004 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 3005 // If compressed class space fits in lower 32G, we don't need a base. 3006 if (higher_address <= (address)klass_encoding_max) { 3007 lower_base = 0; // Effectively lower base is zero. 3008 } 3009 } 3010 3011 Universe::set_narrow_klass_base(lower_base); 3012 3013 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 3014 Universe::set_narrow_klass_shift(0); 3015 } else { 3016 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 3017 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 3018 } 3019 } 3020 3021 #if INCLUDE_CDS 3022 // Return TRUE if the specified metaspace_base and cds_base are close enough 3023 // to work with compressed klass pointers. 3024 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 3025 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 3026 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3027 address lower_base = MIN2((address)metaspace_base, cds_base); 3028 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3029 (address)(metaspace_base + compressed_class_space_size())); 3030 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 3031 } 3032 #endif 3033 3034 // Try to allocate the metaspace at the requested addr. 3035 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 3036 assert(using_class_space(), "called improperly"); 3037 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3038 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 3039 "Metaspace size is too big"); 3040 assert_is_ptr_aligned(requested_addr, _reserve_alignment); 3041 assert_is_ptr_aligned(cds_base, _reserve_alignment); 3042 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); 3043 3044 // Don't use large pages for the class space. 3045 bool large_pages = false; 3046 3047 #if !(defined(AARCH64) || defined(AIX)) 3048 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 3049 _reserve_alignment, 3050 large_pages, 3051 requested_addr); 3052 #else // AARCH64 3053 ReservedSpace metaspace_rs; 3054 3055 // Our compressed klass pointers may fit nicely into the lower 32 3056 // bits. 3057 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) { 3058 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3059 _reserve_alignment, 3060 large_pages, 3061 requested_addr); 3062 } 3063 3064 if (! 
metaspace_rs.is_reserved()) { 3065 // Aarch64: Try to align metaspace so that we can decode a compressed 3066 // klass with a single MOVK instruction. We can do this iff the 3067 // compressed class base is a multiple of 4G. 3068 // Aix: Search for a place where we can find memory. If we need to load 3069 // the base, 4G alignment is helpful, too. 3070 size_t increment = AARCH64_ONLY(4*)G; 3071 for (char *a = (char*)align_ptr_up(requested_addr, increment); 3072 a < (char*)(1024*G); 3073 a += increment) { 3074 if (a == (char *)(32*G)) { 3075 // Go faster from here on. Zero-based is no longer possible. 3076 increment = 4*G; 3077 } 3078 3079 #if INCLUDE_CDS 3080 if (UseSharedSpaces 3081 && ! can_use_cds_with_metaspace_addr(a, cds_base)) { 3082 // We failed to find an aligned base that will reach. Fall 3083 // back to using our requested addr. 3084 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3085 _reserve_alignment, 3086 large_pages, 3087 requested_addr); 3088 break; 3089 } 3090 #endif 3091 3092 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3093 _reserve_alignment, 3094 large_pages, 3095 a); 3096 if (metaspace_rs.is_reserved()) 3097 break; 3098 } 3099 } 3100 3101 #endif // AARCH64 3102 3103 if (!metaspace_rs.is_reserved()) { 3104 #if INCLUDE_CDS 3105 if (UseSharedSpaces) { 3106 size_t increment = align_size_up(1*G, _reserve_alignment); 3107 3108 // Keep trying to allocate the metaspace, increasing the requested_addr 3109 // by 1GB each time, until we reach an address that will no longer allow 3110 // use of CDS with compressed klass pointers. 3111 char *addr = requested_addr; 3112 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3113 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3114 addr = addr + increment; 3115 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3116 _reserve_alignment, large_pages, addr); 3117 } 3118 } 3119 #endif 3120 // If no successful allocation then try to allocate the space anywhere. If 3121 // that fails then OOM doom. At this point we cannot try allocating the 3122 // metaspace as if UseCompressedClassPointers is off because too much 3123 // initialization has happened that depends on UseCompressedClassPointers. 3124 // So, UseCompressedClassPointers cannot be turned off at this point. 3125 if (!metaspace_rs.is_reserved()) { 3126 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3127 _reserve_alignment, large_pages); 3128 if (!metaspace_rs.is_reserved()) { 3129 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes", 3130 compressed_class_space_size())); 3131 } 3132 } 3133 } 3134 3135 // If we got here then the metaspace got allocated. 3136 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3137 3138 #if INCLUDE_CDS 3139 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3140 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3141 FileMapInfo::stop_sharing_and_unmap( 3142 "Could not allocate metaspace at a compatible address"); 3143 } 3144 #endif 3145 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3146 UseSharedSpaces ? 
(address)cds_base : 0); 3147 3148 initialize_class_space(metaspace_rs); 3149 3150 if (log_is_enabled(Trace, gc, metaspace)) { 3151 Log(gc, metaspace) log; 3152 ResourceMark rm; 3153 print_compressed_class_space(log.trace_stream(), requested_addr); 3154 } 3155 } 3156 3157 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) { 3158 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d", 3159 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift()); 3160 if (_class_space_list != NULL) { 3161 address base = (address)_class_space_list->current_virtual_space()->bottom(); 3162 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT, 3163 compressed_class_space_size(), p2i(base)); 3164 if (requested_addr != 0) { 3165 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr)); 3166 } 3167 st->cr(); 3168 } 3169 } 3170 3171 // For UseCompressedClassPointers the class space is reserved above the top of 3172 // the Java heap. The argument passed in is at the base of the compressed space. 3173 void Metaspace::initialize_class_space(ReservedSpace rs) { 3174 // The reserved space size may be bigger because of alignment, especially with UseLargePages. 3175 assert(rs.size() >= CompressedClassSpaceSize, 3176 SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize); 3177 assert(using_class_space(), "Must be using class space"); 3178 _class_space_list = new VirtualSpaceList(rs); 3179 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk); 3180 3181 if (!_class_space_list->initialization_succeeded()) { 3182 vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); 3183 } 3184 } 3185 3186 #endif 3187 3188 void Metaspace::ergo_initialize() { 3189 if (DumpSharedSpaces) { 3190 // Using large pages when dumping the shared archive is currently not implemented. 3191 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false); 3192 } 3193 3194 size_t page_size = os::vm_page_size(); 3195 if (UseLargePages && UseLargePagesInMetaspace) { 3196 page_size = os::large_page_size(); 3197 } 3198 3199 _commit_alignment = page_size; 3200 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); 3201 3202 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since doing so would 3203 // clobber the record of whether MaxMetaspaceSize was set on the command line. 3204 // This information is needed later to conform to the specification of the 3205 // java.lang.management.MemoryUsage API. 3206 // 3207 // Ideally, we would be able to set the default value of MaxMetaspaceSize in 3208 // globals.hpp to the aligned value, but this is not possible, since the 3209 // alignment depends on other flags being parsed.
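// Illustration (assumed values, not taken from this code): with a 4K page
// size and a 64K allocation granularity, _reserve_alignment is 64K, so a
// command-line -XX:MaxMetaspaceSize=100000K would be aligned down to 99968K
// by the statement below.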
3210 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment); 3211 3212 if (MetaspaceSize > MaxMetaspaceSize) { 3213 MetaspaceSize = MaxMetaspaceSize; 3214 } 3215 3216 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment); 3217 3218 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); 3219 3220 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment); 3221 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment); 3222 3223 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment); 3224 set_compressed_class_space_size(CompressedClassSpaceSize); 3225 } 3226 3227 void Metaspace::global_initialize() { 3228 MetaspaceGC::initialize(); 3229 3230 // Initialize the alignment for shared spaces. 3231 int max_alignment = os::vm_allocation_granularity(); 3232 size_t cds_total = 0; 3233 3234 MetaspaceShared::set_max_alignment(max_alignment); 3235 3236 if (DumpSharedSpaces) { 3237 #if INCLUDE_CDS 3238 MetaspaceShared::estimate_regions_size(); 3239 3240 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 3241 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); 3242 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 3243 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 3244 3245 // Initialize with the sum of the shared space sizes. The read-only 3246 // and read write metaspace chunks will be allocated out of this and the 3247 // remainder is the misc code and data chunks. 3248 cds_total = FileMapInfo::shared_spaces_size(); 3249 cds_total = align_size_up(cds_total, _reserve_alignment); 3250 _space_list = new VirtualSpaceList(cds_total/wordSize); 3251 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3252 3253 if (!_space_list->initialization_succeeded()) { 3254 vm_exit_during_initialization("Unable to dump shared archive.", NULL); 3255 } 3256 3257 #ifdef _LP64 3258 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) { 3259 vm_exit_during_initialization("Unable to dump shared archive.", 3260 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space (" 3261 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed " 3262 "klass limit: " UINT64_FORMAT, cds_total, compressed_class_space_size(), 3263 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax)); 3264 } 3265 3266 // Set the compressed klass pointer base so that decoding of these pointers works 3267 // properly when creating the shared archive. 
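// Sketch of the decode this enables (illustrative, not the VM's exact code):
//   Klass* k = (Klass*)(narrow_klass_base + ((uintptr_t)narrow_klass << narrow_klass_shift));
// With the base set to the bottom of the dump-time space and a shift of 0
// (both set below), archived narrow klass values decode unchanged at runtime
// as long as the base matches.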
3268 assert(UseCompressedOops && UseCompressedClassPointers, 3269 "UseCompressedOops and UseCompressedClassPointers must be set"); 3270 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom()); 3271 log_develop_trace(gc, metaspace)("Setting narrow_klass_base to Address: " PTR_FORMAT, 3272 p2i(_space_list->current_virtual_space()->bottom())); 3273 3274 Universe::set_narrow_klass_shift(0); 3275 #endif // _LP64 3276 #endif // INCLUDE_CDS 3277 } else { 3278 #if INCLUDE_CDS 3279 if (UseSharedSpaces) { 3280 // If using shared space, open the file that contains the shared space 3281 // and map in the memory before initializing the rest of metaspace (so 3282 // the addresses don't conflict). 3283 address cds_address = NULL; 3284 FileMapInfo* mapinfo = new FileMapInfo(); 3285 3286 // Open the shared archive file, read and validate the header. If 3287 // initialization fails, shared spaces [UseSharedSpaces] are 3288 // disabled and the file is closed. 3289 // The spaces are also mapped in now. 3290 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) { 3291 cds_total = FileMapInfo::shared_spaces_size(); 3292 cds_address = (address)mapinfo->header()->region_addr(0); 3293 #ifdef _LP64 3294 if (using_class_space()) { 3295 char* cds_end = (char*)(cds_address + cds_total); 3296 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment); 3297 // If UseCompressedClassPointers is set then allocate the metaspace area 3298 // above the heap and above the CDS area (if it exists). 3299 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); 3300 // Map the shared string space after the compressed class pointers have 3301 // been set up, because the string mapping relies on that setup. 3302 mapinfo->map_string_regions(); 3303 } 3304 #endif // _LP64 3305 } else { 3306 assert(!mapinfo->is_open() && !UseSharedSpaces, 3307 "archive file not closed or shared spaces not disabled."); 3308 } 3309 } 3310 #endif // INCLUDE_CDS 3311 3312 #ifdef _LP64 3313 if (!UseSharedSpaces && using_class_space()) { 3314 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment); 3315 allocate_metaspace_compressed_klass_ptrs(base, 0); 3316 } 3317 #endif // _LP64 3318 3319 // Initialize these before initializing the VirtualSpaceList 3320 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord; 3321 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size); 3322 // Make the first class chunk bigger than a medium chunk so it's not put 3323 // on the medium chunk list. The next chunk will be small and progress 3324 // from there. This size was calculated by running -version. 3325 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6, 3326 (CompressedClassSpaceSize/BytesPerWord)*2); 3327 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size); 3328 // Arbitrarily set the initial virtual space to a multiple 3329 // of the boot class loader size. 3330 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size; 3331 word_size = align_size_up(word_size, Metaspace::reserve_alignment_words()); 3332 3333 // Initialize the list of virtual spaces.
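// (The list starts out with a single node of word_size words; further nodes
// are expected to be reserved on demand as metadata allocation grows.)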
3334 _space_list = new VirtualSpaceList(word_size); 3335 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3336 3337 if (!_space_list->initialization_succeeded()) { 3338 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); 3339 } 3340 } 3341 3342 _tracer = new MetaspaceTracer(); 3343 } 3344 3345 void Metaspace::post_initialize() { 3346 MetaspaceGC::post_initialize(); 3347 } 3348 3349 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype, 3350 size_t chunk_word_size, 3351 size_t chunk_bunch) { 3352 // Get a chunk from the chunk freelist 3353 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size); 3354 if (chunk != NULL) { 3355 return chunk; 3356 } 3357 3358 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch); 3359 } 3360 3361 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 3362 3363 assert(space_list() != NULL, 3364 "Metadata VirtualSpaceList has not been initialized"); 3365 assert(chunk_manager_metadata() != NULL, 3366 "Metadata ChunkManager has not been initialized"); 3367 3368 _vsm = new SpaceManager(NonClassType, lock); 3369 if (_vsm == NULL) { 3370 return; 3371 } 3372 size_t word_size; 3373 size_t class_word_size; 3374 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size); 3375 3376 if (using_class_space()) { 3377 assert(class_space_list() != NULL, 3378 "Class VirtualSpaceList has not been initialized"); 3379 assert(chunk_manager_class() != NULL, 3380 "Class ChunkManager has not been initialized"); 3381 3382 // Allocate SpaceManager for classes. 3383 _class_vsm = new SpaceManager(ClassType, lock); 3384 if (_class_vsm == NULL) { 3385 return; 3386 } 3387 } 3388 3389 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3390 3391 // Allocate chunk for metadata objects 3392 Metachunk* new_chunk = get_initialization_chunk(NonClassType, 3393 word_size, 3394 vsm()->medium_chunk_bunch()); 3395 // For dumping shared archive, report error if allocation has failed. 3396 if (DumpSharedSpaces && new_chunk == NULL) { 3397 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + word_size * BytesPerWord); 3398 } 3399 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks"); 3400 if (new_chunk != NULL) { 3401 // Add to this manager's list of chunks in use and current_chunk(). 3402 vsm()->add_chunk(new_chunk, true); 3403 } 3404 3405 // Allocate chunk for class metadata objects 3406 if (using_class_space()) { 3407 Metachunk* class_chunk = get_initialization_chunk(ClassType, 3408 class_word_size, 3409 class_vsm()->medium_chunk_bunch()); 3410 if (class_chunk != NULL) { 3411 class_vsm()->add_chunk(class_chunk, true); 3412 } else { 3413 // For dumping shared archive, report error if allocation has failed. 3414 if (DumpSharedSpaces) { 3415 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + class_word_size * BytesPerWord); 3416 } 3417 } 3418 } 3419 3420 _alloc_record_head = NULL; 3421 _alloc_record_tail = NULL; 3422 } 3423 3424 size_t Metaspace::align_word_size_up(size_t word_size) { 3425 size_t byte_size = word_size * wordSize; 3426 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize; 3427 } 3428 3429 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { 3430 // DumpSharedSpaces doesn't use class metadata area (yet) 3431 // Also, don't use class_vsm() unless UseCompressedClassPointers is true. 
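// (is_class_space_allocation(mdtype) is expected to hold only for ClassType
// allocations while a compressed class space is in use, so the vsm() branch
// below is the common case.)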
3432 if (is_class_space_allocation(mdtype)) { 3433 return class_vsm()->allocate(word_size); 3434 } else { 3435 return vsm()->allocate(word_size); 3436 } 3437 } 3438 3439 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 3440 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); 3441 assert(delta_bytes > 0, "Must be"); 3442 3443 size_t before = 0; 3444 size_t after = 0; 3445 MetaWord* res; 3446 bool incremented; 3447 3448 // Each thread increments the HWM at most once. Even if the thread fails to increment 3449 // the HWM, an allocation is still attempted. This is because another thread must then 3450 // have incremented the HWM and therefore the allocation might still succeed. 3451 do { 3452 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before); 3453 res = allocate(word_size, mdtype); 3454 } while (!incremented && res == NULL); 3455 3456 if (incremented) { 3457 tracer()->report_gc_threshold(before, after, 3458 MetaspaceGCThresholdUpdater::ExpandAndAllocate); 3459 log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after); 3460 } 3461 3462 return res; 3463 } 3464 3465 // Space allocated in the Metaspace. This may 3466 // be across several metadata virtual spaces. 3467 char* Metaspace::bottom() const { 3468 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces"); 3469 return (char*)vsm()->current_chunk()->bottom(); 3470 } 3471 3472 size_t Metaspace::used_words_slow(MetadataType mdtype) const { 3473 if (mdtype == ClassType) { 3474 return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0; 3475 } else { 3476 return vsm()->sum_used_in_chunks_in_use(); // includes overhead! 3477 } 3478 } 3479 3480 size_t Metaspace::free_words_slow(MetadataType mdtype) const { 3481 if (mdtype == ClassType) { 3482 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0; 3483 } else { 3484 return vsm()->sum_free_in_chunks_in_use(); 3485 } 3486 } 3487 3488 // Space capacity in the Metaspace. It includes 3489 // space in the list of chunks from which allocations 3490 // have been made. Don't include space in the global freelist and 3491 // in the space available in the dictionary which 3492 // is already counted in some chunk. 3493 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const { 3494 if (mdtype == ClassType) { 3495 return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0; 3496 } else { 3497 return vsm()->sum_capacity_in_chunks_in_use(); 3498 } 3499 } 3500 3501 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const { 3502 return used_words_slow(mdtype) * BytesPerWord; 3503 } 3504 3505 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const { 3506 return capacity_words_slow(mdtype) * BytesPerWord; 3507 } 3508 3509 size_t Metaspace::allocated_blocks_bytes() const { 3510 return vsm()->allocated_blocks_bytes() + 3511 (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0); 3512 } 3513 3514 size_t Metaspace::allocated_chunks_bytes() const { 3515 return vsm()->allocated_chunks_bytes() + 3516 (using_class_space() ? 
class_vsm()->allocated_chunks_bytes() : 0); 3517 } 3518 3519 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { 3520 assert(!SafepointSynchronize::is_at_safepoint() 3521 || Thread::current()->is_VM_thread(), "should be the VM thread"); 3522 3523 if (DumpSharedSpaces && PrintSharedSpaces) { 3524 record_deallocation(ptr, vsm()->get_allocation_word_size(word_size)); 3525 } 3526 3527 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); 3528 3529 if (is_class && using_class_space()) { 3530 class_vsm()->deallocate(ptr, word_size); 3531 } else { 3532 vsm()->deallocate(ptr, word_size); 3533 } 3534 } 3535 3536 3537 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, 3538 bool read_only, MetaspaceObj::Type type, TRAPS) { 3539 if (HAS_PENDING_EXCEPTION) { 3540 assert(false, "Should not allocate with exception pending"); 3541 return NULL; // caller does a CHECK_NULL too 3542 } 3543 3544 assert(loader_data != NULL, "Should never pass around a NULL loader_data. " 3545 "ClassLoaderData::the_null_class_loader_data() should have been used."); 3546 3547 // Allocate in metaspaces without taking out a lock, because taking one 3548 // deadlocks with the SymbolTable_lock. Dumping is single-threaded for now. We'll have 3549 // to revisit this for application class data sharing. 3550 if (DumpSharedSpaces) { 3551 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 3552 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 3553 MetaWord* result = space->allocate(word_size, NonClassType); 3554 if (result == NULL) { 3555 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 3556 } 3557 if (PrintSharedSpaces) { 3558 space->record_allocation(result, type, space->vsm()->get_allocation_word_size(word_size)); 3559 } 3560 3561 // Zero initialize. 3562 Copy::fill_to_words((HeapWord*)result, word_size, 0); 3563 3564 return result; 3565 } 3566 3567 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 3568 3569 // Try to allocate metadata. 3570 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 3571 3572 if (result == NULL) { 3573 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype); 3574 3575 // Allocation failed. 3576 if (is_init_completed()) { 3577 // Only start a GC if the bootstrapping has completed. 3578 3579 // Try to clean out some memory and retry. 3580 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 3581 loader_data, word_size, mdtype); 3582 } 3583 } 3584 3585 if (result == NULL) { 3586 SpaceManager* sm; 3587 if (is_class_space_allocation(mdtype)) { 3588 sm = loader_data->metaspace_non_null()->class_vsm(); 3589 } else { 3590 sm = loader_data->metaspace_non_null()->vsm(); 3591 } 3592 3593 result = sm->get_small_chunk_and_allocate(word_size); 3594 3595 if (result == NULL) { 3596 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL); 3597 } 3598 } 3599 3600 // Zero initialize.
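// (Copy::fill_to_words stores word_size zero-words starting at result, so
// callers always observe zeroed metadata, including on the retry paths above.)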
3601 Copy::fill_to_words((HeapWord*)result, word_size, 0); 3602 3603 return result; 3604 } 3605 3606 size_t Metaspace::class_chunk_size(size_t word_size) { 3607 assert(using_class_space(), "Has to use class space"); 3608 return class_vsm()->calc_chunk_size(word_size); 3609 } 3610 3611 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) { 3612 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype); 3613 3614 // If result is still null, we are out of memory. 3615 Log(gc, metaspace, freelist) log; 3616 if (log.is_info()) { 3617 log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT, 3618 is_class_space_allocation(mdtype) ? "class" : "data", word_size); 3619 ResourceMark rm; 3620 outputStream* out = log.info_stream(); 3621 if (loader_data->metaspace_or_null() != NULL) { 3622 loader_data->dump(out); 3623 } 3624 MetaspaceAux::dump(out); 3625 } 3626 3627 bool out_of_compressed_class_space = false; 3628 if (is_class_space_allocation(mdtype)) { 3629 Metaspace* metaspace = loader_data->metaspace_non_null(); 3630 out_of_compressed_class_space = 3631 MetaspaceAux::committed_bytes(Metaspace::ClassType) + 3632 (metaspace->class_chunk_size(word_size) * BytesPerWord) > 3633 CompressedClassSpaceSize; 3634 } 3635 3636 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 3637 const char* space_string = out_of_compressed_class_space ? 3638 "Compressed class space" : "Metaspace"; 3639 3640 report_java_out_of_memory(space_string); 3641 3642 if (JvmtiExport::should_post_resource_exhausted()) { 3643 JvmtiExport::post_resource_exhausted( 3644 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 3645 space_string); 3646 } 3647 3648 if (!is_init_completed()) { 3649 vm_exit_during_initialization("OutOfMemoryError", space_string); 3650 } 3651 3652 if (out_of_compressed_class_space) { 3653 THROW_OOP(Universe::out_of_memory_error_class_metaspace()); 3654 } else { 3655 THROW_OOP(Universe::out_of_memory_error_metaspace()); 3656 } 3657 } 3658 3659 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) { 3660 switch (mdtype) { 3661 case Metaspace::ClassType: return "Class"; 3662 case Metaspace::NonClassType: return "Metadata"; 3663 default: 3664 assert(false, "Got bad mdtype: %d", (int) mdtype); 3665 return NULL; 3666 } 3667 } 3668 3669 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 3670 assert(DumpSharedSpaces, "sanity"); 3671 3672 int byte_size = (int)word_size * wordSize; 3673 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size); 3674 3675 if (_alloc_record_head == NULL) { 3676 _alloc_record_head = _alloc_record_tail = rec; 3677 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) { 3678 _alloc_record_tail->_next = rec; 3679 _alloc_record_tail = rec; 3680 } else { 3681 // slow linear search, but this doesn't happen that often, and only when dumping 3682 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) { 3683 if (old->_ptr == ptr) { 3684 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity"); 3685 int remain_bytes = old->_byte_size - byte_size; 3686 assert(remain_bytes >= 0, "sanity"); 3687 old->_type = type; 3688 3689 if (remain_bytes == 0) { 3690 delete(rec); 3691 } else { 3692 address remain_ptr = address(ptr) + byte_size; 3693 rec->_ptr = remain_ptr; 3694 rec->_byte_size = remain_bytes; 3695 rec->_type = MetaspaceObj::DeallocatedType; 3696 rec->_next = old->_next; 3697 
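// Split the old record: 'old' keeps the first byte_size bytes under the new
// type, while 'rec' is re-purposed to track the deallocated remainder.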
old->_byte_size = byte_size; 3698 old->_next = rec; 3699 } 3700 return; 3701 } 3702 } 3703 assert(0, "reallocating a freed pointer that was not recorded"); 3704 } 3705 } 3706 3707 void Metaspace::record_deallocation(void* ptr, size_t word_size) { 3708 assert(DumpSharedSpaces, "sanity"); 3709 3710 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3711 if (rec->_ptr == ptr) { 3712 assert(rec->_byte_size == (int)word_size * wordSize, "sanity"); 3713 rec->_type = MetaspaceObj::DeallocatedType; 3714 return; 3715 } 3716 } 3717 3718 assert(0, "deallocating a pointer that was not recorded"); 3719 } 3720 3721 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) { 3722 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces"); 3723 3724 address last_addr = (address)bottom(); 3725 3726 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3727 address ptr = rec->_ptr; 3728 if (last_addr < ptr) { 3729 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr); 3730 } 3731 closure->doit(ptr, rec->_type, rec->_byte_size); 3732 last_addr = ptr + rec->_byte_size; 3733 } 3734 3735 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType); 3736 if (last_addr < top) { 3737 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr); 3738 } 3739 } 3740 3741 void Metaspace::purge(MetadataType mdtype) { 3742 get_space_list(mdtype)->purge(get_chunk_manager(mdtype)); 3743 } 3744 3745 void Metaspace::purge() { 3746 MutexLockerEx cl(SpaceManager::expand_lock(), 3747 Mutex::_no_safepoint_check_flag); 3748 purge(NonClassType); 3749 if (using_class_space()) { 3750 purge(ClassType); 3751 } 3752 } 3753 3754 void Metaspace::print_on(outputStream* out) const { 3755 // Print both class virtual space counts and metaspace. 
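// (Nothing is printed unless Verbose is set; each SpaceManager then prints
// its own usage summary.)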
if (Verbose) { 3757 vsm()->print_on(out); 3758 if (using_class_space()) { 3759 class_vsm()->print_on(out); 3760 } 3761 } 3762 } 3763 3764 bool Metaspace::contains(const void* ptr) { 3765 if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) { 3766 return true; 3767 } 3768 3769 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) { 3770 return true; 3771 } 3772 3773 return get_space_list(NonClassType)->contains(ptr); 3774 } 3775 3776 void Metaspace::verify() { 3777 vsm()->verify(); 3778 if (using_class_space()) { 3779 class_vsm()->verify(); 3780 } 3781 } 3782 3783 void Metaspace::dump(outputStream* const out) const { 3784 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm())); 3785 vsm()->dump(out); 3786 if (using_class_space()) { 3787 out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm())); 3788 class_vsm()->dump(out); 3789 } 3790 } 3791 3792 /////////////// Unit tests /////////////// 3793 3794 #ifndef PRODUCT 3795 3796 class TestMetaspaceAuxTest : AllStatic { 3797 public: 3798 static void test_reserved() { 3799 size_t reserved = MetaspaceAux::reserved_bytes(); 3800 3801 assert(reserved > 0, "assert"); 3802 3803 size_t committed = MetaspaceAux::committed_bytes(); 3804 assert(committed <= reserved, "assert"); 3805 3806 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); 3807 assert(reserved_metadata > 0, "assert"); 3808 assert(reserved_metadata <= reserved, "assert"); 3809 3810 if (UseCompressedClassPointers) { 3811 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType); 3812 assert(reserved_class > 0, "assert"); 3813 assert(reserved_class < reserved, "assert"); 3814 } 3815 } 3816 3817 static void test_committed() { 3818 size_t committed = MetaspaceAux::committed_bytes(); 3819 3820 assert(committed > 0, "assert"); 3821 3822 size_t reserved = MetaspaceAux::reserved_bytes(); 3823 assert(committed <= reserved, "assert"); 3824 3825 size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType); 3826 assert(committed_metadata > 0, "assert"); 3827 assert(committed_metadata <= committed, "assert"); 3828 3829 if (UseCompressedClassPointers) { 3830 size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType); 3831 assert(committed_class > 0, "assert"); 3832 assert(committed_class < committed, "assert"); 3833 } 3834 } 3835 3836 static void test_virtual_space_list_large_chunk() { 3837 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity()); 3838 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3839 // Use a size larger than VirtualSpaceSize (256K) and add one page, so the 3840 // size is _not_ vm_allocation_granularity aligned on Windows.
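// Illustration (assumed values: 4K pages, 8-byte words): 2*256*K words is 4M
// of metadata, and each os::vm_page_size()/BytesPerWord term adds 512 words
// (one 4K page), keeping the byte size off any 64K allocation granularity.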
size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord)); 3842 large_size += (os::vm_page_size()/BytesPerWord); 3843 vs_list->get_new_chunk(large_size, large_size, 0); 3844 } 3845 3846 static void test() { 3847 test_reserved(); 3848 test_committed(); 3849 test_virtual_space_list_large_chunk(); 3850 } 3851 }; 3852 3853 void TestMetaspaceAux_test() { 3854 TestMetaspaceAuxTest::test(); 3855 } 3856 3857 class TestVirtualSpaceNodeTest { 3858 static void chunk_up(size_t words_left, size_t& num_medium_chunks, 3859 size_t& num_small_chunks, 3860 size_t& num_specialized_chunks) { 3861 num_medium_chunks = words_left / MediumChunk; 3862 words_left = words_left % MediumChunk; 3863 3864 num_small_chunks = words_left / SmallChunk; 3865 words_left = words_left % SmallChunk; 3866 // How many specialized chunks can we get? 3867 num_specialized_chunks = words_left / SpecializedChunk; 3868 assert(words_left % SpecializedChunk == 0, "should be nothing left"); 3869 } 3870 3871 public: 3872 static void test() { 3873 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3874 const size_t vsn_test_size_words = MediumChunk * 4; 3875 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord; 3876 3877 // The chunk sizes must be multiples of each other, or this will fail 3878 STATIC_ASSERT(MediumChunk % SmallChunk == 0); 3879 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0); 3880 3881 { // No committed memory in VSN 3882 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3883 VirtualSpaceNode vsn(vsn_test_size_bytes); 3884 vsn.initialize(); 3885 vsn.retire(&cm); 3886 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN"); 3887 } 3888 3889 { // All of VSN is committed, half is used by chunks 3890 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3891 VirtualSpaceNode vsn(vsn_test_size_bytes); 3892 vsn.initialize(); 3893 vsn.expand_by(vsn_test_size_words, vsn_test_size_words); 3894 vsn.get_chunk_vs(MediumChunk); 3895 vsn.get_chunk_vs(MediumChunk); 3896 vsn.retire(&cm); 3897 assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks"); 3898 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up"); 3899 } 3900 3901 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord; 3902 // This doesn't work for systems with vm_page_size >= 16K.
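// (With 16K pages and 8-byte words, page_chunks = 4 * 16K / 8 = 8K words,
// which already equals MediumChunk, so the guard below skips the test body.)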
3903 if (page_chunks < MediumChunk) { 3904 // 4 pages of VSN is committed, some is used by chunks 3905 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3906 VirtualSpaceNode vsn(vsn_test_size_bytes); 3907 3908 vsn.initialize(); 3909 vsn.expand_by(page_chunks, page_chunks); 3910 vsn.get_chunk_vs(SmallChunk); 3911 vsn.get_chunk_vs(SpecializedChunk); 3912 vsn.retire(&cm); 3913 3914 // committed - used = words left to retire 3915 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk; 3916 3917 size_t num_medium_chunks, num_small_chunks, num_spec_chunks; 3918 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); 3919 3920 assert(num_medium_chunks == 0, "should not get any medium chunks"); 3921 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); 3922 assert(cm.sum_free_chunks() == words_left, "sizes should add up"); 3923 } 3924 3925 { // Half of VSN is committed, a humongous chunk is used 3926 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3927 VirtualSpaceNode vsn(vsn_test_size_bytes); 3928 vsn.initialize(); 3929 vsn.expand_by(MediumChunk * 2, MediumChunk * 2); 3930 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk 3931 vsn.retire(&cm); 3932 3933 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk); 3934 size_t num_medium_chunks, num_small_chunks, num_spec_chunks; 3935 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); 3936 3937 assert(num_medium_chunks == 0, "should not get any medium chunks"); 3938 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); 3939 assert(cm.sum_free_chunks() == words_left, "sizes should add up"); 3940 } 3941 3942 } 3943 3944 #define assert_is_available_positive(word_size) \ 3945 assert(vsn.is_available(word_size), \ 3946 #word_size ": " PTR_FORMAT " bytes were not available in " \ 3947 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 3948 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end())); 3949 3950 #define assert_is_available_negative(word_size) \ 3951 assert(!vsn.is_available(word_size), \ 3952 #word_size ": " PTR_FORMAT " bytes should not be available in " \ 3953 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 3954 (uintptr_t)(word_size * BytesPerWord), p2i(vsn.bottom()), p2i(vsn.end())); 3955 3956 static void test_is_available_positive() { 3957 // Reserve some memory. 3958 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3959 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3960 3961 // Commit some memory. 3962 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3963 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3964 assert(expanded, "Failed to commit"); 3965 3966 // Check that is_available accepts the committed size. 3967 assert_is_available_positive(commit_word_size); 3968 3969 // Check that is_available accepts half the committed size. 3970 size_t expand_word_size = commit_word_size / 2; 3971 assert_is_available_positive(expand_word_size); 3972 } 3973 3974 static void test_is_available_negative() { 3975 // Reserve some memory. 3976 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3977 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3978 3979 // Commit some memory. 
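// (commit_word_size is one allocation granule expressed in words; the
// is_available() checks below are made relative to this committed amount.)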
3980 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3981 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3982 assert(expanded, "Failed to commit"); 3983 3984 // Check that is_available doesn't accept a too large size. 3985 size_t two_times_commit_word_size = commit_word_size * 2; 3986 assert_is_available_negative(two_times_commit_word_size); 3987 } 3988 3989 static void test_is_available_overflow() { 3990 // Reserve some memory. 3991 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3992 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3993 3994 // Commit some memory. 3995 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3996 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3997 assert(expanded, "Failed to commit"); 3998 3999 // Calculate a size that will overflow the virtual space size. 4000 void* virtual_space_max = (void*)(uintptr_t)-1; 4001 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1); 4002 size_t overflow_size = bottom_to_max + BytesPerWord; 4003 size_t overflow_word_size = overflow_size / BytesPerWord; 4004 4005 // Check that is_available can handle the overflow. 4006 assert_is_available_negative(overflow_word_size); 4007 } 4008 4009 static void test_is_available() { 4010 TestVirtualSpaceNodeTest::test_is_available_positive(); 4011 TestVirtualSpaceNodeTest::test_is_available_negative(); 4012 TestVirtualSpaceNodeTest::test_is_available_overflow(); 4013 } 4014 }; 4015 4016 void TestVirtualSpaceNode_test() { 4017 TestVirtualSpaceNodeTest::test(); 4018 TestVirtualSpaceNodeTest::test_is_available(); 4019 } 4020 #endif