/*
 * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // ChunkManager in all lists of this type
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
           _free_chunks_total > 0,
           "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // add or delete (return) a chunk to the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                         \
  assert(index == SpecializedIndex ||                                     \
         index == SmallIndex ||                                           \
         index == MediumIndex ||                                          \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // address of next available space in _virtual_space;
  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uintx container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to Virtualspace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
    err_msg(PTR_FORMAT " is not aligned to "  \
      SIZE_FORMAT, ptr, alignment))

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
    err_msg(SIZE_FORMAT " is not aligned to "   \
      SIZE_FORMAT, size, alignment))


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
  // configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uintx VirtualSpaceNode::container_count_slow() {
  uintx count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of all space in allocated chunks
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
  size_t small_chunk_size()       { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  size_t medium_chunk_size()      { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size()  { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_bytes() const { return _allocated_chunks_words * BytesPerWord; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums for Metablocks
  // by the given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in-use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index ) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // returned chunk size (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
      "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agrees.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
        "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                 (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
      err_msg("Reserved start was not set properly " PTR_FORMAT
        " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
      err_msg("Reserved size was not set properly " SIZE_FORMAT
        " != " SIZE_FORMAT, reserved()->word_size(),
        _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
           "[" PTR_FORMAT ", " PTR_FORMAT ", "
           PTR_FORMAT ", " PTR_FORMAT ")",
           vs, capacity / K,
           capacity == 0 ? 0 : used * 100 / capacity,
           bottom(), top(), end(),
           vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                             \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
          " limit (MaxMetaspaceSize): " SIZE_FORMAT,               \
          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      DEBUG_ONLY(verify_container_count();)
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
      DEBUG_ONLY(verify_container_count();)
    }
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it "
          "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  size_t capacity_until_GC = (size_t) _capacity_until_GC;
  size_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
  }

  intptr_t expected = (intptr_t) capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
        err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
                capacity_until_gc, committed_bytes));

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists becomes fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "  used_after_gc       : %6.1fKB",
                  used_after_gc / (double) K);
  }


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("    expanding:"
                      "  minimum_desired_capacity: %6.1fKB"
                      "  expand_bytes: %6.1fKB"
                      "  MinMetaspaceExpansion: %6.1fKB"
                      "  new metaspace HWM: %6.1fKB",
                      minimum_desired_capacity / (double) K,
                      expand_bytes / (double) K,
                      MinMetaspaceExpansion / (double) K,
                      new_capacity_until_GC / (double) K);
      }
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         err_msg(SIZE_FORMAT " >= " SIZE_FORMAT,
                 capacity_until_GC, minimum_desired_capacity));
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  minimum_desired_capacity: %6.1fKB"
                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
1606 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 1607 1608 shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); 1609 1610 assert(shrink_bytes <= max_shrink_bytes, 1611 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 1612 shrink_bytes, max_shrink_bytes)); 1613 if (current_shrink_factor == 0) { 1614 _shrink_factor = 10; 1615 } else { 1616 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100); 1617 } 1618 if (PrintGCDetails && Verbose) { 1619 gclog_or_tty->print_cr(" " 1620 " shrinking:" 1621 " initSize: %.1fK" 1622 " maximum_desired_capacity: %.1fK", 1623 MetaspaceSize / (double) K, 1624 maximum_desired_capacity / (double) K); 1625 gclog_or_tty->print_cr(" " 1626 " shrink_bytes: %.1fK" 1627 " current_shrink_factor: %d" 1628 " new shrink factor: %d" 1629 " MinMetaspaceExpansion: %.1fK", 1630 shrink_bytes / (double) K, 1631 current_shrink_factor, 1632 _shrink_factor, 1633 MinMetaspaceExpansion / (double) K); 1634 } 1635 } 1636 } 1637 1638 // Don't shrink unless it's significant 1639 if (shrink_bytes >= MinMetaspaceExpansion && 1640 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 1641 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 1642 Metaspace::tracer()->report_gc_threshold(capacity_until_GC, 1643 new_capacity_until_GC, 1644 MetaspaceGCThresholdUpdater::ComputeNewSize); 1645 } 1646 } 1647 1648 // Metadebug methods 1649 1650 void Metadebug::init_allocation_fail_alot_count() { 1651 if (MetadataAllocationFailALot) { 1652 _allocation_fail_alot_count = 1653 1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0)); 1654 } 1655 } 1656 1657 #ifdef ASSERT 1658 bool Metadebug::test_metadata_failure() { 1659 if (MetadataAllocationFailALot && 1660 Threads::is_vm_complete()) { 1661 if (_allocation_fail_alot_count > 0) { 1662 _allocation_fail_alot_count--; 1663 } else { 1664 if (TraceMetadataChunkAllocation && Verbose) { 1665 gclog_or_tty->print_cr("Metadata allocation failing for " 1666 "MetadataAllocationFailALot"); 1667 } 1668 init_allocation_fail_alot_count(); 1669 return true; 1670 } 1671 } 1672 return false; 1673 } 1674 #endif 1675 1676 // ChunkManager methods 1677 1678 size_t ChunkManager::free_chunks_total_words() { 1679 return _free_chunks_total; 1680 } 1681 1682 size_t ChunkManager::free_chunks_total_bytes() { 1683 return free_chunks_total_words() * BytesPerWord; 1684 } 1685 1686 size_t ChunkManager::free_chunks_count() { 1687 #ifdef ASSERT 1688 if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 1689 MutexLockerEx cl(SpaceManager::expand_lock(), 1690 Mutex::_no_safepoint_check_flag); 1691 // This lock is only needed in debug because the verification 1692 // of the _free_chunks_totals walks the list of free chunks 1693 slow_locked_verify_free_chunks_count(); 1694 } 1695 #endif 1696 return _free_chunks_count; 1697 } 1698 1699 void ChunkManager::locked_verify_free_chunks_total() { 1700 assert_lock_strong(SpaceManager::expand_lock()); 1701 assert(sum_free_chunks() == _free_chunks_total, 1702 err_msg("_free_chunks_total " SIZE_FORMAT " is not the" 1703 " same as sum " SIZE_FORMAT, _free_chunks_total, 1704 sum_free_chunks())); 1705 } 1706 1707 void ChunkManager::verify_free_chunks_total() { 1708 MutexLockerEx cl(SpaceManager::expand_lock(), 1709 Mutex::_no_safepoint_check_flag); 1710 locked_verify_free_chunks_total(); 1711 } 1712 1713 void ChunkManager::locked_verify_free_chunks_count() { 1714 assert_lock_strong(SpaceManager::expand_lock()); 1715 
assert(sum_free_chunks_count() == _free_chunks_count, 1716 err_msg("_free_chunks_count " SIZE_FORMAT " is not the" 1717 " same as sum " SIZE_FORMAT, _free_chunks_count, 1718 sum_free_chunks_count())); 1719 } 1720 1721 void ChunkManager::verify_free_chunks_count() { 1722 #ifdef ASSERT 1723 MutexLockerEx cl(SpaceManager::expand_lock(), 1724 Mutex::_no_safepoint_check_flag); 1725 locked_verify_free_chunks_count(); 1726 #endif 1727 } 1728 1729 void ChunkManager::verify() { 1730 MutexLockerEx cl(SpaceManager::expand_lock(), 1731 Mutex::_no_safepoint_check_flag); 1732 locked_verify(); 1733 } 1734 1735 void ChunkManager::locked_verify() { 1736 locked_verify_free_chunks_count(); 1737 locked_verify_free_chunks_total(); 1738 } 1739 1740 void ChunkManager::locked_print_free_chunks(outputStream* st) { 1741 assert_lock_strong(SpaceManager::expand_lock()); 1742 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1743 _free_chunks_total, _free_chunks_count); 1744 } 1745 1746 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) { 1747 assert_lock_strong(SpaceManager::expand_lock()); 1748 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1749 sum_free_chunks(), sum_free_chunks_count()); 1750 } 1751 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { 1752 return &_free_chunks[index]; 1753 } 1754 1755 // These methods that sum the free chunk lists are used in printing 1756 // methods that are used in product builds. 1757 size_t ChunkManager::sum_free_chunks() { 1758 assert_lock_strong(SpaceManager::expand_lock()); 1759 size_t result = 0; 1760 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1761 ChunkList* list = free_chunks(i); 1762 1763 if (list == NULL) { 1764 continue; 1765 } 1766 1767 result = result + list->count() * list->size(); 1768 } 1769 result = result + humongous_dictionary()->total_size(); 1770 return result; 1771 } 1772 1773 size_t ChunkManager::sum_free_chunks_count() { 1774 assert_lock_strong(SpaceManager::expand_lock()); 1775 size_t count = 0; 1776 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1777 ChunkList* list = free_chunks(i); 1778 if (list == NULL) { 1779 continue; 1780 } 1781 count = count + list->count(); 1782 } 1783 count = count + humongous_dictionary()->total_free_blocks(); 1784 return count; 1785 } 1786 1787 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) { 1788 ChunkIndex index = list_index(word_size); 1789 assert(index < HumongousIndex, "No humongous list"); 1790 return free_chunks(index); 1791 } 1792 1793 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { 1794 assert_lock_strong(SpaceManager::expand_lock()); 1795 1796 slow_locked_verify(); 1797 1798 Metachunk* chunk = NULL; 1799 if (list_index(word_size) != HumongousIndex) { 1800 ChunkList* free_list = find_free_chunks_list(word_size); 1801 assert(free_list != NULL, "Sanity check"); 1802 1803 chunk = free_list->head(); 1804 1805 if (chunk == NULL) { 1806 return NULL; 1807 } 1808 1809 // Remove the chunk as the head of the list. 
1810 free_list->remove_chunk(chunk);
1811
1812 if (TraceMetadataChunkAllocation && Verbose) {
1813 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
1814 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
1815 free_list, chunk, chunk->word_size());
1816 }
1817 } else {
1818 chunk = humongous_dictionary()->get_chunk(
1819 word_size,
1820 FreeBlockDictionary<Metachunk>::atLeast);
1821
1822 if (chunk == NULL) {
1823 return NULL;
1824 }
1825
1826 if (TraceMetadataHumongousAllocation) {
1827 size_t waste = chunk->word_size() - word_size;
1828 gclog_or_tty->print_cr("Free list allocate humongous chunk size "
1829 SIZE_FORMAT " for requested size " SIZE_FORMAT
1830 " waste " SIZE_FORMAT,
1831 chunk->word_size(), word_size, waste);
1832 }
1833 }
1834
1835 // Chunk is being removed from the chunks free list.
1836 dec_free_chunks_total(chunk->word_size());
1837
1838 // Remove it from the links to this freelist
1839 chunk->set_next(NULL);
1840 chunk->set_prev(NULL);
1841 #ifdef ASSERT
1842 // Chunk is no longer on any freelist. Setting to false makes container_count_slow()
1843 // work.
1844 chunk->set_is_tagged_free(false);
1845 #endif
1846 chunk->container()->inc_container_count();
1847
1848 slow_locked_verify();
1849 return chunk;
1850 }
1851
1852 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1853 assert_lock_strong(SpaceManager::expand_lock());
1854 slow_locked_verify();
1855
1856 // Take from the beginning of the list
1857 Metachunk* chunk = free_chunks_get(word_size);
1858 if (chunk == NULL) {
1859 return NULL;
1860 }
1861
1862 assert((word_size <= chunk->word_size()) ||
1863 (list_index(chunk->word_size()) == HumongousIndex),
1864 "Non-humongous variable sized chunk");
1865 if (TraceMetadataChunkAllocation) {
1866 size_t list_count;
1867 if (list_index(word_size) < HumongousIndex) {
1868 ChunkList* list = find_free_chunks_list(word_size);
1869 list_count = list->count();
1870 } else {
1871 list_count = humongous_dictionary()->total_count();
1872 }
1873 gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
1874 PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1875 this, chunk, chunk->word_size(), list_count);
1876 locked_print_free_chunks(gclog_or_tty);
1877 }
1878
1879 return chunk;
1880 }
1881
1882 void ChunkManager::print_on(outputStream* out) const {
1883 if (PrintFLSStatistics != 0) {
1884 const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
1885 }
1886 }
1887
1888 // SpaceManager methods
1889
1890 void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
1891 size_t* chunk_word_size,
1892 size_t* class_chunk_word_size) {
1893 switch (type) {
1894 case Metaspace::BootMetaspaceType:
1895 *chunk_word_size = Metaspace::first_chunk_word_size();
1896 *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
1897 break;
1898 case Metaspace::ROMetaspaceType:
1899 *chunk_word_size = SharedReadOnlySize / wordSize;
1900 *class_chunk_word_size = ClassSpecializedChunk;
1901 break;
1902 case Metaspace::ReadWriteMetaspaceType:
1903 *chunk_word_size = SharedReadWriteSize / wordSize;
1904 *class_chunk_word_size = ClassSpecializedChunk;
1905 break;
1906 case Metaspace::AnonymousMetaspaceType:
1907 case Metaspace::ReflectionMetaspaceType:
1908 *chunk_word_size = SpecializedChunk;
1909 *class_chunk_word_size = ClassSpecializedChunk;
1910 break;
1911 default:
1912 *chunk_word_size = SmallChunk;
1913 *class_chunk_word_size = ClassSmallChunk;
1914 break;
1915 }
1916
assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
1917 err_msg("Initial chunk sizes bad: data " SIZE_FORMAT
1918 " class " SIZE_FORMAT,
1919 *chunk_word_size, *class_chunk_word_size));
1920 }
1921
1922 size_t SpaceManager::sum_free_in_chunks_in_use() const {
1923 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1924 size_t free = 0;
1925 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1926 Metachunk* chunk = chunks_in_use(i);
1927 while (chunk != NULL) {
1928 free += chunk->free_word_size();
1929 chunk = chunk->next();
1930 }
1931 }
1932 return free;
1933 }
1934
1935 size_t SpaceManager::sum_waste_in_chunks_in_use() const {
1936 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
1937 size_t result = 0;
1938 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
1939 result += sum_waste_in_chunks_in_use(i);
1940 }
1941
1942 return result;
1943 }
1944
1945 size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
1946 size_t result = 0;
1947 Metachunk* chunk = chunks_in_use(index);
1948 // Count the free space in all the chunks but not the
1949 // current chunk from which allocations are still being done.
1950 while (chunk != NULL) {
1951 if (chunk != current_chunk()) {
1952 result += chunk->free_word_size();
1953 }
1954 chunk = chunk->next();
1955 }
1956 return result;
1957 }
1958
1959 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
1960 // For CMS use "allocated_chunks_words()" which does not need the
1961 // Metaspace lock. For the other collectors sum over the
1962 // lists. Use both methods as a check that "allocated_chunks_words()"
1963 // is correct. That is, sum_capacity_in_chunks_in_use() is too expensive
1964 // to use in the product and allocated_chunks_words() should be used
1965 // instead, but allow for checking that allocated_chunks_words() returns
1966 // the same value as sum_capacity_in_chunks_in_use(), which is the
1967 // definitive answer.
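// Fast path for CMS: return the running total maintained by inc_size_metrics().
// The slow path walks every chunks-in-use list under the Metaspace lock.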
1968 if (UseConcMarkSweepGC) { 1969 return allocated_chunks_words(); 1970 } else { 1971 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1972 size_t sum = 0; 1973 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1974 Metachunk* chunk = chunks_in_use(i); 1975 while (chunk != NULL) { 1976 sum += chunk->word_size(); 1977 chunk = chunk->next(); 1978 } 1979 } 1980 return sum; 1981 } 1982 } 1983 1984 size_t SpaceManager::sum_count_in_chunks_in_use() { 1985 size_t count = 0; 1986 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1987 count = count + sum_count_in_chunks_in_use(i); 1988 } 1989 1990 return count; 1991 } 1992 1993 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) { 1994 size_t count = 0; 1995 Metachunk* chunk = chunks_in_use(i); 1996 while (chunk != NULL) { 1997 count++; 1998 chunk = chunk->next(); 1999 } 2000 return count; 2001 } 2002 2003 2004 size_t SpaceManager::sum_used_in_chunks_in_use() const { 2005 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 2006 size_t used = 0; 2007 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2008 Metachunk* chunk = chunks_in_use(i); 2009 while (chunk != NULL) { 2010 used += chunk->used_word_size(); 2011 chunk = chunk->next(); 2012 } 2013 } 2014 return used; 2015 } 2016 2017 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const { 2018 2019 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2020 Metachunk* chunk = chunks_in_use(i); 2021 st->print("SpaceManager: %s " PTR_FORMAT, 2022 chunk_size_name(i), chunk); 2023 if (chunk != NULL) { 2024 st->print_cr(" free " SIZE_FORMAT, 2025 chunk->free_word_size()); 2026 } else { 2027 st->cr(); 2028 } 2029 } 2030 2031 chunk_manager()->locked_print_free_chunks(st); 2032 chunk_manager()->locked_print_sum_free_chunks(st); 2033 } 2034 2035 size_t SpaceManager::calc_chunk_size(size_t word_size) { 2036 2037 // Decide between a small chunk and a medium chunk. Up to 2038 // _small_chunk_limit small chunks can be allocated but 2039 // once a medium chunk has been allocated, no more small 2040 // chunks will be allocated. 2041 size_t chunk_word_size; 2042 if (chunks_in_use(MediumIndex) == NULL && 2043 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { 2044 chunk_word_size = (size_t) small_chunk_size(); 2045 if (word_size + Metachunk::overhead() > small_chunk_size()) { 2046 chunk_word_size = medium_chunk_size(); 2047 } 2048 } else { 2049 chunk_word_size = medium_chunk_size(); 2050 } 2051 2052 // Might still need a humongous chunk. Enforce 2053 // humongous allocations sizes to be aligned up to 2054 // the smallest chunk size. 
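// Example (non-class space manager): a request of 9*K words exceeds the 8*K-word
// medium chunk, so the chunk size becomes word_size plus Metachunk::overhead(),
// aligned up to smallest_chunk_size().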
2055 size_t if_humongous_sized_chunk = 2056 align_size_up(word_size + Metachunk::overhead(), 2057 smallest_chunk_size()); 2058 chunk_word_size = 2059 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2060 2061 assert(!SpaceManager::is_humongous(word_size) || 2062 chunk_word_size == if_humongous_sized_chunk, 2063 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT 2064 " chunk_word_size " SIZE_FORMAT, 2065 word_size, chunk_word_size)); 2066 if (TraceMetadataHumongousAllocation && 2067 SpaceManager::is_humongous(word_size)) { 2068 gclog_or_tty->print_cr("Metadata humongous allocation:"); 2069 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size); 2070 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT, 2071 chunk_word_size); 2072 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT, 2073 Metachunk::overhead()); 2074 } 2075 return chunk_word_size; 2076 } 2077 2078 void SpaceManager::track_metaspace_memory_usage() { 2079 if (is_init_completed()) { 2080 if (is_class()) { 2081 MemoryService::track_compressed_class_memory_usage(); 2082 } 2083 MemoryService::track_metaspace_memory_usage(); 2084 } 2085 } 2086 2087 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2088 assert(vs_list()->current_virtual_space() != NULL, 2089 "Should have been set"); 2090 assert(current_chunk() == NULL || 2091 current_chunk()->allocate(word_size) == NULL, 2092 "Don't need to expand"); 2093 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2094 2095 if (TraceMetadataChunkAllocation && Verbose) { 2096 size_t words_left = 0; 2097 size_t words_used = 0; 2098 if (current_chunk() != NULL) { 2099 words_left = current_chunk()->free_word_size(); 2100 words_used = current_chunk()->used_word_size(); 2101 } 2102 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT 2103 " words " SIZE_FORMAT " words used " SIZE_FORMAT 2104 " words left", 2105 word_size, words_used, words_left); 2106 } 2107 2108 // Get another chunk out of the virtual space 2109 size_t grow_chunks_by_words = calc_chunk_size(word_size); 2110 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 2111 2112 MetaWord* mem = NULL; 2113 2114 // If a chunk was available, add it to the in-use chunk list 2115 // and do an allocation from it. 2116 if (next != NULL) { 2117 // Add to this manager's list of chunks in use. 2118 add_chunk(next, false); 2119 mem = next->allocate(word_size); 2120 } 2121 2122 // Track metaspace memory usage statistic. 2123 track_metaspace_memory_usage(); 2124 2125 return mem; 2126 } 2127 2128 void SpaceManager::print_on(outputStream* st) const { 2129 2130 for (ChunkIndex i = ZeroIndex; 2131 i < NumberOfInUseLists ; 2132 i = next_chunk_index(i) ) { 2133 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT, 2134 chunks_in_use(i), 2135 chunks_in_use(i) == NULL ? 
0 : chunks_in_use(i)->word_size()); 2136 } 2137 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT 2138 " Humongous " SIZE_FORMAT, 2139 sum_waste_in_chunks_in_use(SmallIndex), 2140 sum_waste_in_chunks_in_use(MediumIndex), 2141 sum_waste_in_chunks_in_use(HumongousIndex)); 2142 // block free lists 2143 if (block_freelists() != NULL) { 2144 st->print_cr("total in block free lists " SIZE_FORMAT, 2145 block_freelists()->total_size()); 2146 } 2147 } 2148 2149 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, 2150 Mutex* lock) : 2151 _mdtype(mdtype), 2152 _allocated_blocks_words(0), 2153 _allocated_chunks_words(0), 2154 _allocated_chunks_count(0), 2155 _lock(lock) 2156 { 2157 initialize(); 2158 } 2159 2160 void SpaceManager::inc_size_metrics(size_t words) { 2161 assert_lock_strong(SpaceManager::expand_lock()); 2162 // Total of allocated Metachunks and allocated Metachunks count 2163 // for each SpaceManager 2164 _allocated_chunks_words = _allocated_chunks_words + words; 2165 _allocated_chunks_count++; 2166 // Global total of capacity in allocated Metachunks 2167 MetaspaceAux::inc_capacity(mdtype(), words); 2168 // Global total of allocated Metablocks. 2169 // used_words_slow() includes the overhead in each 2170 // Metachunk so include it in the used when the 2171 // Metachunk is first added (so only added once per 2172 // Metachunk). 2173 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); 2174 } 2175 2176 void SpaceManager::inc_used_metrics(size_t words) { 2177 // Add to the per SpaceManager total 2178 Atomic::add_ptr(words, &_allocated_blocks_words); 2179 // Add to the global total 2180 MetaspaceAux::inc_used(mdtype(), words); 2181 } 2182 2183 void SpaceManager::dec_total_from_size_metrics() { 2184 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); 2185 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); 2186 // Also deduct the overhead per Metachunk 2187 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); 2188 } 2189 2190 void SpaceManager::initialize() { 2191 Metadebug::init_allocation_fail_alot_count(); 2192 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2193 _chunks_in_use[i] = NULL; 2194 } 2195 _current_chunk = NULL; 2196 if (TraceMetadataChunkAllocation && Verbose) { 2197 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this); 2198 } 2199 } 2200 2201 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { 2202 if (chunks == NULL) { 2203 return; 2204 } 2205 ChunkList* list = free_chunks(index); 2206 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); 2207 assert_lock_strong(SpaceManager::expand_lock()); 2208 Metachunk* cur = chunks; 2209 2210 // This returns chunks one at a time. 
If a new
2211 // class List can be created that is a base class
2212 // of FreeList, then something like FreeList::prepend()
2213 // could be used in place of this loop
2214 while (cur != NULL) {
2215 assert(cur->container() != NULL, "Container should have been set");
2216 cur->container()->dec_container_count();
2217 // Capture the next link before it is changed
2218 // by the call to return_chunk_at_head();
2219 Metachunk* next = cur->next();
2220 DEBUG_ONLY(cur->set_is_tagged_free(true);)
2221 list->return_chunk_at_head(cur);
2222 cur = next;
2223 }
2224 }
2225
2226 SpaceManager::~SpaceManager() {
2227 // The call to sum_capacity_in_chunks_in_use() below takes this->_lock, which can't be done while holding expand_lock()
2228 assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2229 err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2230 " allocated_chunks_words() " SIZE_FORMAT,
2231 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
2232
2233 MutexLockerEx fcl(SpaceManager::expand_lock(),
2234 Mutex::_no_safepoint_check_flag);
2235
2236 chunk_manager()->slow_locked_verify();
2237
2238 dec_total_from_size_metrics();
2239
2240 if (TraceMetadataChunkAllocation && Verbose) {
2241 gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
2242 locked_print_chunks_in_use_on(gclog_or_tty);
2243 }
2244
2245 // Do not mangle freed Metachunks. The chunk size inside Metachunks
2246 // is still needed during the freeing of VirtualSpaceNodes.
2247
2248 // Have to update before the chunks_in_use lists are emptied
2249 // below.
2250 chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
2251 sum_count_in_chunks_in_use());
2252
2253 // Add all the chunks in use by this space manager
2254 // to the global list of free chunks.
2255
2256 // Follow each list of chunks-in-use and add them to the
2257 // free lists. Each list is NULL terminated.
2258
2259 for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
2260 if (TraceMetadataChunkAllocation && Verbose) {
2261 gclog_or_tty->print_cr("returned %d %s chunks to freelist",
2262 sum_count_in_chunks_in_use(i),
2263 chunk_size_name(i));
2264 }
2265 Metachunk* chunks = chunks_in_use(i);
2266 chunk_manager()->return_chunks(i, chunks);
2267 set_chunks_in_use(i, NULL);
2268 if (TraceMetadataChunkAllocation && Verbose) {
2269 gclog_or_tty->print_cr("updated freelist count %d %s",
2270 chunk_manager()->free_chunks(i)->count(),
2271 chunk_size_name(i));
2272 }
2273 assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
2274 }
2275
2276 // The medium chunk case may be optimized by passing the head and
2277 // tail of the medium chunk list to add_at_head(). The tail is often
2278 // the current chunk but there are probably exceptions.
2279
2280 // Humongous chunks
2281 if (TraceMetadataChunkAllocation && Verbose) {
2282 gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
2283 sum_count_in_chunks_in_use(HumongousIndex),
2284 chunk_size_name(HumongousIndex));
2285 gclog_or_tty->print("Humongous chunk dictionary: ");
2286 }
2287 // Humongous chunks are never the current chunk.
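// Walk the humongous list: tag each chunk free, decrement its container's
// chunk count, and return the chunk to the humongous dictionary.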
2288 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex); 2289 2290 while (humongous_chunks != NULL) { 2291 #ifdef ASSERT 2292 humongous_chunks->set_is_tagged_free(true); 2293 #endif 2294 if (TraceMetadataChunkAllocation && Verbose) { 2295 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", 2296 humongous_chunks, 2297 humongous_chunks->word_size()); 2298 } 2299 assert(humongous_chunks->word_size() == (size_t) 2300 align_size_up(humongous_chunks->word_size(), 2301 smallest_chunk_size()), 2302 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT 2303 " granularity %d", 2304 humongous_chunks->word_size(), smallest_chunk_size())); 2305 Metachunk* next_humongous_chunks = humongous_chunks->next(); 2306 humongous_chunks->container()->dec_container_count(); 2307 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks); 2308 humongous_chunks = next_humongous_chunks; 2309 } 2310 if (TraceMetadataChunkAllocation && Verbose) { 2311 gclog_or_tty->cr(); 2312 gclog_or_tty->print_cr("updated dictionary count %d %s", 2313 chunk_manager()->humongous_dictionary()->total_count(), 2314 chunk_size_name(HumongousIndex)); 2315 } 2316 chunk_manager()->slow_locked_verify(); 2317 } 2318 2319 const char* SpaceManager::chunk_size_name(ChunkIndex index) const { 2320 switch (index) { 2321 case SpecializedIndex: 2322 return "Specialized"; 2323 case SmallIndex: 2324 return "Small"; 2325 case MediumIndex: 2326 return "Medium"; 2327 case HumongousIndex: 2328 return "Humongous"; 2329 default: 2330 return NULL; 2331 } 2332 } 2333 2334 ChunkIndex ChunkManager::list_index(size_t size) { 2335 switch (size) { 2336 case SpecializedChunk: 2337 assert(SpecializedChunk == ClassSpecializedChunk, 2338 "Need branch for ClassSpecializedChunk"); 2339 return SpecializedIndex; 2340 case SmallChunk: 2341 case ClassSmallChunk: 2342 return SmallIndex; 2343 case MediumChunk: 2344 case ClassMediumChunk: 2345 return MediumIndex; 2346 default: 2347 assert(size > MediumChunk || size > ClassMediumChunk, 2348 "Not a humongous chunk"); 2349 return HumongousIndex; 2350 } 2351 } 2352 2353 void SpaceManager::deallocate(MetaWord* p, size_t word_size) { 2354 assert_lock_strong(_lock); 2355 size_t raw_word_size = get_raw_word_size(word_size); 2356 size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size(); 2357 assert(raw_word_size >= min_size, 2358 err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size)); 2359 block_freelists()->return_block(p, raw_word_size); 2360 } 2361 2362 // Adds a chunk to the list of chunks in use. 2363 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) { 2364 2365 assert(new_chunk != NULL, "Should not be NULL"); 2366 assert(new_chunk->next() == NULL, "Should not be on a list"); 2367 2368 new_chunk->reset_empty(); 2369 2370 // Find the correct list and and set the current 2371 // chunk for that list. 2372 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size()); 2373 2374 if (index != HumongousIndex) { 2375 retire_current_chunk(); 2376 set_current_chunk(new_chunk); 2377 new_chunk->set_next(chunks_in_use(index)); 2378 set_chunks_in_use(index, new_chunk); 2379 } else { 2380 // For null class loader data and DumpSharedSpaces, the first chunk isn't 2381 // small, so small will be null. Link this first chunk as the current 2382 // chunk. 2383 if (make_current) { 2384 // Set as the current chunk but otherwise treat as a humongous chunk. 2385 set_current_chunk(new_chunk); 2386 } 2387 // Link at head. 
The _current_chunk only points to a humongous chunk for
2388 // the null class loader metaspace (class and data virtual space managers),
2389 // so for any other humongous chunks it will not point to the tail
2390 // of the humongous chunks list.
2391 new_chunk->set_next(chunks_in_use(HumongousIndex));
2392 set_chunks_in_use(HumongousIndex, new_chunk);
2393
2394 assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2395 }
2396
2397 // Add to the running sum of capacity
2398 inc_size_metrics(new_chunk->word_size());
2399
2400 assert(new_chunk->is_empty(), "Not ready for reuse");
2401 if (TraceMetadataChunkAllocation && Verbose) {
2402 gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
2403 sum_count_in_chunks_in_use());
2404 new_chunk->print_on(gclog_or_tty);
2405 chunk_manager()->locked_print_free_chunks(gclog_or_tty);
2406 }
2407 }
2408
2409 void SpaceManager::retire_current_chunk() {
2410 if (current_chunk() != NULL) {
2411 size_t remaining_words = current_chunk()->free_word_size();
2412 if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
2413 block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
2414 inc_used_metrics(remaining_words);
2415 }
2416 }
2417 }
2418
2419 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
2420 size_t grow_chunks_by_words) {
2421 // Get a chunk from the chunk freelist
2422 Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
2423
2424 if (next == NULL) {
2425 next = vs_list()->get_new_chunk(word_size,
2426 grow_chunks_by_words,
2427 medium_chunk_bunch());
2428 }
2429
2430 if (TraceMetadataHumongousAllocation && next != NULL &&
2431 SpaceManager::is_humongous(next->word_size())) {
2432 gclog_or_tty->print_cr(" new humongous chunk word size "
2433 PTR_FORMAT, next->word_size());
2434 }
2435
2436 return next;
2437 }
2438
2439 MetaWord* SpaceManager::allocate(size_t word_size) {
2440 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2441
2442 size_t raw_word_size = get_raw_word_size(word_size);
2443 BlockFreelist* fl = block_freelists();
2444 MetaWord* p = NULL;
2445 // Allocation from the dictionary is expensive in the sense that
2446 // the dictionary has to be searched for a size. Don't allocate
2447 // from the dictionary until it starts to get fat. Is this
2448 // a reasonable policy? Maybe a skinny dictionary is fast enough
2449 // for allocations. Do some profiling. JJJ
2450 if (fl->total_size() > allocation_from_dictionary_limit) {
2451 p = fl->get_block(raw_word_size);
2452 }
2453 if (p == NULL) {
2454 p = allocate_work(raw_word_size);
2455 }
2456
2457 return p;
2458 }
2459
2460 // Returns the address of the space allocated for "word_size".
2461 // This method does not know about blocks (Metablocks)
2462 MetaWord* SpaceManager::allocate_work(size_t word_size) {
2463 assert_lock_strong(_lock);
2464 #ifdef ASSERT
2465 if (Metadebug::test_metadata_failure()) {
2466 return NULL;
2467 }
2468 #endif
2469 // Is there space in the current chunk?
2470 MetaWord* result = NULL;
2471
2472 // For DumpSharedSpaces, only allocate out of the current chunk which is
2473 // never null because we gave it the size we wanted. Caller reports out
2474 // of memory if this returns null.
2475 if (DumpSharedSpaces) { 2476 assert(current_chunk() != NULL, "should never happen"); 2477 inc_used_metrics(word_size); 2478 return current_chunk()->allocate(word_size); // caller handles null result 2479 } 2480 2481 if (current_chunk() != NULL) { 2482 result = current_chunk()->allocate(word_size); 2483 } 2484 2485 if (result == NULL) { 2486 result = grow_and_allocate(word_size); 2487 } 2488 2489 if (result != NULL) { 2490 inc_used_metrics(word_size); 2491 assert(result != (MetaWord*) chunks_in_use(MediumIndex), 2492 "Head of the list is being allocated"); 2493 } 2494 2495 return result; 2496 } 2497 2498 void SpaceManager::verify() { 2499 // If there are blocks in the dictionary, then 2500 // verification of chunks does not work since 2501 // being in the dictionary alters a chunk. 2502 if (block_freelists()->total_size() == 0) { 2503 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2504 Metachunk* curr = chunks_in_use(i); 2505 while (curr != NULL) { 2506 curr->verify(); 2507 verify_chunk_size(curr); 2508 curr = curr->next(); 2509 } 2510 } 2511 } 2512 } 2513 2514 void SpaceManager::verify_chunk_size(Metachunk* chunk) { 2515 assert(is_humongous(chunk->word_size()) || 2516 chunk->word_size() == medium_chunk_size() || 2517 chunk->word_size() == small_chunk_size() || 2518 chunk->word_size() == specialized_chunk_size(), 2519 "Chunk size is wrong"); 2520 return; 2521 } 2522 2523 #ifdef ASSERT 2524 void SpaceManager::verify_allocated_blocks_words() { 2525 // Verification is only guaranteed at a safepoint. 2526 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(), 2527 "Verification can fail if the applications is running"); 2528 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(), 2529 err_msg("allocation total is not consistent " SIZE_FORMAT 2530 " vs " SIZE_FORMAT, 2531 allocated_blocks_words(), sum_used_in_chunks_in_use())); 2532 } 2533 2534 #endif 2535 2536 void SpaceManager::dump(outputStream* const out) const { 2537 size_t curr_total = 0; 2538 size_t waste = 0; 2539 uint i = 0; 2540 size_t used = 0; 2541 size_t capacity = 0; 2542 2543 // Add up statistics for all chunks in this SpaceManager. 2544 for (ChunkIndex index = ZeroIndex; 2545 index < NumberOfInUseLists; 2546 index = next_chunk_index(index)) { 2547 for (Metachunk* curr = chunks_in_use(index); 2548 curr != NULL; 2549 curr = curr->next()) { 2550 out->print("%d) ", i++); 2551 curr->print_on(out); 2552 curr_total += curr->word_size(); 2553 used += curr->used_word_size(); 2554 capacity += curr->word_size(); 2555 waste += curr->free_word_size() + curr->overhead();; 2556 } 2557 } 2558 2559 if (TraceMetadataChunkAllocation && Verbose) { 2560 block_freelists()->print_on(out); 2561 } 2562 2563 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size(); 2564 // Free space isn't wasted. 
2565 waste -= free; 2566 2567 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT 2568 " free " SIZE_FORMAT " capacity " SIZE_FORMAT 2569 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste); 2570 } 2571 2572 #ifndef PRODUCT 2573 void SpaceManager::mangle_freed_chunks() { 2574 for (ChunkIndex index = ZeroIndex; 2575 index < NumberOfInUseLists; 2576 index = next_chunk_index(index)) { 2577 for (Metachunk* curr = chunks_in_use(index); 2578 curr != NULL; 2579 curr = curr->next()) { 2580 curr->mangle(); 2581 } 2582 } 2583 } 2584 #endif // PRODUCT 2585 2586 // MetaspaceAux 2587 2588 2589 size_t MetaspaceAux::_capacity_words[] = {0, 0}; 2590 size_t MetaspaceAux::_used_words[] = {0, 0}; 2591 2592 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) { 2593 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2594 return list == NULL ? 0 : list->free_bytes(); 2595 } 2596 2597 size_t MetaspaceAux::free_bytes() { 2598 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType); 2599 } 2600 2601 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) { 2602 assert_lock_strong(SpaceManager::expand_lock()); 2603 assert(words <= capacity_words(mdtype), 2604 err_msg("About to decrement below 0: words " SIZE_FORMAT 2605 " is greater than _capacity_words[%u] " SIZE_FORMAT, 2606 words, mdtype, capacity_words(mdtype))); 2607 _capacity_words[mdtype] -= words; 2608 } 2609 2610 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) { 2611 assert_lock_strong(SpaceManager::expand_lock()); 2612 // Needs to be atomic 2613 _capacity_words[mdtype] += words; 2614 } 2615 2616 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) { 2617 assert(words <= used_words(mdtype), 2618 err_msg("About to decrement below 0: words " SIZE_FORMAT 2619 " is greater than _used_words[%u] " SIZE_FORMAT, 2620 words, mdtype, used_words(mdtype))); 2621 // For CMS deallocation of the Metaspaces occurs during the 2622 // sweep which is a concurrent phase. Protection by the expand_lock() 2623 // is not enough since allocation is on a per Metaspace basis 2624 // and protected by the Metaspace lock. 2625 jlong minus_words = (jlong) - (jlong) words; 2626 Atomic::add_ptr(minus_words, &_used_words[mdtype]); 2627 } 2628 2629 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) { 2630 // _used_words tracks allocations for 2631 // each piece of metadata. Those allocations are 2632 // generally done concurrently by different application 2633 // threads so must be done atomically. 
2634 Atomic::add_ptr(words, &_used_words[mdtype]); 2635 } 2636 2637 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2638 size_t used = 0; 2639 ClassLoaderDataGraphMetaspaceIterator iter; 2640 while (iter.repeat()) { 2641 Metaspace* msp = iter.get_next(); 2642 // Sum allocated_blocks_words for each metaspace 2643 if (msp != NULL) { 2644 used += msp->used_words_slow(mdtype); 2645 } 2646 } 2647 return used * BytesPerWord; 2648 } 2649 2650 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2651 size_t free = 0; 2652 ClassLoaderDataGraphMetaspaceIterator iter; 2653 while (iter.repeat()) { 2654 Metaspace* msp = iter.get_next(); 2655 if (msp != NULL) { 2656 free += msp->free_words_slow(mdtype); 2657 } 2658 } 2659 return free * BytesPerWord; 2660 } 2661 2662 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2663 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2664 return 0; 2665 } 2666 // Don't count the space in the freelists. That space will be 2667 // added to the capacity calculation as needed. 2668 size_t capacity = 0; 2669 ClassLoaderDataGraphMetaspaceIterator iter; 2670 while (iter.repeat()) { 2671 Metaspace* msp = iter.get_next(); 2672 if (msp != NULL) { 2673 capacity += msp->capacity_words_slow(mdtype); 2674 } 2675 } 2676 return capacity * BytesPerWord; 2677 } 2678 2679 size_t MetaspaceAux::capacity_bytes_slow() { 2680 #ifdef PRODUCT 2681 // Use capacity_bytes() in PRODUCT instead of this function. 2682 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2683 #endif 2684 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2685 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2686 assert(capacity_bytes() == class_capacity + non_class_capacity, 2687 err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT 2688 " class_capacity + non_class_capacity " SIZE_FORMAT 2689 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2690 capacity_bytes(), class_capacity + non_class_capacity, 2691 class_capacity, non_class_capacity)); 2692 2693 return class_capacity + non_class_capacity; 2694 } 2695 2696 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2697 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2698 return list == NULL ? 0 : list->reserved_bytes(); 2699 } 2700 2701 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2702 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2703 return list == NULL ? 
0 : list->committed_bytes(); 2704 } 2705 2706 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2707 2708 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2709 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2710 if (chunk_manager == NULL) { 2711 return 0; 2712 } 2713 chunk_manager->slow_verify(); 2714 return chunk_manager->free_chunks_total_words(); 2715 } 2716 2717 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2718 return free_chunks_total_words(mdtype) * BytesPerWord; 2719 } 2720 2721 size_t MetaspaceAux::free_chunks_total_words() { 2722 return free_chunks_total_words(Metaspace::ClassType) + 2723 free_chunks_total_words(Metaspace::NonClassType); 2724 } 2725 2726 size_t MetaspaceAux::free_chunks_total_bytes() { 2727 return free_chunks_total_words() * BytesPerWord; 2728 } 2729 2730 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 2731 return Metaspace::get_chunk_manager(mdtype) != NULL; 2732 } 2733 2734 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 2735 if (!has_chunk_free_list(mdtype)) { 2736 return MetaspaceChunkFreeListSummary(); 2737 } 2738 2739 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 2740 return cm->chunk_free_list_summary(); 2741 } 2742 2743 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2744 gclog_or_tty->print(", [Metaspace:"); 2745 if (PrintGCDetails && Verbose) { 2746 gclog_or_tty->print(" " SIZE_FORMAT 2747 "->" SIZE_FORMAT 2748 "(" SIZE_FORMAT ")", 2749 prev_metadata_used, 2750 used_bytes(), 2751 reserved_bytes()); 2752 } else { 2753 gclog_or_tty->print(" " SIZE_FORMAT "K" 2754 "->" SIZE_FORMAT "K" 2755 "(" SIZE_FORMAT "K)", 2756 prev_metadata_used/K, 2757 used_bytes()/K, 2758 reserved_bytes()/K); 2759 } 2760 2761 gclog_or_tty->print("]"); 2762 } 2763 2764 // This is printed when PrintGCDetails 2765 void MetaspaceAux::print_on(outputStream* out) { 2766 Metaspace::MetadataType nct = Metaspace::NonClassType; 2767 2768 out->print_cr(" Metaspace " 2769 "used " SIZE_FORMAT "K, " 2770 "capacity " SIZE_FORMAT "K, " 2771 "committed " SIZE_FORMAT "K, " 2772 "reserved " SIZE_FORMAT "K", 2773 used_bytes()/K, 2774 capacity_bytes()/K, 2775 committed_bytes()/K, 2776 reserved_bytes()/K); 2777 2778 if (Metaspace::using_class_space()) { 2779 Metaspace::MetadataType ct = Metaspace::ClassType; 2780 out->print_cr(" class space " 2781 "used " SIZE_FORMAT "K, " 2782 "capacity " SIZE_FORMAT "K, " 2783 "committed " SIZE_FORMAT "K, " 2784 "reserved " SIZE_FORMAT "K", 2785 used_bytes(ct)/K, 2786 capacity_bytes(ct)/K, 2787 committed_bytes(ct)/K, 2788 reserved_bytes(ct)/K); 2789 } 2790 } 2791 2792 // Print information for class space and data space separately. 2793 // This is almost the same as above. 
2794 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2795 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2796 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2797 size_t used_bytes = used_bytes_slow(mdtype); 2798 size_t free_bytes = free_bytes_slow(mdtype); 2799 size_t used_and_free = used_bytes + free_bytes + 2800 free_chunks_capacity_bytes; 2801 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2802 "K + unused in chunks " SIZE_FORMAT "K + " 2803 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2804 "K capacity in allocated chunks " SIZE_FORMAT "K", 2805 used_bytes / K, 2806 free_bytes / K, 2807 free_chunks_capacity_bytes / K, 2808 used_and_free / K, 2809 capacity_bytes / K); 2810 // Accounting can only be correct if we got the values during a safepoint 2811 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2812 } 2813 2814 // Print total fragmentation for class metaspaces 2815 void MetaspaceAux::print_class_waste(outputStream* out) { 2816 assert(Metaspace::using_class_space(), "class metaspace not used"); 2817 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2818 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2819 ClassLoaderDataGraphMetaspaceIterator iter; 2820 while (iter.repeat()) { 2821 Metaspace* msp = iter.get_next(); 2822 if (msp != NULL) { 2823 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2824 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2825 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2826 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2827 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2828 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2829 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2830 } 2831 } 2832 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2833 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2834 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2835 "large count " SIZE_FORMAT, 2836 cls_specialized_count, cls_specialized_waste, 2837 cls_small_count, cls_small_waste, 2838 cls_medium_count, cls_medium_waste, cls_humongous_count); 2839 } 2840 2841 // Print total fragmentation for data and class metaspaces separately 2842 void MetaspaceAux::print_waste(outputStream* out) { 2843 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2844 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2845 2846 ClassLoaderDataGraphMetaspaceIterator iter; 2847 while (iter.repeat()) { 2848 Metaspace* msp = iter.get_next(); 2849 if (msp != NULL) { 2850 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2851 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2852 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2853 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2854 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2855 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2856 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2857 } 2858 } 2859 out->print_cr("Total fragmentation waste (words) doesn't count 
free space"); 2860 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2861 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2862 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2863 "large count " SIZE_FORMAT, 2864 specialized_count, specialized_waste, small_count, 2865 small_waste, medium_count, medium_waste, humongous_count); 2866 if (Metaspace::using_class_space()) { 2867 print_class_waste(out); 2868 } 2869 } 2870 2871 // Dump global metaspace things from the end of ClassLoaderDataGraph 2872 void MetaspaceAux::dump(outputStream* out) { 2873 out->print_cr("All Metaspace:"); 2874 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2875 out->print("class space: "); print_on(out, Metaspace::ClassType); 2876 print_waste(out); 2877 } 2878 2879 void MetaspaceAux::verify_free_chunks() { 2880 Metaspace::chunk_manager_metadata()->verify(); 2881 if (Metaspace::using_class_space()) { 2882 Metaspace::chunk_manager_class()->verify(); 2883 } 2884 } 2885 2886 void MetaspaceAux::verify_capacity() { 2887 #ifdef ASSERT 2888 size_t running_sum_capacity_bytes = capacity_bytes(); 2889 // For purposes of the running sum of capacity, verify against capacity 2890 size_t capacity_in_use_bytes = capacity_bytes_slow(); 2891 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 2892 err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT 2893 " capacity_bytes_slow()" SIZE_FORMAT, 2894 running_sum_capacity_bytes, capacity_in_use_bytes)); 2895 for (Metaspace::MetadataType i = Metaspace::ClassType; 2896 i < Metaspace:: MetadataTypeCount; 2897 i = (Metaspace::MetadataType)(i + 1)) { 2898 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 2899 assert(capacity_bytes(i) == capacity_in_use_bytes, 2900 err_msg("capacity_bytes(%u) " SIZE_FORMAT 2901 " capacity_bytes_slow(%u)" SIZE_FORMAT, 2902 i, capacity_bytes(i), i, capacity_in_use_bytes)); 2903 } 2904 #endif 2905 } 2906 2907 void MetaspaceAux::verify_used() { 2908 #ifdef ASSERT 2909 size_t running_sum_used_bytes = used_bytes(); 2910 // For purposes of the running sum of used, verify against used 2911 size_t used_in_use_bytes = used_bytes_slow(); 2912 assert(used_bytes() == used_in_use_bytes, 2913 err_msg("used_bytes() " SIZE_FORMAT 2914 " used_bytes_slow()" SIZE_FORMAT, 2915 used_bytes(), used_in_use_bytes)); 2916 for (Metaspace::MetadataType i = Metaspace::ClassType; 2917 i < Metaspace:: MetadataTypeCount; 2918 i = (Metaspace::MetadataType)(i + 1)) { 2919 size_t used_in_use_bytes = used_bytes_slow(i); 2920 assert(used_bytes(i) == used_in_use_bytes, 2921 err_msg("used_bytes(%u) " SIZE_FORMAT 2922 " used_bytes_slow(%u)" SIZE_FORMAT, 2923 i, used_bytes(i), i, used_in_use_bytes)); 2924 } 2925 #endif 2926 } 2927 2928 void MetaspaceAux::verify_metrics() { 2929 verify_capacity(); 2930 verify_used(); 2931 } 2932 2933 2934 // Metaspace methods 2935 2936 size_t Metaspace::_first_chunk_word_size = 0; 2937 size_t Metaspace::_first_class_chunk_word_size = 0; 2938 2939 size_t Metaspace::_commit_alignment = 0; 2940 size_t Metaspace::_reserve_alignment = 0; 2941 2942 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 2943 initialize(lock, type); 2944 } 2945 2946 Metaspace::~Metaspace() { 2947 delete _vsm; 2948 if (using_class_space()) { 2949 delete _class_vsm; 2950 } 2951 } 2952 2953 VirtualSpaceList* Metaspace::_space_list = NULL; 2954 VirtualSpaceList* Metaspace::_class_space_list = NULL; 2955 2956 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 2957 ChunkManager* Metaspace::_chunk_manager_class = NULL; 2958 2959 #define 
VIRTUALSPACEMULTIPLIER 2 2960 2961 #ifdef _LP64 2962 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 2963 2964 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 2965 // Figure out the narrow_klass_base and the narrow_klass_shift. The 2966 // narrow_klass_base is the lower of the metaspace base and the cds base 2967 // (if cds is enabled). The narrow_klass_shift depends on the distance 2968 // between the lower base and higher address. 2969 address lower_base; 2970 address higher_address; 2971 #if INCLUDE_CDS 2972 if (UseSharedSpaces) { 2973 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2974 (address)(metaspace_base + compressed_class_space_size())); 2975 lower_base = MIN2(metaspace_base, cds_base); 2976 } else 2977 #endif 2978 { 2979 higher_address = metaspace_base + compressed_class_space_size(); 2980 lower_base = metaspace_base; 2981 2982 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 2983 // If compressed class space fits in lower 32G, we don't need a base. 2984 if (higher_address <= (address)klass_encoding_max) { 2985 lower_base = 0; // Effectively lower base is zero. 2986 } 2987 } 2988 2989 Universe::set_narrow_klass_base(lower_base); 2990 2991 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 2992 Universe::set_narrow_klass_shift(0); 2993 } else { 2994 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 2995 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 2996 } 2997 } 2998 2999 #if INCLUDE_CDS 3000 // Return TRUE if the specified metaspace_base and cds_base are close enough 3001 // to work with compressed klass pointers. 3002 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 3003 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 3004 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3005 address lower_base = MIN2((address)metaspace_base, cds_base); 3006 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 3007 (address)(metaspace_base + compressed_class_space_size())); 3008 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 3009 } 3010 #endif 3011 3012 // Try to allocate the metaspace at the requested addr. 3013 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 3014 assert(using_class_space(), "called improperly"); 3015 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 3016 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 3017 "Metaspace size is too big"); 3018 assert_is_ptr_aligned(requested_addr, _reserve_alignment); 3019 assert_is_ptr_aligned(cds_base, _reserve_alignment); 3020 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); 3021 3022 // Don't use large pages for the class space. 3023 bool large_pages = false; 3024 3025 #ifndef AARCH64 3026 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 3027 _reserve_alignment, 3028 large_pages, 3029 requested_addr); 3030 #else // AARCH64 3031 ReservedSpace metaspace_rs; 3032 3033 // Our compressed klass pointers may fit nicely into the lower 32 3034 // bits. 3035 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) { 3036 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3037 _reserve_alignment, 3038 large_pages, 3039 requested_addr); 3040 } 3041 3042 if (! 
metaspace_rs.is_reserved()) { 3043 // Try to align metaspace so that we can decode a compressed klass 3044 // with a single MOVK instruction. We can do this iff the 3045 // compressed class base is a multiple of 4G. 3046 for (char *a = (char*)align_ptr_up(requested_addr, 4*G); 3047 a < (char*)(1024*G); 3048 a += 4*G) { 3049 3050 #if INCLUDE_CDS 3051 if (UseSharedSpaces 3052 && ! can_use_cds_with_metaspace_addr(a, cds_base)) { 3053 // We failed to find an aligned base that will reach. Fall 3054 // back to using our requested addr. 3055 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3056 _reserve_alignment, 3057 large_pages, 3058 requested_addr); 3059 break; 3060 } 3061 #endif 3062 3063 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3064 _reserve_alignment, 3065 large_pages, 3066 a); 3067 if (metaspace_rs.is_reserved()) 3068 break; 3069 } 3070 } 3071 3072 #endif // AARCH64 3073 3074 if (!metaspace_rs.is_reserved()) { 3075 #if INCLUDE_CDS 3076 if (UseSharedSpaces) { 3077 size_t increment = align_size_up(1*G, _reserve_alignment); 3078 3079 // Keep trying to allocate the metaspace, increasing the requested_addr 3080 // by 1GB each time, until we reach an address that will no longer allow 3081 // use of CDS with compressed klass pointers. 3082 char *addr = requested_addr; 3083 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3084 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3085 addr = addr + increment; 3086 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3087 _reserve_alignment, large_pages, addr); 3088 } 3089 } 3090 #endif 3091 // If no successful allocation then try to allocate the space anywhere. If 3092 // that fails then OOM doom. At this point we cannot try allocating the 3093 // metaspace as if UseCompressedClassPointers is off because too much 3094 // initialization has happened that depends on UseCompressedClassPointers. 3095 // So, UseCompressedClassPointers cannot be turned off at this point. 3096 if (!metaspace_rs.is_reserved()) { 3097 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3098 _reserve_alignment, large_pages); 3099 if (!metaspace_rs.is_reserved()) { 3100 vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", 3101 compressed_class_space_size())); 3102 } 3103 } 3104 } 3105 3106 // If we got here then the metaspace got allocated. 3107 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3108 3109 #if INCLUDE_CDS 3110 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3111 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3112 FileMapInfo::stop_sharing_and_unmap( 3113 "Could not allocate metaspace at a compatible address"); 3114 } 3115 #endif 3116 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3117 UseSharedSpaces ? (address)cds_base : 0); 3118 3119 initialize_class_space(metaspace_rs); 3120 3121 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { 3122 gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT, 3123 Universe::narrow_klass_base(), Universe::narrow_klass_shift()); 3124 gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, 3125 compressed_class_space_size(), metaspace_rs.base(), requested_addr); 3126 } 3127 } 3128 3129 // For UseCompressedClassPointers the class space is reserved above the top of 3130 // the Java heap. 
The argument passed in is at the base of the compressed space. 3131 void Metaspace::initialize_class_space(ReservedSpace rs) { 3132 // The reserved space size may be bigger because of alignment, esp with UseLargePages 3133 assert(rs.size() >= CompressedClassSpaceSize, 3134 err_msg(SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize)); 3135 assert(using_class_space(), "Must be using class space"); 3136 _class_space_list = new VirtualSpaceList(rs); 3137 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk); 3138 3139 if (!_class_space_list->initialization_succeeded()) { 3140 vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); 3141 } 3142 } 3143 3144 #endif 3145 3146 void Metaspace::ergo_initialize() { 3147 if (DumpSharedSpaces) { 3148 // Using large pages when dumping the shared archive is currently not implemented. 3149 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false); 3150 } 3151 3152 size_t page_size = os::vm_page_size(); 3153 if (UseLargePages && UseLargePagesInMetaspace) { 3154 page_size = os::large_page_size(); 3155 } 3156 3157 _commit_alignment = page_size; 3158 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); 3159 3160 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will 3161 // override if MaxMetaspaceSize was set on the command line or not. 3162 // This information is needed later to conform to the specification of the 3163 // java.lang.management.MemoryUsage API. 3164 // 3165 // Ideally, we would be able to set the default value of MaxMetaspaceSize in 3166 // globals.hpp to the aligned value, but this is not possible, since the 3167 // alignment depends on other flags being parsed. 3168 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment); 3169 3170 if (MetaspaceSize > MaxMetaspaceSize) { 3171 MetaspaceSize = MaxMetaspaceSize; 3172 } 3173 3174 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment); 3175 3176 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); 3177 3178 if (MetaspaceSize < 256*K) { 3179 vm_exit_during_initialization("Too small initial Metaspace size"); 3180 } 3181 3182 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment); 3183 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment); 3184 3185 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment); 3186 set_compressed_class_space_size(CompressedClassSpaceSize); 3187 } 3188 3189 void Metaspace::global_initialize() { 3190 MetaspaceGC::initialize(); 3191 3192 // Initialize the alignment for shared spaces. 3193 int max_alignment = os::vm_allocation_granularity(); 3194 size_t cds_total = 0; 3195 3196 MetaspaceShared::set_max_alignment(max_alignment); 3197 3198 if (DumpSharedSpaces) { 3199 #if INCLUDE_CDS 3200 MetaspaceShared::estimate_regions_size(); 3201 3202 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 3203 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); 3204 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 3205 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 3206 3207 // make sure SharedReadOnlySize and SharedReadWriteSize are not less than 3208 // the minimum values. 
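// When dumping the shared archive, a SharedReadOnlySize or SharedReadWriteSize
// below these minimums aborts via report_out_of_shared_space().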
3209 if (SharedReadOnlySize < MetaspaceShared::min_ro_size){ 3210 report_out_of_shared_space(SharedReadOnly); 3211 } 3212 3213 if (SharedReadWriteSize < MetaspaceShared::min_rw_size){ 3214 report_out_of_shared_space(SharedReadWrite); 3215 } 3216 3217 // the min_misc_data_size and min_misc_code_size estimates are based on 3218 // MetaspaceShared::generate_vtable_methods(). 3219 // The minimum size only accounts for the vtable methods. Any size less than the 3220 // minimum required size would cause vm crash when allocating the vtable methods. 3221 uint min_misc_data_size = align_size_up( 3222 MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size * sizeof(void*), max_alignment); 3223 3224 if (SharedMiscDataSize < min_misc_data_size) { 3225 report_out_of_shared_space(SharedMiscData); 3226 } 3227 3228 uintx min_misc_code_size = align_size_up( 3229 (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) * 3230 (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size, 3231 max_alignment); 3232 3233 if (SharedMiscCodeSize < min_misc_code_size) { 3234 report_out_of_shared_space(SharedMiscCode); 3235 } 3236 3237 // Initialize with the sum of the shared space sizes. The read-only 3238 // and read write metaspace chunks will be allocated out of this and the 3239 // remainder is the misc code and data chunks. 3240 cds_total = FileMapInfo::shared_spaces_size(); 3241 cds_total = align_size_up(cds_total, _reserve_alignment); 3242 _space_list = new VirtualSpaceList(cds_total/wordSize); 3243 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3244 3245 if (!_space_list->initialization_succeeded()) { 3246 vm_exit_during_initialization("Unable to dump shared archive.", NULL); 3247 } 3248 3249 #ifdef _LP64 3250 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) { 3251 vm_exit_during_initialization("Unable to dump shared archive.", 3252 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space (" 3253 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed " 3254 "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(), 3255 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax)); 3256 } 3257 3258 // Set the compressed klass pointer base so that decoding of these pointers works 3259 // properly when creating the shared archive. 3260 assert(UseCompressedOops && UseCompressedClassPointers, 3261 "UseCompressedOops and UseCompressedClassPointers must be set"); 3262 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom()); 3263 if (TraceMetavirtualspaceAllocation && Verbose) { 3264 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT, 3265 _space_list->current_virtual_space()->bottom()); 3266 } 3267 3268 Universe::set_narrow_klass_shift(0); 3269 #endif // _LP64 3270 #endif // INCLUDE_CDS 3271 } else { 3272 #if INCLUDE_CDS 3273 // If using shared space, open the file that contains the shared space 3274 // and map in the memory before initializing the rest of metaspace (so 3275 // the addresses don't conflict) 3276 address cds_address = NULL; 3277 if (UseSharedSpaces) { 3278 FileMapInfo* mapinfo = new FileMapInfo(); 3279 3280 // Open the shared archive file, read and validate the header. If 3281 // initialization fails, shared spaces [UseSharedSpaces] are 3282 // disabled and the file is closed. 
3283 // Map in the spaces now as well
3284 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
3285 cds_total = FileMapInfo::shared_spaces_size();
3286 cds_address = (address)mapinfo->region_base(0);
3287 } else {
3288 assert(!mapinfo->is_open() && !UseSharedSpaces,
3289 "archive file not closed or shared spaces not disabled.");
3290 }
3291 }
3292 #endif // INCLUDE_CDS
3293 #ifdef _LP64
3294 // If UseCompressedClassPointers is set then allocate the metaspace area
3295 // above the heap and above the CDS area (if it exists).
3296 if (using_class_space()) {
3297 if (UseSharedSpaces) {
3298 #if INCLUDE_CDS
3299 char* cds_end = (char*)(cds_address + cds_total);
3300 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
3301 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
3302 #endif
3303 } else {
3304 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
3305 allocate_metaspace_compressed_klass_ptrs(base, 0);
3306 }
3307 }
3308 #endif // _LP64
3309 
3310 // Initialize these before initializing the VirtualSpaceList
3311 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
3312 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
3313 // Make the first class chunk bigger than a medium chunk so it's not put
3314 // on the medium chunk list. The next chunk will be small and progress
3315 // from there. This size was calculated by running -version.
3316 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
3317 (CompressedClassSpaceSize/BytesPerWord)*2);
3318 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
3319 // Arbitrarily set the initial virtual space to a multiple
3320 // of the boot class loader size.
3321 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
3322 word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
3323 
3324 // Initialize the list of virtual spaces.
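// A rough worked example of the word_size passed to the list below
// (illustrative only; it assumes a 64-bit VM with the default
// InitialBootClassLoaderMetaspaceSize of 4M and a VIRTUALSPACEMULTIPLIER of 2 --
// check the actual flag and constant values in this build before relying on
// the numbers):
//   _first_chunk_word_size = 4*M / BytesPerWord(8) = 512 K words
//   word_size              = 2 * 512 K words       = 1 M words (8 MB),
// which is then rounded up to reserve_alignment_words().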
3325 _space_list = new VirtualSpaceList(word_size); 3326 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3327 3328 if (!_space_list->initialization_succeeded()) { 3329 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); 3330 } 3331 } 3332 3333 _tracer = new MetaspaceTracer(); 3334 } 3335 3336 void Metaspace::post_initialize() { 3337 MetaspaceGC::post_initialize(); 3338 } 3339 3340 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype, 3341 size_t chunk_word_size, 3342 size_t chunk_bunch) { 3343 // Get a chunk from the chunk freelist 3344 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size); 3345 if (chunk != NULL) { 3346 return chunk; 3347 } 3348 3349 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch); 3350 } 3351 3352 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 3353 3354 assert(space_list() != NULL, 3355 "Metadata VirtualSpaceList has not been initialized"); 3356 assert(chunk_manager_metadata() != NULL, 3357 "Metadata ChunkManager has not been initialized"); 3358 3359 _vsm = new SpaceManager(NonClassType, lock); 3360 if (_vsm == NULL) { 3361 return; 3362 } 3363 size_t word_size; 3364 size_t class_word_size; 3365 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size); 3366 3367 if (using_class_space()) { 3368 assert(class_space_list() != NULL, 3369 "Class VirtualSpaceList has not been initialized"); 3370 assert(chunk_manager_class() != NULL, 3371 "Class ChunkManager has not been initialized"); 3372 3373 // Allocate SpaceManager for classes. 3374 _class_vsm = new SpaceManager(ClassType, lock); 3375 if (_class_vsm == NULL) { 3376 return; 3377 } 3378 } 3379 3380 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3381 3382 // Allocate chunk for metadata objects 3383 Metachunk* new_chunk = get_initialization_chunk(NonClassType, 3384 word_size, 3385 vsm()->medium_chunk_bunch()); 3386 // For dumping shared archive, report error if allocation has failed. 3387 if (DumpSharedSpaces && new_chunk == NULL) { 3388 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + word_size * BytesPerWord); 3389 } 3390 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks"); 3391 if (new_chunk != NULL) { 3392 // Add to this manager's list of chunks in use and current_chunk(). 3393 vsm()->add_chunk(new_chunk, true); 3394 } 3395 3396 // Allocate chunk for class metadata objects 3397 if (using_class_space()) { 3398 Metachunk* class_chunk = get_initialization_chunk(ClassType, 3399 class_word_size, 3400 class_vsm()->medium_chunk_bunch()); 3401 if (class_chunk != NULL) { 3402 class_vsm()->add_chunk(class_chunk, true); 3403 } else { 3404 // For dumping shared archive, report error if allocation has failed. 3405 if (DumpSharedSpaces) { 3406 report_insufficient_metaspace(MetaspaceAux::committed_bytes() + class_word_size * BytesPerWord); 3407 } 3408 } 3409 } 3410 3411 _alloc_record_head = NULL; 3412 _alloc_record_tail = NULL; 3413 } 3414 3415 size_t Metaspace::align_word_size_up(size_t word_size) { 3416 size_t byte_size = word_size * wordSize; 3417 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize; 3418 } 3419 3420 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { 3421 // DumpSharedSpaces doesn't use class metadata area (yet) 3422 // Also, don't use class_vsm() unless UseCompressedClassPointers is true. 
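// Note: is_class_space_allocation(mdtype) is expected to be true only when
// mdtype is ClassType *and* a compressed class space is in use, so the
// dispatch below already honours both of the restrictions mentioned above.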
3423 if (is_class_space_allocation(mdtype)) { 3424 return class_vsm()->allocate(word_size); 3425 } else { 3426 return vsm()->allocate(word_size); 3427 } 3428 } 3429 3430 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 3431 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); 3432 assert(delta_bytes > 0, "Must be"); 3433 3434 size_t before = 0; 3435 size_t after = 0; 3436 MetaWord* res; 3437 bool incremented; 3438 3439 // Each thread increments the HWM at most once. Even if the thread fails to increment 3440 // the HWM, an allocation is still attempted. This is because another thread must then 3441 // have incremented the HWM and therefore the allocation might still succeed. 3442 do { 3443 incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before); 3444 res = allocate(word_size, mdtype); 3445 } while (!incremented && res == NULL); 3446 3447 if (incremented) { 3448 tracer()->report_gc_threshold(before, after, 3449 MetaspaceGCThresholdUpdater::ExpandAndAllocate); 3450 if (PrintGCDetails && Verbose) { 3451 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT 3452 " to " SIZE_FORMAT, before, after); 3453 } 3454 } 3455 3456 return res; 3457 } 3458 3459 // Space allocated in the Metaspace. This may 3460 // be across several metadata virtual spaces. 3461 char* Metaspace::bottom() const { 3462 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces"); 3463 return (char*)vsm()->current_chunk()->bottom(); 3464 } 3465 3466 size_t Metaspace::used_words_slow(MetadataType mdtype) const { 3467 if (mdtype == ClassType) { 3468 return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0; 3469 } else { 3470 return vsm()->sum_used_in_chunks_in_use(); // includes overhead! 3471 } 3472 } 3473 3474 size_t Metaspace::free_words_slow(MetadataType mdtype) const { 3475 if (mdtype == ClassType) { 3476 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0; 3477 } else { 3478 return vsm()->sum_free_in_chunks_in_use(); 3479 } 3480 } 3481 3482 // Space capacity in the Metaspace. It includes 3483 // space in the list of chunks from which allocations 3484 // have been made. Don't include space in the global freelist and 3485 // in the space available in the dictionary which 3486 // is already counted in some chunk. 3487 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const { 3488 if (mdtype == ClassType) { 3489 return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0; 3490 } else { 3491 return vsm()->sum_capacity_in_chunks_in_use(); 3492 } 3493 } 3494 3495 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const { 3496 return used_words_slow(mdtype) * BytesPerWord; 3497 } 3498 3499 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const { 3500 return capacity_words_slow(mdtype) * BytesPerWord; 3501 } 3502 3503 size_t Metaspace::allocated_blocks_bytes() const { 3504 return vsm()->allocated_blocks_bytes() + 3505 (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0); 3506 } 3507 3508 size_t Metaspace::allocated_chunks_bytes() const { 3509 return vsm()->allocated_chunks_bytes() + 3510 (using_class_space() ? 
class_vsm()->allocated_chunks_bytes() : 0); 3511 } 3512 3513 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { 3514 assert(!SafepointSynchronize::is_at_safepoint() 3515 || Thread::current()->is_VM_thread(), "should be the VM thread"); 3516 3517 if (DumpSharedSpaces && PrintSharedSpaces) { 3518 record_deallocation(ptr, vsm()->get_raw_word_size(word_size)); 3519 } 3520 3521 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); 3522 3523 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) { 3524 // Dark matter. Too small for dictionary. 3525 #ifdef ASSERT 3526 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5); 3527 #endif 3528 return; 3529 } 3530 if (is_class && using_class_space()) { 3531 class_vsm()->deallocate(ptr, word_size); 3532 } else { 3533 vsm()->deallocate(ptr, word_size); 3534 } 3535 } 3536 3537 3538 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, 3539 bool read_only, MetaspaceObj::Type type, TRAPS) { 3540 if (HAS_PENDING_EXCEPTION) { 3541 assert(false, "Should not allocate with exception pending"); 3542 return NULL; // caller does a CHECK_NULL too 3543 } 3544 3545 assert(loader_data != NULL, "Should never pass around a NULL loader_data. " 3546 "ClassLoaderData::the_null_class_loader_data() should have been used."); 3547 3548 // Allocate in metaspaces without taking out a lock, because it deadlocks 3549 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have 3550 // to revisit this for application class data sharing. 3551 if (DumpSharedSpaces) { 3552 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 3553 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 3554 MetaWord* result = space->allocate(word_size, NonClassType); 3555 if (result == NULL) { 3556 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 3557 } 3558 if (PrintSharedSpaces) { 3559 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); 3560 } 3561 3562 // Zero initialize. 3563 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0); 3564 3565 return result; 3566 } 3567 3568 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 3569 3570 // Try to allocate metadata. 3571 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 3572 3573 if (result == NULL) { 3574 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype); 3575 3576 // Allocation failed. 3577 if (is_init_completed()) { 3578 // Only start a GC if the bootstrapping has completed. 3579 3580 // Try to clean out some memory and retry. 3581 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 3582 loader_data, word_size, mdtype); 3583 } 3584 } 3585 3586 if (result == NULL) { 3587 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL); 3588 } 3589 3590 // Zero initialize. 
3591 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0); 3592 3593 return result; 3594 } 3595 3596 size_t Metaspace::class_chunk_size(size_t word_size) { 3597 assert(using_class_space(), "Has to use class space"); 3598 return class_vsm()->calc_chunk_size(word_size); 3599 } 3600 3601 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) { 3602 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype); 3603 3604 // If result is still null, we are out of memory. 3605 if (Verbose && TraceMetadataChunkAllocation) { 3606 gclog_or_tty->print_cr("Metaspace allocation failed for size " 3607 SIZE_FORMAT, word_size); 3608 if (loader_data->metaspace_or_null() != NULL) { 3609 loader_data->dump(gclog_or_tty); 3610 } 3611 MetaspaceAux::dump(gclog_or_tty); 3612 } 3613 3614 bool out_of_compressed_class_space = false; 3615 if (is_class_space_allocation(mdtype)) { 3616 Metaspace* metaspace = loader_data->metaspace_non_null(); 3617 out_of_compressed_class_space = 3618 MetaspaceAux::committed_bytes(Metaspace::ClassType) + 3619 (metaspace->class_chunk_size(word_size) * BytesPerWord) > 3620 CompressedClassSpaceSize; 3621 } 3622 3623 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 3624 const char* space_string = out_of_compressed_class_space ? 3625 "Compressed class space" : "Metaspace"; 3626 3627 report_java_out_of_memory(space_string); 3628 3629 if (JvmtiExport::should_post_resource_exhausted()) { 3630 JvmtiExport::post_resource_exhausted( 3631 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 3632 space_string); 3633 } 3634 3635 if (!is_init_completed()) { 3636 vm_exit_during_initialization("OutOfMemoryError", space_string); 3637 } 3638 3639 if (out_of_compressed_class_space) { 3640 THROW_OOP(Universe::out_of_memory_error_class_metaspace()); 3641 } else { 3642 THROW_OOP(Universe::out_of_memory_error_metaspace()); 3643 } 3644 } 3645 3646 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) { 3647 switch (mdtype) { 3648 case Metaspace::ClassType: return "Class"; 3649 case Metaspace::NonClassType: return "Metadata"; 3650 default: 3651 assert(false, err_msg("Got bad mdtype: %d", (int) mdtype)); 3652 return NULL; 3653 } 3654 } 3655 3656 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 3657 assert(DumpSharedSpaces, "sanity"); 3658 3659 int byte_size = (int)word_size * HeapWordSize; 3660 AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size); 3661 3662 if (_alloc_record_head == NULL) { 3663 _alloc_record_head = _alloc_record_tail = rec; 3664 } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) { 3665 _alloc_record_tail->_next = rec; 3666 _alloc_record_tail = rec; 3667 } else { 3668 // slow linear search, but this doesn't happen that often, and only when dumping 3669 for (AllocRecord *old = _alloc_record_head; old; old = old->_next) { 3670 if (old->_ptr == ptr) { 3671 assert(old->_type == MetaspaceObj::DeallocatedType, "sanity"); 3672 int remain_bytes = old->_byte_size - byte_size; 3673 assert(remain_bytes >= 0, "sanity"); 3674 old->_type = type; 3675 3676 if (remain_bytes == 0) { 3677 delete(rec); 3678 } else { 3679 address remain_ptr = address(ptr) + byte_size; 3680 rec->_ptr = remain_ptr; 3681 rec->_byte_size = remain_bytes; 3682 rec->_type = MetaspaceObj::DeallocatedType; 3683 rec->_next = old->_next; 3684 old->_byte_size = byte_size; 3685 old->_next = rec; 3686 } 3687 return; 3688 } 3689 } 3690 
assert(0, "reallocating a freed pointer that was not recorded"); 3691 } 3692 } 3693 3694 void Metaspace::record_deallocation(void* ptr, size_t word_size) { 3695 assert(DumpSharedSpaces, "sanity"); 3696 3697 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3698 if (rec->_ptr == ptr) { 3699 assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity"); 3700 rec->_type = MetaspaceObj::DeallocatedType; 3701 return; 3702 } 3703 } 3704 3705 assert(0, "deallocating a pointer that was not recorded"); 3706 } 3707 3708 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) { 3709 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces"); 3710 3711 address last_addr = (address)bottom(); 3712 3713 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3714 address ptr = rec->_ptr; 3715 if (last_addr < ptr) { 3716 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr); 3717 } 3718 closure->doit(ptr, rec->_type, rec->_byte_size); 3719 last_addr = ptr + rec->_byte_size; 3720 } 3721 3722 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType); 3723 if (last_addr < top) { 3724 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr); 3725 } 3726 } 3727 3728 void Metaspace::purge(MetadataType mdtype) { 3729 get_space_list(mdtype)->purge(get_chunk_manager(mdtype)); 3730 } 3731 3732 void Metaspace::purge() { 3733 MutexLockerEx cl(SpaceManager::expand_lock(), 3734 Mutex::_no_safepoint_check_flag); 3735 purge(NonClassType); 3736 if (using_class_space()) { 3737 purge(ClassType); 3738 } 3739 } 3740 3741 void Metaspace::print_on(outputStream* out) const { 3742 // Print both class virtual space counts and metaspace. 3743 if (Verbose) { 3744 vsm()->print_on(out); 3745 if (using_class_space()) { 3746 class_vsm()->print_on(out); 3747 } 3748 } 3749 } 3750 3751 bool Metaspace::contains(const void* ptr) { 3752 if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) { 3753 return true; 3754 } 3755 3756 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) { 3757 return true; 3758 } 3759 3760 return get_space_list(NonClassType)->contains(ptr); 3761 } 3762 3763 void Metaspace::verify() { 3764 vsm()->verify(); 3765 if (using_class_space()) { 3766 class_vsm()->verify(); 3767 } 3768 } 3769 3770 void Metaspace::dump(outputStream* const out) const { 3771 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm()); 3772 vsm()->dump(out); 3773 if (using_class_space()) { 3774 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm()); 3775 class_vsm()->dump(out); 3776 } 3777 } 3778 3779 /////////////// Unit tests /////////////// 3780 3781 #ifndef PRODUCT 3782 3783 class TestMetaspaceAuxTest : AllStatic { 3784 public: 3785 static void test_reserved() { 3786 size_t reserved = MetaspaceAux::reserved_bytes(); 3787 3788 assert(reserved > 0, "assert"); 3789 3790 size_t committed = MetaspaceAux::committed_bytes(); 3791 assert(committed <= reserved, "assert"); 3792 3793 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); 3794 assert(reserved_metadata > 0, "assert"); 3795 assert(reserved_metadata <= reserved, "assert"); 3796 3797 if (UseCompressedClassPointers) { 3798 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType); 3799 assert(reserved_class > 0, "assert"); 3800 assert(reserved_class < reserved, "assert"); 3801 } 3802 } 3803 3804 static void test_committed() { 3805 size_t committed = MetaspaceAux::committed_bytes(); 3806 3807 assert(committed > 0, 
"assert"); 3808 3809 size_t reserved = MetaspaceAux::reserved_bytes(); 3810 assert(committed <= reserved, "assert"); 3811 3812 size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType); 3813 assert(committed_metadata > 0, "assert"); 3814 assert(committed_metadata <= committed, "assert"); 3815 3816 if (UseCompressedClassPointers) { 3817 size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType); 3818 assert(committed_class > 0, "assert"); 3819 assert(committed_class < committed, "assert"); 3820 } 3821 } 3822 3823 static void test_virtual_space_list_large_chunk() { 3824 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity()); 3825 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3826 // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be 3827 // vm_allocation_granularity aligned on Windows. 3828 size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord)); 3829 large_size += (os::vm_page_size()/BytesPerWord); 3830 vs_list->get_new_chunk(large_size, large_size, 0); 3831 } 3832 3833 static void test() { 3834 test_reserved(); 3835 test_committed(); 3836 test_virtual_space_list_large_chunk(); 3837 } 3838 }; 3839 3840 void TestMetaspaceAux_test() { 3841 TestMetaspaceAuxTest::test(); 3842 } 3843 3844 class TestVirtualSpaceNodeTest { 3845 static void chunk_up(size_t words_left, size_t& num_medium_chunks, 3846 size_t& num_small_chunks, 3847 size_t& num_specialized_chunks) { 3848 num_medium_chunks = words_left / MediumChunk; 3849 words_left = words_left % MediumChunk; 3850 3851 num_small_chunks = words_left / SmallChunk; 3852 words_left = words_left % SmallChunk; 3853 // how many specialized chunks can we get? 3854 num_specialized_chunks = words_left / SpecializedChunk; 3855 assert(words_left % SpecializedChunk == 0, "should be nothing left"); 3856 } 3857 3858 public: 3859 static void test() { 3860 MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3861 const size_t vsn_test_size_words = MediumChunk * 4; 3862 const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord; 3863 3864 // The chunk sizes must be multiples of eachother, or this will fail 3865 STATIC_ASSERT(MediumChunk % SmallChunk == 0); 3866 STATIC_ASSERT(SmallChunk % SpecializedChunk == 0); 3867 3868 { // No committed memory in VSN 3869 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3870 VirtualSpaceNode vsn(vsn_test_size_bytes); 3871 vsn.initialize(); 3872 vsn.retire(&cm); 3873 assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN"); 3874 } 3875 3876 { // All of VSN is committed, half is used by chunks 3877 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3878 VirtualSpaceNode vsn(vsn_test_size_bytes); 3879 vsn.initialize(); 3880 vsn.expand_by(vsn_test_size_words, vsn_test_size_words); 3881 vsn.get_chunk_vs(MediumChunk); 3882 vsn.get_chunk_vs(MediumChunk); 3883 vsn.retire(&cm); 3884 assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks"); 3885 assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up"); 3886 } 3887 3888 const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord; 3889 // This doesn't work for systems with vm_page_size >= 16K. 
3890 if (page_chunks < MediumChunk) { 3891 // 4 pages of VSN is committed, some is used by chunks 3892 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3893 VirtualSpaceNode vsn(vsn_test_size_bytes); 3894 3895 vsn.initialize(); 3896 vsn.expand_by(page_chunks, page_chunks); 3897 vsn.get_chunk_vs(SmallChunk); 3898 vsn.get_chunk_vs(SpecializedChunk); 3899 vsn.retire(&cm); 3900 3901 // committed - used = words left to retire 3902 const size_t words_left = page_chunks - SmallChunk - SpecializedChunk; 3903 3904 size_t num_medium_chunks, num_small_chunks, num_spec_chunks; 3905 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); 3906 3907 assert(num_medium_chunks == 0, "should not get any medium chunks"); 3908 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); 3909 assert(cm.sum_free_chunks() == words_left, "sizes should add up"); 3910 } 3911 3912 { // Half of VSN is committed, a humongous chunk is used 3913 ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk); 3914 VirtualSpaceNode vsn(vsn_test_size_bytes); 3915 vsn.initialize(); 3916 vsn.expand_by(MediumChunk * 2, MediumChunk * 2); 3917 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk 3918 vsn.retire(&cm); 3919 3920 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk); 3921 size_t num_medium_chunks, num_small_chunks, num_spec_chunks; 3922 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); 3923 3924 assert(num_medium_chunks == 0, "should not get any medium chunks"); 3925 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); 3926 assert(cm.sum_free_chunks() == words_left, "sizes should add up"); 3927 } 3928 3929 } 3930 3931 #define assert_is_available_positive(word_size) \ 3932 assert(vsn.is_available(word_size), \ 3933 err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \ 3934 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 3935 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end())); 3936 3937 #define assert_is_available_negative(word_size) \ 3938 assert(!vsn.is_available(word_size), \ 3939 err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \ 3940 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 3941 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end())); 3942 3943 static void test_is_available_positive() { 3944 // Reserve some memory. 3945 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3946 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3947 3948 // Commit some memory. 3949 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3950 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3951 assert(expanded, "Failed to commit"); 3952 3953 // Check that is_available accepts the committed size. 3954 assert_is_available_positive(commit_word_size); 3955 3956 // Check that is_available accepts half the committed size. 3957 size_t expand_word_size = commit_word_size / 2; 3958 assert_is_available_positive(expand_word_size); 3959 } 3960 3961 static void test_is_available_negative() { 3962 // Reserve some memory. 3963 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3964 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3965 3966 // Commit some memory. 
3967 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3968 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3969 assert(expanded, "Failed to commit"); 3970 3971 // Check that is_available doesn't accept a too large size. 3972 size_t two_times_commit_word_size = commit_word_size * 2; 3973 assert_is_available_negative(two_times_commit_word_size); 3974 } 3975 3976 static void test_is_available_overflow() { 3977 // Reserve some memory. 3978 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3979 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3980 3981 // Commit some memory. 3982 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3983 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3984 assert(expanded, "Failed to commit"); 3985 3986 // Calculate a size that will overflow the virtual space size. 3987 void* virtual_space_max = (void*)(uintptr_t)-1; 3988 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1); 3989 size_t overflow_size = bottom_to_max + BytesPerWord; 3990 size_t overflow_word_size = overflow_size / BytesPerWord; 3991 3992 // Check that is_available can handle the overflow. 3993 assert_is_available_negative(overflow_word_size); 3994 } 3995 3996 static void test_is_available() { 3997 TestVirtualSpaceNodeTest::test_is_available_positive(); 3998 TestVirtualSpaceNodeTest::test_is_available_negative(); 3999 TestVirtualSpaceNodeTest::test_is_available_overflow(); 4000 } 4001 }; 4002 4003 void TestVirtualSpaceNode_test() { 4004 TestVirtualSpaceNodeTest::test(); 4005 TestVirtualSpaceNodeTest::test_is_available(); 4006 } 4007 #endif
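// Note: TestMetaspaceAux_test() and TestVirtualSpaceNode_test() are not called
// from this file; in non-product builds they are typically driven by the
// internal VM test runner. A sketch of that wiring (assuming the usual
// run_unit_test()/ExecuteInternalVMTests mechanism; the exact registration
// lives elsewhere in the VM startup path):
//
//   if (ExecuteInternalVMTests) {
//     run_unit_test(TestMetaspaceAux_test());
//     run_unit_test(TestVirtualSpaceNode_test());
//   }
//
// and can be exercised with a debug VM via: java -XX:+ExecuteInternalVMTests -version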