src/hotspot/share/memory/metaspace.cpp

@@ -1246,13 +1246,13 @@
   Metachunk* _current_chunk;
 
   // Maximum number of small chunks to allocate to a SpaceManager
   static uint const _small_chunk_limit;
 
-  // Maximum number of specialize chunks to allocate for anonymous
+  // Maximum number of specialized chunks to allocate for anonymous and delegating
   // metadata space to a SpaceManager
-  static uint const _anon_metadata_specialize_chunk_limit;
+  static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
 
   // Sum of all space in allocated chunks
   size_t _allocated_blocks_words;
 
   // Sum of all allocated chunks

@@ -1416,11 +1416,11 @@
     return raw_word_size;
   }
 };
 
 uint const SpaceManager::_small_chunk_limit = 4;
-uint const SpaceManager::_anon_metadata_specialize_chunk_limit = 4;
+uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
 
 const char* SpaceManager::_expand_lock_name =
   "SpaceManager chunk allocation lock";
 const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
 Mutex* const SpaceManager::_expand_lock =

@@ -3349,15 +3349,15 @@
 
   // Special case for anonymous metadata space.
   // Anonymous metadata space is usually small, with the majority in the 1K - 2K range and
   // rarely above 4K (64-bit JVM).
   // Instead of jumping to SmallChunk after initial chunk exhausted, keeping allocation
-  // from SpecializeChunk up to _anon_metadata_specialize_chunk_limit (4) reduces space waste
-  // from 60+% to around 30%.
-  if (_space_type == Metaspace::AnonymousMetaspaceType &&
+  // from SpecializeChunk up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
+  // reduces space waste from 60+% to around 30%.
+  if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
       _mdtype == Metaspace::NonClassType &&
-      sum_count_in_chunks_in_use(SpecializedIndex) < _anon_metadata_specialize_chunk_limit &&
+      sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
       word_size + Metachunk::overhead() <= SpecializedChunk) {
     return SpecializedChunk;
   }
 
   if (chunks_in_use(MediumIndex) == NULL &&
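
The effect of the limit in the last hunk is easiest to see in isolation. The following is a minimal, standalone sketch of that selection rule, not HotSpot code: the chunk sizes, the overhead constant, and the name next_chunk_words are illustrative placeholders, and the NonClassType check is omitted for brevity. As long as the loader is of the anonymous or reflection (delegating) type, the request still fits in a specialized chunk, and fewer than four specialized chunks are in use, the allocator keeps returning specialized chunks instead of jumping to the small-chunk tier.

// Standalone model of the specialized-chunk heuristic above (assumed values,
// not HotSpot's SpecializedChunk/SmallChunk/Metachunk::overhead()).
#include <cstddef>
#include <iostream>

enum SpaceType { AnonymousMetaspaceType, ReflectionMetaspaceType, StandardMetaspaceType };

const size_t SPECIALIZED_CHUNK_WORDS  = 128;  // assumed size in words
const size_t SMALL_CHUNK_WORDS        = 512;  // assumed size in words
const size_t CHUNK_OVERHEAD_WORDS     = 8;    // assumed per-chunk overhead
const unsigned SPECIALIZE_CHUNK_LIMIT = 4;    // mirrors the limit of 4 in this patch

// Next chunk size for a loader of the given space type, given how many
// specialized chunks it already holds and how many words it needs.
size_t next_chunk_words(SpaceType type, unsigned specialized_in_use, size_t word_size) {
  if ((type == AnonymousMetaspaceType || type == ReflectionMetaspaceType) &&
      specialized_in_use < SPECIALIZE_CHUNK_LIMIT &&
      word_size + CHUNK_OVERHEAD_WORDS <= SPECIALIZED_CHUNK_WORDS) {
    return SPECIALIZED_CHUNK_WORDS;   // keep handing out small, tightly sized chunks
  }
  return SMALL_CHUNK_WORDS;           // otherwise fall back to the next chunk tier
}

int main() {
  // A loader that only needs ~100 words keeps receiving specialized chunks until
  // four are in use; only then does it move up to a (mostly unused) small chunk.
  for (unsigned in_use = 0; in_use < 6; ++in_use) {
    std::cout << "specialized chunks in use: " << in_use
              << " -> next chunk: "
              << next_chunk_words(AnonymousMetaspaceType, in_use, 100) << " words\n";
  }
  return 0;
}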