 private:

  // Protects allocations from this SpaceManager.
  Mutex* const _lock;

  // Type of metadata allocated.
  const Metaspace::MetadataType _mdtype;

  // Type of metaspace.
  const Metaspace::MetaspaceType _space_type;

  // List of chunks in use by this SpaceManager, one list head per chunk
  // size class (e.g. SpecializedIndex, SmallIndex, MediumIndex).
  // Allocations are done from the current chunk.  The list is used for
  // deallocating chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  // before switching to medium chunks (see calc_chunk_size()).
  static uint const _small_chunk_limit;

  // Maximum number of specialize chunks to allocate for anonymous
  // metadata space to a SpaceManager.
  static uint const _anon_metadata_specialize_chunk_limit;

  // Sum of all space in allocated chunks
  // (presumably the words handed out as blocks -- TODO confirm).
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks (total words and chunk count).
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist* _block_freelists;

  // Protects virtualspace and chunk expansions.
  static const char* _expand_lock_name;
  static const int _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
#ifdef ASSERT
  // Debug-only consistency check of _allocated_blocks_words.
  void verify_allocated_blocks_words();
#endif
1404
1405 // This adjusts the size given to be greater than the minimum allocation size in
1406 // words for data in metaspace. Esentially the minimum size is currently 3 words.
1407 size_t get_allocation_word_size(size_t word_size) {
1408 size_t byte_size = word_size * BytesPerWord;
1409
1410 size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1411 raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1412
1413 size_t raw_word_size = raw_bytes_size / BytesPerWord;
1414 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1415
1416 return raw_word_size;
1417 }
1418 };
1419
// Chunk-count limits used by SpaceManager::calc_chunk_size().
uint const SpaceManager::_small_chunk_limit = 4;
uint const SpaceManager::_anon_metadata_specialize_chunk_limit = 4;

// The expand lock guards virtualspace and chunk expansions.  Its name and
// rank are defined before the Mutex itself because dynamic initialization
// within a translation unit runs in declaration order.
const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);
1431
1432 void VirtualSpaceNode::inc_container_count() {
1433 assert_lock_strong(SpaceManager::expand_lock());
1434 _container_count++;
1435 }
1436
1437 void VirtualSpaceNode::dec_container_count() {
1438 assert_lock_strong(SpaceManager::expand_lock());
1439 _container_count--;
1440 }
1441
3334 } else {
3335 st->cr();
3336 }
3337 }
3338
3339 chunk_manager()->locked_print_free_chunks(st);
3340 chunk_manager()->locked_print_sum_free_chunks(st);
3341 }
3342
3343 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3344
3345 // Decide between a small chunk and a medium chunk. Up to
3346 // _small_chunk_limit small chunks can be allocated.
3347 // After that a medium chunk is preferred.
3348 size_t chunk_word_size;
3349
3350 // Special case for anonymous metadata space.
3351 // Anonymous metadata space is usually small, with majority within 1K - 2K range and
3352 // rarely about 4K (64-bits JVM).
3353 // Instead of jumping to SmallChunk after initial chunk exhausted, keeping allocation
3354 // from SpecializeChunk up to _anon_metadata_specialize_chunk_limit (4) reduces space waste
3355 // from 60+% to around 30%.
3356 if (_space_type == Metaspace::AnonymousMetaspaceType &&
3357 _mdtype == Metaspace::NonClassType &&
3358 sum_count_in_chunks_in_use(SpecializedIndex) < _anon_metadata_specialize_chunk_limit &&
3359 word_size + Metachunk::overhead() <= SpecializedChunk) {
3360 return SpecializedChunk;
3361 }
3362
3363 if (chunks_in_use(MediumIndex) == NULL &&
3364 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3365 chunk_word_size = (size_t) small_chunk_size();
3366 if (word_size + Metachunk::overhead() > small_chunk_size()) {
3367 chunk_word_size = medium_chunk_size();
3368 }
3369 } else {
3370 chunk_word_size = medium_chunk_size();
3371 }
3372
3373 // Might still need a humongous chunk. Enforce
3374 // humongous allocations sizes to be aligned up to
3375 // the smallest chunk size.
3376 size_t if_humongous_sized_chunk =
3377 align_up(word_size + Metachunk::overhead(),
3378 smallest_chunk_size());
|
 private:

  // Protects allocations from this SpaceManager.
  Mutex* const _lock;

  // Type of metadata allocated.
  const Metaspace::MetadataType _mdtype;

  // Type of metaspace.
  const Metaspace::MetaspaceType _space_type;

  // List of chunks in use by this SpaceManager, one list head per chunk
  // size class (e.g. SpecializedIndex, SmallIndex, MediumIndex).
  // Allocations are done from the current chunk.  The list is used for
  // deallocating chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Maximum number of small chunks to allocate to a SpaceManager
  // before switching to medium chunks (see calc_chunk_size()).
  static uint const _small_chunk_limit;

  // Maximum number of specialize chunks to allocate for anonymous and delegating
  // metadata space to a SpaceManager.
  static uint const _anon_and_delegating_metadata_specialize_chunk_limit;

  // Sum of all space in allocated chunks
  // (presumably the words handed out as blocks -- TODO confirm).
  size_t _allocated_blocks_words;

  // Sum of all allocated chunks (total words and chunk count).
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist* _block_freelists;

  // Protects virtualspace and chunk expansions.
  static const char* _expand_lock_name;
  static const int _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
#ifdef ASSERT
  // Debug-only consistency check of _allocated_blocks_words.
  void verify_allocated_blocks_words();
#endif
1404
1405 // This adjusts the size given to be greater than the minimum allocation size in
1406 // words for data in metaspace. Esentially the minimum size is currently 3 words.
1407 size_t get_allocation_word_size(size_t word_size) {
1408 size_t byte_size = word_size * BytesPerWord;
1409
1410 size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
1411 raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
1412
1413 size_t raw_word_size = raw_bytes_size / BytesPerWord;
1414 assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
1415
1416 return raw_word_size;
1417 }
1418 };
1419
// Chunk-count limits used by SpaceManager::calc_chunk_size().
uint const SpaceManager::_small_chunk_limit = 4;
uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;

// The expand lock guards virtualspace and chunk expansions.  Its name and
// rank are defined before the Mutex itself because dynamic initialization
// within a translation unit runs in declaration order.
const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag,
            Monitor::_safepoint_check_never);
1431
1432 void VirtualSpaceNode::inc_container_count() {
1433 assert_lock_strong(SpaceManager::expand_lock());
1434 _container_count++;
1435 }
1436
1437 void VirtualSpaceNode::dec_container_count() {
1438 assert_lock_strong(SpaceManager::expand_lock());
1439 _container_count--;
1440 }
1441
3334 } else {
3335 st->cr();
3336 }
3337 }
3338
3339 chunk_manager()->locked_print_free_chunks(st);
3340 chunk_manager()->locked_print_sum_free_chunks(st);
3341 }
3342
3343 size_t SpaceManager::calc_chunk_size(size_t word_size) {
3344
3345 // Decide between a small chunk and a medium chunk. Up to
3346 // _small_chunk_limit small chunks can be allocated.
3347 // After that a medium chunk is preferred.
3348 size_t chunk_word_size;
3349
3350 // Special case for anonymous metadata space.
3351 // Anonymous metadata space is usually small, with majority within 1K - 2K range and
3352 // rarely about 4K (64-bits JVM).
3353 // Instead of jumping to SmallChunk after initial chunk exhausted, keeping allocation
3354 // from SpecializeChunk up to _anon_or_delegating_metadata_specialize_chunk_limit (4)
3355 // reduces space waste from 60+% to around 30%.
3356 if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
3357 _mdtype == Metaspace::NonClassType &&
3358 sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
3359 word_size + Metachunk::overhead() <= SpecializedChunk) {
3360 return SpecializedChunk;
3361 }
3362
3363 if (chunks_in_use(MediumIndex) == NULL &&
3364 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
3365 chunk_word_size = (size_t) small_chunk_size();
3366 if (word_size + Metachunk::overhead() > small_chunk_size()) {
3367 chunk_word_size = medium_chunk_size();
3368 }
3369 } else {
3370 chunk_word_size = medium_chunk_size();
3371 }
3372
3373 // Might still need a humongous chunk. Enforce
3374 // humongous allocations sizes to be aligned up to
3375 // the smallest chunk size.
3376 size_t if_humongous_sized_chunk =
3377 align_up(word_size + Metachunk::overhead(),
3378 smallest_chunk_size());
|