--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-02-11 14:19:29.936991188 +0100
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-02-11 14:19:29.521978430 +0100
@@ -1799,8 +1799,8 @@
   // Create the G1ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
-  if (_cm == NULL || !_cm->completed_initialization()) {
-    vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
+  if (!_cm->completed_initialization()) {
+    vm_shutdown_during_initialization("Could not initialize G1ConcurrentMark");
     return JNI_ENOMEM;
   }
   _cm_thread = _cm->cm_thread();
--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2020-02-11 14:19:31.485038779 +0100
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2020-02-11 14:19:31.071026051 +0100
@@ -268,9 +268,6 @@
   _scan_in_progress(false),
   _should_abort(false) {
   _root_regions = new MemRegion[_max_regions];
-  if (_root_regions == NULL) {
-    vm_exit_during_initialization("Could not allocate root MemRegion set.");
-  }
 }
 
 G1CMRootMemRegions::~G1CMRootMemRegions() {
--- old/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp 2020-02-11 14:19:33.007085571 +0100
+++ new/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp 2020-02-11 14:19:32.583072535 +0100
@@ -72,11 +72,7 @@
   _cr = cr;
   _num_max_threads = num_max_threads;
 
-  _threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, num_max_threads, mtGC);
-  if (_threads == NULL) {
-    vm_shutdown_during_initialization("Could not allocate thread holder array.");
-    return JNI_ENOMEM;
-  }
+  _threads = NEW_C_HEAP_ARRAY(G1ConcurrentRefineThread*, num_max_threads, mtGC);
 
   for (uint i = 0; i < num_max_threads; i++) {
     if (UseDynamicNumberOfGCThreads && i != 0 /* Always start first thread. */) {
@@ -303,13 +299,6 @@
                                                   yellow_zone,
                                                   red_zone,
                                                   min_yellow_zone_size);
-
-  if (cr == NULL) {
-    *ecode = JNI_ENOMEM;
-    vm_shutdown_during_initialization("Could not create G1ConcurrentRefine");
-    return NULL;
-  }
-
   *ecode = cr->initialize();
   return cr;
 }
--- old/src/hotspot/share/gc/g1/heapRegionRemSet.cpp 2020-02-11 14:19:34.515131931 +0100
+++ new/src/hotspot/share/gc/g1/heapRegionRemSet.cpp 2020-02-11 14:19:34.100119173 +0100
@@ -92,14 +92,7 @@
     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
   }
 
-  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
-                                          mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
-
-  if (_fine_grain_regions == NULL) {
-    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
-                          "Failed to allocate _fine_grain_entries.");
-  }
-
+  _fine_grain_regions = NEW_C_HEAP_ARRAY(PerRegionTablePtr, _max_fine_entries, mtGC);
   for (size_t i = 0; i < _max_fine_entries; i++) {
     _fine_grain_regions[i] = NULL;
   }
--- old/src/hotspot/share/gc/parallel/asPSYoungGen.cpp 2020-02-11 14:19:36.006177770 +0100
+++ new/src/hotspot/share/gc/parallel/asPSYoungGen.cpp 2020-02-11 14:19:35.592165042 +0100
@@ -60,8 +60,7 @@
   assert(_init_gen_size != 0, "Should have a finite size");
   _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
   if (!_virtual_space->expand_by(_init_gen_size)) {
-    vm_exit_during_initialization("Could not reserve enough space for "
-                                  "object heap");
+    vm_exit_during_initialization("Could not reserve enough space for object heap");
   }
 }
 
--- old/src/hotspot/share/gc/parallel/psCompactionManager.cpp 2020-02-11 14:19:37.508223946 +0100
+++ new/src/hotspot/share/gc/parallel/psCompactionManager.cpp 2020-02-11 14:19:37.091211126 +0100
@@ -78,16 +78,12 @@
   _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
 
   _oop_task_queues = new OopTaskQueueSet(parallel_gc_threads);
-  guarantee(_oop_task_queues != NULL, "Could not allocate oop task queues");
   _objarray_task_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
-  guarantee(_objarray_task_queues != NULL, "Could not allocate objarray task queues");
   _region_task_queues = new RegionTaskQueueSet(parallel_gc_threads);
-  guarantee(_region_task_queues != NULL, "Could not allocate region task queues");
 
   // Create and register the ParCompactionManager(s) for the worker threads.
   for(uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i] = new ParCompactionManager();
-    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     oop_task_queues()->register_queue(i, _manager_array[i]->marking_stack());
     _objarray_task_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
     region_task_queues()->register_queue(i, _manager_array[i]->region_stack());
@@ -96,8 +92,6 @@
   // The VMThread gets its own ParCompactionManager, which is not available
   // for work stealing.
   _manager_array[parallel_gc_threads] = new ParCompactionManager();
-  guarantee(_manager_array[parallel_gc_threads] != NULL,
-            "Could not create ParCompactionManager");
   assert(ParallelScavengeHeap::heap()->workers().total_workers() != 0,
          "Not initialized?");
 }
--- old/src/hotspot/share/gc/parallel/psOldGen.cpp 2020-02-11 14:19:38.999269785 +0100
+++ new/src/hotspot/share/gc/parallel/psOldGen.cpp 2020-02-11 14:19:38.589257180 +0100
@@ -135,10 +135,6 @@
   //
 
   _object_space = new MutableSpace(virtual_space()->alignment());
-
-  if (_object_space == NULL)
-    vm_exit_during_initialization("Could not allocate an old gen space");
-
   object_space()->initialize(cmr,
                              SpaceDecorator::Clear,
                              SpaceDecorator::Mangle);
--- old/src/hotspot/share/gc/parallel/psPromotionManager.cpp 2020-02-11 14:19:40.502315992 +0100
+++ new/src/hotspot/share/gc/parallel/psPromotionManager.cpp 2020-02-11 14:19:40.088303264 +0100
@@ -60,10 +60,8 @@
   // and make sure that the first instance starts at a cache line.
   assert(_manager_array == NULL, "Attempt to initialize twice");
   _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(promotion_manager_num);
-  guarantee(_manager_array != NULL, "Could not initialize promotion manager");
 
   _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
-  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");
 
   // Create and register the PSPromotionManager(s) for the worker threads.
   for(uint i=0; i<ParallelGCThreads; i++) {
@@ -74,7 +72,6 @@
 
   assert(_preserved_marks_set == NULL, "Attempt to initialize twice");
   _preserved_marks_set = new PreservedMarksSet(true /* in_c_heap */);
-  guarantee(_preserved_marks_set != NULL, "Could not initialize preserved marks set");
   _preserved_marks_set->init(promotion_manager_num);
   for (uint i = 0; i < promotion_manager_num; i += 1) {
     _manager_array[i].register_preserved_marks(_preserved_marks_set->get(i));
--- old/src/hotspot/share/gc/parallel/psYoungGen.cpp 2020-02-11 14:19:42.003362137 +0100
+++ new/src/hotspot/share/gc/parallel/psYoungGen.cpp 2020-02-11 14:19:41.579349102 +0100
@@ -88,10 +88,6 @@
   _from_space = new MutableSpace(virtual_space()->alignment());
   _to_space = new MutableSpace(virtual_space()->alignment());
 
-  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
-    vm_exit_during_initialization("Could not allocate a young gen space");
-  }
-
   // Generation Counters - generation 0, 3 subspaces
   _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
                                            _max_gen_size, _virtual_space);
--- old/src/hotspot/share/gc/serial/defNewGeneration.cpp 2020-02-11 14:19:43.517408683 +0100
+++ new/src/hotspot/share/gc/serial/defNewGeneration.cpp 2020-02-11 14:19:43.102395924 +0100
@@ -168,10 +168,6 @@
   _from_space = new ContiguousSpace();
   _to_space = new ContiguousSpace();
 
-  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
-    vm_exit_during_initialization("Could not allocate a new gen space");
-  }
-
   // Compute the maximum eden and survivor space sizes. These sizes
   // are computed assuming the entire reserved space is committed.
   // These values are exported as performance counters.
--- old/src/hotspot/share/gc/shared/cardGeneration.cpp 2020-02-11 14:19:45.011454613 +0100
+++ new/src/hotspot/share/gc/shared/cardGeneration.cpp 2020-02-11 14:19:44.604442101 +0100
@@ -53,9 +53,6 @@
                                     heap_word_size(initial_byte_size));
   MemRegion committed_mr(start, heap_word_size(initial_byte_size));
   _rs->resize_covered_region(committed_mr);
-  if (_bts == NULL) {
-    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
-  }
 
   // Verify that the start and end of this generation is the start of a card.
   // If this wasn't true, a single card could span more than on generation,
--- old/src/hotspot/share/gc/shared/cardTable.cpp 2020-02-11 14:19:46.501500421 +0100
+++ new/src/hotspot/share/gc/shared/cardTable.cpp 2020-02-11 14:19:46.088487724 +0100
@@ -60,10 +60,7 @@
 
   assert(card_size <= 512, "card_size must be less than 512"); // why?
 
-  _covered = new MemRegion[_max_covered_regions];
-  if (_covered == NULL) {
-    vm_exit_during_initialization("Could not allocate card table covered region set.");
-  }
+  _covered = new MemRegion[_max_covered_regions];
 }
 
 CardTable::~CardTable() {
@@ -88,9 +85,6 @@
   _cur_covered_regions = 0;
 
   _committed = new MemRegion[_max_covered_regions];
-  if (_committed == NULL) {
-    vm_exit_during_initialization("Could not allocate card table committed region set.");
-  }
 
   const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
     MAX2(_page_size, (size_t) os::vm_allocation_granularity());
--- old/src/hotspot/share/gc/shared/cardTableRS.cpp 2020-02-11 14:19:47.998546444 +0100
+++ new/src/hotspot/share/gc/shared/cardTableRS.cpp 2020-02-11 14:19:47.585533747 +0100
@@ -579,11 +579,7 @@
   // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
   // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
   uint max_gens = 2;
-  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(CardValue, max_gens + 1,
-                                           mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
-  if (_last_cur_val_in_gen == NULL) {
-    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
-  }
+  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY(CardValue, max_gens + 1, mtGC);
   for (uint i = 0; i < max_gens + 1; i++) {
     _last_cur_val_in_gen[i] = clean_card_val();
   }
--- old/src/hotspot/share/gc/shared/generation.cpp 2020-02-11 14:19:49.491592343 +0100
+++ new/src/hotspot/share/gc/shared/generation.cpp 2020-02-11 14:19:49.082579769 +0100
@@ -79,9 +79,6 @@
   assert(!_reserved.is_empty(), "empty generation?");
   _span_based_discoverer.set_span(_reserved);
   _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
-  if (_ref_processor == NULL) {
-    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
-  }
 }
 
 void Generation::print() const { print_on(tty); }
--- old/src/hotspot/share/gc/shared/referenceProcessor.cpp 2020-02-11 14:19:50.984638243 +0100
+++ new/src/hotspot/share/gc/shared/referenceProcessor.cpp 2020-02-11 14:19:50.573625608 +0100
@@ -64,9 +64,6 @@
   } else {
     _default_soft_ref_policy = new LRUCurrentHeapPolicy();
   }
-  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
-    vm_exit_during_initialization("Could not allocate reference policy object");
-  }
   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
             RefDiscoveryPolicy == ReferentBasedDiscovery,
             "Unrecognized RefDiscoveryPolicy");
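
Background (not part of the webrev itself): the deleted checks guard allocations made through HotSpot's C-heap helpers (CHeapObj::operator new, NEW_C_HEAP_ARRAY, AllocateHeap), which default to AllocFailStrategy::EXIT_OOM and terminate the VM via vm_exit_out_of_memory() instead of returning NULL, so none of the removed NULL checks could ever fire. A minimal stand-alone C++ sketch of that allocate-or-exit contract; allocate_heap and the error message are hypothetical stand-ins, not the VM's API:

#include <cstdio>
#include <cstdlib>

// Sketch of the allocate-or-exit contract assumed by the callers above:
// with the EXIT_OOM strategy the allocator never returns NULL, so a
// caller-side "if (p == NULL) vm_exit_..." check is dead code.
enum class AllocFailStrategy { EXIT_OOM, RETURN_NULL };

static void* allocate_heap(size_t size,
                           AllocFailStrategy mode = AllocFailStrategy::EXIT_OOM) {
  void* p = std::malloc(size);
  if (p == nullptr && mode == AllocFailStrategy::EXIT_OOM) {
    // Stand-in for vm_exit_out_of_memory(): report and terminate, never return.
    std::fprintf(stderr, "out of native heap (%zu bytes)\n", size);
    std::exit(1);
  }
  return p;  // may be NULL only when RETURN_NULL was requested
}

int main() {
  // With the default strategy the result needs no NULL check.
  int* regions = static_cast<int*>(allocate_heap(128 * sizeof(int)));
  regions[0] = 0;
  std::free(regions);
  return 0;
}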