    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
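    // Note: pointer_delta() returns the distance in HeapWords, so fill_size
    // (and min_fill_size() below) are word counts, not byte counts.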
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than the smallest object we can
        // represent, bump up to the next aligned address. We know we won't
        // exceed the current region boundary, because the maximum supported
        // alignment is smaller than the minimum region size, and because the
        // allocation code never leaves a gap smaller than min_fill_size at the
        // top of the current allocation region.
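        // Illustrative example (values are hypothetical): with an 8-word
        // alignment and min_fill_size() == 2, a currtop one word below the
        // boundary would leave a 1-word gap, too small to fill; aligning up
        // from currtop + 2 instead yields a representable 9-word fill.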
        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
                                             end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
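      // Plug the gap with filler objects so the heap stays parsable. The
      // non-humongous variant splits a large fill into pieces that stay below
      // the humongous-object threshold.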
      G1CollectedHeap::fill_with_non_humongous_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions and create MemRegions summarizing the
  // allocated address ranges, combining contiguous ranges. Add the MemRegions
  // to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

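  // Walk the array from the most recently allocated region backwards; archive
  // regions are handed out highest-first, so this visits regions in ascending
  // address order, starting from the lowest bottom().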
  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
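    // A gap between the accumulated range's top and this region's bottom means
    // the ranges are not contiguous: emit the finished MemRegion and start a
    // new one at this region's base.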
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;