/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"

// Interface to keep track of which regions G1 is currently allocating into.
class AllocRegionManager : public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  G1CollectedHeap* _g1h;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

 public:
  AllocRegionManager(G1CollectedHeap* heap) :
    _g1h(heap), _summary_bytes_used(0) { }

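  // Factory method selecting the allocation region manager implementation to
  // use (presumably DefaultAllocRegionManager in the default configuration;
  // the definition lives outside this header).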
  static AllocRegionManager* create_allocator(G1CollectedHeap* g1h);

  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
  virtual void abandon_gc_alloc_regions() = 0;

  virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
  virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;
  virtual size_t                 used() = 0;
  virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;

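  // At the start of a GC, install the old region retained from the previous
  // GC (if any, and if still usable) as the current old GC allocation region.
  // Defined outside this header.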
  void                           reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                                           OldGCAllocRegion* old,
                                                           HeapRegion** retained);

  size_t used_unlocked() const {
    return _summary_bytes_used;
  }

  void increase_used(size_t bytes) {
    _summary_bytes_used += bytes;
  }

  void decrease_used(size_t bytes) {
    assert(_summary_bytes_used >= bytes,
           err_msg("invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
                   _summary_bytes_used, bytes));
    _summary_bytes_used -= bytes;
  }

  void set_used(size_t bytes) {
    _summary_bytes_used = bytes;
  }

  virtual HeapRegion* new_heap_region(uint hrs_index,
                                      G1BlockOffsetSharedArray* sharedOffsetArray,
                                      MemRegion mr) {
    return new HeapRegion(hrs_index, sharedOffsetArray, mr);
  }
};
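
// Illustrative sketch of the intended call sequence around a collection pause
// (in HotSpot these calls are made by G1CollectedHeap, not shown here):
//
//   allocator->init_gc_alloc_regions(evacuation_info);
//   // ... GC workers evacuate live objects into the GC alloc regions ...
//   allocator->release_gc_alloc_regions(n_workers, evacuation_info);
//   // or, if the evacuation has to be unwound instead:
//   allocator->abandon_gc_alloc_regions();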

// The default allocation region manager for G1. Provides a single mutator,
// survivor and old generation allocation region.
// Can retain the old generation allocation region across GCs.
class DefaultAllocRegionManager : public AllocRegionManager {
 protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

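  // The old GC allocation region retained at the end of the previous GC, so
  // that it can be reused as the initial old GC allocation region of the next
  // GC (see reuse_retained_old_region() and is_retained_old_region()).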
  HeapRegion* _retained_old_gc_alloc_region;
 public:
  DefaultAllocRegionManager(G1CollectedHeap* heap) :
    AllocRegionManager(heap), _retained_old_gc_alloc_region(NULL) { }

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

  virtual size_t used() {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = _summary_bytes_used;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};

// A PLAB used during garbage collection that is specific to G1.
class G1PLAB : public ParGCAllocBuffer {
 private:
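  // True iff the buffer has been retired, i.e. flushed and made unavailable
  // for further allocation; set_buf() re-arms a retired buffer.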
  bool _retired;

 public:
  G1PLAB(size_t gclab_word_size);
  virtual ~G1PLAB() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  virtual void retire() {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire();
    _retired = true;
  }
};

// Manages the PLABs used during garbage collection. Interface for allocation
// from PLABs. Needs to handle multiple allocation contexts, extra alignment
// of objects in the "survivor" area, and some statistics tracking.
class PLABAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
 protected:
  G1CollectedHeap* _g1h;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case, since some
  // architectures have special compare-against-zero instructions.
  const uint _survivor_alignment_bytes;

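  // Statistics: words wasted at the end of retired allocation buffers, and
  // words wasted by undone (rolled-back) allocations, respectively.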
  size_t _alloc_buffer_waste;
  size_t _undo_waste;

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

  virtual void retire_alloc_buffers() = 0;
  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that
  // alignment, or 0 if there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }

 public:
  PLABAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
    _alloc_buffer_waste(0), _undo_waste(0) {
  }

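  // Factory method selecting the PLAB allocator implementation to use
  // (presumably DefaultPLABAllocator in the default configuration; the
  // definition lives outside this header).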
  static PLABAllocator* create_allocator(G1CollectedHeap* g1h);

  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
  size_t undo_waste()         { return _undo_waste; }

  // Allocate word_sz words in dest, either directly into the regions or by
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful.
  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
                                        size_t word_sz,
                                        AllocationContext_t context);

  // Allocate word_sz words in the PLAB of dest.  Returns the address of the
  // allocated memory, NULL if not successful.
  HeapWord* plab_allocate(InCSetState dest,
                          size_t word_sz,
                          AllocationContext_t context) {
    G1PLAB* buffer = alloc_buffer(dest, context);
    if (_survivor_alignment_bytes == 0) {
      return buffer->allocate(word_sz);
    } else {
      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
    }
  }

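  // Allocate word_sz words in dest: first try the current PLAB and, if that
  // fails, fall back to allocate_direct_or_new_plab().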
  HeapWord* allocate(InCSetState dest, size_t word_sz,
                     AllocationContext_t context) {
    HeapWord* const obj = plab_allocate(dest, word_sz, context);
    if (obj != NULL) {
      return obj;
    }
    return allocate_direct_or_new_plab(dest, word_sz, context);
  }

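  // Undo the allocation of obj (word_sz words): if obj is still inside its
  // PLAB, hand the space back to the buffer; otherwise fill it with a dummy
  // object and account for it as undo waste.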
  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
    if (alloc_buffer(dest, context)->contains(obj)) {
      assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }
};

// The default PLAB allocator for G1. Keeps the current (single) PLAB for
// survivor and old generation allocation.
class DefaultPLABAllocator : public PLABAllocator {
  G1PLAB  _surviving_alloc_buffer;
  G1PLAB  _tenured_alloc_buffer;
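  // Maps an allocation destination (InCSetState value) to the PLAB serving
  // it; presumably wired to the two buffers above in the constructor, which
  // is defined outside this header.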
  G1PLAB* _alloc_buffers[InCSetState::Num];

 public:
  DefaultPLABAllocator(G1CollectedHeap* g1h);

  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
    assert(dest.is_valid(),
           err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
    assert(_alloc_buffers[dest.value()] != NULL,
           err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
    return _alloc_buffers[dest.value()];
  }

  virtual void retire_alloc_buffers();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP