/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// ...
// reference processing during STW evacuation pauses.
class G1STWIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  bool do_object_b(oop p);
};

class G1RegionMappingChangedListener : public G1MappingChangedListener {
 private:
  void reset_from_card_cache(uint start_idx, size_t num_regions);
 public:
  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

class G1CollectedHeap : public CollectedHeap {
  friend class G1FreeCollectionSetTask;
  friend class VM_CollectForMetadataAllocation;
  friend class VM_G1CollectForAllocation;
  friend class VM_G1CollectFull;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class G1FullCollector;
  friend class G1GCAllocRegion;
  friend class G1HeapVerifier;

  // Closures used in implementation.
  friend class G1ParScanThreadState;
  friend class G1ParScanThreadStateSet;
  friend class G1ParTask;
  friend class G1PLABAllocator;
  friend class G1PrepareCompactClosure;

  // Other related classes.
  friend class HeapRegionClaimer;

  // Testing classes.
  friend class G1CheckCSetFastTableClosure;

private:
  // ...

  // * If either call cannot satisfy the allocation request using the
  //   current allocating region, they will try to get a new one. If
  //   this fails, they will attempt to do an evacuation pause and
  //   retry the allocation.
  //
  // * If all allocation attempts fail, even after trying to schedule
  //   an evacuation pause, allocate_new_tlab() will return NULL,
  //   whereas mem_allocate() will attempt a heap expansion and/or
  //   schedule a Full GC.
  //
  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
  //   should never be called with word_size being humongous. All
  //   humongous allocation requests should go to mem_allocate() which
  //   will satisfy them with a special path.

  virtual HeapWord* allocate_new_tlab(size_t word_size);

  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool* gc_overhead_limit_was_exceeded);
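
  // A minimal, hedged sketch (not part of this class) of how the two
  // entry points above divide the work; "heap" and "req_words" are
  // hypothetical names, and this is an illustration rather than the
  // VM's actual call sites:
  //
  //   // TLAB refill first: may trigger an evacuation pause internally,
  //   // but will not expand the heap or schedule a Full GC on its own.
  //   HeapWord* buf = heap->allocate_new_tlab(req_words);
  //   if (buf == NULL) {
  //     // Direct allocation: may expand the heap and/or schedule a
  //     // Full GC before finally giving up and returning NULL.
  //     bool gc_overhead_limit_was_exceeded = false;
  //     buf = heap->mem_allocate(req_words, &gc_overhead_limit_was_exceeded);
  //   }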

  // First-level mutator allocation attempt: try to allocate out of
  // the mutator alloc region without taking the Heap_lock. This
  // should only be used for non-humongous allocations.
  inline HeapWord* attempt_allocation(size_t word_size);

  // Second-level mutator allocation attempt: take the Heap_lock and
  // retry the allocation attempt, potentially scheduling a GC
  // pause. This should only be used for non-humongous allocations.
  HeapWord* attempt_allocation_slow(size_t word_size,
                                    AllocationContext_t context);

  // Takes the Heap_lock and attempts a humongous allocation. It can
  // potentially schedule a GC pause.
  HeapWord* attempt_allocation_humongous(size_t word_size);
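
  // A hedged sketch of the dispatch the comments above describe;
  // "mutator_mem_allocate" and "is_humongous_request" are hypothetical
  // helpers, not members of this class:
  //
  //   HeapWord* mutator_mem_allocate(G1CollectedHeap* heap, size_t word_size,
  //                                  AllocationContext_t context) {
  //     if (is_humongous_request(word_size)) {
  //       // Humongous requests never use the per-thread fast path.
  //       return heap->attempt_allocation_humongous(word_size);
  //     }
  //     // First level: lock-free allocation in the mutator alloc region.
  //     HeapWord* result = heap->attempt_allocation(word_size);
  //     if (result == NULL) {
  //       // Second level: take the Heap_lock, retry, and possibly
  //       // schedule a GC pause before retrying the allocation.
  //       result = heap->attempt_allocation_slow(word_size, context);
  //     }
  //     return result;
  //   }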

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            AllocationContext_t context,
                                            bool expect_null_mutator_alloc_region);

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);
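
  // A hedged sketch of the callback protocol G1AllocRegion drives for
  // mutator regions (the ordering is illustrative, not a specification):
  //
  //   1. When the current alloc region fills up, G1AllocRegion calls
  //      retire_mutator_alloc_region(old_region, allocated_bytes) so the
  //      heap can account for the bytes allocated in it.
  //   2. It then calls new_mutator_alloc_region(word_size, force) to get
  //      a replacement region, which may be NULL if none is available.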

  // For GC alloc regions.
  bool has_more_regions(InCSetState dest);
  HeapRegion* new_gc_alloc_region(size_t word_size, InCSetState dest);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
  // ...

  // The Concurrent Marking reference processor...
  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }

  size_t unused_committed_regions_in_bytes() const;
  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;

  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  virtual bool is_maximal_no_gc() const {
    return _hrm.available() == 0;
  }

  // Returns whether there are any regions left in the heap for allocation.
  bool has_regions_left_for_allocation() const {
    return !is_maximal_no_gc() || num_free_regions() != 0;
  }
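
  // A small, hedged example of how a caller might use the predicate
  // above; the surrounding decision code is hypothetical:
  //
  //   if (heap->has_regions_left_for_allocation()) {
  //     // Either an uncommitted region can still be committed
  //     // (!is_maximal_no_gc()) or a committed region is completely
  //     // free, so retrying the allocation may succeed without a
  //     // Full GC.
  //   }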

  // The current number of regions in the heap.
  uint num_regions() const { return _hrm.length(); }

  // The max number of regions in the heap.
  uint max_regions() const { return _hrm.max_length(); }

  // The number of regions that are completely free.
  uint num_free_regions() const { return _hrm.num_free_regions(); }

  MemoryUsage get_auxiliary_data_memory_usage() const {
    return _hrm.get_auxiliary_data_memory_usage();
  }

  // The number of regions that are not completely free.
  uint num_used_regions() const { return num_regions() - num_free_regions(); }

#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {