  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

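  // A GC is pending on GC_locker release; flag this operation so the caller
  // knows the allocation can be retried once that GC has run.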
  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

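// Stop-the-world full collection of the generations up to and including
// _max_generation.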
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}

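// Record the metadata allocation request that triggered this operation and
// report an allocation-requiring-GC event to the tracer.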
VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}

// Returns true iff concurrent GCs unload metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
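  // CMS with class unloading: flag that the concurrent collector should
  // collect, so metadata can be reclaimed concurrently.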
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->g1_policy()->max_pause_time_ms();
  }

  // If expansion failed, do a last-ditch collection and try allocating
  // again. A last-ditch collection will clear softrefs. This
  // behavior is similar to the last-ditch collection done for perm
  // gen when it was full and a collection for failed allocation
  // did not free perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                           SIZE_FORMAT, _size);
  }

  if (GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

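// Base constructor for collections triggered by a Java heap allocation;
// word_size is the size of the failed allocation, or 0 if the operation was
// not caused by an allocation.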
VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
  // Only report if operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
  }
}