  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

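  // If the allocation still failed because the GC locker is active and is
  // blocking the collection, record that fact so the caller can stall and
  // retry once the locker is released.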
  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
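  // Collect all generations up to _max_level, clearing all soft references
  // first if the collector policy requires it.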
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}

void VM_CollectForMetadataAllocation::doit_epilogue() {
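  // Report the completed GC-for-allocation attempt to the event tracing
  // framework before running the common VM_GC_Operation epilogue.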
  AllocTracer::send_collect_for_allocation_event(_size * HeapWordSize, _gcid, _gc_attempt);
  VM_GC_Operation::doit_epilogue();
}

VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause,
                                                                 uint gc_attempt)
  : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
    _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL),
    _gc_attempt(gc_attempt), _gcid(GCId::peek()) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
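  // _gcid was sampled with GCId::peek() before the GC runs, so this event
  // can be matched up with the collection it triggers.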
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, _gcid, _gc_attempt);
}

// Returns true iff concurrent GCs unload metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
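  // CMS unloads classes during its concurrent cycle when
  // CMSClassUnloadingEnabled is set, so request a concurrent collection
  // instead of a stop-the-world full GC.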
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

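  // G1 unloads classes during the remark pause of a concurrent mark cycle
  // when ClassUnloadingWithConcurrentMark is enabled, so try to start one.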
  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->g1_policy()->set_initiate_conc_mark_if_possible();

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->g1_policy()->max_pause_time_ms();

// ...

  // If expansion failed, do a last-ditch collection and try allocating
  // again. A last-ditch collection will clear softrefs. This
  // behavior is similar to the last-ditch collection done for perm
  // gen when it was full and a collection for failed allocation
  // did not free perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                           SIZE_FORMAT, _size);
  }

  if (GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before,
                                                 GCCause::Cause cause, uint gc_attempt)
  : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size),
    _gc_attempt(gc_attempt), _gcid(GCId::peek()) {
  // Only report if operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, _gcid, _gc_attempt);
  }
}

void VM_CollectForAllocation::doit_epilogue() {
  // Only report if operation was caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_collect_for_allocation_event(_word_size * HeapWordSize, _gcid, _gc_attempt);
  }
  VM_GC_Operation::doit_epilogue();
}
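
// A minimal usage sketch (illustrative only, not part of this file). Callers
// such as the allocation slow path construct one of these operations on the
// stack and hand it to the VM thread; the exact constructor arguments below
// are assumed for this variant of the code:
//
//   VM_GenCollectForAllocation op(word_size, false /* tlab */, gc_count_before,
//                                 GCCause::_allocation_failure, gc_attempt);
//   VMThread::execute(&op);
//   if (op.prologue_succeeded()) {
//     HeapWord* result = op.result();
//   }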