240 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
241 TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
242
243 if (cur == NULL) {
244 return false;
245 }
246
247 Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
248
249 add_chunk_to_free_list(cur);
250 return true;
251 }
252
253 void G1CMMarkStack::set_empty() {
254 _chunks_in_chunk_list = 0;
255 _hwm = 0;
256 _chunk_list = NULL;
257 _free_list = NULL;
258 }
259
// Allocates a C-heap array able to hold up to max_regions root HeapRegion*
// entries (GC memory tag); all counters and flags start out cleared.
G1CMRootRegions::G1CMRootRegions(uint const max_regions) :
  _root_regions(NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC)),
  _max_regions(max_regions),
  _num_root_regions(0),
  _claimed_root_regions(0),
  _scan_in_progress(false),
  _should_abort(false) { }
267
268 G1CMRootRegions::~G1CMRootRegions() {
269 FREE_C_HEAP_ARRAY(HeapRegion*, _max_regions);
270 }
271
// Forget all previously added root regions. Only the logical length is
// cleared; the backing array keeps its storage for reuse.
void G1CMRootRegions::reset() {
  _num_root_regions = 0;
}
275
// Atomically claims the next free slot and records hr as a root region.
// Must be called at a safepoint; asserts that capacity is not exceeded.
void G1CMRootRegions::add(HeapRegion* hr) {
  assert_at_safepoint();
  // Atomic::add returns the updated value, so subtract one for our slot index.
  size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
  assert(idx < _max_regions, "Trying to add more root regions than there is space " SIZE_FORMAT, _max_regions);
  _root_regions[idx] = hr;
}
282
// Arms the root region scan for a new marking cycle: a scan is only
// considered in progress if at least one root region was added.
void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  _scan_in_progress = _num_root_regions > 0;

  // Restart claiming from the first region and clear any stale abort request.
  _claimed_root_regions = 0;
  _should_abort = false;
}
291
// Hands out root regions one at a time to concurrent workers. Returns NULL
// once all regions are claimed or an abort has been requested.
HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Cheap (racy) fast-path check to avoid bumping the claim counter once
  // everything has been handed out.
  if (_claimed_root_regions >= _num_root_regions) {
    return NULL;
  }

  size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
  // Several threads can race past the check above, so the counter may
  // overshoot; re-validate the claimed index before using it.
  if (claimed_index < _num_root_regions) {
    return _root_regions[claimed_index];
  }
  return NULL;
}
309
// Number of root regions currently recorded, narrowed to uint for callers.
uint G1CMRootRegions::num_root_regions() const {
  return (uint)_num_root_regions;
}
313
// Clears the in-progress flag under RootRegionScan_lock and wakes all
// threads blocked in wait_until_scan_finished().
void G1CMRootRegions::notify_scan_done() {
  MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}
319
// Cancelling a scan completes it from the waiters' point of view, so just
// signal completion.
void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}
323
// Marks the scan as complete and wakes waiters. Unless the scan was
// aborted, every root region must have been claimed by then.
void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  if (!_should_abort) {
    // ">=" rather than "==" because claim_next() may over-claim under races.
    assert(_claimed_root_regions >= num_root_regions(),
           "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
           _claimed_root_regions, num_root_regions());
  }

  notify_scan_done();
}
335
// Blocks until root region scanning completes. Returns false immediately
// when no scan is in progress; true when the caller (potentially) waited.
bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Re-check under the lock; notify_scan_done() flips the flag while
    // holding the same lock, so no wakeup can be lost.
    while (scan_in_progress()) {
      ml.wait();
    }
  }
  return true;
}
349
350 // Returns the maximum number of workers to be used in a concurrent
351 // phase based on the number of GC workers being used in a STW
352 // phase.
353 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
354 return MAX2((num_gc_workers + 2) / 4, 1U);
355 }
356
858 uint result = 0;
859 if (!UseDynamicNumberOfGCThreads ||
860 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
861 !ForceDynamicNumberOfGCThreads)) {
862 result = _max_concurrent_workers;
863 } else {
864 result =
865 WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
866 1, /* Minimum workers */
867 _num_concurrent_workers,
868 Threads::number_of_non_daemon_threads());
869 // Don't scale the result down by scale_concurrent_workers() because
870 // that scaling has already gone into "_max_concurrent_workers".
871 }
872 assert(result > 0 && result <= _max_concurrent_workers,
873 "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
874 _max_concurrent_workers, result);
875 return result;
876 }
877
// Scans the unmarked portion [nTAMS, top) of a single root region,
// applying G1RootRegionScanClosure to each object in turn.
void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
  assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
         "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->next_top_at_mark_start();
  const HeapWord* end = hr->top();
  while (curr < end) {
    // Prefetch ahead to hide memory latency while walking objects.
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    // Advance to the next object; size is in HeapWords (curr is HeapWord*).
    curr += size;
  }
}
894
895 class G1CMRootRegionScanTask : public AbstractGangTask {
896 G1ConcurrentMark* _cm;
897 public:
898 G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
899 AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
900
901 void work(uint worker_id) {
902 assert(Thread::current()->is_ConcurrentGC_thread(),
903 "this should only be done by a conc GC thread");
904
905 G1CMRootRegions* root_regions = _cm->root_regions();
906 HeapRegion* hr = root_regions->claim_next();
907 while (hr != NULL) {
908 _cm->scan_root_region(hr, worker_id);
909 hr = root_regions->claim_next();
910 }
911 }
912 };
913
914 void G1ConcurrentMark::scan_root_regions() {
915 // scan_in_progress() will have been set to true only if there was
916 // at least one root region to scan. So, if it's false, we
917 // should not attempt to do any further work.
918 if (root_regions()->scan_in_progress()) {
919 assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
920
921 _num_concurrent_workers = MIN2(calc_active_marking_workers(),
922 // We distribute work on a per-region basis, so starting
923 // more threads than that is useless.
924 root_regions()->num_root_regions());
925 assert(_num_concurrent_workers <= _max_concurrent_workers,
926 "Maximum number of marking threads exceeded");
927
928 G1CMRootRegionScanTask task(this);
929 log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
|
240 bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
241 TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();
242
243 if (cur == NULL) {
244 return false;
245 }
246
247 Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
248
249 add_chunk_to_free_list(cur);
250 return true;
251 }
252
253 void G1CMMarkStack::set_empty() {
254 _chunks_in_chunk_list = 0;
255 _hwm = 0;
256 _chunk_list = NULL;
257 _free_list = NULL;
258 }
259
// Allocates the backing array of max_regions MemRegion slots; terminates
// VM initialization if the allocation reports failure.
G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
  _root_regions(NULL),
  _max_regions(max_regions),
  _num_root_regions(0),
  _claimed_root_regions(0),
  _scan_in_progress(false),
  _should_abort(false) {
  _root_regions = new MemRegion[_max_regions];
  if (_root_regions == NULL) {
    vm_exit_during_initialization("Could not allocate root MemRegion set.");
  }
}
272
G1CMRootMemRegions::~G1CMRootMemRegions() {
  // Release the backing array; clearing the pointer guards against
  // accidental use after destruction.
  delete[] _root_regions;
  _root_regions = NULL;
}
277
// Forget all previously added root MemRegions. Only the logical length is
// cleared; the backing array keeps its storage for reuse.
void G1CMRootMemRegions::reset() {
  _num_root_regions = 0;
}
281
282 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
283 assert_at_safepoint();
284 size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
285 assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
286 assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be equal to or greater "
287 "than end (" PTR_FORMAT ")", p2i(start), p2i(end));
288 _root_regions[idx].set_start(start);
289 _root_regions[idx].set_end(end);
290 }
291
// Arms the root region scan for a new marking cycle: a scan is only
// considered in progress if at least one root MemRegion was added.
void G1CMRootMemRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  _scan_in_progress = _num_root_regions > 0;

  // Restart claiming from the first region and clear any stale abort request.
  _claimed_root_regions = 0;
  _should_abort = false;
}
300
// Hands out root MemRegions one at a time to concurrent workers. Returns
// NULL once all regions are claimed or an abort has been requested.
MemRegion* G1CMRootMemRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Cheap (racy) fast-path check to avoid bumping the claim counter once
  // everything has been handed out.
  if (_claimed_root_regions >= _num_root_regions) {
    return NULL;
  }

  size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
  // Several threads can race past the check above, so the counter may
  // overshoot; re-validate the claimed index before using it.
  if (claimed_index < _num_root_regions) {
    return &_root_regions[claimed_index];
  }
  return NULL;
}
318
// Number of root MemRegions currently recorded, narrowed to uint for callers.
uint G1CMRootMemRegions::num_root_regions() const {
  return (uint)_num_root_regions;
}
322
// Clears the in-progress flag under RootRegionScan_lock and wakes all
// threads blocked in wait_until_scan_finished().
void G1CMRootMemRegions::notify_scan_done() {
  MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}
328
// Cancelling a scan completes it from the waiters' point of view, so just
// signal completion.
void G1CMRootMemRegions::cancel_scan() {
  notify_scan_done();
}
332
// Marks the scan as complete and wakes waiters. Unless the scan was
// aborted, every root region must have been claimed by then.
void G1CMRootMemRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  if (!_should_abort) {
    // ">=" rather than "==" because claim_next() may over-claim under races.
    assert(_claimed_root_regions >= num_root_regions(),
           "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
           _claimed_root_regions, num_root_regions());
  }

  notify_scan_done();
}
344
// Blocks until root region scanning completes. Returns false immediately
// when no scan is in progress; true when the caller (potentially) waited.
bool G1CMRootMemRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) {
    return false;
  }

  {
    MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Re-check under the lock; notify_scan_done() flips the flag while
    // holding the same lock, so no wakeup can be lost.
    while (scan_in_progress()) {
      ml.wait();
    }
  }
  return true;
}
358
359 // Returns the maximum number of workers to be used in a concurrent
360 // phase based on the number of GC workers being used in a STW
361 // phase.
362 static uint scale_concurrent_worker_threads(uint num_gc_workers) {
363 return MAX2((num_gc_workers + 2) / 4, 1U);
364 }
365
867 uint result = 0;
868 if (!UseDynamicNumberOfGCThreads ||
869 (!FLAG_IS_DEFAULT(ConcGCThreads) &&
870 !ForceDynamicNumberOfGCThreads)) {
871 result = _max_concurrent_workers;
872 } else {
873 result =
874 WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
875 1, /* Minimum workers */
876 _num_concurrent_workers,
877 Threads::number_of_non_daemon_threads());
878 // Don't scale the result down by scale_concurrent_workers() because
879 // that scaling has already gone into "_max_concurrent_workers".
880 }
881 assert(result > 0 && result <= _max_concurrent_workers,
882 "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
883 _max_concurrent_workers, result);
884 return result;
885 }
886
// Scans all objects in the given root MemRegion, applying
// G1RootRegionScanClosure to each in turn.
void G1ConcurrentMark::scan_root_region(MemRegion* region, uint worker_id) {
#ifdef ASSERT
  // Validate against the HeapRegion containing the MemRegion's last word:
  // root MemRegions must start exactly at that region's nTAMS.
  HeapWord* last = region->last();
  HeapRegion* hr = _g1h->heap_region_containing(last);
  assert(hr->is_old() || hr->next_top_at_mark_start() == hr->bottom(),
         "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
  assert(hr->next_top_at_mark_start() == region->start(),
         "MemRegion start should be equal to nTAMS");
#endif

  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = region->start();
  const HeapWord* end = region->end();
  while (curr < end) {
    // Prefetch ahead to hide memory latency while walking objects.
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    // Advance to the next object; size is in HeapWords (curr is HeapWord*).
    curr += size;
  }
}
910
911 class G1CMRootRegionScanTask : public AbstractGangTask {
912 G1ConcurrentMark* _cm;
913 public:
914 G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
915 AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }
916
917 void work(uint worker_id) {
918 assert(Thread::current()->is_ConcurrentGC_thread(),
919 "this should only be done by a conc GC thread");
920
921 G1CMRootMemRegions* root_regions = _cm->root_regions();
922 MemRegion* region = root_regions->claim_next();
923 while (region != NULL) {
924 _cm->scan_root_region(region, worker_id);
925 region = root_regions->claim_next();
926 }
927 }
928 };
929
930 void G1ConcurrentMark::scan_root_regions() {
931 // scan_in_progress() will have been set to true only if there was
932 // at least one root region to scan. So, if it's false, we
933 // should not attempt to do any further work.
934 if (root_regions()->scan_in_progress()) {
935 assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
936
937 _num_concurrent_workers = MIN2(calc_active_marking_workers(),
938 // We distribute work on a per-region basis, so starting
939 // more threads than that is useless.
940 root_regions()->num_root_regions());
941 assert(_num_concurrent_workers <= _max_concurrent_workers,
942 "Maximum number of marking threads exceeded");
943
944 G1CMRootRegionScanTask task(this);
945 log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
|