1319 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
1320
1321 // Iterate over all objects, calling "cl.do_object" on each.
1322 virtual void object_iterate(ObjectClosure* cl);
1323
1324 virtual void safe_object_iterate(ObjectClosure* cl) {  // Default: simply delegates to object_iterate(); subclasses may override. TODO(review): confirm intended "safe" semantics.
1325 object_iterate(cl);
1326 }
1327
1328 // Iterate over all spaces in use in the heap, in ascending address order.
1329 virtual void space_iterate(SpaceClosure* cl);
1330
1331 // Iterate over heap regions, in address order, terminating the
1332 // iteration early if the "doHeapRegion" method returns "true".
1333 void heap_region_iterate(HeapRegionClosure* blk) const;
1334
1335 // Return the region with the given index. It assumes the index is valid.
1336 inline HeapRegion* region_at(uint index) const;
1337
1338 // Divide the heap region sequence into "chunks" of some size (the number
1339 // of regions divided by the number of parallel threads times some
1340 // overpartition factor, currently 4). Assumes that this will be called
1341 // in parallel by ParallelGCThreads worker threads with distinct worker
1342 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1343 // calls will use the same "claim_value", and that that claim value is
1344 // different from the claim_value of any heap region before the start of
1345 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
1346 // attempting to claim the first region in each chunk, and, if
1347 // successful, applying the closure to each region in the chunk (and
1348 // setting the claim value of the second and subsequent regions of the
1349 // chunk.) For now requires that "doHeapRegion" always returns "false",
1350 // i.e., that a closure never attempt to abort a traversal.
1351 void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1352 uint worker,
1353 uint no_of_par_workers,
1354 jint claim_value);
1355
1356 // It resets all the region claim values to the default.
1357 void reset_heap_region_claim_values();
1358
1359 // Resets the claim values of regions in the current
1360 // collection set to the default.
1361 void reset_cset_heap_region_claim_values();
1362
1363 #ifdef ASSERT
1364 bool check_heap_region_claim_values(jint claim_value);
1365
1366 // Same as the routine above but only checks regions in the
1367 // current collection set.
1368 bool check_cset_heap_region_claim_values(jint claim_value);
1369 #endif // ASSERT
1370
1371 // Clear the cached cset start regions and (more importantly)
1372 // the time stamps. Called when we reset the GC time stamp.
1373 void clear_cset_start_regions();
1374
1375 // Given the id of a worker, obtain or calculate a suitable
1376 // starting region for iterating over the current collection set.
1377 HeapRegion* start_cset_region_for_worker(uint worker_i);
1378
1379 // This is a convenience method that is used by the
1380 // HeapRegionIterator classes to calculate the starting region for
1381 // each worker so that they do not all start from the same region.
1382 HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
1383
1384 // Iterate over the regions (if any) in the current collection set.
1385 void collection_set_iterate(HeapRegionClosure* blk);
1386
1387 // As above but starting from region r
1388 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1389
1390 HeapRegion* next_compaction_region(const HeapRegion* from) const;
1391
1392 // A CollectedHeap will contain some number of spaces. This finds the
1393 // space containing a given address, or else returns NULL.
1394 virtual Space* space_containing(const void* addr) const;
1395
1396 // Returns the HeapRegion that contains addr. addr must not be NULL.
1397 template <class T>
1398 inline HeapRegion* heap_region_containing_raw(const T addr) const;
1399
1400 // Returns the HeapRegion that contains addr. addr must not be NULL.
1401 // If addr is within a humongous continues region, it returns its humongous start region.
1402 template <class T>
|
1319 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
1320
1321 // Iterate over all objects, calling "cl.do_object" on each.
1322 virtual void object_iterate(ObjectClosure* cl);
1323
1324 virtual void safe_object_iterate(ObjectClosure* cl) {  // Default: simply delegates to object_iterate(); subclasses may override. TODO(review): confirm intended "safe" semantics.
1325 object_iterate(cl);
1326 }
1327
1328 // Iterate over all spaces in use in the heap, in ascending address order.
1329 virtual void space_iterate(SpaceClosure* cl);
1330
1331 // Iterate over heap regions, in address order, terminating the
1332 // iteration early if the "doHeapRegion" method returns "true".
1333 void heap_region_iterate(HeapRegionClosure* blk) const;
1334
1335 // Return the region with the given index. It assumes the index is valid.
1336 inline HeapRegion* region_at(uint index) const;
1337
1338 // Divide the heap region sequence into "chunks" of some size (the number
1339 // of regions divided by the number of parallel threads).
1340 // Assumes that this will be called
1341 // in parallel by ParallelGCThreads worker threads with distinct worker
1342 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1343 // calls will use the same "claim_value", and that that claim value is
1344 // different from the claim_value of any heap region before the start of
1345 // the iteration.
1346 // Applies "blk->doHeapRegion" to each of the regions, by
1347 // attempting to claim the first region in each chunk, and, if
1348 // successful, applying the closure to each region in the chunk (and
1349 // setting the claim value of the second and subsequent regions of the
1350 // chunk.)
1351 void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1352 uint worker,
1353 uint no_of_par_workers,
1354 jint claim_value) const;
1355
1356 // It resets all the region claim values to the default.
1357 void reset_heap_region_claim_values();
1358
1359 // Resets the claim values of regions in the current
1360 // collection set to the default.
1361 void reset_cset_heap_region_claim_values();
1362
1363 #ifdef ASSERT
1364 bool check_heap_region_claim_values(jint claim_value);
1365
1366 // Same as the routine above but only checks regions in the
1367 // current collection set.
1368 bool check_cset_heap_region_claim_values(jint claim_value);
1369 #endif // ASSERT
1370
1371 // Clear the cached cset start regions and (more importantly)
1372 // the time stamps. Called when we reset the GC time stamp.
1373 void clear_cset_start_regions();
1374
1375 // Given the id of a worker, obtain or calculate a suitable
1376 // starting region for iterating over the current collection set.
1377 HeapRegion* start_cset_region_for_worker(uint worker_i);
1378
1379 // This is a convenience method that is used to calculate the starting
1380 // region index for each worker so that they do not all start from the same
1381 // region.
1382 uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
1383
1384 // Iterate over the regions (if any) in the current collection set.
1385 void collection_set_iterate(HeapRegionClosure* blk);
1386
1387 // As above but starting from region r
1388 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1389
1390 HeapRegion* next_compaction_region(const HeapRegion* from) const;
1391
1392 // A CollectedHeap will contain some number of spaces. This finds the
1393 // space containing a given address, or else returns NULL.
1394 virtual Space* space_containing(const void* addr) const;
1395
1396 // Returns the HeapRegion that contains addr. addr must not be NULL.
1397 template <class T>
1398 inline HeapRegion* heap_region_containing_raw(const T addr) const;
1399
1400 // Returns the HeapRegion that contains addr. addr must not be NULL.
1401 // If addr is within a humongous continues region, it returns its humongous start region.
1402 template <class T>
|