< prev index next >

src/share/vm/gc/g1/heapRegion.cpp

Print this page




   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/nmethod.hpp"
  27 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
  28 #include "gc/g1/g1CollectedHeap.inline.hpp"

  29 #include "gc/g1/g1OopClosures.inline.hpp"
  30 #include "gc/g1/heapRegion.inline.hpp"
  31 #include "gc/g1/heapRegionBounds.inline.hpp"
  32 #include "gc/g1/heapRegionManager.inline.hpp"
  33 #include "gc/g1/heapRegionRemSet.hpp"

  34 #include "gc/shared/genOopClosures.inline.hpp"
  35 #include "gc/shared/liveRange.hpp"
  36 #include "gc/shared/space.inline.hpp"
  37 #include "logging/log.hpp"
  38 #include "memory/iterator.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/atomic.inline.hpp"
  41 #include "runtime/orderAccess.inline.hpp"
  42 
// Region geometry shared by all HeapRegions. Zero at class-load time;
// presumably assigned once during heap setup when the region (grain)
// size is chosen — TODO confirm where these are initialized.
int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;
  48 
  49 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
  50                                  HeapRegion* hr,
  51                                  G1ParPushHeapRSClosure* cl,
  52                                  CardTableModRefBS::PrecisionStyle precision) :
  53   DirtyCardToOopClosure(hr, cl, precision, NULL),


 195   hrrs->clear();
 196   CardTableModRefBS* ct_bs =
 197     barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
 198   ct_bs->clear(MemRegion(bottom(), end()));
 199 }
 200 
 201 void HeapRegion::calc_gc_efficiency() {
 202   // GC efficiency is the ratio of how much space would be
 203   // reclaimed over how long we predict it would take to reclaim it.
 204   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 205   G1CollectorPolicy* g1p = g1h->g1_policy();
 206 
 207   // Retrieve a prediction of the elapsed time for this region for
 208   // a mixed gc because the region will only be evacuated during a
 209   // mixed gc.
 210   double region_elapsed_time_ms =
 211     g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
 212   _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 213 }
 214 






























// Convert this (empty, non-humongous) region into the first region of a
// humongous object.
// obj_top:   address just past the last word of the humongous object
// fill_size: forwarded to the block-offset table together with obj_top;
//            presumably the size of trailing filler — TODO confirm at caller.
void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");

  // A starts-humongous region is its own start region.
  _type.set_starts_humongous();
  _humongous_start_region = this;

  _bot_part.set_for_starts_humongous(obj_top, fill_size);
}
 224 
// Convert this (empty, non-humongous) region into a continuation region
// of the humongous object whose first region is first_hr.
void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  // Link back to the start region so the object's header can be found.
  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}
 233 
// Drop the humongous bookkeeping for this region. Only the start-region
// link is cleared here; _type is left untouched, so the caller is
// presumably responsible for installing the region's new type afterwards.
void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  // Each HeapRegion object covers exactly one grain of the heap.
  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}
 240 
 241 HeapRegion::HeapRegion(uint hrm_index,
 242                        G1BlockOffsetTable* bot,
 243                        MemRegion mr) :
 244     G1ContiguousSpace(bot),
 245     _hrm_index(hrm_index),
 246     _allocation_context(AllocationContext::system()),
 247     _humongous_start_region(NULL),
 248     _next_in_special_set(NULL),
 249     _evacuation_failed(false),


 253 #ifdef ASSERT
 254     _containing_set(NULL),
 255 #endif // ASSERT
 256      _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
 257     _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
 258     _predicted_bytes_to_copy(0)
 259 {
 260   _rem_set = new HeapRegionRemSet(bot, this);
 261 
 262   initialize(mr);
 263 }
 264 
// (Re-)initialize this region to cover the memory range mr. The remembered
// set must already be empty. Initializes the underlying contiguous space
// (optionally clearing/mangling it), resets region bookkeeping, and leaves
// the region empty (top == bottom).
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1ContiguousSpace::initialize(mr, clear_space, mangle_space);

  // Space clearing (if requested) was handled above, so hr_clear is told
  // not to clear it again.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();
}
 274 
// Delegate to the heap to pick the next region to compact into; the
// compaction ordering of regions is owned by G1CollectedHeap, not by
// the individual region.
CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}
 278 
 279 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 280                                                     bool during_conc_mark) {
 281   // We always recreate the prev marking info and we'll explicitly
 282   // mark all objects we find to be self-forwarded on the prev
 283   // bitmap. So all objects need to be below PTAMS.
 284   _prev_marked_bytes = 0;
 285 
 286   if (during_initial_mark) {
 287     // During initial-mark, we'll also explicitly mark all objects
 288     // we find to be self-forwarded on the next bitmap. So all
 289     // objects need to be below NTAMS.
 290     _next_top_at_mark_start = top();
 291     _next_marked_bytes = 0;
 292   } else if (during_conc_mark) {




   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/nmethod.hpp"
  27 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
  28 #include "gc/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc/g1/g1HeapRegionTraceType.hpp"
  30 #include "gc/g1/g1OopClosures.inline.hpp"
  31 #include "gc/g1/heapRegion.inline.hpp"
  32 #include "gc/g1/heapRegionBounds.inline.hpp"
  33 #include "gc/g1/heapRegionManager.inline.hpp"
  34 #include "gc/g1/heapRegionRemSet.hpp"
  35 #include "gc/g1/heapRegionTracer.hpp"
  36 #include "gc/shared/genOopClosures.inline.hpp"
  37 #include "gc/shared/liveRange.hpp"
  38 #include "gc/shared/space.inline.hpp"
  39 #include "logging/log.hpp"
  40 #include "memory/iterator.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "runtime/atomic.inline.hpp"
  43 #include "runtime/orderAccess.inline.hpp"
  44 
// Region geometry shared by all HeapRegions. Zero at class-load time;
// presumably assigned once during heap setup when the region (grain)
// size is chosen — TODO confirm where these are initialized.
int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;
  50 
  51 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
  52                                  HeapRegion* hr,
  53                                  G1ParPushHeapRSClosure* cl,
  54                                  CardTableModRefBS::PrecisionStyle precision) :
  55   DirtyCardToOopClosure(hr, cl, precision, NULL),


 197   hrrs->clear();
 198   CardTableModRefBS* ct_bs =
 199     barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
 200   ct_bs->clear(MemRegion(bottom(), end()));
 201 }
 202 
 203 void HeapRegion::calc_gc_efficiency() {
 204   // GC efficiency is the ratio of how much space would be
 205   // reclaimed over how long we predict it would take to reclaim it.
 206   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 207   G1CollectorPolicy* g1p = g1h->g1_policy();
 208 
 209   // Retrieve a prediction of the elapsed time for this region for
 210   // a mixed gc because the region will only be evacuated during a
 211   // mixed gc.
 212   double region_elapsed_time_ms =
 213     g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
 214   _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 215 }
 216 
// Transition this region's type to Free. The trace event is sent before
// _type changes so that the event's "from" value (get_trace_type()) still
// reflects the old type.
void HeapRegion::set_free() {
  report_region_type_change(G1HeapRegionTraceType::Free);
  _type.set_free();
}
 221 
// Transition this region's type to Eden, reporting the change first so
// the trace event records the old type as "from".
void HeapRegion::set_eden() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden();
}
 226 
// Transition to Eden via the pre-GC variant of the type setter —
// presumably it differs from set_eden() only in which state transitions
// HeapRegionType accepts (confirm in heapRegionType). The same Eden trace
// event is reported either way, before _type changes.
void HeapRegion::set_eden_pre_gc() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden_pre_gc();
}
 231 
// Transition this region's type to Survivor, reporting the change first
// so the trace event records the old type as "from".
void HeapRegion::set_survivor() {
  report_region_type_change(G1HeapRegionTraceType::Survivor);
  _type.set_survivor();
}
 236 
// Transition this region's type to Old, reporting the change first so
// the trace event records the old type as "from".
void HeapRegion::set_old() {
  report_region_type_change(G1HeapRegionTraceType::Old);
  _type.set_old();
}
 241 
// Transition this region's type to Archive, reporting the change first
// so the trace event records the old type as "from".
void HeapRegion::set_archive() {
  report_region_type_change(G1HeapRegionTraceType::Archive);
  _type.set_archive();
}
 246 
// Convert this (empty, non-humongous) region into the first region of a
// humongous object.
// obj_top:   address just past the last word of the humongous object
// fill_size: forwarded to the block-offset table together with obj_top;
//            presumably the size of trailing filler — TODO confirm at caller.
void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");

  // Report before mutating _type so the trace event's "from" value
  // (get_trace_type()) still reflects the old type.
  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
  _type.set_starts_humongous();
  _humongous_start_region = this;

  _bot_part.set_for_starts_humongous(obj_top, fill_size);
}
 257 
// Convert this (empty, non-humongous) region into a continuation region
// of the humongous object whose first region is first_hr.
void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  // Report before mutating _type so the trace event's "from" value
  // (get_trace_type()) still reflects the old type.
  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}
 267 
// Drop the humongous bookkeeping for this region. Only the start-region
// link is cleared here; _type is left untouched, so the caller is
// presumably responsible for installing the region's new type afterwards.
void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  // Each HeapRegion object covers exactly one grain of the heap.
  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}
 274 
 275 HeapRegion::HeapRegion(uint hrm_index,
 276                        G1BlockOffsetTable* bot,
 277                        MemRegion mr) :
 278     G1ContiguousSpace(bot),
 279     _hrm_index(hrm_index),
 280     _allocation_context(AllocationContext::system()),
 281     _humongous_start_region(NULL),
 282     _next_in_special_set(NULL),
 283     _evacuation_failed(false),


 287 #ifdef ASSERT
 288     _containing_set(NULL),
 289 #endif // ASSERT
 290      _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
 291     _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
 292     _predicted_bytes_to_copy(0)
 293 {
 294   _rem_set = new HeapRegionRemSet(bot, this);
 295 
 296   initialize(mr);
 297 }
 298 
// (Re-)initialize this region to cover the memory range mr. The remembered
// set must already be empty. Initializes the underlying contiguous space
// (optionally clearing/mangling it), resets region bookkeeping, and leaves
// the region empty (top == bottom).
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1ContiguousSpace::initialize(mr, clear_space, mangle_space);

  // Space clearing (if requested) was handled above, so hr_clear is told
  // not to clear it again.
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();
}
 308 
// Emit a region-type-change trace event for this region.
// 'to' is the new type; the current type (get_trace_type()) is sent as the
// "from" value, which is why all visible callers invoke this *before*
// mutating _type. Also records the region's bottom address, used bytes,
// and allocation context.
void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
  HeapRegionTracer::send_region_type_change(_hrm_index,
                                            get_trace_type(),
                                            to,
                                            (uintptr_t)bottom(),
                                            used(),
                                            (uint)allocation_context());
}
 317 
// Delegate to the heap to pick the next region to compact into; the
// compaction ordering of regions is owned by G1CollectedHeap, not by
// the individual region.
CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}
 321 
 322 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
 323                                                     bool during_conc_mark) {
 324   // We always recreate the prev marking info and we'll explicitly
 325   // mark all objects we find to be self-forwarded on the prev
 326   // bitmap. So all objects need to be below PTAMS.
 327   _prev_marked_bytes = 0;
 328 
 329   if (during_initial_mark) {
 330     // During initial-mark, we'll also explicitly mark all objects
 331     // we find to be self-forwarded on the next bitmap. So all
 332     // objects need to be below NTAMS.
 333     _next_top_at_mark_start = top();
 334     _next_marked_bytes = 0;
 335   } else if (during_conc_mark) {


< prev index next >