1 /* 2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "code/nmethod.hpp" 27 #include "gc/g1/g1BlockOffsetTable.inline.hpp" 28 #include "gc/g1/g1CollectedHeap.inline.hpp" 29 #include "gc/g1/g1HeapRegionTraceType.hpp" 30 #include "gc/g1/g1OopClosures.inline.hpp" 31 #include "gc/g1/heapRegion.inline.hpp" 32 #include "gc/g1/heapRegionBounds.inline.hpp" 33 #include "gc/g1/heapRegionManager.inline.hpp" 34 #include "gc/g1/heapRegionRemSet.hpp" 35 #include "gc/g1/heapRegionTracer.hpp" 36 #include "gc/shared/genOopClosures.inline.hpp" 37 #include "gc/shared/space.inline.hpp" 38 #include "logging/log.hpp" 39 #include "logging/logStream.hpp" 40 #include "memory/iterator.inline.hpp" 41 #include "memory/resourceArea.hpp" 42 #include "oops/access.inline.hpp" 43 #include "oops/compressedOops.inline.hpp" 44 #include "oops/oop.inline.hpp" 45 #include "runtime/atomic.hpp" 46 #include "runtime/orderAccess.hpp" 47 #include "utilities/growableArray.hpp" 48 49 int HeapRegion::LogOfHRGrainBytes = 0; 50 int HeapRegion::LogOfHRGrainWords = 0; 51 size_t HeapRegion::GrainBytes = 0; 52 size_t HeapRegion::GrainWords = 0; 53 size_t HeapRegion::CardsPerRegion = 0; 54 55 size_t HeapRegion::max_region_size() { 56 return HeapRegionBounds::max_size(); 57 } 58 59 size_t HeapRegion::min_region_size_in_words() { 60 return HeapRegionBounds::min_size() >> LogHeapWordSize; 61 } 62 63 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) { 64 size_t region_size = G1HeapRegionSize; 65 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) { 66 size_t average_heap_size = (initial_heap_size + max_heap_size) / 2; 67 region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(), 68 HeapRegionBounds::min_size()); 69 } 70 71 int region_size_log = log2_long((jlong) region_size); 72 // Recalculate the region size to make sure it's a power of 73 // 2. This means that region_size is the largest power of 2 that's 74 // <= what we've calculated so far. 75 region_size = ((size_t)1 << region_size_log); 76 77 // Now make sure that we don't go over or under our limits. 78 if (region_size < HeapRegionBounds::min_size()) { 79 region_size = HeapRegionBounds::min_size(); 80 } else if (region_size > HeapRegionBounds::max_size()) { 81 region_size = HeapRegionBounds::max_size(); 82 } 83 84 // And recalculate the log. 85 region_size_log = log2_long((jlong) region_size); 86 87 // Now, set up the globals. 88 guarantee(LogOfHRGrainBytes == 0, "we should only set it once"); 89 LogOfHRGrainBytes = region_size_log; 90 91 guarantee(LogOfHRGrainWords == 0, "we should only set it once"); 92 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize; 93 94 guarantee(GrainBytes == 0, "we should only set it once"); 95 // The cast to int is safe, given that we've bounded region_size by 96 // MIN_REGION_SIZE and MAX_REGION_SIZE. 97 GrainBytes = region_size; 98 log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", GrainBytes / M); 99 100 guarantee(GrainWords == 0, "we should only set it once"); 101 GrainWords = GrainBytes >> LogHeapWordSize; 102 guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity"); 103 104 guarantee(CardsPerRegion == 0, "we should only set it once"); 105 CardsPerRegion = GrainBytes >> G1CardTable::card_shift; 106 107 if (G1HeapRegionSize != GrainBytes) { 108 FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes); 109 } 110 } 111 112 void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) { 113 assert(_humongous_start_region == NULL, 114 "we should have already filtered out humongous regions"); 115 assert(!in_collection_set(), 116 "Should not clear heap region %u in the collection set", hrm_index()); 117 118 set_young_index_in_cset(-1); 119 uninstall_surv_rate_group(); 120 set_free(); 121 reset_pre_dummy_top(); 122 123 if (!keep_remset) { 124 if (locked) { 125 rem_set()->clear_locked(); 126 } else { 127 rem_set()->clear(); 128 } 129 } 130 131 zero_marked_bytes(); 132 133 init_top_at_mark_start(); 134 if (clear_space) clear(SpaceDecorator::Mangle); 135 } 136 137 void HeapRegion::clear_cardtable() { 138 G1CardTable* ct = G1CollectedHeap::heap()->card_table(); 139 ct->clear(MemRegion(bottom(), end())); 140 } 141 142 void HeapRegion::calc_gc_efficiency() { 143 // GC efficiency is the ratio of how much space would be 144 // reclaimed over how long we predict it would take to reclaim it. 145 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 146 G1Policy* g1p = g1h->g1_policy(); 147 148 // Retrieve a prediction of the elapsed time for this region for 149 // a mixed gc because the region will only be evacuated during a 150 // mixed gc. 151 double region_elapsed_time_ms = 152 g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */); 153 _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms; 154 } 155 156 void HeapRegion::set_free() { 157 report_region_type_change(G1HeapRegionTraceType::Free); 158 _type.set_free(); 159 } 160 161 void HeapRegion::set_eden() { 162 report_region_type_change(G1HeapRegionTraceType::Eden); 163 _type.set_eden(); 164 } 165 166 void HeapRegion::set_eden_pre_gc() { 167 report_region_type_change(G1HeapRegionTraceType::Eden); 168 _type.set_eden_pre_gc(); 169 } 170 171 void HeapRegion::set_survivor() { 172 report_region_type_change(G1HeapRegionTraceType::Survivor); 173 _type.set_survivor(); 174 } 175 176 void HeapRegion::move_to_old() { 177 if (_type.relabel_as_old()) { 178 report_region_type_change(G1HeapRegionTraceType::Old); 179 } 180 } 181 182 void HeapRegion::set_old() { 183 report_region_type_change(G1HeapRegionTraceType::Old); 184 _type.set_old(); 185 } 186 187 void HeapRegion::set_open_archive() { 188 report_region_type_change(G1HeapRegionTraceType::OpenArchive); 189 _type.set_open_archive(); 190 } 191 192 void HeapRegion::set_closed_archive() { 193 report_region_type_change(G1HeapRegionTraceType::ClosedArchive); 194 _type.set_closed_archive(); 195 } 196 197 void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) { 198 assert(!is_humongous(), "sanity / pre-condition"); 199 assert(top() == bottom(), "should be empty"); 200 201 report_region_type_change(G1HeapRegionTraceType::StartsHumongous); 202 _type.set_starts_humongous(); 203 _humongous_start_region = this; 204 205 _bot_part.set_for_starts_humongous(obj_top, fill_size); 206 } 207 208 void HeapRegion::set_continues_humongous(HeapRegion* first_hr) { 209 assert(!is_humongous(), "sanity / pre-condition"); 210 assert(top() == bottom(), "should be empty"); 211 assert(first_hr->is_starts_humongous(), "pre-condition"); 212 213 report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous); 214 _type.set_continues_humongous(); 215 _humongous_start_region = first_hr; 216 217 _bot_part.set_object_can_span(true); 218 } 219 220 void HeapRegion::clear_humongous() { 221 assert(is_humongous(), "pre-condition"); 222 223 assert(capacity() == HeapRegion::GrainBytes, "pre-condition"); 224 _humongous_start_region = NULL; 225 226 _bot_part.set_object_can_span(false); 227 } 228 229 HeapRegion::HeapRegion(uint hrm_index, 230 G1BlockOffsetTable* bot, 231 MemRegion mr) : 232 G1ContiguousSpace(bot), 233 _hrm_index(hrm_index), 234 _humongous_start_region(NULL), 235 _evacuation_failed(false), 236 _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0), 237 _next(NULL), _prev(NULL), 238 #ifdef ASSERT 239 _containing_set(NULL), 240 #endif // ASSERT 241 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1), 242 _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0) 243 { 244 _rem_set = new HeapRegionRemSet(bot, this); 245 246 initialize(mr); 247 } 248 249 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { 250 assert(_rem_set->is_empty(), "Remembered set must be empty"); 251 252 G1ContiguousSpace::initialize(mr, clear_space, mangle_space); 253 254 hr_clear(false /*par*/, false /*clear_space*/); 255 set_top(bottom()); 256 } 257 258 void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) { 259 HeapRegionTracer::send_region_type_change(_hrm_index, 260 get_trace_type(), 261 to, 262 (uintptr_t)bottom(), 263 used()); 264 } 265 266 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark, 267 bool during_conc_mark) { 268 // We always recreate the prev marking info and we'll explicitly 269 // mark all objects we find to be self-forwarded on the prev 270 // bitmap. So all objects need to be below PTAMS. 271 _prev_marked_bytes = 0; 272 273 if (during_initial_mark) { 274 // During initial-mark, we'll also explicitly mark all objects 275 // we find to be self-forwarded on the next bitmap. So all 276 // objects need to be below NTAMS. 277 _next_top_at_mark_start = top(); 278 _next_marked_bytes = 0; 279 } else if (during_conc_mark) { 280 // During concurrent mark, all objects in the CSet (including 281 // the ones we find to be self-forwarded) are implicitly live. 282 // So all objects need to be above NTAMS. 283 _next_top_at_mark_start = bottom(); 284 _next_marked_bytes = 0; 285 } 286 } 287 288 void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) { 289 assert(marked_bytes <= used(), 290 "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used()); 291 _prev_top_at_mark_start = top(); 292 _prev_marked_bytes = marked_bytes; 293 } 294 295 // Code roots support 296 297 void HeapRegion::add_strong_code_root(nmethod* nm) { 298 HeapRegionRemSet* hrrs = rem_set(); 299 hrrs->add_strong_code_root(nm); 300 } 301 302 void HeapRegion::add_strong_code_root_locked(nmethod* nm) { 303 assert_locked_or_safepoint(CodeCache_lock); 304 HeapRegionRemSet* hrrs = rem_set(); 305 hrrs->add_strong_code_root_locked(nm); 306 } 307 308 void HeapRegion::remove_strong_code_root(nmethod* nm) { 309 HeapRegionRemSet* hrrs = rem_set(); 310 hrrs->remove_strong_code_root(nm); 311 } 312 313 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const { 314 HeapRegionRemSet* hrrs = rem_set(); 315 hrrs->strong_code_roots_do(blk); 316 } 317 318 class VerifyStrongCodeRootOopClosure: public OopClosure { 319 const HeapRegion* _hr; 320 bool _failures; 321 bool _has_oops_in_region; 322 323 template <class T> void do_oop_work(T* p) { 324 T heap_oop = RawAccess<>::oop_load(p); 325 if (!CompressedOops::is_null(heap_oop)) { 326 oop obj = CompressedOops::decode_not_null(heap_oop); 327 328 // Note: not all the oops embedded in the nmethod are in the 329 // current region. We only look at those which are. 330 if (_hr->is_in(obj)) { 331 // Object is in the region. Check that its less than top 332 if (_hr->top() <= (HeapWord*)obj) { 333 // Object is above top 334 log_error(gc, verify)("Object " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ") is above top " PTR_FORMAT, 335 p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top())); 336 _failures = true; 337 return; 338 } 339 // Nmethod has at least one oop in the current region 340 _has_oops_in_region = true; 341 } 342 } 343 } 344 345 public: 346 VerifyStrongCodeRootOopClosure(const HeapRegion* hr): 347 _hr(hr), _failures(false), _has_oops_in_region(false) {} 348 349 void do_oop(narrowOop* p) { do_oop_work(p); } 350 void do_oop(oop* p) { do_oop_work(p); } 351 352 bool failures() { return _failures; } 353 bool has_oops_in_region() { return _has_oops_in_region; } 354 }; 355 356 class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure { 357 const HeapRegion* _hr; 358 bool _failures; 359 public: 360 VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) : 361 _hr(hr), _failures(false) {} 362 363 void do_code_blob(CodeBlob* cb) { 364 nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null(); 365 if (nm != NULL) { 366 // Verify that the nemthod is live 367 if (!nm->is_alive()) { 368 log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots", 369 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm)); 370 _failures = true; 371 } else { 372 VerifyStrongCodeRootOopClosure oop_cl(_hr); 373 nm->oops_do(&oop_cl); 374 if (!oop_cl.has_oops_in_region()) { 375 log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region", 376 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm)); 377 _failures = true; 378 } else if (oop_cl.failures()) { 379 log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT, 380 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm)); 381 _failures = true; 382 } 383 } 384 } 385 } 386 387 bool failures() { return _failures; } 388 }; 389 390 void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const { 391 if (!G1VerifyHeapRegionCodeRoots) { 392 // We're not verifying code roots. 393 return; 394 } 395 if (vo == VerifyOption_G1UseFullMarking) { 396 // Marking verification during a full GC is performed after class 397 // unloading, code cache unloading, etc so the strong code roots 398 // attached to each heap region are in an inconsistent state. They won't 399 // be consistent until the strong code roots are rebuilt after the 400 // actual GC. Skip verifying the strong code roots in this particular 401 // time. 402 assert(VerifyDuringGC, "only way to get here"); 403 return; 404 } 405 406 HeapRegionRemSet* hrrs = rem_set(); 407 size_t strong_code_roots_length = hrrs->strong_code_roots_list_length(); 408 409 // if this region is empty then there should be no entries 410 // on its strong code root list 411 if (is_empty()) { 412 if (strong_code_roots_length > 0) { 413 log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is empty but has " SIZE_FORMAT " code root entries", 414 p2i(bottom()), p2i(end()), strong_code_roots_length); 415 *failures = true; 416 } 417 return; 418 } 419 420 if (is_continues_humongous()) { 421 if (strong_code_roots_length > 0) { 422 log_error(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries", 423 HR_FORMAT_PARAMS(this), strong_code_roots_length); 424 *failures = true; 425 } 426 return; 427 } 428 429 VerifyStrongCodeRootCodeBlobClosure cb_cl(this); 430 strong_code_roots_do(&cb_cl); 431 432 if (cb_cl.failures()) { 433 *failures = true; 434 } 435 } 436 437 void HeapRegion::print() const { print_on(tty); } 438 void HeapRegion::print_on(outputStream* st) const { 439 st->print("|%4u", this->_hrm_index); 440 st->print("|" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT, 441 p2i(bottom()), p2i(top()), p2i(end())); 442 st->print("|%3d%%", (int) ((double) used() * 100 / capacity())); 443 st->print("|%2s", get_short_type_str()); 444 if (in_collection_set()) { 445 st->print("|CS"); 446 } else { 447 st->print("| "); 448 } 449 st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "| %s ", 450 p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()), rem_set()->get_state_str()); 451 } 452 453 class G1VerificationClosure : public BasicOopIterateClosure { 454 protected: 455 G1CollectedHeap* _g1h; 456 G1CardTable *_ct; 457 oop _containing_obj; 458 bool _failures; 459 int _n_failures; 460 VerifyOption _vo; 461 public: 462 // _vo == UsePrevMarking -> use "prev" marking information, 463 // _vo == UseNextMarking -> use "next" marking information, 464 // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS. 465 G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) : 466 _g1h(g1h), _ct(g1h->card_table()), 467 _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) { 468 } 469 470 void set_containing_obj(oop obj) { 471 _containing_obj = obj; 472 } 473 474 bool failures() { return _failures; } 475 int n_failures() { return _n_failures; } 476 477 void print_object(outputStream* out, oop obj) { 478 #ifdef PRODUCT 479 Klass* k = obj->klass(); 480 const char* class_name = k->external_name(); 481 out->print_cr("class name %s", class_name); 482 #else // PRODUCT 483 obj->print_on(out); 484 #endif // PRODUCT 485 } 486 487 // This closure provides its own oop verification code. 488 debug_only(virtual bool should_verify_oops() { return false; }) 489 }; 490 491 class VerifyLiveClosure : public G1VerificationClosure { 492 public: 493 VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {} 494 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 495 virtual void do_oop(oop* p) { do_oop_work(p); } 496 497 template <class T> 498 void do_oop_work(T* p) { 499 assert(_containing_obj != NULL, "Precondition"); 500 assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo), 501 "Precondition"); 502 verify_liveness(p); 503 } 504 505 template <class T> 506 void verify_liveness(T* p) { 507 T heap_oop = RawAccess<>::oop_load(p); 508 Log(gc, verify) log; 509 if (!CompressedOops::is_null(heap_oop)) { 510 oop obj = CompressedOops::decode_not_null(heap_oop); 511 bool failed = false; 512 if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) { 513 MutexLockerEx x(ParGCRareEvent_lock, 514 Mutex::_no_safepoint_check_flag); 515 516 if (!_failures) { 517 log.error("----------"); 518 } 519 ResourceMark rm; 520 if (!_g1h->is_in_closed_subset(obj)) { 521 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); 522 log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")", 523 p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end())); 524 LogStream ls(log.error()); 525 print_object(&ls, _containing_obj); 526 HeapRegion* const to = _g1h->heap_region_containing(obj); 527 log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s", p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str()); 528 } else { 529 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); 530 HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj); 531 log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")", 532 p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end())); 533 LogStream ls(log.error()); 534 print_object(&ls, _containing_obj); 535 log.error("points to dead obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")", 536 p2i(obj), p2i(to->bottom()), p2i(to->end())); 537 print_object(&ls, obj); 538 } 539 log.error("----------"); 540 _failures = true; 541 failed = true; 542 _n_failures++; 543 } 544 } 545 } 546 }; 547 548 class VerifyRemSetClosure : public G1VerificationClosure { 549 public: 550 VerifyRemSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {} 551 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 552 virtual void do_oop(oop* p) { do_oop_work(p); } 553 554 template <class T> 555 void do_oop_work(T* p) { 556 assert(_containing_obj != NULL, "Precondition"); 557 assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo), 558 "Precondition"); 559 verify_remembered_set(p); 560 } 561 562 template <class T> 563 void verify_remembered_set(T* p) { 564 T heap_oop = RawAccess<>::oop_load(p); 565 Log(gc, verify) log; 566 if (!CompressedOops::is_null(heap_oop)) { 567 oop obj = CompressedOops::decode_not_null(heap_oop); 568 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); 569 HeapRegion* to = _g1h->heap_region_containing(obj); 570 if (from != NULL && to != NULL && 571 from != to && 572 !to->is_pinned() && 573 to->rem_set()->is_complete()) { 574 jbyte cv_obj = *_ct->byte_for_const(_containing_obj); 575 jbyte cv_field = *_ct->byte_for_const(p); 576 const jbyte dirty = G1CardTable::dirty_card_val(); 577 578 bool is_bad = !(from->is_young() 579 || to->rem_set()->contains_reference(p) 580 || (_containing_obj->is_objArray() ? 581 cv_field == dirty : 582 cv_obj == dirty || cv_field == dirty)); 583 if (is_bad) { 584 MutexLockerEx x(ParGCRareEvent_lock, 585 Mutex::_no_safepoint_check_flag); 586 587 if (!_failures) { 588 log.error("----------"); 589 } 590 log.error("Missing rem set entry:"); 591 log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT, 592 p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from)); 593 ResourceMark rm; 594 LogStream ls(log.error()); 595 _containing_obj->print_on(&ls); 596 log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s", p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str()); 597 if (oopDesc::is_oop(obj)) { 598 obj->print_on(&ls); 599 } 600 log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field); 601 log.error("----------"); 602 _failures = true; 603 _n_failures++; 604 } 605 } 606 } 607 } 608 }; 609 610 // Closure that applies the given two closures in sequence. 611 class G1Mux2Closure : public BasicOopIterateClosure { 612 OopClosure* _c1; 613 OopClosure* _c2; 614 public: 615 G1Mux2Closure(OopClosure *c1, OopClosure *c2) { _c1 = c1; _c2 = c2; } 616 template <class T> inline void do_oop_work(T* p) { 617 // Apply first closure; then apply the second. 618 _c1->do_oop(p); 619 _c2->do_oop(p); 620 } 621 virtual inline void do_oop(oop* p) { do_oop_work(p); } 622 virtual inline void do_oop(narrowOop* p) { do_oop_work(p); } 623 624 // This closure provides its own oop verification code. 625 debug_only(virtual bool should_verify_oops() { return false; }) 626 }; 627 628 // This really ought to be commoned up into OffsetTableContigSpace somehow. 629 // We would need a mechanism to make that code skip dead objects. 630 631 void HeapRegion::verify(VerifyOption vo, 632 bool* failures) const { 633 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 634 *failures = false; 635 HeapWord* p = bottom(); 636 HeapWord* prev_p = NULL; 637 VerifyLiveClosure vl_cl(g1h, vo); 638 VerifyRemSetClosure vr_cl(g1h, vo); 639 bool is_region_humongous = is_humongous(); 640 size_t object_num = 0; 641 while (p < top()) { 642 oop obj = oop(p); 643 size_t obj_size = block_size(p); 644 object_num += 1; 645 646 if (!g1h->is_obj_dead_cond(obj, this, vo)) { 647 if (oopDesc::is_oop(obj)) { 648 Klass* klass = obj->klass(); 649 bool is_metaspace_object = Metaspace::contains(klass); 650 if (!is_metaspace_object) { 651 log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " " 652 "not metadata", p2i(klass), p2i(obj)); 653 *failures = true; 654 return; 655 } else if (!klass->is_klass()) { 656 log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " " 657 "not a klass", p2i(klass), p2i(obj)); 658 *failures = true; 659 return; 660 } else { 661 vl_cl.set_containing_obj(obj); 662 if (!g1h->collector_state()->in_full_gc() || G1VerifyRSetsDuringFullGC) { 663 // verify liveness and rem_set 664 vr_cl.set_containing_obj(obj); 665 G1Mux2Closure mux(&vl_cl, &vr_cl); 666 obj->oop_iterate(&mux); 667 668 if (vr_cl.failures()) { 669 *failures = true; 670 } 671 if (G1MaxVerifyFailures >= 0 && 672 vr_cl.n_failures() >= G1MaxVerifyFailures) { 673 return; 674 } 675 } else { 676 // verify only liveness 677 obj->oop_iterate(&vl_cl); 678 } 679 if (vl_cl.failures()) { 680 *failures = true; 681 } 682 if (G1MaxVerifyFailures >= 0 && 683 vl_cl.n_failures() >= G1MaxVerifyFailures) { 684 return; 685 } 686 } 687 } else { 688 log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj)); 689 *failures = true; 690 return; 691 } 692 } 693 prev_p = p; 694 p += obj_size; 695 } 696 697 if (!is_young() && !is_empty()) { 698 _bot_part.verify(); 699 } 700 701 if (is_region_humongous) { 702 oop obj = oop(this->humongous_start_region()->bottom()); 703 if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) { 704 log_error(gc, verify)("this humongous region is not part of its' humongous object " PTR_FORMAT, p2i(obj)); 705 *failures = true; 706 return; 707 } 708 } 709 710 if (!is_region_humongous && p != top()) { 711 log_error(gc, verify)("end of last object " PTR_FORMAT " " 712 "does not match top " PTR_FORMAT, p2i(p), p2i(top())); 713 *failures = true; 714 return; 715 } 716 717 HeapWord* the_end = end(); 718 // Do some extra BOT consistency checking for addresses in the 719 // range [top, end). BOT look-ups in this range should yield 720 // top. No point in doing that if top == end (there's nothing there). 721 if (p < the_end) { 722 // Look up top 723 HeapWord* addr_1 = p; 724 HeapWord* b_start_1 = _bot_part.block_start_const(addr_1); 725 if (b_start_1 != p) { 726 log_error(gc, verify)("BOT look up for top: " PTR_FORMAT " " 727 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT, 728 p2i(addr_1), p2i(b_start_1), p2i(p)); 729 *failures = true; 730 return; 731 } 732 733 // Look up top + 1 734 HeapWord* addr_2 = p + 1; 735 if (addr_2 < the_end) { 736 HeapWord* b_start_2 = _bot_part.block_start_const(addr_2); 737 if (b_start_2 != p) { 738 log_error(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " " 739 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT, 740 p2i(addr_2), p2i(b_start_2), p2i(p)); 741 *failures = true; 742 return; 743 } 744 } 745 746 // Look up an address between top and end 747 size_t diff = pointer_delta(the_end, p) / 2; 748 HeapWord* addr_3 = p + diff; 749 if (addr_3 < the_end) { 750 HeapWord* b_start_3 = _bot_part.block_start_const(addr_3); 751 if (b_start_3 != p) { 752 log_error(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " " 753 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT, 754 p2i(addr_3), p2i(b_start_3), p2i(p)); 755 *failures = true; 756 return; 757 } 758 } 759 760 // Look up end - 1 761 HeapWord* addr_4 = the_end - 1; 762 HeapWord* b_start_4 = _bot_part.block_start_const(addr_4); 763 if (b_start_4 != p) { 764 log_error(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " " 765 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT, 766 p2i(addr_4), p2i(b_start_4), p2i(p)); 767 *failures = true; 768 return; 769 } 770 } 771 772 verify_strong_code_roots(vo, failures); 773 } 774 775 void HeapRegion::verify() const { 776 bool dummy = false; 777 verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy); 778 } 779 780 void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const { 781 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 782 *failures = false; 783 HeapWord* p = bottom(); 784 HeapWord* prev_p = NULL; 785 VerifyRemSetClosure vr_cl(g1h, vo); 786 while (p < top()) { 787 oop obj = oop(p); 788 size_t obj_size = block_size(p); 789 790 if (!g1h->is_obj_dead_cond(obj, this, vo)) { 791 if (oopDesc::is_oop(obj)) { 792 vr_cl.set_containing_obj(obj); 793 obj->oop_iterate(&vr_cl); 794 795 if (vr_cl.failures()) { 796 *failures = true; 797 } 798 if (G1MaxVerifyFailures >= 0 && 799 vr_cl.n_failures() >= G1MaxVerifyFailures) { 800 return; 801 } 802 } else { 803 log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj)); 804 *failures = true; 805 return; 806 } 807 } 808 809 prev_p = p; 810 p += obj_size; 811 } 812 } 813 814 void HeapRegion::verify_rem_set() const { 815 bool failures = false; 816 verify_rem_set(VerifyOption_G1UsePrevMarking, &failures); 817 guarantee(!failures, "HeapRegion RemSet verification failed"); 818 } 819 820 void HeapRegion::prepare_for_compaction(CompactPoint* cp) { 821 // Not used for G1 anymore, but pure virtual in Space. 822 ShouldNotReachHere(); 823 } 824 825 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go 826 // away eventually. 827 828 void G1ContiguousSpace::clear(bool mangle_space) { 829 set_top(bottom()); 830 CompactibleSpace::clear(mangle_space); 831 reset_bot(); 832 } 833 #ifndef PRODUCT 834 void G1ContiguousSpace::mangle_unused_area() { 835 mangle_unused_area_complete(); 836 } 837 838 void G1ContiguousSpace::mangle_unused_area_complete() { 839 SpaceMangler::mangle_region(MemRegion(top(), end())); 840 } 841 #endif 842 843 void G1ContiguousSpace::print() const { 844 print_short(); 845 tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " 846 INTPTR_FORMAT ", " INTPTR_FORMAT ")", 847 p2i(bottom()), p2i(top()), p2i(_bot_part.threshold()), p2i(end())); 848 } 849 850 HeapWord* G1ContiguousSpace::initialize_threshold() { 851 return _bot_part.initialize_threshold(); 852 } 853 854 HeapWord* G1ContiguousSpace::cross_threshold(HeapWord* start, 855 HeapWord* end) { 856 _bot_part.alloc_block(start, end); 857 return _bot_part.threshold(); 858 } 859 860 void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) { 861 object_iterate(blk); 862 } 863 864 void G1ContiguousSpace::object_iterate(ObjectClosure* blk) { 865 HeapWord* p = bottom(); 866 while (p < top()) { 867 if (block_is_obj(p)) { 868 blk->do_object(oop(p)); 869 } 870 p += block_size(p); 871 } 872 } 873 874 G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) : 875 _bot_part(bot, this), 876 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true) 877 { 878 } 879 880 void G1ContiguousSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) { 881 CompactibleSpace::initialize(mr, clear_space, mangle_space); 882 _top = bottom(); 883 set_saved_mark_word(NULL); 884 reset_bot(); 885 }