/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionTracer.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/growableArray.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

size_t HeapRegion::min_region_size_in_words() {
  return HeapRegionBounds::min_size() >> LogHeapWordSize;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       HeapRegionBounds::min_size());
    if (DumpSharedSpaces && max_heap_size >= 128*M && region_size < 8*M) {
      // CDS archived heap supports up to 32G heaps, with region size up to 8MB.
      // At CDS dump time, if we use small regions for G1, the closed-archive (CARC)
      // and open-archive (OARC) regions may end up in the same 8MB block. At run
      // time with a large heap, G1CollectedHeap::alloc_archive_regions might fail
      // because CARC and OARC will end up in the same G1 region.
      region_size = 8*M;
    }
  }

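  // Worked example, assuming the default HeapRegionBounds (1M minimum, 32M
  // maximum, 2048 target regions): with -Xms2g -Xmx6g the average heap size
  // is 4G, so region_size starts out as MAX2(4G / 2048, 1M) = 2M before the
  // power-of-two rounding and clamping below.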
  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((size_t)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  GrainBytes = region_size;
  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", GrainBytes / M);

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
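  // Example: with the usual 512 byte cards (card_shift == 9), a 2M region
  // covers 4096 cards.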
  CardsPerRegion = GrainBytes >> G1CardTable::card_shift;

  if (G1HeapRegionSize != GrainBytes) {
    FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes);
  }
}

void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(!in_collection_set(),
         "Should not clear heap region %u in the collection set", hrm_index());

  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!keep_remset) {
    if (locked) {
      rem_set()->clear_locked();
    } else {
      rem_set()->clear();
    }
  }

  zero_marked_bytes();

  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::clear_cardtable() {
  G1CardTable* ct = G1CollectedHeap::heap()->card_table();
  ct->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
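  // The result is reclaimable bytes per millisecond of predicted pause time,
  // so regions with higher efficiency are the cheapest to evacuate first.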
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_free() {
  report_region_type_change(G1HeapRegionTraceType::Free);
  _type.set_free();
}

void HeapRegion::set_eden() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden();
}

void HeapRegion::set_eden_pre_gc() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden_pre_gc();
}

void HeapRegion::set_survivor() {
  report_region_type_change(G1HeapRegionTraceType::Survivor);
  _type.set_survivor();
}

void HeapRegion::move_to_old() {
  if (_type.relabel_as_old()) {
    report_region_type_change(G1HeapRegionTraceType::Old);
  }
}

void HeapRegion::set_old() {
  report_region_type_change(G1HeapRegionTraceType::Old);
  _type.set_old();
}

void HeapRegion::set_open_archive() {
  report_region_type_change(G1HeapRegionTraceType::OpenArchive);
  _type.set_open_archive();
}

void HeapRegion::set_closed_archive() {
  report_region_type_change(G1HeapRegionTraceType::ClosedArchive);
  _type.set_closed_archive();
}

void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");

  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
  _type.set_starts_humongous();
  _humongous_start_region = this;

  _bot_part.set_for_starts_humongous(obj_top, fill_size);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
  _type.set_continues_humongous();
  _humongous_start_region = first_hr;

  _bot_part.set_object_can_span(true);
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;

  _bot_part.set_object_can_span(false);
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetTable* bot,
                       MemRegion mr) :
    G1ContiguousSpace(bot),
    _hrm_index(hrm_index),
    _humongous_start_region(NULL),
    _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0)
{
  _rem_set = new HeapRegionRemSet(bot, this);

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1ContiguousSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /* keep_remset */, false /* clear_space */);
  set_top(bottom());
}

void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
  HeapRegionTracer::send_region_type_change(_hrm_index,
                                            get_trace_type(),
                                            to,
                                            (uintptr_t)bottom(),
                                            used());
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) {
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          log_error(gc, verify)("Object " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ") is above top " PTR_FORMAT,
                                p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr):
    _hr(hr), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots",
                              p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region",
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        } else if (oop_cl.failures()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseFullMarking) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this point
    // in time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // If this region is empty then there should be no entries
  // on its strong code root list.
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is empty but has " SIZE_FORMAT " code root entries",
                            p2i(bottom()), p2i(end()), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (is_continues_humongous()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries",
                            HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("|%4u", this->_hrm_index);
  st->print("|" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|%3d%%", (int) ((double) used() * 100 / capacity()));
  st->print("|%2s", get_short_type_str());
  if (in_collection_set()) {
    st->print("|CS");
  } else {
    st->print("|  ");
  }
  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "| %s ",
               p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()), rem_set()->get_state_str());
}

class G1VerificationClosure : public BasicOopIterateClosure {
protected:
  G1CollectedHeap* _g1h;
  G1CardTable* _ct;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
  G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _ct(g1h->card_table()),
    _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = k->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  // This closure provides its own oop verification code.
  debug_only(virtual bool should_verify_oops() { return false; })
};

class VerifyLiveClosure : public G1VerificationClosure {
public:
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_liveness(p);
  }

  template <class T>
  void verify_liveness(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    Log(gc, verify) log;
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          log.error("----------");
        }
        ResourceMark rm;
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
          LogStream ls(log.error());
          print_object(&ls, _containing_obj);
          HeapRegion* const to = _g1h->heap_region_containing(obj);
          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s",
                    p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
          LogStream ls(log.error());
          print_object(&ls, _containing_obj);
          log.error("points to dead obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
                    p2i(obj), p2i(to->bottom()), p2i(to->end()));
          print_object(&ls, obj);
        }
        log.error("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }
    }
  }
};

class VerifyRemSetClosure : public G1VerificationClosure {
public:
  VerifyRemSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_remembered_set(p);
  }

  template <class T>
  void verify_remembered_set(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    Log(gc, verify) log;
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
      HeapRegion* to = _g1h->heap_region_containing(obj);
      if (from != NULL && to != NULL &&
          from != to &&
          !to->is_pinned() &&
          to->rem_set()->is_complete()) {
        jbyte cv_obj = *_ct->byte_for_const(_containing_obj);
        jbyte cv_field = *_ct->byte_for_const(p);
        const jbyte dirty = G1CardTable::dirty_card_val();

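        // A reference is considered accounted for if the source region is
        // young (young regions are always scanned in full), if the target's
        // remembered set already contains it, or if the covering card is
        // dirty and will still be refined. For object arrays only the card
        // of the field itself is checked, since large arrays may be scanned
        // in card-sized chunks rather than from the header.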
        bool is_bad = !(from->is_young()
                        || to->rem_set()->contains_reference(p)
                        || (_containing_obj->is_objArray() ?
                              cv_field == dirty :
                              cv_obj == dirty || cv_field == dirty));
        if (is_bad) {
          MutexLockerEx x(ParGCRareEvent_lock,
                          Mutex::_no_safepoint_check_flag);

          if (!_failures) {
            log.error("----------");
          }
          log.error("Missing rem set entry:");
          log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT,
                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
          ResourceMark rm;
          LogStream ls(log.error());
          _containing_obj->print_on(&ls);
          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s",
                    p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
          if (oopDesc::is_oop(obj)) {
            obj->print_on(&ls);
          }
          log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
          log.error("----------");
          _failures = true;
          _n_failures++;
        }
      }
    }
  }
};

// Closure that applies the given two closures in sequence.
class G1Mux2Closure : public BasicOopIterateClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  G1Mux2Closure(OopClosure *c1, OopClosure *c2) { _c1 = c1; _c2 = c2; }
  template <class T> inline void do_oop_work(T* p) {
    // Apply first closure; then apply the second.
    _c1->do_oop(p);
    _c2->do_oop(p);
  }
  virtual inline void do_oop(oop* p) { do_oop_work(p); }
  virtual inline void do_oop(narrowOop* p) { do_oop_work(p); }

  // This closure provides its own oop verification code.
  debug_only(virtual bool should_verify_oops() { return false; })
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1h, vo);
  VerifyRemSetClosure vr_cl(g1h, vo);
  bool is_region_humongous = is_humongous();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (!g1h->is_obj_dead_cond(obj, this, vo)) {
      if (oopDesc::is_oop(obj)) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass);
        if (!is_metaspace_object) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not metadata", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not a klass", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          if (!g1h->collector_state()->in_full_gc() || G1VerifyRSetsDuringFullGC) {
            // verify liveness and rem_set
            vr_cl.set_containing_obj(obj);
            G1Mux2Closure mux(&vl_cl, &vr_cl);
            obj->oop_iterate(&mux);

            if (vr_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vr_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          } else {
            // verify only liveness
            obj->oop_iterate(&vl_cl);
          }
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (!is_young() && !is_empty()) {
    _bot_part.verify();
  }

  if (is_region_humongous) {
    oop obj = oop(this->humongous_start_region()->bottom());
    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
711 log_error(gc, verify)("this humongous region is not part of its' humongous object " PTR_FORMAT, p2i(obj));
      *failures = true;
      return;
    }
  }

  if (!is_region_humongous && p != top()) {
    log_error(gc, verify)("end of last object " PTR_FORMAT " "
                          "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _bot_part.block_start_const(addr_1);
    if (b_start_1 != p) {
      log_error(gc, verify)("BOT look up for top: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_1), p2i(b_start_1), p2i(p));
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _bot_part.block_start_const(addr_2);
      if (b_start_2 != p) {
        log_error(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_2), p2i(b_start_2), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _bot_part.block_start_const(addr_3);
      if (b_start_3 != p) {
        log_error(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_3), p2i(b_start_3), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _bot_part.block_start_const(addr_4);
    if (b_start_4 != p) {
      log_error(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_4), p2i(b_start_4), p2i(p));
      *failures = true;
      return;
    }
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyRemSetClosure vr_cl(g1h, vo);
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);

    if (!g1h->is_obj_dead_cond(obj, this, vo)) {
      if (oopDesc::is_oop(obj)) {
        vr_cl.set_containing_obj(obj);
        obj->oop_iterate(&vr_cl);

        if (vr_cl.failures()) {
          *failures = true;
        }
        if (G1MaxVerifyFailures >= 0 &&
            vr_cl.n_failures() >= G1MaxVerifyFailures) {
          return;
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }

    prev_p = p;
    p += obj_size;
  }
}

void HeapRegion::verify_rem_set() const {
  bool failures = false;
  verify_rem_set(VerifyOption_G1UsePrevMarking, &failures);
  guarantee(!failures, "HeapRegion RemSet verification failed");
}

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  // Not used for G1 anymore, but pure virtual in Space.
  ShouldNotReachHere();
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}
#ifndef PRODUCT
void G1ContiguousSpace::mangle_unused_area() {
  mangle_unused_area_complete();
}

void G1ContiguousSpace::mangle_unused_area_complete() {
  SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif

void G1ContiguousSpace::print() const {
  print_short();
  tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(top()), p2i(_bot_part.threshold()), p2i(end()));
}

HeapWord* G1ContiguousSpace::initialize_threshold() {
  return _bot_part.initialize_threshold();
}

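// Called when an allocation crosses the current BOT update threshold:
// record the new block [start, end) in the offset table and return the
// next address at which an update will be needed.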
HeapWord* G1ContiguousSpace::cross_threshold(HeapWord* start,
                                             HeapWord* end) {
  _bot_part.alloc_block(start, end);
  return _bot_part.threshold();
}

void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1ContiguousSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) :
  _bot_part(bot, this),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
}

void G1ContiguousSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}