/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

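// Size the backing byte map: one byte per card over the whole heap plus the
// trailing guard card (_guard_index + 1 entries in total), rounded up to the
// larger of the page size and the VM allocation granularity so the map can be
// reserved and committed in whole pages.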
size_t CardTable::compute_byte_map_size() {
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
                                        "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
  _scanned_concurrently(conc_scan),
  _whole_heap(whole_heap),
  _guard_index(0),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _byte_map(NULL),
  _byte_map_base(NULL),
  _cur_covered_regions(0),
  _covered(MemRegion::create_array(_max_covered_regions, mtGC)),
  _committed(MemRegion::create_array(_max_covered_regions, mtGC)),
  _guard_region()
{
  assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be less than 512"); // why?
}

CardTable::~CardTable() {
  FREE_C_HEAP_ARRAY(MemRegion, _covered);
  FREE_C_HEAP_ARRAY(MemRegion, _committed);
}

void CardTable::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to _byte_map_base, i.e.
  //
  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
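  //
  // For example, with the default 512-byte cards (card_shift == 9), a heap
  // starting at low_bound == 0x100000000 gives
  //   _byte_map_base = _byte_map - 0x800000,
  // and byte_for(p) == _byte_map_base + (uintptr_t(p) >> card_shift) then
  // falls within [&_byte_map[0], &_byte_map[_last_valid_index]] for every
  // word p in the covered heap (see the asserts below).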
  _byte_map = (CardValue*) heap_rs.base();
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

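  // The guard card sits just past the last valid card. Its page is kept
  // committed for the lifetime of the table and the card itself holds
  // last_card, so verify_guard() can detect a stray write past the end of
  // the valid card range.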
  CardValue* guard_card = &_byte_map[_guard_index];
  HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size);
  _guard_region = MemRegion(guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTable::CardTable: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("    _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
}

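// _covered and _committed are parallel arrays kept sorted by start address:
// _covered[i] is a heap region handed to us by the collector, and
// _committed[i] is the page-aligned slice of the card table currently
// committed on its behalf.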
int CardTable::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  CardValue* ct_start = byte_for(base);
  HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size);
  _committed[res].set_start(ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTable::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTable::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

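// Returns the part of mr that is committed solely on behalf of region "self":
// the committed ranges of all other covered regions and the guard page are
// subtracted, so the result is what may safely be uncommitted when "self"
// shrinks.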
MemRegion CardTable::committed_unique_to_self(int self, MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

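// Called when the space covering new_region.start() grows or shrinks. Keeps
// _covered[ind] in sync with the new end, commits or uncommits the
// corresponding card table pages, and cleans the card bytes behind the old
// end, taking care not to disturb pages shared with neighbouring committed
// regions or with the guard page.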
void CardTable::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
           "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    HeapWord* new_end = (HeapWord*) byte_after(new_region.last());
    HeapWord* new_end_aligned = align_up(new_end, _page_size);
    assert(new_end_aligned >= new_end, "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
          "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
        "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    CardValue* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) <  _guard_index,
      "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // CardValue* const end = byte_after(new_region.last());
    CardValue* const end = (CardValue*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
      "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(CardValue)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTable::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())),  p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((CardValue*) _committed[ind].start())),  p2i(addr_for((CardValue*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
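// (A precise barrier dirties only the card containing the updated field's
// address; an imprecise barrier may instead dirty the card containing the
// start of the modified object, so a scanner cannot assume a dirty card
// pinpoints the store.)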
void CardTable::dirty_MemRegion(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  CardValue* cur  = byte_for(mr.start());
  CardValue* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTable::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  CardValue* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  CardValue* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(CardValue)));
}

void CardTable::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTable::dirty(MemRegion mr) {
  CardValue* first = byte_for(mr.start());
  CardValue* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
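// Consecutive dirty cards are coalesced, so, for example, a card sequence of
// dirty, dirty, clean, dirty within mr produces two closure invocations: one
// MemRegion spanning the first two cards and one spanning the last card.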
void CardTable::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      CardValue *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

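// Finds the first maximal run of dirty cards within mr (restricted to the
// covered regions) and returns it as a MemRegion; if "reset" is true the
// cards in that run are overwritten with reset_val first. Returns the empty
// region [mr.end(), mr.end()) when no dirty card is found.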
MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr,
                                                  bool reset,
                                                  int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      CardValue* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

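// The card table contributes an alignment constraint on the heap because its
// backing memory is committed and uncommitted in whole pages: one page of
// card bytes maps card_size * vm_page_size() bytes of heap, which is the
// largest granule the table ever needs the heap to be aligned to.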
uintx CardTable::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTable::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTable::invalidate(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTable::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTable::verify_region(MemRegion mr, CardValue val, bool val_equals) {
  CardValue* start    = byte_for(mr.start());
  CardValue* end      = byte_for(mr.last());
  bool failures = false;
  for (CardValue* curr = start; curr <= end; ++curr) {
    CardValue curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
        failures = true;
      }
      log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTable::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTable::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTable::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] _byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base));
}