rev 7084 : [mq]: demacro
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card. Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done. That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back so that we do not scan what
// we (or another worker thread) may already have scanned
// or are planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}

DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card. Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region(), which
  // is not specific to a space, when the ContiguousSpace version
  // is called it is always with regard to a space, so this
  // bounds checking is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}


bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
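// In a ContiguousSpace every word in [bottom(), top()) belongs to an object,
// so the scan can use the trivial block_is_always_obj()/obj_size() macros
// defined above instead of the more general block_is_obj()/block_size()
// queries used by CompactibleSpace::prepare_for_compaction().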
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}

void Space::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();

      debug_only(prev_q = q);

      q += size;
    } else {
      // q is not a live object, but we're not in a compactible space,
      // so we don't have live ranges.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)          \
                                                                             \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) { \
    HeapWord* obj_addr = mr.start();                                         \
    HeapWord* t = mr.end();                                                  \
    while (obj_addr < t) {                                                   \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                   \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                           \
    }                                                                        \
  }

ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // INCLUDE_ALL_GCS

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a ContiguousSpace object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate(blk);                                           \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT " should be an object start", last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
                 p, current_top));
  assert(p == current_top || oop(p)->is_oop(),
         err_msg("p (" PTR_FORMAT ") is not a block start - "
                 "current_top: " PTR_FORMAT ", is_oop: %s",
                 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
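// Bump-the-pointer allocation: if at least 'size' words remain between the
// current top() and 'end_value', advance top() by 'size' and return the old
// top() as the start of the new block; otherwise return NULL.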
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the new value of top is returned and we loop to retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
  assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), "not locked");
  HeapWord* end_value = end();

  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
  if (obj == NULL) {
    return NULL;
  }

  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
           "checking alignment");
    return obj;
  } else {
    set_top(obj);
    return NULL;
  }
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array, decreasing the free size by a factor of 'factor'.
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
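// EdenSpace allocates against soft_end() rather than the hard end() of the
// space, so the effective allocation limit can sit below the physical end.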
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}

HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() should be read before end(), because
    // top() can't be greater than end(). If an update of _soft_end
    // occurs between 'end_val = end();' and 'top_val = top();', top()
    // can also grow up to the new end() and the condition
    // 'top_val > end_val' becomes true. To ensure this loading order,
    // OrderAccess::loadload() is required after the top() read.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the new value of top is returned and we loop to retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}


HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find the block start for an
    // interior address using the block offset table and check that it
    // matches the object's actual start.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}


size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}