/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handshake.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentMark("Concurrent Mark");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryFlush("Concurrent Mark Try Flush");
static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");

ZMark::ZMark(ZWorkers* workers, ZPageTable* pagetable) :
    _workers(workers),
    _pagetable(pagetable),
    _allocator(),
    _stripes(),
    _terminate(),
    _work_terminateflush(true),
    _work_nproactiveflush(0),
    _work_nterminateflush(0),
    _nproactiveflush(0),
    _nterminateflush(0),
    _ntrycomplete(0),
    _ncontinue(0),
    _nworkers(0) {}

bool ZMark::is_initialized() const {
  return _allocator.is_initialized();
}

size_t ZMark::calculate_nstripes(uint nworkers) const {
  // Calculate the number of stripes from the number of workers we use,
  // where the number of stripes must be a power of two and we want to
  // have at least one worker per stripe.
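  // For example, nworkers == 6 gives round_down_power_of_2(6) == 4
  // stripes, which MIN2() then caps at ZMarkStripesMax.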
  const size_t nstripes = ZUtils::round_down_power_of_2(nworkers);
  return MIN2(nstripes, ZMarkStripesMax);
}

void ZMark::prepare_mark() {
  // Increment global sequence number to invalidate
  // marking information for all pages.
  ZGlobalSeqNum++;

  // Reset flush/continue counters
  _nproactiveflush = 0;
  _nterminateflush = 0;
  _ntrycomplete = 0;
  _ncontinue = 0;

  // Set number of workers to use
  _nworkers = _workers->nconcurrent();

  // Set number of mark stripes to use, based on number
  // of workers we will use in the concurrent mark phase.
  const size_t nstripes = calculate_nstripes(_nworkers);
  _stripes.set_nstripes(nstripes);

  // Update statistics
  ZStatMark::set_at_mark_start(nstripes);

  // Print worker/stripe distribution
  LogTarget(Debug, gc, marking) log;
  if (log.is_enabled()) {
    log.print("Mark Worker/Stripe Distribution");
    for (uint worker_id = 0; worker_id < _nworkers; worker_id++) {
      const ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, worker_id);
      const size_t stripe_id = _stripes.stripe_id(stripe);
      log.print("  Worker %u(%u) -> Stripe " SIZE_FORMAT "(" SIZE_FORMAT ")",
                worker_id, _nworkers, stripe_id, nstripes);
    }
  }
}

class ZMarkRootsIteratorClosure : public ZRootsIteratorClosure {
public:
  virtual void do_thread(Thread* thread) {
    ZRootsIteratorClosure::do_thread(thread);

    // Update thread local address bad mask
    ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
  }

  virtual void do_oop(oop* p) {
    ZBarrier::mark_barrier_on_root_oop_field(p);
  }

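  // ZGC only supports uncompressed oops, so the narrowOop
  // variant of do_oop() should never be called.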
  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

class ZMarkRootsTask : public ZTask {
private:
  ZMark* const   _mark;
  ZRootsIterator _roots;

public:
  ZMarkRootsTask(ZMark* mark) :
      ZTask("ZMarkRootsTask"),
      _mark(mark),
      _roots() {}

  virtual void work() {
    ZMarkRootsIteratorClosure cl;
    _roots.oops_do(&cl);

    // Flush and free worker stacks. Needed here since
    // the set of workers executing during root scanning
    // can be different from the set of workers executing
    // during mark.
    _mark->flush_and_free();
  }
};

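// Note: start() is expected to be called from the mark start pause, which
// is why roots are scanned with run_parallel() (safepoint workers), while
// mark() below runs with run_concurrent().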
void ZMark::start() {
  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Prepare for concurrent mark
  prepare_mark();

  // Mark roots
  ZMarkRootsTask task(this);
  _workers->run_parallel(&task);
}

void ZMark::prepare_work() {
  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");

  // Set number of active workers
  _terminate.reset(_nworkers);

  // Reset flush counters
  _work_nproactiveflush = _work_nterminateflush = 0;
  _work_terminateflush = true;
}

void ZMark::finish_work() {
  // Accumulate proactive/terminate flush counters
  _nproactiveflush += _work_nproactiveflush;
  _nterminateflush += _work_nterminateflush;
}

bool ZMark::is_array(uintptr_t addr) const {
  return ZOop::to_oop(addr)->is_objArray();
}

void ZMark::push_partial_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(is_aligned(addr, ZMarkPartialArrayMinSize), "Address misaligned");
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
  ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
  const uintptr_t offset = ZAddress::offset(addr) >> ZMarkPartialArrayMinSizeShift;
  const uintptr_t length = size / oopSize;
  const ZMarkStackEntry entry(offset, length, finalizable);

  log_develop_trace(gc, marking)("Array push partial: " PTR_FORMAT " (" SIZE_FORMAT "), stripe: " SIZE_FORMAT,
                                 addr, size, _stripes.stripe_id(stripe));

  stacks->push(&_allocator, &_stripes, stripe, entry, false /* publish */);
}

void ZMark::follow_small_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= ZMarkPartialArrayMinSize, "Too large, should be split");
  const size_t length = size / oopSize;

  log_develop_trace(gc, marking)("Array follow small: " PTR_FORMAT " (" SIZE_FORMAT ")", addr, size);

  ZBarrier::mark_barrier_on_oop_array((oop*)addr, length, finalizable);
}

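// A worked example, assuming ZMarkPartialArrayMinSize == 4K: for an array
// of oops spanning 0x1100-0xa300, the unaligned trailing part 0xa000-0xa300
// is pushed first, the aligned middle 0x2000-0xa000 is then pushed as
// successively halved partial entries ([0x6000,0xa000), [0x4000,0x6000),
// [0x3000,0x4000), [0x2000,0x3000)), and the leading part 0x1100-0x2000
// is followed immediately as a small array.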
void ZMark::follow_large_array(uintptr_t addr, size_t size, bool finalizable) {
  assert(size <= (size_t)arrayOopDesc::max_array_length(T_OBJECT) * oopSize, "Too large");
  assert(size > ZMarkPartialArrayMinSize, "Too small, should not be split");
  const uintptr_t start = addr;
  const uintptr_t end = start + size;

  // Calculate the aligned middle start/end/size, where the middle start
  // should always be greater than the start (hence the +1 below) to make
  // sure we always do some follow work, not just split the array into pieces.
  const uintptr_t middle_start = align_up(start + 1, ZMarkPartialArrayMinSize);
  const size_t    middle_size = align_down(end - middle_start, ZMarkPartialArrayMinSize);
  const uintptr_t middle_end = middle_start + middle_size;

  log_develop_trace(gc, marking)("Array follow large: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "), "
                                 "middle: " PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT ")",
                                 start, end, size, middle_start, middle_end, middle_size);

  // Push unaligned trailing part
  if (end > middle_end) {
    const uintptr_t trailing_addr = middle_end;
    const size_t trailing_size = end - middle_end;
    push_partial_array(trailing_addr, trailing_size, finalizable);
  }

  // Push aligned middle part(s)
  uintptr_t partial_addr = middle_end;
  while (partial_addr > middle_start) {
    const size_t parts = 2;
    const size_t partial_size = align_up((partial_addr - middle_start) / parts, ZMarkPartialArrayMinSize);
    partial_addr -= partial_size;
    push_partial_array(partial_addr, partial_size, finalizable);
  }

  // Follow leading part
  assert(start < middle_start, "Miscalculated middle start");
  const uintptr_t leading_addr = start;
  const size_t leading_size = middle_start - start;
  follow_small_array(leading_addr, leading_size, finalizable);
}

void ZMark::follow_array(uintptr_t addr, size_t size, bool finalizable) {
  if (size <= ZMarkPartialArrayMinSize) {
    follow_small_array(addr, size, finalizable);
  } else {
    follow_large_array(addr, size, finalizable);
  }
}

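// Partial array stack entries encode a compressed (offset, length) pair:
// the offset is stored shifted right by ZMarkPartialArrayMinSizeShift,
// which is why push_partial_array() requires addresses to be aligned to
// ZMarkPartialArrayMinSize.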
void ZMark::follow_partial_array(ZMarkStackEntry entry, bool finalizable) {
  const uintptr_t addr = ZAddress::good(entry.partial_array_offset() << ZMarkPartialArrayMinSizeShift);
  const size_t size = entry.partial_array_length() * oopSize;

  follow_array(addr, size, finalizable);
}

void ZMark::follow_array_object(objArrayOop obj, bool finalizable) {
  const uintptr_t addr = (uintptr_t)obj->base();
  const size_t size = (size_t)obj->length() * oopSize;

  follow_array(addr, size, finalizable);
}

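// The finalizable flag is a template argument so that each closure is
// specialized at compile time instead of branching on it for every
// traversed oop.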
void ZMark::follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;
    obj->oop_iterate(&cl);
  }
}

bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
  ZPage* const page = _pagetable->get(addr);
  if (page->is_allocating()) {
    // Newly allocated objects are implicitly marked
    return false;
  }

  // Try mark object
  bool inc_live = false;
  const bool success = page->mark_object(addr, finalizable, inc_live);
  if (inc_live) {
    // Update live objects/bytes for page. We use the aligned object
    // size since that is the actual number of bytes used on the page
    // and alignment paddings can never be reclaimed.
    const size_t size = ZUtils::object_size(addr);
    const size_t aligned_size = align_up(size, page->object_alignment());
    cache->inc_live(page, aligned_size);
  }

  return success;
}

void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
  // Decode flags
  const bool finalizable = entry.finalizable();
  const bool partial_array = entry.partial_array();

  if (partial_array) {
    follow_partial_array(entry, finalizable);
    return;
  }

  // Decode object address
  const uintptr_t addr = entry.object_address();

  if (!try_mark_object(cache, addr, finalizable)) {
    // Already marked
    return;
  }

  if (is_array(addr)) {
    follow_array_object(objArrayOop(ZOop::to_oop(addr)), finalizable);
  } else {
    follow_object(ZOop::to_oop(addr), finalizable);
  }
}

template <typename T>
bool ZMark::drain(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  ZMarkStackEntry entry;

  // Drain stripe stacks
  while (stacks->pop(&_allocator, &_stripes, stripe, entry)) {
    mark_and_follow(cache, entry);

    // Check timeout
    if (timeout->has_expired()) {
      // Timeout
      return false;
    }
  }

  // Success
  return true;
}

template <typename T>
bool ZMark::drain_and_flush(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, ZMarkCache* cache, T* timeout) {
  const bool success = drain(stripe, stacks, cache, timeout);

  // Flush and publish worker stacks
  stacks->flush(&_allocator, &_stripes);

  return success;
}

bool ZMark::try_steal(ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  // Try to steal a stack from another stripe
  for (ZMarkStripe* victim_stripe = _stripes.stripe_next(stripe);
       victim_stripe != stripe;
       victim_stripe = _stripes.stripe_next(victim_stripe)) {
    ZMarkStack* const stack = victim_stripe->steal_stack();
    if (stack != NULL) {
      // Success, install the stolen stack
      stacks->install(&_stripes, stripe, stack);
      return true;
    }
  }

  // Nothing to steal
  return false;
}

void ZMark::idle() const {
  ZStatTimer timer(ZSubPhaseConcurrentMarkIdle);
  os::naked_short_sleep(1);
}

class ZMarkFlushAndFreeStacksClosure : public ThreadClosure {
private:
  ZMark* const _mark;
  bool         _flushed;

public:
  ZMarkFlushAndFreeStacksClosure(ZMark* mark) :
      _mark(mark),
      _flushed(false) {}

  void do_thread(Thread* thread) {
    if (_mark->flush_and_free(thread)) {
      _flushed = true;
    }
  }

  bool flushed() const {
    return _flushed;
  }
};

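// Flush the thread-local mark stacks of all threads, publishing any
// buffered entries to the global stripes. Inside a safepoint the threads
// can be walked directly; outside one, a handshake is used so each
// thread's stacks are flushed at a safepoint-safe state.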
bool ZMark::flush(bool at_safepoint) {
  ZMarkFlushAndFreeStacksClosure cl(this);
  if (at_safepoint) {
    Threads::threads_do(&cl);
  } else {
    Handshake::execute(&cl);
  }

  // Returns true if more work is available
  return cl.flushed() || !_stripes.is_empty();
}

bool ZMark::try_flush(volatile size_t* nflush) {
  // Only flush if handshakes are enabled
  if (!ThreadLocalHandshakes) {
    return false;
  }

  Atomic::inc(nflush);

  ZStatTimer timer(ZSubPhaseConcurrentMarkTryFlush);
  return flush(false /* at_safepoint */);
}

bool ZMark::try_proactive_flush() {
  // Only do proactive flushes from worker 0
  if (ZThread::worker_id() != 0) {
    return false;
  }

  if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax ||
      Atomic::load(&_work_nterminateflush) != 0) {
    // Limit reached or we're trying to terminate
    return false;
  }

  return try_flush(&_work_nproactiveflush);
}

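// Two-stage termination protocol: workers first rendezvous in stage 0,
// where the last worker to arrive tries to flush out more work via a
// handshake. Once flushing stops producing work (or ZMarkTerminateFlushMax
// flushes have been attempted), workers proceed to stage 1, and marking
// terminates when all workers have entered stage 1.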
bool ZMark::try_terminate() {
  ZStatTimer timer(ZSubPhaseConcurrentMarkTryTerminate);

  if (_terminate.enter_stage0()) {
    // Last thread entered stage 0, flush
    if (Atomic::load(&_work_terminateflush) &&
        Atomic::load(&_work_nterminateflush) != ZMarkTerminateFlushMax) {
      // Exit stage 0 to allow other threads to continue marking
      _terminate.exit_stage0();

      // Flush before termination
      if (!try_flush(&_work_nterminateflush)) {
        // No more work available, skip further flush attempts
        Atomic::store(false, &_work_terminateflush);
      }

      // Don't terminate, regardless of whether we successfully
      // flushed out more work or not. We've already exited
      // termination stage 0, to allow other threads to continue
      // marking, so this thread has to return false and also
      // make another round of attempted marking.
      return false;
    }
  }

  for (;;) {
    if (_terminate.enter_stage1()) {
      // Last thread entered stage 1, terminate
      return true;
    }

    // Idle to give the other threads
    // a chance to enter termination.
    idle();

    if (!_terminate.try_exit_stage1()) {
      // All workers in stage 1, terminate
      return true;
    }

    if (_terminate.try_exit_stage0()) {
      // More work available, don't terminate
      return false;
    }
  }
}

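// Timeout policy that never expires, used by the normal concurrent mark
// path so that drain() can share its draining loop with the timed
// try-complete path below.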
class ZMarkNoTimeout : public StackObj {
public:
  bool has_expired() {
    return false;
  }
};

void ZMark::work_without_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks) {
  ZStatTimer timer(ZSubPhaseConcurrentMark);
  ZMarkNoTimeout no_timeout;

  for (;;) {
    drain_and_flush(stripe, stacks, cache, &no_timeout);

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    if (try_proactive_flush()) {
      // Work available
      continue;
    }

    if (try_terminate()) {
      // Terminate
      break;
    }
  }
}

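// Timeout policy that expires once timeout_in_millis have elapsed. To keep
// has_expired() cheap in the drain loop, the clock is only read every
// _check_interval (200) calls.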
class ZMarkTimeout : public StackObj {
private:
  const Ticks    _start;
  const uint64_t _timeout;
  const uint64_t _check_interval;
  uint64_t       _check_at;
  uint64_t       _check_count;
  bool           _expired;

public:
  ZMarkTimeout(uint64_t timeout_in_millis) :
      _start(Ticks::now()),
      _timeout(_start.value() + TimeHelper::millis_to_counter(timeout_in_millis)),
      _check_interval(200),
      _check_at(_check_interval),
      _check_count(0),
      _expired(false) {}

  ~ZMarkTimeout() {
    const Tickspan duration = Ticks::now() - _start;
    log_debug(gc, marking)("Mark With Timeout (%s): %s, " UINT64_FORMAT " oops, %.3fms",
                           ZThread::name(), _expired ? "Expired" : "Completed",
                           _check_count, TimeHelper::counter_to_millis(duration.value()));
  }

  bool has_expired() {
    if (++_check_count == _check_at) {
      _check_at += _check_interval;
      if ((uint64_t)Ticks::now().value() >= _timeout) {
        // Timeout
        _expired = true;
      }
    }

    return _expired;
  }
};

void ZMark::work_with_timeout(ZMarkCache* cache, ZMarkStripe* stripe, ZMarkThreadLocalStacks* stacks, uint64_t timeout_in_millis) {
  ZStatTimer timer(ZSubPhaseMarkTryComplete);
  ZMarkTimeout timeout(timeout_in_millis);

  for (;;) {
    if (!drain_and_flush(stripe, stacks, cache, &timeout)) {
      // Timed out
      break;
    }

    if (try_steal(stripe, stacks)) {
      // Stole work
      continue;
    }

    // Terminate
    break;
  }
}

void ZMark::work(uint64_t timeout_in_millis) {
  ZMarkCache cache(_stripes.nstripes());
  ZMarkStripe* const stripe = _stripes.stripe_for_worker(_nworkers, ZThread::worker_id());
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());

  if (timeout_in_millis == 0) {
    work_without_timeout(&cache, stripe, stacks);
  } else {
    work_with_timeout(&cache, stripe, stacks, timeout_in_millis);
  }

  // Make sure stacks have been flushed
  assert(stacks->is_empty(&_stripes), "Should be empty");

  // Free remaining stacks
  stacks->free(&_allocator);
}

class ZMarkTask : public ZTask {
private:
  ZMark* const   _mark;
  const uint64_t _timeout_in_millis;

public:
  ZMarkTask(ZMark* mark, uint64_t timeout_in_millis = 0) :
      ZTask("ZMarkTask"),
      _mark(mark),
      _timeout_in_millis(timeout_in_millis) {
    _mark->prepare_work();
  }

  ~ZMarkTask() {
    _mark->finish_work();
  }

  virtual void work() {
    _mark->work(_timeout_in_millis);
  }
};

void ZMark::mark() {
  ZMarkTask task(this);
  _workers->run_concurrent(&task);
}

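// Try to finish marking during the mark end pause by running mark tasks
// with a timeout (ZMarkCompleteTimeout). If the stripes are still
// non-empty afterwards, marking has to continue concurrently.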
bool ZMark::try_complete() {
  _ntrycomplete++;

  // Use nconcurrent number of worker threads to maintain the
  // worker/stripe distribution used during concurrent mark.
  ZMarkTask task(this, ZMarkCompleteTimeout);
  _workers->run_concurrent(&task);

  // Successful if all stripes are empty
  return _stripes.is_empty();
}

bool ZMark::try_end() {
  // Flush all mark stacks
  if (!flush(true /* at_safepoint */)) {
    // Mark completed
    return true;
  }

  // Try complete marking by doing a limited
  // amount of mark work in this phase.
  return try_complete();
}

bool ZMark::end() {
  // Try end marking
  if (!try_end()) {
    // Mark not completed
    _ncontinue++;
    return false;
  }

  // Verification
  if (ZVerifyMarking) {
    verify_all_stacks_empty();
  }

  // Update statistics
  ZStatMark::set_at_mark_end(_nproactiveflush, _nterminateflush, _ntrycomplete, _ncontinue);

  // Mark completed
  return true;
}

void ZMark::flush_and_free() {
  Thread* const thread = Thread::current();
  flush_and_free(thread);
}

bool ZMark::flush_and_free(Thread* thread) {
  ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
  const bool flushed = stacks->flush(&_allocator, &_stripes);
  stacks->free(&_allocator);
  return flushed;
}

class ZVerifyMarkStacksEmptyClosure : public ThreadClosure {
private:
  const ZMarkStripeSet* const _stripes;

public:
  ZVerifyMarkStacksEmptyClosure(const ZMarkStripeSet* stripes) :
      _stripes(stripes) {}

  void do_thread(Thread* thread) {
    ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(thread);
    guarantee(stacks->is_empty(_stripes), "Should be empty");
  }
};

void ZMark::verify_all_stacks_empty() const {
  // Verify thread stacks
  ZVerifyMarkStacksEmptyClosure cl(&_stripes);
  Threads::threads_do(&cl);

  // Verify stripe stacks
  guarantee(_stripes.is_empty(), "Should be empty");
}