/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
  #define NOINLINE __attribute__((noinline))
#else
  #define NOINLINE
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }
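
// Illustrative note: with the alias above, a call such as
//   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr)
// token-pastes HOTSPOT_MONITOR_PROBE_##probe into HOTSPOT_MONITOR_PROBE_waited,
// which in turn expands to
//   HOTSPOT_MONITOR_WAITED(jtid, (uintptr_t)(monitor), bytes, len);
// (see dtrace_waited_probe() below for a use).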

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)    {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t InflationLocks[NINFLATIONLOCKS];

// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * ObjectSynchronizer::gBlockList = NULL;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
int ObjectSynchronizer::gOmInUseCount = 0;
static volatile intptr_t ListLock = 0;      // protects global monitor free-list cache
static volatile int MonitorFreeCount  = 0;  // # on gFreeList
static volatile int MonitorPopulation = 0;  // # Extant -- in circulation
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// assembly copies of this code. Make sure to update that code if the
// following function is changed. The implementation is extremely
// sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // If the displaced header is null, the previous enter was a recursive enter and this exit is a no-op.
  markOop dhw = lock->displaced_header();
  markOop mark;
  if (dhw == NULL) {
    // Recursive stack-lock.
    // Diagnostics -- Could be: stack-locked, inflating, inflated.
    mark = object->mark();
    assert(!mark->is_neutral(), "invariant");
    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
    }
    if (mark->has_monitor()) {
      ObjectMonitor * m = mark->monitor();
      assert(((oop)(m->object()))->mark() == mark, "invariant");
      assert(m->is_entered(THREAD), "invariant");
    }
    return;
  }

  mark = object->mark();

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stacklock);
      return;
    }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavy
// weight monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to work around deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}
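
// For illustration: a minimal sketch of the complete_exit()/reenter() protocol
// described above, assuming hypothetical handles lock1 and lock2 (both held,
// acquired in that order) with THREAD in scope.  Steps 3 and 5 stand for
// whatever ordinary monitor exit/enter path the caller uses:
//
//   intptr_t rc = ObjectSynchronizer::complete_exit(lock1, THREAD); // 1) give up lock1, saving its recursion count
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                     // 2) wait on lock2
//   ...                                                             // 3) once notified, unlock lock2
//   ObjectSynchronizer::reenter(lock1, rc, THREAD);                 // 4) reacquire lock1 at the saved recursion count
//   ...                                                             // 5) lock lock2 again, restoring lock1 -> lock2 order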
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj())->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(InflationLocks + ix, "InflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: naked_yield() is advisory and has almost no effect on some platforms,
          // so we periodically call Thread::current()->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(InflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same RNG output.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() ||
         Self->is_Java_thread(), "invariant");
  assert(Universe::verify_in_progress() ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    //   The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() call.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load displaced header and check it has hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge hash code into header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method could revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have an assertion here since this object may have been
  // locked by another thread by the time we reach this point.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}
// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      mid = (ObjectMonitor *)(block + i);
      oop object = (oop) mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
}

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  for (PaddedEnd<ObjectMonitor> * block =
       (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
       block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by ListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg(1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      ::printf("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge);
      ::fflush(stdout);
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be enqueued and posted
    // to the VMThread and have a lifespan longer than that of this activation record.
    // The VMThread will delete the op when completed.
    VMThread::execute(new VM_ForceAsyncSafepoint());

    if (ObjectMonitor::Knob_Verbose) {
      ::printf("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge);
      ::fflush(stdout);
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int inusetally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    inusetally++;
  }
  assert(inusetally == Self->omInUseCount, "inuse count off");

  int freetally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    freetally++;
  }
  assert(freetally == Self->omFreeCount, "free count off");
}

ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.   Thread-local free lists take
    // heat off the ListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&ListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        MonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&ListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list; each monitor points to its next,
    // forming the singly linked free list. The very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand.  This avoids some lock traffic and redundant
    // list activity.

    // Acquire the ListLock to manipulate BlockList and FreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&ListLock, "omAlloc [2]");
    MonitorPopulation += _BLOCKSIZE-1;
    MonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    gBlockList = temp;

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&ListLock);
    TEVENT(Allocate block of monitors);
  }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");

  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* curmidinuse = NULL;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL;) {
      if (m == mid) {
        // extract from per-thread in-use-list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (curmidinuse != NULL) {
          curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
        }
        Self->omInUseCount--;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
        break;
      } else {
        curmidinuse = mid;
        mid = mid->FreeNext;
      }
    }
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after_ the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * List = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * Tail = NULL;
  int Tally = 0;
  if (List != NULL) {
    ObjectMonitor * s;
    for (s = List; s != NULL; s = s->FreeNext) {
      Tally++;
      Tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(Tail != NULL && List != NULL, "invariant");
  }

  ObjectMonitor * InUseList = Self->omInUseList;
  ObjectMonitor * InUseTail = NULL;
  int InUseTally = 0;
  if (InUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *curom;
    for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
      InUseTail = curom;
      InUseTally++;
    }
    assert(Self->omInUseCount == InUseTally, "inuse count off");
    Self->omInUseCount = 0;
    guarantee(InUseTail != NULL && InUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&ListLock, "omFlush");
  if (Tail != NULL) {
    Tail->FreeNext = gFreeList;
    gFreeList = List;
    MonitorFreeCount += Tally;
  }

  if (InUseTail != NULL) {
    InUseTail->FreeNext = gOmInUseList;
    gOmInUseList = InUseList;
    gOmInUseCount += InUseTally;
  }

  Thread::muxRelease(&ListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(), obj);
}


ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
                                                     oop object) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word.  We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark.  This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval.  A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header.  Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor.  The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants.  If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate.  The owner
      // will then spin, waiting for the 0 value to disappear.   Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that
      // would otherwise permit hashCode values to change or "flicker" for an object.
1245       // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
1246       // 0 serves as a "BUSY" inflate-in-progress indicator.
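      //
      // An illustrative interleaving of the protocol (a sketch, not code):
      //
      //   Inflating thread T1                  Owner thread T2 (slow_exit)
      //   -------------------                  ---------------------------
      //   CAS(mark, stack-locked, 0)  ok
      //                                        CAS(mark, stack-locked, dhw)  fails: mark==0
      //                                        falls into inflate(), spins in ReadStableMark()
      //   dmw = mark->displaced_mark_helper()  // stable while mark == 0
      //   m->set_header(dmw)
      //   release_set_mark(encode(m))
      //                                        T2 observes the inflated mark and exits
      //                                        through the objectMonitor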
1247 
1248 
      // Fetch the displaced mark from the owner's stack.
1250       // The owner can't die or unwind past the lock while our INFLATING
1251       // object is in the mark.  Furthermore the owner can't complete
1252       // an unlock on the object, either.
1253       markOop dmw = mark->displaced_mark_helper();
1254       assert(dmw->is_neutral(), "invariant");
1255 
1256       // Setup monitor fields to proper values -- prepare the monitor
1257       m->set_header(dmw);
1258 
1259       // Optimization: if the mark->locker stack address is associated
1260       // with this thread we could simply set m->_owner = Self.
1261       // Note that a thread can inflate an object
1262       // that it has stack-locked -- as might happen in wait() -- directly
1263       // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
1264       m->set_owner(mark->locker());
1265       m->set_object(object);
1266       // TODO-FIXME: assert BasicLock->dhw != 0.
1267 
1268       // Must preserve store ordering. The monitor state must
1269       // be stable at the time of publishing the monitor address.
1270       guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
1271       object->release_set_mark(markOopDesc::encode(m));
1272 
1273       // Hopefully the performance counters are allocated on distinct cache lines
1274       // to avoid false sharing on MP systems ...
1275       if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
1276       TEVENT(Inflate: overwrite stacklock);
1277       if (TraceMonitorInflation) {
1278         if (object->is_instance()) {
1279           ResourceMark rm;
1280           tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1281                         (void *) object, (intptr_t) object->mark(),
1282                         object->klass()->external_name());
1283         }
1284       }
1285       return m;
1286     }
1287 
1288     // CASE: neutral
1289     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1290     // If we know we're inflating for entry it's better to inflate by swinging a
1291     // pre-locked objectMonitor pointer into the object header.   A successful
1292     // CAS inflates the object *and* confers ownership to the inflating thread.
1293     // In the current implementation we use a 2-step mechanism where we CAS()
1294     // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1295     // An inflateTry() method that we could call from fast_enter() and slow_enter()
1296     // would be useful.
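    //
    // A minimal sketch of such a hypothetical inflateTry() (the name and
    // shape are assumptions drawn from the comment above, not code that
    // exists in this file):
    //
    //   ObjectMonitor* inflateTry(Thread* Self, oop object) {
    //     markOop mark = object->mark();
    //     if (!mark->is_neutral()) return NULL;  // caller falls back to inflate()
    //     ObjectMonitor* m = omAlloc(Self);
    //     m->Recycle();
    //     m->set_header(mark);                   // preserve hash/age bits
    //     m->set_owner(Self);                    // pre-locked for the caller
    //     m->set_object(object);
    //     if (Atomic::cmpxchg_ptr(markOopDesc::encode(m),
    //                             object->mark_addr(), mark) == mark) {
    //       return m;                            // inflated *and* owned in one CAS
    //     }
    //     m->set_object(NULL);                   // interference: undo and release
    //     m->set_owner(NULL);
    //     m->Recycle();
    //     omRelease(Self, m, true);
    //     return NULL;                           // caller retries or falls back
    //   }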
1297 
1298     assert(mark->is_neutral(), "invariant");
1299     ObjectMonitor * m = omAlloc(Self);
1300     // prepare m for installation - set monitor to initial state
1301     m->Recycle();
1302     m->set_header(mark);
1303     m->set_owner(NULL);
1304     m->set_object(object);
1305     m->_recursions   = 0;
1306     m->_Responsible  = NULL;
1307     m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class
1308 
    if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1310       m->set_object(NULL);
1311       m->set_owner(NULL);
1312       m->Recycle();
1313       omRelease(Self, m, true);
1314       m = NULL;
      // Interference -- the markword changed -- just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
      continue;
1319     }
1320 
1321     // Hopefully the performance counters are allocated on distinct
1322     // cache lines to avoid false sharing on MP systems ...
1323     if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
1324     TEVENT(Inflate: overwrite neutral);
1325     if (TraceMonitorInflation) {
1326       if (object->is_instance()) {
1327         ResourceMark rm;
1328         tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1329                       (void *) object, (intptr_t) object->mark(),
1330                       object->klass()->external_name());
1331       }
1332     }
1333     return m;
1334   }
1335 }
1336 
1337 
// deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
1342 //
1343 // Beware that we scavenge at *every* stop-the-world point.
1344 // Having a large number of monitors in-circulation negatively
1345 // impacts the performance of some applications (e.g., PointBase).
1346 // Broadly, we want to minimize the # of monitors in circulation.
1347 //
1348 // We have added a flag, MonitorInUseLists, which creates a list
1349 // of active monitors for each thread. deflate_idle_monitors()
1350 // only scans the per-thread inuse lists. omAlloc() puts all
1351 // assigned monitors on the per-thread list. deflate_idle_monitors()
1352 // returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds that thread's list of active
// monitors to the global gOmInUseList while holding the global list
// lock. deflate_idle_monitors() likewise acquires the global list lock
// to scan gOmInUseList and move non-busy monitors to the global free list.
1357 // An alternative could have used a single global inuse list. The
1358 // downside would have been the additional cost of acquiring the global list lock
1359 // for every omAlloc().
1360 //
1361 // Perversely, the heap size -- and thus the STW safepoint rate --
1362 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
1363 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1364 // This is an unfortunate aspect of this design.
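//
// Rough picture of the monitor flow described above (an illustrative
// sketch, not code):
//
//   gFreeList --omAlloc()--> per-thread omFreeList --> per-thread omInUseList
//   per-thread omInUseList --omFlush() at thread exit--> gOmInUseList
//   {per-thread omInUseList, gOmInUseList} --deflate_idle_monitors()--> gFreeList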
1365 
1366 enum ManifestConstants {
1367   ClearResponsibleAtSTW   = 0,
1368   MaximumRecheckInterval  = 1000
1369 };
1370 
1371 // Deflate a single monitor if not in use
1372 // Return true if deflated, false if in use
1373 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1374                                          ObjectMonitor** freeHeadp,
1375                                          ObjectMonitor** freeTailp) {
1376   bool deflated;
1377   // Normal case ... The monitor is associated with obj.
1378   guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1379   guarantee(mid == obj->mark()->monitor(), "invariant");
1380   guarantee(mid->header()->is_neutral(), "invariant");
1381 
1382   if (mid->is_busy()) {
1383     if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1384     deflated = false;
1385   } else {
1386     // Deflate the monitor if it is no longer being used
1387     // It's idle - scavenge and return to the global free list
1388     // plain old deflation ...
1389     TEVENT(deflate_idle_monitors - scavenge1);
1390     if (TraceMonitorInflation) {
1391       if (obj->is_instance()) {
1392         ResourceMark rm;
1393         tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1394                       (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
1395       }
1396     }
1397 
1398     // Restore the header back to obj
1399     obj->release_set_mark(mid->header());
1400     mid->clear();
1401 
1402     assert(mid->object() == NULL, "invariant");
1403 
    // Move the deflated monitor to the working free list defined by freeHeadp, freeTailp.
1405     if (*freeHeadp == NULL) *freeHeadp = mid;
1406     if (*freeTailp != NULL) {
1407       ObjectMonitor * prevtail = *freeTailp;
1408       assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
1409       prevtail->FreeNext = mid;
1410     }
1411     *freeTailp = mid;
1412     deflated = true;
1413   }
1414   return deflated;
1415 }
1416 
1417 // Caller acquires ListLock
1418 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
1419                                           ObjectMonitor** freeHeadp,
1420                                           ObjectMonitor** freeTailp) {
1421   ObjectMonitor* mid;
1422   ObjectMonitor* next;
1423   ObjectMonitor* curmidinuse = NULL;
1424   int deflatedcount = 0;
1425 
1426   for (mid = *listheadp; mid != NULL;) {
1427     oop obj = (oop) mid->object();
1428     bool deflated = false;
1429     if (obj != NULL) {
1430       deflated = deflate_monitor(mid, obj, freeHeadp, freeTailp);
1431     }
1432     if (deflated) {
1433       // extract from per-thread in-use-list
1434       if (mid == *listheadp) {
1435         *listheadp = mid->FreeNext;
1436       } else if (curmidinuse != NULL) {
        curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread's in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is now the tail of the working free list
1441       mid = next;
1442       deflatedcount++;
1443     } else {
1444       curmidinuse = mid;
1445       mid = mid->FreeNext;
1446     }
1447   }
1448   return deflatedcount;
1449 }
1450 
1451 void ObjectSynchronizer::deflate_idle_monitors() {
1452   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1453   int nInuse = 0;              // currently associated with objects
1454   int nInCirculation = 0;      // extant
1455   int nScavenged = 0;          // reclaimed
1456   bool deflated = false;
1457 
1458   ObjectMonitor * FreeHead = NULL;  // Local SLL of scavenged monitors
1459   ObjectMonitor * FreeTail = NULL;
1460 
1461   TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
1465   Thread::muxAcquire(&ListLock, "scavenge - return");
1466 
1467   if (MonitorInUseLists) {
1468     int inUse = 0;
1469     for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
      cur->omInUseCount -= deflatedcount;
1473       if (ObjectMonitor::Knob_VerifyInUse) {
1474         verifyInUse(cur);
1475       }
1476       nScavenged += deflatedcount;
1477       nInuse += cur->omInUseCount;
1478     }
1479 
1480     // For moribund threads, scan gOmInUseList
1481     if (gOmInUseList) {
1482       nInCirculation += gOmInUseCount;
1483       int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
      gOmInUseCount -= deflatedcount;
1485       nScavenged += deflatedcount;
1486       nInuse += gOmInUseCount;
1487     }
1488 
1489   } else for (PaddedEnd<ObjectMonitor> * block =
1490               (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
1491               block = (PaddedEnd<ObjectMonitor> *)next(block)) {
1492     // Iterate over all extant monitors - Scavenge all idle monitors.
1493     assert(block->object() == CHAINMARKER, "must be a block header");
1494     nInCirculation += _BLOCKSIZE;
1495     for (int i = 1; i < _BLOCKSIZE; i++) {
1496       ObjectMonitor* mid = (ObjectMonitor*)&block[i];
1497       oop obj = (oop) mid->object();
1498 
1499       if (obj == NULL) {
1500         // The monitor is not associated with an object.
        // The monitor should be on either a thread-specific private
        // free list or the global free list.
1503         // obj == NULL IMPLIES mid->is_busy() == 0
1504         guarantee(!mid->is_busy(), "invariant");
1505         continue;
1506       }
1507       deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
1508 
1509       if (deflated) {
1510         mid->FreeNext = NULL;
1511         nScavenged++;
1512       } else {
1513         nInuse++;
1514       }
1515     }
1516   }
1517 
1518   MonitorFreeCount += nScavenged;
1519 
1520   // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
1521 
1522   if (ObjectMonitor::Knob_Verbose) {
1523     ::printf("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
1524              nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
1525              MonitorPopulation, MonitorFreeCount);
1526     ::fflush(stdout);
1527   }
1528 
1529   ForceMonitorScavenge = 0;    // Reset
1530 
1531   // Move the scavenged monitors back to the global free list.
1532   if (FreeHead != NULL) {
1533     guarantee(FreeTail != NULL && nScavenged > 0, "invariant");
1534     assert(FreeTail->FreeNext == NULL, "invariant");
1535     // constant-time list splice - prepend scavenged segment to gFreeList
1536     FreeTail->FreeNext = gFreeList;
1537     gFreeList = FreeHead;
1538   }
1539   Thread::muxRelease(&ListLock);
1540 
1541   if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged);
1542   if (ObjectMonitor::_sync_MonExtant  != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);
1543 
1544   // TODO: Add objectMonitor leak detection.
1545   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1546   GVars.stwRandom = os::random();
1547   GVars.stwCycle++;
1548 }
1549 
1550 // Monitor cleanup on JavaThread::exit
1551 
1552 // Iterate through monitor cache and attempt to release thread's monitors
1553 // Gives up on a particular monitor if an exception occurs, but continues
1554 // the overall iteration, swallowing the exception.
1555 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1556  private:
1557   TRAPS;
1558 
1559  public:
1560   ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
1561   void do_monitor(ObjectMonitor* mid) {
1562     if (mid->owner() == THREAD) {
1563       (void)mid->complete_exit(CHECK);
1564     }
1565   }
1566 };
1567 
1568 // Release all inflated monitors owned by THREAD.  Lightweight monitors are
1569 // ignored.  This is meant to be called during JNI thread detach which assumes
1570 // all remaining monitors are heavyweight.  All exceptions are swallowed.
1571 // Scanning the extant monitor list can be time consuming.
1572 // A simple optimization is to add a per-thread flag that indicates a thread
1573 // called jni_monitorenter() during its lifetime.
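//
// A minimal sketch of that flag check (the field name _entered_jni_monitor
// is hypothetical -- no such field exists in this codebase):
//   // in JNI MonitorEnter:    THREAD->_entered_jni_monitor = true;
//   // here, before scanning:  if (!THREAD->_entered_jni_monitor) return;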
1574 //
// Instead of No_Safepoint_Verifier it might be cheaper to
1576 // use an idiom of the form:
//   int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
1580 // Since the tests are extremely cheap we could leave them enabled
1581 // for normal product builds.
1582 
1583 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
1584   assert(THREAD == JavaThread::current(), "must be current Java thread");
1585   No_Safepoint_Verifier nsv;
1586   ReleaseJavaMonitorsClosure rjmc(THREAD);
1587   Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
1588   ObjectSynchronizer::monitors_iterate(&rjmc);
1589   Thread::muxRelease(&ListLock);
1590   THREAD->clear_pending_exception();
1591 }
1592 
1593 //------------------------------------------------------------------------------
1594 // Debugging code
1595 
1596 void ObjectSynchronizer::sanity_checks(const bool verbose,
1597                                        const uint cache_line_size,
1598                                        int *error_cnt_ptr,
1599                                        int *warning_cnt_ptr) {
1600   u_char *addr_begin      = (u_char*)&GVars;
1601   u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
1602   u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;
1603 
1604   if (verbose) {
1605     tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
1606                   sizeof(SharedGlobals));
1607   }
1608 
1609   uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
1610   if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);
1611 
1612   uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
1613   if (verbose) {
1614     tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
1615   }
1616 
1617   if (cache_line_size != 0) {
1618     // We were able to determine the L1 data cache line size so
1619     // do some cache line specific sanity checks
1620 
1621     if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line, which permits "
                    "false sharing.");
1625       (*warning_cnt_ptr)++;
1626     }
1627 
1628     if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line, which permits false sharing.");
1632       (*warning_cnt_ptr)++;
1633     }
1634 
1635     if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line, which permits false "
                    "sharing.");
1639       (*warning_cnt_ptr)++;
1640     }
1641   }
1642 }
1643 
1644 #ifndef PRODUCT
1645 
// Verify all monitors in the monitor cache; the verification is weak.
1647 void ObjectSynchronizer::verify() {
1648   PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
1649   ObjectMonitor* mid;
1650   while (block) {
1651     assert(block->object() == CHAINMARKER, "must be a block header");
1652     for (int i = 1; i < _BLOCKSIZE; i++) {
1653       mid = (ObjectMonitor *)(block + i);
1654       oop object = (oop) mid->object();
1655       if (object != NULL) {
1656         mid->verify();
1657       }
1658     }
1659     block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
1660   }
1661 }
1662 
1663 // Check if monitor belongs to the monitor cache
1664 // The list is grow-only so it's *relatively* safe to traverse
1665 // the list of extant blocks without taking a lock.
1666 
1667 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
1668   PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
1669 
1670   while (block) {
1671     assert(block->object() == CHAINMARKER, "must be a block header");
1672     if (monitor > (ObjectMonitor *)&block[0] &&
1673         monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
1674       address mon = (address) monitor;
1675       address blk = (address) block;
1676       size_t diff = mon - blk;
1677       assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "check");
1678       return 1;
1679     }
1680     block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
1681   }
1682   return 0;
1683 }
1684 
1685 #endif