1 /* 2 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/vmSymbols.hpp" 27 #include "memory/padded.hpp" 28 #include "memory/resourceArea.hpp" 29 #include "oops/markOop.hpp" 30 #include "oops/oop.inline.hpp" 31 #include "runtime/atomic.inline.hpp" 32 #include "runtime/biasedLocking.hpp" 33 #include "runtime/handles.inline.hpp" 34 #include "runtime/interfaceSupport.hpp" 35 #include "runtime/mutexLocker.hpp" 36 #include "runtime/objectMonitor.hpp" 37 #include "runtime/objectMonitor.inline.hpp" 38 #include "runtime/osThread.hpp" 39 #include "runtime/stubRoutines.hpp" 40 #include "runtime/synchronizer.hpp" 41 #include "runtime/thread.inline.hpp" 42 #include "utilities/dtrace.hpp" 43 #include "utilities/events.hpp" 44 #include "utilities/preserveException.hpp" 45 46 #if defined(__GNUC__) && !defined(PPC64) 47 // Need to inhibit inlining for older versions of GCC to avoid build-time failures 48 #define NOINLINE __attribute__((noinline)) 49 #else 50 #define NOINLINE 51 #endif 52 53 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC 54 55 // The "core" versions of monitor enter and exit reside in this file. 56 // The interpreter and compilers contain specialized transliterated 57 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(), 58 // for instance. If you make changes here, make sure to modify the 59 // interpreter, and both C1 and C2 fast-path inline locking code emission. 60 // 61 // ----------------------------------------------------------------------------- 62 63 #ifdef DTRACE_ENABLED 64 65 // Only bother with this argument setup if dtrace is available 66 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly. 67 68 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \ 69 char* bytes = NULL; \ 70 int len = 0; \ 71 jlong jtid = SharedRuntime::get_java_tid(thread); \ 72 Symbol* klassname = ((oop)(obj))->klass()->name(); \ 73 if (klassname != NULL) { \ 74 bytes = (char*)klassname->bytes(); \ 75 len = klassname->utf8_length(); \ 76 } 77 78 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \ 79 { \ 80 if (DTraceMonitorProbes) { \ 81 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ 82 HOTSPOT_MONITOR_WAIT(jtid, \ 83 (uintptr_t)(monitor), bytes, len, (millis)); \ 84 } \ 85 } 86 87 #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED 88 89 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \ 90 { \ 91 if (DTraceMonitorProbes) { \ 92 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ 93 HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */ \ 94 (uintptr_t)(monitor), bytes, len); \ 95 } \ 96 } 97 98 #else // ndef DTRACE_ENABLED 99 100 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;} 101 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;} 102 103 #endif // ndef DTRACE_ENABLED 104 105 // This exists only as a workaround of dtrace bug 6254741 106 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) { 107 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr); 108 return 0; 109 } 110 111 #define NINFLATIONLOCKS 256 112 static volatile intptr_t InflationLocks[NINFLATIONLOCKS]; 113 114 // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't 115 // want to expose the PaddedEnd template more than necessary. 116 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL; 117 ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL; 118 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL; 119 int ObjectSynchronizer::gOmInUseCount = 0; 120 static volatile intptr_t ListLock = 0; // protects global monitor free-list cache 121 static volatile int MonitorFreeCount = 0; // # on gFreeList 122 static volatile int MonitorPopulation = 0; // # Extant -- in circulation 123 #define CHAINMARKER (cast_to_oop<intptr_t>(-1)) 124 125 // ----------------------------------------------------------------------------- 126 // Fast Monitor Enter/Exit 127 // This the fast monitor enter. The interpreter and compiler use 128 // some assembly copies of this code. Make sure update those code 129 // if the following function is changed. The implementation is 130 // extremely sensitive to race condition. Be careful. 131 132 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, 133 bool attempt_rebias, TRAPS) { 134 if (UseBiasedLocking) { 135 if (!SafepointSynchronize::is_at_safepoint()) { 136 BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD); 137 if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) { 138 return; 139 } 140 } else { 141 assert(!attempt_rebias, "can not rebias toward VM thread"); 142 BiasedLocking::revoke_at_safepoint(obj); 143 } 144 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 145 } 146 147 slow_enter(obj, lock, THREAD); 148 } 149 150 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) { 151 assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here"); 152 // if displaced header is null, the previous enter is recursive enter, no-op 153 markOop dhw = lock->displaced_header(); 154 markOop mark; 155 if (dhw == NULL) { 156 // Recursive stack-lock. 157 // Diagnostics -- Could be: stack-locked, inflating, inflated. 158 mark = object->mark(); 159 assert(!mark->is_neutral(), "invariant"); 160 if (mark->has_locker() && mark != markOopDesc::INFLATING()) { 161 assert(THREAD->is_lock_owned((address)mark->locker()), "invariant"); 162 } 163 if (mark->has_monitor()) { 164 ObjectMonitor * m = mark->monitor(); 165 assert(((oop)(m->object()))->mark() == mark, "invariant"); 166 assert(m->is_entered(THREAD), "invariant"); 167 } 168 return; 169 } 170 171 mark = object->mark(); 172 173 // If the object is stack-locked by the current thread, try to 174 // swing the displaced header from the box back to the mark. 175 if (mark == (markOop) lock) { 176 assert(dhw->is_neutral(), "invariant"); 177 if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) { 178 TEVENT(fast_exit: release stacklock); 179 return; 180 } 181 } 182 183 ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD); 184 } 185 186 // ----------------------------------------------------------------------------- 187 // Interpreter/Compiler Slow Case 188 // This routine is used to handle interpreter/compiler slow case 189 // We don't need to use fast path here, because it must have been 190 // failed in the interpreter/compiler code. 191 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) { 192 markOop mark = obj->mark(); 193 assert(!mark->has_bias_pattern(), "should not see bias pattern here"); 194 195 if (mark->is_neutral()) { 196 // Anticipate successful CAS -- the ST of the displaced mark must 197 // be visible <= the ST performed by the CAS. 198 lock->set_displaced_header(mark); 199 if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) { 200 TEVENT(slow_enter: release stacklock); 201 return; 202 } 203 // Fall through to inflate() ... 204 } else if (mark->has_locker() && 205 THREAD->is_lock_owned((address)mark->locker())) { 206 assert(lock != mark->locker(), "must not re-lock the same lock"); 207 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock"); 208 lock->set_displaced_header(NULL); 209 return; 210 } 211 212 // The object header will never be displaced to this lock, 213 // so it does not matter what the value is, except that it 214 // must be non-zero to avoid looking like a re-entrant lock, 215 // and must not look locked either. 216 lock->set_displaced_header(markOopDesc::unused_mark()); 217 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD); 218 } 219 220 // This routine is used to handle interpreter/compiler slow case 221 // We don't need to use fast path here, because it must have 222 // failed in the interpreter/compiler code. Simply use the heavy 223 // weight monitor should be ok, unless someone find otherwise. 224 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) { 225 fast_exit(object, lock, THREAD); 226 } 227 228 // ----------------------------------------------------------------------------- 229 // Class Loader support to workaround deadlocks on the class loader lock objects 230 // Also used by GC 231 // complete_exit()/reenter() are used to wait on a nested lock 232 // i.e. to give up an outer lock completely and then re-enter 233 // Used when holding nested locks - lock acquisition order: lock1 then lock2 234 // 1) complete_exit lock1 - saving recursion count 235 // 2) wait on lock2 236 // 3) when notified on lock2, unlock lock2 237 // 4) reenter lock1 with original recursion count 238 // 5) lock lock2 239 // NOTE: must use heavy weight monitor to handle complete_exit/reenter() 240 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) { 241 TEVENT(complete_exit); 242 if (UseBiasedLocking) { 243 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 244 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 245 } 246 247 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); 248 249 return monitor->complete_exit(THREAD); 250 } 251 252 // NOTE: must use heavy weight monitor to handle complete_exit/reenter() 253 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) { 254 TEVENT(reenter); 255 if (UseBiasedLocking) { 256 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 257 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 258 } 259 260 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); 261 262 monitor->reenter(recursion, THREAD); 263 } 264 // ----------------------------------------------------------------------------- 265 // JNI locks on java objects 266 // NOTE: must use heavy weight monitor to handle jni monitor enter 267 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { 268 // the current locking is from JNI instead of Java code 269 TEVENT(jni_enter); 270 if (UseBiasedLocking) { 271 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 272 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 273 } 274 THREAD->set_current_pending_monitor_is_from_java(false); 275 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD); 276 THREAD->set_current_pending_monitor_is_from_java(true); 277 } 278 279 // NOTE: must use heavy weight monitor to handle jni monitor enter 280 bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) { 281 if (UseBiasedLocking) { 282 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 283 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 284 } 285 286 ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj()); 287 return monitor->try_enter(THREAD); 288 } 289 290 291 // NOTE: must use heavy weight monitor to handle jni monitor exit 292 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) { 293 TEVENT(jni_exit); 294 if (UseBiasedLocking) { 295 Handle h_obj(THREAD, obj); 296 BiasedLocking::revoke_and_rebias(h_obj, false, THREAD); 297 obj = h_obj(); 298 } 299 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 300 301 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj); 302 // If this thread has locked the object, exit the monitor. Note: can't use 303 // monitor->check(CHECK); must exit even if an exception is pending. 304 if (monitor->check(THREAD)) { 305 monitor->exit(true, THREAD); 306 } 307 } 308 309 // ----------------------------------------------------------------------------- 310 // Internal VM locks on java objects 311 // standard constructor, allows locking failures 312 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) { 313 _dolock = doLock; 314 _thread = thread; 315 debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);) 316 _obj = obj; 317 318 if (_dolock) { 319 TEVENT(ObjectLocker); 320 321 ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread); 322 } 323 } 324 325 ObjectLocker::~ObjectLocker() { 326 if (_dolock) { 327 ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread); 328 } 329 } 330 331 332 // ----------------------------------------------------------------------------- 333 // Wait/Notify/NotifyAll 334 // NOTE: must use heavy weight monitor to handle wait() 335 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) { 336 if (UseBiasedLocking) { 337 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 338 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 339 } 340 if (millis < 0) { 341 TEVENT(wait - throw IAX); 342 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); 343 } 344 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); 345 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis); 346 monitor->wait(millis, true, THREAD); 347 348 // This dummy call is in place to get around dtrace bug 6254741. Once 349 // that's fixed we can uncomment the following line, remove the call 350 // and change this function back into a "void" func. 351 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD); 352 return dtrace_waited_probe(monitor, obj, THREAD); 353 } 354 355 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) { 356 if (UseBiasedLocking) { 357 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 358 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 359 } 360 if (millis < 0) { 361 TEVENT(wait - throw IAX); 362 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); 363 } 364 ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD); 365 } 366 367 void ObjectSynchronizer::notify(Handle obj, TRAPS) { 368 if (UseBiasedLocking) { 369 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 370 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 371 } 372 373 markOop mark = obj->mark(); 374 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { 375 return; 376 } 377 ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD); 378 } 379 380 // NOTE: see comment of notify() 381 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) { 382 if (UseBiasedLocking) { 383 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 384 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 385 } 386 387 markOop mark = obj->mark(); 388 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { 389 return; 390 } 391 ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD); 392 } 393 394 // ----------------------------------------------------------------------------- 395 // Hash Code handling 396 // 397 // Performance concern: 398 // OrderAccess::storestore() calls release() which at one time stored 0 399 // into the global volatile OrderAccess::dummy variable. This store was 400 // unnecessary for correctness. Many threads storing into a common location 401 // causes considerable cache migration or "sloshing" on large SMP systems. 402 // As such, I avoided using OrderAccess::storestore(). In some cases 403 // OrderAccess::fence() -- which incurs local latency on the executing 404 // processor -- is a better choice as it scales on SMP systems. 405 // 406 // See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for 407 // a discussion of coherency costs. Note that all our current reference 408 // platforms provide strong ST-ST order, so the issue is moot on IA32, 409 // x64, and SPARC. 410 // 411 // As a general policy we use "volatile" to control compiler-based reordering 412 // and explicit fences (barriers) to control for architectural reordering 413 // performed by the CPU(s) or platform. 414 415 struct SharedGlobals { 416 char _pad_prefix[DEFAULT_CACHE_LINE_SIZE]; 417 // These are highly shared mostly-read variables. 418 // To avoid false-sharing they need to be the sole occupants of a cache line. 419 volatile int stwRandom; 420 volatile int stwCycle; 421 DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2); 422 // Hot RW variable -- Sequester to avoid false-sharing 423 volatile int hcSequence; 424 DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int)); 425 }; 426 427 static SharedGlobals GVars; 428 static int MonitorScavengeThreshold = 1000000; 429 static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending 430 431 static markOop ReadStableMark(oop obj) { 432 markOop mark = obj->mark(); 433 if (!mark->is_being_inflated()) { 434 return mark; // normal fast-path return 435 } 436 437 int its = 0; 438 for (;;) { 439 markOop mark = obj->mark(); 440 if (!mark->is_being_inflated()) { 441 return mark; // normal fast-path return 442 } 443 444 // The object is being inflated by some other thread. 445 // The caller of ReadStableMark() must wait for inflation to complete. 446 // Avoid live-lock 447 // TODO: consider calling SafepointSynchronize::do_call_back() while 448 // spinning to see if there's a safepoint pending. If so, immediately 449 // yielding or blocking would be appropriate. Avoid spinning while 450 // there is a safepoint pending. 451 // TODO: add inflation contention performance counters. 452 // TODO: restrict the aggregate number of spinners. 453 454 ++its; 455 if (its > 10000 || !os::is_MP()) { 456 if (its & 1) { 457 os::naked_yield(); 458 TEVENT(Inflate: INFLATING - yield); 459 } else { 460 // Note that the following code attenuates the livelock problem but is not 461 // a complete remedy. A more complete solution would require that the inflating 462 // thread hold the associated inflation lock. The following code simply restricts 463 // the number of spinners to at most one. We'll have N-2 threads blocked 464 // on the inflationlock, 1 thread holding the inflation lock and using 465 // a yield/park strategy, and 1 thread in the midst of inflation. 466 // A more refined approach would be to change the encoding of INFLATING 467 // to allow encapsulation of a native thread pointer. Threads waiting for 468 // inflation to complete would use CAS to push themselves onto a singly linked 469 // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag 470 // and calling park(). When inflation was complete the thread that accomplished inflation 471 // would detach the list and set the markword to inflated with a single CAS and 472 // then for each thread on the list, set the flag and unpark() the thread. 473 // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease 474 // wakes at most one thread whereas we need to wake the entire list. 475 int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1); 476 int YieldThenBlock = 0; 477 assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant"); 478 assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant"); 479 Thread::muxAcquire(InflationLocks + ix, "InflationLock"); 480 while (obj->mark() == markOopDesc::INFLATING()) { 481 // Beware: NakedYield() is advisory and has almost no effect on some platforms 482 // so we periodically call Self->_ParkEvent->park(1). 483 // We use a mixed spin/yield/block mechanism. 484 if ((YieldThenBlock++) >= 16) { 485 Thread::current()->_ParkEvent->park(1); 486 } else { 487 os::naked_yield(); 488 } 489 } 490 Thread::muxRelease(InflationLocks + ix); 491 TEVENT(Inflate: INFLATING - yield/park); 492 } 493 } else { 494 SpinPause(); // SMP-polite spinning 495 } 496 } 497 } 498 499 // hashCode() generation : 500 // 501 // Possibilities: 502 // * MD5Digest of {obj,stwRandom} 503 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function. 504 // * A DES- or AES-style SBox[] mechanism 505 // * One of the Phi-based schemes, such as: 506 // 2654435761 = 2^32 * Phi (golden ratio) 507 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ; 508 // * A variation of Marsaglia's shift-xor RNG scheme. 509 // * (obj ^ stwRandom) is appealing, but can result 510 // in undesirable regularity in the hashCode values of adjacent objects 511 // (objects allocated back-to-back, in particular). This could potentially 512 // result in hashtable collisions and reduced hashtable efficiency. 513 // There are simple ways to "diffuse" the middle address bits over the 514 // generated hashCode values: 515 516 static inline intptr_t get_next_hash(Thread * Self, oop obj) { 517 intptr_t value = 0; 518 if (hashCode == 0) { 519 // This form uses an unguarded global Park-Miller RNG, 520 // so it's possible for two threads to race and generate the same RNG. 521 // On MP system we'll have lots of RW access to a global, so the 522 // mechanism induces lots of coherency traffic. 523 value = os::random(); 524 } else if (hashCode == 1) { 525 // This variation has the property of being stable (idempotent) 526 // between STW operations. This can be useful in some of the 1-0 527 // synchronization schemes. 528 intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3; 529 value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom; 530 } else if (hashCode == 2) { 531 value = 1; // for sensitivity testing 532 } else if (hashCode == 3) { 533 value = ++GVars.hcSequence; 534 } else if (hashCode == 4) { 535 value = cast_from_oop<intptr_t>(obj); 536 } else { 537 // Marsaglia's xor-shift scheme with thread-specific state 538 // This is probably the best overall implementation -- we'll 539 // likely make this the default in future releases. 540 unsigned t = Self->_hashStateX; 541 t ^= (t << 11); 542 Self->_hashStateX = Self->_hashStateY; 543 Self->_hashStateY = Self->_hashStateZ; 544 Self->_hashStateZ = Self->_hashStateW; 545 unsigned v = Self->_hashStateW; 546 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)); 547 Self->_hashStateW = v; 548 value = v; 549 } 550 551 value &= markOopDesc::hash_mask; 552 if (value == 0) value = 0xBAD; 553 assert(value != markOopDesc::no_hash, "invariant"); 554 TEVENT(hashCode: GENERATE); 555 return value; 556 } 557 558 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) { 559 if (UseBiasedLocking) { 560 // NOTE: many places throughout the JVM do not expect a safepoint 561 // to be taken here, in particular most operations on perm gen 562 // objects. However, we only ever bias Java instances and all of 563 // the call sites of identity_hash that might revoke biases have 564 // been checked to make sure they can handle a safepoint. The 565 // added check of the bias pattern is to avoid useless calls to 566 // thread-local storage. 567 if (obj->mark()->has_bias_pattern()) { 568 // Handle for oop obj in case of STW safepoint 569 Handle hobj(Self, obj); 570 // Relaxing assertion for bug 6320749. 571 assert(Universe::verify_in_progress() || 572 !SafepointSynchronize::is_at_safepoint(), 573 "biases should not be seen by VM thread here"); 574 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current()); 575 obj = hobj(); 576 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 577 } 578 } 579 580 // hashCode() is a heap mutator ... 581 // Relaxing assertion for bug 6320749. 582 assert(Universe::verify_in_progress() || 583 !SafepointSynchronize::is_at_safepoint(), "invariant"); 584 assert(Universe::verify_in_progress() || 585 Self->is_Java_thread() , "invariant"); 586 assert(Universe::verify_in_progress() || 587 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant"); 588 589 ObjectMonitor* monitor = NULL; 590 markOop temp, test; 591 intptr_t hash; 592 markOop mark = ReadStableMark(obj); 593 594 // object should remain ineligible for biased locking 595 assert(!mark->has_bias_pattern(), "invariant"); 596 597 if (mark->is_neutral()) { 598 hash = mark->hash(); // this is a normal header 599 if (hash) { // if it has hash, just return it 600 return hash; 601 } 602 hash = get_next_hash(Self, obj); // allocate a new hash code 603 temp = mark->copy_set_hash(hash); // merge the hash code into header 604 // use (machine word version) atomic operation to install the hash 605 test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark); 606 if (test == mark) { 607 return hash; 608 } 609 // If atomic operation failed, we must inflate the header 610 // into heavy weight monitor. We could add more code here 611 // for fast path, but it does not worth the complexity. 612 } else if (mark->has_monitor()) { 613 monitor = mark->monitor(); 614 temp = monitor->header(); 615 assert(temp->is_neutral(), "invariant"); 616 hash = temp->hash(); 617 if (hash) { 618 return hash; 619 } 620 // Skip to the following code to reduce code size 621 } else if (Self->is_lock_owned((address)mark->locker())) { 622 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned 623 assert(temp->is_neutral(), "invariant"); 624 hash = temp->hash(); // by current thread, check if the displaced 625 if (hash) { // header contains hash code 626 return hash; 627 } 628 // WARNING: 629 // The displaced header is strictly immutable. 630 // It can NOT be changed in ANY cases. So we have 631 // to inflate the header into heavyweight monitor 632 // even the current thread owns the lock. The reason 633 // is the BasicLock (stack slot) will be asynchronously 634 // read by other threads during the inflate() function. 635 // Any change to stack may not propagate to other threads 636 // correctly. 637 } 638 639 // Inflate the monitor to set hash code 640 monitor = ObjectSynchronizer::inflate(Self, obj); 641 // Load displaced header and check it has hash code 642 mark = monitor->header(); 643 assert(mark->is_neutral(), "invariant"); 644 hash = mark->hash(); 645 if (hash == 0) { 646 hash = get_next_hash(Self, obj); 647 temp = mark->copy_set_hash(hash); // merge hash code into header 648 assert(temp->is_neutral(), "invariant"); 649 test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark); 650 if (test != mark) { 651 // The only update to the header in the monitor (outside GC) 652 // is install the hash code. If someone add new usage of 653 // displaced header, please update this code 654 hash = test->hash(); 655 assert(test->is_neutral(), "invariant"); 656 assert(hash != 0, "Trivial unexpected object/monitor header usage."); 657 } 658 } 659 // We finally get the hash 660 return hash; 661 } 662 663 // Deprecated -- use FastHashCode() instead. 664 665 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) { 666 return FastHashCode(Thread::current(), obj()); 667 } 668 669 670 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread, 671 Handle h_obj) { 672 if (UseBiasedLocking) { 673 BiasedLocking::revoke_and_rebias(h_obj, false, thread); 674 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 675 } 676 677 assert(thread == JavaThread::current(), "Can only be called on current thread"); 678 oop obj = h_obj(); 679 680 markOop mark = ReadStableMark(obj); 681 682 // Uncontended case, header points to stack 683 if (mark->has_locker()) { 684 return thread->is_lock_owned((address)mark->locker()); 685 } 686 // Contended case, header points to ObjectMonitor (tagged pointer) 687 if (mark->has_monitor()) { 688 ObjectMonitor* monitor = mark->monitor(); 689 return monitor->is_entered(thread) != 0; 690 } 691 // Unlocked case, header in place 692 assert(mark->is_neutral(), "sanity check"); 693 return false; 694 } 695 696 // Be aware of this method could revoke bias of the lock object. 697 // This method queries the ownership of the lock handle specified by 'h_obj'. 698 // If the current thread owns the lock, it returns owner_self. If no 699 // thread owns the lock, it returns owner_none. Otherwise, it will return 700 // owner_other. 701 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership 702 (JavaThread *self, Handle h_obj) { 703 // The caller must beware this method can revoke bias, and 704 // revocation can result in a safepoint. 705 assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); 706 assert(self->thread_state() != _thread_blocked, "invariant"); 707 708 // Possible mark states: neutral, biased, stack-locked, inflated 709 710 if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) { 711 // CASE: biased 712 BiasedLocking::revoke_and_rebias(h_obj, false, self); 713 assert(!h_obj->mark()->has_bias_pattern(), 714 "biases should be revoked by now"); 715 } 716 717 assert(self == JavaThread::current(), "Can only be called on current thread"); 718 oop obj = h_obj(); 719 markOop mark = ReadStableMark(obj); 720 721 // CASE: stack-locked. Mark points to a BasicLock on the owner's stack. 722 if (mark->has_locker()) { 723 return self->is_lock_owned((address)mark->locker()) ? 724 owner_self : owner_other; 725 } 726 727 // CASE: inflated. Mark (tagged pointer) points to an objectMonitor. 728 // The Object:ObjectMonitor relationship is stable as long as we're 729 // not at a safepoint. 730 if (mark->has_monitor()) { 731 void * owner = mark->monitor()->_owner; 732 if (owner == NULL) return owner_none; 733 return (owner == self || 734 self->is_lock_owned((address)owner)) ? owner_self : owner_other; 735 } 736 737 // CASE: neutral 738 assert(mark->is_neutral(), "sanity check"); 739 return owner_none; // it's unlocked 740 } 741 742 // FIXME: jvmti should call this 743 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) { 744 if (UseBiasedLocking) { 745 if (SafepointSynchronize::is_at_safepoint()) { 746 BiasedLocking::revoke_at_safepoint(h_obj); 747 } else { 748 BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current()); 749 } 750 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 751 } 752 753 oop obj = h_obj(); 754 address owner = NULL; 755 756 markOop mark = ReadStableMark(obj); 757 758 // Uncontended case, header points to stack 759 if (mark->has_locker()) { 760 owner = (address) mark->locker(); 761 } 762 763 // Contended case, header points to ObjectMonitor (tagged pointer) 764 if (mark->has_monitor()) { 765 ObjectMonitor* monitor = mark->monitor(); 766 assert(monitor != NULL, "monitor should be non-null"); 767 owner = (address) monitor->owner(); 768 } 769 770 if (owner != NULL) { 771 // owning_thread_from_monitor_owner() may also return NULL here 772 return Threads::owning_thread_from_monitor_owner(owner, doLock); 773 } 774 775 // Unlocked case, header in place 776 // Cannot have assertion since this object may have been 777 // locked by another thread when reaching here. 778 // assert(mark->is_neutral(), "sanity check"); 779 780 return NULL; 781 } 782 // Visitors ... 783 784 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) { 785 PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList; 786 ObjectMonitor* mid; 787 while (block) { 788 assert(block->object() == CHAINMARKER, "must be a block header"); 789 for (int i = _BLOCKSIZE - 1; i > 0; i--) { 790 mid = (ObjectMonitor *)(block + i); 791 oop object = (oop) mid->object(); 792 if (object != NULL) { 793 closure->do_monitor(mid); 794 } 795 } 796 block = (PaddedEnd<ObjectMonitor> *) block->FreeNext; 797 } 798 } 799 800 // Get the next block in the block list. 801 static inline ObjectMonitor* next(ObjectMonitor* block) { 802 assert(block->object() == CHAINMARKER, "must be a block header"); 803 block = block->FreeNext; 804 assert(block == NULL || block->object() == CHAINMARKER, "must be a block header"); 805 return block; 806 } 807 808 809 void ObjectSynchronizer::oops_do(OopClosure* f) { 810 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 811 for (PaddedEnd<ObjectMonitor> * block = 812 (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL; 813 block = (PaddedEnd<ObjectMonitor> *)next(block)) { 814 assert(block->object() == CHAINMARKER, "must be a block header"); 815 for (int i = 1; i < _BLOCKSIZE; i++) { 816 ObjectMonitor* mid = (ObjectMonitor *)&block[i]; 817 if (mid->object() != NULL) { 818 f->do_oop((oop*)mid->object_addr()); 819 } 820 } 821 } 822 } 823 824 825 // ----------------------------------------------------------------------------- 826 // ObjectMonitor Lifecycle 827 // ----------------------- 828 // Inflation unlinks monitors from the global gFreeList and 829 // associates them with objects. Deflation -- which occurs at 830 // STW-time -- disassociates idle monitors from objects. Such 831 // scavenged monitors are returned to the gFreeList. 832 // 833 // The global list is protected by ListLock. All the critical sections 834 // are short and operate in constant-time. 835 // 836 // ObjectMonitors reside in type-stable memory (TSM) and are immortal. 837 // 838 // Lifecycle: 839 // -- unassigned and on the global free list 840 // -- unassigned and on a thread's private omFreeList 841 // -- assigned to an object. The object is inflated and the mark refers 842 // to the objectmonitor. 843 844 845 // Constraining monitor pool growth via MonitorBound ... 846 // 847 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but the 848 // the rate of scavenging is driven primarily by GC. As such, we can find 849 // an inordinate number of monitors in circulation. 850 // To avoid that scenario we can artificially induce a STW safepoint 851 // if the pool appears to be growing past some reasonable bound. 852 // Generally we favor time in space-time tradeoffs, but as there's no 853 // natural back-pressure on the # of extant monitors we need to impose some 854 // type of limit. Beware that if MonitorBound is set to too low a value 855 // we could just loop. In addition, if MonitorBound is set to a low value 856 // we'll incur more safepoints, which are harmful to performance. 857 // See also: GuaranteedSafepointInterval 858 // 859 // The current implementation uses asynchronous VM operations. 860 861 static void InduceScavenge(Thread * Self, const char * Whence) { 862 // Induce STW safepoint to trim monitors 863 // Ultimately, this results in a call to deflate_idle_monitors() in the near future. 864 // More precisely, trigger an asynchronous STW safepoint as the number 865 // of active monitors passes the specified threshold. 866 // TODO: assert thread state is reasonable 867 868 if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) { 869 if (ObjectMonitor::Knob_Verbose) { 870 ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ; 871 ::fflush(stdout); 872 } 873 // Induce a 'null' safepoint to scavenge monitors 874 // Must VM_Operation instance be heap allocated as the op will be enqueue and posted 875 // to the VMthread and have a lifespan longer than that of this activation record. 876 // The VMThread will delete the op when completed. 877 VMThread::execute(new VM_ForceAsyncSafepoint()); 878 879 if (ObjectMonitor::Knob_Verbose) { 880 ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ; 881 ::fflush(stdout); 882 } 883 } 884 } 885 886 void ObjectSynchronizer::verifyInUse(Thread *Self) { 887 ObjectMonitor* mid; 888 int inusetally = 0; 889 for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) { 890 inusetally++; 891 } 892 assert(inusetally == Self->omInUseCount, "inuse count off"); 893 894 int freetally = 0; 895 for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) { 896 freetally++; 897 } 898 assert(freetally == Self->omFreeCount, "free count off"); 899 } 900 901 ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) { 902 // A large MAXPRIVATE value reduces both list lock contention 903 // and list coherency traffic, but also tends to increase the 904 // number of objectMonitors in circulation as well as the STW 905 // scavenge costs. As usual, we lean toward time in space-time 906 // tradeoffs. 907 const int MAXPRIVATE = 1024; 908 for (;;) { 909 ObjectMonitor * m; 910 911 // 1: try to allocate from the thread's local omFreeList. 912 // Threads will attempt to allocate first from their local list, then 913 // from the global list, and only after those attempts fail will the thread 914 // attempt to instantiate new monitors. Thread-local free lists take 915 // heat off the ListLock and improve allocation latency, as well as reducing 916 // coherency traffic on the shared global list. 917 m = Self->omFreeList; 918 if (m != NULL) { 919 Self->omFreeList = m->FreeNext; 920 Self->omFreeCount--; 921 // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene 922 guarantee(m->object() == NULL, "invariant"); 923 if (MonitorInUseLists) { 924 m->FreeNext = Self->omInUseList; 925 Self->omInUseList = m; 926 Self->omInUseCount++; 927 if (ObjectMonitor::Knob_VerifyInUse) { 928 verifyInUse(Self); 929 } 930 } else { 931 m->FreeNext = NULL; 932 } 933 return m; 934 } 935 936 // 2: try to allocate from the global gFreeList 937 // CONSIDER: use muxTry() instead of muxAcquire(). 938 // If the muxTry() fails then drop immediately into case 3. 939 // If we're using thread-local free lists then try 940 // to reprovision the caller's free list. 941 if (gFreeList != NULL) { 942 // Reprovision the thread's omFreeList. 943 // Use bulk transfers to reduce the allocation rate and heat 944 // on various locks. 945 Thread::muxAcquire(&ListLock, "omAlloc"); 946 for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) { 947 MonitorFreeCount--; 948 ObjectMonitor * take = gFreeList; 949 gFreeList = take->FreeNext; 950 guarantee(take->object() == NULL, "invariant"); 951 guarantee(!take->is_busy(), "invariant"); 952 take->Recycle(); 953 omRelease(Self, take, false); 954 } 955 Thread::muxRelease(&ListLock); 956 Self->omFreeProvision += 1 + (Self->omFreeProvision/2); 957 if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE; 958 TEVENT(omFirst - reprovision); 959 960 const int mx = MonitorBound; 961 if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) { 962 // We can't safely induce a STW safepoint from omAlloc() as our thread 963 // state may not be appropriate for such activities and callers may hold 964 // naked oops, so instead we defer the action. 965 InduceScavenge(Self, "omAlloc"); 966 } 967 continue; 968 } 969 970 // 3: allocate a block of new ObjectMonitors 971 // Both the local and global free lists are empty -- resort to malloc(). 972 // In the current implementation objectMonitors are TSM - immortal. 973 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE], but we want 974 // each ObjectMonitor to start at the beginning of a cache line, 975 // so we use align_size_up(). 976 // A better solution would be to use C++ placement-new. 977 // BEWARE: As it stands currently, we don't run the ctors! 978 assert(_BLOCKSIZE > 1, "invariant"); 979 size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE; 980 PaddedEnd<ObjectMonitor> * temp; 981 size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1); 982 void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size, 983 mtInternal); 984 temp = (PaddedEnd<ObjectMonitor> *) 985 align_size_up((intptr_t)real_malloc_addr, 986 DEFAULT_CACHE_LINE_SIZE); 987 988 // NOTE: (almost) no way to recover if allocation failed. 989 // We might be able to induce a STW safepoint and scavenge enough 990 // objectMonitors to permit progress. 991 if (temp == NULL) { 992 vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR, 993 "Allocate ObjectMonitors"); 994 } 995 (void)memset((void *) temp, 0, neededsize); 996 997 // Format the block. 998 // initialize the linked list, each monitor points to its next 999 // forming the single linked free list, the very first monitor 1000 // will points to next block, which forms the block list. 1001 // The trick of using the 1st element in the block as gBlockList 1002 // linkage should be reconsidered. A better implementation would 1003 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; } 1004 1005 for (int i = 1; i < _BLOCKSIZE; i++) { 1006 temp[i].FreeNext = (ObjectMonitor *)&temp[i+1]; 1007 } 1008 1009 // terminate the last monitor as the end of list 1010 temp[_BLOCKSIZE - 1].FreeNext = NULL; 1011 1012 // Element [0] is reserved for global list linkage 1013 temp[0].set_object(CHAINMARKER); 1014 1015 // Consider carving out this thread's current request from the 1016 // block in hand. This avoids some lock traffic and redundant 1017 // list activity. 1018 1019 // Acquire the ListLock to manipulate BlockList and FreeList. 1020 // An Oyama-Taura-Yonezawa scheme might be more efficient. 1021 Thread::muxAcquire(&ListLock, "omAlloc [2]"); 1022 MonitorPopulation += _BLOCKSIZE-1; 1023 MonitorFreeCount += _BLOCKSIZE-1; 1024 1025 // Add the new block to the list of extant blocks (gBlockList). 1026 // The very first objectMonitor in a block is reserved and dedicated. 1027 // It serves as blocklist "next" linkage. 1028 temp[0].FreeNext = gBlockList; 1029 gBlockList = temp; 1030 1031 // Add the new string of objectMonitors to the global free list 1032 temp[_BLOCKSIZE - 1].FreeNext = gFreeList; 1033 gFreeList = temp + 1; 1034 Thread::muxRelease(&ListLock); 1035 TEVENT(Allocate block of monitors); 1036 } 1037 } 1038 1039 // Place "m" on the caller's private per-thread omFreeList. 1040 // In practice there's no need to clamp or limit the number of 1041 // monitors on a thread's omFreeList as the only time we'll call 1042 // omRelease is to return a monitor to the free list after a CAS 1043 // attempt failed. This doesn't allow unbounded #s of monitors to 1044 // accumulate on a thread's free list. 1045 1046 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m, 1047 bool fromPerThreadAlloc) { 1048 guarantee(m->object() == NULL, "invariant"); 1049 1050 // Remove from omInUseList 1051 if (MonitorInUseLists && fromPerThreadAlloc) { 1052 ObjectMonitor* curmidinuse = NULL; 1053 for (ObjectMonitor* mid = Self->omInUseList; mid != NULL;) { 1054 if (m == mid) { 1055 // extract from per-thread in-use-list 1056 if (mid == Self->omInUseList) { 1057 Self->omInUseList = mid->FreeNext; 1058 } else if (curmidinuse != NULL) { 1059 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist 1060 } 1061 Self->omInUseCount--; 1062 if (ObjectMonitor::Knob_VerifyInUse) { 1063 verifyInUse(Self); 1064 } 1065 break; 1066 } else { 1067 curmidinuse = mid; 1068 mid = mid->FreeNext; 1069 } 1070 } 1071 } 1072 1073 // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new 1074 m->FreeNext = Self->omFreeList; 1075 Self->omFreeList = m; 1076 Self->omFreeCount++; 1077 } 1078 1079 // Return the monitors of a moribund thread's local free list to 1080 // the global free list. Typically a thread calls omFlush() when 1081 // it's dying. We could also consider having the VM thread steal 1082 // monitors from threads that have not run java code over a few 1083 // consecutive STW safepoints. Relatedly, we might decay 1084 // omFreeProvision at STW safepoints. 1085 // 1086 // Also return the monitors of a moribund thread's omInUseList to 1087 // a global gOmInUseList under the global list lock so these 1088 // will continue to be scanned. 1089 // 1090 // We currently call omFlush() from the Thread:: dtor _after the thread 1091 // has been excised from the thread list and is no longer a mutator. 1092 // That means that omFlush() can run concurrently with a safepoint and 1093 // the scavenge operator. Calling omFlush() from JavaThread::exit() might 1094 // be a better choice as we could safely reason that that the JVM is 1095 // not at a safepoint at the time of the call, and thus there could 1096 // be not inopportune interleavings between omFlush() and the scavenge 1097 // operator. 1098 1099 void ObjectSynchronizer::omFlush(Thread * Self) { 1100 ObjectMonitor * List = Self->omFreeList; // Null-terminated SLL 1101 Self->omFreeList = NULL; 1102 ObjectMonitor * Tail = NULL; 1103 int Tally = 0; 1104 if (List != NULL) { 1105 ObjectMonitor * s; 1106 for (s = List; s != NULL; s = s->FreeNext) { 1107 Tally++; 1108 Tail = s; 1109 guarantee(s->object() == NULL, "invariant"); 1110 guarantee(!s->is_busy(), "invariant"); 1111 s->set_owner(NULL); // redundant but good hygiene 1112 TEVENT(omFlush - Move one); 1113 } 1114 guarantee(Tail != NULL && List != NULL, "invariant"); 1115 } 1116 1117 ObjectMonitor * InUseList = Self->omInUseList; 1118 ObjectMonitor * InUseTail = NULL; 1119 int InUseTally = 0; 1120 if (InUseList != NULL) { 1121 Self->omInUseList = NULL; 1122 ObjectMonitor *curom; 1123 for (curom = InUseList; curom != NULL; curom = curom->FreeNext) { 1124 InUseTail = curom; 1125 InUseTally++; 1126 } 1127 assert(Self->omInUseCount == InUseTally, "inuse count off"); 1128 Self->omInUseCount = 0; 1129 guarantee(InUseTail != NULL && InUseList != NULL, "invariant"); 1130 } 1131 1132 Thread::muxAcquire(&ListLock, "omFlush"); 1133 if (Tail != NULL) { 1134 Tail->FreeNext = gFreeList; 1135 gFreeList = List; 1136 MonitorFreeCount += Tally; 1137 } 1138 1139 if (InUseTail != NULL) { 1140 InUseTail->FreeNext = gOmInUseList; 1141 gOmInUseList = InUseList; 1142 gOmInUseCount += InUseTally; 1143 } 1144 1145 Thread::muxRelease(&ListLock); 1146 TEVENT(omFlush); 1147 } 1148 1149 // Fast path code shared by multiple functions 1150 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) { 1151 markOop mark = obj->mark(); 1152 if (mark->has_monitor()) { 1153 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid"); 1154 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header"); 1155 return mark->monitor(); 1156 } 1157 return ObjectSynchronizer::inflate(Thread::current(), obj); 1158 } 1159 1160 1161 ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self, 1162 oop object) { 1163 // Inflate mutates the heap ... 1164 // Relaxing assertion for bug 6320749. 1165 assert(Universe::verify_in_progress() || 1166 !SafepointSynchronize::is_at_safepoint(), "invariant"); 1167 1168 for (;;) { 1169 const markOop mark = object->mark(); 1170 assert(!mark->has_bias_pattern(), "invariant"); 1171 1172 // The mark can be in one of the following states: 1173 // * Inflated - just return 1174 // * Stack-locked - coerce it to inflated 1175 // * INFLATING - busy wait for conversion to complete 1176 // * Neutral - aggressively inflate the object. 1177 // * BIASED - Illegal. We should never see this 1178 1179 // CASE: inflated 1180 if (mark->has_monitor()) { 1181 ObjectMonitor * inf = mark->monitor(); 1182 assert(inf->header()->is_neutral(), "invariant"); 1183 assert(inf->object() == object, "invariant"); 1184 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid"); 1185 return inf; 1186 } 1187 1188 // CASE: inflation in progress - inflating over a stack-lock. 1189 // Some other thread is converting from stack-locked to inflated. 1190 // Only that thread can complete inflation -- other threads must wait. 1191 // The INFLATING value is transient. 1192 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish. 1193 // We could always eliminate polling by parking the thread on some auxiliary list. 1194 if (mark == markOopDesc::INFLATING()) { 1195 TEVENT(Inflate: spin while INFLATING); 1196 ReadStableMark(object); 1197 continue; 1198 } 1199 1200 // CASE: stack-locked 1201 // Could be stack-locked either by this thread or by some other thread. 1202 // 1203 // Note that we allocate the objectmonitor speculatively, _before_ attempting 1204 // to install INFLATING into the mark word. We originally installed INFLATING, 1205 // allocated the objectmonitor, and then finally STed the address of the 1206 // objectmonitor into the mark. This was correct, but artificially lengthened 1207 // the interval in which INFLATED appeared in the mark, thus increasing 1208 // the odds of inflation contention. 1209 // 1210 // We now use per-thread private objectmonitor free lists. 1211 // These list are reprovisioned from the global free list outside the 1212 // critical INFLATING...ST interval. A thread can transfer 1213 // multiple objectmonitors en-mass from the global free list to its local free list. 1214 // This reduces coherency traffic and lock contention on the global free list. 1215 // Using such local free lists, it doesn't matter if the omAlloc() call appears 1216 // before or after the CAS(INFLATING) operation. 1217 // See the comments in omAlloc(). 1218 1219 if (mark->has_locker()) { 1220 ObjectMonitor * m = omAlloc(Self); 1221 // Optimistically prepare the objectmonitor - anticipate successful CAS 1222 // We do this before the CAS in order to minimize the length of time 1223 // in which INFLATING appears in the mark. 1224 m->Recycle(); 1225 m->_Responsible = NULL; 1226 m->_recursions = 0; 1227 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class 1228 1229 markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark); 1230 if (cmp != mark) { 1231 omRelease(Self, m, true); 1232 continue; // Interference -- just retry 1233 } 1234 1235 // We've successfully installed INFLATING (0) into the mark-word. 1236 // This is the only case where 0 will appear in a mark-work. 1237 // Only the singular thread that successfully swings the mark-word 1238 // to 0 can perform (or more precisely, complete) inflation. 1239 // 1240 // Why do we CAS a 0 into the mark-word instead of just CASing the 1241 // mark-word from the stack-locked value directly to the new inflated state? 1242 // Consider what happens when a thread unlocks a stack-locked object. 1243 // It attempts to use CAS to swing the displaced header value from the 1244 // on-stack basiclock back into the object header. Recall also that the 1245 // header value (hashcode, etc) can reside in (a) the object header, or 1246 // (b) a displaced header associated with the stack-lock, or (c) a displaced 1247 // header in an objectMonitor. The inflate() routine must copy the header 1248 // value from the basiclock on the owner's stack to the objectMonitor, all 1249 // the while preserving the hashCode stability invariants. If the owner 1250 // decides to release the lock while the value is 0, the unlock will fail 1251 // and control will eventually pass from slow_exit() to inflate. The owner 1252 // will then spin, waiting for the 0 value to disappear. Put another way, 1253 // the 0 causes the owner to stall if the owner happens to try to 1254 // drop the lock (restoring the header from the basiclock to the object) 1255 // while inflation is in-progress. This protocol avoids races that might 1256 // would otherwise permit hashCode values to change or "flicker" for an object. 1257 // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable. 1258 // 0 serves as a "BUSY" inflate-in-progress indicator. 1259 1260 1261 // fetch the displaced mark from the owner's stack. 1262 // The owner can't die or unwind past the lock while our INFLATING 1263 // object is in the mark. Furthermore the owner can't complete 1264 // an unlock on the object, either. 1265 markOop dmw = mark->displaced_mark_helper(); 1266 assert(dmw->is_neutral(), "invariant"); 1267 1268 // Setup monitor fields to proper values -- prepare the monitor 1269 m->set_header(dmw); 1270 1271 // Optimization: if the mark->locker stack address is associated 1272 // with this thread we could simply set m->_owner = Self. 1273 // Note that a thread can inflate an object 1274 // that it has stack-locked -- as might happen in wait() -- directly 1275 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. 1276 m->set_owner(mark->locker()); 1277 m->set_object(object); 1278 // TODO-FIXME: assert BasicLock->dhw != 0. 1279 1280 // Must preserve store ordering. The monitor state must 1281 // be stable at the time of publishing the monitor address. 1282 guarantee(object->mark() == markOopDesc::INFLATING(), "invariant"); 1283 object->release_set_mark(markOopDesc::encode(m)); 1284 1285 // Hopefully the performance counters are allocated on distinct cache lines 1286 // to avoid false sharing on MP systems ... 1287 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc(); 1288 TEVENT(Inflate: overwrite stacklock); 1289 if (TraceMonitorInflation) { 1290 if (object->is_instance()) { 1291 ResourceMark rm; 1292 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", 1293 (void *) object, (intptr_t) object->mark(), 1294 object->klass()->external_name()); 1295 } 1296 } 1297 return m; 1298 } 1299 1300 // CASE: neutral 1301 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. 1302 // If we know we're inflating for entry it's better to inflate by swinging a 1303 // pre-locked objectMonitor pointer into the object header. A successful 1304 // CAS inflates the object *and* confers ownership to the inflating thread. 1305 // In the current implementation we use a 2-step mechanism where we CAS() 1306 // to inflate and then CAS() again to try to swing _owner from NULL to Self. 1307 // An inflateTry() method that we could call from fast_enter() and slow_enter() 1308 // would be useful. 1309 1310 assert(mark->is_neutral(), "invariant"); 1311 ObjectMonitor * m = omAlloc(Self); 1312 // prepare m for installation - set monitor to initial state 1313 m->Recycle(); 1314 m->set_header(mark); 1315 m->set_owner(NULL); 1316 m->set_object(object); 1317 m->_recursions = 0; 1318 m->_Responsible = NULL; 1319 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class 1320 1321 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) { 1322 m->set_object(NULL); 1323 m->set_owner(NULL); 1324 m->Recycle(); 1325 omRelease(Self, m, true); 1326 m = NULL; 1327 continue; 1328 // interference - the markword changed - just retry. 1329 // The state-transitions are one-way, so there's no chance of 1330 // live-lock -- "Inflated" is an absorbing state. 1331 } 1332 1333 // Hopefully the performance counters are allocated on distinct 1334 // cache lines to avoid false sharing on MP systems ... 1335 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc(); 1336 TEVENT(Inflate: overwrite neutral); 1337 if (TraceMonitorInflation) { 1338 if (object->is_instance()) { 1339 ResourceMark rm; 1340 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", 1341 (void *) object, (intptr_t) object->mark(), 1342 object->klass()->external_name()); 1343 } 1344 } 1345 return m; 1346 } 1347 } 1348 1349 1350 // Deflate_idle_monitors() is called at all safepoints, immediately 1351 // after all mutators are stopped, but before any objects have moved. 1352 // It traverses the list of known monitors, deflating where possible. 1353 // The scavenged monitor are returned to the monitor free list. 1354 // 1355 // Beware that we scavenge at *every* stop-the-world point. 1356 // Having a large number of monitors in-circulation negatively 1357 // impacts the performance of some applications (e.g., PointBase). 1358 // Broadly, we want to minimize the # of monitors in circulation. 1359 // 1360 // We have added a flag, MonitorInUseLists, which creates a list 1361 // of active monitors for each thread. deflate_idle_monitors() 1362 // only scans the per-thread inuse lists. omAlloc() puts all 1363 // assigned monitors on the per-thread list. deflate_idle_monitors() 1364 // returns the non-busy monitors to the global free list. 1365 // When a thread dies, omFlush() adds the list of active monitors for 1366 // that thread to a global gOmInUseList acquiring the 1367 // global list lock. deflate_idle_monitors() acquires the global 1368 // list lock to scan for non-busy monitors to the global free list. 1369 // An alternative could have used a single global inuse list. The 1370 // downside would have been the additional cost of acquiring the global list lock 1371 // for every omAlloc(). 1372 // 1373 // Perversely, the heap size -- and thus the STW safepoint rate -- 1374 // typically drives the scavenge rate. Large heaps can mean infrequent GC, 1375 // which in turn can mean large(r) numbers of objectmonitors in circulation. 1376 // This is an unfortunate aspect of this design. 1377 1378 enum ManifestConstants { 1379 ClearResponsibleAtSTW = 0, 1380 MaximumRecheckInterval = 1000 1381 }; 1382 1383 // Deflate a single monitor if not in use 1384 // Return true if deflated, false if in use 1385 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj, 1386 ObjectMonitor** freeHeadp, 1387 ObjectMonitor** freeTailp) { 1388 bool deflated; 1389 // Normal case ... The monitor is associated with obj. 1390 guarantee(obj->mark() == markOopDesc::encode(mid), "invariant"); 1391 guarantee(mid == obj->mark()->monitor(), "invariant"); 1392 guarantee(mid->header()->is_neutral(), "invariant"); 1393 1394 if (mid->is_busy()) { 1395 if (ClearResponsibleAtSTW) mid->_Responsible = NULL; 1396 deflated = false; 1397 } else { 1398 // Deflate the monitor if it is no longer being used 1399 // It's idle - scavenge and return to the global free list 1400 // plain old deflation ... 1401 TEVENT(deflate_idle_monitors - scavenge1); 1402 if (TraceMonitorInflation) { 1403 if (obj->is_instance()) { 1404 ResourceMark rm; 1405 tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", 1406 (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name()); 1407 } 1408 } 1409 1410 // Restore the header back to obj 1411 obj->release_set_mark(mid->header()); 1412 mid->clear(); 1413 1414 assert(mid->object() == NULL, "invariant"); 1415 1416 // Move the object to the working free list defined by FreeHead,FreeTail. 1417 if (*freeHeadp == NULL) *freeHeadp = mid; 1418 if (*freeTailp != NULL) { 1419 ObjectMonitor * prevtail = *freeTailp; 1420 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK 1421 prevtail->FreeNext = mid; 1422 } 1423 *freeTailp = mid; 1424 deflated = true; 1425 } 1426 return deflated; 1427 } 1428 1429 // Caller acquires ListLock 1430 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp, 1431 ObjectMonitor** freeHeadp, 1432 ObjectMonitor** freeTailp) { 1433 ObjectMonitor* mid; 1434 ObjectMonitor* next; 1435 ObjectMonitor* curmidinuse = NULL; 1436 int deflatedcount = 0; 1437 1438 for (mid = *listheadp; mid != NULL;) { 1439 oop obj = (oop) mid->object(); 1440 bool deflated = false; 1441 if (obj != NULL) { 1442 deflated = deflate_monitor(mid, obj, freeHeadp, freeTailp); 1443 } 1444 if (deflated) { 1445 // extract from per-thread in-use-list 1446 if (mid == *listheadp) { 1447 *listheadp = mid->FreeNext; 1448 } else if (curmidinuse != NULL) { 1449 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist 1450 } 1451 next = mid->FreeNext; 1452 mid->FreeNext = NULL; // This mid is current tail in the FreeHead list 1453 mid = next; 1454 deflatedcount++; 1455 } else { 1456 curmidinuse = mid; 1457 mid = mid->FreeNext; 1458 } 1459 } 1460 return deflatedcount; 1461 } 1462 1463 void ObjectSynchronizer::deflate_idle_monitors() { 1464 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 1465 int nInuse = 0; // currently associated with objects 1466 int nInCirculation = 0; // extant 1467 int nScavenged = 0; // reclaimed 1468 bool deflated = false; 1469 1470 ObjectMonitor * FreeHead = NULL; // Local SLL of scavenged monitors 1471 ObjectMonitor * FreeTail = NULL; 1472 1473 TEVENT(deflate_idle_monitors); 1474 // Prevent omFlush from changing mids in Thread dtor's during deflation 1475 // And in case the vm thread is acquiring a lock during a safepoint 1476 // See e.g. 6320749 1477 Thread::muxAcquire(&ListLock, "scavenge - return"); 1478 1479 if (MonitorInUseLists) { 1480 int inUse = 0; 1481 for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) { 1482 nInCirculation+= cur->omInUseCount; 1483 int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail); 1484 cur->omInUseCount-= deflatedcount; 1485 if (ObjectMonitor::Knob_VerifyInUse) { 1486 verifyInUse(cur); 1487 } 1488 nScavenged += deflatedcount; 1489 nInuse += cur->omInUseCount; 1490 } 1491 1492 // For moribund threads, scan gOmInUseList 1493 if (gOmInUseList) { 1494 nInCirculation += gOmInUseCount; 1495 int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail); 1496 gOmInUseCount-= deflatedcount; 1497 nScavenged += deflatedcount; 1498 nInuse += gOmInUseCount; 1499 } 1500 1501 } else for (PaddedEnd<ObjectMonitor> * block = 1502 (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL; 1503 block = (PaddedEnd<ObjectMonitor> *)next(block)) { 1504 // Iterate over all extant monitors - Scavenge all idle monitors. 1505 assert(block->object() == CHAINMARKER, "must be a block header"); 1506 nInCirculation += _BLOCKSIZE; 1507 for (int i = 1; i < _BLOCKSIZE; i++) { 1508 ObjectMonitor* mid = (ObjectMonitor*)&block[i]; 1509 oop obj = (oop) mid->object(); 1510 1511 if (obj == NULL) { 1512 // The monitor is not associated with an object. 1513 // The monitor should either be a thread-specific private 1514 // free list or the global free list. 1515 // obj == NULL IMPLIES mid->is_busy() == 0 1516 guarantee(!mid->is_busy(), "invariant"); 1517 continue; 1518 } 1519 deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail); 1520 1521 if (deflated) { 1522 mid->FreeNext = NULL; 1523 nScavenged++; 1524 } else { 1525 nInuse++; 1526 } 1527 } 1528 } 1529 1530 MonitorFreeCount += nScavenged; 1531 1532 // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree. 1533 1534 if (ObjectMonitor::Knob_Verbose) { 1535 ::printf("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n", 1536 nInCirculation, nInuse, nScavenged, ForceMonitorScavenge, 1537 MonitorPopulation, MonitorFreeCount); 1538 ::fflush(stdout); 1539 } 1540 1541 ForceMonitorScavenge = 0; // Reset 1542 1543 // Move the scavenged monitors back to the global free list. 1544 if (FreeHead != NULL) { 1545 guarantee(FreeTail != NULL && nScavenged > 0, "invariant"); 1546 assert(FreeTail->FreeNext == NULL, "invariant"); 1547 // constant-time list splice - prepend scavenged segment to gFreeList 1548 FreeTail->FreeNext = gFreeList; 1549 gFreeList = FreeHead; 1550 } 1551 Thread::muxRelease(&ListLock); 1552 1553 if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged); 1554 if (ObjectMonitor::_sync_MonExtant != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation); 1555 1556 // TODO: Add objectMonitor leak detection. 1557 // Audit/inventory the objectMonitors -- make sure they're all accounted for. 1558 GVars.stwRandom = os::random(); 1559 GVars.stwCycle++; 1560 } 1561 1562 // Monitor cleanup on JavaThread::exit 1563 1564 // Iterate through monitor cache and attempt to release thread's monitors 1565 // Gives up on a particular monitor if an exception occurs, but continues 1566 // the overall iteration, swallowing the exception. 1567 class ReleaseJavaMonitorsClosure: public MonitorClosure { 1568 private: 1569 TRAPS; 1570 1571 public: 1572 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {} 1573 void do_monitor(ObjectMonitor* mid) { 1574 if (mid->owner() == THREAD) { 1575 (void)mid->complete_exit(CHECK); 1576 } 1577 } 1578 }; 1579 1580 // Release all inflated monitors owned by THREAD. Lightweight monitors are 1581 // ignored. This is meant to be called during JNI thread detach which assumes 1582 // all remaining monitors are heavyweight. All exceptions are swallowed. 1583 // Scanning the extant monitor list can be time consuming. 1584 // A simple optimization is to add a per-thread flag that indicates a thread 1585 // called jni_monitorenter() during its lifetime. 1586 // 1587 // Instead of No_Savepoint_Verifier it might be cheaper to 1588 // use an idiom of the form: 1589 // auto int tmp = SafepointSynchronize::_safepoint_counter ; 1590 // <code that must not run at safepoint> 1591 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ; 1592 // Since the tests are extremely cheap we could leave them enabled 1593 // for normal product builds. 1594 1595 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) { 1596 assert(THREAD == JavaThread::current(), "must be current Java thread"); 1597 No_Safepoint_Verifier nsv; 1598 ReleaseJavaMonitorsClosure rjmc(THREAD); 1599 Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread"); 1600 ObjectSynchronizer::monitors_iterate(&rjmc); 1601 Thread::muxRelease(&ListLock); 1602 THREAD->clear_pending_exception(); 1603 } 1604 1605 //------------------------------------------------------------------------------ 1606 // Debugging code 1607 1608 void ObjectSynchronizer::sanity_checks(const bool verbose, 1609 const uint cache_line_size, 1610 int *error_cnt_ptr, 1611 int *warning_cnt_ptr) { 1612 u_char *addr_begin = (u_char*)&GVars; 1613 u_char *addr_stwRandom = (u_char*)&GVars.stwRandom; 1614 u_char *addr_hcSequence = (u_char*)&GVars.hcSequence; 1615 1616 if (verbose) { 1617 tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT, 1618 sizeof(SharedGlobals)); 1619 } 1620 1621 uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin); 1622 if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom); 1623 1624 uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin); 1625 if (verbose) { 1626 tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence); 1627 } 1628 1629 if (cache_line_size != 0) { 1630 // We were able to determine the L1 data cache line size so 1631 // do some cache line specific sanity checks 1632 1633 if (offset_stwRandom < cache_line_size) { 1634 tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer " 1635 "to the struct beginning than a cache line which permits " 1636 "false sharing."); 1637 (*warning_cnt_ptr)++; 1638 } 1639 1640 if ((offset_hcSequence - offset_stwRandom) < cache_line_size) { 1641 tty->print_cr("WARNING: the SharedGlobals.stwRandom and " 1642 "SharedGlobals.hcSequence fields are closer than a cache " 1643 "line which permits false sharing."); 1644 (*warning_cnt_ptr)++; 1645 } 1646 1647 if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) { 1648 tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer " 1649 "to the struct end than a cache line which permits false " 1650 "sharing."); 1651 (*warning_cnt_ptr)++; 1652 } 1653 } 1654 } 1655 1656 #ifndef PRODUCT 1657 1658 // Verify all monitors in the monitor cache, the verification is weak. 1659 void ObjectSynchronizer::verify() { 1660 PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList; 1661 ObjectMonitor* mid; 1662 while (block) { 1663 assert(block->object() == CHAINMARKER, "must be a block header"); 1664 for (int i = 1; i < _BLOCKSIZE; i++) { 1665 mid = (ObjectMonitor *)(block + i); 1666 oop object = (oop) mid->object(); 1667 if (object != NULL) { 1668 mid->verify(); 1669 } 1670 } 1671 block = (PaddedEnd<ObjectMonitor> *) block->FreeNext; 1672 } 1673 } 1674 1675 // Check if monitor belongs to the monitor cache 1676 // The list is grow-only so it's *relatively* safe to traverse 1677 // the list of extant blocks without taking a lock. 1678 1679 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { 1680 PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList; 1681 1682 while (block) { 1683 assert(block->object() == CHAINMARKER, "must be a block header"); 1684 if (monitor > (ObjectMonitor *)&block[0] && 1685 monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) { 1686 address mon = (address) monitor; 1687 address blk = (address) block; 1688 size_t diff = mon - blk; 1689 assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "check"); 1690 return 1; 1691 } 1692 block = (PaddedEnd<ObjectMonitor> *) block->FreeNext; 1693 } 1694 return 0; 1695 } 1696 1697 #endif