/*
 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "trace/traceMacros.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
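//
// Illustrative use of the probe macros defined below (a sketch; the call
// sites in quick_notify() and wait() are the authoritative examples):
//
//   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
//   DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
//
// Each macro checks DTraceMonitorProbes and uses DTRACE_MONITOR_PROBE_COMMON
// to extract the thread id and the class name bytes before firing the probe.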

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
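//
// A hypothetical caller, following the contract above (illustration only;
// not a call site in this file):
//
//   if (!ObjectSynchronizer::quick_notify(obj, self, all)) {
//     // transition out of _thread_in_Java and take the slow path,
//     // e.g. ObjectSynchronizer::notify() / notifyall()
//   }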
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers.
    // For performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markOopDesc::unused_mark());

    if (owner == NULL &&
        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// some assembly copies of this code. Make sure to update that code
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark->monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stack-lock);
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_monitor_enter)->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavyweight
// monitor should be ok, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to work around deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see the comment for notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;     // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().
        // When inflation was complete, the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same RNG output.
    // On an MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
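    // The update below is a xorshift step over the per-thread state
    // (_hashStateX .. _hashStateW); in outline:
    //   t = x ^ (x << 11); x = y; y = z; z = w;
    //   w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));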
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    //   The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge hash code into header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
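// A hypothetical call site (illustration only; not code from this file):
//
//   ObjectSynchronizer::LockOwnership owned =
//     ObjectSynchronizer::query_lock_ownership(self, h_obj);
//   if (owned == owner_self) {
//     // the calling thread holds the lock on h_obj()
//   }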
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Get the next block in the block list.
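// Blocks are chained through element [0] of each block, whose object() field
// is set to CHAINMARKER and whose FreeNext points to the next block (see
// omAlloc() below). A sketch of a full traversal, modeled on global_oops_do():
//
//   PaddedEnd<ObjectMonitor>* block =
//     (PaddedEnd<ObjectMonitor>*)OrderAccess::load_ptr_acquire(&gBlockList);
//   for (; block != NULL; block = (PaddedEnd<ObjectMonitor>*)next(block)) {
//     // elements [1 .. _BLOCKSIZE-1] are ordinary monitors
//   }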
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}

bool ObjectSynchronizer::is_cleanup_needed() {
  int monitors_used = gMonitorPopulation - gMonitorFreeCount;
  return MonitorUsedDeflationThreshold > 0 &&
         (monitors_used * 100LL) / gMonitorPopulation > MonitorUsedDeflationThreshold;
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  if (MonitorInUseLists) {
    // When using thread local monitor lists, we only scan the
    // global used list here (for moribund threads), and
    // the thread-local monitors in Thread::oops_do().
    global_used_oops_do(f);
  } else {
    global_oops_do(f);
  }
}

void ObjectSynchronizer::global_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->FreeNext) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg(1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - Induced STW @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be enqueued and posted
    // to the VMThread and have a lifespan longer than that of this activation record.
    // The VMThread will delete the op when completed.
    VMThread::execute(new VM_ScavengeMonitors());

    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.  Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
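    // Pop the head of the thread-local free list (a NULL-terminated SLL
    // linked through FreeNext). No lock is taken here; the list is private
    // to the owning thread.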
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list; each monitor points to its next,
    // forming the singly linked free list. The very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.
    // A better implementation would look like:
    //   class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand.  This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store_ptr(&gBlockList, temp);

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
    TEVENT(Allocate block of monitors);
  }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.
// We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from Threads::remove() _before the thread
// has been excised from the thread list and is no longer a mutator.
// This means that omFlush() can not run concurrently with a safepoint and
// interleave with the scavenge operator. In particular, this ensures that
// the thread's monitors are scanned by a GC safepoint, either via
// Thread::oops_do() (if safepoint happens before omFlush()) or via
// ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
// monitors have been transferred to the global in-use list).

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(tail != NULL && list != NULL, "invariant");
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
    assert(Self->omFreeCount == tally, "free-count off");
    Self->omFreeCount = 0;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {

  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word.  We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark.  This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval.  A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header.  Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor.  The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants.  If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate.  The owner
      // will then spin, waiting for the 0 value to disappear.  Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that might
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.
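      //
      // As a hedged illustration of that race (not the exact code in
      // fast_exit()/slow_exit()), the owner's stack-unlock attempts roughly:
      //
      //   markOop dhw = lock->displaced_header();   // lock is the owner's BasicLock
      //   Atomic::cmpxchg_ptr(dhw, object->mark_addr(), (markOop) lock);
      //
      // That CAS fails while the mark-word holds 0 (INFLATING != the lock address),
      // so the owner falls into the slow path, reaches inflate(), and waits in
      // ReadStableMark() until this thread publishes the ObjectMonitor below.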

      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      TEVENT(Inflate: overwrite stacklock);
      if (log_is_enabled(Debug, monitorinflation)) {
        if (object->is_instance()) {
          ResourceMark rm;
          log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                      p2i(object), p2i(object->mark()),
                                      object->klass()->external_name());
        }
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header.   A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.
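    //
    // A hedged sketch of what such an inflateTry() might look like (the field
    // and helper usage mirrors the neutral-case code below; inflateTry() itself
    // is hypothetical and not implemented in this file):
    //
    //   ObjectMonitor * m = omAlloc(Self);
    //   m->Recycle();
    //   m->set_header(mark);          // mark is the current neutral header
    //   m->set_owner(Self);           // pre-locked: a successful CAS confers ownership
    //   m->set_object(object);
    //   if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
    //     m->set_object(NULL); m->set_owner(NULL); m->Recycle();
    //     omRelease(Self, m, true);   // interference -- fall back to the 2-step path
    //   }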

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class

    if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    TEVENT(Inflate: overwrite neutral);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (object->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                    p2i(object), p2i(object->mark()),
                                    object->klass()->external_name());
      }
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(event, object, cause);
    }
    return m;
  }
}


// Deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread. deflate_idle_monitors()
// only scans the per-thread in-use lists. omAlloc() puts all
// assigned monitors on the per-thread list. deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to a global gOmInUseList, acquiring the
// global list lock. deflate_idle_monitors() acquires the global
// list lock to scan gOmInUseList as well, moving its non-busy
// monitors to the global free list.
// An alternative could have used a single global in-use list. The
// downside would have been the additional cost of acquiring the global list lock
// for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.
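
// For reference, "non-busy" here means ObjectMonitor::is_busy() is false, which
// (roughly -- see objectMonitor.hpp for the authoritative definition) reduces to:
//
//   _count == 0 && _waiters == 0 && _owner == NULL &&
//   _cxq == NULL && _EntryList == NULL
//
// Only such idle monitors are detached from their objects and returned to
// gFreeList by deflate_monitor() below.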

enum ManifestConstants {
  ClearResponsibleAtSTW = 0
};

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
    TEVENT(deflate_idle_monitors - scavenge1);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (obj->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
                                    "mark " INTPTR_FORMAT " , type %s",
                                    p2i(obj), p2i(obj->mark()),
                                    obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the deflated monitor to the working free list defined by freeHeadp, freeTailp
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors
// The given list could be a per-thread list or a global list
// Caller acquires gListLock
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // if deflate_monitor succeeded,
      // extract from per-thread in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0;              // currently associated with objects
  int nInCirculation = 0;      // extant
  int nScavenged = 0;          // reclaimed
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtor's during deflation
  // And in case the vm thread is acquiring a lock during a safepoint
  // See e.g. 6320749
  Thread::muxAcquire(&gListLock, "scavenge - return");

  if (MonitorInUseLists) {
    int inUse = 0;
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflated_count = deflate_monitor_list(cur->omInUseList_addr(), &freeHeadp, &freeTailp);
      cur->omInUseCount -= deflated_count;
      if (ObjectMonitor::Knob_VerifyInUse) {
        verifyInUse(cur);
      }
      nScavenged += deflated_count;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
      nScavenged += deflated_count;
      nInuse += gOmInUseCount;
    }

  } else {
    PaddedEnd<ObjectMonitor> * block =
      (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
    for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
      // Iterate over all extant monitors - Scavenge all idle monitors.
      assert(block->object() == CHAINMARKER, "must be a block header");
      nInCirculation += _BLOCKSIZE;
      for (int i = 1; i < _BLOCKSIZE; i++) {
        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
        oop obj = (oop)mid->object();

        if (obj == NULL) {
          // The monitor is not associated with an object.
          // The monitor should either be a thread-specific private
          // free list or the global free list.
          // obj == NULL IMPLIES mid->is_busy() == 0
          guarantee(!mid->is_busy(), "invariant");
          continue;
        }
        deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);

        if (deflated) {
          mid->FreeNext = NULL;
          nScavenged++;
        } else {
          nInuse++;
        }
      }
    }
  }

  gMonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that gMonitorFreeCount and list agree.

  if (ObjectMonitor::Knob_Verbose) {
    tty->print_cr("INFO: Deflate: InCirc=%d InUse=%d Scavenged=%d "
                  "ForceMonitorScavenge=%d : pop=%d free=%d",
                  nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
                  gMonitorPopulation, gMonitorFreeCount);
    tty->flush();
  }

  ForceMonitorScavenge = 0;    // Reset

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);

  OM_PERFDATA_OP(Deflations, inc(nScavenged));
  OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}

// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      if (ObjectMonitor::Knob_VerifyMatch != 0) {
        ResourceMark rm;
        Handle obj(THREAD, (oop) mid->object());
        tty->print("INFO: unexpected locked object:");
        javaVFrame::print_locked_object_class_name(tty, obj, "locked");
        fatal("exiting JavaThread=" INTPTR_FORMAT
              " unexpectedly owns ObjectMonitor=" INTPTR_FORMAT,
              p2i(THREAD), p2i(mid));
      }
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.
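//
// A hedged sketch of that flag-based shortcut (the field and accessor names
// are invented for illustration; no such flag exists in this code base):
//
//   // In JavaThread, set from jni_MonitorEnter():
//   //   bool _used_jni_monitors;
//   // Then, at the top of release_monitors_owned_by_thread():
//   if (!((JavaThread*)THREAD)->used_jni_monitors()) {
//     return;   // this thread never entered a JNI monitor -- skip the scan
//   }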

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

static void post_monitor_inflate_event(EventJavaMonitorInflate& event,
                                       const oop obj,
                                       const ObjectSynchronizer::InflateCause cause) {
#if INCLUDE_TRACE
  assert(event.should_commit(), "check outside");
  event.set_monitorClass(obj->klass());
  event.set_address((TYPE_ADDRESS)(uintptr_t)(void*)obj);
  event.set_cause((u1)cause);
  event.commit();
#endif
}

//------------------------------------------------------------------------------
// Debugging code

void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin      = (u_char*)&GVars;
  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }
  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
  if (verbose) {
    tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
  }

  if (cache_line_size != 0) {
    // We were able to determine the L1 data cache line size so
    // do some cache line specific sanity checks

    if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line which permits "
                    "false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line which permits false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > (ObjectMonitor *)&block[0] &&
        monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif