    unsigned t = self->_hashStateX;
    t ^= (t << 11);
    self->_hashStateX = self->_hashStateY;
    self->_hashStateY = self->_hashStateZ;
    self->_hashStateZ = self->_hashStateW;
    unsigned v = self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    self->_hashStateW = v;
    value = v;
  }

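  // Mask the value down to the mark word's hash field; zero is reserved to
  // mean "no hash", so an arbitrary nonzero substitute (0xBAD) is used instead.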
  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}
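// (For reference: the state update above is Marsaglia's xorshift128. A
// minimal standalone sketch, assuming four nonzero 32-bit seeds x, y, z, w:
//   unsigned t = x;  t ^= t << 11;
//   x = y;  y = z;  z = w;
//   w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));  // w is the next pseudo-random value
// The masking against markWord::hash_mask is HotSpot-specific.)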

intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here. However, we only ever bias Java instances and all
    // of the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }
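  // (Revocation matters here because biased locking reuses the mark word
  // bits that would otherwise hold the identity hash, so a hash cannot be
  // installed while the mark still carries a bias pattern.)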

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

// ...

  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->om_in_use_list, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  // The oops_do() phase does not overlap with monitor deflation
  // so no need to lock ObjectMonitors for the list traversal.
  for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from om_list_globals._free_list or a per-thread
// free list and associates them with objects. Async deflation disassociates
// idle monitors from objects. Such scavenged monitors are returned to the
// om_list_globals._free_list.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// -- unassigned and on the om_list_globals._free_list
// -- unassigned and on a per-thread free list
// -- assigned to an object. The object is inflated and the mark refers
//    to the ObjectMonitor.

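// (Informal transition sketch: om_alloc() moves a monitor from a free list
// onto the allocating thread's in-use list; deflation later moves it from an
// in-use list back to the global free list.)
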
ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  NoSafepointVerifier nsv;

  for (;;) {
    ObjectMonitor* m;

    // 1: try to allocate from the thread's local om_free_list.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the
    // thread attempt to instantiate new monitors. Thread-local free lists
    // improve allocation latency, as well as reducing coherency traffic
    // on the shared global list.
    m = take_from_start_of_om_free_list(self);
    if (m != NULL) {
      guarantee(m->object() == NULL, "invariant");
      m->set_allocation_state(ObjectMonitor::New);
      prepend_to_om_in_use_list(self, m);
      return m;

      // ...

      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      for (int i = self->om_free_provision; --i >= 0;) {
        ObjectMonitor* take = take_from_start_of_global_free_list();
        if (take == NULL) {
          break;  // No more are available.
        }
        guarantee(take->object() == NULL, "invariant");
        // We allowed 3 field values to linger during async deflation.
        // Clear or restore them as appropriate.
        take->set_header(markWord::zero());
        // DEFLATER_MARKER is the only non-NULL value we should see here.
        take->try_set_owner_from(DEFLATER_MARKER, NULL);
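        // (Context: during async deflation the deflater subtracts max_jint
        // from contentions, driving it negative so racing enters back off;
        // e.g. a count of 2 lingers as 2 - max_jint until restored below.)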
        if (take->contentions() < 0) {
          // Add back max_jint to restore the contentions field to its
          // proper value.
          take->add_to_contentions(max_jint);

#ifdef ASSERT
          jint l_contentions = take->contentions();
          assert(l_contentions >= 0, "must not be negative: l_contentions=%d, contentions=%d",
                 l_contentions, take->contentions());
#endif
        }
        take->Recycle();
        // Since we're taking from the global free-list, take must be Free.
        // om_release() also sets the allocation state to Free because it
        // is called from other code paths.
        assert(take->is_free(), "invariant");
        om_release(self, take, false);
      }
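      // Grow the refill quota geometrically (p becomes p + 1 + p/2, roughly
      // 1.5x per pass) so heavily allocating threads make ever fewer trips
      // to the global list, with MAXPRIVATE as the cap.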
      self->om_free_provision += 1 + (self->om_free_provision / 2);
      if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation ObjectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
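    // (A sketch of the alignment idea only, not the elided code; CACHE_LINE
    // stands in for the platform cache line size:
    //   char* raw = (char*)os::malloc(block_size + CACHE_LINE, mtInternal);
    //   ObjectMonitor* blk = (ObjectMonitor*)align_up(raw, CACHE_LINE);
    // i.e. over-allocate by one line and round the base up, wasting at most
    // one cache line per block.)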

// ...

        }
        // Refetch the possibly changed next field and try again.
        cur_om = unmarked_next(in_use_tail);
        continue;
      }
      if (cur_om->object() == NULL) {
        // cur_om was deflated and the object ref was cleared while it
        // was locked. We happened to see it just after it was unlocked
        // (and added to the free list). Refetch the possibly changed
        // next field and try again.
        cur_om = unmarked_next(in_use_tail);
        continue;
      }
      in_use_tail = cur_om;
      in_use_count++;
      cur_om = unmarked_next(cur_om);
    }
    guarantee(in_use_tail != NULL, "invariant");
#ifdef ASSERT
    int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
    assert(l_om_in_use_count == in_use_count, "in-use counts don't match: "
           "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
#endif
    Atomic::store(&self->om_in_use_count, 0);
    // Clear the in-use list head (which also unlocks it):
    Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
    om_unlock(in_use_list);
  }

  int free_count = 0;
  ObjectMonitor* free_list = NULL;
  ObjectMonitor* free_tail = NULL;
  // This function can race with a list walker thread so we lock the
  // list head to prevent confusion.
  if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) {
    // At this point, we have locked the free list head so a racing
    // thread cannot come in after us. However, a racing thread could
    // be ahead of us; we'll detect that and delay to let it finish.
    //
    // The thread is going away. Set 'free_tail' to the last per-thread free
    // monitor which will be linked to om_list_globals._free_list below.
    //
    // Account for the free list head before the loop since it is
    // already locked (and use it):
    free_tail = free_list;
    free_count++;
    for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) {
      if (is_locked(s)) {
        // s is locked so there must be a racing walker thread ahead
        // of us so we'll give it a chance to finish.
        while (is_locked(s)) {
          os::naked_short_sleep(1);
        }
      }
      free_tail = s;
      free_count++;
      guarantee(s->object() == NULL, "invariant");
      if (s->is_busy()) {
        stringStream ss;
        fatal("must be !is_busy: %s", s->is_busy_to_string(&ss));
      }
    }
    guarantee(free_tail != NULL, "invariant");
#ifdef ASSERT
    int l_om_free_count = Atomic::load(&self->om_free_count);
    assert(l_om_free_count == free_count, "free counts don't match: "
           "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
#endif
    Atomic::store(&self->om_free_count, 0);
    Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
    om_unlock(free_list);
  }

  if (free_tail != NULL) {
    prepend_list_to_global_free_list(free_list, free_tail, free_count);
  }

  if (in_use_tail != NULL) {
    prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
  }

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((free_count != 0 || in_use_count != 0) &&
             log_is_enabled(Info, monitorinflation)) {

// ...

  mid->clear_common();

  assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
         p2i(mid->object()));
  assert(mid->is_free(), "must be free: allocation_state=%d",
         (int)mid->allocation_state());

  // Move the deflated ObjectMonitor to the working free list
  // defined by free_head_p and free_tail_p.
  if (*free_head_p == NULL) {
    // First one on the list.
    *free_head_p = mid;
  }
  if (*free_tail_p != NULL) {
    // We append to the list so the caller can use mid->_next_om
    // to fix the linkages in its context.
    ObjectMonitor* prevtail = *free_tail_p;
    // prevtail should have been cleaned up by the caller:
#ifdef ASSERT
    ObjectMonitor* l_next_om = unmarked_next(prevtail);
    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
#endif
    om_lock(prevtail);
    prevtail->set_next_om(mid);  // prevtail now points to mid (and is unlocked)
  }
  *free_tail_p = mid;

  // At this point, mid->_next_om still refers to its current
  // value and another ObjectMonitor's _next_om field still
  // refers to this ObjectMonitor. Those linkages have to be
  // cleaned up by the caller who has the complete context.

  // We leave owner == DEFLATER_MARKER and contentions < 0
  // to force any racing threads to retry.
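  // (A racing ObjectMonitor::enter() that observes those values treats the
  // monitor as being deflated, backs off, and re-runs the inflate/enter
  // path against the object's current mark word.)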
  return true;  // Success, ObjectMonitor has been deflated.
}

// Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
// a JavaThread. Returns the number of deflated ObjectMonitors. The given
// list could be a per-thread in-use list or the global in-use list.
// If a safepoint has started, then we save state via saved_mid_in_use_p
// and return to the caller to honor the safepoint.

// ...

    local_deflated_count =
        deflate_monitor_list_using_JT(&target->om_in_use_list,
                                      &target->om_in_use_count, &free_head_p,
                                      &free_tail_p, &saved_mid_in_use_p);
  }
  deflated_count += local_deflated_count;

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors to the global free list.
    guarantee(free_tail_p != NULL && local_deflated_count > 0,
              "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d",
              p2i(free_tail_p), local_deflated_count);
    // Note: The target thread can be doing an om_alloc() that
    // is trying to prepend an ObjectMonitor on its in-use list
    // at the same time that we have deflated the current in-use
    // list head and put it on the local free list. prepend_to_common()
    // will detect the race and retry which avoids list corruption,
    // but the next field in free_tail_p can flicker to marked
    // and then unmarked while prepend_to_common() is sorting it
    // all out.
#ifdef ASSERT
    ObjectMonitor* l_next_om = unmarked_next(free_tail_p);
    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
#endif

    prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);

    OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
  }

  if (saved_mid_in_use_p != NULL) {
    // deflate_monitor_list_using_JT() detected a safepoint starting.
    timer.stop();
    {
      if (is_global) {
        log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
      } else {
        log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
      }
      assert(SafepointMechanism::should_block(self), "sanity check");
      ThreadBlockInVM blocker(self);
    }
    // Prepare for another loop after the safepoint.
    free_head_p = NULL;

// ...

class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
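  // TRAPS expands to the exception-macro parameter declaration
  // (Thread* THREAD); the CHECK macro in do_monitor() uses that THREAD to
  // return early if complete_exit() left a pending exception.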
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.
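// (The guarantee works on the assumption that the counter is incremented at
// both safepoint begin and end, so it is odd while a safepoint is in
// progress: a nonzero xor means the counter moved, and a set low bit means
// the region started inside a safepoint.)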

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  ObjectSynchronizer::monitors_iterate(&rjmc);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal: return "VM Internal";
    case inflate_cause_monitor_enter: return "Monitor Enter";
    case inflate_cause_wait: return "Monitor Wait";