image.png

  • Synchronized 重量级锁阻塞时走 ParkEvent::park(),底层基于 pthread 互斥量与条件变量(pthread_mutex_lock / pthread_cond_wait)实现
  • ReentrantLock 中 LockSupport.park() 走 Parker::park(),底层同样基于 pthread_cond_wait 实现
  • 两者阻塞时都会陷入内核态,开销上区别不大。


objectMonitor.cpp

// Slow-path monitor acquisition for a Java "synchronized" block/method.
// Order of attempts: CAS the owner slot, reentrant-owner check, stack-lock
// (thin lock) upgrade, one optional spin round, and finally block the
// thread via EnterI() inside the suspend-equivalent loop below.
void ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD;
  void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
  // Fast path: the monitor was unowned (_owner == NULL) and the CAS above
  // installed Self as owner.  Acquisition here is unfair -- a newly arriving
  // thread can barge ahead of threads already queued.
  if (cur == NULL) {
    // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
    assert(_recursions == 0, "invariant");
    assert(_owner == Self, "invariant");
    return;
  }
  // Reentrant acquisition: Self already owns the monitor; just bump the
  // recursion count.
  if (cur == Self) {
    // TODO-FIXME: check for integer overflow! BUGID 6557169.
    _recursions++;
    return;
  }
  // _owner still holds an on-stack BasicLock address from a stack-locked
  // state belonging to this very thread: promote it to full ownership.
  if (Self->is_lock_owned ((address)cur)) {
    assert(_recursions == 0, "internal state error");
    _recursions = 1;
    // Commute owner from a thread-specific on-stack BasicLockObject address to
    // a full-fledged "Thread *".
    _owner = Self;
    return;
  }
  // We've encountered genuine contention.
  assert(Self->_Stalled == 0, "invariant");
  Self->_Stalled = intptr_t(this);
  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions. The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  // Adaptive spin: cheap busy-wait in the hope the current owner releases
  // the monitor before we pay for enqueueing and parking.
  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "invariant");
    assert(_recursions == 0, "invariant");
    assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
    Self->_Stalled = 0;
    return;
  }
  assert(_owner != Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(Self->is_Java_thread(), "invariant");
  JavaThread * jt = (JavaThread *) Self;
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(jt->thread_state() != _thread_blocked, "invariant");
  assert(this->object() != NULL, "invariant");
  assert(_count >= 0, "invariant");
  // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
  // Ensure the object-monitor relationship remains stable while there's contention.
  Atomic::inc(&_count);
  JFR_ONLY(JfrConditionalFlushWithStacktrace<EventJavaMonitorEnter> flush(jt);)
  EventJavaMonitorEnter event;
  if (event.should_commit()) {
    event.set_monitorClass(((oop)this->object())->klass());
    event.set_address((uintptr_t)(this->object_addr()));
  }
  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
    Self->set_current_pending_monitor(this);
    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);
      // The current thread does not yet own the monitor and does not
      // yet appear on any queues that would get it made the successor.
      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
      // handler cannot accidentally consume an unpark() meant for the
      // ParkEvent associated with this ObjectMonitor.
    }
    OSThreadContendState osts(Self->osthread());
    ThreadBlockInVM tbivm(jt);
    // TODO-FIXME: change the following for(;;) loop to straight-line code.
    // Loop: block in EnterI() until the monitor is acquired; if we were
    // suspended while waiting, release the monitor, honor the suspension,
    // and retry.
    for (;;) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()
      EnterI(THREAD);
      if (!ExitSuspendEquivalent(jt)) break;
      // We have acquired the contended monitor, but while we were
      // waiting another thread suspended us. We don't want to enter
      // the monitor while suspended because that would surprise the
      // thread that suspended us.
      //
      _recursions = 0;
      _succ = NULL;
      exit(false, Self);
      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);
    // We cleared the pending monitor info since we've just gotten past
    // the enter-check-for-suspend dance and we now own the monitor free
    // and clear, i.e., it is no longer pending. The ThreadBlockInVM
    // destructor can go to a safepoint at the end of this block. If we
    // do a thread dump during that safepoint, then this thread will show
    // as having "-locked" the monitor, but the OS and java.lang.Thread
    // states will still report that the thread is blocked trying to
    // acquire it.
  }
  Atomic::dec(&_count);
  assert(_count >= 0, "invariant");
  Self->_Stalled = 0;
  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert(_recursions == 0, "invariant");
  assert(_owner == Self, "invariant");
  assert(_succ != Self, "invariant");
  assert(((oop)(object()))->mark() == markOopDesc::encode(this), "invariant");
  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI,DTrace and jvmstat.
  // The probe effect is non-trivial. All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section. Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock. While spinning that thread could
  // spinning we could increment JVMStat counters, etc.
  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  if (JvmtiExport::should_post_monitor_contended_entered()) {
    JvmtiExport::post_monitor_contended_entered(jt, this);
    // The current thread already owns the monitor and is not going to
    // call park() for the remainder of the monitor enter protocol. So
    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
    // event handler consumed an unpark() issued by the thread that
    // just exited the monitor.
  }
  if (event.should_commit()) {
    event.set_previousOwner((uintptr_t)_previous_owner_tid);
    event.commit();
  }
  OM_PERFDATA_OP(ContendedLockAttempts, inc());
}

enter() 中自旋获取失败后调用的 EnterI 方法(将线程加入等待队列并 park 阻塞)

// Contended-enter slow path, called from enter() after the initial spin
// fails.  Protocol: try the lock, spin once more, CAS-push Self onto the
// _cxq waiter stack, then alternate park()/TryLock/TrySpin until ownership
// is acquired; finally unlink Self from cxq/EntryList and retire any
// "Responsible" role.  Caller must be a JavaThread in _thread_blocked state.
void ObjectMonitor::EnterI(TRAPS) {
  Thread * const Self = THREAD;
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");

  // Try the lock - TATAS
  if (TryLock (Self) > 0) {
    assert(_succ != Self, "invariant");
    assert(_owner == Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  assert(InitDone, "Unexpectedly not initialized");

  // We try one round of spinning *before* enqueueing Self.
  //
  // If the _owner is ready but OFFPROC we could use a YieldTo()
  // operation to donate the remainder of this thread's quantum
  // to the owner.  This has subtle but beneficial affinity
  // effects.

  if (TrySpin(Self) > 0) {
    assert(_owner == Self, "invariant");
    assert(_succ != Self, "invariant");
    assert(_Responsible != Self, "invariant");
    return;
  }

  // The Spin failed -- Enqueue and park the thread ...
  assert(_succ != Self, "invariant");
  assert(_owner != Self, "invariant");
  assert(_Responsible != Self, "invariant");

  // Enqueue "Self" on ObjectMonitor's _cxq.
  //
  // Node acts as a proxy for Self.
  // As an aside, if were to ever rewrite the synchronization code mostly
  // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
  // Java objects.  This would avoid awkward lifecycle and liveness issues,
  // as well as eliminate a subset of ABA issues.
  // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.

  // Stack-allocated waiter node; it stays linked on cxq/EntryList only for
  // the duration of this call, so stack lifetime is sufficient.
  ObjectWaiter node(Self);
  Self->_ParkEvent->reset();
  node._prev   = (ObjectWaiter *) 0xBAD;  // poison value: _prev is unused while on cxq
  node.TState  = ObjectWaiter::TS_CXQ;

  // Push "Self" onto the front of the _cxq.
  // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
  // Note that spinning tends to reduce the rate at which threads
  // enqueue and dequeue on EntryList|cxq.
  ObjectWaiter * nxt;
  for (;;) {
    node._next = nxt = _cxq;
    if (Atomic::cmpxchg(&node, &_cxq, nxt) == nxt) break;

    // Interference - the CAS failed because _cxq changed.  Just retry.
    // As an optional optimization we retry the lock.
    if (TryLock (Self) > 0) {
      assert(_succ != Self, "invariant");
      assert(_owner == Self, "invariant");
      assert(_Responsible != Self, "invariant");
      return;
    }
  }

  // Check for cxq|EntryList edge transition to non-null.  This indicates
  // the onset of contention.  While contention persists exiting threads
  // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
  // operations revert to the faster 1-0 mode.  This enter operation may interleave
  // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
  // arrange for one of the contending thread to use a timed park() operations
  // to detect and recover from the race.  (Stranding is form of progress failure
  // where the monitor is unlocked but all the contending threads remain parked).
  // That is, at least one of the contended threads will periodically poll _owner.
  // One of the contending threads will become the designated "Responsible" thread.
  // The Responsible thread uses a timed park instead of a normal indefinite park
  // operation -- it periodically wakes and checks for and recovers from potential
  // strandings admitted by 1-0 exit operations.   We need at most one Responsible
  // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
  // be responsible for a monitor.
  //
  // Currently, one of the contended threads takes on the added role of "Responsible".
  // A viable alternative would be to use a dedicated "stranding checker" thread
  // that periodically iterated over all the threads (or active monitors) and unparked
  // successors where there was risk of stranding.  This would help eliminate the
  // timer scalability issues we see on some platforms as we'd only have one thread
  // -- the checker -- parked on a timer.

  if (nxt == NULL && _EntryList == NULL) {
    // Try to assume the role of responsible thread for the monitor.
    // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
    Atomic::replace_if_null(Self, &_Responsible);
  }

  // The lock might have been released while this thread was occupied queueing
  // itself onto _cxq.  To close the race and avoid "stranding" and
  // progress-liveness failure we must resample-retry _owner before parking.
  // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
  // In this case the ST-MEMBAR is accomplished with CAS().
  //
  // TODO: Defer all thread state transitions until park-time.
  // Since state transitions are heavy and inefficient we'd like
  // to defer the state transitions until absolutely necessary,
  // and in doing so avoid some transitions ...

  int nWakeups = 0;        // tally of futile wakeups (lossy, see below)
  int recheckInterval = 1; // ms between timed parks for the Responsible thread

  for (;;) {

    if (TryLock(Self) > 0) break;
    assert(_owner != Self, "invariant");

    // park self
    if (_Responsible == Self) {
      // Timed park with exponential backoff so a stranded monitor
      // (1-0 exit race) is eventually detected and recovered.
      Self->_ParkEvent->park((jlong) recheckInterval);
      // Increase the recheckInterval, but clamp the value.
      recheckInterval *= 8;
      if (recheckInterval > MAX_RECHECK_INTERVAL) {
        recheckInterval = MAX_RECHECK_INTERVAL;
      }
    } else {
      // _ParkEvent->park() dispatches to the platform-specific park
      // implementation.  NOTE(review): the original (translated) comment
      // claimed this calls pthread_mutex_init on Linux; it presumably
      // blocks on the event's pthread condition variable
      // (pthread_cond_wait) instead -- confirm against os_posix.
      Self->_ParkEvent->park();
    }

    if (TryLock(Self) > 0) break;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.

    // This PerfData object can be used in parallel with a safepoint.
    // See the work around in PerfDataManager::destroy().
    OM_PERFDATA_OP(FutileWakeups, inc());
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
    // We can defer clearing _succ until after the spin completes
    // TrySpin() must tolerate being called with _succ == Self.
    // Try yet another round of adaptive spinning.
    if (TrySpin(Self) > 0) break;

    // We can find that we were unpark()ed and redesignated _succ while
    // we were spinning.  That's harmless.  If we iterate and call park(),
    // park() will consume the event and return immediately and we'll
    // just spin again.  This pattern can repeat, leaving _succ to simply
    // spin on a CPU.

    if (_succ == Self) _succ = NULL;

    // Invariant: after clearing _succ a thread *must* retry _owner before parking.
    OrderAccess::fence();
  }

  // Egress :
  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
  // Normally we'll find Self on the EntryList .
  // From the perspective of the lock owner (this thread), the
  // EntryList is stable and cxq is prepend-only.
  // The head of cxq is volatile but the interior is stable.
  // In addition, Self.TState is stable.

  assert(_owner == Self, "invariant");
  assert(object() != NULL, "invariant");
  // I'd like to write:
  //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
  // but as we're at a safepoint that's not safe.

  UnlinkAfterAcquire(Self, &node);
  if (_succ == Self) _succ = NULL;

  assert(_succ != Self, "invariant");
  if (_Responsible == Self) {
    _Responsible = NULL;
    OrderAccess::fence(); // Dekker pivot-point

    // We may leave threads on cxq|EntryList without a designated
    // "Responsible" thread.  This is benign.  When this thread subsequently
    // exits the monitor it can "see" such preexisting "old" threads --
    // threads that arrived on the cxq|EntryList before the fence, above --
    // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
    // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
    // non-null and elect a new "Responsible" timer thread.
    //
    // This thread executes:
    //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
    //    LD cxq|EntryList               (in subsequent exit)
    //
    // Entering threads in the slow/contended path execute:
    //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
    //    The (ST cxq; MEMBAR) is accomplished with CAS().
    //
    // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
    // exit operation from floating above the ST Responsible=null.
  }

  // We've acquired ownership with CAS().
  // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
  // But since the CAS() this thread may have also stored into _succ,
  // EntryList, cxq or Responsible.  These meta-data updates must be
  // visible __before this thread subsequently drops the lock.
  // Consider what could occur if we didn't enforce this constraint --
  // STs to monitor meta-data and user-data could reorder with (become
  // visible after) the ST in exit that drops ownership of the lock.
  // Some other thread could then acquire the lock, but observe inconsistent
  // or old monitor meta-data and heap data.  That violates the JMM.
  // To that end, the 1-0 exit() operation must have at least STST|LDST
  // "release" barrier semantics.  Specifically, there must be at least a
  // STST|LDST barrier in exit() before the ST of null into _owner that drops
  // the lock.   The barrier ensures that changes to monitor meta-data and data
  // protected by the lock will be visible before we release the lock, and
  // therefore before some other thread (CPU) has a chance to acquire the lock.
  // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
  //
  // Critically, any prior STs to _succ or EntryList must be visible before
  // the ST of null into _owner in the *subsequent* (following) corresponding
  // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
  // execute a serializing instruction.

  return;
}