/*
 *  This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 *  This file is licensed to You under the Eclipse Public License (EPL);
 *  You may not use this file except in compliance with the License. You
 *  may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 *  See the COPYRIGHT.txt file distributed with this work for information
 *  regarding copyright ownership.
 */
package org.jikesrvm.scheduler;

import org.jikesrvm.VM;
import org.jikesrvm.runtime.Magic;
import org.jikesrvm.runtime.Entrypoints;
import org.vmmagic.unboxed.Address;
import org.vmmagic.unboxed.Offset;
import org.vmmagic.pragma.Uninterruptible;
import org.vmmagic.pragma.Entrypoint;
import org.vmmagic.pragma.Untraced;
import org.vmmagic.pragma.NoInline;

/**
 *
 * <p> Alternative (to Java monitors) light-weight synchronization
 * mechanism to implement Java monitors {@link Lock}.  These locks
 * should not be used where Java monitors would suffice, or where
 * an adaptive mutex is required.  They are
 * intended to be held only briefly!
 *
 * <p> Normally, contending <code>RVMThread</code>s will spin on
 * this processor lock's <code>latestContender</code> field.  If
 * <code>MCS_Locking</code> is set, the processors spin on processor
 * local data.  This is loosely based on an idea in Mellor-Crummey and
 * Scott's paper in ASPLOS-IV (1991).
 * (A possible project: determine the conditions under which MCS
 * locking performs better than spinning on a global address.)
 *
 * <p> Acquiring or releasing a lock involves atomically reading and
 * setting the lock's <code>latestContender</code> field.  If this
 * field is null, the lock is unowned.  Otherwise, the field points to
 * the thread that owns the lock, or, if MCS locking is
 * being used, to the last thread on a circular queue of threads
 * spinning until they get the lock, or, if MCS locking is
 * being used and the circular spin queue is being updated, to
 * <code>IN_FLUX</code>.
 *
 * <p> Contention is best handled by doing something else.  To support
 * this, <code>tryLock</code> obtains the lock (and returns true) if
 * the lock is unowned (and there is no spurious microcontention).
 * Otherwise, it returns false.
 *
 * <p> Only when "doing something else" is not an attractive option
 * (locking global thread queues, unlocking a thick lock with threads
 * waiting, avoiding starvation on a thread that has issued several
 * tryLocks, etc.) should lock() be called.  Here, any remaining
 * contention is handled by spinning on a local flag.
 *
 * <p> To add itself to the circular waiting queue, a thread must
 * succeed in setting the latestContender field to IN_FLUX.  A backoff
 * strategy is used to reduce contention for this field.  This
 * strategy has both a pseudo-random (to prevent two or more threads
 * from backing off in lock step) and an exponential
 * component (to deal with really high contention).
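 *
 * <p> A sketch of the resulting delay (the actual computation lives in
 * {@code handleMicrocontention} below; {@code threadSlot} here stands for
 * the current thread's slot number and {@code n} for the number of failed
 * attempts):
 * <pre>{@code
 *   delayIndex = (delayIndex + threadSlot) % delayCount.length;
 *   int delay = delayCount[delayIndex] * delayMultiplier  // pseudo-random component
 *             + (delayBase << (n - 1));                   // exponential component
 *   for (int i = delay; i > 0; i--) ;                     // busy-wait for "delay" iterations
 * }</pre>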
 *
 * <p> Releasing a lock entails either atomically setting the
 * latestContender field to null (if this thread is the
 * latestContender), or releasing the first thread on the
 * circular spin queue.  In the latter case, the latestContender field
 * must be set to IN_FLUX.  To give unlock() priority over lock(), the
 * backoff strategy is not used for unlocking: if a thread fails to
 * set the field to IN_FLUX, it tries again immediately.
 *
 * <p> Usage: system locks should only be used when synchronized
 * methods cannot be used.  Do not do anything (such as trigger a type cast,
 * allocate an object, or call any method of a class that does not
 * implement Uninterruptible) that might allow a thread switch or
 * trigger a garbage collection between lock and unlock.
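 *
 * <p> A minimal usage sketch (illustrative only; the guarded data and the
 * enclosing uninterruptible code are assumed, not part of this class):
 * <pre>{@code
 *   SpinLock mutex = new SpinLock();
 *   ...
 *   mutex.lock();     // spin until the lock is acquired
 *   // brief critical section: no allocation, no interruptible calls
 *   mutex.unlock();
 *
 *   // or, when "doing something else" is preferable to spinning:
 *   if (mutex.tryLock()) {
 *     // brief critical section
 *     mutex.unlock();
 *   } else {
 *     // back off and do something useful, then retry
 *   }
 * }</pre>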
 *
 * @see RVMThread
 * @see Monitor
 * @see Lock
 */
@Uninterruptible
public final class SpinLock {
  /**
   * Should contending <code>RVMThread</code>s spin on thread-local addresses (true)
   * or on a globally shared address (false)?
   */
  private static final boolean MCS_Locking = false;

  /**
   * The state of the thread lock.
   * <ul>
   * <li> <code>null</code>, if the lock is not owned;
   * <li> the thread that owns the lock, if no threads are waiting;
   * <li> the last in a circular chain of threads waiting to own the lock; or
   * <li> <code>IN_FLUX</code>, if the circular chain is being edited.
   * </ul>
   * Only the first two states are possible unless MCS locking is implemented.
   */
  @Entrypoint
  @Untraced
  RVMThread latestContender;

  /**
   * @return whether this lock is currently held by some thread
   */
  public boolean lockHeld() {
    return latestContender != null;
  }

  // FIXME: save the string somewhere.
  public void lock(String s) {
    lock();
  }
  /**
   * Acquire a lock.
   */
  public void lock() {
    if (!VM.runningVM) return;
    VM.disableYieldpoints();
    RVMThread i = RVMThread.getCurrentThread();
    RVMThread p;
    int attempts = 0;
    Offset latestContenderOffset = Entrypoints.latestContenderField.getOffset();
    do {
      p = Magic.objectAsThread(Magic.addressAsObject(Magic.prepareAddress(this, latestContenderOffset)));
      if (p == null) { // nobody owns the lock
        if (Magic.attemptAddress(this, latestContenderOffset, Address.zero(), Magic.objectAsAddress(i))) {
          Magic.isync(); // so subsequent instructions won't see stale values
          return;
        } else {
          continue; // don't handle contention
        }
      } else if (MCS_Locking && Magic.objectAsAddress(p).NE(IN_FLUX)) { // lock is owned, but not being changed
        if (Magic.attemptAddress(this, latestContenderOffset, Magic.objectAsAddress(p), IN_FLUX)) {
          Magic.isync(); // so subsequent instructions won't see stale values
          break;
        }
      }
      handleMicrocontention(attempts++);
    } while (true);
    // i owns the lock
    if (VM.VerifyAssertions && !MCS_Locking) VM._assert(VM.NOT_REACHED);
    i.awaitingSpinLock = this;
    if (p.awaitingSpinLock != this) { // make i first (and only) waiter on the contender chain
      i.contenderLink = i;
    } else {                               // make i last waiter on the contender chain
      i.contenderLink = p.contenderLink;
      p.contenderLink = i;
    }
    Magic.sync(); // so other contenders will see the updated contender chain
    Magic.setObjectAtOffset(this, latestContenderOffset, i);  // other threads can get at the lock
    do { // spin, waiting for the lock
      Magic.isync(); // to make new value visible as soon as possible
    } while (i.awaitingSpinLock == this);
  }

  /**
   * Conditionally acquire a lock.
   * @return whether acquisition succeeded
   */
  public boolean tryLock() {
    if (!VM.runningVM) return true;
    VM.disableYieldpoints();
    Offset latestContenderOffset = Entrypoints.latestContenderField.getOffset();
    if (Magic.prepareAddress(this, latestContenderOffset).isZero()) {
      Address cp = Magic.objectAsAddress(RVMThread.getCurrentThread());
      if (Magic.attemptAddress(this, latestContenderOffset, Address.zero(), cp)) {
        Magic.isync(); // so subsequent instructions won't see stale values
        return true;
      }
    }
    VM.enableYieldpoints();
    return false;
  }

  /**
   * Release a lock.
   */
  public void unlock() {
    if (!VM.runningVM) return;
    Magic.sync(); // commit changes while lock was held so they are visible to the next processor that acquires the lock
    Offset latestContenderOffset = Entrypoints.latestContenderField.getOffset();
    RVMThread i = RVMThread.getCurrentThread();
    if (!MCS_Locking) {
      Magic.setObjectAtOffset(this, latestContenderOffset, null);  // latestContender = null;
      VM.enableYieldpoints();
      return;
    }
    RVMThread p;
    do {
      p = Magic.objectAsThread(Magic.addressAsObject(Magic.prepareAddress(this, latestContenderOffset)));
      if (p == i) { // nobody is waiting for the lock
        if (Magic.attemptAddress(this, latestContenderOffset, Magic.objectAsAddress(p), Address.zero())) {
          break;
        }
      } else if (Magic.objectAsAddress(p).NE(IN_FLUX)) { // there are waiters, but the contention chain is not being changed
        if (Magic.attemptAddress(this, latestContenderOffset, Magic.objectAsAddress(p), IN_FLUX)) {
          break;
        }
      } else { // in flux
        handleMicrocontention(-1); // wait a little before trying again
      }
    } while (true);
    if (p != i) { // p is the last thread on the chain of threads contending for the lock
      RVMThread q = p.contenderLink; // q is the first thread on the chain
      if (p == q) { // only one thread waiting for the lock
        q.awaitingSpinLock = null; // q now owns the lock
        Magic.sync(); // make sure the chain of waiting threads gets updated before another thread accesses the chain
        // other contenders can get at the lock:
        Magic.setObjectAtOffset(this, latestContenderOffset, q); // latestContender = q;
      } else { // more than one thread waiting for the lock
        p.contenderLink = q.contenderLink; // remove q from the chain
        q.awaitingSpinLock = null; // q now owns the lock
        Magic.sync(); // make sure the chain of waiting threads gets updated before another thread accesses the chain
        Magic.setObjectAtOffset(this, latestContenderOffset, p); // other contenders can get at the lock
      }
    }
    VM.enableYieldpoints();
  }

  /**
   * An attempt to lock or unlock a processor lock has failed,
   * presumably due to contention with another processor.  Back off a
   * little to increase the likelihood that a subsequent retry will
   * succeed.
   *
   * @param n the number of attempts
   */
  @NoInline
  private void handleMicrocontention(int n) {
    Magic.pause();    // reduce overhead of spin wait on IA
    if (n <= 0) return;  // method call overhead is delay enough
    if (n > 100) {
      // PNT: FIXME: we're dying here ... maybe we're deadlocking?
      VM.sysWriteln("Unexpectedly large spin lock contention on ", Magic.objectAsAddress(this));
      RVMThread t = latestContender;
      if (t == null) {
        VM.sysWriteln("Unexpectedly large spin lock contention in ", RVMThread.getCurrentThreadSlot(), "; lock held by nobody");
      } else {
        VM.sysWriteln("Unexpectedly large spin lock contention in ", RVMThread.getCurrentThreadSlot(), "; lock held by ", t.getThreadSlot());
        if (t != RVMThread.getCurrentThread()) {
          VM.sysWriteln("But -- at least the spin lock is held by a different thread.");
        }
      }
      RVMThread.dumpStack();
      VM.sysFail("Unexpectedly large spin lock contention");
    }
    // PNT: this is weird.
    int pid = RVMThread.getCurrentThread().getThreadSlot(); // delay a different amount in each thread
    delayIndex = (delayIndex + pid) % delayCount.length;
    int delay = delayCount[delayIndex] * delayMultiplier; // pseudo-random backoff component
    delay += delayBase << (n - 1);                     // exponential backoff component
    for (int i = delay; i > 0; i--) ;                        // delay a different amount of time on each thread
  }

  /** multiplier for the pseudo-random component of the backoff delay */
  private static final int delayMultiplier = 10;
  /** base of the exponential component of the backoff delay */
  private static final int delayBase = 64;
  /** index into {@link #delayCount}, advanced on each backoff */
  private static int delayIndex;
  /** pseudo-random delay factors */
  private static final int[] delayCount = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13};

  /**
   * For MCS locking, indicates that another processor is changing the
   * state of the circular waiting queue.
   */
  private static final Address IN_FLUX = Address.max();

}