/*
 *  This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 *  This file is licensed to You under the Eclipse Public License (EPL);
 *  You may not use this file except in compliance with the License. You
 *  may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 *  See the COPYRIGHT.txt file distributed with this work for information
 *  regarding copyright ownership.
 */
package org.jikesrvm.scheduler;

import static org.jikesrvm.objectmodel.ThinLockConstants.TL_DEDICATED_U16_OFFSET;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_DEDICATED_U16_SHIFT;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_LOCK_COUNT_MASK;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_LOCK_COUNT_SHIFT;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_LOCK_COUNT_UNIT;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_LOCK_ID_MASK;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_LOCK_ID_SHIFT;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_STAT_BIASABLE;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_STAT_FAT;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_STAT_MASK;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_STAT_THIN;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_THREAD_ID_MASK;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_THREAD_ID_SHIFT;
import static org.jikesrvm.objectmodel.ThinLockConstants.TL_UNLOCK_MASK;

import org.jikesrvm.VM;
import org.jikesrvm.runtime.Magic;
import org.jikesrvm.util.Services;
import org.vmmagic.pragma.Inline;
import org.vmmagic.pragma.NoInline;
import org.vmmagic.pragma.NoNullCheck;
import org.vmmagic.pragma.Uninterruptible;
import org.vmmagic.pragma.Unpreemptible;
import org.vmmagic.unboxed.Offset;
import org.vmmagic.unboxed.Word;

/**
 * Implementation of thin locks.  A lock word is in one of three states:
 * biasable (reserved for a single thread, which can then lock and unlock
 * the object without atomic operations), thin (acquired and released with
 * a compare-and-swap on the lock word itself), or fat (inflated to a
 * heavy-weight {@link Lock}).
 */
@Uninterruptible
public final class ThinLock {

  private static final boolean ENABLE_BIASED_LOCKING = true;

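  /**
   * Fast path for acquiring the lock on an object, intended for inlining at
   * the point of a {@code monitorenter}.  If the lock is reserved (biased)
   * for the current thread, the recursion count is bumped with a plain store;
   * if the lock is thin and unowned, ownership is claimed with a single
   * compare-and-swap.  All other cases fall through to the out-of-line
   * {@link #lock(Object, Offset)}.
   * <p>
   * Illustrative sketch only (not code from this class) of how a
   * monitorenter/monitorexit pair uses these entry points; {@code obj} and
   * {@code lockOffset} are placeholders, assuming the caller has already
   * obtained the offset of the object's lock word from the object model:
   * <pre>{@code
   * ThinLock.inlineLock(obj, lockOffset);
   * try {
   *   // ... critical section ...
   * } finally {
   *   ThinLock.inlineUnlock(obj, lockOffset);
   * }
   * }</pre>
   *
   * @param o the object to be locked
   * @param lockOffset the offset of the thin lock word in the object
   */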
  @Inline
  @NoNullCheck
  @Unpreemptible
  public static void inlineLock(Object o, Offset lockOffset) {
    Word old = Magic.prepareWord(o, lockOffset); // FIXME: bad for PPC?
    Word id = old.and(TL_THREAD_ID_MASK.or(TL_STAT_MASK));
    Word tid = Word.fromIntSignExtend(RVMThread.getCurrentThread().getLockingId());
    if (id.EQ(tid)) {
      Word changed = old.plus(TL_LOCK_COUNT_UNIT);
      if (!changed.and(TL_LOCK_COUNT_MASK).isZero()) {
        setDedicatedU16(o, lockOffset, changed);
        return;
      }
    } else if (id.EQ(TL_STAT_THIN)) {
      // lock is thin and not held by anyone
      if (Magic.attemptWord(o, lockOffset, old, old.or(tid))) {
        Magic.isync();
        return;
      }
    }
    lock(o, lockOffset);
  }

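  /**
   * Fast path for releasing the lock on an object, intended for inlining at
   * the point of a {@code monitorexit}.  If the lock is biased to the current
   * thread and held recursively, the recursion count is decremented with a
   * plain store; if it is a thin lock held by the current thread with no
   * recursive nesting, it is released with a single compare-and-swap.  All
   * other cases fall through to the out-of-line
   * {@link #unlock(Object, Offset)}.
   *
   * @param o the object to be unlocked
   * @param lockOffset the offset of the thin lock word in the object
   */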
  @Inline
  @NoNullCheck
  @Unpreemptible
  public static void inlineUnlock(Object o, Offset lockOffset) {
    Word old = Magic.prepareWord(o, lockOffset); // FIXME: bad for PPC?
    Word id = old.and(TL_THREAD_ID_MASK.or(TL_STAT_MASK));
    Word tid = Word.fromIntSignExtend(RVMThread.getCurrentThread().getLockingId());
    if (id.EQ(tid)) {
      if (!old.and(TL_LOCK_COUNT_MASK).isZero()) {
        setDedicatedU16(o, lockOffset, old.minus(TL_LOCK_COUNT_UNIT));
        return;
      }
    } else if (old.xor(tid).rshl(TL_LOCK_COUNT_SHIFT).EQ(TL_STAT_THIN.rshl(TL_LOCK_COUNT_SHIFT))) {
      Magic.sync();
      if (Magic.attemptWord(o, lockOffset, old, old.and(TL_UNLOCK_MASK).or(TL_STAT_THIN))) {
        return;
      }
    }
    unlock(o, lockOffset);
  }

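  /**
   * Out-of-line (slow path) lock acquisition.  Depending on the state of the
   * lock word this will bias an unbiased lock to the current thread (or
   * acquire it as a thin lock if biased locking is disabled), bump the
   * recursion count, revoke a bias held by another thread, spin briefly on a
   * contended thin lock before inflating it, or contend on an already
   * inflated (fat) lock.
   *
   * @param o the object to be locked
   * @param lockOffset the offset of the thin lock word in the object
   */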
  @NoInline
  @NoNullCheck
  @Unpreemptible
  public static void lock(Object o, Offset lockOffset) {
    if (STATS) fastLocks++;

    Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());

    for (int cnt = 0;;cnt++) {
      Word old = Magic.getWordAtOffset(o, lockOffset);
      Word stat = old.and(TL_STAT_MASK);
      boolean tryToInflate = false;
      if (stat.EQ(TL_STAT_BIASABLE)) {
        Word id = old.and(TL_THREAD_ID_MASK);
        if (id.isZero()) {
          if (ENABLE_BIASED_LOCKING) {
            // lock is unbiased, bias it in our favor and grab it
            if (Synchronization.tryCompareAndSwap(
                  o, lockOffset,
                  old,
                  old.or(threadId).plus(TL_LOCK_COUNT_UNIT))) {
              Magic.isync();
              return;
            }
          } else {
            // lock is unbiased but biasing is NOT allowed, so turn it into
            // a thin lock
            if (Synchronization.tryCompareAndSwap(
                  o, lockOffset,
                  old,
                  old.or(threadId).or(TL_STAT_THIN))) {
              Magic.isync();
              return;
            }
          }
        } else if (id.EQ(threadId)) {
          // lock is biased in our favor
          Word changed = old.plus(TL_LOCK_COUNT_UNIT);
          if (!changed.and(TL_LOCK_COUNT_MASK).isZero()) {
            setDedicatedU16(o, lockOffset, changed);
            return;
          } else {
            tryToInflate = true;
          }
        } else {
          if (casFromBiased(o, lockOffset, old, biasBitsToThinBits(old), cnt)) {
            continue; // don't spin, since it's thin now
          }
        }
      } else if (stat.EQ(TL_STAT_THIN)) {
        Word id = old.and(TL_THREAD_ID_MASK);
        if (id.isZero()) {
          if (Synchronization.tryCompareAndSwap(
                o, lockOffset, old, old.or(threadId))) {
            Magic.isync();
            return;
          }
        } else if (id.EQ(threadId)) {
          Word changed = old.plus(TL_LOCK_COUNT_UNIT);
          if (changed.and(TL_LOCK_COUNT_MASK).isZero()) {
            tryToInflate = true;
          } else if (Synchronization.tryCompareAndSwap(
                       o, lockOffset, old, changed)) {
            Magic.isync();
            return;
          }
        } else if (cnt > retryLimit) {
          tryToInflate = true;
        }
      } else {
        if (VM.VerifyAssertions) VM._assert(stat.EQ(TL_STAT_FAT));
        // lock is fat.  contend on it.
        if (Lock.getLock(getLockIndex(old)).lockHeavy(o)) {
          return;
        }
      }

      if (tryToInflate) {
        if (STATS) slowLocks++;
        // the lock is not fat, is owned by someone else, or else the count wrapped.
        // attempt to inflate it (this may fail, in which case we'll just harmlessly
        // loop around) and lock it (may also fail, if we get the wrong lock).  if it
        // succeeds, we're done.
        // NB: this calls into our attemptToMarkInflated() method, which will do the
        // Right Thing if the lock is biased to someone else.
        if (inflateAndLock(o, lockOffset)) {
          return;
        }
      } else {
        RVMThread.yieldNoHandshake();
      }
    }
  }

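  /**
   * Out-of-line (slow path) lock release.  Releases a biased or thin lock
   * held by the current thread, delegates to the heavy lock for fat locks,
   * and raises an {@code IllegalMonitorStateException} if the current thread
   * does not hold the lock.
   *
   * @param o the object to be unlocked
   * @param lockOffset the offset of the thin lock word in the object
   */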
  @NoInline
  @NoNullCheck
  @Unpreemptible
  public static void unlock(Object o, Offset lockOffset) {
    Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());
    for (int cnt = 0;;cnt++) {
      Word old = Magic.getWordAtOffset(o, lockOffset);
      Word stat = old.and(TL_STAT_MASK);
      if (stat.EQ(TL_STAT_BIASABLE)) {
        Word id = old.and(TL_THREAD_ID_MASK);
        if (id.EQ(threadId)) {
          if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
            RVMThread.raiseIllegalMonitorStateException("biased unlocking: we own this object but the count is already zero", o);
          }
          setDedicatedU16(o, lockOffset, old.minus(TL_LOCK_COUNT_UNIT));
          return;
        } else {
          RVMThread.raiseIllegalMonitorStateException("biased unlocking: we don't own this object", o);
        }
      } else if (stat.EQ(TL_STAT_THIN)) {
        Magic.sync();
        Word id = old.and(TL_THREAD_ID_MASK);
        if (id.EQ(threadId)) {
          Word changed;
          if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
            changed = old.and(TL_UNLOCK_MASK).or(TL_STAT_THIN);
          } else {
            changed = old.minus(TL_LOCK_COUNT_UNIT);
          }
          if (Synchronization.tryCompareAndSwap(
                o, lockOffset, old, changed)) {
            return;
          }
        } else {
          if (false) {
            VM.sysWriteln("threadId = ",threadId);
            VM.sysWriteln("id = ",id);
          }
          RVMThread.raiseIllegalMonitorStateException("thin unlocking: we don't own this object", o);
        }
      } else {
        if (VM.VerifyAssertions) VM._assert(stat.EQ(TL_STAT_FAT));
        // fat unlock
        Lock.getLock(getLockIndex(old)).unlockHeavy(o);
        return;
      }
    }
  }

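  /**
   * Tests whether the given thread holds the lock on an object.
   *
   * @param o the object to check
   * @param lockOffset the offset of the thin lock word in the object
   * @param thread the thread whose ownership is being tested
   * @return whether {@code thread} currently holds the lock on {@code o}
   */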
  @Uninterruptible
  @NoNullCheck
  public static boolean holdsLock(Object o, Offset lockOffset, RVMThread thread) {
    for (int cnt = 0;;++cnt) {
      int tid = thread.getLockingId();
      Word bits = Magic.getWordAtOffset(o, lockOffset);
      if (bits.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE)) {
        // if locked, then it is locked with a thin lock
        return
          bits.and(TL_THREAD_ID_MASK).toInt() == tid &&
          !bits.and(TL_LOCK_COUNT_MASK).isZero();
      } else if (bits.and(TL_STAT_MASK).EQ(TL_STAT_THIN)) {
        return bits.and(TL_THREAD_ID_MASK).toInt() == tid;
      } else {
        if (VM.VerifyAssertions) VM._assert(bits.and(TL_STAT_MASK).EQ(TL_STAT_FAT));
        // if locked, then it is locked with a fat lock
        Lock l = Lock.getLock(getLockIndex(bits));
        if (l != null) {
          l.mutex.lock();
          boolean result = (l.getOwnerId() == tid && l.getLockedObject() == o);
          l.mutex.unlock();
          return result;
        }
      }
      RVMThread.yieldNoHandshake();
    }
  }

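  /**
   * Tests whether a lock word encodes a fat (inflated) lock.
   *
   * @param lockWord the lock word to test
   * @return whether the lock word encodes a fat lock
   */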
  @Inline
  @Uninterruptible
  public static boolean isFat(Word lockWord) {
    return lockWord.and(TL_STAT_MASK).EQ(TL_STAT_FAT);
  }

  /**
   * Return the lock index for a given lock word.  Asserts that the index
   * is in valid range and that the fat lock bit is set.
   *
   * @param lockWord The lock word whose lock index is being established
   * @return the lock index corresponding to the lock word.
   */
  @Inline
  @Uninterruptible
  public static int getLockIndex(Word lockWord) {
    int index = lockWord.and(TL_LOCK_ID_MASK).rshl(TL_LOCK_ID_SHIFT).toInt();
    if (VM.VerifyAssertions) {
      if (!(index > 0 && index < Lock.numLocks())) {
        VM.sysWrite("Lock index out of range! Word: "); VM.sysWrite(lockWord);
        VM.sysWrite(" index: "); VM.sysWrite(index);
        VM.sysWrite(" locks: "); VM.sysWrite(Lock.numLocks());
        VM.sysWriteln();
      }
      VM._assert(index > 0 && index < Lock.numLocks());  // index is in range
      VM._assert(lockWord.and(TL_STAT_MASK).EQ(TL_STAT_FAT));        // fat lock bit is set
    }
    return index;
  }

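  /**
   * Returns the locking id of the thread that holds the lock described by a
   * thin or biasable lock word, or 0 if the lock is not held.  Must not be
   * called on a fat lock word.
   *
   * @param lockWord the lock word to inspect
   * @return the locking id of the owner, or 0 if the lock is unheld
   */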
  @Inline
  @Uninterruptible
  public static int getLockOwner(Word lockWord) {
    if (VM.VerifyAssertions) VM._assert(!isFat(lockWord));
    if (lockWord.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE)) {
      if (lockWord.and(TL_LOCK_COUNT_MASK).isZero()) {
        return 0;
      } else {
        return lockWord.and(TL_THREAD_ID_MASK).toInt();
      }
    } else {
      return lockWord.and(TL_THREAD_ID_MASK).toInt();
    }
  }

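  /**
   * Returns the recursion count encoded in a thin or biasable lock word.
   * Must only be called when the lock is actually held, i.e. when
   * {@link #getLockOwner(Word)} is non-zero.
   *
   * @param lockWord the lock word to inspect
   * @return the number of times the owner currently holds the lock
   */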
  @Inline
  @Uninterruptible
  public static int getRecCount(Word lockWord) {
    if (VM.VerifyAssertions) VM._assert(getLockOwner(lockWord) != 0);
    if (lockWord.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE)) {
      return lockWord.and(TL_LOCK_COUNT_MASK).rshl(TL_LOCK_COUNT_SHIFT).toInt();
    } else {
      return lockWord.and(TL_LOCK_COUNT_MASK).rshl(TL_LOCK_COUNT_SHIFT).toInt() + 1;
    }
  }

  /**
   * Sets only the dedicated 16-bit part of the lock word, taking it from the
   * corresponding bits of the given value. This is the only part that is
   * allowed to be written without a CAS. This method takes care of the
   * shifting and storing of the value.
   *
   * @param o The object whose header is to be changed
   * @param lockOffset The lock offset
   * @param value The value which contains the 16-bit portion to be written.
   */
  @Inline
  @Unpreemptible
  private static void setDedicatedU16(Object o, Offset lockOffset, Word value) {
    Magic.setCharAtOffset(o, lockOffset.plus(TL_DEDICATED_U16_OFFSET), (char)(value.toInt() >>> TL_DEDICATED_U16_SHIFT));
  }

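  /**
   * Attempts to atomically replace a biasable lock word, revoking the bias if
   * necessary.  If the lock is not biased to any live thread, or is biased to
   * the current thread, a plain compare-and-swap suffices.  If it is biased
   * to another live thread, a pair handshake is performed with that thread so
   * that the swap cannot race with the owner's non-atomic updates of the
   * recursion count.
   *
   * @param o the object whose lock word is to be changed
   * @param lockOffset the offset of the thin lock word in the object
   * @param oldLockWord the biasable lock word we expect to find
   * @param changed the lock word to install in its place
   * @param cnt the caller's retry count
   * @return whether the lock word was successfully replaced
   */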
  @NoInline
  @Unpreemptible
  public static boolean casFromBiased(Object o, Offset lockOffset,
                                      Word oldLockWord, Word changed,
                                      int cnt) {
    RVMThread me = RVMThread.getCurrentThread();
    Word id = oldLockWord.and(TL_THREAD_ID_MASK);
    if (id.isZero()) {
      if (false) VM.sysWriteln("id is zero - easy case.");
      return Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
    } else {
      if (false) VM.sysWriteln("id = ",id);
      int slot = id.toInt() >> TL_THREAD_ID_SHIFT;
      if (false) VM.sysWriteln("slot = ",slot);
      RVMThread owner = RVMThread.threadBySlot[slot];
      if (owner == me /* I own it, so I can unbias it trivially.  This occurs
                       when we are inflating due to, for example, wait() */ ||
          owner == null /* the thread that owned it is dead, so it's safe to
                         unbias. */) {
        // note that we use a CAS here, but it's only needed in the case
        // that owner==null, since in that case some other thread may also
        // be unbiasing.
        return Synchronization.tryCompareAndSwap(
          o, lockOffset, oldLockWord, changed);
      } else {
        boolean result = false;

        // NB. this may stop a thread other than the one that had the bias,
        // if that thread died and some other thread took its slot.  that's
        // why we do a CAS below.  it's only needed if some other thread
        // had seen the owner be null (which may happen if we came here after
        // a new thread took the slot while someone else came here when the
        // slot was still null).  if it was the case that everyone else had
        // seen a non-null owner, then the pair handshake would serve as
        // sufficient synchronization (the id would identify the set of threads
        // that shared that id's communicationLock).  oddly, that means that
        // this whole thing could be "simplified" to acquire the
        // communicationLock even if the owner was null.  but that would be
        // goofy.
        if (false) VM.sysWriteln("entering pair handshake");
        owner.beginPairHandshake();
        if (false) VM.sysWriteln("done with that");

        Word newLockWord = Magic.getWordAtOffset(o, lockOffset);
        result = Synchronization.tryCompareAndSwap(
          o, lockOffset, oldLockWord, changed);
        owner.endPairHandshake();
        if (false) VM.sysWriteln("that worked.");

        return result;
      }
    }
  }

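  /**
   * Attempts to atomically install a fat lock word, referring to the lock
   * with the given index, in place of the given thin or biasable lock word.
   *
   * @param o the object whose lock is being inflated
   * @param lockOffset the offset of the thin lock word in the object
   * @param oldLockWord the thin or biasable lock word we expect to find
   * @param lockId the index of the heavy-weight lock to install
   * @param cnt the caller's retry count
   * @return whether the lock word was successfully replaced
   */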
  @Inline
  @Unpreemptible
  public static boolean attemptToMarkInflated(Object o, Offset lockOffset,
                                              Word oldLockWord,
                                              int lockId,
                                              int cnt) {
    if (VM.VerifyAssertions) VM._assert(oldLockWord.and(TL_STAT_MASK).NE(TL_STAT_FAT));
    if (false) VM.sysWriteln("attemptToMarkInflated with oldLockWord = ",oldLockWord);
    // what this needs to do:
    // 1) if the lock is thin, it's just a CAS
    // 2) if the lock is unbiased, CAS in the inflation
    // 3) if the lock is biased in our favor, store the lock without CAS
    // 4) if the lock is biased but to someone else, enter the pair handshake
    //    to unbias it and install the inflated lock
    Word changed =
      TL_STAT_FAT.or(Word.fromIntZeroExtend(lockId).lsh(TL_LOCK_ID_SHIFT))
      .or(oldLockWord.and(TL_UNLOCK_MASK));
    if (false && oldLockWord.and(TL_STAT_MASK).EQ(TL_STAT_THIN))
      VM.sysWriteln("obj = ",Magic.objectAsAddress(o),
                    ", old = ",oldLockWord,
                    ", owner = ",getLockOwner(oldLockWord),
                    ", rec = ",getLockOwner(oldLockWord) == 0 ? 0 : getRecCount(oldLockWord),
                    ", changed = ",changed,
                    ", lockId = ",lockId);
    if (false) VM.sysWriteln("changed = ",changed);
    if (oldLockWord.and(TL_STAT_MASK).EQ(TL_STAT_THIN)) {
      if (false) VM.sysWriteln("it's thin, inflating the easy way.");
      return Synchronization.tryCompareAndSwap(
        o, lockOffset, oldLockWord, changed);
    } else {
      return casFromBiased(o, lockOffset, oldLockWord, changed, cnt);
    }
  }

  /**
   * Promotes a light-weight lock to a heavy-weight lock.  If this returns the lock
   * that you gave it, its mutex will be locked; otherwise, its mutex will be unlocked.
   * Hence, calls to this method should always be followed by a conditional lock() or
   * unlock() call.
   *
   * @param o the object to get a heavy-weight lock
   * @param lockOffset the offset of the thin lock word in the object.
   * @param l the lock to attempt to inflate
   * @return the inflated lock; either the one you gave, or another one, if the lock
   *         was inflated by some other thread.
   */
  @NoNullCheck
  @Unpreemptible
  protected static Lock attemptToInflate(Object o,
                                         Offset lockOffset,
                                         Lock l) {
    if (false) VM.sysWriteln("l = ",Magic.objectAsAddress(l));
    l.mutex.lock();
    for (int cnt = 0;;++cnt) {
      Word bits = Magic.getWordAtOffset(o, lockOffset);
      // check to see if another thread has already created a fat lock
      if (isFat(bits)) {
        if (trace) {
          VM.sysWriteln("Thread #",RVMThread.getCurrentThreadSlot(),
                        ": freeing lock ",Magic.objectAsAddress(l),
                        " because we had a double-inflate");
        }
        Lock result = Lock.getLock(getLockIndex(bits));
        if (result == null ||
            result.lockedObject != o) {
          continue; /* this is nasty.  this will happen when a lock
                       is deflated. */
        }
        Lock.free(l);
        l.mutex.unlock();
        return result;
      }
      if (VM.VerifyAssertions) VM._assert(l != null);
      if (attemptToMarkInflated(
            o, lockOffset, bits, l.index, cnt)) {
        l.setLockedObject(o);
        l.setOwnerId(getLockOwner(bits));
        if (l.getOwnerId() != 0) {
          l.setRecursionCount(getRecCount(bits));
        } else {
          if (VM.VerifyAssertions) VM._assert(l.getRecursionCount() == 0);
        }
        return l;
      }
      // contention detected, try again
    }
  }

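  /**
   * Converts a biasable lock word into the equivalent thin lock word: the
   * owner (if any) is preserved and the recursion count is re-encoded in
   * thin-lock form, where a stored count of zero means the lock is held once.
   *
   * @param bits the biasable lock word to convert
   * @return the corresponding thin lock word
   */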
  @Inline
  @Uninterruptible
  private static Word biasBitsToThinBits(Word bits) {
    int lockOwner = getLockOwner(bits);

    Word changed = bits.and(TL_UNLOCK_MASK).or(TL_STAT_THIN);

    if (lockOwner != 0) {
      int recCount = getRecCount(bits);
      changed = changed
        .or(Word.fromIntZeroExtend(lockOwner))
        .or(Word.fromIntZeroExtend(recCount - 1).lsh(TL_LOCK_COUNT_SHIFT));
    }

    return changed;
  }

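  /**
   * Attempts to atomically replace the given lock word with an unheld thin
   * lock word, thereby deflating the lock.
   *
   * @param o the object whose lock is being deflated
   * @param lockOffset the offset of the thin lock word in the object
   * @param oldLockWord the lock word we expect to find
   * @return whether the lock word was successfully replaced
   */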
  @Inline
  @Uninterruptible
  public static boolean attemptToMarkDeflated(Object o, Offset lockOffset,
                                              Word oldLockWord) {
    // we allow concurrent modification of the lock word when it's thin or fat.
    Word changed = oldLockWord.and(TL_UNLOCK_MASK).or(TL_STAT_THIN);
    if (VM.VerifyAssertions) VM._assert(getLockOwner(changed) == 0);
    return Synchronization.tryCompareAndSwap(
      o, lockOffset, oldLockWord, changed);
  }

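  /**
   * Marks the lock on an object as deflated, retrying until the fat lock word
   * (which must refer to the lock with index {@code id}) has been replaced by
   * an unheld thin lock word.
   *
   * @param o the object whose lock is being deflated
   * @param lockOffset the offset of the thin lock word in the object
   * @param id the index of the heavy-weight lock currently installed
   */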
  @Uninterruptible
  public static void markDeflated(Object o, Offset lockOffset, int id) {
    for (;;) {
      Word bits = Magic.getWordAtOffset(o, lockOffset);
      if (VM.VerifyAssertions) VM._assert(isFat(bits));
      if (VM.VerifyAssertions) VM._assert(getLockIndex(bits) == id);
      if (attemptToMarkDeflated(o, lockOffset, bits)) {
        return;
      }
    }
  }

  ////////////////////////////////////////////////////////////////
  /// Support for inflating (and deflating) heavy-weight locks ///
  ////////////////////////////////////////////////////////////////

  /**
   * Promotes a light-weight lock to a heavy-weight lock.  Note: the
   * object in question will normally be locked by another thread,
   * or it may be unlocked.  If there is already a heavy-weight lock
   * on this object, that lock is returned.
   *
   * @param o the object to get a heavy-weight lock
   * @param lockOffset the offset of the thin lock word in the object.
   * @return the heavy-weight lock on this object
   */
  @Unpreemptible
  private static Lock inflate(Object o, Offset lockOffset) {
    Lock l = Lock.allocate();
    if (VM.VerifyAssertions) {
      VM._assert(l != null); // inflate called by wait (or notify) which shouldn't be called during GC
    }
    Lock rtn = attemptToInflate(o, lockOffset, l);
    if (rtn == l)
      l.mutex.unlock();
    return rtn;
  }

  /**
   * Promotes a light-weight lock to a heavy-weight lock and locks it.
   * Note: the object in question will normally be locked by another
   * thread, or it may be unlocked.  If there is already a
   * heavy-weight lock on this object, that lock is used.
   *
   * @param o the object to get a heavy-weight lock
   * @param lockOffset the offset of the thin lock word in the object.
   * @return whether the object was successfully locked
   */
  @Unpreemptible
  private static boolean inflateAndLock(Object o, Offset lockOffset) {
    Lock l = Lock.allocate();
    if (l == null) return false; // can't allocate locks during GC
    Lock rtn = attemptToInflate(o, lockOffset, l);
    if (l != rtn) {
      l = rtn;
      l.mutex.lock();
    }
    return l.lockHeavyLocked(o);
  }

  ////////////////////////////////////////////////////////////////////////////
  /// Get heavy-weight lock for an object; if thin, inflate it.
  ////////////////////////////////////////////////////////////////////////////

  /**
   * Obtains the heavy-weight lock, if there is one, associated with the
   * indicated object.  Returns <code>null</code> if there is no
   * heavy-weight lock associated with the object and <code>create</code>
   * is false.
   *
   * @param o the object from which a lock is desired
   * @param lockOffset the offset of the thin lock word in the object.
   * @param create if true, create the heavy lock if none is found
   * @return the heavy-weight lock on the object (if any)
   */
  @Unpreemptible
  public static Lock getHeavyLock(Object o, Offset lockOffset, boolean create) {
    Word old = Magic.getWordAtOffset(o, lockOffset);
    if (isFat(old)) { // already a fat lock in place
      return Lock.getLock(getLockIndex(old));
    } else if (create) {
      return inflate(o, lockOffset);
    } else {
      return null;
    }
  }

  ///////////////////////////////////////////////////////////////
  /// Support for debugging and performance tuning ///
  ///////////////////////////////////////////////////////////////

  /**
   * Number of times a thread yields before inflating the lock on an
   * object to a heavy-weight lock.  The current value was tuned for the
   * portBOB benchmark on a 12-way SMP (AIX) in the Fall of '99.  FP
   * confirmed that it's still optimal for JBB and DaCapo on 4-, 8-,
   * and 16-way SMPs (Linux/ia32) in Spring '09.
   */
  private static final int retryLimit = 40;

  static final boolean STATS = Lock.STATS;

  static final boolean trace = false;

  static int fastLocks;
  static int slowLocks;

  static void notifyAppRunStart(String app, int value) {
    if (!STATS) return;
    fastLocks = 0;
    slowLocks = 0;
  }

  static void notifyExit(int value) {
    if (!STATS) return;
    VM.sysWrite("ThinLocks: ");
    VM.sysWrite(fastLocks);
    VM.sysWrite(" fast locks");
    Services.percentage(fastLocks, value, "all lock operations");
    VM.sysWrite("ThinLocks: ");
    VM.sysWrite(slowLocks);
    VM.sysWrite(" slow locks");
    Services.percentage(slowLocks, value, "all lock operations");
  }

}