/*
 *  This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 *  This file is licensed to You under the Eclipse Public License (EPL);
 *  You may not use this file except in compliance with the License. You
 *  may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 *  See the COPYRIGHT.txt file distributed with this work for information
 *  regarding copyright ownership.
 */
package org.mmtk.policy.immix;

import static org.mmtk.policy.immix.ImmixConstants.*;
import static org.mmtk.utility.Constants.LOG_BYTES_IN_PAGE;

import org.mmtk.plan.Plan;
import org.mmtk.plan.TransitiveClosure;
import org.mmtk.policy.Space;
import org.mmtk.utility.heap.*;
import org.mmtk.utility.options.LineReuseRatio;
import org.mmtk.utility.options.Options;
import org.mmtk.utility.ForwardingWord;
import org.mmtk.utility.HeaderByte;
import org.mmtk.utility.Log;

import org.mmtk.vm.Lock;
import org.mmtk.vm.VM;

import org.vmmagic.pragma.*;
import org.vmmagic.unboxed.*;

/**
 * Each instance of this class corresponds to one immix <b>space</b>.
 * Each of the instance methods of this class may be called by any
 * thread (i.e. synchronization must be explicit in any instance or
 * class method).  This contrasts with SquishLocal (the per-thread
 * allocation class), whose instances correspond to <i>plan</i>
 * instances and therefore to kernel threads, so its instance methods
 * do not require synchronization.
 *
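 * <p>A rough, illustrative usage sketch (not prescriptive) of how a plan
 * might drive this space across a major collection, using only methods
 * declared in this class; variable names are illustrative only:
 *
 * <pre>{@code
 *   immixSpace.decideWhetherToDefrag(emergency, true, attempt, userTriggered);
 *   immixSpace.prepare(true);                     // global prepare
 *   // ...transitive closure, e.g. via traceObject(trace, object, allocator)...
 *   boolean defragged = immixSpace.release(true); // global release
 * }</pre>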
 */
@Uninterruptible
public final class ImmixSpace extends Space {

  /****************************************************************************
   *
   * Class variables
   */

  /**
   * Threshold on a block's mark state at or below which (when non-zero) the
   * block is considered reusable.  Derived from the {@link LineReuseRatio} option.
   */
  private static short reusableMarkStateThreshold = 0;

  /****************************************************************************
   *
   * Instance variables
   */

  /**
   * The current object mark state for this space, advanced at each major collection.
   */
  private byte markState = ObjectHeader.MARK_BASE_VALUE;
  byte lineMarkState = RESET_LINE_MARK_STATE;
  private byte lineUnavailState = RESET_LINE_MARK_STATE;
  private boolean inCollection;
  private int linesConsumed = 0;

  private final Lock mutatorLock = VM.newLock(getName() + "mutator");
  private final Lock gcLock = VM.newLock(getName() + "gc");

  private Address allocBlockCursor = Address.zero();
  private Address allocBlockSentinel = Address.zero();
  private boolean exhaustedReusableSpace = true;

  private final ChunkList chunkMap = new ChunkList();
  private final Defrag defrag;

  /****************************************************************************
   *
   * Initialization
   */

  static {
    Options.lineReuseRatio = new LineReuseRatio();
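    // The threshold is a fraction of the maximum block mark state.  Illustrative
    // arithmetic only: if the ratio were 0.9 and MAX_BLOCK_MARK_STATE corresponded
    // to a fully marked block, blocks with up to 90% of their lines marked would
    // be treated as reusable (see getUsableLinesInRegion below).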
    reusableMarkStateThreshold = (short) (Options.lineReuseRatio.getValue() * MAX_BLOCK_MARK_STATE);
  }

  /**
   * The caller specifies the region of virtual memory to be used for
   * this space.  If this region conflicts with an existing space,
   * then the constructor will fail.
   *
   * @param name The name of this space (used when printing error messages etc)
   * @param vmRequest The virtual memory request
   */
  public ImmixSpace(String name, VMRequest vmRequest) {
    this(name, true, vmRequest);
  }

  /**
   * The caller specifies the region of virtual memory to be used for
   * this space.  If this region conflicts with an existing space,
   * then the constructor will fail.
   *
   * @param name The name of this space (used when printing error messages etc)
   * @param zeroed if true, allocations return zeroed memory
   * @param vmRequest The virtual memory request
   */
  public ImmixSpace(String name, boolean zeroed, VMRequest vmRequest) {
    super(name, false, false, zeroed, vmRequest);
    if (vmRequest.isDiscontiguous())
      pr = new FreeListPageResource(this, Chunk.getRequiredMetaDataPages());
    else
      pr = new FreeListPageResource(this, start, extent, Chunk.getRequiredMetaDataPages());
    defrag = new Defrag((FreeListPageResource) pr);
  }

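  /**
   * Interruptible set-up of defragmentation support: prepares the
   * histograms used by {@link Defrag}.
   */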
  @Interruptible
  public void initializeDefrag() {
    defrag.prepareHistograms();
  }

  /****************************************************************************
   *
   * Global prepare and release
   */

  /**
   * Prepare for a new collection increment.
   *
   * @param majorGC whether the collection will be a full heap collection
   */
  public void prepare(boolean majorGC) {
    if (majorGC) {
      markState = ObjectHeader.deltaMarkState(markState, true);
      lineMarkState++;
      if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(lineMarkState <= MAX_LINE_MARK_STATE);
    }
    chunkMap.reset();
    defrag.prepare(chunkMap, this);
    inCollection = true;
  }

  /**
   * A new collection increment has completed.  Release global resources.
   * @param majorGC whether the collection was a full heap collection
   * @return whether defragmentation occurred
   */
  public boolean release(boolean majorGC) {
    boolean didDefrag = defrag.inDefrag();
    if (majorGC) {
      if (lineMarkState == MAX_LINE_MARK_STATE)
        lineMarkState = RESET_LINE_MARK_STATE;
      lineUnavailState = lineMarkState;
    }
    chunkMap.reset();
    defrag.globalRelease();
    inCollection = false;

    /* set up reusable space */
    if (allocBlockCursor.isZero()) allocBlockCursor = chunkMap.getHeadChunk();
    allocBlockSentinel = allocBlockCursor;
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(isRecycleAllocChunkAligned(allocBlockSentinel));
    exhaustedReusableSpace = allocBlockCursor.isZero();
    if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
      Log.write("gr[allocBlockCursor: "); Log.write(allocBlockCursor); Log.write(" allocBlockSentinel: "); Log.write(allocBlockSentinel); Log.writeln("]");
    }

    /* really just want this to happen once after options are booted, but no harm in re-doing it */
    reusableMarkStateThreshold = (short) (Options.lineReuseRatio.getValue() * MAX_BLOCK_MARK_STATE);
    Defrag.defragReusableMarkStateThreshold = (short) (Options.defragLineReuseRatio.getValue() * MAX_BLOCK_MARK_STATE);

    linesConsumed = 0;
    return didDefrag;
  }

  /**
   * Decide whether to defragment during the coming collection, given the kind
   * of collection being performed.
   *
   * @param emergencyCollection Is this collection an emergency (last did not yield enough)?
   * @param collectWholeHeap Is this a whole heap collection?
   * @param collectionAttempt Which attempt is this to collect?
   * @param userTriggeredCollection Was this collection requested by the user?
   */
  public void decideWhetherToDefrag(boolean emergencyCollection, boolean collectWholeHeap, int collectionAttempt, boolean userTriggeredCollection) {
    defrag.decideWhetherToDefrag(emergencyCollection, collectWholeHeap, collectionAttempt, userTriggeredCollection, exhaustedReusableSpace);
  }

  /**
   * Return the amount of headroom required to allow defrag, so this can be included in a collection reserve.
   *
   * @return The number of pages.
   */
  public int defragHeadroomPages() {
    return defrag.getDefragHeadroomPages();
  }

  /****************************************************************************
   *
   * Collection state access methods
   */

  /**
   * Return {@code true} if this space is currently being collected.
   *
   * @return {@code true} if this space is currently being collected.
   */
  @Inline
  public boolean inImmixCollection() {
    return inCollection;
  }

  /**
   * Return {@code true} if this space is currently being defragmented.
   *
   * @return {@code true} if this space is currently being defragmented.
   */
  @Inline
  public boolean inImmixDefragCollection() {
    return inCollection && defrag.inDefrag();
  }

  /**
   * Return the number of pages allocated since the last collection
   *
   * @return The number of pages allocated since the last collection
   */
  public int getPagesAllocated() {
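    // Lines are converted to pages with a shift.  Illustrative arithmetic only:
    // with 4KB pages and 256-byte lines the shift is 12 - 8 = 4, i.e. 16 lines per page.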
    return linesConsumed >> (LOG_BYTES_IN_PAGE - LOG_BYTES_IN_LINE);
  }

  /**
   * Return the reusable mark state threshold, which determines how
   * eagerly lines should be recycled (by default these values are
   * set so that all lines are recycled).
   *
   * @param forDefrag Whether the query is in the context of a defragmenting collection
   * @return The reusable mark state threshold
   */
  @Inline
  public static short getReusuableMarkStateThreshold(boolean forDefrag) {
    return forDefrag ? Defrag.defragReusableMarkStateThreshold : reusableMarkStateThreshold;
  }

  /****************************************************************************
   *
   * Allocation
   */

  /**
   * Return a pointer to a set of new usable blocks, or {@code Address.zero()}
   * if none are available.  Use different block selection heuristics depending
   * on whether the allocation request is "hot" or "cold".
   *
   * @param hot True if the requesting context is for hot allocations (used for
   * allocations from high allocation volume sites).
   * @param copy Whether the block is being acquired by a collector for copying
   * (rather than by a mutator)
   * @param lineUseCount The number of lines the requester expects to consume
   * (used only for allocation accounting)
   * @return the address of the acquired block, or {@code Address.zero()}
   *  if no usable blocks are available
   */
  public Address getSpace(boolean hot, boolean copy, int lineUseCount) {
    Address rtn;
    if (copy)
      defrag.getBlock();

    linesConsumed += lineUseCount;

    rtn = acquire(PAGES_IN_BLOCK);

    if (VM.VERIFY_ASSERTIONS) {
      VM.assertions._assert(Block.isAligned(rtn));
      VM.assertions._assert(!(copy && Block.isDefragSource(rtn)));
    }

    if (!rtn.isZero()) {
      Block.setBlockAsInUse(rtn);
      Chunk.updateHighWater(rtn);
      if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
        Log.write("gs["); Log.write(rtn); Log.write(" -> "); Log.write(rtn.plus(BYTES_IN_BLOCK - 1)); Log.write(" copy: "); Log.write(copy); Log.writeln("]");
      }
    }

    return rtn;
  }

  @Override
  public void growSpace(Address start, Extent bytes, boolean newChunk) {
    super.growSpace(start, bytes, newChunk);
    if (newChunk) {
      Address chunk = chunkAlign(start.plus(bytes), true);
      if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(chunkAlign(start.plus(bytes), true).EQ(chunk));
      Chunk.clearMetaData(chunk);
      chunkMap.addNewChunkToMap(chunk);
    }
  }

  public Address acquireReusableBlocks() {
    if (VM.VERIFY_ASSERTIONS) {
      VM.assertions._assert(isRecycleAllocChunkAligned(allocBlockCursor));
      VM.assertions._assert(isRecycleAllocChunkAligned(allocBlockSentinel));
    }
    Address rtn;

    lock();
    if (exhaustedReusableSpace)
      rtn = Address.zero();
    else {
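      // Hand out reusable blocks one "recycle alloc chunk" at a time, advancing the
      // cursor linearly through the chunk map; wrapping back around to the sentinel
      // (or running off the end of the map) means the reusable space is exhausted.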
      rtn = allocBlockCursor;
      Address lastAllocChunk = chunkAlign(allocBlockCursor, true);
      allocBlockCursor = allocBlockCursor.plus(BYTES_IN_RECYCLE_ALLOC_CHUNK);
      if (allocBlockCursor.GT(Chunk.getHighWater(lastAllocChunk)))
        allocBlockCursor = chunkMap.nextChunk(lastAllocChunk);
      if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
        Log.write("arb[ rtn: "); Log.write(rtn); Log.write(" allocBlockCursor: "); Log.write(allocBlockCursor); Log.write(" allocBlockSentinel: "); Log.write(allocBlockSentinel); Log.writeln("]");
      }

      if (allocBlockCursor.isZero() || allocBlockCursor.EQ(allocBlockSentinel)) {
        exhaustedReusableSpace = true;
        if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
          Log.writeln("[Reusable space exhausted]");
        }
      }
    }
    unlock();
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(isRecycleAllocChunkAligned(rtn));
    return rtn;
  }

  /**
   * Release a block.  A block is free, so call the underlying page allocator
   * to release the associated storage.
   *
   * @param block The address of the block to be released
   */
  @Override
  @Inline
  public void release(Address block) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(Block.isAligned(block));
    Block.setBlockAsUnallocated(block);
    ((FreeListPageResource) pr).releasePages(block);
  }

  /**
   * {@inheritDoc}<p>
   *
   * This hook is called by the page level allocators whenever a
   * complete discontiguous chunk is released.
   */
  @Override
  public int releaseDiscontiguousChunks(Address chunk) {
    chunkMap.removeChunkFromMap(chunk);
    return super.releaseDiscontiguousChunks(chunk);
  }

  /****************************************************************************
   *
   * Header manipulation
   */

  /**
   * Perform any required post allocation initialization
   *
   * @param object the object ref to the storage to be initialized
   * @param bytes size of the allocated object in bytes
   */
  @Inline
  public void postAlloc(ObjectReference object, int bytes) {
    if (bytes > BYTES_IN_LINE)
      ObjectHeader.markAsStraddling(object);
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(ObjectHeader.isNewObject(object));
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!ForwardingWord.isForwardedOrBeingForwarded(object));
  }

  /**
   * Perform any required post copy (i.e. in-GC allocation) initialization.
   * This is relevant (for example) when immix is used as the mature space in
   * a copying GC.
   *
   * @param object the object ref to the storage to be initialized
   * @param bytes size of the copied object in bytes
   * @param majorGC Is this copy happening during a major GC?
   */
  @Inline
  public void postCopy(ObjectReference object, int bytes, boolean majorGC) {
    ObjectHeader.writeMarkState(object, markState, bytes > BYTES_IN_LINE);
    if (!MARK_LINE_AT_SCAN_TIME && majorGC) markLines(object);
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!ForwardingWord.isForwardedOrBeingForwarded(object));
    if (VM.VERIFY_ASSERTIONS && HeaderByte.NEEDS_UNLOGGED_BIT) VM.assertions._assert(HeaderByte.isUnlogged(object));
  }

  /****************************************************************************
   *
   * Object tracing
   */

  /**
   * Trace a reference to an object.  If the object header is not already
   * marked, mark the object and enqueue it for subsequent processing.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   * @param allocator The allocator to which any copying should be directed
   * @return The object, which may have been moved.
   */
  @Inline
  public ObjectReference traceObject(TransitiveClosure trace, ObjectReference object, int allocator) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(defrag.determined(true));

    ObjectReference rtn = object;
    if (isDefragSource(object))
      rtn = traceObjectWithOpportunisticCopy(trace, object, allocator, false);
    else
      traceObjectWithoutMoving(trace, object);

    if (VM.VERIFY_ASSERTIONS) {
      VM.assertions._assert(!rtn.isNull());
      VM.assertions._assert(defrag.spaceExhausted() || !isDefragSource(rtn) || (ObjectHeader.isPinnedObject(rtn)));
    }
    return rtn;
  }

  /**
   * Trace a reference to an object in the context of a non-moving collection.  This
   * call is optimized for the simpler non-moving case.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   * @return The object (there is no object forwarding in this
   * trace method, so we always return the same object: this could be a
   * void method but for compliance with a more general interface).
   */
  @Inline
  public ObjectReference fastTraceObject(TransitiveClosure trace, ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(defrag.determined(false));
    traceObjectWithoutMoving(trace, object);
    return object;
  }

  /**
   * Trace a reference to an object during a nursery collection for
   * a sticky mark bits implementation of immix.  If the object header
   * is not already marked, mark the object and enqueue it for subsequent
   * processing.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   * @param allocator The allocator to which any copying should be directed
   * @return Either the object or a forwarded object, depending on
   * the policy in place.
   */
  @Inline
  public ObjectReference nurseryTraceObject(TransitiveClosure trace, ObjectReference object, int allocator) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!defrag.inDefrag());
    if (ObjectHeader.isMatureObject(object))
      return object;
    else if (PREFER_COPY_ON_NURSERY_GC)
      return traceObjectWithOpportunisticCopy(trace, object, allocator, true);
    else
      return fastTraceObject(trace, object);
  }

  /**
   * Trace a reference to an object.  This interface is not supported by immix, since
   * we require the allocator to be identified except for the special case of the fast
   * trace.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   * @return {@code null} (never reached: this method always fails)
   */
  @Override
  public ObjectReference traceObject(TransitiveClosure trace, ObjectReference object) {
    VM.assertions.fail("unsupported interface");
    return null;
  }

  /**
   * Trace a reference to an object in the context of a non-moving collection.  This
   * call is optimized for the simpler non-moving case.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   */
  @Inline
  private void traceObjectWithoutMoving(TransitiveClosure trace, ObjectReference object) {
    byte markValue = markState;
    byte oldMarkState = ObjectHeader.testAndMark(object, markValue);
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!defrag.inDefrag() || defrag.spaceExhausted() || !isDefragSource(object));
    if (oldMarkState != markValue) {
      if (!MARK_LINE_AT_SCAN_TIME)
        markLines(object);
      trace.processNode(object);
    }
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!ForwardingWord.isForwardedOrBeingForwarded(object));
    if (VM.VERIFY_ASSERTIONS && HeaderByte.NEEDS_UNLOGGED_BIT) VM.assertions._assert(HeaderByte.isUnlogged(object));
  }

  /**
   * Trace a reference to an object, forwarding the object if appropriate.
   * If the object is not already marked, mark the object and enqueue it
   * for subsequent processing.
   *
   * @param trace The trace performing the transitive closure
   * @param object The object to be traced.
   * @param allocator The allocator to which any copying should be directed
   * @param nurseryCollection whether the current collection is a nursery collection
   * @return Either the object or a forwarded object, if it was forwarded.
   */
  @Inline
  private ObjectReference traceObjectWithOpportunisticCopy(TransitiveClosure trace, ObjectReference object, int allocator, boolean nurseryCollection) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert((nurseryCollection && !ObjectHeader.isMatureObject(object)) || (defrag.determined(true) && isDefragSource(object)));

    /* Race to be the (potential) forwarder */
    Word priorStatusWord = ForwardingWord.attemptToForward(object);
    if (ForwardingWord.stateIsForwardedOrBeingForwarded(priorStatusWord)) {
      /* We lost the race; the object is either forwarded or being forwarded by another thread. */
      /* Note that the concurrent attempt to forward the object may fail, so the object may remain in-place */
      ObjectReference rtn = ForwardingWord.spinAndGetForwardedObject(object, priorStatusWord);
      if (VM.VERIFY_ASSERTIONS && rtn == object) VM.assertions._assert((nurseryCollection && ObjectHeader.testMarkState(object, markState)) || defrag.spaceExhausted() || ObjectHeader.isPinnedObject(object));
      if (VM.VERIFY_ASSERTIONS && rtn != object) VM.assertions._assert(nurseryCollection || !isDefragSource(rtn));
      if (VM.VERIFY_ASSERTIONS && HeaderByte.NEEDS_UNLOGGED_BIT) VM.assertions._assert(HeaderByte.isUnlogged(rtn));
      return rtn;
    } else {
      byte priorState = (byte) (priorStatusWord.toInt() & 0xFF);
      /* the object is unforwarded, either because this is the first thread to reach it, or because the object can't be forwarded */
      if (ObjectHeader.testMarkState(priorState, markState)) {
        /* the object has not been forwarded, but has the correct mark state; unlock and return unmoved object */
        /* Note that in a sticky mark bits collector, the mark state does not change at each GC, so correct mark state does not imply another thread got there first */
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(nurseryCollection || defrag.spaceExhausted() || ObjectHeader.isPinnedObject(object));
        ObjectHeader.returnToPriorStateAndEnsureUnlogged(object, priorState); // return to uncontested state
        if (VM.VERIFY_ASSERTIONS && Plan.NEEDS_LOG_BIT_IN_HEADER) VM.assertions._assert(HeaderByte.isUnlogged(object));
        return object;
      } else {
        /* we are the first to reach the object; either mark in place or forward it */
        ObjectReference newObject;
        if (ObjectHeader.isPinnedObject(object) || (!nurseryCollection && defrag.spaceExhausted())) {
          /* mark in place */
          ObjectHeader.setMarkStateUnlogAndUnlock(object, priorState, markState);
          newObject = object;
          if (VM.VERIFY_ASSERTIONS && Plan.NEEDS_LOG_BIT_IN_HEADER) VM.assertions._assert(HeaderByte.isUnlogged(newObject));
        } else {
          /* forward */
          if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!ObjectHeader.isPinnedObject(object));
          newObject = ForwardingWord.forwardObject(object, allocator);
          if (VM.VERIFY_ASSERTIONS && Plan.NEEDS_LOG_BIT_IN_HEADER) VM.assertions._assert(HeaderByte.isUnlogged(newObject));
        }
        if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
          Log.write("C["); Log.write(object); Log.write("/");
          Log.write(getName()); Log.write("] -> ");
          Log.write(newObject); Log.write("/");
          Log.write(Space.getSpaceForObject(newObject).getName());
          Log.writeln("]");
        }
        if (!MARK_LINE_AT_SCAN_TIME)
          markLines(newObject);
        trace.processNode(newObject);
        if (VM.VERIFY_ASSERTIONS) {
          if (!((getSpaceForObject(newObject) != this) ||
                (newObject == object) ||
                (nurseryCollection && willNotMoveThisNurseryGC(newObject)) ||
                (defrag.inDefrag() && willNotMoveThisGC(newObject))
               )) {
            Log.write("   object: "); Log.writeln(object);
            Log.write("newObject: "); Log.writeln(newObject);
            Log.write("    space: "); Log.writeln(getName());
            Log.write(" nursery?: "); Log.writeln(nurseryCollection);
            Log.write("  mature?: "); Log.writeln(ObjectHeader.isMatureObject(object));
            Log.write("  wnmngc?: "); Log.writeln(willNotMoveThisNurseryGC(newObject));
            Log.write("  pinned?: "); Log.writeln(ObjectHeader.isPinnedObject(object));
            Space otherSpace = getSpaceForObject(newObject);
            Log.write(" space(o): "); Log.writeln(otherSpace == null ? "<NULL>" : otherSpace.getName());
            VM.assertions._assert(false);
          }
        }
        return newObject;
      }
    }
  }

  /**
   * Mark the line(s) associated with a given object.  This is distinct from the
   * above tracing code because line marks are stored separately from the
   * object headers (thus both must be set), and also because we found empirically
   * that it was more efficient to perform the line mark of the object during
   * the scan phase (which occurs after the trace phase), presumably because
   * the latency of the associated memory operations was better hidden in the
   * context of that code.
   *
   * @param object The object which is live and for which the associated lines
   * must be marked.
   */
  public void markLines(ObjectReference object) {
    Address address = VM.objectModel.objectStartRef(object);
    Line.mark(address, lineMarkState);
    if (ObjectHeader.isStraddlingObject(object))
      Line.markMultiLine(address, object, lineMarkState);
  }

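  /**
   * Return the index of the next unavailable (used) line, searching from the
   * given line and consulting the line mark metadata at the given base address.
   *
   * @param baseLineAvailAddress The base address of the line availability (mark) metadata
   * @param line The line index from which to search
   * @return The index of the next unavailable line
   */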
  public int getNextUnavailableLine(Address baseLineAvailAddress, int line) {
    return Line.getNextUnavailable(baseLineAvailAddress, line, lineUnavailState);
  }

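  /**
   * Return the index of the next available (unused) line, searching from the
   * given line and consulting the line mark metadata at the given base address.
   *
   * @param baseLineAvailAddress The base address of the line availability (mark) metadata
   * @param line The line index from which to search
   * @return The index of the next available line
   */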
  public int getNextAvailableLine(Address baseLineAvailAddress, int line) {
    return Line.getNextAvailable(baseLineAvailAddress, line, lineUnavailState);
  }

  /****************************************************************************
   *
   * Establish available lines
   */

  /**
   * Establish the number of recyclable lines available for allocation
   * during defragmentation, populating the spillAvailHistogram, which buckets
   * available lines according to the number of holes on the block on which
   * the available lines reside.
   *
   * @param spillAvailHistogram A histogram of availability to be populated
   * @return The number of available recyclable lines
   */
  int getAvailableLines(int[] spillAvailHistogram) {
    int availableLines;
    if (allocBlockCursor.isZero() || exhaustedReusableSpace) {
      availableLines = 0;
    } else {
      if (allocBlockCursor.EQ(allocBlockSentinel)) {
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!exhaustedReusableSpace);
        allocBlockCursor = chunkMap.getHeadChunk();
        allocBlockSentinel = allocBlockCursor;
      }
      availableLines = getUsableLinesInRegion(allocBlockCursor, allocBlockSentinel, spillAvailHistogram);
    }
    return availableLines;
  }

  /**
   * Return the number of lines usable for allocation during defragmentation in the
   * address range specified by start and end.  Populate a histogram to indicate where
   * the usable lines reside as a function of block hole count.
   *
   * @param start  The start of the region to be checked for availability
   * @param end The end of the region to be checked for availability
   * @param spillAvailHistogram The histogram which will be populated
   * @return The number of usable lines
   */
  private int getUsableLinesInRegion(Address start, Address end, int[] spillAvailHistogram) {
    int usableLines = 0;
    Address blockCursor = Chunk.isAligned(start) ? start.plus(Chunk.FIRST_USABLE_BLOCK_INDEX << LOG_BYTES_IN_BLOCK) : start;
    Address blockStateCursor = Block.getBlockMarkStateAddress(blockCursor);
    Address chunkCursor = Chunk.align(blockCursor);
    if (Chunk.getByteOffset(end) < Chunk.FIRST_USABLE_BLOCK_INDEX << LOG_BYTES_IN_BLOCK)
      end = Chunk.align(end).plus(Chunk.FIRST_USABLE_BLOCK_INDEX << LOG_BYTES_IN_BLOCK);

    for (int i = 0; i <= MAX_CONSV_SPILL_COUNT; i++) spillAvailHistogram[i] = 0;

    Address highwater = Chunk.getHighWater(chunkCursor);
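    // Walk blocks from the cursor to the end address, moving to the next chunk when
    // the cursor passes a chunk's high water mark.  A block whose mark state is
    // non-zero but at or below the reuse threshold contributes its unmarked lines
    // to the histogram bucket for its conservative spill (hole) count.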
    do {
      short markState = blockStateCursor.loadShort();
      if (markState != 0 && markState <= reusableMarkStateThreshold) {
        int usable = LINES_IN_BLOCK - markState;
        short bucket = Block.getConservativeSpillCount(blockCursor);
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(bucket >= 0 && bucket <= MAX_CONSV_SPILL_COUNT);
        spillAvailHistogram[bucket] += usable;
        usableLines += usable;
      }
      blockCursor = blockCursor.plus(BYTES_IN_BLOCK);
      if (blockCursor.GT(highwater)) {
        chunkCursor = chunkMap.nextChunk(chunkCursor);
        if (chunkCursor.isZero()) break;
        blockCursor = chunkCursor.plus(Chunk.FIRST_USABLE_BLOCK_INDEX << LOG_BYTES_IN_BLOCK);
        blockStateCursor = Block.getBlockMarkStateAddress(blockCursor);
        highwater = Chunk.getHighWater(chunkCursor);
      } else
        blockStateCursor = blockStateCursor.plus(Block.BYTES_IN_BLOCK_STATE_ENTRY);
    } while (blockCursor.NE(end));

    return usableLines;
  }

  /****************************************************************************
   *
   * Object state
   */

  /**
   * Generic test of the liveness of an object
   *
   * @param object The object in question
   * @return {@code true} if this object is known to be live (i.e. it is marked)
   */
  @Override
  @Inline
  public boolean isLive(ObjectReference object) {
    if (defrag.inDefrag() && isDefragSource(object))
      return ForwardingWord.isForwardedOrBeingForwarded(object) || ObjectHeader.testMarkState(object, markState);
    else
      return ObjectHeader.testMarkState(object, markState);
  }

  /**
   * Test the liveness of an object during a copying (sticky mark bits) nursery collection
   *
   * @param object The object in question
   * @return {@code true} if this object is known to be live (i.e. it is marked)
   */
  @Inline
  public boolean copyNurseryIsLive(ObjectReference object) {
    return ForwardingWord.isForwardedOrBeingForwarded(object) || ObjectHeader.testMarkState(object, markState);
  }

  /**
   * Test the liveness of an object when no defragmentation is in progress
   * (the fast, non-moving case).
   *
   * @param object The object in question
   * @return {@code true} if this object is known to be live (i.e. it is marked)
   */
  @Inline
  public boolean fastIsLive(ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!defrag.inDefrag());
    return ObjectHeader.testMarkState(object, markState);
  }

  @Inline
  public boolean willNotMoveThisGC(ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(getSpaceForObject(object) == this && defrag.inDefrag());
    return ObjectHeader.isPinnedObject(object) || willNotMoveThisGC(VM.objectModel.refToAddress(object));
  }

  @Inline
  public boolean willNotMoveThisNurseryGC(ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(getSpaceForObject(object) == this);
    return ObjectHeader.isMatureObject(object);
  }

  @Inline
  private boolean isDefragSource(ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(getSpaceForObject(object) == this);
    return isDefragSource(VM.objectModel.refToAddress(object));
  }

  @Inline
  public boolean willNotMoveThisGC(Address address) {
    return !defrag.inDefrag() || defrag.spaceExhausted() || !isDefragSource(address);
  }

  @Inline
  public boolean isDefragSource(Address address) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(getSpaceForObject(address.toObjectReference()) == this);
    return Block.isDefragSource(address);
  }


  /****************************************************************************
   *
   * Locks
   */

  /**
   * Acquire the appropriate lock depending on whether the context is
   * GC or mutator.
   */
  private void lock() {
    if (inCollection)
      gcLock.acquire();
    else
      mutatorLock.acquire();
  }

  /**
   * Release the appropriate lock depending on whether the context is
   * GC or mutator.
   */
  private void unlock() {
    if (inCollection)
      gcLock.release();
    else
      mutatorLock.release();
  }


  /****************************************************************************
   *
   * Misc
   */

  /**
   * @param ptr the address to check
   * @return whether the address is aligned to a recycle allocation chunk, i.e.
   *  all bits covered by {@link ImmixConstants#RECYCLE_ALLOC_CHUNK_MASK} are zero
   */
  public static boolean isRecycleAllocChunkAligned(Address ptr) {
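    // Alignment test via bit mask: assuming the usual convention that the mask is
    // (recycle alloc chunk size - 1), this is equivalent to
    // ptr % BYTES_IN_RECYCLE_ALLOC_CHUNK == 0.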
    return ptr.toWord().and(RECYCLE_ALLOC_CHUNK_MASK).EQ(Word.zero());
  }

  ChunkList getChunkMap() {
    return chunkMap;
  }

  Defrag getDefrag() {
    return defrag;
  }
}