/*
 *  This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 *  This file is licensed to You under the Eclipse Public License (EPL);
 *  You may not use this file except in compliance with the License. You
 *  may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 *  See the COPYRIGHT.txt file distributed with this work for information
 *  regarding copyright ownership.
 */
package org.mmtk.utility.alloc;

import static org.mmtk.utility.Constants.BYTES_IN_ADDRESS;
import static org.mmtk.utility.Constants.CARD_MASK;
import static org.mmtk.utility.Constants.LOG_BYTES_IN_PAGE;
import static org.mmtk.utility.Constants.LOG_CARD_BYTES;
import static org.mmtk.utility.Constants.LOG_CARD_META_SIZE;
import static org.mmtk.utility.Constants.MAX_ALIGNMENT;
import static org.mmtk.utility.Constants.MIN_ALIGNMENT;
import static org.mmtk.utility.Constants.SUPPORT_CARD_SCANNING;

import org.mmtk.policy.Space;
import org.mmtk.utility.Conversions;
import org.mmtk.utility.Log;
import org.mmtk.utility.gcspy.drivers.LinearSpaceDriver;
import org.mmtk.vm.VM;
import org.vmmagic.pragma.Inline;
import org.vmmagic.pragma.NoInline;
import org.vmmagic.pragma.Uninterruptible;
import org.vmmagic.unboxed.Address;
import org.vmmagic.unboxed.Extent;
import org.vmmagic.unboxed.ObjectReference;
import org.vmmagic.unboxed.Offset;
import org.vmmagic.unboxed.Word;

/**
 * This class implements a bump pointer allocator that allows linearly
 * scanning through the allocated objects. In order to achieve this in the
 * face of parallelism it maintains a header at a region (1 or more blocks)
 * granularity.<p>
 *
 * Intra-block allocation is fast, requiring only a load, an addition, a
 * comparison and a store.  If a block boundary is encountered the allocator
 * will request more memory (virtual and actual).<p>
 *
 * In the current implementation the scanned objects maintain affinity
 * with the thread that allocated the objects in the region. In the future,
 * subclasses may be allowed to alter this arrangement in order to improve
 * load balancing during the parallel scan.<p>
 *
 * Each region is laid out as follows:
 * <pre>
 *
 *  +-------------+-------------+-------------+---------------
 *  | Region  End | Next Region |  Data  End  | Data --&gt;
 *  +-------------+-------------+-------------+---------------
 *
 * </pre>
 *
 * The minimum region size is 32768 bytes, so the 3 or 4 word overhead is
 * less than 0.05% of all space.<p>
 *
 * An intended enhancement is to facilitate a reallocation operation
 * where a second cursor is maintained over earlier regions (and at the
 * limit a lower location in the same region). This would be accompanied
 * by an alternative slow path that would allow reuse of empty regions.<p>
 *
 * This class relies on the supporting virtual machine implementing the
 * getNextObject and related operations.
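 *
 * An illustrative usage sketch follows (the subclass, space and scanner
 * named here are hypothetical, not part of MMTk): concrete allocators
 * extend this class, bind it to a space via the constructor, and call
 * <code>alloc</code> on the fast path.
 * <pre>
 *
 *   BumpPointer bp = new MyBumpPointer(mySpace, true); // hypothetical subclass
 *   Address cell = bp.alloc(24, MIN_ALIGNMENT, 0);     // allocate 24 bytes
 *   // ... initialize the object at 'cell' ...
 *   bp.linearScan(myScanner);                          // walk allocated objects
 *
 * </pre>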
 */
@Uninterruptible public class BumpPointer extends Allocator {

  /****************************************************************************
   *
   * Class variables
   */

  // Block size defines slow path periodicity.

  /**
   *
   */
  private static final int LOG_DEFAULT_STEP_SIZE = 30; // 1G: let the external slow path dominate
  private static final int STEP_SIZE = 1 << (SUPPORT_CARD_SCANNING ? LOG_CARD_BYTES : LOG_DEFAULT_STEP_SIZE);
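  // Note on the step size: with SUPPORT_CARD_SCANNING the internal limit
  // advances one card at a time, so allocation drops into the out-of-line
  // internal slow path at every card boundary, where card anchors are
  // recorded.  Otherwise the step is 1GB, so it is normally the external
  // limit (the end of the current block) that triggers the slow path.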
  protected static final int LOG_BLOCK_SIZE = LOG_BYTES_IN_PAGE + 3;
  protected static final Word BLOCK_MASK = Word.one().lsh(LOG_BLOCK_SIZE).minus(Word.one());
  private static final int BLOCK_SIZE = (1 << LOG_BLOCK_SIZE);


  // Offsets into header
  protected static final Offset REGION_LIMIT_OFFSET = Offset.zero();
  protected static final Offset NEXT_REGION_OFFSET = REGION_LIMIT_OFFSET.plus(BYTES_IN_ADDRESS);
  protected static final Offset DATA_END_OFFSET = NEXT_REGION_OFFSET.plus(BYTES_IN_ADDRESS);

  // Data must start particle-aligned.
  protected static final Offset DATA_START_OFFSET = alignAllocationNoFill(
      Address.zero().plus(DATA_END_OFFSET.plus(BYTES_IN_ADDRESS)),
      MIN_ALIGNMENT, 0).toWord().toOffset();
  protected static final Offset MAX_DATA_START_OFFSET = alignAllocationNoFill(
      Address.zero().plus(DATA_END_OFFSET.plus(BYTES_IN_ADDRESS)),
      MAX_ALIGNMENT, 0).toWord().toOffset();
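
  /*
   * For concreteness (illustrative only, assuming a 32-bit build where
   * BYTES_IN_ADDRESS = 4, MIN_ALIGNMENT = 4 and MAX_ALIGNMENT = 8):
   * REGION_LIMIT_OFFSET = 0, NEXT_REGION_OFFSET = 4, DATA_END_OFFSET = 8,
   * so data starts at offset 12 (DATA_START_OFFSET) or, rounded up to the
   * maximum alignment, at offset 16 (MAX_DATA_START_OFFSET).  That is the
   * "3 or 4 word" header overhead mentioned in the class comment.
   */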

  public static final int MINIMUM_DATA_SIZE = (1 << LOG_BLOCK_SIZE) - MAX_DATA_START_OFFSET.toInt();

  private static final int SIZE_OF_TWO_X86_CACHE_LINES_IN_BYTES = 128;

  private static final boolean VERBOSE = false;

  /****************************************************************************
   *
   * Instance variables
   */

  /** insertion point */
  protected Address cursor;
  /** current internal slow-path sentinel for bump pointer */
  private Address internalLimit;
  /** current external slow-path sentinel for bump pointer */
  private Address limit;
  /** space this bump pointer is associated with */
  protected Space space;
  /** first contiguous region */
  protected Address initialRegion;
  /** linear scanning is permitted if true */
  protected final boolean allowScanning;
  /** current contiguous region */
  protected Address region;


  /**
   * Constructor.
   *
   * @param space The space to bump point into.
   * @param allowScanning Allow linear scanning of this region of memory.
   */
  protected BumpPointer(Space space, boolean allowScanning) {
    this.space = space;
    this.allowScanning = allowScanning;
    reset();
  }

  /**
   * Reset the allocator. Note that this does not reset the space.
   * That must be done by the caller.
   */
  public final void reset() {
    cursor = Address.zero();
    limit = Address.zero();
    internalLimit = Address.zero();
    initialRegion = Address.zero();
    region = Address.zero();
  }

  /**
   * Re-associate this bump pointer with a different space. Also
   * reset the bump pointer so that it will use the new space
   * on the next call to <code>alloc</code>.
   *
   * @param space The space to associate the bump pointer with.
   */
  public final void rebind(Space space) {
    reset();
    this.space = space;
  }

  /**
   * Allocate space for a new object.  This is frequently executed code and
   * the coding is deliberately sensitive to the optimizing compiler.
   * After changing this, always check the IR/MC that is generated.
   *
   * @param bytes The number of bytes allocated
   * @param align The requested alignment
   * @param offset The offset from the alignment
   * @return The address of the first byte of the allocated region
   */
  @Inline
  public final Address alloc(int bytes, int align, int offset) {
    Address start = alignAllocationNoFill(cursor, align, offset);
    Address end = start.plus(bytes);
    if (end.GT(internalLimit))
      return allocSlow(start, end, align, offset);
    fillAlignmentGap(cursor, start);
    cursor = end;
    end.plus(SIZE_OF_TWO_X86_CACHE_LINES_IN_BYTES).prefetch();
    return start;
  }

  /**
   * Internal allocation slow path.  This is called whenever the bump
   * pointer reaches the internal limit.  The code is forced out of
   * line.  If required, the external slow path is taken; it is inlined
   * into this method since this method is already out of line.
   *
   * @param start The start address for the pending allocation
   * @param end The end address for the pending allocation
   * @param align The requested alignment
   * @param offset The offset from the alignment
   * @return The address of the first byte of the allocated region
   */
  @NoInline
  private Address allocSlow(Address start, Address end, int align,
      int offset) {
    Address rtn = null;
    Address card = null;
    if (SUPPORT_CARD_SCANNING)
      card = getCard(start.plus(CARD_MASK)); // round up
    if (end.GT(limit)) { /* external slow path */
      rtn = allocSlowInline(end.diff(start).toInt(), align, offset);
      if (SUPPORT_CARD_SCANNING && card.NE(getCard(rtn.plus(CARD_MASK))))
        card = getCard(rtn); // round down
    } else {             /* internal slow path */
      while (internalLimit.LE(end))
        internalLimit = internalLimit.plus(STEP_SIZE);
      if (internalLimit.GT(limit))
        internalLimit = limit;
      fillAlignmentGap(cursor, start);
      cursor = end;
      rtn = start;
    }
    if (SUPPORT_CARD_SCANNING && !rtn.isZero())
      createCardAnchor(card, rtn, end.diff(start).toInt());
    return rtn;
  }

  /**
   * Given an allocation which starts a new card, create a record of
   * where the start of the object is relative to the start of the
   * card.
   *
   * @param card An address that lies within the card to be marked
   * @param start The address of an object which creates a new card.
   * @param bytes The size of the pending allocation in bytes (used for debugging)
   */
  private void createCardAnchor(Address card, Address start, int bytes) {
    if (VM.VERIFY_ASSERTIONS) {
      VM.assertions._assert(allowScanning);
      VM.assertions._assert(card.EQ(getCard(card)));
      VM.assertions._assert(start.diff(card).sLE(MAX_DATA_START_OFFSET));
      VM.assertions._assert(start.diff(card).toInt() >= -CARD_MASK);
    }
    while (bytes > 0) {
      int offset = start.diff(card).toInt();
      getCardMetaData(card).store(offset);
      card = card.plus(1 << LOG_CARD_BYTES);
      bytes -= (1 << LOG_CARD_BYTES);
    }
  }

  /**
   * Return the start of the card corresponding to a given address.
   *
   * @param address The address for which the card start is required
   * @return The start of the card containing the address
   */
  private static Address getCard(Address address) {
    return address.toWord().and(Word.fromIntSignExtend(CARD_MASK).not()).toAddress();
  }

  /**
   * Return the address of the metadata for a card, given the address of the card.
   * @param card The address of some card
   * @return The address of the metadata associated with that card
   */
  private static Address getCardMetaData(Address card) {
    Address metadata = EmbeddedMetaData.getMetaDataBase(card);
    return metadata.plus(EmbeddedMetaData.getMetaDataOffset(card, LOG_CARD_BYTES - LOG_CARD_META_SIZE, LOG_CARD_META_SIZE));
  }

  /**
   * External allocation slow path (called by the superclass when the slow
   * path is actually taken).  This is necessary (rather than a direct call
   * from the fast path) because of the possibility of a thread switch
   * and corresponding re-association of bump pointers to kernel
   * threads.
   *
   * @param bytes The number of bytes allocated
   * @param align The requested alignment
   * @param offset The offset from the alignment
   * @return The address of the first byte of the allocated region or
   * zero on failure
   */
  @Override
  protected final Address allocSlowOnce(int bytes, int align, int offset) {
    /* Check we have been bound to a space */
    if (space == null) {
      VM.assertions.fail("Allocation on unbound bump pointer.");
    }

    /* Check if we already have a block to use */
    if (allowScanning && !region.isZero()) {
      Address nextRegion = getNextRegion(region);
      if (!nextRegion.isZero()) {
        return consumeNextRegion(nextRegion, bytes, align, offset);
      }
    }

    /* Acquire space, block aligned, that can accommodate the request */
    Extent blockSize = Word.fromIntZeroExtend(bytes).plus(BLOCK_MASK)
                       .and(BLOCK_MASK.not()).toExtent();
    Address start = space.acquire(Conversions.bytesToPages(blockSize));

    if (start.isZero()) return start; // failed allocation

    if (!allowScanning) { // simple allocator
      if (start.NE(limit)) cursor = start;  // discontiguous
      updateLimit(start.plus(blockSize), start, bytes);
    } else                // scannable allocator
      updateMetaData(start, blockSize, bytes);
    return alloc(bytes, align, offset);
  }

  /**
   * Update the limit pointer.  As a side effect update the internal limit
   * pointer appropriately.
   *
   * @param newLimit The new value for the limit pointer
   * @param start The start of the region to be allocated into
   * @param bytes The size of the pending allocation (if any).
   */
  @Inline
  protected final void updateLimit(Address newLimit, Address start, int bytes) {
    limit = newLimit;
    internalLimit = start.plus(STEP_SIZE);
    if (internalLimit.GT(limit))
      internalLimit = limit;
    else {
      while (internalLimit.LT(cursor.plus(bytes)))
        internalLimit = internalLimit.plus(STEP_SIZE);
      if (VM.VERIFY_ASSERTIONS)
        VM.assertions._assert(internalLimit.LE(limit));
    }
  }

  /**
   * A bump pointer chunk/region has been consumed but the next contiguous
   * region is available, so consume it and then return the address of the
   * start of a memory region satisfying the outstanding allocation request.
   * This is relevant when re-using memory, as in a mark-compact collector.
   *
   * @param nextRegion The region to be consumed
   * @param bytes The number of bytes allocated
   * @param align The requested alignment
   * @param offset The offset from the alignment
   * @return The address of the first byte of the allocated region or
   * zero on failure
   */
  private Address consumeNextRegion(Address nextRegion, int bytes, int align,
        int offset) {
    setDataEnd(region,cursor); // record where data ends in the region we are leaving
    region = nextRegion;
    cursor = getDataStart(nextRegion);
    updateLimit(getRegionLimit(nextRegion), nextRegion, bytes);
    setDataEnd(nextRegion,Address.zero());
    VM.memory.zero(false, cursor, limit.diff(cursor).toWord().toExtent());
    reusePages(Conversions.bytesToPages(limit.diff(region)));

    return alloc(bytes, align, offset);
  }

  /******************************************************************************
   *
   *   Accessor methods for the region metadata fields.
   *
   */

  /**
   * The first offset in a region after the header
   * @param region The region
   * @return The lowest address at which data can be stored
   */
  @Inline
  public static Address getDataStart(Address region) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!region.isZero());
    return region.plus(DATA_START_OFFSET);
  }

  /**
   * The next region in the linked-list of regions
   * @param region The region
   * @return The next region in the list
   */
  @Inline
  public static Address getNextRegion(Address region) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!region.isZero());
    Address result = region.plus(NEXT_REGION_OFFSET).loadAddress();
    return result;
  }

  /**
   * Set the next region in the linked-list of regions
   * @param region The region
   * @param nextRegion the next region in the list
   */
  @Inline
  public static void setNextRegion(Address region, Address nextRegion) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!region.isZero());
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!nextRegion.EQ(Address.fromIntZeroExtend(0xdeadbeef)));
    region.store(nextRegion,NEXT_REGION_OFFSET);
  }

  /**
   * Clear the next region pointer in the linked-list of regions
   * @param region The region
   */
  @Inline
  public static void clearNextRegion(Address region) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!region.isZero());
    region.store(Address.zero(),NEXT_REGION_OFFSET);
  }

  /**
   * @param region The bump-pointer region
   * @return The DATA_END address from the region header
   */
  @Inline
  public static Address getDataEnd(Address region) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!region.isZero());
    return region.plus(DATA_END_OFFSET).loadAddress();
  }

  /**
   * @param region The bump-pointer region
   * @param endAddress The new DATA_END address to store in the region header
   */
  public static void setDataEnd(Address region, Address endAddress) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!region.isZero());
    region.store(endAddress, DATA_END_OFFSET);
    if (VERBOSE) {
      Log.write("setDataEnd(");
      Log.write(region);
      Log.write(",");
      Log.write(endAddress);
      Log.writeln(")");
    }
  }

  /**
   * Return the end address of the given region.
   * @param region The region.
   * @return the allocation limit of the region.
   */
  public static Address getRegionLimit(Address region) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!region.isZero());
    return region.plus(REGION_LIMIT_OFFSET).loadAddress();
  }

  /**
   * Stores the limit value in the region header.
   *
   * @param region region address
   * @param limit the limit value
   */
  public static void setRegionLimit(Address region, Address limit) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!region.isZero());
    region.plus(REGION_LIMIT_OFFSET).store(limit);
  }

  /**
   * @param region The region.
   * @return {@code true} if the address is region-aligned
   */
  public static boolean isRegionAligned(Address region) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!region.isZero());
    return region.toWord().and(BLOCK_MASK).isZero();
  }

  /**
   * Sanity check a region header
   * @param region Region to check
   */
  public static void checkRegionMetadata(Address region) {
    if (VM.VERIFY_ASSERTIONS) {
      Address nextRegion = getNextRegion(region);
      Address dataStart = getDataStart(region);
      Address dataEnd = getDataEnd(region);
      Address regionLimit = getRegionLimit(region);

      VM.assertions._assert(nextRegion.isZero() || isRegionAligned(nextRegion));
      VM.assertions._assert(dataEnd.GE(dataStart));
      if (dataEnd.GT(regionLimit)) {
        Log.write("dataEnd="); Log.write(dataEnd);
        Log.write(", regionLimit="); Log.writeln(regionLimit);
      }
      VM.assertions._assert(dataEnd.LE(regionLimit));
      VM.assertions._assert(regionLimit.EQ(region.plus(BLOCK_SIZE)));
    }
  }

  /**
   * Update the metadata to reflect the addition of a new region.
   *
   * @param start The start of the new region
   * @param size The size of the new region (rounded up to block-alignment)
   * @param bytes the size of the pending allocation, if any
   */
  @Inline
  private void updateMetaData(Address start, Extent size, int bytes) {
    if (initialRegion.isZero()) {
      /* this is the first allocation */
      initialRegion = start;
      region = start;
      cursor = region.plus(DATA_START_OFFSET);
    } else if (limit.NE(start) ||
               region.diff(start.plus(size)).toWord().toExtent().GT(maximumRegionSize())) {
      /* non contiguous or over-size, initialize new region */
      setNextRegion(region,start);
      setDataEnd(region,cursor);
      region = start;
      cursor = start.plus(DATA_START_OFFSET);
    }
    updateLimit(start.plus(size), start, bytes);
    setRegionLimit(region,limit);
  }

  /**
   * Gather data for GCspy.<p>
   * This method calls the driver's linear scanner to scan through
   * the objects allocated by this bump pointer.
   *
   * @param driver The GCspy driver for this space.
   */
  public void gcspyGatherData(LinearSpaceDriver driver) {
    //driver.setRange(space.getStart(), cursor);
    driver.setRange(space.getStart(), limit);
    this.linearScan(driver.getScanner());
  }

  /**
   * Gather data for GCspy.<p>
   * This method calls the driver's linear scanner to scan through
   * the objects allocated by this bump pointer.
   *
   * @param driver The GCspy driver for this space.
   * @param scanSpace The space to scan
   */
  public void gcspyGatherData(LinearSpaceDriver driver, Space scanSpace) {
    //TODO can scanSpace ever be different from this.space?
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(scanSpace == space, "scanSpace != space");

    //driver.setRange(scanSpace.getStart(), cursor);
    Address start = scanSpace.getStart();
    driver.setRange(start, limit);

    if (false) {
      Log.write("\nBumpPointer.gcspyGatherData set Range "); Log.write(scanSpace.getStart());
      Log.write(" to "); Log.writeln(limit);
      Log.write("BumpPointer.gcspyGatherData scan from "); Log.writeln(initialRegion);
    }

    linearScan(driver.getScanner());
  }


  /**
   * Perform a linear scan through the objects allocated by this bump pointer.
   *
   * @param scanner The scan object to delegate scanning to.
   */
  @Inline
  public final void linearScan(LinearScan scanner) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(allowScanning);
    /* Has this allocator ever allocated anything? */
    if (initialRegion.isZero()) return;

    /* Loop through the active regions until the last one has been scanned */
    Address start = initialRegion;
    while (!start.isZero()) {
      scanRegion(scanner, start); // Scan this region
      start = getNextRegion(start); // Move on to next
    }
  }

  /**
   * Perform a linear scan through a single contiguous region
   *
   * @param scanner The scan object to delegate to.
   * @param start The start of this region
   */
  @Inline
  private void scanRegion(LinearScan scanner, Address start) {
    /* Get the end of this region */
    Address dataEnd = start.plus(DATA_END_OFFSET).loadAddress();

    /* dataEnd = zero represents the current region. */
    Address currentLimit = (dataEnd.isZero() ? cursor : dataEnd);
    if (currentLimit.EQ(start.plus(DATA_END_OFFSET).plus(BYTES_IN_ADDRESS))) {
      /* Empty region, so we cannot call getObjectFromStartAddress() */
      return;
    }

    ObjectReference current = VM.objectModel.getObjectFromStartAddress(start.plus(DATA_START_OFFSET));

    /* Loop through each object up to the limit */
    do {
      /* Read end address first, as scan may be destructive */
      Address currentObjectEnd = VM.objectModel.getObjectEndAddress(current);
      scanner.scan(current);
      if (currentObjectEnd.GE(currentLimit)) {
        /* We have scanned the last object */
        break;
      }
      /* Find the next object from the start address (dealing with alignment gaps, etc.) */
      ObjectReference next = VM.objectModel.getObjectFromStartAddress(currentObjectEnd);
      if (VM.VERIFY_ASSERTIONS) {
        /* Must be monotonically increasing */
        VM.assertions._assert(next.toAddress().GT(current.toAddress()));
      }
      current = next;
    } while (true);
  }

  /**
   * Some pages are about to be re-used to satisfy a slow path request.
   * @param pages The number of pages.
   */
  protected void reusePages(int pages) {
    VM.assertions.fail("Subclasses that reuse regions must override this method.");
  }

  /**
   * Maximum size of a single region. Important for children that implement
   * load balancing or increments based on region size.
   * @return the maximum region size
   */
  protected Extent maximumRegionSize() {
    return Extent.max();
  }
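
  /*
   * Illustrative sketch (not part of this class): a hypothetical subclass
   * that reuses regions, as a mark-compact allocator might, would be
   * expected to override the two hooks above roughly as follows.  The class
   * name and the body of reusePages are invented for illustration only.
   *
   *   @Uninterruptible
   *   class ReusingBumpPointer extends BumpPointer {
   *     ReusingBumpPointer(Space space) {
   *       super(space, true);                 // enable linear scanning
   *     }
   *     @Override
   *     protected void reusePages(int pages) {
   *       // account for the reused pages instead of failing
   *     }
   *     @Override
   *     protected Extent maximumRegionSize() {
   *       return Extent.fromIntZeroExtend(1 << (LOG_BLOCK_SIZE + 2));
   *     }
   *   }
   */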

  /** @return the current cursor value */
  public final Address getCursor() {
    return cursor;
  }

  /** @return the space associated with this bump pointer */
  @Override
  public final Space getSpace() {
    return space;
  }

  /**
   * Print out the status of the allocator (for debugging)
   */
  public final void show() {
    Log.write("cursor = "); Log.write(cursor);
    if (allowScanning) {
      Log.write(" region = "); Log.write(region);
    }
    Log.write(" limit = "); Log.writeln(limit);
  }
}