/*
 *  This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 *  This file is licensed to You under the Eclipse Public License (EPL);
 *  You may not use this file except in compliance with the License. You
 *  may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 *  See the COPYRIGHT.txt file distributed with this work for information
 *  regarding copyright ownership.
 */
package org.mmtk.utility.alloc;

import static org.mmtk.utility.Constants.*;

import org.mmtk.vm.Lock;
import org.mmtk.plan.Plan;
import org.mmtk.policy.Space;
import org.mmtk.utility.*;

import org.mmtk.vm.VM;

import org.vmmagic.unboxed.*;
import org.vmmagic.pragma.*;

/**
 * This abstract base class provides the basis for processor-local
 * allocation.  The key functionality provided is the retry mechanism
 * that is necessary to correctly handle the fact that a "slow-path"
 * allocation can cause a GC, which violates the uninterruptibility assumption.
 * This can result in the thread being moved to a different processor, so that
 * the allocator object it is using is not actually the one for the processor
 * it is running on.<p>
 *
 * This class also includes functionality to assist allocators with
 * ensuring that allocation requests are aligned as requested.<p>
 *
 * Failing to handle this properly will lead to very hard to trace bugs
 * where the allocation that caused a GC, or allocations immediately following
 * a GC, are handled incorrectly.<p>
 *
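 * As a rough, illustrative sketch only (the subclass and field names below are
 * hypothetical, not part of MMTk), a concrete allocator's fast path typically bumps
 * a cursor and falls back to {@link #allocSlow} when its current buffer is exhausted;
 * the slow path may trigger a GC and then retries via the mechanism described above:
 *
 * <pre>{@code
 * // Sketch of a bump-pointer style subclass; real MMTk allocators differ in detail.
 * // (getSpace() and allocSlowOnce() are omitted for brevity.)
 * class SketchAllocator extends Allocator {
 *   private Address cursor; // next free byte in the current buffer
 *   private Address limit;  // end of the current buffer
 *
 *   public Address alloc(int bytes, int alignment, int offset) {
 *     Address start = alignAllocation(cursor, alignment, offset); // fills any gap
 *     Address end = start.plus(bytes);
 *     if (end.GT(limit))
 *       return allocSlow(bytes, alignment, offset); // out-of-line retry loop
 *     cursor = end;
 *     return start;
 *   }
 * }
 * }</pre>
 *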
 * TODO the comments in this class need to be rephrased from using the
 * particle terminology to alignments.
 */
@Uninterruptible
public abstract class Allocator {

  /** Lock used for out of memory handling */
  private static Lock oomLock = VM.newLock("OOM Lock");
  /** Has an allocation succeeded since the emergency collection? */
  private static volatile boolean allocationSuccess;
  /** Maximum number of failed attempts by a single thread */
  private static int collectionAttempts;

  /**
   * @return a consecutive failure count for any allocating thread.
   */
  public static int determineCollectionAttempts() {
    if (!allocationSuccess) {
      collectionAttempts++;
    } else {
      allocationSuccess = false;
      collectionAttempts = 1;
    }
    return collectionAttempts;
  }

  /**
   * Return the space this allocator is currently bound to.
   *
   * @return The Space.
   */
  protected abstract Space getSpace();

  /**
   * Aligns up an allocation request. The allocation request accepts a
   * region, which must be at least particle aligned, an alignment
   * request (some power-of-two number of particles) and an offset (a
   * number of particles). There is also a knownAlignment parameter to
   * allow a more optimised check when the particular allocator in use
   * always aligns at a coarser grain than individual particles, such
   * as some free lists.
   *
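   * <p>For illustration only (the address and constants below are made up, purely
   * to show the arithmetic; they are not taken from a real configuration): with a
   * 4-byte {@code MIN_ALIGNMENT}, a request for 8-byte alignment at a 4-byte offset
   * advances the region so that {@code result + offset} is a multiple of the alignment:
   *
   * <pre>{@code
   * Address cursor = Address.fromIntZeroExtend(0x1008); // particle (4-byte) aligned
   * Address result = Allocator.alignAllocation(cursor, 8, 4, 4, true);
   * // result == 0x100c, since 0x100c + 4 is 8-byte aligned; the skipped
   * // 4 bytes at 0x1008 are filled with ALIGNMENT_VALUE.
   * }</pre>
   *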
   * @param region The region to align up.
   * @param alignment The requested alignment
   * @param offset The offset from the alignment
   * @param knownAlignment The statically known minimum alignment.
   * @param fillAlignmentGap whether to fill up holes in the alignment
   *  with the alignment value ({@link Constants#ALIGNMENT_VALUE})
   * @return The aligned up address.
   */
  @Inline
  public static Address alignAllocation(Address region, int alignment, int offset, int knownAlignment, boolean fillAlignmentGap) {
    if (VM.VERIFY_ASSERTIONS) {
      VM.assertions._assert(knownAlignment >= MIN_ALIGNMENT);
      VM.assertions._assert(MIN_ALIGNMENT >= BYTES_IN_INT);
      VM.assertions._assert(!(fillAlignmentGap && region.isZero()));
      VM.assertions._assert(alignment <= MAX_ALIGNMENT);
      VM.assertions._assert(offset >= 0);
      VM.assertions._assert(region.toWord().and(Word.fromIntSignExtend(MIN_ALIGNMENT - 1)).isZero());
      VM.assertions._assert((alignment & (MIN_ALIGNMENT - 1)) == 0);
      VM.assertions._assert((offset & (MIN_ALIGNMENT - 1)) == 0);
    }

    // No alignment ever required.
    if (alignment <= knownAlignment || MAX_ALIGNMENT <= MIN_ALIGNMENT)
      return region;

    // May require an alignment
    Word mask = Word.fromIntSignExtend(alignment - 1);
    Word negOff = Word.fromIntSignExtend(-offset);
    Offset delta = negOff.minus(region.toWord()).and(mask).toOffset();

    if (fillAlignmentGap && ALIGNMENT_VALUE != 0) {
      if ((MAX_ALIGNMENT - MIN_ALIGNMENT) == BYTES_IN_INT) {
        // At most a single hole
        if (delta.toInt() == (BYTES_IN_INT)) {
          region.store(ALIGNMENT_VALUE);
          region = region.plus(delta);
          return region;
        }
      } else {
        while (delta.toInt() >= (BYTES_IN_INT)) {
          region.store(ALIGNMENT_VALUE);
          region = region.plus(BYTES_IN_INT);
          delta = delta.minus(BYTES_IN_INT);
        }
      }
    }

    return region.plus(delta);
  }

  /**
   * Fill the specified region with the alignment value.
   *
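   * <p>For illustration only (the variable names below are hypothetical): an
   * allocator that cannot use the tail of its current buffer might pad it before
   * moving on, so that code which later scans the region linearly sees the
   * alignment value rather than stale data:
   *
   * <pre>{@code
   * // cursor is the first unused byte in the buffer, limit is the end of the buffer
   * Allocator.fillAlignmentGap(cursor, limit); // pads [cursor, limit) with ALIGNMENT_VALUE
   * }</pre>
   *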
   * @param start The start of the region.
   * @param end A pointer past the end of the region.
   */
  @Inline
  public static void fillAlignmentGap(Address start, Address end) {
    if ((MAX_ALIGNMENT - MIN_ALIGNMENT) == BYTES_IN_INT) {
      // At most a single hole
      if (!end.diff(start).isZero()) {
        start.store(ALIGNMENT_VALUE);
      }
    } else {
      while (start.LT(end)) {
        start.store(ALIGNMENT_VALUE);
        start = start.plus(BYTES_IN_INT);
      }
    }
  }

  /**
   * Aligns up an allocation request. The allocation request accepts a
   * region, which must be at least particle aligned, an alignment
   * request (some power-of-two number of particles) and an offset (a
   * number of particles). Any alignment gap is filled with the
   * alignment value.
   *
   * @param region The region to align up.
   * @param alignment The requested alignment
   * @param offset The offset from the alignment
   * @return The aligned up address.
   */
  @Inline
  public static Address alignAllocation(Address region, int alignment, int offset) {
    return alignAllocation(region, alignment, offset, MIN_ALIGNMENT, true);
  }

  /**
   * Aligns up an allocation request without filling the alignment gap.
   * The allocation request accepts a region, which must be at least
   * particle aligned, an alignment request (some power-of-two number of
   * particles) and an offset (a number of particles).
   *
   * @param region The region to align up.
   * @param alignment The requested alignment
   * @param offset The offset from the alignment
   * @return The aligned up address.
   */
  @Inline
  public static Address alignAllocationNoFill(Address region, int alignment, int offset) {
    return alignAllocation(region, alignment, offset, MIN_ALIGNMENT, false);
  }

  /**
   * This method calculates the minimum size that will guarantee the allocation
   * of a specified number of bytes at the specified alignment.
   *
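   * <p>For example (assuming a hypothetical configuration in which
   * {@code MIN_ALIGNMENT} is 4 bytes and {@code MAX_ALIGNMENT} is larger):
   *
   * <pre>{@code
   * // A 24-byte request at 8-byte alignment may need to skip up to
   * // 8 - 4 = 4 bytes, so 28 bytes are enough to guarantee the allocation.
   * int padded = Allocator.getMaximumAlignedSize(24, 8); // == 24 + 8 - 4 == 28
   * }</pre>
   *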
   * @param size The number of bytes (not aligned).
   * @param alignment The requested alignment (some power of two).
   * @return the minimum size (in bytes) that's necessary to guarantee allocation
   *  at the given alignment
   */
  @Inline
  public static int getMaximumAlignedSize(int size, int alignment) {
    return getMaximumAlignedSize(size, alignment, MIN_ALIGNMENT);
  }

  /**
   * This method calculates the minimum size that will guarantee the allocation
   * of a specified number of bytes at the specified alignment.
   *
   * @param size The number of bytes (not aligned).
   * @param alignment The requested alignment (some power of two).
   * @param knownAlignment The known minimum alignment. Specifically for use in
   * allocators that enforce greater than particle alignment. It is a <b>precondition</b>
   * that size is aligned to knownAlignment, and that knownAlignment &gt;=
   * {@link Constants#MIN_ALIGNMENT}.
   * @return the minimum size (in bytes) that's necessary to guarantee allocation
   *  at the given alignment
   */
  @Inline
  public static int getMaximumAlignedSize(int size, int alignment, int knownAlignment) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(size == Conversions.roundDown(size, knownAlignment));
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(knownAlignment >= MIN_ALIGNMENT);

    if (MAX_ALIGNMENT <= MIN_ALIGNMENT || alignment <= knownAlignment) {
      return size;
    } else {
      return size + alignment - knownAlignment;
    }
  }

  /**
   * Single slow path allocation attempt. This is called by the slow-path
   * retry loop in {@link #allocSlowInline}.
   *
   * @param bytes The size of the allocation request
   * @param alignment The required alignment
   * @param offset The alignment offset
   * @return The start address of the region, or zero if allocation fails
   */
  protected abstract Address allocSlowOnce(int bytes, int alignment, int offset);

  /**
   * <b>Out-of-line</b> slow path allocation. This method forces slow path
   * allocation to be out of line (typically desirable, but not when the
   * calling context is already explicitly out-of-line).
   *
   * @param bytes The size of the allocation request
   * @param alignment The required alignment
   * @param offset The alignment offset
   * @return The start address of the region, or zero if allocation fails
   */
  @NoInline
  public final Address allocSlow(int bytes, int alignment, int offset) {
    return allocSlowInline(bytes, alignment, offset);
  }

  /**
   * <b>Inline</b> slow path allocation. This method attempts allocSlowOnce
   * several times, allowing collection to occur between attempts, and ensures
   * that execution resumes safely by taking care of potential thread/mutator
   * context affinity changes. All allocators should use this as the trampoline
   * for slow path allocation.
   *
   * @param bytes The size of the allocation request
   * @param alignment The required alignment
   * @param offset The alignment offset
   * @return The start address of the region, or zero if allocation fails
   */
  @Inline
  public final Address allocSlowInline(int bytes, int alignment, int offset) {
    Allocator current = this;
    Space space = current.getSpace();

    // Information about the previous collection.
    boolean emergencyCollection = false;
    while (true) {
      // Try to allocate using the slow path
      Address result = current.allocSlowOnce(bytes, alignment, offset);

      // Collector allocation always succeeds (or fails inside allocSlow).
      if (!VM.activePlan.isMutator()) {
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!result.isZero());
        return result;
      }

      if (!result.isZero()) {
        // Report allocation success to assist OutOfMemory handling.
        if (!allocationSuccess) {
          oomLock.acquire();
          allocationSuccess = true;
          oomLock.release();
        }
        return result;
      }

      if (emergencyCollection) {
        // Check if we are in an OutOfMemory situation
        oomLock.acquire();
        boolean failWithOOM = !allocationSuccess;
        // This seems odd, but we must allow each OOM to run its course (and maybe give us back memory)
        allocationSuccess = true;
        oomLock.release();
        if (failWithOOM) {
          // Nobody has successfully allocated since an emergency collection: OutOfMemory
          VM.collection.outOfMemory();
          VM.assertions.fail("Not Reached");
          return Address.zero();
        }
      }

      /* This is in case a GC occurs and our mutator context is stale.
       * In some VMs the scheduler can change the affinity between the
       * current thread and the mutator context. This is possible for
       * VMs that dynamically multiplex Java threads onto multiple mutator
       * contexts. */
      current = VM.activePlan.mutator().getAllocatorFromSpace(space);

      /*
       * Record whether the last collection was an emergency collection.
       * If so, we make one more attempt to allocate before we signal
       * an OOM.
       */
      emergencyCollection = Plan.isEmergencyCollection();
    }
  }
}