001/*
002 *  This file is part of the Jikes RVM project (http://jikesrvm.org).
003 *
004 *  This file is licensed to You under the Eclipse Public License (EPL);
005 *  You may not use this file except in compliance with the License. You
006 *  may obtain a copy of the License at
007 *
008 *      http://www.opensource.org/licenses/eclipse-1.0.php
009 *
010 *  See the COPYRIGHT.txt file distributed with this work for information
011 *  regarding copyright ownership.
012 */
013package org.mmtk.policy.immix;
014
015import static org.mmtk.policy.Space.BYTES_IN_CHUNK;
016import static org.mmtk.policy.immix.ImmixConstants.*;
017import static org.mmtk.utility.Constants.*;
018
019import org.mmtk.utility.Conversions;
020import org.mmtk.utility.heap.Mmapper;
021import org.mmtk.vm.VM;
022
023import org.vmmagic.pragma.Uninterruptible;
024import org.vmmagic.unboxed.Address;
025import org.vmmagic.unboxed.Extent;
026
/**
 * Static utilities for managing immix <i>chunks</i>: address alignment,
 * the layout of per-chunk metadata (line mark table, block state table,
 * block defrag state table, high-water mark and map entry, all stored at
 * the start of each chunk), and chunk-granularity sweeping.
 */
@Uninterruptible
public class Chunk {

  /**
   * Round an address down to the boundary of the chunk that contains it
   * (clears the low-order {@code CHUNK_MASK} bits).
   *
   * @param ptr an address anywhere within a chunk
   * @return the address of the start of the enclosing chunk
   */
  public static Address align(Address ptr) {
    return ptr.toWord().and(CHUNK_MASK.not()).toAddress();
  }

  /**
   * @param ptr the address to test
   * @return {@code true} if {@code ptr} lies exactly on a chunk boundary
   */
  static boolean isAligned(Address ptr) {
    return ptr.EQ(align(ptr));
  }

  /**
   * @param ptr an address within a chunk
   * @return the byte offset of {@code ptr} from the start of its chunk
   */
  static int getByteOffset(Address ptr) {
    return ptr.toWord().and(CHUNK_MASK).toInt();
  }

  /**
   * @return the number of pages of metadata required per chunk
   */
  static int getRequiredMetaDataPages() {
    Extent bytes = Extent.fromIntZeroExtend(ROUNDED_METADATA_BYTES_PER_CHUNK);
    return Conversions.bytesToPagesUp(bytes);
  }

  /**
   * Sweep the usable blocks of a chunk: blocks with no marked lines are
   * released back to the space, while live blocks have their mark count
   * recorded in the block state table.
   *
   * @param chunk the chunk to sweep (must be chunk-aligned)
   * @param end the limit address; blocks beyond this are not swept
   * @param space the space to which fully free blocks are released
   * @param markHistogram accumulates per-block line mark counts (filled by
   *          {@link Block#sweepOneBlock})
   * @param markValue the line mark value used by this collection cycle
   * @param resetMarks if {@code true}, line marks are also reset while sweeping
   */
  static void sweep(Address chunk, Address end, ImmixSpace space, int[] markHistogram, final byte markValue, final boolean resetMarks) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(isAligned(chunk));
    Address start = getFirstUsableBlock(chunk);
    // cursor walks the block mark state table in lockstep with the block
    // addresses produced by the loop below
    Address cursor = Block.getBlockMarkStateAddress(start);
    for (int index = FIRST_USABLE_BLOCK_INDEX; index < BLOCKS_IN_CHUNK; index++) {
      Address block = chunk.plus(index << LOG_BYTES_IN_BLOCK);
      if (block.GT(end)) break; // nothing beyond 'end' needs sweeping
      // only account defrag statistics when this collection is defragmenting
      // and this block was selected as a defrag source
      final boolean defragSource = space.inImmixDefragCollection() && Block.isDefragSource(block);
      short marked = Block.sweepOneBlock(block, markHistogram, markValue, resetMarks);
      if (marked == 0) {
        // no live lines: release the block unless it was already unused
        if (!Block.isUnusedState(cursor)) {
          space.release(block);
          if (defragSource) Defrag.defragBytesFreed.inc(BYTES_IN_BLOCK);
        }
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(Block.isUnused(block));
      } else {
        // live block: record its marked-line count in the state table
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(marked > 0 && marked <= LINES_IN_BLOCK);
        Block.setState(cursor, marked);
        if (defragSource) Defrag.defragBytesNotFreed.inc(BYTES_IN_BLOCK);
      }
      if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(Block.isUnused(block) || (Block.getBlockMarkState(block) == marked && marked > 0 && marked <= MAX_BLOCK_MARK_STATE));
      cursor = cursor.plus(Block.BYTES_IN_BLOCK_STATE_ENTRY);
    }
  }

  /**
   * Zero the metadata region at the start of a chunk, mapping the
   * underlying pages first if necessary.
   *
   * @param chunk the chunk whose metadata is to be cleared (must be
   *          chunk-aligned and page-aligned)
   */
  static void clearMetaData(Address chunk) {
    if (VM.VERIFY_ASSERTIONS) {
      VM.assertions._assert(isAligned(chunk));
      VM.assertions._assert(Conversions.isPageAligned(chunk));
      VM.assertions._assert(Conversions.isPageAligned(ROUNDED_METADATA_BYTES_PER_CHUNK));
    }
    Mmapper.ensureMapped(chunk, ROUNDED_METADATA_PAGES_PER_CHUNK);
    VM.memory.zero(false, chunk, Extent.fromIntZeroExtend(ROUNDED_METADATA_BYTES_PER_CHUNK));
    if (VM.VERIFY_ASSERTIONS) checkMetaDataCleared(chunk, chunk);
  }

  /**
   * Debug check that the blocks above a given point in a chunk are unused.
   * When {@code value} equals {@code chunk} the chunk's metadata was just
   * cleared, so the high-water mark must be zero and the whole usable
   * region is checked; otherwise checking starts at the first block after
   * the recorded high-water mark.
   *
   * @param chunk the chunk to check (must be chunk-aligned)
   * @param value either {@code chunk} itself (freshly cleared) or another
   *          address selecting the high-water-relative check
   */
  private static void checkMetaDataCleared(Address chunk, Address value) {
    VM.assertions._assert(isAligned(chunk));
    Address block = Chunk.getHighWater(chunk);
    if (value.EQ(chunk)) {
      VM.assertions._assert(block.isZero());
      block = chunk.plus(Chunk.ROUNDED_METADATA_BYTES_PER_CHUNK);
    } else {
      block = block.plus(BYTES_IN_BLOCK); // start at first block after highwater
      VM.assertions._assert(Block.align(block).EQ(block));
    }
    while (block.LT(chunk.plus(BYTES_IN_CHUNK))) {
      VM.assertions._assert(Chunk.align(block).EQ(chunk));
      VM.assertions._assert(Block.isUnused(block));
      block = block.plus(BYTES_IN_BLOCK);
    }
  }

  /**
   * Raise the enclosing chunk's high-water mark to {@code value} if it is
   * currently lower; never lowers it.
   *
   * @param value the address that has just been reached
   */
  static void updateHighWater(Address value) {
    Address chunk = align(value);
    if (getHighWater(chunk).LT(value)) {
      setHighWater(chunk, value);
    }
  }

  /**
   * Store the high-water mark into the chunk's metadata.
   *
   * @param chunk the chunk (must be chunk-aligned)
   * @param value the new high-water address
   */
  private static void setHighWater(Address chunk, Address value) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(isAligned(chunk));
    chunk.plus(HIGHWATER_OFFSET).store(value);
  }

  /**
   * @param chunk the chunk (must be chunk-aligned)
   * @return the chunk's recorded high-water address
   */
  public static Address getHighWater(Address chunk) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(isAligned(chunk));
    return chunk.plus(HIGHWATER_OFFSET).loadAddress();
  }

  /**
   * Store the chunk's map entry.
   *
   * @param chunk the chunk (must be chunk-aligned)
   * @param value the map value to record
   */
  static void setMap(Address chunk, int value) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(isAligned(chunk));
    chunk.plus(MAP_OFFSET).store(value);
  }

  /**
   * @param chunk the chunk (must be chunk-aligned)
   * @return the magnitude of the chunk's map entry (negative stored values
   *         are normalized to positive — presumably the sign carries a
   *         separate flag; TODO confirm against {@code setMap} callers)
   */
  static int getMap(Address chunk) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(isAligned(chunk));
    int rtn = chunk.plus(MAP_OFFSET).loadInt();
    return (rtn < 0) ? -rtn : rtn;
  }

  /**
   * Reset the line marks and defrag state for every usable block in the
   * chunk, delegating per-block work to
   * {@link Block#resetLineMarksAndDefragStateTable}.
   *
   * @param chunk the chunk (must be chunk-aligned)
   * @param threshold the defrag threshold passed through to each block
   */
  static void resetLineMarksAndDefragStateTable(Address chunk, short threshold) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(isAligned(chunk));
    Address markStateBase = Block.getBlockMarkStateAddress(chunk);
    Address defragStateBase = Block.getDefragStateAddress(chunk);
    Address lineMarkBase = Line.getChunkMarkTable(chunk);
    for (int b = FIRST_USABLE_BLOCK_INDEX; b < BLOCKS_IN_CHUNK; b++) {
      Block.resetLineMarksAndDefragStateTable(threshold, markStateBase, defragStateBase, lineMarkBase, b);
    }
  }

  /**
   * @param chunk the chunk (must be chunk-aligned)
   * @return the address of the first block usable for allocation — i.e.
   *         the first block-aligned address past the chunk's metadata
   */
  static Address getFirstUsableBlock(Address chunk) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(isAligned(chunk));
    Address rtn = chunk.plus(ROUNDED_METADATA_BYTES_PER_CHUNK);
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(rtn.EQ(Block.align(rtn)));
    return rtn;
  }

  /* sizes of the single-word high-water and map metadata entries */
  private static final int LOG_BYTES_IN_HIGHWATER_ENTRY = LOG_BYTES_IN_ADDRESS;
  private static final int HIGHWATER_BYTES = 1 << LOG_BYTES_IN_HIGHWATER_ENTRY;
  private static final int LOG_BYTES_IN_MAP_ENTRY = LOG_BYTES_IN_INT;
  private static final int MAP_BYTES = 1 << LOG_BYTES_IN_MAP_ENTRY;

  /* byte offsets for each type of metadata */
  /* each offset chains off the previous table's end, packing the metadata
   * contiguously at the start of the chunk */
  static final int LINE_MARK_TABLE_OFFSET = 0;
  static final int BLOCK_STATE_TABLE_OFFSET = LINE_MARK_TABLE_OFFSET + Line.LINE_MARK_TABLE_BYTES;
  static final int BLOCK_DEFRAG_STATE_TABLE_OFFSET = BLOCK_STATE_TABLE_OFFSET + Block.BLOCK_STATE_TABLE_BYTES;
  static final int HIGHWATER_OFFSET = BLOCK_DEFRAG_STATE_TABLE_OFFSET + Block.BLOCK_DEFRAG_STATE_TABLE_BYTES;
  static final int MAP_OFFSET = HIGHWATER_OFFSET + HIGHWATER_BYTES;
  static final int METADATA_BYTES_PER_CHUNK = MAP_OFFSET + MAP_BYTES;

  /* FIXME we round the metadata up to block sizes just to ensure the underlying allocator gives us aligned requests */
  private static final int BLOCK_MASK = (1 << LOG_BYTES_IN_BLOCK) - 1;
  static final int ROUNDED_METADATA_BYTES_PER_CHUNK = (METADATA_BYTES_PER_CHUNK + BLOCK_MASK) & ~BLOCK_MASK;
  static final int ROUNDED_METADATA_PAGES_PER_CHUNK = ROUNDED_METADATA_BYTES_PER_CHUNK >> LOG_BYTES_IN_PAGE;
  /* index of the first block not occupied by this chunk's own metadata */
  public static final int FIRST_USABLE_BLOCK_INDEX = ROUNDED_METADATA_BYTES_PER_CHUNK >> LOG_BYTES_IN_BLOCK;
}