/*
 *  This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 *  This file is licensed to You under the Eclipse Public License (EPL);
 *  You may not use this file except in compliance with the License. You
 *  may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 *  See the COPYRIGHT.txt file distributed with this work for information
 *  regarding copyright ownership.
 */
package org.jikesrvm.compilers.baseline;

import static org.jikesrvm.classloader.BytecodeConstants.JBC_caload;
import static org.jikesrvm.classloader.BytecodeConstants.JBC_getfield;
import static org.jikesrvm.classloader.BytecodeConstants.JBC_ifeq;
import static org.jikesrvm.classloader.BytecodeConstants.JBC_ifge;
import static org.jikesrvm.classloader.BytecodeConstants.JBC_ifgt;
import static org.jikesrvm.classloader.BytecodeConstants.JBC_ifle;
import static org.jikesrvm.classloader.BytecodeConstants.JBC_iflt;
import static org.jikesrvm.classloader.BytecodeConstants.JBC_ifne;
import static org.jikesrvm.classloader.BytecodeConstants.JBC_nop;
import static org.jikesrvm.runtime.ExitStatus.EXIT_STATUS_BOGUS_COMMAND_LINE_ARG;
import static org.jikesrvm.runtime.UnboxedSizeConstants.LOG_BYTES_IN_ADDRESS;

import org.jikesrvm.VM;
import org.jikesrvm.classloader.Atom;
import org.jikesrvm.classloader.FieldReference;
import org.jikesrvm.classloader.MethodReference;
import org.jikesrvm.classloader.NormalMethod;
import org.jikesrvm.compilers.common.CodeArray;
import org.jikesrvm.compilers.common.CompiledMethod;
import org.jikesrvm.compilers.common.CompiledMethods;
import org.jikesrvm.osr.BytecodeTraverser;
import org.jikesrvm.runtime.MagicNames;
import org.jikesrvm.runtime.Time;
import org.jikesrvm.scheduler.RVMThread;
import org.vmmagic.pragma.Uninterruptible;
import org.vmmagic.unboxed.Offset;

/**
 * Baseline compiler - platform independent code.
 * <p>
 * Platform dependent versions extend this class and implement the host of
 * abstract methods defined by TemplateCompilerFramework to complete the
 * implementation of a baseline compiler for a particular target.
 * <p>
 * In addition to the framework provided by TemplateCompilerFramework, this compiler
 * also provides hooks for bytecode merging for some common bytecode combinations.
 * By default, bytecode merging is active but has no effect. Subclasses that want to
 * implement the merging need to override the hook methods.
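 * <p>
 * For example, a subclass could merge the {@code iload; caload} pattern by
 * overriding the corresponding hook. A hypothetical sketch (the actual
 * instruction sequence to emit is platform specific):
 * <pre>
 *   protected void emit_iload_caload(int index) {
 *     // emit one fused "load local, then load char array element" sequence
 *     // here, instead of the default emit_regular_iload(index); emit_caload();
 *   }
 * </pre>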
 */
public abstract class BaselineCompiler extends TemplateCompilerFramework {

  /**
   * Merge commonly adjacent bytecodes?
   */
  private static final boolean mergeBytecodes = true;

  private static long gcMapNanos;
  private static long osrSetupNanos;
  private static long codeGenNanos;
  private static long encodingNanos;

  /**
   * Options used during baseline compiler execution
   */
  public static BaselineOptions options;

  /**
   * Next edge counter entry to allocate
   */
  protected int edgeCounterIdx;

  /**
   * Reference maps for the method being compiled
   */
  ReferenceMaps refMaps;

  public abstract byte getLastFixedStackRegister();
  public abstract byte getLastFloatStackRegister();

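  // The helpers below dispatch to the IA32 or PPC implementation of the
  // baseline compiler. VM.BuildForIA32 and VM.BuildForPowerPC are
  // build-time constants, so the untaken branch should be constant-folded
  // away by the compiler.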
  @Uninterruptible
  static short getGeneralLocalLocation(int localIndex, short[] localFixedLocations, NormalMethod method) {
    if (VM.BuildForIA32) {
      return org.jikesrvm.compilers.baseline.ia32.BaselineCompilerImpl.getGeneralLocalLocation(localIndex, localFixedLocations, method);
    } else {
      if (VM.VerifyAssertions) VM._assert(VM.BuildForPowerPC);
      return org.jikesrvm.compilers.baseline.ppc.BaselineCompilerImpl.getGeneralLocalLocation(localIndex, localFixedLocations, method);
    }
  }

  @Uninterruptible
  static short getFloatLocalLocation(int localIndex, short[] localFixedLocations, NormalMethod method) {
    if (VM.BuildForIA32) {
      return org.jikesrvm.compilers.baseline.ia32.BaselineCompilerImpl.getFloatLocalLocation(localIndex, localFixedLocations, method);
    } else {
      if (VM.VerifyAssertions) VM._assert(VM.BuildForPowerPC);
      return org.jikesrvm.compilers.baseline.ppc.BaselineCompilerImpl.getFloatLocalLocation(localIndex, localFixedLocations, method);
    }
  }

  @Uninterruptible
  static short getEmptyStackOffset(NormalMethod m) {
    if (VM.BuildForIA32) {
      return org.jikesrvm.compilers.baseline.ia32.BaselineCompilerImpl.getEmptyStackOffset(m);
    } else {
      if (VM.VerifyAssertions) VM._assert(VM.BuildForPowerPC);
      return org.jikesrvm.compilers.baseline.ppc.BaselineCompilerImpl.getEmptyStackOffset(m);
    }
  }

  @Uninterruptible
  public static short offsetToLocation(int offset) {
    if (VM.BuildForIA32) {
      return org.jikesrvm.compilers.baseline.ia32.BaselineCompilerImpl.offsetToLocation(offset);
    } else {
      if (VM.VerifyAssertions) VM._assert(VM.BuildForPowerPC);
      return org.jikesrvm.compilers.baseline.ppc.BaselineCompilerImpl.offsetToLocation(offset);
    }
  }

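  /**
   * @return the byte offset of this method's entry in the edge counter
   *         table, i.e. the method id scaled by the size of an address
   */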
  protected final Offset getEdgeCounterOffset() {
    return Offset.fromIntZeroExtend(method.getId() << LOG_BYTES_IN_ADDRESS);
  }

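  /**
   * @return the index of this method's entry in the edge counter table
   */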
  protected final int getEdgeCounterIndex() {
    return method.getId();
  }

  /**
   * The types that locals can take.
   * There are two types of locals:
   * <ul>
   *  <li> the parameters of the method. They only have one type.</li>
   *  <li> the other locals. Their numbers get reused when the stack shrinks
   *   and grows again. Therefore, these can have more than one type assigned.</li>
   * </ul>
   * The compiler can use this information to assign registers to locals.
   * See the BaselineCompilerImpl constructor.
   */
  protected final byte[] localTypes;

  protected BaselineCompiler(BaselineCompiledMethod cm) {
    super(cm);
    shouldPrint =
        (!VM.runningTool &&
         (options.PRINT_MACHINECODE) &&
         (!options.hasMETHOD_TO_PRINT() || options.fuzzyMatchMETHOD_TO_PRINT(method.toString())));
    if (!VM.runningTool && options.PRINT_METHOD) printMethodMessage();
    if (shouldPrint && VM.runningVM && !VM.fullyBooted) {
      shouldPrint = false;
      if (options.PRINT_METHOD) {
        VM.sysWriteln("\ttoo early in VM.boot() to print machine code");
      }
    }
    localTypes = new byte[method.getLocalWords()];
  }

  /**
   * Indicates whether the specified Magic method causes a frame to be
   * created on the runtime stack.
   * @param methodToBeCalled RVMMethod of the magic method being called
   * @return {@code true} if the method causes a stack frame to be created
   */
  public static boolean checkForActualCall(MethodReference methodToBeCalled) {
    Atom methodName = methodToBeCalled.getName();
    return methodName == MagicNames.invokeClassInitializer ||
      methodName == MagicNames.invokeMethodReturningVoid ||
      methodName == MagicNames.invokeMethodReturningInt ||
      methodName == MagicNames.invokeMethodReturningLong ||
      methodName == MagicNames.invokeMethodReturningFloat ||
      methodName == MagicNames.invokeMethodReturningDouble ||
      methodName == MagicNames.invokeMethodReturningObject ||
      methodName == MagicNames.addressArrayCreate;
  }

  /**
   * Initializes the compiler options, clearing out any stale state
   * left over from boot image writing.
   */
  public static void initOptions() {
    options = new BaselineOptions();
  }

  /**
   * Now that the VM is fully booted, enable options
   * such as PRINT_MACHINECODE that require a fully booted VM.
   */
  public static void fullyBootedVM() {
    // If the user has requested machine code dumps, force a test of the
    // method-to-print option now, so that the extra classes needed to process
    // matching are loaded and compiled up front. This avoids getting stuck in
    // a loop by asking whether we have a match in the middle of a compilation.
    // Pick an obscure string for the check.
    if (options.hasMETHOD_TO_PRINT() && options.fuzzyMatchMETHOD_TO_PRINT("???")) {
      VM.sysWriteln("??? is not a sensible string to specify for method name");
    }
  }

  /**
   * Process a command line argument
   * @param prefix the argument's prefix
   * @param arg the command line argument with the prefix stripped off
   */
  public static void processCommandLineArg(String prefix, String arg) {
    if (!options.processAsOption(prefix, arg)) {
      VM.sysWrite("BaselineCompiler: Unrecognized argument \"" + arg + "\"\n");
      VM.sysExit(EXIT_STATUS_BOGUS_COMMAND_LINE_ARG);
    }
  }

  /**
   * Generate a report of time spent in various phases of the baseline compiler.
   * <p> NB: This method may be called in a context where class loading and/or
   * GC cannot be allowed. Therefore we must use primitive sysWrites for output and avoid string
   * appends and other allocations.
   * <p>
   * FIXME should this method be uninterruptible?
   *
   * @param explain Should an explanation of the metrics be generated?
   */
  public static void generateBaselineCompilerSubsystemReport(boolean explain) {
    if (!VM.MeasureCompilationPhases) return;

    VM.sysWriteln("\n\t\tBaseline Compiler SubSystem");
    VM.sysWriteln("\tPhase\t\t\t    Time");
    VM.sysWriteln("\t\t\t\t(ms)    (%ofTotal)");

    double gcMapTime = Time.nanosToMillis(gcMapNanos);
    double osrSetupTime = Time.nanosToMillis(osrSetupNanos);
    double codeGenTime = Time.nanosToMillis(codeGenNanos);
    double encodingTime = Time.nanosToMillis(encodingNanos);
    double total = gcMapTime + osrSetupTime + codeGenTime + encodingTime;

    VM.sysWrite("\tCompute GC Maps\t\t", gcMapTime);
    VM.sysWriteln("\t", 100 * gcMapTime / total);

    if (osrSetupTime > 0) {
      VM.sysWrite("\tOSR setup \t\t", osrSetupTime);
      VM.sysWriteln("\t", 100 * osrSetupTime / total);
    }

    VM.sysWrite("\tCode generation\t\t", codeGenTime);
    VM.sysWriteln("\t", 100 * codeGenTime / total);

    VM.sysWrite("\tEncode GC/MC maps\t", encodingTime);
    VM.sysWriteln("\t", 100 * encodingTime / total);

    VM.sysWriteln("\tTOTAL\t\t\t", total);
  }

  /**
   * Compiles the given method with the baseline compiler.
   *
   * @param method the NormalMethod to compile.
   * @return the generated CompiledMethod for said NormalMethod.
   */
  public static CompiledMethod compile(NormalMethod method) {
    if (VM.VerifyAssertions) {
      VM._assert(!method.getDeclaringClass().hasSaveVolatileAnnotation(),
          "Baseline compiler doesn't implement SaveVolatile");
    }
    BaselineCompiledMethod cm =
        (BaselineCompiledMethod) CompiledMethods.createCompiledMethod(method, CompiledMethod.BASELINE);
    cm.compile();
    return cm;
  }

  protected abstract void initializeCompiler();

  /**
   * Top level driver for baseline compilation of a method. Compilation
   * proceeds in five phases: (1) GC map computation, (2) OSR setup,
   * (3) code generation, (4) adjustment of the OSR maps, and
   * (5) encoding of the machine code maps.
   */
  protected void compile() {
    if (shouldPrint) printStartHeader(method);

    // Phase 1: GC map computation
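    // Each phase is timed with try/finally so that the elapsed nanos are
    // accumulated into the static counters read by
    // generateBaselineCompilerSubsystemReport, even if compilation fails.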
    long start = 0;
    try {
      if (VM.MeasureCompilationPhases) {
        start = Time.nanoTime();
      }
      refMaps = new ReferenceMaps((BaselineCompiledMethod) compiledMethod, stackHeights, localTypes);
    } finally {
      if (VM.MeasureCompilationPhases) {
        long end = Time.nanoTime();
        gcMapNanos += end - start;
      }
    }

    /* The reference map and stack heights were computed using the original
     * bytecodes and possibly new operand words. Recompute the stack heights,
     * but keep the operand words of the code generation consistent with the
     * reference map.
     * TODO: revisit this code as part of OSR redesign
     */
    // Phase 2: OSR setup
    boolean edge_counters = options.PROFILE_EDGE_COUNTERS;
    try {
      if (VM.MeasureCompilationPhases) {
        start = Time.nanoTime();
      }
      if (VM.BuildForAdaptiveSystem && method.isForOsrSpecialization()) {
        options.PROFILE_EDGE_COUNTERS = false;
        // we already allocated enough space for stackHeights, shift it back first
        System.arraycopy(stackHeights,
                         0,
                         stackHeights,
                         method.getOsrPrologueLength(),
                         method.getBytecodeLength());   // NB: getBytecodeLength returns the length of the original bytecodes

        // compute stack height for prologue
        new BytecodeTraverser().prologueStackHeights(method, method.getOsrPrologue(), stackHeights);
      }
    } finally {
      if (VM.MeasureCompilationPhases) {
        long end = Time.nanoTime();
        osrSetupNanos += end - start;
      }
    }
    // Phase 3: Code generation
    int[] bcMap;
    MachineCode machineCode;
    CodeArray instructions;
    try {
      if (VM.MeasureCompilationPhases) {
        start = Time.nanoTime();
      }

      // determine if we are going to insert edge counters for this method
      if (options.PROFILE_EDGE_COUNTERS &&
          !method.getDeclaringClass().hasBridgeFromNativeAnnotation() &&
          (method.hasCondBranch() || method.hasSwitch())) {
        ((BaselineCompiledMethod) compiledMethod).setHasCounterArray(); // yes, we will inject counters for this method.
      }

      // do platform-specific tasks before generating code
      initializeCompiler();

      machineCode = genCode();
      instructions = machineCode.getInstructions();
      bcMap = machineCode.getBytecodeMap();
    } finally {
      if (VM.MeasureCompilationPhases) {
        long end = Time.nanoTime();
        codeGenNanos += end - start;
      }
    }

    /* Adjust the machine code map and restore the original bytecodes
     * for building the reference map later.
     * TODO: revisit this code as part of OSR redesign
     */
    // Phase 4: OSR part 2
    try {
      if (VM.MeasureCompilationPhases) {
        start = Time.nanoTime();
      }
      if (VM.BuildForAdaptiveSystem && method.isForOsrSpecialization()) {
        int[] newmap = new int[bcMap.length - method.getOsrPrologueLength()];
        System.arraycopy(bcMap, method.getOsrPrologueLength(), newmap, 0, newmap.length);
        machineCode.setBytecodeMap(newmap);
        bcMap = newmap;
        // switch back to original state
        method.finalizeOsrSpecialization();
        // restore options
        options.PROFILE_EDGE_COUNTERS = edge_counters;
      }
    } finally {
      if (VM.MeasureCompilationPhases) {
        long end = Time.nanoTime();
        osrSetupNanos += end - start;
      }
    }

    // Phase 5: Encode machine code maps
    try {
      if (VM.MeasureCompilationPhases) {
        start = Time.nanoTime();
      }
      if (method.isSynchronized()) {
        ((BaselineCompiledMethod) compiledMethod).setLockAcquisitionOffset(lockOffset);
      }
      ((BaselineCompiledMethod) compiledMethod).encodeMappingInfo(refMaps, bcMap);
      compiledMethod.compileComplete(instructions);
      if (edgeCounterIdx > 0) {
        EdgeCounts.allocateCounters(method, edgeCounterIdx);
      }
      if (shouldPrint) {
        ((BaselineCompiledMethod) compiledMethod).printExceptionTable();
        printEndHeader(method);
      }
    } finally {
      if (VM.MeasureCompilationPhases) {
        long end = Time.nanoTime();
        encodingNanos += end - start;
      }
    }
  }

  @Override
  protected String getCompilerName() {
    return "baseline";
  }

  /**
   * @return whether the current bytecode is on the boundary of a basic block,
   *         i.e. whether the nearest preceding bytecode that is mapped to a
   *         basic block belongs to a different block than the current bytecode
   */
  private boolean basicBlockBoundary() {
    int index = biStart;
    short currentBlock = refMaps.byteToBlockMap[index];
    index--;
    while (index >= 0) {
      short prevBlock = refMaps.byteToBlockMap[index];
      if (prevBlock == currentBlock) {
        return false;
      } else if (prevBlock != BasicBlock.NOTBLOCK) {
        return true;
      }
      index--;
    }
    return true;
  }

  /**
   * Emits code to load an int local variable
   * @param index the local index to load
   */
  @Override
  protected final void emit_iload(int index) {
    if (!mergeBytecodes || basicBlockBoundary()) {
      emit_regular_iload(index);
    } else {
      int nextBC = bcodes.peekNextOpcode();
      switch (nextBC) {
      case JBC_caload:
        if (shouldPrint) getAssembler().noteBytecode(biStart, "caload");
        bytecodeMap[bcodes.index()] = getAssembler().getMachineCodeIndex();
        bcodes.nextInstruction(); // skip opcode
        emit_iload_caload(index);
        break;
      default:
        emit_regular_iload(index);
        break;
      }
    }
  }

  /**
   * Emits code to load an int local variable
   * @param index the local index to load
   */
  protected abstract void emit_regular_iload(int index);

  /**
   * Emits code to load an int local variable and then load from a character array.
   * <p>
   * By default, this method emits code for iload and then for caload.
   * Subclasses that want to implement bytecode merging for this pattern
   * must override this method.
   *
   * @param index the local index to load
   */
  protected void emit_iload_caload(int index) {
    emit_regular_iload(index);
    emit_caload();
  }

  /**
   * Emits code to load a reference local variable
   * @param index the local index to load
   */
  @Override
  protected final void emit_aload(int index) {
    if (!mergeBytecodes || basicBlockBoundary()) {
      emit_regular_aload(index);
    } else {
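      // NOTE: merging of aload with a following getfield is currently
      // disabled: nextBC is pinned to JBC_nop instead of peeking at the
      // real next opcode, so the default case below is always taken.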
      int nextBC = JBC_nop; // bcodes.peekNextOpcode();
      switch (nextBC) {
      case JBC_getfield: {
        int gfIndex = bcodes.index();
        bcodes.nextInstruction(); // skip opcode
        FieldReference fieldRef = bcodes.getFieldReference();
        if (fieldRef.needsDynamicLink(method)) {
          bcodes.reset(gfIndex);
          emit_regular_aload(index);
        } else {
          bytecodeMap[gfIndex] = getAssembler().getMachineCodeIndex();
          if (shouldPrint) getAssembler().noteBytecode(biStart, "getfield", fieldRef);
          emit_aload_resolved_getfield(index, fieldRef);
        }
        break;
      }
      default:
        emit_regular_aload(index);
        break;
      }
    }
  }

  /**
   * Emits code to load a reference local variable
   * @param index the local index to load
   */
  protected abstract void emit_regular_aload(int index);

  /**
   * Emits code to load a reference local variable and then perform a field load.
   * <p>
   * By default, this method emits code for aload and then for resolved_getfield.
   * Subclasses that want to implement bytecode merging for this pattern
   * must override this method.
   *
   * @param index the local index to load
   * @param fieldRef the referenced field
   */
  protected void emit_aload_resolved_getfield(int index, FieldReference fieldRef) {
    emit_regular_aload(index);
    emit_resolved_getfield(fieldRef);
  }

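  /**
   * Emits code for the lcmp bytecode, merging it with a directly
   * following if&lt;cond&gt; bytecode where possible.
   */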
  @Override
  protected final void emit_lcmp() {
    if (!mergeBytecodes || basicBlockBoundary()) {
      emit_regular_lcmp();
    } else {
      int nextBC = bcodes.peekNextOpcode();
      switch (nextBC) {
        case JBC_ifeq:
          do_lcmp_if(BranchCondition.EQ);
          break;
        case JBC_ifne:
          do_lcmp_if(BranchCondition.NE);
          break;
        case JBC_iflt:
          do_lcmp_if(BranchCondition.LT);
          break;
        case JBC_ifge:
          do_lcmp_if(BranchCondition.GE);
          break;
        case JBC_ifgt:
          do_lcmp_if(BranchCondition.GT);
          break;
        case JBC_ifle:
          do_lcmp_if(BranchCondition.LE);
          break;
        default:
          emit_regular_lcmp();
          break;
      }
    }
  }

  /**
   * Handles the bytecode pattern {@code lcmp; if..}
   * @param bc branch condition
   */
  private void do_lcmp_if(BranchCondition bc) {
    final boolean shouldPrint = this.shouldPrint;
    int biStart = bcodes.index();  // start of if bytecode
    bytecodeMap[biStart] = getAssembler().getMachineCodeIndex();
    bcodes.nextInstruction(); // skip opcode
    int offset = bcodes.getBranchOffset();
    int bTarget = biStart + offset;
    if (shouldPrint) getAssembler().noteBranchBytecode(biStart, "if" + bc, offset, bTarget);
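    // a non-positive branch offset is a backward branch: emit a yieldpoint
    // test so that the thread can be preempted on the loop back edge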
    if (offset <= 0) emit_threadSwitchTest(RVMThread.BACKEDGE);
    emit_lcmp_if(bTarget, bc);
  }

  /**
   * Emits code to implement the lcmp bytecode
   */
  protected abstract void emit_regular_lcmp();

  /**
   * Emits code to perform an lcmp followed by a conditional branch.
   * <p>
   * By default, this method emits code for lcmp and then for the conditional
   * branch. Subclasses that want to implement bytecode merging for this
   * pattern must override this method.
   * @param bTarget target bytecode of the branch
   * @param bc branch condition
   */
  protected void emit_lcmp_if(int bTarget, BranchCondition bc) {
    emit_regular_lcmp();
    emit_if(bTarget, bc);
  }

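  /**
   * Emits code for the [df]cmp[gl] bytecodes, merging them with a directly
   * following if&lt;cond&gt; bytecode where possible.
   */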
  @Override
  protected final void emit_DFcmpGL(boolean single, boolean unorderedGT) {
    if (!mergeBytecodes || basicBlockBoundary()) {
      emit_regular_DFcmpGL(single, unorderedGT);
    } else {
      int nextBC = bcodes.peekNextOpcode();
      switch (nextBC) {
      case JBC_ifeq:
        do_DFcmpGL_if(single, unorderedGT, BranchCondition.EQ);
        break;
      case JBC_ifne:
        do_DFcmpGL_if(single, unorderedGT, BranchCondition.NE);
        break;
      case JBC_iflt:
        do_DFcmpGL_if(single, unorderedGT, BranchCondition.LT);
        break;
      case JBC_ifge:
        do_DFcmpGL_if(single, unorderedGT, BranchCondition.GE);
        break;
      case JBC_ifgt:
        do_DFcmpGL_if(single, unorderedGT, BranchCondition.GT);
        break;
      case JBC_ifle:
        do_DFcmpGL_if(single, unorderedGT, BranchCondition.LE);
        break;
      default:
        emit_regular_DFcmpGL(single, unorderedGT);
        break;
      }
    }
  }

  /**
   * Handles the bytecode pattern {@code DFcmpGL; if..}
   * @param single {@code true} for float [f], {@code false} for double [d]
   * @param unorderedGT {@code true} for [g], {@code false} for [l]
   * @param bc branch condition
   */
  private void do_DFcmpGL_if(boolean single, boolean unorderedGT, BranchCondition bc) {
    final boolean shouldPrint = this.shouldPrint;
    int biStart = bcodes.index();  // start of if bytecode
    bytecodeMap[biStart] = getAssembler().getMachineCodeIndex();
    bcodes.nextInstruction(); // skip opcode
    int offset = bcodes.getBranchOffset();
    int bTarget = biStart + offset;
    if (shouldPrint) getAssembler().noteBranchBytecode(biStart, "if" + bc, offset, bTarget);
    if (offset <= 0) emit_threadSwitchTest(RVMThread.BACKEDGE);
    emit_DFcmpGL_if(single, unorderedGT, bTarget, bc);
  }

  /**
   * Emits code to implement the [df]cmp[gl] bytecodes
   * @param single {@code true} for float [f], {@code false} for double [d]
   * @param unorderedGT {@code true} for [g], {@code false} for [l]
   */
  protected abstract void emit_regular_DFcmpGL(boolean single, boolean unorderedGT);

  /**
   * Emits code to perform a [df]cmp[gl] followed by a conditional branch.
   * <p>
   * By default, this method emits code for [df]cmp[gl] and then for the
   * conditional branch. Subclasses that want to implement bytecode merging
   * for this pattern must override this method.
   *
   * @param single {@code true} for float [f], {@code false} for double [d]
   * @param unorderedGT {@code true} for [g], {@code false} for [l]
   * @param bTarget target bytecode of the branch
   * @param bc branch condition
   */
  protected void emit_DFcmpGL_if(boolean single, boolean unorderedGT, int bTarget, BranchCondition bc) {
    emit_regular_DFcmpGL(single, unorderedGT);
    emit_if(bTarget, bc);
  }

}