/*
 * This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 * This file is licensed to You under the Eclipse Public License (EPL);
 * You may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 * See the COPYRIGHT.txt file distributed with this work for information
 * regarding copyright ownership.
 */
package org.jikesrvm.compilers.opt.mir2mc.ia32;

import static org.jikesrvm.compilers.common.assembler.ia32.AssemblerConstants.CONDITION;
import static org.jikesrvm.compilers.common.assembler.ia32.AssemblerConstants.WORD;
import static org.jikesrvm.compilers.opt.OptimizingCompilerException.opt_assert;
import static org.jikesrvm.compilers.opt.ir.Operators.BBEND_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.IG_PATCH_POINT_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.LABEL_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.READ_CEILING_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.UNINT_BEGIN_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.UNINT_END_opcode;
import static org.jikesrvm.compilers.opt.ir.Operators.WRITE_FLOOR_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_ADC_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_ADDSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_ADDSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_ADD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_AND_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CALL_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPEQSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPEQSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPLESD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPLESS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPLTSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPLTSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNESD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNESS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNLESD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNLESS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNLTSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPNLTSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPORDSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPORDSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPUNORDSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMPUNORDSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CMP_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSD2SI_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSD2SS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSI2SD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSI2SS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSS2SD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTSS2SI_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTTSD2SI_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_CVTTSS2SI_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_DIVSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_DIVSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_INT_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_JCC_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_JMP_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_LEA_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_LOCK_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_METHODSTART_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVAPD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVAPS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVLPD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVQ_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOVSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MOV_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MULSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_MULSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_OFFSET_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_OR_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_PUSH_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_RET_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_SBB_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_SQRTSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_SUBSD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_SUBSS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_TEST_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_UCOMISD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_UCOMISS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_XORPD_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_XORPS_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.IA32_XOR_opcode;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.MIR_LOWTABLESWITCH;
import static org.jikesrvm.compilers.opt.ir.ia32.ArchOperators.MIR_LOWTABLESWITCH_opcode;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.DOUBLE_REG;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.FIRST_DOUBLE;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.FIRST_INT;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.FIRST_SPECIAL;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.INT_REG;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.ST0;
import static org.jikesrvm.compilers.opt.regalloc.ia32.PhysicalRegisterConstants.ST1;
import static org.jikesrvm.ia32.ArchConstants.SSE2_FULL;
import static org.jikesrvm.ia32.RegisterConstants.FP0;
import static org.jikesrvm.ia32.RegisterConstants.FP1;
import static org.jikesrvm.ia32.TrapConstants.RVM_TRAP_BASE;
import static org.jikesrvm.util.Bits.fits;

import java.util.ArrayList;
import java.util.Enumeration;

import org.jikesrvm.VM;
import org.jikesrvm.architecture.MachineRegister;
import org.jikesrvm.compilers.common.assembler.ForwardReference;
import org.jikesrvm.compilers.common.assembler.ia32.Assembler;
import org.jikesrvm.compilers.opt.OptimizingCompilerException;
import org.jikesrvm.compilers.opt.ir.IR;
import org.jikesrvm.compilers.opt.ir.Instruction;
import org.jikesrvm.compilers.opt.ir.Operator;
import org.jikesrvm.compilers.opt.ir.Register;
import org.jikesrvm.compilers.opt.ir.ia32.ArchOperator;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_BinaryAcc;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Branch;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Call;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Compare;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_CondBranch;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Lea;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_LowTableSwitch;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Move;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Test;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_Unary;
import org.jikesrvm.compilers.opt.ir.ia32.MIR_UnaryNoRes;
import org.jikesrvm.compilers.opt.ir.ia32.PhysicalRegisterSet;
import org.jikesrvm.compilers.opt.ir.operand.IntConstantOperand;
import org.jikesrvm.compilers.opt.ir.operand.LongConstantOperand;
import org.jikesrvm.compilers.opt.ir.operand.MemoryOperand;
import org.jikesrvm.compilers.opt.ir.operand.Operand;
import org.jikesrvm.compilers.opt.ir.operand.RegisterOperand;
import org.jikesrvm.compilers.opt.ir.operand.TrapCodeOperand;
import org.jikesrvm.compilers.opt.ir.operand.ia32.IA32ConditionOperand;
import org.jikesrvm.compilers.opt.mir2mc.MachineCodeOffsets;
import org.jikesrvm.ia32.RegisterConstants.FPR;
import org.jikesrvm.ia32.RegisterConstants.GPR;
import org.jikesrvm.ia32.RegisterConstants.MM;
import org.jikesrvm.ia32.RegisterConstants.XMM;
import org.jikesrvm.util.Bits;
import org.vmmagic.pragma.NoInline;
import org.vmmagic.unboxed.Offset;

/**
 * This class provides support functionality used by the generated
 * Assembler; it handles basic impedance-matching functionality
 * such as determining which addressing mode is suitable for a given
 * IA32MemoryOperand. This class also provides some boilerplate
 * methods that do not depend on how instructions should actually be
 * assembled, like the top-level generateCode driver. This class is
 * not meant to be used in isolation, but rather to provide support
 * from within the Assembler.
 */
abstract class AssemblerBase extends Assembler {

  private static final boolean DEBUG_ESTIMATE = false;

  /**
   * Hold EBP register object for use in estimating size of memory operands.
   */
  private final Register EBP;

  /**
   * Hold ESP register object for use in estimating size of memory operands.
   */
  private final Register ESP;

  /**
   * Operators with byte arguments
   */
  private static final Operator[] byteSizeOperators;

  /**
   * Operators with word arguments
   */
  private static final Operator[] wordSizeOperators;

  /**
   * Operators with quad arguments
   */
  private static final Operator[] quadSizeOperators;

  protected final MachineCodeOffsets mcOffsets;

  protected final IR ir;

  static {
    ArrayList<Operator> temp = new ArrayList<Operator>();
    for (Operator opr : ArchOperator.operatorArray()) {
      if (opr != null && opr.toString().indexOf("__b") != -1) {
        temp.add(opr);
      }
    }
    byteSizeOperators = temp.toArray(new Operator[temp.size()]);
    temp.clear();
    for (Operator opr : ArchOperator.operatorArray()) {
      if (opr != null && opr.toString().indexOf("__w") != -1) {
        temp.add(opr);
      }
    }
    wordSizeOperators = temp.toArray(new Operator[temp.size()]);
    temp.clear();
    for (Operator opr : ArchOperator.operatorArray()) {
      if (opr != null && opr.toString().indexOf("__q") != -1) {
        temp.add(opr);
      }
    }
    quadSizeOperators = temp.toArray(new Operator[temp.size()]);
  }
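
  // Example of the suffix convention the initializer above keys on
  // (the operator shown is illustrative): a byte-sized operator prints
  // with a "__b" suffix, e.g.
  //   opr.toString()                 // "ia32_movzx__b"
  //   opr.toString().indexOf("__b")  // != -1, so opr lands in byteSizeOperators
  // isByte(), isWord() and isQuad() below consult these arrays before
  // falling back to inspecting memory operand sizes.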

  /**
   * Construct Assembler object
   *
   * @param bytecodeSize initial machine code buffer size.
   * @param shouldPrint whether to dump generated machine code.
   * @param ir the IR object for the opt compilation
   *
   * @see Assembler
   */
  AssemblerBase(int bytecodeSize, boolean shouldPrint, IR ir) {
    super(bytecodeSize, shouldPrint);
    EBP = ir.regpool.getPhysicalRegisterSet().asIA32().getEBP();
    ESP = ir.regpool.getPhysicalRegisterSet().asIA32().getESP();
    mcOffsets = ir.MIRInfo.mcOffsets;
    this.ir = ir;
  }

  /**
   * Should code created by this assembler instance be allocated in the
   * hot code space? The default answer for opt compiled code is yes
   * (otherwise, why are we opt compiling it?).
   */
  @Override
  protected boolean isHotCode() {
    return true;
  }

  /**
   * Is the given operand an immediate? In IA32 assembly, one
   * cannot specify floating-point constants, so the possible
   * immediates we may see are IntegerConstants and
   * TrapConstants (a trap constant really is an integer), and
   * jump targets for which the exact offset is known.
   *
   * @see #getImm
   *
   * @param op the operand being queried
   * @return true if op represents an immediate
   */
  boolean isImm(Operand op) {
    boolean isKnownJumpTarget = op.isBranch() &&
        mcOffsets.getMachineCodeOffset(op.asBranch().target) >= 0;
    return (op instanceof IntConstantOperand) ||
           (VM.BuildFor64Addr && op instanceof LongConstantOperand) ||
           (op instanceof TrapCodeOperand) ||
           isKnownJumpTarget;
  }

  /**
   * Return the IA32 ISA encoding of the immediate value
   * represented by the given operand. This method assumes the
   * operand is an immediate and will likely throw a
   * ClassCastException if this is not the case. It treats
   * BranchOperands somewhat differently than isImm does: in
   * case a branch target is not resolved, it simply returns a wrong
   * answer and trusts the caller to ignore it. This behavior
   * simplifies life when generating code for ImmOrLabel operands.
   *
   * @see #isImm
   *
   * @param op the operand being queried
   * @return the immediate value represented by the operand
   */
  int getImm(Operand op) {
    if (op.isIntConstant()) {
      return op.asIntConstant().value;
    } else if (VM.BuildFor64Addr && op.isLongConstant()) {
      long v = op.asLongConstant().value;
      if (!Bits.fits(v, 32)) {
        throw new OptimizingCompilerException("Invalid immediate operand " + v);
      }
      return (int) v;
    } else if (op.isBranch()) {
      // used by ImmOrLabel stuff
      return mcOffsets.getMachineCodeOffset(op.asBranch().target);
    } else {
      return ((TrapCodeOperand) op).getTrapCode() + RVM_TRAP_BASE;
    }
  }

  /**
   * Return the IA32 ISA encoding of the 64 bit immediate value
   * represented by the given operand. This method assumes the
   * operand is an immediate and will likely throw a
   * ClassCastException if this is not the case. Non-long operands
   * are delegated to getImm, including its treatment of unresolved
   * branch targets.
   *
   * @see #isImm
   *
   * @param op the operand being queried
   * @return the immediate value represented by the operand
   */
  long getImmQuad(Operand op) {
    if (VM.BuildFor64Addr && op.isLongConstant()) {
      return op.asLongConstant().value;
    } else {
      return getImm(op);
    }
  }

  /**
   * Is the given operand a register operand?
   *
   * @see #getReg
   *
   * @param op the operand being queried
   * @return true if op is a RegisterOperand
   */
  boolean isReg(Operand op) {
    return op.isRegister();
  }

  boolean isGPR_Reg(Operand op) {
    return isReg(op);
  }

  boolean isFPR_Reg(Operand op) {
    return isReg(op);
  }

  boolean isMM_Reg(Operand op) {
    return false; // MM registers not currently supported in the OPT compiler
  }

  boolean isXMM_Reg(Operand op) {
    return op.isRegister() && (op.isFloat() || op.isDouble());
  }

  /**
   * Return the machine-level register number corresponding to a given integer
   * Register. The optimizing compiler has its own notion of register
   * numbers, which is not the same as the numbers used by the IA32 ISA. This
   * method takes an optimizing compiler register and translates it into the
   * appropriate machine-level encoding. This method is not applied directly to
   * operands, but rather to register objects.
   *
   * @see #getBase
   * @see #getIndex
   *
   * @param reg the register being queried
   * @return the 3 bit machine-level encoding of reg
   */
  private GPR getGPMachineRegister(Register reg) {
    if (VM.VerifyAssertions) {
      opt_assert(PhysicalRegisterSet.getPhysicalRegisterType(reg) == INT_REG);
    }
    return GPR.lookup(reg.number - FIRST_INT);
  }

  /**
   * Return the machine-level register number corresponding to a
   * given Register. The optimizing compiler has its own notion
   * of register numbers, which is not the same as the numbers used
   * by the IA32 ISA. This method takes an optimizing compiler
   * register and translates it into the appropriate machine-level
   * encoding. This method is not applied directly to operands, but
   * rather to register objects.
   *
   * @see #getReg
   * @see #getBase
   * @see #getIndex
   *
   * @param reg the register being queried
   * @return the 3 bit machine-level encoding of reg
   */
  private MachineRegister getMachineRegister(Register reg) {
    int type = PhysicalRegisterSet.getPhysicalRegisterType(reg);
    MachineRegister result;
    if (type == INT_REG) {
      result = GPR.lookup(reg.number - FIRST_INT);
    } else {
      if (VM.VerifyAssertions) opt_assert(type == DOUBLE_REG);
      if (SSE2_FULL) {
        if (reg.number < FIRST_SPECIAL) {
          result = XMM.lookup(reg.number - FIRST_DOUBLE);
        } else if (reg.number == ST0) {
          result = FP0;
        } else {
          if (VM.VerifyAssertions) opt_assert(reg.number == ST1);
          result = FP1;
        }
      } else {
        result = FPR.lookup(reg.number - FIRST_DOUBLE);
      }
    }
    return result;
  }
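
  // Worked example of the number translation (register numbers are the
  // opt compiler's, not the ISA's): a physical integer register numbered
  // FIRST_INT + 3 maps to GPR.lookup(3), the GPR whose 3 bit ModRM
  // encoding is 3. Under SSE2_FULL, double registers below FIRST_SPECIAL
  // map to XMM registers the same way, while the special numbers ST0 and
  // ST1 fall back to the x87 stack registers FP0 and FP1.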

  /**
   * Given a register operand, return the 3 bit IA32 ISA encoding
   * of that register. This function translates an optimizing
   * compiler register operand into the 3 bit IA32 ISA encoding that
   * can be passed to the Assembler. This function assumes its
   * operand is a register operand, and will blow up if it is not;
   * use isReg to check operands passed to this method.
   *
   * @see #isReg
   *
   * @param op the register operand being queried
   * @return the 3 bit IA32 ISA encoding of op
   */
  MachineRegister getReg(Operand op) {
    return getMachineRegister(op.asRegister().getRegister());
  }

  GPR getGPR_Reg(Operand op) {
    return getGPMachineRegister(op.asRegister().getRegister());
  }

  FPR getFPR_Reg(Operand op) {
    return (FPR) getMachineRegister(op.asRegister().getRegister());
  }

  MM getMM_Reg(Operand op) {
    throw new OptimizingCompilerException("MM registers not currently supported in the opt compiler");
  }

  XMM getXMM_Reg(Operand op) {
    return (XMM) getMachineRegister(op.asRegister().getRegister());
  }

  /**
   * Given a memory operand, return the 3 bit IA32 ISA encoding
   * of its base register. This function translates the optimizing
   * compiler register operand representing the base of the given
   * memory operand into the 3 bit IA32 ISA encoding that
   * can be passed to the Assembler. This function assumes its
   * operand is a memory operand, and will blow up if it is not;
   * one should confirm an operand really has a base register before
   * invoking this method on it.
   *
   * @see #isRegDisp
   * @see #isRegIdx
   * @see #isRegInd
   *
   * @param op the register operand being queried
   * @return the 3 bit IA32 ISA encoding of the base register of op
   */
  GPR getBase(Operand op) {
    return getGPMachineRegister(((MemoryOperand) op).base.getRegister());
  }

  /**
   * Given a memory operand, return the 3 bit IA32 ISA encoding
   * of its index register. This function translates the optimizing
   * compiler register operand representing the index of the given
   * memory operand into the 3 bit IA32 ISA encoding that
   * can be passed to the Assembler. This function assumes its
   * operand is a memory operand, and will blow up if it is not;
   * one should confirm an operand really has an index register before
   * invoking this method on it.
   *
   * @see #isRegIdx
   * @see #isRegOff
   *
   * @param op the register operand being queried
   * @return the 3 bit IA32 ISA encoding of the index register of op
   */
  GPR getIndex(Operand op) {
    return getGPMachineRegister(((MemoryOperand) op).index.getRegister());
  }

  /**
   * Given a memory operand, return the 2 bit IA32 ISA encoding
   * of its scale, suitable for passing to the Assembler to mask
   * into a SIB byte. This function assumes its operand is a memory
   * operand, and will blow up if it is not; one should confirm an
   * operand really has a scale before invoking this method on it.
   *
   * @see #isRegIdx
   * @see #isRegOff
   *
   * @param op the register operand being queried
   * @return the IA32 ISA encoding of the scale of op
   */
  short getScale(Operand op) {
    return ((MemoryOperand) op).scale;
  }

  /**
   * Given a memory operand, return its displacement, suitable for
   * passing to the Assembler. This function assumes its operand is
   * a memory operand, and will blow up if it is not; one should
   * confirm an operand really has a displacement before invoking
   * this method on it.
   *
   * @see #isRegDisp
   * @see #isRegOff
   *
   * @param op the register operand being queried
   * @return the displacement of op
   */
  Offset getDisp(Operand op) {
    return ((MemoryOperand) op).disp;
  }
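
  // Note on scale: MemoryOperand.scale holds the SIB shift amount, so the
  // addressing factor is 1 << scale (0, 1, 2, 3 select index*1, *2, *4, *8).
  // A hypothetical operand [base + index*4 + disp] therefore carries
  // scale == 2, which getScale returns unchanged for the Assembler to
  // mask into the SIB byte.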

  /**
   * Determine if a given operand is a memory operand representing
   * register-displacement mode addressing. This method takes an
   * arbitrary operand, checks whether it is a memory operand, and,
   * if it is, checks whether it should be assembled as IA32
   * register-displacement mode. That is, does it have a non-zero
   * displacement and a base register, but no scale and no index
   * register?
   *
   * @param op the operand being queried
   * @return true if op should be assembled as register-displacement mode
   */
  boolean isRegDisp(Operand op) {
    if (op instanceof MemoryOperand) {
      MemoryOperand mop = (MemoryOperand) op;
      return (mop.base != null) && (mop.index == null) && (!mop.disp.isZero()) && (mop.scale == 0);
    } else {
      return false;
    }
  }

  /**
   * Determine if a given operand is a memory operand representing
   * absolute mode addressing. This method takes an
   * arbitrary operand, checks whether it is a memory operand, and,
   * if it is, checks whether it should be assembled as IA32
   * absolute address mode. That is, does it have a non-zero
   * displacement, but no base register, no scale and no index register?
   *
   * @param op the operand being queried
   * @return true if op should be assembled as absolute mode
   */
  boolean isAbs(Operand op) {
    if (op instanceof MemoryOperand) {
      MemoryOperand mop = (MemoryOperand) op;
      return (mop.base == null) && (mop.index == null) && (!mop.disp.isZero()) && (mop.scale == 0);
    } else {
      return false;
    }
  }

  /**
   * Determine if a given operand is a memory operand representing
   * register-indirect mode addressing. This method takes an
   * arbitrary operand, checks whether it is a memory operand, and,
   * if it is, checks whether it should be assembled as IA32
   * register-indirect mode. That is, does it have a base
   * register, but no displacement, no scale and no index
   * register?
   *
   * @param op the operand being queried
   * @return true if op should be assembled as register-indirect mode
   */
  boolean isRegInd(Operand op) {
    if (op instanceof MemoryOperand) {
      MemoryOperand mop = (MemoryOperand) op;
      return (mop.base != null) && (mop.index == null) && (mop.disp.isZero()) && (mop.scale == 0);
    } else {
      return false;
    }
  }

  /**
   * Determine if a given operand is a memory operand representing
   * register-offset mode addressing. This method takes an
   * arbitrary operand, checks whether it is a memory operand, and,
   * if it is, checks whether it should be assembled as IA32
   * register-offset mode. That is, does it have a non-zero
   * displacement, a scale parameter and an index register, but no
   * base register?
   *
   * @param op the operand being queried
   * @return true if op should be assembled as register-offset mode
   */
  boolean isRegOff(Operand op) {
    if (op instanceof MemoryOperand) {
      MemoryOperand mop = (MemoryOperand) op;
      return (mop.base == null) && (mop.index != null);
    } else {
      return false;
    }
  }

  /**
   * Determine if a given operand is a memory operand representing
   * the full glory of scaled-index-base addressing. This method takes an
   * arbitrary operand, checks whether it is a memory operand, and,
   * if it is, checks whether it should be assembled as IA32
   * SIB mode. That is, does it have a non-zero
   * displacement, a scale parameter, a base register and an index
   * register?
   *
   * @param op the operand being queried
   * @return true if op should be assembled as SIB mode
   */
  boolean isRegIdx(Operand op) {
    if (op instanceof MemoryOperand) {
      return !(isAbs(op) || isRegInd(op) || isRegDisp(op) || isRegOff(op));
    } else {
      return false;
    }
  }
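
  // How the predicates above partition memory operands (shapes are
  // illustrative):
  //   [disp]                       -> isAbs
  //   [base]                       -> isRegInd
  //   [base + disp]                -> isRegDisp
  //   [index<<scale + disp]        -> isRegOff
  //   [base + index<<scale + disp] -> isRegIdx (full SIB form)
  // isRegIdx is deliberately the catch-all: any memory operand that
  // matches none of the other four modes is assembled with a SIB byte.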

  /**
   * Return the condition bits of a given optimizing compiler
   * condition operand. This method returns the IA32 ISA bits
   * representing a given condition operand, suitable for passing to
   * the Assembler to encode into the opcode of a SET, Jcc or
   * CMOV instruction. This being IA32, there are of course
   * exceptions in the binary encoding of conditions (see FCMOV),
   * but the Assembler handles that. This function assumes its
   * argument is an IA32ConditionOperand, and will blow up if it
   * is not.
   *
   * @param op the operand being queried
   * @return the bits that (usually) represent the given condition
   * in the IA32 ISA
   */
  byte getCond(Operand op) {
    return ((IA32ConditionOperand) op).value;
  }

  /**
   * Is the given operand an IA32 condition operand?
   *
   * @param op the operand being queried
   * @return true if op is an IA32 condition operand
   */
  boolean isCond(Operand op) {
    return (op instanceof IA32ConditionOperand);
  }

  /**
   * Return the label representing the target of the given branch
   * operand. These labels are used to represent branch targets
   * that have not yet been assembled, and so cannot be given
   * concrete machine code offsets. All instructions are numbered
   * just prior to assembly, and these numbers are used as labels.
   * This method also returns 0 (not a valid label) for int
   * constants to simplify generation of branches (the branch
   * generation code will ignore this invalid label; it is used to
   * prevent type exceptions). This method assumes its operand is a
   * branch operand (or an int) and will blow up if it is not.
   *
   * @param op the branch operand being queried
   * @return the label representing the branch target
   */
  int getLabel(Operand op) {
    if (op instanceof IntConstantOperand) {
      // used by ImmOrLabel stuff
      return 0;
    } else {
      int offset = mcOffsets.getMachineCodeOffset(op.asBranch().target);
      if (offset < 0) {
        return -offset;
      } else {
        return -1;
      }
    }
  }

  /**
   * Is the given operand a branch target that requires a label?
   *
   * @see #getLabel
   *
   * @param op the operand being queried
   * @return true if it represents a branch requiring a label target
   */
  boolean isLabel(Operand op) {
    if (op.isBranch()) {
      return mcOffsets.getMachineCodeOffset(op.asBranch().target) < 0;
    } else {
      return false;
    }
  }

  /**
   * Is the given operand a branch target?
   *
   * @see #getLabel
   * @see #isLabel
   *
   * @param op the operand being queried
   * @return true if it represents a branch target
   */
  @NoInline
  boolean isImmOrLabel(Operand op) {
    // TODO: Remove NoInlinePragma, work around for leave SSA bug
    return (isImm(op) || isLabel(op));
  }
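
  // Example of the labeling scheme (values are illustrative): generateCode
  // numbers every instruction before assembly by storing -1, -2, -3, ...
  // as its machine code offset. For a branch whose target is still stored
  // as -42, isLabel() sees the negative offset and getLabel() returns 42,
  // the label handed to the forward-reference machinery. Once the target
  // has been emitted its offset becomes non-negative, so isImm() and
  // getImm() take over and the branch is assembled with a concrete offset.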

  /**
   * Does the given instruction operate upon byte-sized data? The
   * opt compiler does not represent the size of register data, so
   * this method typically looks at the memory operand, if any, and
   * checks whether that is a byte. This does not work for the
   * size-converting moves (MOVSX and MOVZX), and those instructions
   * use the operator convention that __b on the end of the operator
   * name means operate upon byte data.
   *
   * @param inst the instruction being queried
   * @return {@code true} if inst operates upon byte data
   */
  boolean isByte(Instruction inst) {
    for (Operator opr : byteSizeOperators) {
      if (opr == inst.operator()) {
        return true;
      }
    }

    for (int i = 0; i < inst.getNumberOfOperands(); i++) {
      Operand op = inst.getOperand(i);
      if (op instanceof MemoryOperand) {
        return (((MemoryOperand) op).size == 1);
      }
    }

    return false;
  }

  /**
   * Does the given instruction operate upon word-sized data? The
   * opt compiler does not represent the size of register data, so
   * this method typically looks at the memory operand, if any, and
   * checks whether that is a word. This does not work for the
   * size-converting moves (MOVSX and MOVZX), and those instructions
   * use the operator convention that __w on the end of the operator
   * name means operate upon word data.
   *
   * @param inst the instruction being queried
   * @return true if inst operates upon word data
   */
  boolean isWord(Instruction inst) {
    for (Operator opr : wordSizeOperators) {
      if (opr == inst.operator()) {
        return true;
      }
    }

    for (int i = 0; i < inst.getNumberOfOperands(); i++) {
      Operand op = inst.getOperand(i);
      if (op instanceof MemoryOperand) {
        return (((MemoryOperand) op).size == 2);
      }
    }

    return false;
  }
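
  // Example of the memory-operand fallback (the operand is hypothetical):
  // for a MOV whose memory operand was built with size == 2, no "__w"
  // operator matches, but the operand scan finds the 2 byte MemoryOperand,
  // so isWord() returns true and the instruction is emitted in its 16 bit
  // form, which costs the extra 0x66 operand-size prefix byte that
  // operandCost() accounts for below.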

  /**
   * Does the given instruction operate upon quad-sized data
   * <em>for the purposes of assembling the instruction</em>?
   * The opt compiler does not represent the size of register data, so
   * it is necessary to determine whether to emit a quad instruction.
   * As described above, this method is only concerned with quad data
   * that changes the instruction. For example, this method will return
   * {@code false} for {@code FSTP}. {@code FSTP} operates on quad-data
   * but the instruction's operation is the same for 32-bit and 64-bit
   * mode, so it is not a quad instruction for the purposes of this method.
   * <p>
   * This method typically looks at the memory operand, if any, and
   * checks whether that is a quad. This method also recognizes
   * the operator convention that __q on the end of the operator
   * name means operate upon quad data. Moreover, it looks at data types
   * for x64.
   *
   * @param inst the instruction being queried
   * @return {@code true} if instruction operates upon quad data <b>AND</b>
   *  is treated as a quad instruction for the purpose of assembling the
   *  machine code
   */
  boolean isQuad(Instruction inst) {
    for (Operator opr : quadSizeOperators) {
      if (opr == inst.operator()) {
        return true;
      }
    }

    for (int i = 0; i < inst.getNumberOfOperands(); i++) {
      Operand op = inst.getOperand(i);
      if (VM.BuildFor64Addr) {
        if (op == null) {
          continue;
        }
        if (op.isLong() || op.isRef()) {
          return true;
        }
      }
      if (op instanceof MemoryOperand) {
        boolean quadMemOp = ((MemoryOperand) op).size == 8;
        if (VM.BuildFor32Addr) {
          return quadMemOp;
        } else if (VM.BuildFor64Addr) {
          // 64-bit: other operands may cause the instruction to be 64 bit
          // even if this one won't
          if (quadMemOp) {
            return true;
          }
        }
      }
    }

    return false;
  }

  /**
   * Given a forward branch instruction and its target,
   * determine (conservatively) if the relative offset to the
   * target is less than 127 bytes
   *
   * @param start the branch instruction
   * @param target the value of the mcOffset of the target label
   * @return {@code true} if the relative offset will be less than 127, false otherwise
   */
  protected boolean targetIsClose(Instruction start, int target) {
    Instruction inst = start.nextInstructionInCodeOrder();
    final int budget = 120; // slight fudge factor could be 127
    int offset = 0;
    while (true) {
      if (offset > budget) return false;
      if (mcOffsets.getMachineCodeOffset(inst) == target) {
        return true;
      }
      offset += estimateSize(inst, offset);
      inst = inst.nextInstructionInCodeOrder();
    }
  }
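
  // Worked example of the budget check above (sizes are estimates, not
  // exact encodings): starting at the branch, estimateSize() is summed
  // instruction by instruction until either the target label is reached
  // (close enough for a short branch) or the running total exceeds 120
  // bytes. The budget sits below the architectural 127 byte limit of an
  // 8 bit displacement precisely because the estimates are conservative.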

  protected int estimateSize(Instruction inst, int offset) {
    switch (inst.getOpcode()) {
      case LABEL_opcode:
        return (4 - offset) & 3; // return size of nop required for alignment
      case BBEND_opcode:
      case READ_CEILING_opcode:
      case WRITE_FLOOR_opcode:
      case UNINT_BEGIN_opcode:
      case UNINT_END_opcode: {
        // these generate no code
        return 0;
      }
      case IA32_METHODSTART_opcode:
        return 12;
      // Generated from the same case in Assembler
      case IA32_ADC_opcode:
      case IA32_ADD_opcode:
      case IA32_AND_opcode:
      case IA32_OR_opcode:
      case IA32_SBB_opcode:
      case IA32_XOR_opcode: {
        int size = 2; // opcode + modr/m
        size += operandCost(MIR_BinaryAcc.getResult(inst), true);
        size += operandCost(MIR_BinaryAcc.getValue(inst), true);
        return size;
      }
      case IA32_CMP_opcode: {
        int size = 2; // opcode + modr/m
        size += operandCost(MIR_Compare.getVal1(inst), true);
        size += operandCost(MIR_Compare.getVal2(inst), true);
        return size;
      }
      case IA32_TEST_opcode: {
        int size = 2; // opcode + modr/m
        size += operandCost(MIR_Test.getVal1(inst), false);
        size += operandCost(MIR_Test.getVal2(inst), false);
        return size;
      }
      case IA32_ADDSD_opcode:
      case IA32_SUBSD_opcode:
      case IA32_MULSD_opcode:
      case IA32_DIVSD_opcode:
      case IA32_XORPD_opcode:
      case IA32_SQRTSD_opcode:
      case IA32_ADDSS_opcode:
      case IA32_SUBSS_opcode:
      case IA32_MULSS_opcode:
      case IA32_DIVSS_opcode:
      case IA32_XORPS_opcode: {
        int size = 4; // opcode + modr/m
        Operand value = MIR_BinaryAcc.getValue(inst);
        size += operandCost(value, false);
        return size;
      }
      case IA32_UCOMISS_opcode: {
        int size = 3; // opcode + modr/m
        Operand val2 = MIR_Compare.getVal2(inst);
        size += operandCost(val2, false);
        return size;
      }
      case IA32_UCOMISD_opcode: {
        int size = 4; // opcode + modr/m
        Operand val2 = MIR_Compare.getVal2(inst);
        size += operandCost(val2, false);
        return size;
      }
      case IA32_CVTSI2SS_opcode:
      case IA32_CVTSI2SD_opcode:
      case IA32_CVTSS2SD_opcode:
      case IA32_CVTSD2SS_opcode:
      case IA32_CVTSD2SI_opcode:
      case IA32_CVTTSD2SI_opcode:
      case IA32_CVTSS2SI_opcode:
      case IA32_CVTTSS2SI_opcode: {
        int size = 4; // opcode + modr/m
        Operand result = MIR_Unary.getResult(inst);
        Operand value = MIR_Unary.getVal(inst);
        size += operandCost(result, false);
        size += operandCost(value, false);
        return size;
      }
      case IA32_CMPEQSD_opcode:
      case IA32_CMPLTSD_opcode:
      case IA32_CMPLESD_opcode:
      case IA32_CMPUNORDSD_opcode:
      case IA32_CMPNESD_opcode:
      case IA32_CMPNLTSD_opcode:
      case IA32_CMPNLESD_opcode:
      case IA32_CMPORDSD_opcode:
      case IA32_CMPEQSS_opcode:
      case IA32_CMPLTSS_opcode:
      case IA32_CMPLESS_opcode:
      case IA32_CMPUNORDSS_opcode:
      case IA32_CMPNESS_opcode:
      case IA32_CMPNLTSS_opcode:
      case IA32_CMPNLESS_opcode:
      case IA32_CMPORDSS_opcode: {
        int size = 5; // opcode + modr/m + type
        Operand value = MIR_BinaryAcc.getValue(inst);
        size += operandCost(value, false);
        return size;
      }
      case IA32_MOVD_opcode:
      case IA32_MOVAPD_opcode:
      case IA32_MOVAPS_opcode:
      case IA32_MOVLPD_opcode:
      case IA32_MOVQ_opcode:
      case IA32_MOVSS_opcode:
      case IA32_MOVSD_opcode: {
        int size = 4; // opcode + modr/m
        Operand result = MIR_Move.getResult(inst);
        Operand value = MIR_Move.getValue(inst);
        size += operandCost(result, false);
        size += operandCost(value, false);
        return size;
      }
      case IA32_PUSH_opcode: {
        Operand op = MIR_UnaryNoRes.getVal(inst);
        int size = 0;
        if (op instanceof RegisterOperand) {
          size += 1;
        } else if (op instanceof IntConstantOperand) {
          if (fits(((IntConstantOperand) op).value, 8)) {
            size += 2;
          } else {
            size += 5;
          }
        } else {
          size += (2 + operandCost(op, true));
        }
        return size;
      }
      case IA32_LEA_opcode: {
        int size = 2; // opcode + 1 byte modr/m
        size += operandCost(MIR_Lea.getResult(inst), false);
        size += operandCost(MIR_Lea.getValue(inst), false);
        return size;
      }
      case IA32_MOV_opcode: {
        int size = 2; // opcode + modr/m
        Operand result = MIR_Move.getResult(inst);
        Operand value = MIR_Move.getValue(inst);
        size += operandCost(result, false);
        size += operandCost(value, false);
        return size;
      }
      case MIR_LOWTABLESWITCH_opcode:
        return MIR_LowTableSwitch.getNumberOfTargets(inst) * 4 + 14;
      case IA32_OFFSET_opcode:
        return 4;
      case IA32_JCC_opcode:
      case IA32_JMP_opcode:
        return 6; // assume long form
      case IA32_LOCK_opcode:
        return 1;
      case IG_PATCH_POINT_opcode:
        return 8;
      case IA32_INT_opcode:
        return 2;
      case IA32_RET_opcode:
        return 3;
      case IA32_CALL_opcode:
        Operand target = MIR_Call.getTarget(inst);
        if (isImmOrLabel(target)) {
          return 5; // opcode + 32bit immediate
        } else {
          return 2 + operandCost(target, false); // opcode + modr/m
        }
      default: {
        int size = 3; // 2 bytes opcode + 1 byte modr/m
        for (Enumeration<Operand> opEnum = inst.getRootOperands(); opEnum.hasMoreElements();) {
          Operand op = opEnum.nextElement();
          size += operandCost(op, false);
        }
        return size;
      }
    }
  }
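
  // Worked estimate (operands hypothetical): an ADD of an 8 bit-fitting
  // IntConstantOperand such as 5 into a register falls into the
  // ADC/ADD/AND/OR/SBB/XOR case and costs
  //   2 (opcode + modr/m) + 0 (register result) + 1 (short-form immediate)
  // = 3 bytes. The estimate only needs to be conservative enough for
  // targetIsClose(); the actual encoding is produced later by doInst().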

  private int operandCost(Operand op, boolean shortFormImmediate) {
    if (op instanceof MemoryOperand) {
      MemoryOperand mop = (MemoryOperand) op;
      // If it's a 2 byte mem location, we're going to need an override prefix
      int prefix = mop.size == 2 ? 1 : 0;

      // Deal with EBP weirdness
      if (mop.base != null && mop.base.getRegister() == EBP) {
        if (mop.index != null) {
          // forced into SIB + 32 bit displacement no matter what disp is
          return prefix + 5;
        }
        if (fits(mop.disp, 8)) {
          return prefix + 1;
        } else {
          return prefix + 4;
        }
      }
      if (mop.index != null && mop.index.getRegister() == EBP) {
        // forced into SIB + 32 bit displacement no matter what disp is
        return prefix + 5;
      }

      // Deal with ESP weirdness -- requires SIB byte even when index is null
      if (mop.base != null && mop.base.getRegister() == ESP) {
        if (fits(mop.disp, 8)) {
          return prefix + 2;
        } else {
          return prefix + 5;
        }
      }

      if (mop.index == null) {
        // just displacement to worry about
        if (mop.disp.isZero()) {
          return prefix + 0;
        } else if (fits(mop.disp, 8)) {
          return prefix + 1;
        } else {
          return prefix + 4;
        }
      } else {
        // have a SIB
        if (mop.base == null && mop.scale != 0) {
          // forced to 32 bit displacement even if it would fit in 8
          return prefix + 5;
        } else {
          if (mop.disp.isZero()) {
            return prefix + 1;
          } else if (fits(mop.disp, 8)) {
            return prefix + 2;
          } else {
            return prefix + 5;
          }
        }
      }
    } else if (op instanceof IntConstantOperand) {
      if (shortFormImmediate && fits(((IntConstantOperand) op).value, 8)) {
        return 1;
      } else {
        return 4;
      }
    } else {
      return 0;
    }
  }
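
  // Example costs (operands hypothetical), added on top of an
  // instruction's base opcode + modr/m bytes:
  //   [EAX]          -> 0 (register-indirect, no displacement)
  //   [EAX + 8]      -> 1 (8 bit displacement)
  //   [ESP + 8]      -> 2 (ESP always forces a SIB byte)
  //   [EBP + EAX<<2] -> 5 (EBP base plus an index forces SIB + 32 bit disp)
  // A 2 byte memory access adds 1 more for the operand-size prefix.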

  /**
   * Emit the given instruction, assuming that
   * it is a MIR_CondBranch instruction
   * and has a JCC operator
   *
   * @param inst the instruction to assemble
   */
  protected void doJCC(Instruction inst) {
    byte cond = getCond(MIR_CondBranch.getCond(inst));
    if (isImm(MIR_CondBranch.getTarget(inst))) {
      emitJCC_Cond_Imm(cond, getImm(MIR_CondBranch.getTarget(inst)));
    } else {
      if (VM.VerifyAssertions && !isLabel(MIR_CondBranch.getTarget(inst))) {
        throw new OptimizingCompilerException("Unexpected operand " + inst.toString());
      }
      int sourceLabel = -mcOffsets.getMachineCodeOffset(inst);
      int targetLabel = getLabel(MIR_CondBranch.getTarget(inst));
      int delta = targetLabel - sourceLabel;
      if (VM.VerifyAssertions) opt_assert(delta >= 0);
      if (delta < 10 || (delta < 90 && targetIsClose(inst, -targetLabel))) {
        int miStart = mi;
        ForwardReference r = new ForwardReference.ShortBranch(mi, targetLabel);
        forwardRefs = ForwardReference.enqueue(forwardRefs, r);
        setMachineCodes(mi++, (byte) (0x70 + cond));
        mi += 1; // leave space for displacement
        if (lister != null) lister.I(miStart, "J" + CONDITION[cond], 0);
      } else {
        emitJCC_Cond_Label(cond, targetLabel);
      }
    }
  }

  /**
   * Emit the given instruction, assuming that
   * it is a MIR_Branch instruction
   * and has a JMP operator
   *
   * @param inst the instruction to assemble
   */
  protected void doJMP(Instruction inst) {
    if (isImm(MIR_Branch.getTarget(inst))) {
      emitJMP_Imm(getImm(MIR_Branch.getTarget(inst)));
    } else if (isLabel(MIR_Branch.getTarget(inst))) {
      int sourceLabel = -mcOffsets.getMachineCodeOffset(inst);
      int targetLabel = getLabel(MIR_Branch.getTarget(inst));
      int delta = targetLabel - sourceLabel;
      if (VM.VerifyAssertions) opt_assert(delta >= 0);
      if (delta < 10 || (delta < 90 && targetIsClose(inst, -targetLabel))) {
        int miStart = mi;
        ForwardReference r = new ForwardReference.ShortBranch(mi, targetLabel);
        forwardRefs = ForwardReference.enqueue(forwardRefs, r);
        setMachineCodes(mi++, (byte) 0xEB);
        mi += 1; // leave space for displacement
        if (lister != null) lister.I(miStart, "JMP", 0);
      } else {
        emitJMP_Label(getLabel(MIR_Branch.getTarget(inst)));
      }
    } else if (isReg(MIR_Branch.getTarget(inst))) {
      emitJMP_Reg(getGPR_Reg(MIR_Branch.getTarget(inst)));
    } else if (isAbs(MIR_Branch.getTarget(inst))) {
      emitJMP_Abs(getDisp(MIR_Branch.getTarget(inst)).toWord().toAddress());
    } else if (isRegDisp(MIR_Branch.getTarget(inst))) {
      emitJMP_RegDisp(getBase(MIR_Branch.getTarget(inst)), getDisp(MIR_Branch.getTarget(inst)));
    } else if (isRegOff(MIR_Branch.getTarget(inst))) {
      emitJMP_RegOff(getIndex(MIR_Branch.getTarget(inst)),
                     getScale(MIR_Branch.getTarget(inst)),
                     getDisp(MIR_Branch.getTarget(inst)));
    } else if (isRegIdx(MIR_Branch.getTarget(inst))) {
      emitJMP_RegIdx(getBase(MIR_Branch.getTarget(inst)),
                     getIndex(MIR_Branch.getTarget(inst)),
                     getScale(MIR_Branch.getTarget(inst)),
                     getDisp(MIR_Branch.getTarget(inst)));
    } else if (isRegInd(MIR_Branch.getTarget(inst))) {
      emitJMP_RegInd(getBase(MIR_Branch.getTarget(inst)));
    } else {
      if (VM.VerifyAssertions) {
        throw new OptimizingCompilerException("Unexpected operand " + inst.toString());
      }
    }
  }
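
  // Encoding background for the short-form paths above (standard IA32):
  // a short conditional jump is 2 bytes (0x70 + cond followed by an 8 bit
  // displacement) and a short JMP is 2 bytes (0xEB, disp8), versus 6 and
  // 5 bytes for the long 32 bit forms. The delta < 10 and delta < 90
  // label-distance tests are cheap conservative filters; targetIsClose()
  // then verifies the byte distance before committing to an 8 bit
  // displacement.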

  /**
   * Emit the given instruction, assuming that
   * it is a MIR_LowTableSwitch instruction
   * and has a MIR_LOWTABLESWITCH operator
   *
   * @param inst the instruction to assemble
   */
  protected void doLOWTABLESWITCH(Instruction inst) {
    int n = MIR_LowTableSwitch.getNumberOfTargets(inst); // n = number of normal cases (0..n-1)
    GPR ms = GPR.lookup(MIR_LowTableSwitch.getMethodStart(inst).getRegister().number);
    GPR idx = GPR.lookup(MIR_LowTableSwitch.getIndex(inst).getRegister().number);
    // idx += [ms + idx<<2 + ??] - we will patch ?? when we know the placement of the table
    int toPatchAddress = getMachineCodeIndex();
    if (VM.buildFor32Addr()) {
      emitMOV_Reg_RegIdx(idx, ms, idx, WORD, Offset.fromIntZeroExtend(Integer.MAX_VALUE));
      emitADD_Reg_Reg(idx, ms);
    } else {
      emitMOV_Reg_RegIdx(idx, ms, idx, WORD, Offset.fromIntZeroExtend(Integer.MAX_VALUE));
      emitADD_Reg_Reg_Quad(idx, ms);
    }
    // JMP T0
    emitJMP_Reg(idx);
    emitNOP((4 - getMachineCodeIndex()) & 3); // align table
    // create table of offsets from start of method
    patchSwitchTableDisplacement(toPatchAddress);
    for (int i = 0; i < n; i++) {
      Operand target = MIR_LowTableSwitch.getTarget(inst, i);
      emitOFFSET_Imm_ImmOrLabel(i, getImm(target), getLabel(target));
    }
  }

  protected void doIMMQ_MOV(Instruction inst) {
    Operand result = MIR_Move.getResult(inst);
    if (isGPR_Reg(result)) {
      if (isQuad(inst)) {
        Operand value = MIR_Move.getValue(inst);
        if (isImm(value)) {
          emitMOV_Reg_Imm_Quad(getGPR_Reg(result), getImmQuad(value));
          return;
        }
      }
    }
    throw new OptimizingCompilerException("Unexpected operand/imm " + inst.toString());
  }

  /**
   * Debugging support (return a printable representation of the machine code).
   *
   * @param instr An integer to be interpreted as an IA32 instruction
   * @param offset the mcoffset (in bytes) of the instruction
   * @return a printable representation of the machine code
   */
  public String disasm(int instr, int offset) {
    OptimizingCompilerException.TODO("Assembler: disassembler");
    return null;
  }

  /**
   * Assembles the given instruction.
   *
   * @param inst the instruction to assemble
   */
  public abstract void doInst(Instruction inst);

  /**
   * Generates machine code into ir.machinecode.
   *
   * @return the number of machine code instructions generated
   */
  public int generateCode() {
    int count = 0;
    for (Instruction p = ir.firstInstructionInCodeOrder(); p != null; p = p.nextInstructionInCodeOrder()) {
      // Set the mc offset of all instructions to their negative position.
      // A positive value in their position means they have been created
      // by the assembler.
      count++;
      mcOffsets.setMachineCodeOffset(p, -count);
      if (p.operator() == MIR_LOWTABLESWITCH) {
        // Table switch kludge, as these will occupy multiple slots in the
        // generated assembler
        count += MIR_LowTableSwitch.getNumberOfTargets(p);
      }
    }

    for (Instruction p = ir.firstInstructionInCodeOrder(); p != null; p = p.nextInstructionInCodeOrder()) {
      if (DEBUG_ESTIMATE) {
        int start = getMachineCodeIndex();
        int estimate = estimateSize(p, start);
        doInst(p);
        int end = getMachineCodeIndex();
        if (end - start > estimate) {
          VM.sysWriteln("Bad estimate: " + (end - start) + " " + estimate + " " + p);
          VM.sysWrite("\tMachine code: ");
          writeLastInstruction(start);
          VM.sysWriteln();
        }
      } else {
        doInst(p);
      }
    }

    ir.MIRInfo.machinecode = getMachineCodes();

    return ir.MIRInfo.machinecode.length();
  }
}