private static void EmitSmmul(ArmEmitterContext context, MullFlags flags)
{
    OpCode32AluMla op = (OpCode32AluMla)context.CurrOp;

    Operand n = context.SignExtend32(OperandType.I64, GetIntA32(context, op.Rn));
    Operand m = context.SignExtend32(OperandType.I64, GetIntA32(context, op.Rm));

    Operand res = context.Multiply(n, m);

    if (flags.HasFlag(MullFlags.Add) && op.Ra != 0xf)
    {
        res = context.Add(context.ShiftLeft(context.ZeroExtend32(OperandType.I64, GetIntA32(context, op.Ra)), Const(32)), res);
    }
    else if (flags.HasFlag(MullFlags.Subtract))
    {
        res = context.Subtract(context.ShiftLeft(context.ZeroExtend32(OperandType.I64, GetIntA32(context, op.Ra)), Const(32)), res);
    }

    if (op.R)
    {
        res = context.Add(res, Const(0x80000000L));
    }

    Operand hi = context.ConvertI64ToI32(context.ShiftRightSI(res, Const(32)));

    EmitGenericAluStoreA32(context, op.Rd, false, hi);
}
public static void Ldm(ArmEmitterContext context)
{
    OpCode32MemMult op = (OpCode32MemMult)context.CurrOp;

    Operand n = GetIntA32(context, op.Rn);

    Operand baseAddress = context.Add(n, Const(op.Offset));

    bool writesToPc = (op.RegisterMask & (1 << RegisterAlias.Aarch32Pc)) != 0;

    bool writeBack = op.PostOffset != 0 && (op.Rn != RegisterAlias.Aarch32Pc || !writesToPc);

    if (writeBack)
    {
        SetIntA32(context, op.Rn, context.Add(n, Const(op.PostOffset)));
    }

    int mask = op.RegisterMask;
    int offset = 0;

    for (int register = 0; mask != 0; mask >>= 1, register++)
    {
        if ((mask & 1) != 0)
        {
            Operand address = context.Add(baseAddress, Const(offset));

            EmitLoadZx(context, address, register, WordSizeLog2);

            offset += 4;
        }
    }
}
public static void Stm(ArmEmitterContext context)
{
    OpCode32MemMult op = (OpCode32MemMult)context.CurrOp;

    Operand n = GetIntA32(context, op.Rn);

    Operand baseAddress = context.Add(n, Const(op.Offset));

    int mask = op.RegisterMask;
    int offset = 0;

    for (int register = 0; mask != 0; mask >>= 1, register++)
    {
        if ((mask & 1) != 0)
        {
            Operand address = context.Add(baseAddress, Const(offset));

            EmitStore(context, address, register, WordSizeLog2);

            // Note: If Rn is also in the register list and is the first register
            // in the list, then the value written to memory is the unmodified
            // value, before the write back. If it is in the list but is not the
            // first register, the value written to memory varies between CPUs.
            if (offset == 0 && op.PostOffset != 0)
            {
                // Emit the write back after the first store.
                SetIntA32(context, op.Rn, context.Add(n, Const(op.PostOffset)));
            }

            offset += 4;
        }
    }
}
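// Illustrative sketch, not emitter code: a plain C# simulation of the store
// order above, showing the note about Rn in the register list. The register
// array and return shape are assumptions made purely for this example.
private static uint[] SimulateStm(uint[] regs, int rn, int registerMask, int postOffset)
{
    var stored = new System.Collections.Generic.List<uint>();

    int offset = 0;

    for (int register = 0, mask = registerMask; mask != 0; mask >>= 1, register++)
    {
        if ((mask & 1) != 0)
        {
            stored.Add(regs[register]); // Value stored at baseAddress + offset.

            if (offset == 0 && postOffset != 0)
            {
                regs[rn] += (uint)postOffset; // Write back after the first store.
            }

            offset += 4;
        }
    }

    return stored.ToArray();
}
// With rn = 1 and registerMask = 0b0110 (r1, r2), r1 is the first register in
// the list, so the value stored for it is the pre-write-back value of r1.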
public static void Vstm(ArmEmitterContext context)
{
    OpCode32SimdMemMult op = (OpCode32SimdMemMult)context.CurrOp;

    Operand n = context.Copy(GetIntA32(context, op.Rn));

    Operand baseAddress = context.Add(n, Const(op.Offset));

    bool writeBack = op.PostOffset != 0;

    if (writeBack)
    {
        SetIntA32(context, op.Rn, context.Add(n, Const(op.PostOffset)));
    }

    int offset = 0;

    int range = op.RegisterRange;
    int sReg = op.DoubleWidth ? op.Vd << 1 : op.Vd;
    int byteSize = 4;

    for (int num = 0; num < range; num++, sReg++)
    {
        Operand address = context.Add(baseAddress, Const(offset));

        EmitStoreSimd(context, address, sReg >> 2, sReg & 3, WordSizeLog2);

        offset += byteSize;
    }
}
private static void EmitAdc(ArmEmitterContext context, bool setFlags)
{
    Operand n = GetAluN(context);
    Operand m = GetAluM(context);

    Operand d = context.Add(n, m);

    Operand carry = GetFlag(PState.CFlag);

    if (context.CurrOp.RegisterSize == RegisterSize.Int64)
    {
        carry = context.ZeroExtend32(OperandType.I64, carry);
    }

    d = context.Add(d, carry);

    if (setFlags)
    {
        EmitNZFlagsCheck(context, d);

        EmitAdcsCCheck(context, n, d);
        EmitAddsVCheck(context, n, m, d);
    }

    SetAluDOrZR(context, d);
}
public static void Vrshr(ArmEmitterContext context)
{
    OpCode32SimdShImm op = (OpCode32SimdShImm)context.CurrOp;

    int shift = GetImmShr(op);
    long roundConst = 1L << (shift - 1);

    if (op.U)
    {
        if (op.Size < 2)
        {
            EmitVectorUnaryOpZx32(context, (op1) =>
            {
                op1 = context.Add(op1, Const(op1.Type, roundConst));

                return context.ShiftRightUI(op1, Const(shift));
            });
        }
        else if (op.Size == 2)
        {
            EmitVectorUnaryOpZx32(context, (op1) =>
            {
                op1 = context.ZeroExtend32(OperandType.I64, op1);
                op1 = context.Add(op1, Const(op1.Type, roundConst));

                return context.ConvertI64ToI32(context.ShiftRightUI(op1, Const(shift)));
            });
        }
        else /* if (op.Size == 3) */
        {
            EmitVectorUnaryOpZx32(context, (op1) => EmitShrImm64(context, op1, signed: false, roundConst, shift));
        }
    }
    else
    {
        if (op.Size < 2)
        {
            EmitVectorUnaryOpSx32(context, (op1) =>
            {
                op1 = context.Add(op1, Const(op1.Type, roundConst));

                return context.ShiftRightSI(op1, Const(shift));
            });
        }
        else if (op.Size == 2)
        {
            EmitVectorUnaryOpSx32(context, (op1) =>
            {
                op1 = context.SignExtend32(OperandType.I64, op1);
                op1 = context.Add(op1, Const(op1.Type, roundConst));

                return context.ConvertI64ToI32(context.ShiftRightSI(op1, Const(shift)));
            });
        }
        else /* if (op.Size == 3) */
        {
            EmitVectorUnaryOpZx32(context, (op1) => EmitShrImm64(context, op1, signed: true, roundConst, shift));
        }
    }
}
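// Illustrative sketch, not emitter code: the rounding constant 1L << (shift - 1)
// used by Vrshr (and the other rounding shifts in this section) turns a
// truncating right shift into a round-half-up shift, i.e. round(x / 2^shift)
// with ties toward positive infinity.
private static long RoundingShiftRight(long x, int shift)
{
    long roundConst = 1L << (shift - 1);

    return (x + roundConst) >> shift;
}
// For example, RoundingShiftRight(5, 1) == 3 and RoundingShiftRight(-5, 1) == -2.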
public static Operand EmitLoadExclusive(ArmEmitterContext context, Operand address, bool exclusive, int size)
{
    if (exclusive)
    {
        Operand value;

        if (size == 4)
        {
            Operand isUnalignedAddr = InstEmitMemoryHelper.EmitAddressCheck(context, address, size);

            Operand lblFastPath = Label();

            context.BranchIfFalse(lblFastPath, isUnalignedAddr);

            // The call is not expected to return (it should throw).
            context.Call(typeof(NativeInterface).GetMethod(nameof(NativeInterface.ThrowInvalidMemoryAccess)), address);

            context.MarkLabel(lblFastPath);

            // Only 128-bit CAS is guaranteed to have an atomic load.
            Operand physAddr = InstEmitMemoryHelper.EmitPtPointerLoad(context, address, null, write: false);

            Operand zero = context.VectorZero();

            value = context.CompareAndSwap(physAddr, zero, zero);
        }
        else
        {
            value = InstEmitMemoryHelper.EmitReadIntAligned(context, address, size);
        }

        Operand arg0 = context.LoadArgument(OperandType.I64, 0);

        Operand exAddrPtr  = context.Add(arg0, Const((long)NativeContext.GetExclusiveAddressOffset()));
        Operand exValuePtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveValueOffset()));

        context.Store(exAddrPtr, context.BitwiseAnd(address, Const(address.Type, GetExclusiveAddressMask())));

        // Make sure the unused higher bits of the value are cleared.
        if (size < 3)
        {
            context.Store(exValuePtr, Const(0UL));
        }
        if (size < 4)
        {
            context.Store(context.Add(exValuePtr, Const(exValuePtr.Type, 8L)), Const(0UL));
        }

        // Store the new exclusive value.
        context.Store(exValuePtr, value);

        return value;
    }
    else
    {
        return InstEmitMemoryHelper.EmitReadIntAligned(context, address, size);
    }
}
public static void EmitDynamicTableCall(ArmEmitterContext context, Operand tableAddress, Operand address, bool isJump)
{
    // Loop over elements of the dynamic table. Unrolled loop.
    Operand endLabel = Label();
    Operand fallbackLabel = Label();

    void EmitTableEntry(Operand entrySkipLabel)
    {
        // Try to take this entry in the table if its guest address equals 0.
        Operand gotResult = context.CompareAndSwap(tableAddress, Const(0L), address);

        // Is the address ours? (Either taken via CompareAndSwap (0), or what was already here.)
        context.BranchIfFalse(entrySkipLabel, context.BitwiseOr(
            context.ICompareEqual(gotResult, address),
            context.ICompareEqual(gotResult, Const(0L)))
        );

        // It's ours, so what function is it pointing to?
        Operand targetFunctionPtr = context.Add(tableAddress, Const(8L));
        Operand targetFunction = context.Load(OperandType.I64, targetFunctionPtr);

        // Call the function.
        // We pass in the entry address as the guest address, as the entry may need to be updated by the
        // indirect call stub.
        EmitNativeCallWithGuestAddress(context, targetFunction, tableAddress, isJump);

        context.Branch(endLabel);
    }

    // Currently this uses a size of 1, as higher values inflate code size for no real benefit.
    for (int i = 0; i < JumpTable.DynamicTableElems; i++)
    {
        if (i == JumpTable.DynamicTableElems - 1)
        {
            // If this is the last entry, avoid emitting the additional label and add.
            EmitTableEntry(fallbackLabel);
        }
        else
        {
            Operand nextLabel = Label();

            EmitTableEntry(nextLabel);

            context.MarkLabel(nextLabel);

            // Move to the next table entry.
            tableAddress = context.Add(tableAddress, Const((long)JumpTable.JumpTableStride));
        }
    }

    context.MarkLabel(fallbackLabel);

    EmitBranchFallback(context, address, isJump);

    context.MarkLabel(endLabel);
}
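// Illustrative sketch, not emitter code: the "claim a table entry" pattern used
// above, expressed with Interlocked.CompareExchange. A slot is free while it
// holds 0; the entry is ours if the CAS installed our address, or if the slot
// already held our address.
private static bool TryClaimSlot(ref long slot, long guestAddress)
{
    long got = System.Threading.Interlocked.CompareExchange(ref slot, guestAddress, 0L);

    return got == 0L || got == guestAddress;
}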
private static void EmitExtend16(ArmEmitterContext context, bool signed)
{
    IOpCode32AluUx op = (IOpCode32AluUx)context.CurrOp;

    Operand m = GetAluM(context);
    Operand res;

    if (op.RotateBits == 0)
    {
        res = m;
    }
    else
    {
        Operand rotate = Const(op.RotateBits);

        res = context.RotateRight(m, rotate);
    }

    // Extend the low byte of each halfword of the (rotated) operand.
    Operand low16, high16;
    if (signed)
    {
        low16  = context.SignExtend8(OperandType.I32, res);
        high16 = context.SignExtend8(OperandType.I32, context.ShiftRightUI(res, Const(16)));
    }
    else
    {
        low16  = context.ZeroExtend8(OperandType.I32, res);
        high16 = context.ZeroExtend8(OperandType.I32, context.ShiftRightUI(res, Const(16)));
    }

    if (op.Add)
    {
        Operand n = GetAluN(context);

        Operand lowAdd, highAdd;
        if (signed)
        {
            lowAdd  = context.SignExtend16(OperandType.I32, n);
            highAdd = context.SignExtend16(OperandType.I32, context.ShiftRightUI(n, Const(16)));
        }
        else
        {
            lowAdd  = context.ZeroExtend16(OperandType.I32, n);
            highAdd = context.ZeroExtend16(OperandType.I32, context.ShiftRightUI(n, Const(16)));
        }

        low16  = context.Add(low16, lowAdd);
        high16 = context.Add(high16, highAdd);
    }

    // Repack the two halfword results.
    res = context.BitwiseOr(
        context.ZeroExtend16(OperandType.I32, low16),
        context.ShiftLeft(context.ZeroExtend16(OperandType.I32, high16), Const(16)));

    EmitAluStore(context, res);
}
private static void EmitShrImmOp(ArmEmitterContext context, ShrImmFlags flags)
{
    OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

    Operand res = context.VectorZero();

    bool scalar     = (flags & ShrImmFlags.Scalar)     != 0;
    bool signed     = (flags & ShrImmFlags.Signed)     != 0;
    bool round      = (flags & ShrImmFlags.Round)      != 0;
    bool accumulate = (flags & ShrImmFlags.Accumulate) != 0;

    int shift = GetImmShr(op);

    long roundConst = 1L << (shift - 1);

    int elems = !scalar ? op.GetBytesCount() >> op.Size : 1;

    for (int index = 0; index < elems; index++)
    {
        Operand e = EmitVectorExtract(context, op.Rn, index, op.Size, signed);

        if (op.Size <= 2)
        {
            if (round)
            {
                e = context.Add(e, Const(roundConst));
            }

            e = signed
                ? context.ShiftRightSI(e, Const(shift))
                : context.ShiftRightUI(e, Const(shift));
        }
        else /* if (op.Size == 3) */
        {
            e = EmitShrImm64(context, e, signed, round ? roundConst : 0L, shift);
        }

        if (accumulate)
        {
            Operand de = EmitVectorExtract(context, op.Rd, index, op.Size, signed);

            e = context.Add(e, de);
        }

        res = EmitVectorInsert(context, res, e, index, op.Size);
    }

    context.Copy(GetVec(op.Rd), res);
}
public static void Sys(ArmEmitterContext context)
{
    // This instruction is used to do some operations on the CPU like cache invalidation,
    // address translation and the like.
    // We treat it as a no-op here since we don't have any cache being emulated anyway.
    OpCodeSystem op = (OpCodeSystem)context.CurrOp;

    switch (GetPackedId(op))
    {
        case 0b11_011_0111_0100_001:
        {
            // DC ZVA
            Operand t = GetIntOrZR(context, op.Rt);

            for (long offset = 0; offset < (4 << DczSizeLog2); offset += 8)
            {
                Operand address = context.Add(t, Const(offset));

                context.Call(new _Void_U64_U64(NativeInterface.WriteUInt64), address, Const(0L));
            }

            break;
        }

        // No-op
        case 0b11_011_0111_1110_001: // DC CIVAC
            break;
    }
}
private static void EmitVectorShrImmNarrowOpZx(ArmEmitterContext context, bool round)
{
    OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

    int shift = GetImmShr(op);

    long roundConst = 1L << (shift - 1);

    int elems = 8 >> op.Size;

    int part = op.RegisterSize == RegisterSize.Simd128 ? elems : 0;

    Operand res = part == 0 ? context.VectorZero() : context.Copy(GetVec(op.Rd));

    for (int index = 0; index < elems; index++)
    {
        Operand e = EmitVectorExtractZx(context, op.Rn, index, op.Size + 1);

        if (round)
        {
            e = context.Add(e, Const(roundConst));
        }

        e = context.ShiftRightUI(e, Const(shift));

        res = EmitVectorInsert(context, res, e, part + index, op.Size);
    }

    context.Copy(GetVec(op.Rd), res);
}
public static void Ldp(ArmEmitterContext context)
{
    OpCodeMemPair op = (OpCodeMemPair)context.CurrOp;

    void EmitLoad(int rt, Operand ldAddr)
    {
        if (op.Extend64)
        {
            EmitLoadSx64(context, ldAddr, rt, op.Size);
        }
        else
        {
            EmitLoadZx(context, ldAddr, rt, op.Size);
        }
    }

    Operand address = GetAddress(context);

    Operand address2 = context.Add(address, Const(1L << op.Size));

    EmitLoad(op.Rt, address);
    EmitLoad(op.Rt2, address2);

    EmitWBackIfNeeded(context, address);
}
private static void EmitHadd8(ArmEmitterContext context, bool unsigned)
{
    OpCode32AluReg op = (OpCode32AluReg)context.CurrOp;

    Operand m = GetIntA32(context, op.Rm);
    Operand n = GetIntA32(context, op.Rn);

    Operand xor, res, carry;

    // This relies on the equality x + y == ((x & y) << 1) + (x ^ y).
    // Note that x ^ y always contains the LSB of the result.
    // Since we want to calculate (x + y) / 2, we can instead calculate (x & y) + ((x ^ y) >> 1).
    // We mask by 0x7F to remove the LSB so that it doesn't leak into the field below.
    res = context.BitwiseAnd(m, n);
    carry = context.BitwiseExclusiveOr(m, n);
    xor = context.ShiftRightUI(carry, Const(1));
    xor = context.BitwiseAnd(xor, Const(0x7F7F7F7Fu));
    res = context.Add(res, xor);

    if (!unsigned)
    {
        // Propagates the sign bit from (x ^ y) >> 1 upwards by one.
        carry = context.BitwiseAnd(carry, Const(0x80808080u));
        res = context.BitwiseExclusiveOr(res, carry);
    }

    SetIntA32(context, op.Rd, res);
}
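// Illustrative sketch, not emitter code: verifies the identity behind EmitHadd8,
// x + y == ((x & y) << 1) + (x ^ y), which gives the halved sum as
// (x & y) + ((x ^ y) >> 1) without ever forming the overflowing x + y.
private static void CheckHalvingAdd(uint x, uint y)
{
    uint halved = (x & y) + ((x ^ y) >> 1);

    System.Diagnostics.Debug.Assert(halved == (uint)(((ulong)x + y) >> 1));
}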
private static void EmitSignExtend(ArmEmitterContext context, bool signed, int bits)
{
    IOpCode32AluUx op = (IOpCode32AluUx)context.CurrOp;

    Operand m = GetAluM(context);
    Operand res;

    if (op.RotateBits == 0)
    {
        res = m;
    }
    else
    {
        Operand rotate = Const(op.RotateBits);

        res = context.RotateRight(m, rotate);
    }

    switch (bits)
    {
        case 8:
            res = signed
                ? context.SignExtend8(OperandType.I32, res)
                : context.ZeroExtend8(OperandType.I32, res);
            break;
        case 16:
            res = signed
                ? context.SignExtend16(OperandType.I32, res)
                : context.ZeroExtend16(OperandType.I32, res);
            break;
    }

    if (op.Add)
    {
        res = context.Add(res, GetAluN(context));
    }

    EmitAluStore(context, res);
}
public static void Smlab(ArmEmitterContext context)
{
    OpCode32AluMla op = (OpCode32AluMla)context.CurrOp;

    Operand n = GetIntA32(context, op.Rn);
    Operand m = GetIntA32(context, op.Rm);

    if (op.NHigh)
    {
        n = context.SignExtend16(OperandType.I32, context.ShiftRightUI(n, Const(16)));
    }
    else
    {
        n = context.SignExtend16(OperandType.I32, n);
    }

    if (op.MHigh)
    {
        m = context.SignExtend16(OperandType.I32, context.ShiftRightUI(m, Const(16)));
    }
    else
    {
        m = context.SignExtend16(OperandType.I32, m);
    }

    Operand res = context.Multiply(n, m);

    Operand a = GetIntA32(context, op.Ra);
    res = context.Add(res, a);

    // TODO: Set the Q flag when the last addition overflows (saturation)?

    EmitGenericAluStoreA32(context, op.Rd, false, res);
}
public static void Sys(ArmEmitterContext context)
{
    // This instruction is used to do some operations on the CPU like cache invalidation,
    // address translation and the like.
    // We treat it as a no-op here since we don't have any cache being emulated anyway.
    OpCodeSystem op = (OpCodeSystem)context.CurrOp;

    switch (GetPackedId(op))
    {
        case 0b11_011_0111_0100_001:
        {
            // DC ZVA
            Operand t = GetIntOrZR(context, op.Rt);

            for (long offset = 0; offset < (4 << DczSizeLog2); offset += 8)
            {
                Operand address = context.Add(t, Const(offset));

                InstEmitMemoryHelper.EmitStore(context, address, RegisterConsts.ZeroIndex, 3);
            }

            break;
        }

        // No-op
        case 0b11_011_0111_1110_001: // DC CIVAC
            break;
    }
}
public static void Smlaw_(ArmEmitterContext context)
{
    OpCode32AluMla op = (OpCode32AluMla)context.CurrOp;

    Operand n = GetIntA32(context, op.Rn);
    Operand m = GetIntA32(context, op.Rm);
    Operand a = GetIntA32(context, op.Ra);

    if (op.MHigh)
    {
        m = context.SignExtend16(OperandType.I64, context.ShiftRightUI(m, Const(16)));
    }
    else
    {
        m = context.SignExtend16(OperandType.I64, m);
    }

    Operand res = context.Multiply(context.SignExtend32(OperandType.I64, n), m);

    Operand toAdd = context.ShiftLeft(context.SignExtend32(OperandType.I64, a), Const(16));
    res = context.Add(res, toAdd);
    res = context.ShiftRightSI(res, Const(16));

    Operand q = context.ICompareNotEqual(res, context.SignExtend32(OperandType.I64, res));
    res = context.ConvertI64ToI32(res);

    UpdateQFlag(context, q);

    EmitGenericAluStoreA32(context, op.Rd, false, res);
}
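// Illustrative sketch, not emitter code: the saturation test above. A 64-bit
// value fits in 32 bits exactly when truncating to 32 bits and sign-extending
// back reproduces it, which is what comparing res against SignExtend32(res) does.
private static bool Overflows32(long res)
{
    return res != (int)res;
}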
public static void Sys(ArmEmitterContext context)
{
    // This instruction is used to do some operations on the CPU like cache invalidation,
    // address translation and the like.
    // We treat it as a no-op here since we don't have any cache being emulated anyway.
    OpCodeSystem op = (OpCodeSystem)context.CurrOp;

    switch (GetPackedId(op))
    {
        case 0b11_011_0111_0100_001:
        {
            // DC ZVA
            Operand t = GetIntOrZR(context, op.Rt);

            for (long offset = 0; offset < DczSizeInBytes; offset += 8)
            {
                Operand address = context.Add(t, Const(offset));

                InstEmitMemoryHelper.EmitStore(context, address, RegisterConsts.ZeroIndex, 3);
            }

            break;
        }

        // No-op
        case 0b11_011_0111_1110_001: // DC CIVAC
            break;

        case 0b11_011_0111_0101_001: // IC IVAU
            Operand target = Register(op.Rt, RegisterType.Integer, OperandType.I64);

            context.Call(typeof(NativeInterface).GetMethod(nameof(NativeInterface.InvalidateCacheLine)), target);
            break;
    }
}
private static void EmitCsel(ArmEmitterContext context, CselOperation cselOp)
{
    OpCodeCsel op = (OpCodeCsel)context.CurrOp;

    Operand n = GetIntOrZR(context, op.Rn);
    Operand m = GetIntOrZR(context, op.Rm);

    if (cselOp == CselOperation.Increment)
    {
        m = context.Add(m, Const(m.Type, 1));
    }
    else if (cselOp == CselOperation.Invert)
    {
        m = context.BitwiseNot(m);
    }
    else if (cselOp == CselOperation.Negate)
    {
        m = context.Negate(m);
    }

    Operand condTrue = GetCondTrue(context, op.Cond);

    Operand d = context.ConditionalSelect(condTrue, n, m);

    SetIntOrZR(context, op.Rd, d);
}
private static void EmitMull(ArmEmitterContext context, MullFlags flags)
{
    OpCodeMul op = (OpCodeMul)context.CurrOp;

    Operand GetExtendedRegister32(int index)
    {
        Operand value = GetIntOrZR(context, index);

        if ((flags & MullFlags.Signed) != 0)
        {
            return context.SignExtend32(value.Type, value);
        }
        else
        {
            return context.ZeroExtend32(value.Type, value);
        }
    }

    Operand a = GetIntOrZR(context, op.Ra);

    Operand n = GetExtendedRegister32(op.Rn);
    Operand m = GetExtendedRegister32(op.Rm);

    Operand res = context.Multiply(n, m);

    res = (flags & MullFlags.Add) != 0 ? context.Add(a, res) : context.Subtract(a, res);

    SetIntOrZR(context, op.Rd, res);
}
private static Operand EmitPtPointerLoad(ArmEmitterContext context, Operand address, Operand lblFallbackPath)
{
    Operand pte = Const(context.Memory.PageTable.ToInt64());

    int bit = MemoryManager.PageBits;

    do
    {
        Operand addrPart = context.ShiftRightUI(address, Const(bit));

        bit += context.Memory.PtLevelBits;

        if (bit < context.Memory.AddressSpaceBits)
        {
            addrPart = context.BitwiseAnd(addrPart, Const(addrPart.Type, context.Memory.PtLevelMask));
        }

        Operand pteOffset = context.ShiftLeft(addrPart, Const(3));

        if (pteOffset.Type == OperandType.I32)
        {
            pteOffset = context.ZeroExtend32(OperandType.I64, pteOffset);
        }

        Operand pteAddress = context.Add(pte, pteOffset);

        pte = context.Load(OperandType.I64, pteAddress);
    }
    while (bit < context.Memory.AddressSpaceBits);

    if (!context.Memory.HasWriteWatchSupport)
    {
        Operand hasFlagSet = context.BitwiseAnd(pte, Const((long)MemoryManager.PteFlagsMask));

        context.BranchIfTrue(lblFallbackPath, hasFlagSet);
    }

    Operand pageOffset = context.BitwiseAnd(address, Const(address.Type, MemoryManager.PageMask));

    if (pageOffset.Type == OperandType.I32)
    {
        pageOffset = context.ZeroExtend32(OperandType.I64, pageOffset);
    }

    Operand physAddr = context.Add(pte, pageOffset);

    return physAddr;
}
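// Illustrative sketch, not emitter code: the page table walk emitted above, as
// plain C#. The load delegate standing in for guest memory reads is an
// assumption made for this example; levels are indexed from the top, and only
// non-final levels are masked to ptLevelBits bits.
private static long WalkPageTable(System.Func<long, long> load, long pageTable, ulong address, int pageBits, int ptLevelBits, int addressSpaceBits)
{
    long ptLevelMask = (1L << ptLevelBits) - 1;

    long pte = pageTable;

    int bit = pageBits;

    do
    {
        long addrPart = (long)(address >> bit);

        bit += ptLevelBits;

        if (bit < addressSpaceBits)
        {
            addrPart &= ptLevelMask;
        }

        pte = load(pte + (addrPart << 3)); // 8 bytes per table entry.
    }
    while (bit < addressSpaceBits);

    return pte; // Host address of the page; add the page offset of `address` to finish.
}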
public static Operand EmitLoadExclusive(ArmEmitterContext context, Operand address, bool exclusive, int size)
{
    if (exclusive)
    {
        Operand value;

        if (size == 4)
        {
            // Only 128-bit CAS is guaranteed to have an atomic load.
            Operand physAddr = InstEmitMemoryHelper.EmitPtPointerLoad(context, address, null, write: false, 4);

            Operand zero = context.VectorZero();

            value = context.CompareAndSwap(physAddr, zero, zero);
        }
        else
        {
            value = InstEmitMemoryHelper.EmitReadIntAligned(context, address, size);
        }

        Operand arg0 = context.LoadArgument(OperandType.I64, 0);

        Operand exAddrPtr  = context.Add(arg0, Const((long)NativeContext.GetExclusiveAddressOffset()));
        Operand exValuePtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveValueOffset()));

        context.Store(exAddrPtr, context.BitwiseAnd(address, Const(address.Type, GetExclusiveAddressMask())));

        // Make sure the unused higher bits of the value are cleared.
        if (size < 3)
        {
            context.Store(exValuePtr, Const(0UL));
        }
        if (size < 4)
        {
            context.Store(context.Add(exValuePtr, Const(exValuePtr.Type, 8L)), Const(0UL));
        }

        // Store the new exclusive value.
        context.Store(exValuePtr, value);

        return value;
    }
    else
    {
        return InstEmitMemoryHelper.EmitReadIntAligned(context, address, size);
    }
}
private static Operand GetAddress(ArmEmitterContext context, long addend = 0)
{
    Operand address = default;

    switch (context.CurrOp)
    {
        case OpCodeMemImm op:
        {
            address = context.Copy(GetIntOrSP(context, op.Rn));

            // Pre-indexing.
            if (!op.PostIdx)
            {
                address = context.Add(address, Const(op.Immediate + addend));
            }
            else if (addend != 0)
            {
                address = context.Add(address, Const(addend));
            }

            break;
        }

        case OpCodeMemReg op:
        {
            Operand n = GetIntOrSP(context, op.Rn);

            Operand m = GetExtendedM(context, op.Rm, op.IntType);

            if (op.Shift)
            {
                m = context.ShiftLeft(m, Const(op.Size));
            }

            address = context.Add(n, m);

            if (addend != 0)
            {
                address = context.Add(address, Const(addend));
            }

            break;
        }
    }

    return address;
}
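// Illustrative sketch, not emitter code: the immediate addressing modes handled
// above. A pre-indexed access applies the immediate before the access; a
// post-indexed access uses the unmodified base, and the immediate only affects
// the write back performed elsewhere.
private static long EffectiveAddress(long baseReg, long immediate, bool postIndexed)
{
    return postIndexed ? baseReg : baseReg + immediate;
}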
private static Operand EmitPtPointerLoad(ArmEmitterContext context, Operand address, Operand lblSlowPath)
{
    int ptLevelBits = context.Memory.AddressSpaceBits - 12; // 12 = Number of page bits.
    int ptLevelSize = 1 << ptLevelBits;
    int ptLevelMask = ptLevelSize - 1;

    Operand pte = Ptc.State == PtcState.Disabled
        ? Const(context.Memory.PageTablePointer.ToInt64())
        : Const(context.Memory.PageTablePointer.ToInt64(), true, Ptc.PageTablePointerIndex);

    int bit = PageBits;

    do
    {
        Operand addrPart = context.ShiftRightUI(address, Const(bit));

        bit += ptLevelBits;

        if (bit < context.Memory.AddressSpaceBits)
        {
            addrPart = context.BitwiseAnd(addrPart, Const(addrPart.Type, ptLevelMask));
        }

        Operand pteOffset = context.ShiftLeft(addrPart, Const(3));

        if (pteOffset.Type == OperandType.I32)
        {
            pteOffset = context.ZeroExtend32(OperandType.I64, pteOffset);
        }

        Operand pteAddress = context.Add(pte, pteOffset);

        pte = context.Load(OperandType.I64, pteAddress);
    }
    while (bit < context.Memory.AddressSpaceBits);

    context.BranchIfTrue(lblSlowPath, context.ICompareLess(pte, Const(0L)));

    Operand pageOffset = context.BitwiseAnd(address, Const(address.Type, PageMask));

    if (pageOffset.Type == OperandType.I32)
    {
        pageOffset = context.ZeroExtend32(OperandType.I64, pageOffset);
    }

    return context.Add(pte, pageOffset);
}
private static void EmitNativeCallWithGuestAddress(ArmEmitterContext context, Operand funcAddr, Operand guestAddress, bool isJump)
{
    Operand nativeContextPtr = context.LoadArgument(OperandType.I64, 0);

    context.Store(context.Add(nativeContextPtr, Const((long)NativeContext.GetCallAddressOffset())), guestAddress);

    EmitNativeCall(context, nativeContextPtr, funcAddr, isJump);
}
public static void EmitClearExclusive(ArmEmitterContext context)
{
    Operand arg0 = context.LoadArgument(OperandType.I64, 0);

    Operand exAddrPtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveAddressOffset()));

    // We store ULONG max to force any exclusive address checks to fail,
    // since this value is not aligned to the ERG mask.
    context.Store(exAddrPtr, Const(ulong.MaxValue));
}
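// Illustrative sketch, not emitter code: why ulong.MaxValue works as a
// "no reservation" marker. The exact granule size and mask shape are
// assumptions here; what matters is that stored exclusive addresses are
// masked, so their low bits are zero, while ulong.MaxValue has them set
// and can never compare equal.
private static bool ReservationMatches(ulong storedExAddress, ulong address, int ergSizeLog2)
{
    ulong ergMask = ~((1UL << ergSizeLog2) - 1); // Assumed granule of 2^ergSizeLog2 bytes.

    return storedExAddress == (address & ergMask);
}
// ReservationMatches(ulong.MaxValue, anyAddress, anyPositiveErg) is always false.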
public static void Vsra(ArmEmitterContext context)
{
    OpCode32SimdShImm op = (OpCode32SimdShImm)context.CurrOp;

    int shift = GetImmShr(op);
    int maxShift = (8 << op.Size) - 1;

    if (op.U)
    {
        EmitVectorImmBinaryQdQmOpZx32(context, (op1, op2) =>
        {
            Operand shiftRes = shift > maxShift ? Const(op2.Type, 0) : context.ShiftRightUI(op2, Const(shift));

            return context.Add(op1, shiftRes);
        });
    }
    else
    {
        EmitVectorImmBinaryQdQmOpSx32(context, (op1, op2) => context.Add(op1, context.ShiftRightSI(op2, Const(Math.Min(maxShift, shift)))));
    }
}
public static void Umaal(ArmEmitterContext context)
{
    OpCode32AluUmull op = (OpCode32AluUmull)context.CurrOp;

    Operand n = context.ZeroExtend32(OperandType.I64, GetIntA32(context, op.Rn));
    Operand m = context.ZeroExtend32(OperandType.I64, GetIntA32(context, op.Rm));
    Operand dHi = context.ZeroExtend32(OperandType.I64, GetIntA32(context, op.RdHi));
    Operand dLo = context.ZeroExtend32(OperandType.I64, GetIntA32(context, op.RdLo));

    Operand res = context.Multiply(n, m);
    res = context.Add(res, dHi);
    res = context.Add(res, dLo);

    Operand hi = context.ConvertI64ToI32(context.ShiftRightUI(res, Const(32)));
    Operand lo = context.ConvertI64ToI32(res);

    EmitGenericAluStoreA32(context, op.RdHi, false, hi);
    EmitGenericAluStoreA32(context, op.RdLo, false, lo);
}
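// Illustrative sketch, not emitter code: UMAAL's 32x32 multiply plus two 32-bit
// addends never needs more than 64 bits, because
// (2^32 - 1)^2 + 2 * (2^32 - 1) == 2^64 - 1 exactly.
private static void CheckUmaalRange()
{
    ulong max = uint.MaxValue;

    System.Diagnostics.Debug.Assert(max * max + max + max == ulong.MaxValue);
}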
public static void Vpadd_V(ArmEmitterContext context)
{
    if (Optimizations.FastFP && Optimizations.UseSse2)
    {
        EmitSse2VectorPairwiseOpF32(context, Intrinsic.X86Addps);
    }
    else
    {
        EmitVectorPairwiseOpF32(context, (op1, op2) => context.Add(op1, op2));
    }
}