private static void EmitSat(ArmEmitterContext context, int intMin, int intMax)
{
    // Emits A32 SSAT/USAT: shifts Rn by the decoded immediate amount, then
    // saturates the result to [intMin, intMax], setting PSTATE.Q if the
    // value had to be clamped.
    OpCode32Sat op = (OpCode32Sat)context.CurrOp;

    Operand n = GetIntA32(context, op.Rn);

    int shift = DecodeImmShift(op.ShiftType, op.Imm5);

    switch (op.ShiftType)
    {
        case ShiftType.Lsl:
            if (shift == 32)
            {
                // LSL #32 shifts every bit out; the result is 0.
                n = Const(0);
            }
            else
            {
                n = context.ShiftLeft(n, Const(shift));
            }
            break;

        case ShiftType.Asr:
            if (shift == 32)
            {
                // ASR #32 replicates the sign bit; equivalent to shifting by 31.
                n = context.ShiftRightSI(n, Const(31));
            }
            else
            {
                n = context.ShiftRightSI(n, Const(shift));
            }
            break;
    }

    Operand lblCheckLtIntMin = Label();
    Operand lblNoSat = Label();
    Operand lblEnd = Label();

    // n > intMax: saturate to the upper bound and set Q.
    context.BranchIfFalse(lblCheckLtIntMin, context.ICompareGreater(n, Const(intMax)));

    SetFlag(context, PState.QFlag, Const(1));
    SetIntA32(context, op.Rd, Const(intMax));
    context.Branch(lblEnd);

    context.MarkLabel(lblCheckLtIntMin);

    // n < intMin: saturate to the lower bound and set Q.
    context.BranchIfFalse(lblNoSat, context.ICompareLess(n, Const(intMin)));

    SetFlag(context, PState.QFlag, Const(1));
    SetIntA32(context, op.Rd, Const(intMin));
    context.Branch(lblEnd);

    context.MarkLabel(lblNoSat);

    // In range: store the shifted value unchanged (Q untouched).
    SetIntA32(context, op.Rd, n);

    context.MarkLabel(lblEnd);
}
private static void EmitSat16(ArmEmitterContext context, int intMin, int intMax)
{
    // Emits A32 SSAT16/USAT16: saturates each 16-bit half of Rn independently
    // to [intMin, intMax], setting PSTATE.Q if either half saturates.
    OpCode32Sat16 op = (OpCode32Sat16)context.CurrOp;

    // Writes one 16-bit half of Rd. Part 0 writes the (zero-extended) low
    // half; part 1 ORs the value into the high half, which relies on part 0
    // having been written first in the loop below.
    void SetD(int part, Operand value)
    {
        if (part == 0)
        {
            SetIntA32(context, op.Rd, context.ZeroExtend16(OperandType.I32, value));
        }
        else
        {
            SetIntA32(context, op.Rd, context.BitwiseOr(GetIntA32(context, op.Rd), context.ShiftLeft(value, Const(16))));
        }
    }

    Operand n = GetIntA32(context, op.Rn);

    Operand nLow = context.SignExtend16(OperandType.I32, n);
    Operand nHigh = context.ShiftRightSI(n, Const(16));

    for (int part = 0; part < 2; part++)
    {
        Operand nPart = part == 0 ? nLow : nHigh;

        Operand lblCheckLtIntMin = Label();
        Operand lblNoSat = Label();
        Operand lblEnd = Label();

        // Half > intMax: saturate to the upper bound and set Q.
        context.BranchIfFalse(lblCheckLtIntMin, context.ICompareGreater(nPart, Const(intMax)));

        SetFlag(context, PState.QFlag, Const(1));
        SetD(part, Const(intMax));
        context.Branch(lblEnd);

        context.MarkLabel(lblCheckLtIntMin);

        // Half < intMin: saturate to the lower bound and set Q.
        context.BranchIfFalse(lblNoSat, context.ICompareLess(nPart, Const(intMin)));

        SetFlag(context, PState.QFlag, Const(1));
        SetD(part, Const(intMin));
        context.Branch(lblEnd);

        context.MarkLabel(lblNoSat);

        // In range: store the half unchanged.
        SetD(part, nPart);

        context.MarkLabel(lblEnd);
    }
}
public static Operand EmitReadIntAligned(ArmEmitterContext context, Operand address, int size)
{
    // Reads a naturally aligned value of the given log2 size (0 = byte,
    // 1 = halfword, 2 = word, 3 = doubleword, 4 = 128-bit vector) from guest
    // memory and returns the loaded operand. Unaligned addresses take a cold
    // path that calls into the native side, which is expected to throw.
    //
    // Fix: the method was missing its closing brace after the switch
    // expression, leaving it syntactically incomplete.
    if ((uint)size > 4)
    {
        throw new ArgumentOutOfRangeException(nameof(size));
    }

    Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

    Operand lblFastPath = Label();

    context.BranchIfFalse(lblFastPath, isUnalignedAddr, BasicBlockFrequency.Cold);

    // The call is not expected to return (it should throw).
    context.Call(typeof(NativeInterface).GetMethod(nameof(NativeInterface.ThrowInvalidMemoryAccess)), address);

    context.MarkLabel(lblFastPath);

    Operand physAddr = EmitPtPointerLoad(context, address, null, write: false);

    return size switch
    {
        0 => context.Load8(physAddr),
        1 => context.Load16(physAddr),
        2 => context.Load(OperandType.I32, physAddr),
        3 => context.Load(OperandType.I64, physAddr),
        _ => context.Load(OperandType.V128, physAddr)
    };
}
public static Operand EmitLoadExclusive(ArmEmitterContext context, Operand address, bool exclusive, int size)
{
    // Emits a load with optional exclusive-monitor semantics (LDXR family).
    // For exclusive loads, the masked address and the loaded value are
    // recorded in the native context so a later store-exclusive can compare
    // against them.
    if (exclusive)
    {
        Operand value;

        if (size == 4)
        {
            // 128-bit path: validate the address first, then load atomically.
            Operand isUnalignedAddr = InstEmitMemoryHelper.EmitAddressCheck(context, address, size);

            Operand lblFastPath = Label();

            context.BranchIfFalse(lblFastPath, isUnalignedAddr);

            // The call is not expected to return (it should throw).
            context.Call(typeof(NativeInterface).GetMethod(nameof(NativeInterface.ThrowInvalidMemoryAccess)), address);

            context.MarkLabel(lblFastPath);

            // Only 128-bit CAS is guaranteed to have a atomic load.
            Operand physAddr = InstEmitMemoryHelper.EmitPtPointerLoad(context, address, null, write: false);

            Operand zero = context.VectorZero();

            // CAS with expected == new == 0 performs an atomic 128-bit read.
            value = context.CompareAndSwap(physAddr, zero, zero);
        }
        else
        {
            value = InstEmitMemoryHelper.EmitReadIntAligned(context, address, size);
        }

        Operand arg0 = context.LoadArgument(OperandType.I64, 0);

        Operand exAddrPtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveAddressOffset()));
        Operand exValuePtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveValueOffset()));

        // Record the (masked) address covered by the exclusive monitor.
        context.Store(exAddrPtr, context.BitwiseAnd(address, Const(address.Type, GetExclusiveAddressMask())));

        // Make sure the unused higher bits of the value are cleared.
        if (size < 3)
        {
            context.Store(exValuePtr, Const(0UL));
        }

        if (size < 4)
        {
            context.Store(context.Add(exValuePtr, Const(exValuePtr.Type, 8L)), Const(0UL));
        }

        // Store the new exclusive value.
        context.Store(exValuePtr, value);

        return (value);
    }
    else
    {
        return (InstEmitMemoryHelper.EmitReadIntAligned(context, address, size));
    }
}
public static void EmitDynamicTableCall(ArmEmitterContext context, Operand tableAddress, Operand address, bool isJump)
{
    // Emits a lookup over the per-call dynamic jump table: each entry is
    // claimed (or matched) via CAS on the guest address, and on a hit the
    // stored host function pointer is called. On a miss the slow fallback
    // resolver is used.

    // Loop over elements of the dynamic table. Unrolled loop.
    Operand endLabel = Label();
    Operand fallbackLabel = Label();

    void EmitTableEntry(Operand entrySkipLabel)
    {
        // Try to take this entry in the table if its guest address equals 0.
        Operand gotResult = context.CompareAndSwap(tableAddress, Const(0L), address);

        // Is the address ours? (either taken via CompareAndSwap (0), or what was already here)
        context.BranchIfFalse(entrySkipLabel,
            context.BitwiseOr(
                context.ICompareEqual(gotResult, address),
                context.ICompareEqual(gotResult, Const(0L))));

        // It's ours, so what function is it pointing to?
        Operand targetFunctionPtr = context.Add(tableAddress, Const(8L));
        Operand targetFunction = context.Load(OperandType.I64, targetFunctionPtr);

        // Call the function.
        // We pass in the entry address as the guest address, as the entry may need to be updated by the
        // indirect call stub.
        EmitNativeCallWithGuestAddress(context, targetFunction, tableAddress, isJump);

        context.Branch(endLabel);
    }

    // Currently this uses a size of 1, as higher values inflate code size for no real benefit.
    for (int i = 0; i < JumpTable.DynamicTableElems; i++)
    {
        if (i == JumpTable.DynamicTableElems - 1)
        {
            // If this is the last entry, avoid emitting the additional label and add.
            EmitTableEntry(fallbackLabel);
        }
        else
        {
            Operand nextLabel = Label();

            EmitTableEntry(nextLabel);

            context.MarkLabel(nextLabel);

            // Move to the next table entry.
            tableAddress = context.Add(tableAddress, Const((long)JumpTable.JumpTableStride));
        }
    }

    context.MarkLabel(fallbackLabel);

    // No entry matched or could be claimed: resolve via the slow path.
    EmitBranchFallback(context, address, isJump);

    context.MarkLabel(endLabel);
}
private static void UpdateQFlag(ArmEmitterContext context, Operand q)
{
    // Sets the sticky PSTATE.Q flag when the condition operand is non-zero;
    // when it is zero the flag is left untouched (Q is never cleared here).
    Operand qUnsetLabel = Label();

    context.BranchIfFalse(qUnsetLabel, q);

    SetFlag(context, PState.QFlag, Const(1));

    context.MarkLabel(qUnsetLabel);
}
private static void EmitReadVector(
    ArmEmitterContext context,
    Operand address,
    Operand vector,
    int rt,
    int elem,
    int size)
{
    // Loads one element (or the full 128-bit vector when size == 4) from
    // guest memory into element `elem` of vector register `rt`. Uses a fast
    // path when the address passes the alignment check and the page table
    // lookup succeeds, and a fallback path otherwise.
    Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

    Operand lblFastPath = Label();
    Operand lblSlowPath = Label();
    Operand lblEnd = Label();

    context.BranchIfFalse(lblFastPath, isUnalignedAddr);

    context.MarkLabel(lblSlowPath);

    EmitReadVectorFallback(context, address, vector, rt, elem, size);

    context.Branch(lblEnd);

    context.MarkLabel(lblFastPath);

    // May branch back to lblSlowPath if the page table lookup fails.
    Operand physAddr = EmitPtPointerLoad(context, address, lblSlowPath);

    Operand value = null;

    switch (size)
    {
        case 0: value = context.VectorInsert8(vector, context.Load8(physAddr), elem); break;
        case 1: value = context.VectorInsert16(vector, context.Load16(physAddr), elem); break;
        case 2: value = context.VectorInsert(vector, context.Load(OperandType.I32, physAddr), elem); break;
        case 3: value = context.VectorInsert(vector, context.Load(OperandType.I64, physAddr), elem); break;
        case 4: value = context.Load(OperandType.V128, physAddr); break;
    }

    context.Copy(GetVec(rt), value);

    context.MarkLabel(lblEnd);
}
private static void EmitBranch(ArmEmitterContext context, Operand value, bool onNotZero)
{
    // Emits a conditional branch on `value`: taken when non-zero if
    // onNotZero is true, or when zero otherwise (CBZ/CBNZ style).
    OpCodeBImm op = (OpCodeBImm)context.CurrOp;

    // Emits the branch to `target` with the polarity selected by onNotZero.
    void EmitCondBranch(Operand target)
    {
        if (onNotZero)
        {
            context.BranchIfTrue(target, value);
        }
        else
        {
            context.BranchIfFalse(target, value);
        }
    }

    if (context.CurrBlock.Branch != null)
    {
        // The target block is known; branch directly to its label.
        EmitCondBranch(context.GetLabel((ulong)op.Immediate));

        if (context.CurrBlock.Next == null)
        {
            // No fall-through block: return the next instruction address.
            context.Return(Const(op.Address + 4));
        }
    }
    else
    {
        // No translated target: return the guest address of whichever side
        // of the branch is taken.
        Operand takenLabel = Label();

        EmitCondBranch(takenLabel);

        context.Return(Const(op.Address + 4));

        context.MarkLabel(takenLabel);

        context.Return(Const(op.Immediate));
    }
}
private static void EmitWriteVector(
    ArmEmitterContext context,
    Operand address,
    int rt,
    int elem,
    int size)
{
    // Stores element `elem` of vector register `rt` (or the full 128-bit
    // vector when size == 4) to guest memory. Uses a fast path when the
    // address passes the alignment check and the page table lookup succeeds,
    // and a fallback path otherwise.
    Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

    Operand lblFastPath = Label();
    Operand lblSlowPath = Label();
    Operand lblEnd = Label();

    context.BranchIfFalse(lblFastPath, isUnalignedAddr);

    context.MarkLabel(lblSlowPath);

    EmitWriteVectorFallback(context, address, rt, elem, size);

    context.Branch(lblEnd);

    context.MarkLabel(lblFastPath);

    // May branch back to lblSlowPath if the page table lookup fails.
    Operand physAddr = EmitPtPointerLoad(context, address, lblSlowPath);

    Operand value = GetVec(rt);

    switch (size)
    {
        case 0: context.Store8(physAddr, context.VectorExtract8(value, elem)); break;
        case 1: context.Store16(physAddr, context.VectorExtract16(value, elem)); break;
        case 2: context.Store(physAddr, context.VectorExtract(OperandType.FP32, value, elem)); break;
        case 3: context.Store(physAddr, context.VectorExtract(OperandType.FP64, value, elem)); break;
        case 4: context.Store(physAddr, value); break;
    }

    context.MarkLabel(lblEnd);
}
private static void EmitDiv(ArmEmitterContext context, bool unsigned)
{
    // Emits A64 SDIV/UDIV with the architecturally defined edge cases:
    // division by zero yields 0, and signed INT_MIN / -1 yields INT_MIN
    // (no trap on overflow).
    OpCodeAluBinary op = (OpCodeAluBinary)context.CurrOp;

    // If Rm == 0, Rd = 0 (division by zero).
    Operand n = GetIntOrZR(context, op.Rn);
    Operand m = GetIntOrZR(context, op.Rm);

    Operand divisorIsZero = context.ICompareEqual(m, Const(m.Type, 0));

    Operand lblBadDiv = Label();
    Operand lblEnd = Label();

    context.BranchIfTrue(lblBadDiv, divisorIsZero);

    if (!unsigned)
    {
        // If Rn == INT_MIN && Rm == -1, Rd = INT_MIN (overflow).
        bool is32Bits = op.RegisterSize == RegisterSize.Int32;

        Operand intMin = is32Bits ? Const(int.MinValue) : Const(long.MinValue);
        Operand minus1 = is32Bits ? Const(-1) : Const(-1L);

        Operand nIsIntMin = context.ICompareEqual(n, intMin);
        Operand mIsMinus1 = context.ICompareEqual(m, minus1);

        Operand lblGoodDiv = Label();

        context.BranchIfFalse(lblGoodDiv, context.BitwiseAnd(nIsIntMin, mIsMinus1));

        SetAluDOrZR(context, intMin);

        context.Branch(lblEnd);

        context.MarkLabel(lblGoodDiv);
    }

    Operand d = unsigned ? context.DivideUI(n, m) : context.Divide(n, m);

    SetAluDOrZR(context, d);

    context.Branch(lblEnd);

    context.MarkLabel(lblBadDiv);

    // Divisor was zero: the result is defined to be 0.
    SetAluDOrZR(context, Const(op.GetOperandType(), 0));

    context.MarkLabel(lblEnd);
}
public static void EmitDiv(ArmEmitterContext context, bool unsigned)
{
    // Emits A32 SDIV/UDIV: division by zero yields 0; for signed division,
    // INT_MIN / -1 yields INT_MIN instead of trapping.
    Operand n = GetAluN(context);
    Operand m = GetAluM(context);
    Operand zero = Const(m.Type, 0);

    Operand divisorIsZero = context.ICompareEqual(m, zero);

    Operand lblBadDiv = Label();
    Operand lblEnd = Label();

    context.BranchIfTrue(lblBadDiv, divisorIsZero);

    if (!unsigned)
    {
        // ARM64 behaviour: If Rn == INT_MIN && Rm == -1, Rd = INT_MIN (overflow).
        // TODO: tests to ensure A32 works the same
        Operand intMin = Const(int.MinValue);
        Operand minus1 = Const(-1);

        Operand nIsIntMin = context.ICompareEqual(n, intMin);
        Operand mIsMinus1 = context.ICompareEqual(m, minus1);

        Operand lblGoodDiv = Label();

        context.BranchIfFalse(lblGoodDiv, context.BitwiseAnd(nIsIntMin, mIsMinus1));

        EmitAluStore(context, intMin);

        context.Branch(lblEnd);

        context.MarkLabel(lblGoodDiv);
    }

    Operand res = unsigned ? context.DivideUI(n, m) : context.Divide(n, m);

    EmitAluStore(context, res);

    context.Branch(lblEnd);

    context.MarkLabel(lblBadDiv);

    // Divisor was zero: the result is defined to be 0.
    EmitAluStore(context, zero);

    context.MarkLabel(lblEnd);
}
private static void EmitBranch(ArmEmitterContext context, Operand value, bool onNotZero)
{
    // Emits a CBZ/CBNZ-style conditional branch to the instruction's
    // immediate target label.
    OpCodeBImm op = (OpCodeBImm)context.CurrOp;

    Operand takenLabel = context.GetLabel((ulong)op.Immediate);

    if (onNotZero)
    {
        context.BranchIfTrue(takenLabel, value);
    }
    else
    {
        context.BranchIfFalse(takenLabel, value);
    }
}
private static void EmitCb(ArmEmitterContext context, bool onNotZero)
{
    // T16 CBZ/CBNZ: branches to the immediate target when Rn is zero (CBZ)
    // or non-zero (CBNZ).
    OpCodeT16BImmCmp op = (OpCodeT16BImmCmp)context.CurrOp;

    Operand rnValue = GetIntA32(context, op.Rn);
    Operand takenLabel = context.GetLabel((ulong)op.Immediate);

    if (onNotZero)
    {
        context.BranchIfTrue(takenLabel, rnValue);
    }
    else
    {
        context.BranchIfFalse(takenLabel, rnValue);
    }
}
private static void EmitReadInt(ArmEmitterContext context, Operand address, int rt, int size)
{
    // Loads an integer of the given log2 size (0 = byte ... 3 = 64-bit) from
    // guest memory into register `rt`. Uses a fast path when the address
    // passes the alignment check and the page table lookup succeeds, and a
    // fallback path otherwise.
    Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

    Operand lblFastPath = Label();
    Operand lblSlowPath = Label();
    Operand lblEnd = Label();

    context.BranchIfFalse(lblFastPath, isUnalignedAddr);

    context.MarkLabel(lblSlowPath);

    EmitReadIntFallback(context, address, rt, size);

    context.Branch(lblEnd);

    context.MarkLabel(lblFastPath);

    // May branch back to lblSlowPath if the page table lookup fails.
    Operand physAddr = EmitPtPointerLoad(context, address, lblSlowPath);

    Operand value = null;

    switch (size)
    {
        case 0: value = context.Load8(physAddr); break;
        case 1: value = context.Load16(physAddr); break;
        case 2: value = context.Load(OperandType.I32, physAddr); break;
        case 3: value = context.Load(OperandType.I64, physAddr); break;
    }

    SetInt(context, rt, value);

    context.MarkLabel(lblEnd);
}
public static void EmitIfHelper(ArmEmitterContext context, Operand boolValue, Action action, bool expected = true)
{
    // Emits a guarded region: the code produced by `action` only executes
    // when boolValue matches `expected`.
    Debug.Assert(boolValue.Type == OperandType.I32);

    Operand skipLabel = Label();

    if (expected)
    {
        context.BranchIfFalse(skipLabel, boolValue);
    }
    else
    {
        context.BranchIfTrue(skipLabel, boolValue);
    }

    action();

    context.MarkLabel(skipLabel);
}
private static void EmitWriteInt(ArmEmitterContext context, Operand address, int rt, int size)
{
    // Stores an integer of the given log2 size (0 = byte ... 3 = 64-bit)
    // from register `rt` to guest memory. Uses a fast path when the address
    // passes the alignment check and the page table lookup succeeds, and a
    // fallback path otherwise.
    Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

    Operand lblFastPath = Label();
    Operand lblSlowPath = Label();
    Operand lblEnd = Label();

    context.BranchIfFalse(lblFastPath, isUnalignedAddr);

    context.MarkLabel(lblSlowPath);

    EmitWriteIntFallback(context, address, rt, size);

    context.Branch(lblEnd);

    context.MarkLabel(lblFastPath);

    // May branch back to lblSlowPath if the page table lookup fails.
    Operand physAddr = EmitPtPointerLoad(context, address, lblSlowPath);

    Operand value = GetInt(context, rt);

    // Narrow 64-bit register values for sub-64-bit stores.
    if (size < 3 && value.Type == OperandType.I64)
    {
        value = context.ConvertI64ToI32(value);
    }

    switch (size)
    {
        case 0: context.Store8(physAddr, value); break;
        case 1: context.Store16(physAddr, value); break;
        case 2: context.Store(physAddr, value); break;
        case 3: context.Store(physAddr, value); break;
    }

    context.MarkLabel(lblEnd);
}
private static Operand EmitSatQ(ArmEmitterContext context, Operand value, int eSize, bool signed)
{
    // Clamps `value` to the signed or unsigned eSize-bit range and, when
    // clamping occurred, sets FPSR.QC through the native interface.
    Debug.Assert(eSize <= 32);

    long low = signed ? -(1L << (eSize - 1)) : 0;
    long high = signed ? (1L << (eSize - 1)) - 1 : (1L << eSize) - 1;

    Operand aboveMax = context.ICompareGreater(value, Const(value.Type, high));
    Operand belowMin = context.ICompareLess(value, Const(value.Type, low));

    value = context.ConditionalSelect(aboveMax, Const(value.Type, high), value);
    value = context.ConditionalSelect(belowMin, Const(value.Type, low), value);

    Operand noSatLabel = Label();

    context.BranchIfFalse(noSatLabel, context.BitwiseOr(aboveMax, belowMin));

    context.Call(typeof(NativeInterface).GetMethod(nameof(NativeInterface.SetFpsrQc)));

    context.MarkLabel(noSatLabel);

    return value;
}
private static Operand EmitSatQ(ArmEmitterContext context, Operand value, int eSize, bool signed)
{
    // Clamps `value` to the signed or unsigned eSize-bit range. The
    // saturation branch is emitted but the FPSCR.QC update is still pending.
    Debug.Assert(eSize <= 32);

    long low = signed ? -(1L << (eSize - 1)) : 0;
    long high = signed ? (1L << (eSize - 1)) - 1 : (1L << eSize) - 1;

    Operand aboveMax = context.ICompareGreater(value, Const(value.Type, high));
    Operand belowMin = context.ICompareLess(value, Const(value.Type, low));

    value = context.ConditionalSelect(aboveMax, Const(value.Type, high), value);
    value = context.ConditionalSelect(belowMin, Const(value.Type, low), value);

    Operand noSatLabel = Label();

    context.BranchIfFalse(noSatLabel, context.BitwiseOr(aboveMax, belowMin));

    // TODO: Set QC (to 1) on FPSCR here.

    context.MarkLabel(noSatLabel);

    return value;
}
public static void EmitStoreExclusive(
    ArmEmitterContext context,
    Operand address,
    Operand value,
    bool exclusive,
    int size,
    int rs,
    bool a32)
{
    // Emits a store with optional exclusive-monitor semantics (STXR family).
    // For exclusive stores, Rs receives 0 on success and 1 on failure.
    if (size < 3)
    {
        value = context.ConvertI64ToI32(value);
    }

    if (exclusive)
    {
        // Writes the success/failure flag to Rs (A32 or A64 register file).
        void SetRs(Operand value)
        {
            if (a32)
            {
                SetIntA32(context, rs, value);
            }
            else
            {
                SetIntOrZR(context, rs, value);
            }
        }

        Operand arg0 = context.LoadArgument(OperandType.I64, 0);

        Operand exAddrPtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveAddressOffset()));
        Operand exAddr = context.Load(address.Type, exAddrPtr);

        // STEP 1: Check if we have exclusive access to this memory region. If not, fail and skip store.
        Operand maskedAddress = context.BitwiseAnd(address, Const(address.Type, GetExclusiveAddressMask()));

        Operand exFailed = context.ICompareNotEqual(exAddr, maskedAddress);

        Operand lblExit = Label();

        SetRs(exFailed);

        context.BranchIfTrue(lblExit, exFailed);

        // STEP 2: We have exclusive access, make sure that the address is valid.
        Operand isUnalignedAddr = InstEmitMemoryHelper.EmitAddressCheck(context, address, size);

        Operand lblFastPath = Label();

        context.BranchIfFalse(lblFastPath, isUnalignedAddr);

        // The call is not expected to return (it should throw).
        context.Call(typeof(NativeInterface).GetMethod(nameof(NativeInterface.ThrowInvalidMemoryAccess)), address);

        // STEP 3: We have exclusive access and the address is valid, attempt the store using CAS.
        context.MarkLabel(lblFastPath);

        Operand physAddr = InstEmitMemoryHelper.EmitPtPointerLoad(context, address, null, write: true);

        Operand exValuePtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveValueOffset()));
        Operand exValue = size switch
        {
            0 => context.Load8(exValuePtr),
            1 => context.Load16(exValuePtr),
            2 => context.Load(OperandType.I32, exValuePtr),
            3 => context.Load(OperandType.I64, exValuePtr),
            _ => context.Load(OperandType.V128, exValuePtr)
        };

        Operand currValue = size switch
        {
            0 => context.CompareAndSwap8(physAddr, exValue, value),
            1 => context.CompareAndSwap16(physAddr, exValue, value),
            _ => context.CompareAndSwap(physAddr, exValue, value)
        };

        // STEP 4: Check if we succeeded by comparing expected and in-memory values.
        Operand storeFailed;

        if (size == 4)
        {
            // 128-bit comparison must check both 64-bit halves of the vector.
            Operand currValueLow = context.VectorExtract(OperandType.I64, currValue, 0);
            Operand currValueHigh = context.VectorExtract(OperandType.I64, currValue, 1);

            Operand exValueLow = context.VectorExtract(OperandType.I64, exValue, 0);
            Operand exValueHigh = context.VectorExtract(OperandType.I64, exValue, 1);

            storeFailed = context.BitwiseOr(
                context.ICompareNotEqual(currValueLow, exValueLow),
                context.ICompareNotEqual(currValueHigh, exValueHigh));
        }
        else
        {
            storeFailed = context.ICompareNotEqual(currValue, exValue);
        }

        SetRs(storeFailed);

        context.MarkLabel(lblExit);
    }
    else
    {
        InstEmitMemoryHelper.EmitWriteIntAligned(context, address, value, size);
    }
}
private static void EmitVcmpOrVcmpe(ArmEmitterContext context, bool signalNaNs)
{
    // Emits A32 VCMP/VCMPE: compares Vd against Vm (or zero) and writes the
    // NZCV result into the FPSCR flags. `signalNaNs` selects the signaling
    // (VCMPE) comparison variant.
    OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;

    bool cmpWithZero = (op.Opc & 2) != 0;
    int sizeF = op.Size & 1;

    if (Optimizations.FastFP && (signalNaNs ? Optimizations.UseAvx : Optimizations.UseSse2))
    {
        CmpCondition cmpOrdered = signalNaNs ? CmpCondition.OrderedS : CmpCondition.OrderedQ;

        bool doubleSize = sizeF != 0;
        int shift = doubleSize ? 1 : 2;
        Operand m = GetVecA32(op.Vm >> shift);
        Operand n = GetVecA32(op.Vd >> shift);

        n = EmitSwapScalar(context, n, op.Vd, doubleSize);
        m = cmpWithZero ? context.VectorZero() : EmitSwapScalar(context, m, op.Vm, doubleSize);

        Operand lblNaN = Label();
        Operand lblEnd = Label();

        if (!doubleSize)
        {
            // Unordered result (a NaN operand) takes the NaN path below.
            Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpss, n, m, Const((int)cmpOrdered));

            Operand isOrdered = context.AddIntrinsicInt(Intrinsic.X86Cvtsi2si, ordMask);

            context.BranchIfFalse(lblNaN, isOrdered);

            Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comissge, n, m);
            Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisseq, n, m);
            Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisslt, n, m);

            SetFpFlag(context, FPState.VFlag, Const(0));
            SetFpFlag(context, FPState.CFlag, cf);
            SetFpFlag(context, FPState.ZFlag, zf);
            SetFpFlag(context, FPState.NFlag, nf);
        }
        else
        {
            Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpsd, n, m, Const((int)cmpOrdered));

            Operand isOrdered = context.AddIntrinsicLong(Intrinsic.X86Cvtsi2si, ordMask);

            context.BranchIfFalse(lblNaN, isOrdered);

            Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comisdge, n, m);
            Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisdeq, n, m);
            Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisdlt, n, m);

            SetFpFlag(context, FPState.VFlag, Const(0));
            SetFpFlag(context, FPState.CFlag, cf);
            SetFpFlag(context, FPState.ZFlag, zf);
            SetFpFlag(context, FPState.NFlag, nf);
        }

        context.Branch(lblEnd);

        context.MarkLabel(lblNaN);

        // Unordered comparison result: NZCV = 0011.
        SetFpFlag(context, FPState.VFlag, Const(1));
        SetFpFlag(context, FPState.CFlag, Const(1));
        SetFpFlag(context, FPState.ZFlag, Const(0));
        SetFpFlag(context, FPState.NFlag, Const(0));

        context.MarkLabel(lblEnd);
    }
    else
    {
        // Soft-float fallback path.
        OperandType type = sizeF != 0 ? OperandType.FP64 : OperandType.FP32;

        Operand ne = ExtractScalar(context, type, op.Vd);
        Operand me;

        if (cmpWithZero)
        {
            me = sizeF == 0 ? ConstF(0f) : ConstF(0d);
        }
        else
        {
            me = ExtractScalar(context, type, op.Vm);
        }

        MethodInfo info = sizeF != 0
            ? typeof(SoftFloat64).GetMethod(nameof(SoftFloat64.FPCompare))
            : typeof(SoftFloat32).GetMethod(nameof(SoftFloat32.FPCompare));

        Operand nzcv = context.Call(info, ne, me, Const(signalNaNs));

        EmitSetFpscrNzcv(context, nzcv);
    }
}
private static void EmitFcmpOrFcmpe(ArmEmitterContext context, bool signalNaNs)
{
    // Emits A64 FCMP/FCMPE: compares Rn against Rm (or zero) and writes the
    // result into the PSTATE NZCV flags. `signalNaNs` selects the signaling
    // (FCMPE) comparison variant.
    OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;

    bool cmpWithZero = !(op is OpCodeSimdFcond) ? op.Bit3 : false;

    if (Optimizations.FastFP && (signalNaNs ? Optimizations.UseAvx : Optimizations.UseSse2))
    {
        Operand n = GetVec(op.Rn);
        Operand m = cmpWithZero ? context.VectorZero() : GetVec(op.Rm);

        CmpCondition cmpOrdered = signalNaNs ? CmpCondition.OrderedS : CmpCondition.OrderedQ;

        Operand lblNaN = Label();
        Operand lblEnd = Label();

        if (op.Size == 0)
        {
            // Unordered result (a NaN operand) takes the NaN path below.
            Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpss, n, m, Const((int)cmpOrdered));

            Operand isOrdered = context.AddIntrinsicInt(Intrinsic.X86Cvtsi2si, ordMask);

            context.BranchIfFalse(lblNaN, isOrdered);

            Operand nCopy = context.Copy(n);
            Operand mCopy = cmpWithZero ? context.VectorZero() : context.Copy(m);

            Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comissge, nCopy, mCopy);
            Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisseq, nCopy, mCopy);
            Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisslt, nCopy, mCopy);

            SetFlag(context, PState.VFlag, Const(0));
            SetFlag(context, PState.CFlag, cf);
            SetFlag(context, PState.ZFlag, zf);
            SetFlag(context, PState.NFlag, nf);
        }
        else /* if (op.Size == 1) */
        {
            Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpsd, n, m, Const((int)cmpOrdered));

            Operand isOrdered = context.AddIntrinsicLong(Intrinsic.X86Cvtsi2si, ordMask);

            context.BranchIfFalse(lblNaN, isOrdered);

            Operand nCopy = context.Copy(n);
            Operand mCopy = cmpWithZero ? context.VectorZero() : context.Copy(m);

            Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comisdge, nCopy, mCopy);
            Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisdeq, nCopy, mCopy);
            Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisdlt, nCopy, mCopy);

            SetFlag(context, PState.VFlag, Const(0));
            SetFlag(context, PState.CFlag, cf);
            SetFlag(context, PState.ZFlag, zf);
            SetFlag(context, PState.NFlag, nf);
        }

        context.Branch(lblEnd);

        context.MarkLabel(lblNaN);

        // Unordered comparison result: NZCV = 0011.
        SetFlag(context, PState.VFlag, Const(1));
        SetFlag(context, PState.CFlag, Const(1));
        SetFlag(context, PState.ZFlag, Const(0));
        SetFlag(context, PState.NFlag, Const(0));

        context.MarkLabel(lblEnd);
    }
    else
    {
        // Soft-float fallback path.
        OperandType type = op.Size != 0 ? OperandType.FP64 : OperandType.FP32;

        Operand ne = context.VectorExtract(type, GetVec(op.Rn), 0);
        Operand me;

        if (cmpWithZero)
        {
            me = op.Size == 0 ? ConstF(0f) : ConstF(0d);
        }
        else
        {
            me = context.VectorExtract(type, GetVec(op.Rm), 0);
        }

        Operand nzcv = EmitSoftFloatCall(context, nameof(SoftFloat32.FPCompare), ne, me, Const(signalNaNs));

        EmitSetNzcv(context, nzcv);
    }
}
private static void EmitFcmpOrFcmpe(ArmEmitterContext context, bool signalNaNs)
{
    // Emits A64 FCMP/FCMPE: compares Rn against Rm (or zero) and writes the
    // result into the PSTATE NZCV flags.
    OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;

    // x86 CMPSS/CMPSD immediate predicate 7 = ordered, non-signaling (ORD).
    const int cmpOrdered = 7;

    bool cmpWithZero = !(op is OpCodeSimdFcond) ? op.Bit3 : false;

    if (Optimizations.FastFP && Optimizations.UseSse2)
    {
        Operand n = GetVec(op.Rn);
        Operand m = cmpWithZero ? context.VectorZero() : GetVec(op.Rm);

        Operand lblNaN = Label();
        Operand lblEnd = Label();

        if (op.Size == 0)
        {
            Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpss, n, m, Const(cmpOrdered));

            // The ordered mask is all ones or all zeros, so testing any
            // 16 bits of it is enough.
            Operand isOrdered = context.VectorExtract16(ordMask, 0);

            context.BranchIfFalse(lblNaN, isOrdered);

            Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comissge, n, m);
            Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisseq, n, m);
            Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisslt, n, m);

            SetFlag(context, PState.VFlag, Const(0));
            SetFlag(context, PState.CFlag, cf);
            SetFlag(context, PState.ZFlag, zf);
            SetFlag(context, PState.NFlag, nf);
        }
        else /* if (op.Size == 1) */
        {
            Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpsd, n, m, Const(cmpOrdered));

            Operand isOrdered = context.VectorExtract16(ordMask, 0);

            context.BranchIfFalse(lblNaN, isOrdered);

            Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comisdge, n, m);
            Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisdeq, n, m);
            Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisdlt, n, m);

            SetFlag(context, PState.VFlag, Const(0));
            SetFlag(context, PState.CFlag, cf);
            SetFlag(context, PState.ZFlag, zf);
            SetFlag(context, PState.NFlag, nf);
        }

        context.Branch(lblEnd);

        context.MarkLabel(lblNaN);

        // Unordered comparison result: NZCV = 0011.
        SetFlag(context, PState.VFlag, Const(1));
        SetFlag(context, PState.CFlag, Const(1));
        SetFlag(context, PState.ZFlag, Const(0));
        SetFlag(context, PState.NFlag, Const(0));

        context.MarkLabel(lblEnd);
    }
    else
    {
        // Soft-float fallback path.
        OperandType type = op.Size != 0 ? OperandType.FP64 : OperandType.FP32;

        Operand ne = context.VectorExtract(type, GetVec(op.Rn), 0);
        Operand me;

        if (cmpWithZero)
        {
            me = op.Size == 0 ? ConstF(0f) : ConstF(0d);
        }
        else
        {
            me = context.VectorExtract(type, GetVec(op.Rm), 0);
        }

        Delegate dlg = op.Size != 0
            ? (Delegate)new _S32_F64_F64_Bool(SoftFloat64.FPCompare)
            : (Delegate)new _S32_F32_F32_Bool(SoftFloat32.FPCompare);

        Operand nzcv = context.Call(dlg, ne, me, Const(signalNaNs));

        EmitSetNzcv(context, nzcv);
    }
}