/// <summary>
/// Pushes the effective memory address for the current load/store opcode
/// onto the IL stack, and stashes a copy in the scratch temporary since
/// the base register value may change before the address is needed again.
/// </summary>
private static void EmitLoadAddress(ILEmitterCtx context)
{
    if (context.CurrOp is OpCodeMemImm64 opImm)
    {
        context.EmitLdint(opImm.Rn);

        //Pre-indexed form: fold the immediate into the address up front.
        if (!opImm.PostIdx)
        {
            context.EmitLdc_I(opImm.Imm);
            context.Emit(OpCodes.Add);
        }
    }
    else if (context.CurrOp is OpCodeMemReg64 opReg)
    {
        //Register offset form: Rn + (Rm, optionally extended and shifted).
        context.EmitLdint(opReg.Rn);
        context.EmitLdintzr(opReg.Rm);
        context.EmitCast(opReg.IntType);

        if (opReg.Shift)
        {
            context.EmitLsl(opReg.Size);
        }

        context.Emit(OpCodes.Add);
    }

    //Save address to scratch var since the register value may change.
    context.Emit(OpCodes.Dup);
    context.EmitSttmp();
}
/// <summary>
/// Emits the slow-path integer store: calls the appropriate
/// MemoryManager.Write* method for the given access size
/// (0 = byte, 1 = 16-bits, 2 = 32-bits, 3 = 64-bits).
/// </summary>
private static void EmitWriteIntFallback(ILEmitterCtx context, int size)
{
    context.EmitLdarg(TranslatedSub.MemoryArgIdx);
    context.EmitLdint(_tempIntAddress);

    //32-bits register sizes need the address widened to 64-bits.
    if (context.CurrOp.RegisterSize == RegisterSize.Int32)
    {
        context.Emit(OpCodes.Conv_U8);
    }

    context.EmitLdint(_tempIntValue);

    //Sub-64-bits stores take a 32-bits value argument.
    if (size < 3)
    {
        context.Emit(OpCodes.Conv_U4);
    }

    string fallbackMethodName = null;

    if (size == 0)
    {
        fallbackMethodName = nameof(MemoryManager.WriteByte);
    }
    else if (size == 1)
    {
        fallbackMethodName = nameof(MemoryManager.WriteUInt16);
    }
    else if (size == 2)
    {
        fallbackMethodName = nameof(MemoryManager.WriteUInt32);
    }
    else if (size == 3)
    {
        fallbackMethodName = nameof(MemoryManager.WriteUInt64);
    }

    context.EmitCall(typeof(MemoryManager), fallbackMethodName);
}
//Emits a (possibly exclusive and/or ordered) store of Rt (and Rt2 when
//pair is true) to the address in Rn. In the exclusive case, Rs receives
//1 on failure (monitor lost) and 0 on success.
private static void EmitStore(ILEmitterCtx context, AccessType accType, bool pair)
{
    OpCodeMemEx64 op = (OpCodeMemEx64)context.CurrOp;

    bool ordered   = (accType & AccessType.Ordered)   != 0;
    bool exclusive = (accType & AccessType.Exclusive) != 0;

    //Ordered stores require a memory barrier before the access.
    if (ordered)
    {
        EmitBarrier(context);
    }

    ILLabel lblEx  = new ILLabel();
    ILLabel lblEnd = new ILLabel();

    if (exclusive)
    {
        //If this core no longer holds the exclusive monitor for the
        //address, set Rs = 1 (failure) and skip the store entirely.
        EmitMemoryCall(context, nameof(MemoryManager.TestExclusive), op.Rn);

        context.Emit(OpCodes.Brtrue_S, lblEx);

        context.EmitLdc_I8(1);
        context.EmitStintzr(op.Rs);

        context.Emit(OpCodes.Br_S, lblEnd);
    }

    //Labels are marked unconditionally; they are only branch targets in
    //the exclusive case, so this is harmless for plain stores.
    context.MarkLabel(lblEx);

    context.EmitLdarg(TranslatedSub.MemoryArgIdx);

    context.EmitLdint(op.Rn);
    context.EmitLdintzr(op.Rt);

    EmitWriteCall(context, op.Size);

    if (pair)
    {
        //Second register of the pair goes at Rn + element size.
        context.EmitLdarg(TranslatedSub.MemoryArgIdx);

        context.EmitLdint(op.Rn);
        context.EmitLdc_I8(1 << op.Size);

        context.Emit(OpCodes.Add);

        context.EmitLdintzr(op.Rt2);

        EmitWriteCall(context, op.Size);
    }

    if (exclusive)
    {
        //Store succeeded: Rs = 0 and release the exclusive monitor.
        context.EmitLdc_I8(0);
        context.EmitStintzr(op.Rs);

        EmitMemoryCall(context, nameof(MemoryManager.ClearExclusiveForStore));
    }

    context.MarkLabel(lblEnd);
}
/// <summary>
/// Emits a SIMD multiple-structures load or store (LD1-LD4 / ST1-ST4),
/// iterating over repetitions, elements and structure registers, then
/// performing register write-back when the opcode requests it.
/// </summary>
private static void EmitSimdMemMs(ILEmitterCtx context, bool isLoad)
{
    OpCodeSimdMemMs64 op = (OpCodeSimdMemMs64)context.CurrOp;

    int offset = 0;

    //Pushes the memory manager and the current element address (Rn + offset).
    void EmitAddress()
    {
        context.EmitLdarg(TranslatedSub.MemoryArgIdx);
        context.EmitLdint(op.Rn);
        context.EmitLdc_I8(offset);
        context.Emit(OpCodes.Add);
    }

    for (int rep = 0; rep < op.Reps; rep++)
    {
        for (int elem = 0; elem < op.Elems; elem++)
        {
            for (int sElem = 0; sElem < op.SElems; sElem++)
            {
                //Register numbers wrap around after V31.
                int reg = (op.Rt + rep + sElem) & 0x1f;

                if (isLoad)
                {
                    EmitAddress();

                    EmitReadZxCall(context, op.Size);

                    EmitVectorInsert(context, reg, elem, op.Size);

                    //For 64-bits registers, zero the upper half after the
                    //last element has been written.
                    if (op.RegisterSize == RegisterSize.Simd64 && elem == op.Elems - 1)
                    {
                        EmitVectorZeroUpper(context, reg);
                    }
                }
                else
                {
                    EmitAddress();

                    EmitVectorExtractZx(context, reg, elem, op.Size);

                    EmitWriteCall(context, op.Size);
                }

                offset += 1 << op.Size;
            }
        }
    }

    if (op.WBack)
    {
        EmitSimdMemWBack(context, offset);
    }
}
/// <summary>
/// Emits the RET instruction: flushes cached register state back to the
/// thread state, then returns the link register value from the translated
/// subroutine (presumably consumed by the dispatcher as the next PC —
/// confirm against the caller of translated subs).
/// </summary>
public static void Ret(ILEmitterCtx context)
{
    //Persist register state before leaving the translated subroutine.
    context.EmitStoreState();

    //Return value of the subroutine is the LR register content.
    context.EmitLdint(CpuThreadState.LrIndex);

    context.Emit(OpCodes.Ret);
}
/// <summary>
/// Emits the RET instruction: flushes cached register state back to the
/// thread state, then returns the link register value from the translated
/// subroutine (presumably consumed by the dispatcher as the next PC —
/// confirm against the caller of translated subs).
/// </summary>
public static void Ret(ILEmitterCtx context)
{
    //Persist register state before leaving the translated subroutine.
    context.EmitStoreState();

    //Return value of the subroutine is the LR register content.
    context.EmitLdint(RegisterAlias.Lr);

    context.Emit(OpCodes.Ret);
}
/// <summary>
/// Emits base-register write-back for SIMD memory opcodes:
/// Rn += Rm when an offset register is given, otherwise Rn += the
/// accumulated byte offset of the transfer.
/// </summary>
private static void EmitSimdMemWBack(ILEmitterCtx context, int offset)
{
    OpCodeMemReg64 op = (OpCodeMemReg64)context.CurrOp;

    context.EmitLdint(op.Rn);

    //Rm == ZR selects the immediate (transfer size) form of write-back.
    if (op.Rm == RegisterAlias.Zr)
    {
        context.EmitLdc_I8(offset);
    }
    else
    {
        context.EmitLdint(op.Rm);
    }

    context.Emit(OpCodes.Add);

    context.EmitStint(op.Rn);
}
/// <summary>
/// Emits an address validity check: leaves a non-zero value on the IL
/// stack when the address has bits set above the address space size or
/// is misaligned for the access size, zero when it is valid.
/// </summary>
private static void EmitAddressCheck(ILEmitterCtx context, int size)
{
    //High bits beyond the address space, plus the low alignment bits
    //for this access size, must all be clear.
    long mask = ~(context.Memory.AddressSpaceSize - 1) | ((1u << size) - 1);

    context.EmitLdint(_tempIntAddress);
    context.EmitLdc_I(mask);

    context.Emit(OpCodes.And);
}
/// <summary>
/// Pushes the value of an AArch32 register, mapping it through the
/// current processor mode. Reads of the PC yield the constant value
/// computed by GetPc() for the current instruction.
/// </summary>
public static void EmitLoadFromRegister(ILEmitterCtx context, int register)
{
    if (register != RegisterAlias.Aarch32Pc)
    {
        context.EmitLdint(GetRegisterAlias(context.Mode, register));

        return;
    }

    //PC is not a real storage location here; materialize it as a constant.
    OpCode32 op = (OpCode32)context.CurrOp;

    context.EmitLdc_I4((int)op.GetPc());
}
/// <summary>
/// Pushes the first operand (Rn) of a 64-bits ALU opcode. Logical and
/// shifted-register forms use the ZR-variant register load; all other
/// data operations use the plain variant.
/// </summary>
public static void EmitDataLoadRn(ILEmitterCtx context)
{
    IOpCodeAlu64 op = (IOpCodeAlu64)context.CurrOp;

    bool useZeroRegister = op.DataOp == DataOp.Logical || op is IOpCodeAluRs64;

    if (useZeroRegister)
    {
        context.EmitLdintzr(op.Rn);
    }
    else
    {
        context.EmitLdint(op.Rn);
    }
}
/// <summary>
/// Emits a call to a MemoryManager method, passing the memory manager,
/// the executing core id, and optionally the value of integer register
/// <paramref name="rn"/>.
/// </summary>
private static void EmitMemoryCall(ILEmitterCtx context, string name, int rn = -1)
{
    context.EmitLdarg(TranslatedSub.MemoryArgIdx);
    context.EmitLdarg(TranslatedSub.StateArgIdx);

    context.EmitCallPropGet(typeof(CpuThreadState), nameof(CpuThreadState.Core));

    //rn == -1 is the sentinel for "no register argument".
    if (rn != -1)
    {
        context.EmitLdint(rn);
    }

    context.EmitCall(typeof(MemoryManager), name);
}
// ARM32 helpers.

//Pushes Rm shifted by the opcode's immediate amount, optionally updating
//the carry flag when setCarry is requested and the opcode sets flags.
//An encoded immediate of 0 has special meanings per the ARM ARM:
//LSR #0 / ASR #0 encode a shift of 32, and ROR #0 encodes RRX.
private static void EmitLoadRmShiftedByImmediate(ILEmitterCtx context, OpCode32AluRsImm op, bool setCarry)
{
    int shift = op.Imm;

    if (shift == 0)
    {
        //Decode the special zero-immediate shift amounts.
        switch (op.ShiftType)
        {
            case ShiftType.Lsr: shift = 32; break;
            case ShiftType.Asr: shift = 32; break;
            case ShiftType.Ror: shift = 1;  break;
        }
    }

    context.EmitLdint(op.Rm);

    //shift == 0 here means LSL #0, which is a plain register read.
    if (shift != 0)
    {
        //Only update carry when the instruction actually sets flags.
        setCarry &= op.SetFlags;

        switch (op.ShiftType)
        {
            case ShiftType.Lsl: EmitLslC(context, setCarry, shift); break;
            case ShiftType.Lsr: EmitLsrC(context, setCarry, shift); break;
            case ShiftType.Asr: EmitAsrC(context, setCarry, shift); break;
            case ShiftType.Ror:
                //Note: tests the original immediate, not the decoded
                //shift — ROR #0 is RRX (rotate right with extend).
                if (op.Imm != 0)
                {
                    EmitRorC(context, setCarry, shift);
                }
                else
                {
                    EmitRrxC(context, setCarry);
                }
                break;
        }
    }
}
//Emits an integer store with a fast path through the page table and a
//slow path through the MemoryManager fallback. The address check result
//(left on the stack by EmitAddressCheck) selects the path; the page-table
//walk can also bail out to the slow path via lblSlowPath.
private static void EmitWriteInt(ILEmitterCtx context, int size)
{
    EmitAddressCheck(context, size);

    ILLabel lblFastPath = new ILLabel();
    ILLabel lblSlowPath = new ILLabel();
    ILLabel lblEnd      = new ILLabel();

    //Zero check result means the address is valid: take the fast path.
    context.Emit(OpCodes.Brfalse_S, lblFastPath);

    context.MarkLabel(lblSlowPath);

    EmitWriteIntFallback(context, size);

    context.Emit(OpCodes.Br, lblEnd);

    context.MarkLabel(lblFastPath);

    //Resolves the host pointer for the address (may branch to lblSlowPath).
    EmitPtPointerLoad(context, lblSlowPath);

    context.EmitLdint(_tempIntValue);

    //Sub-64-bits stores operate on a 32-bits value.
    if (size < 3)
    {
        context.Emit(OpCodes.Conv_U4);
    }

    //Direct indirect store of the appropriate width.
    switch (size)
    {
        case 0: context.Emit(OpCodes.Stind_I1); break;
        case 1: context.Emit(OpCodes.Stind_I2); break;
        case 2: context.Emit(OpCodes.Stind_I4); break;
        case 3: context.Emit(OpCodes.Stind_I8); break;
    }

    context.MarkLabel(lblEnd);
}
//Emits a (possibly exclusive and/or ordered) load of Rt (and Rt2 when
//pair is true) from the address in Rn. The address is copied to the
//scratch temporary first so both halves of a pair read from the same base.
private static void EmitLoad(ILEmitterCtx context, AccessType accType, bool pair)
{
    OpCodeMemEx64 op = (OpCodeMemEx64)context.CurrOp;

    bool ordered   = (accType & AccessType.Ordered)   != 0;
    bool exclusive = (accType & AccessType.Exclusive) != 0;

    //Ordered loads require a memory barrier.
    if (ordered)
    {
        EmitBarrier(context);
    }

    //Exclusive loads acquire the exclusive monitor for the address.
    if (exclusive)
    {
        EmitMemoryCall(context, nameof(MemoryManager.SetExclusive), op.Rn);
    }

    //Snapshot the address; Rn could be overwritten by the load into Rt.
    context.EmitLdint(op.Rn);
    context.EmitSttmp();

    context.EmitLdarg(TranslatedSub.MemoryArgIdx);
    context.EmitLdtmp();

    EmitReadZxCall(context, op.Size);

    context.EmitStintzr(op.Rt);

    if (pair)
    {
        //Second register of the pair reads from address + element size.
        context.EmitLdarg(TranslatedSub.MemoryArgIdx);
        context.EmitLdtmp();
        context.EmitLdc_I8(1 << op.Size);

        context.Emit(OpCodes.Add);

        EmitReadZxCall(context, op.Size);

        context.EmitStintzr(op.Rt2);
    }
}
/// <summary>
/// Pushes the first ALU operand (Rn) for the current opcode, dispatching
/// on the 64-bits vs 32-bits opcode family.
/// </summary>
/// <exception cref="InvalidOperationException">
/// Thrown when the current opcode is neither an IOpCodeAlu64 nor an
/// IOpCode32Alu.
/// </exception>
public static void EmitAluLoadRn(ILEmitterCtx context)
{
    switch (context.CurrOp)
    {
        case IOpCodeAlu64 op:
            //Logical and shifted-register forms use the ZR-variant load;
            //the remaining data ops use the plain variant.
            if (op.DataOp == DataOp.Logical || op is IOpCodeAluRs64)
            {
                context.EmitLdintzr(op.Rn);
            }
            else
            {
                context.EmitLdint(op.Rn);
            }
            break;

        case IOpCode32Alu op32:
            InstEmit32Helper.EmitLoadFromRegister(context, op32.Rn);
            break;

        default:
            throw new InvalidOperationException();
    }
}
//Emits a multi-level page table walk that converts the guest address in
//_tempIntAddress into a host pointer, left on the IL stack. When the
//memory manager lacks write-watch support, PTE flag bits are checked and
//execution branches to lblFallbackPath if any are set.
private static void EmitPtPointerLoad(ILEmitterCtx context, ILLabel lblFallbackPath)
{
    //Start from the page table base pointer.
    context.EmitLdc_I8(context.Memory.PageTable.ToInt64());

    context.Emit(OpCodes.Conv_I);

    int bit = MemoryManager.PageBits;

    //Walk one page table level per iteration, indexing with successive
    //bit fields of the address, until the address space is covered.
    do
    {
        context.EmitLdint(_tempIntAddress);

        if (context.CurrOp.RegisterSize == RegisterSize.Int32)
        {
            context.Emit(OpCodes.Conv_U8);
        }

        context.EmitLsr(bit);

        bit += context.Memory.PtLevelBits;

        //Mask the level index, except at the last (topmost) level where
        //the remaining bits are already in range.
        if (bit < context.Memory.AddressSpaceBits)
        {
            context.EmitLdc_I8(context.Memory.PtLevelMask);

            context.Emit(OpCodes.And);
        }

        //Scale the index by pointer size and dereference the entry.
        context.EmitLdc_I8(IntPtr.Size);

        context.Emit(OpCodes.Mul);

        context.Emit(OpCodes.Conv_I);
        context.Emit(OpCodes.Add);
        context.Emit(OpCodes.Ldind_I);
    }
    while (bit < context.Memory.AddressSpaceBits);

    if (!context.Memory.HasWriteWatchSupport)
    {
        //No write-watch: the PTE carries flag bits. Stash the entry, test
        //the flags, and fall back to the slow path when any are set.
        context.Emit(OpCodes.Conv_U8);

        context.EmitStint(_tempIntPtAddr);
        context.EmitLdint(_tempIntPtAddr);

        context.EmitLdc_I8(MemoryManager.PteFlagsMask);

        context.Emit(OpCodes.And);

        context.Emit(OpCodes.Brtrue, lblFallbackPath);

        context.EmitLdint(_tempIntPtAddr);

        context.Emit(OpCodes.Conv_I);
    }

    //Add the offset within the page to the page's host base pointer.
    context.EmitLdint(_tempIntAddress);
    context.EmitLdc_I(MemoryManager.PageMask);

    context.Emit(OpCodes.And);

    context.Emit(OpCodes.Conv_I);
    context.Emit(OpCodes.Add);
}
//Emits a SIMD single-structure load or store (LD1-LD4 / ST1-ST4 single
//element forms, plus the replicate LD1R-LD4R forms), with optional
//register write-back.
private static void EmitSimdMemSs(ILEmitterCtx context, bool isLoad)
{
    OpCodeSimdMemSs64 op = (OpCodeSimdMemSs64)context.CurrOp;

    int offset = 0;

    //Pushes the memory manager and the current element address (Rn + offset).
    void EmitMemAddress()
    {
        context.EmitLdarg(TranslatedSub.MemoryArgIdx);
        context.EmitLdint(op.Rn);
        context.EmitLdc_I8(offset);
        context.Emit(OpCodes.Add);
    }

    if (op.Replicate)
    {
        //Only loads use the replicate mode.
        if (!isLoad)
        {
            throw new InvalidOperationException();
        }

        int bytes = op.GetBitsCount() >> 3;
        int elems = bytes >> op.Size;

        for (int sElem = 0; sElem < op.SElems; sElem++)
        {
            //Register numbers wrap around after V31.
            int rt = (op.Rt + sElem) & 0x1f;

            //Replicate: the same memory element is written to every
            //lane of the target register.
            for (int index = 0; index < elems; index++)
            {
                EmitMemAddress();

                EmitReadZxCall(context, op.Size);

                EmitVectorInsert(context, rt, index, op.Size);
            }

            if (op.RegisterSize == RegisterSize.Simd64)
            {
                EmitVectorZeroUpper(context, rt);
            }

            offset += 1 << op.Size;
        }
    }
    else
    {
        //Single element: transfer only lane op.Index of each register.
        for (int sElem = 0; sElem < op.SElems; sElem++)
        {
            int rt = (op.Rt + sElem) & 0x1f;

            if (isLoad)
            {
                EmitMemAddress();

                EmitReadZxCall(context, op.Size);

                EmitVectorInsert(context, rt, op.Index, op.Size);
            }
            else
            {
                EmitMemAddress();

                EmitVectorExtractZx(context, rt, op.Index, op.Size);

                EmitWriteCall(context, op.Size);
            }

            offset += 1 << op.Size;
        }
    }

    if (op.WBack)
    {
        EmitSimdMemWBack(context, offset);
    }
}
//Emits a (possibly exclusive and/or ordered) load of Rt (and Rt2 when
//pair is true) from the address in Rn. For exclusive loads, the address
//and loaded value(s) are recorded on the CpuThreadState so a later
//store-exclusive can compare-exchange against them.
//Fix: the invalid-size exception previously said "store" in this load
//routine; the message now correctly says "load".
private static void EmitLoad(ILEmitterCtx context, AccessType accType, bool pair)
{
    OpCodeMemEx64 op = (OpCodeMemEx64)context.CurrOp;

    bool ordered   = (accType & AccessType.Ordered)   != 0;
    bool exclusive = (accType & AccessType.Exclusive) != 0;

    //Ordered loads require a memory barrier.
    if (ordered)
    {
        EmitBarrier(context);
    }

    //Snapshot the address; Rn could be overwritten by the load into Rt.
    context.EmitLdint(op.Rn);
    context.EmitSttmp();

    if (exclusive)
    {
        //Record the exclusive monitor address on the thread state.
        context.EmitLdarg(TranslatedSub.StateArgIdx);
        context.EmitLdtmp();

        context.EmitPrivateCall(typeof(CpuThreadState), nameof(CpuThreadState.SetExclusiveAddress));
    }

    //Stores the value on top of the stack into the given exclusive-value
    //property of the thread state, leaving the value on the stack.
    void WriteExclusiveValue(string propName)
    {
        if (op.Size < 3)
        {
            context.Emit(OpCodes.Conv_U8);
        }

        context.EmitSttmp2();
        context.EmitLdarg(TranslatedSub.StateArgIdx);
        context.EmitLdtmp2();

        context.EmitCallPrivatePropSet(typeof(CpuThreadState), propName);

        context.EmitLdtmp2();

        if (op.Size < 3)
        {
            context.Emit(OpCodes.Conv_U4);
        }
    }

    if (pair)
    {
        //Exclusive loads should be atomic. For pairwise loads, we need to
        //read all the data at once. For a 32-bits pairwise load, we do a
        //simple 64-bits load, for a 128-bits load, we need to call a special
        //method to read 128-bits atomically.
        if (op.Size == 2)
        {
            context.EmitLdarg(TranslatedSub.MemoryArgIdx);
            context.EmitLdtmp();

            EmitReadZxCall(context, 3);

            context.Emit(OpCodes.Dup);

            //Mask low half.
            context.Emit(OpCodes.Conv_U4);

            if (exclusive)
            {
                WriteExclusiveValue(nameof(CpuThreadState.ExclusiveValueLow));
            }

            context.EmitStintzr(op.Rt);

            //Shift high half.
            context.EmitLsr(32);
            context.Emit(OpCodes.Conv_U4);

            if (exclusive)
            {
                WriteExclusiveValue(nameof(CpuThreadState.ExclusiveValueHigh));
            }

            context.EmitStintzr(op.Rt2);
        }
        else if (op.Size == 3)
        {
            context.EmitLdarg(TranslatedSub.MemoryArgIdx);
            context.EmitLdtmp();

            context.EmitPrivateCall(typeof(MemoryManager), nameof(MemoryManager.AtomicReadInt128));

            context.Emit(OpCodes.Dup);

            //Load low part of the vector.
            context.EmitLdc_I4(0);
            context.EmitLdc_I4(3);

            VectorHelper.EmitCall(context, nameof(VectorHelper.VectorExtractIntZx));

            if (exclusive)
            {
                WriteExclusiveValue(nameof(CpuThreadState.ExclusiveValueLow));
            }

            context.EmitStintzr(op.Rt);

            //Load high part of the vector.
            context.EmitLdc_I4(1);
            context.EmitLdc_I4(3);

            VectorHelper.EmitCall(context, nameof(VectorHelper.VectorExtractIntZx));

            if (exclusive)
            {
                WriteExclusiveValue(nameof(CpuThreadState.ExclusiveValueHigh));
            }

            context.EmitStintzr(op.Rt2);
        }
        else
        {
            throw new InvalidOperationException($"Invalid load size of {1 << op.Size} bytes.");
        }
    }
    else
    {
        //8, 16, 32 or 64-bits (non-pairwise) load.
        context.EmitLdarg(TranslatedSub.MemoryArgIdx);
        context.EmitLdtmp();

        EmitReadZxCall(context, op.Size);

        if (exclusive)
        {
            WriteExclusiveValue(nameof(CpuThreadState.ExclusiveValueLow));
        }

        context.EmitStintzr(op.Rt);
    }
}
//Emits a (possibly exclusive and/or ordered) store of Rt (and Rt2 when
//pair is true) to the address in Rn. Exclusive stores are implemented as
//a compare-exchange against the values recorded by the matching exclusive
//load; Rs receives 1 on failure and 0 on success.
private static void EmitStore(ILEmitterCtx context, AccessType accType, bool pair)
{
    OpCodeMemEx64 op = (OpCodeMemEx64)context.CurrOp;

    bool ordered   = (accType & AccessType.Ordered)   != 0;
    bool exclusive = (accType & AccessType.Exclusive) != 0;

    //Ordered stores require a memory barrier.
    if (ordered)
    {
        EmitBarrier(context);
    }

    if (exclusive)
    {
        ILLabel lblEx  = new ILLabel();
        ILLabel lblEnd = new ILLabel();

        //Verify the monitor still covers this address.
        context.EmitLdarg(TranslatedSub.StateArgIdx);
        context.EmitLdint(op.Rn);

        context.EmitPrivateCall(typeof(CpuThreadState), nameof(CpuThreadState.CheckExclusiveAddress));

        context.Emit(OpCodes.Brtrue_S, lblEx);

        //Address check failed, set error right away and do not store anything.
        context.EmitLdc_I4(1);
        context.EmitStintzr(op.Rs);

        context.Emit(OpCodes.Br, lblEnd);

        //Address check passed.
        context.MarkLabel(lblEx);

        //Compare-exchange arguments: memory, address, expected value(s)
        //(from the exclusive load), then the new value(s) from Rt/Rt2.
        context.EmitLdarg(TranslatedSub.MemoryArgIdx);

        context.EmitLdint(op.Rn);

        context.EmitLdarg(TranslatedSub.StateArgIdx);

        context.EmitCallPrivatePropGet(typeof(CpuThreadState), nameof(CpuThreadState.ExclusiveValueLow));

        //Narrows the int64 on top of the stack to the access size.
        void EmitCast()
        {
            //The input should be always int64.
            switch (op.Size)
            {
                case 0: context.Emit(OpCodes.Conv_U1); break;
                case 1: context.Emit(OpCodes.Conv_U2); break;
                case 2: context.Emit(OpCodes.Conv_U4); break;
            }
        }

        EmitCast();

        if (pair)
        {
            context.EmitLdarg(TranslatedSub.StateArgIdx);

            context.EmitCallPrivatePropGet(typeof(CpuThreadState), nameof(CpuThreadState.ExclusiveValueHigh));

            EmitCast();

            context.EmitLdintzr(op.Rt);

            EmitCast();

            context.EmitLdintzr(op.Rt2);

            EmitCast();

            //Pairwise exclusive stores must be atomic across both halves.
            switch (op.Size)
            {
                case 2: context.EmitPrivateCall(typeof(MemoryManager), nameof(MemoryManager.AtomicCompareExchange2xInt32)); break;
                case 3: context.EmitPrivateCall(typeof(MemoryManager), nameof(MemoryManager.AtomicCompareExchangeInt128));  break;

                default: throw new InvalidOperationException($"Invalid store size of {1 << op.Size} bytes.");
            }
        }
        else
        {
            context.EmitLdintzr(op.Rt);

            EmitCast();

            switch (op.Size)
            {
                case 0: context.EmitCall(typeof(MemoryManager), nameof(MemoryManager.AtomicCompareExchangeByte));  break;
                case 1: context.EmitCall(typeof(MemoryManager), nameof(MemoryManager.AtomicCompareExchangeInt16)); break;
                case 2: context.EmitCall(typeof(MemoryManager), nameof(MemoryManager.AtomicCompareExchangeInt32)); break;
                case 3: context.EmitCall(typeof(MemoryManager), nameof(MemoryManager.AtomicCompareExchangeInt64)); break;

                default: throw new InvalidOperationException($"Invalid store size of {1 << op.Size} bytes.");
            }
        }

        //The value returned is a bool, true if the values compared
        //were equal and the new value was written, false otherwise.
        //We need to invert this result, as on ARM 1 indicates failure,
        //and 0 success on those instructions.
        context.EmitLdc_I4(1);

        context.Emit(OpCodes.Xor);

        //Keep one copy of the inverted result for the branch below, and
        //store the other (widened) copy into Rs.
        context.Emit(OpCodes.Dup);
        context.Emit(OpCodes.Conv_U8);

        context.EmitStintzr(op.Rs);

        //Only clear the exclusive monitor if the store was successful (Rs = false).
        context.Emit(OpCodes.Brtrue_S, lblEnd);

        Clrex(context);

        context.MarkLabel(lblEnd);
    }
    else
    {
        //Plain (non-exclusive) store: write Rt, and Rt2 at +(1 << Size)
        //for pairwise stores.
        void EmitWrite(int rt, long offset)
        {
            context.EmitLdarg(TranslatedSub.MemoryArgIdx);

            context.EmitLdint(op.Rn);

            if (offset != 0)
            {
                context.EmitLdc_I8(offset);

                context.Emit(OpCodes.Add);
            }

            context.EmitLdintzr(rt);

            EmitWriteCall(context, op.Size);
        }

        EmitWrite(op.Rt, 0);

        if (pair)
        {
            EmitWrite(op.Rt2, 1 << op.Size);
        }
    }
}