public static Operand EmitReadIntAligned(ArmEmitterContext context, Operand address, int size)
{
    if ((uint)size > 4)
    {
        throw new ArgumentOutOfRangeException(nameof(size));
    }

    Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

    Operand lblFastPath = Label();

    context.BranchIfFalse(lblFastPath, isUnalignedAddr, BasicBlockFrequency.Cold);

    // The call is not expected to return (it should throw).
    context.Call(typeof(NativeInterface).GetMethod(nameof(NativeInterface.ThrowInvalidMemoryAccess)), address);

    context.MarkLabel(lblFastPath);

    Operand physAddr = EmitPtPointerLoad(context, address, null, write: false);

    return size switch
    {
        0 => context.Load8(physAddr),
        1 => context.Load16(physAddr),
        2 => context.Load(OperandType.I32, physAddr),
        3 => context.Load(OperandType.I64, physAddr),
        _ => context.Load(OperandType.V128, physAddr)
    };
}
private static void EmitReadInt(ArmEmitterContext context, Operand address, int rt, int size)
{
    Operand lblSlowPath = Label();
    Operand lblEnd = Label();

    Operand physAddr = EmitPtPointerLoad(context, address, lblSlowPath, write: false, size);

    Operand value = default;

    switch (size)
    {
        case 0:
            value = context.Load8(physAddr);
            break;
        case 1:
            value = context.Load16(physAddr);
            break;
        case 2:
            value = context.Load(OperandType.I32, physAddr);
            break;
        case 3:
            value = context.Load(OperandType.I64, physAddr);
            break;
    }

    SetInt(context, rt, value);

    if (!context.Memory.Type.IsHostMapped())
    {
        context.Branch(lblEnd);

        context.MarkLabel(lblSlowPath, BasicBlockFrequency.Cold);

        EmitReadIntFallback(context, address, rt, size);

        context.MarkLabel(lblEnd);
    }
}
private static void EmitReadInt(ArmEmitterContext context, Operand address, int rt, int size)
{
    Operand lblSlowPath = Label();
    Operand lblEnd = Label();

    Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

    context.BranchIfTrue(lblSlowPath, isUnalignedAddr);

    Operand physAddr = EmitPtPointerLoad(context, address, lblSlowPath, write: false);

    Operand value = null;

    switch (size)
    {
        case 0:
            value = context.Load8(physAddr);
            break;
        case 1:
            value = context.Load16(physAddr);
            break;
        case 2:
            value = context.Load(OperandType.I32, physAddr);
            break;
        case 3:
            value = context.Load(OperandType.I64, physAddr);
            break;
    }

    SetInt(context, rt, value);

    context.Branch(lblEnd);

    context.MarkLabel(lblSlowPath, BasicBlockFrequency.Cold);

    EmitReadIntFallback(context, address, rt, size);

    context.MarkLabel(lblEnd);
}
private static void EmitReadVector(
    ArmEmitterContext context,
    Operand address,
    Operand vector,
    int rt,
    int elem,
    int size)
{
    Operand isUnalignedAddr = EmitAddressCheck(context, address, size);

    Operand lblFastPath = Label();
    Operand lblSlowPath = Label();
    Operand lblEnd = Label();

    context.BranchIfFalse(lblFastPath, isUnalignedAddr);

    context.MarkLabel(lblSlowPath);

    EmitReadVectorFallback(context, address, vector, rt, elem, size);

    context.Branch(lblEnd);

    context.MarkLabel(lblFastPath);

    Operand physAddr = EmitPtPointerLoad(context, address, lblSlowPath);

    Operand value = null;

    switch (size)
    {
        case 0:
            value = context.VectorInsert8(vector, context.Load8(physAddr), elem);
            break;
        case 1:
            value = context.VectorInsert16(vector, context.Load16(physAddr), elem);
            break;
        case 2:
            value = context.VectorInsert(vector, context.Load(OperandType.I32, physAddr), elem);
            break;
        case 3:
            value = context.VectorInsert(vector, context.Load(OperandType.I64, physAddr), elem);
            break;
        case 4:
            value = context.Load(OperandType.V128, physAddr);
            break;
    }

    context.Copy(GetVec(rt), value);

    context.MarkLabel(lblEnd);
}
public static Operand EmitReadIntAligned(ArmEmitterContext context, Operand address, int size)
{
    if ((uint)size > 4)
    {
        throw new ArgumentOutOfRangeException(nameof(size));
    }

    Operand physAddr = EmitPtPointerLoad(context, address, null, write: false, size);

    return size switch
    {
        0 => context.Load8(physAddr),
        1 => context.Load16(physAddr),
        2 => context.Load(OperandType.I32, physAddr),
        3 => context.Load(OperandType.I64, physAddr),
        _ => context.Load(OperandType.V128, physAddr)
    };
}
public static void EmitDynamicTableCall(ArmEmitterContext context, Operand tableAddress, Operand address, bool isJump)
{
    // Loop over elements of the dynamic table. Unrolled loop.

    Operand endLabel = Label();
    Operand fallbackLabel = Label();

    void EmitTableEntry(Operand entrySkipLabel)
    {
        // Try to take this entry in the table if its guest address equals 0.
        Operand gotResult = context.CompareAndSwap(tableAddress, Const(0L), address);

        // Is the address ours? (either taken via CompareAndSwap (0), or what was already here)
        context.BranchIfFalse(entrySkipLabel,
            context.BitwiseOr(
                context.ICompareEqual(gotResult, address),
                context.ICompareEqual(gotResult, Const(0L))));

        // It's ours, so what function is it pointing to?
        Operand targetFunctionPtr = context.Add(tableAddress, Const(8L));
        Operand targetFunction = context.Load(OperandType.I64, targetFunctionPtr);

        // Call the function.
        // We pass in the entry address as the guest address, as the entry may need to be updated by the
        // indirect call stub.
        EmitNativeCallWithGuestAddress(context, targetFunction, tableAddress, isJump);

        context.Branch(endLabel);
    }

    // Currently this uses a size of 1, as higher values inflate code size for no real benefit.
    for (int i = 0; i < JumpTable.DynamicTableElems; i++)
    {
        if (i == JumpTable.DynamicTableElems - 1)
        {
            // If this is the last entry, avoid emitting the additional label and add.
            EmitTableEntry(fallbackLabel);
        }
        else
        {
            Operand nextLabel = Label();

            EmitTableEntry(nextLabel);

            context.MarkLabel(nextLabel);

            // Move to the next table entry.
            tableAddress = context.Add(tableAddress, Const((long)JumpTable.JumpTableStride));
        }
    }

    context.MarkLabel(fallbackLabel);

    EmitBranchFallback(context, address, isJump);

    context.MarkLabel(endLabel);
}
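// Illustrative sketch (not part of the original source): the entry-claiming logic that the emitted
// CompareAndSwap above implements, written as plain C#. The table entry is assumed to be a pair of
// longs [guest address, host function pointer]; the name and layout here are for illustration only.
private static bool TryClaimOrMatchEntry(ref long entryGuestAddress, long guestAddress)
{
    // Atomically write our guest address into the slot if it is still empty (0).
    // CompareExchange returns the value observed before the exchange.
    long observed = System.Threading.Interlocked.CompareExchange(ref entryGuestAddress, guestAddress, 0L);

    // The entry is "ours" if we just claimed it (observed == 0) or it already held our address.
    return observed == 0L || observed == guestAddress;
}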
private static Operand EmitPtPointerLoad(ArmEmitterContext context, Operand address, Operand lblFallbackPath)
{
    Operand pte = Const(context.Memory.PageTable.ToInt64());

    int bit = MemoryManager.PageBits;

    do
    {
        Operand addrPart = context.ShiftRightUI(address, Const(bit));

        bit += context.Memory.PtLevelBits;

        if (bit < context.Memory.AddressSpaceBits)
        {
            addrPart = context.BitwiseAnd(addrPart, Const(addrPart.Type, context.Memory.PtLevelMask));
        }

        Operand pteOffset = context.ShiftLeft(addrPart, Const(3));

        if (pteOffset.Type == OperandType.I32)
        {
            pteOffset = context.ZeroExtend32(OperandType.I64, pteOffset);
        }

        Operand pteAddress = context.Add(pte, pteOffset);

        pte = context.Load(OperandType.I64, pteAddress);
    }
    while (bit < context.Memory.AddressSpaceBits);

    if (!context.Memory.HasWriteWatchSupport)
    {
        Operand hasFlagSet = context.BitwiseAnd(pte, Const((long)MemoryManager.PteFlagsMask));

        context.BranchIfTrue(lblFallbackPath, hasFlagSet);
    }

    Operand pageOffset = context.BitwiseAnd(address, Const(address.Type, MemoryManager.PageMask));

    if (pageOffset.Type == OperandType.I32)
    {
        pageOffset = context.ZeroExtend32(OperandType.I64, pageOffset);
    }

    Operand physAddr = context.Add(pte, pageOffset);

    return physAddr;
}
private static Operand EmitPtPointerLoad(ArmEmitterContext context, Operand address, Operand lblSlowPath)
{
    int ptLevelBits = context.Memory.AddressSpaceBits - 12; // 12 = Number of page bits.
    int ptLevelSize = 1 << ptLevelBits;
    int ptLevelMask = ptLevelSize - 1;

    Operand pte = Ptc.State == PtcState.Disabled
        ? Const(context.Memory.PageTablePointer.ToInt64())
        : Const(context.Memory.PageTablePointer.ToInt64(), true, Ptc.PageTablePointerIndex);

    int bit = PageBits;

    do
    {
        Operand addrPart = context.ShiftRightUI(address, Const(bit));

        bit += ptLevelBits;

        if (bit < context.Memory.AddressSpaceBits)
        {
            addrPart = context.BitwiseAnd(addrPart, Const(addrPart.Type, ptLevelMask));
        }

        Operand pteOffset = context.ShiftLeft(addrPart, Const(3));

        if (pteOffset.Type == OperandType.I32)
        {
            pteOffset = context.ZeroExtend32(OperandType.I64, pteOffset);
        }

        Operand pteAddress = context.Add(pte, pteOffset);

        pte = context.Load(OperandType.I64, pteAddress);
    }
    while (bit < context.Memory.AddressSpaceBits);

    context.BranchIfTrue(lblSlowPath, context.ICompareLess(pte, Const(0L)));

    Operand pageOffset = context.BitwiseAnd(address, Const(address.Type, PageMask));

    if (pageOffset.Type == OperandType.I32)
    {
        pageOffset = context.ZeroExtend32(OperandType.I64, pageOffset);
    }

    return context.Add(pte, pageOffset);
}
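// Illustrative sketch (not part of the original source): the index arithmetic behind the emitted page
// table walk, written as plain C#. PageBits, PtLevelBits, and AddressSpaceBits are assumed example
// values; the real ones come from the memory manager. Each level index selects an 8-byte entry, which
// is why the emitted code shifts the index left by 3.
private static void DescribePageTableWalk(ulong address)
{
    const int PageBits = 12;         // 4 KiB pages.
    const int PtLevelBits = 9;       // 512 entries per level (example value).
    const int AddressSpaceBits = 39; // Example guest address space size.

    const int PtLevelMask = (1 << PtLevelBits) - 1;
    const ulong PageMask = (1UL << PageBits) - 1;

    int bit = PageBits;

    do
    {
        ulong index = address >> bit;

        bit += PtLevelBits;

        if (bit < AddressSpaceBits)
        {
            // Intermediate levels mask off the higher bits; the top level keeps them.
            index &= PtLevelMask;
        }

        ulong entryOffset = index << 3; // 8 bytes per page table entry.

        System.Console.WriteLine($"level entry offset: 0x{entryOffset:X}");
    }
    while (bit < AddressSpaceBits);

    // The final entry holds the host pointer for the page; adding the page offset yields the host address.
    System.Console.WriteLine($"page offset: 0x{address & PageMask:X}");
}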
public static void EmitJumpTableBranch(ArmEmitterContext context, Operand address, bool isJump = false)
{
    if (address.Type == OperandType.I32)
    {
        address = context.ZeroExtend32(OperandType.I64, address);
    }

    // TODO: Constant folding. Indirect calls are slower in the best case and emit more code so we want to
    // avoid them when possible.
    bool isConst = address.Kind == OperandKind.Constant;
    long constAddr = (long)address.Value;

    if (!context.HighCq)
    {
        // Don't emit indirect calls or jumps if we're compiling in lowCq mode.
        // This avoids wasting space on the jump and indirect tables.
        // Just ask the translator for the function address.
        EmitBranchFallback(context, address, isJump);
    }
    else if (!isConst)
    {
        // Virtual branch/call - store first used addresses on a small table for fast lookup.
        int entry = context.JumpTable.ReserveDynamicEntry(isJump);

        int jumpOffset = entry * JumpTable.JumpTableStride * JumpTable.DynamicTableElems;

        Operand dynTablePtr = Const(context.JumpTable.DynamicPointer.ToInt64() + jumpOffset);

        EmitDynamicTableCall(context, dynTablePtr, address, isJump);
    }
    else
    {
        int entry = context.JumpTable.ReserveTableEntry(context.BaseAddress & (~3L), constAddr, isJump);

        int jumpOffset = entry * JumpTable.JumpTableStride + 8; // Offset directly to the host address.

        // TODO: Relocatable jump table ptr for AOT. Would prefer a solution to patch this constant into
        // functions as they are loaded rather than calculate at runtime.
        Operand tableEntryPtr = Const(context.JumpTable.JumpPointer.ToInt64() + jumpOffset);

        Operand funcAddr = context.Load(OperandType.I64, tableEntryPtr);

        // Call the function directly. If it's not present yet, this will call the direct call stub.
        EmitNativeCallWithGuestAddress(context, funcAddr, address, isJump);
    }
}
public static void EmitJumpTableBranch(ArmEmitterContext context, Operand address, bool isJump = false)
{
    if (address.Type == OperandType.I32)
    {
        address = context.ZeroExtend32(OperandType.I64, address);
    }

    // TODO: Constant folding. Indirect calls are slower in the best case and emit more code so we want to
    // avoid them when possible.
    bool isConst = address.Kind == OperandKind.Constant;
    long constAddr = (long)address.Value;

    if (!context.HighCq)
    {
        // Don't emit indirect calls or jumps if we're compiling in lowCq mode. This avoids wasting space on the
        // jump and indirect tables. Just ask the translator for the function address.
        EmitBranchFallback(context, address, isJump);
    }
    else if (!isConst)
    {
        // Virtual branch/call - store first used addresses on a small table for fast lookup.
        int entry = context.JumpTable.ReserveDynamicEntry(isJump);

        int jumpOffset = entry * JumpTable.JumpTableStride * JumpTable.DynamicTableElems;

        Operand dynTablePtr;

        if (Ptc.State == PtcState.Disabled)
        {
            dynTablePtr = Const(context.JumpTable.DynamicPointer.ToInt64() + jumpOffset);
        }
        else
        {
            dynTablePtr = Const(context.JumpTable.DynamicPointer.ToInt64(), true, Ptc.DynamicPointerIndex);
            dynTablePtr = context.Add(dynTablePtr, Const((long)jumpOffset));
        }

        EmitDynamicTableCall(context, dynTablePtr, address, isJump);
    }
    else
    {
        int entry = context.JumpTable.ReserveTableEntry(context.BaseAddress & (~3L), constAddr, isJump);

        int jumpOffset = entry * JumpTable.JumpTableStride + 8; // Offset directly to the host address.

        Operand tableEntryPtr;

        if (Ptc.State == PtcState.Disabled)
        {
            tableEntryPtr = Const(context.JumpTable.JumpPointer.ToInt64() + jumpOffset);
        }
        else
        {
            tableEntryPtr = Const(context.JumpTable.JumpPointer.ToInt64(), true, Ptc.JumpPointerIndex);
            tableEntryPtr = context.Add(tableEntryPtr, Const((long)jumpOffset));
        }

        Operand funcAddr = context.Load(OperandType.I64, tableEntryPtr);

        // Call the function directly. If it's not present yet, this will call the direct call stub.
        EmitNativeCallWithGuestAddress(context, funcAddr, address, isJump);
    }
}
private static void EmitTableBranch(ArmEmitterContext context, Operand guestAddress, bool isJump)
{
    context.StoreToContext();

    if (guestAddress.Type == OperandType.I32)
    {
        guestAddress = context.ZeroExtend32(OperandType.I64, guestAddress);
    }

    // Store the target guest address into the native context. The stub uses this address to dispatch into the
    // next translation.
    Operand nativeContext = context.LoadArgument(OperandType.I64, 0);
    Operand dispAddressAddr = context.Add(nativeContext, Const((ulong)NativeContext.GetDispatchAddressOffset()));
    context.Store(dispAddressAddr, guestAddress);

    Operand hostAddress;

    // If the address is mapped onto the function table, we can skip the table walk. Otherwise we fall back
    // onto the dispatch stub.
    if (guestAddress.Kind == OperandKind.Constant && context.FunctionTable.IsValid(guestAddress.Value))
    {
        Operand hostAddressAddr = !context.HasPtc
            ? Const(ref context.FunctionTable.GetValue(guestAddress.Value))
            : Const(ref context.FunctionTable.GetValue(guestAddress.Value), new Symbol(SymbolType.FunctionTable, guestAddress.Value));

        hostAddress = context.Load(OperandType.I64, hostAddressAddr);
    }
    else
    {
        hostAddress = !context.HasPtc
            ? Const((long)context.Stubs.DispatchStub)
            : Const((long)context.Stubs.DispatchStub, Ptc.DispatchStubSymbol);
    }

    if (isJump)
    {
        context.Tailcall(hostAddress, nativeContext);
    }
    else
    {
        OpCode op = context.CurrOp;

        Operand returnAddress = context.Call(hostAddress, OperandType.I64, nativeContext);

        context.LoadFromContext();

        // Note: The return value of a translated function is always an Int64 with the address execution has
        // returned to. We expect this address to be immediately after the current instruction; if it isn't, we
        // keep returning until we reach the dispatcher.
        Operand nextAddr = Const((long)op.Address + op.OpCodeSizeInBytes);

        // Try to continue within this block.
        // If the return address isn't our next instruction, we need to return so the JIT can figure out
        // what to do.
        Operand lblContinue = context.GetLabel(nextAddr.Value);
        context.BranchIf(lblContinue, returnAddress, nextAddr, Comparison.Equal, BasicBlockFrequency.Cold);

        context.Return(returnAddress);
    }
}
public static void EmitStoreExclusive(
    ArmEmitterContext context,
    Operand address,
    Operand value,
    bool exclusive,
    int size,
    int rs,
    bool a32)
{
    if (size < 3)
    {
        value = context.ConvertI64ToI32(value);
    }

    if (exclusive)
    {
        void SetRs(Operand value)
        {
            if (a32)
            {
                SetIntA32(context, rs, value);
            }
            else
            {
                SetIntOrZR(context, rs, value);
            }
        }

        Operand arg0 = context.LoadArgument(OperandType.I64, 0);

        Operand exAddrPtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveAddressOffset()));
        Operand exAddr = context.Load(address.Type, exAddrPtr);

        // STEP 1: Check if we have exclusive access to this memory region. If not, fail and skip store.
        Operand maskedAddress = context.BitwiseAnd(address, Const(address.Type, GetExclusiveAddressMask()));

        Operand exFailed = context.ICompareNotEqual(exAddr, maskedAddress);

        Operand lblExit = Label();

        SetRs(exFailed);

        context.BranchIfTrue(lblExit, exFailed);

        // STEP 2: We have exclusive access, make sure that the address is valid.
        Operand isUnalignedAddr = InstEmitMemoryHelper.EmitAddressCheck(context, address, size);

        Operand lblFastPath = Label();

        context.BranchIfFalse(lblFastPath, isUnalignedAddr);

        // The call is not expected to return (it should throw).
        context.Call(typeof(NativeInterface).GetMethod(nameof(NativeInterface.ThrowInvalidMemoryAccess)), address);

        // STEP 3: We have exclusive access and the address is valid, attempt the store using CAS.
        context.MarkLabel(lblFastPath);

        Operand physAddr = InstEmitMemoryHelper.EmitPtPointerLoad(context, address, null, write: true);

        Operand exValuePtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveValueOffset()));
        Operand exValue = size switch
        {
            0 => context.Load8(exValuePtr),
            1 => context.Load16(exValuePtr),
            2 => context.Load(OperandType.I32, exValuePtr),
            3 => context.Load(OperandType.I64, exValuePtr),
            _ => context.Load(OperandType.V128, exValuePtr)
        };

        Operand currValue = size switch
        {
            0 => context.CompareAndSwap8(physAddr, exValue, value),
            1 => context.CompareAndSwap16(physAddr, exValue, value),
            _ => context.CompareAndSwap(physAddr, exValue, value)
        };

        // STEP 4: Check if we succeeded by comparing expected and in-memory values.
        Operand storeFailed;

        if (size == 4)
        {
            Operand currValueLow = context.VectorExtract(OperandType.I64, currValue, 0);
            Operand currValueHigh = context.VectorExtract(OperandType.I64, currValue, 1);

            Operand exValueLow = context.VectorExtract(OperandType.I64, exValue, 0);
            Operand exValueHigh = context.VectorExtract(OperandType.I64, exValue, 1);

            storeFailed = context.BitwiseOr(
                context.ICompareNotEqual(currValueLow, exValueLow),
                context.ICompareNotEqual(currValueHigh, exValueHigh));
        }
        else
        {
            storeFailed = context.ICompareNotEqual(currValue, exValue);
        }

        SetRs(storeFailed);

        context.MarkLabel(lblExit);
    }
    else
    {
        InstEmitMemoryHelper.EmitWriteIntAligned(context, address, value, size);
    }
}
public static void EmitStoreExclusive(
    ArmEmitterContext context,
    Operand address,
    Operand value,
    bool exclusive,
    int size,
    int rs,
    bool a32)
{
    if (size < 3)
    {
        value = context.ConvertI64ToI32(value);
    }

    if (exclusive)
    {
        // We overwrite one of the registers (Rs), so keep a copy of the values to ensure we are working
        // with the correct ones.
        address = context.Copy(address);
        value = context.Copy(value);

        void SetRs(Operand value)
        {
            if (a32)
            {
                SetIntA32(context, rs, value);
            }
            else
            {
                SetIntOrZR(context, rs, value);
            }
        }

        Operand arg0 = context.LoadArgument(OperandType.I64, 0);

        Operand exAddrPtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveAddressOffset()));
        Operand exAddr = context.Load(address.Type, exAddrPtr);

        // STEP 1: Check if we have exclusive access to this memory region. If not, fail and skip store.
        Operand maskedAddress = context.BitwiseAnd(address, Const(address.Type, GetExclusiveAddressMask()));

        Operand exFailed = context.ICompareNotEqual(exAddr, maskedAddress);

        Operand lblExit = Label();

        SetRs(Const(1));

        context.BranchIfTrue(lblExit, exFailed);

        // STEP 2: We have exclusive access and the address is valid, attempt the store using CAS.
        Operand physAddr = InstEmitMemoryHelper.EmitPtPointerLoad(context, address, null, write: true, size);

        Operand exValuePtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveValueOffset()));
        Operand exValue = size switch
        {
            0 => context.Load8(exValuePtr),
            1 => context.Load16(exValuePtr),
            2 => context.Load(OperandType.I32, exValuePtr),
            3 => context.Load(OperandType.I64, exValuePtr),
            _ => context.Load(OperandType.V128, exValuePtr)
        };

        Operand currValue = size switch
        {
            0 => context.CompareAndSwap8(physAddr, exValue, value),
            1 => context.CompareAndSwap16(physAddr, exValue, value),
            _ => context.CompareAndSwap(physAddr, exValue, value)
        };

        // STEP 3: Check if we succeeded by comparing expected and in-memory values.
        Operand storeFailed;

        if (size == 4)
        {
            Operand currValueLow = context.VectorExtract(OperandType.I64, currValue, 0);
            Operand currValueHigh = context.VectorExtract(OperandType.I64, currValue, 1);

            Operand exValueLow = context.VectorExtract(OperandType.I64, exValue, 0);
            Operand exValueHigh = context.VectorExtract(OperandType.I64, exValue, 1);

            storeFailed = context.BitwiseOr(
                context.ICompareNotEqual(currValueLow, exValueLow),
                context.ICompareNotEqual(currValueHigh, exValueHigh));
        }
        else
        {
            storeFailed = context.ICompareNotEqual(currValue, exValue);
        }

        SetRs(storeFailed);

        context.MarkLabel(lblExit);
    }
    else
    {
        InstEmitMemoryHelper.EmitWriteIntAligned(context, address, value, size);
    }
}
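// Illustrative sketch (not part of the original source): the exclusive-store semantics the emitted code
// implements, written as plain C# over a 64-bit location. The monitor fields mirror the NativeContext
// exclusive address/value slots; the class name and the mask value are assumptions for the example.
private sealed class ExclusiveMonitorSketch
{
    private const long ExclusiveAddressMask = ~0xFL; // Example 16-byte exclusive granule.

    private long _exAddress = -1L; // Address tagged by the last load-exclusive (masked).
    private long _exValue;         // Value observed by the last load-exclusive.

    public void LoadExclusive(ref long location, long address)
    {
        _exAddress = address & ExclusiveAddressMask;
        _exValue = System.Threading.Volatile.Read(ref location);
    }

    // Returns 0 on success and 1 on failure, matching what the emitter writes back into Rs.
    public int StoreExclusive(ref long location, long address, long value)
    {
        if ((address & ExclusiveAddressMask) != _exAddress)
        {
            return 1; // We never had (or have lost) exclusive access to this region.
        }

        // Attempt the store only if the location still holds the value the load-exclusive observed.
        long observed = System.Threading.Interlocked.CompareExchange(ref location, value, _exValue);

        return observed == _exValue ? 0 : 1;
    }
}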