Code example #1
        private static void InvokeSecondPass(ref ExInfo exInfo, uint idxStart, uint idxLimit)
        {
            EHEnum ehEnum;
            byte * pbMethodStartAddress;

            if (!InternalCalls.RhpEHEnumInitFromStackFrameIterator(ref exInfo._frameIter, &pbMethodStartAddress, &ehEnum))
            {
                return;
            }

            byte *pbControlPC = exInfo._frameIter.ControlPC;

            uint codeOffset = (uint)(pbControlPC - pbMethodStartAddress);

            uint lastTryStart = 0, lastTryEnd = 0;

            // Search the clauses for one that contains the current offset.
            RhEHClause ehClause;

            for (uint curIdx = 0; InternalCalls.RhpEHEnumNext(&ehEnum, &ehClause) && curIdx < idxLimit; curIdx++)
            {
                //
                // Skip to the starting try region.  This is used by collided unwinds and rethrows to pick up where
                // the previous dispatch left off.
                //
                if (idxStart != MaxTryRegionIdx)
                {
                    if (curIdx <= idxStart)
                    {
                        lastTryStart = ehClause._tryStartOffset; lastTryEnd = ehClause._tryEndOffset;
                        continue;
                    }

                    // Now, we continue skipping while the try region is identical to the one that invoked the
                    // previous dispatch.
                    if ((ehClause._tryStartOffset == lastTryStart) && (ehClause._tryEndOffset == lastTryEnd))
                    {
                        continue;
                    }

                    // We are done skipping. This is required to handle empty finally block markers that are used
                    // to separate runs of different try blocks with same native code offsets.
                    idxStart = MaxTryRegionIdx;
                }

                RhEHClauseKind clauseKind = ehClause._clauseKind;

                if ((clauseKind != RhEHClauseKind.RH_EH_CLAUSE_FAULT) ||
                    !ehClause.ContainsCodeOffset(codeOffset))
                {
                    continue;
                }

                // Found a containing clause. Because of the order of the clauses, we know this is the
                // most containing.

                // N.B. -- We need to suppress GC "in-between" calls to finallys in this loop because we do
                // not have the correct next-execution point live on the stack and, therefore, may cause a GC
                // hole if we allow a GC between invocation of finally funclets (i.e. after one has returned
                // here to the dispatcher, but before the next one is invoked).  Once they are running, it's
                // fine for them to trigger a GC, obviously.
                //
                // As a result, RhpCallFinallyFunclet will set this state in the runtime upon return from the
                // funclet, and we need to reset it if/when we fall out of the loop and we know that the
                // method will no longer get any more GC callbacks.

                byte *pFinallyHandler = ehClause._handlerAddress;
                exInfo._idxCurClause = curIdx;
                InternalCalls.RhpCallFinallyFunclet(pFinallyHandler, exInfo._frameIter.RegisterSet);
                exInfo._idxCurClause = MaxTryRegionIdx;
            }
        }
Code example #2
 // Retrieve the offset of the value embedded in a Nullable<T>.
 internal unsafe byte GetNullableValueOffset()
 {
     fixed(EEType *pThis = &this)
     return(InternalCalls.RhpGetNullableEETypeValueOffset(pThis));
 }
Code example #3
 internal IntPtr GetSealedVirtualSlot(ushort index)
 {
     fixed(EEType *pThis = &this)
     return(InternalCalls.RhpGetSealedVirtualSlot(pThis, index));
 }
Code example #4
        private static bool FindImplSlotInSimpleMap(EEType *pTgtType,
                                                    EEType *pItfType,
                                                    UInt32 itfSlotNumber,
                                                    UInt16 *pImplSlotNumber,
                                                    bool actuallyCheckVariance)
        {
            Debug.Assert(pTgtType->HasDispatchMap, "Missing dispatch map");

            EEType *         pItfOpenGenericType = null;
            EETypeRef *      pItfInstantiation   = null;
            int              itfArity            = 0;
            GenericVariance *pItfVarianceInfo    = null;

            bool fCheckVariance   = false;
            bool fArrayCovariance = false;

            if (actuallyCheckVariance)
            {
                fCheckVariance   = pItfType->HasGenericVariance;
                fArrayCovariance = pTgtType->IsArray;

                // Non-arrays can follow array variance rules iff
                // 1. They have one generic parameter
                // 2. That generic parameter is array covariant.
                //
                // This special case is to allow array enumerators to work
                if (!fArrayCovariance && pTgtType->HasGenericVariance)
                {
                    EETypeRef *      pTgtInstantiation;
                    int              tgtEntryArity;
                    GenericVariance *pTgtVarianceInfo;
                    EEType *         pTgtEntryGenericType = InternalCalls.RhGetGenericInstantiation(pTgtType,
                                                                                                    &tgtEntryArity,
                                                                                                    &pTgtInstantiation,
                                                                                                    &pTgtVarianceInfo);

                    if ((tgtEntryArity == 1) && pTgtVarianceInfo[0] == GenericVariance.ArrayCovariant)
                    {
                        fArrayCovariance = true;
                    }
                }

                // Arrays are covariant even though you can both get and set elements (type safety is maintained by
                // runtime type checks during set operations). This extends to generic interfaces implemented on those
                // arrays. We handle this by forcing all generic interfaces on arrays to behave as though they were
                // covariant (over their one type parameter corresponding to the array element type).
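                // Illustration (not from this method): the covariance being modeled is ordinary C#
                // array covariance, e.g.
                //     string[] strings = new string[1];
                //     object[] objects = strings;     // allowed: arrays are covariant
                //     objects[0] = new object();      // throws ArrayTypeMismatchException at run time
                // and it is extended here to the generic interfaces (IList<T>, IEnumerable<T>, ...)
                // that arrays implement over their element type.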
                if (fArrayCovariance && pItfType->IsGeneric)
                {
                    fCheckVariance = true;
                }

                // If there is no variance checking, there is no operation to perform. (The non-variance check loop
                // has already completed)
                if (!fCheckVariance)
                {
                    return(false);
                }
            }

            DispatchMap *     pMap = pTgtType->DispatchMap;
            DispatchMapEntry *i    = &pMap->_dispatchMap;
            DispatchMapEntry *iEnd = (&pMap->_dispatchMap) + pMap->_entryCount;

            for (; i != iEnd; ++i)
            {
                if (i->_usInterfaceMethodSlot == itfSlotNumber)
                {
                    EEType *pCurEntryType =
                        pTgtType->InterfaceMap[i->_usInterfaceIndex].InterfaceType;

                    if (pCurEntryType->IsCloned)
                    {
                        pCurEntryType = pCurEntryType->CanonicalEEType;
                    }

                    if (pCurEntryType == pItfType)
                    {
                        *pImplSlotNumber = i->_usImplMethodSlot;
                        return(true);
                    }
                    else if (fCheckVariance && ((fArrayCovariance && pCurEntryType->IsGeneric) || pCurEntryType->HasGenericVariance))
                    {
                        // Interface types don't match exactly but both the target interface and the current interface
                        // in the map are marked as being generic with at least one co- or contra- variant type
                        // parameter. So we might still have a compatible match.

                        // Retrieve the unified generic instance for the callsite interface if we haven't already (we
                        // lazily get this then cache the result since the lookup isn't necessarily cheap).
                        if (pItfOpenGenericType == null)
                        {
                            pItfOpenGenericType = InternalCalls.RhGetGenericInstantiation(pItfType,
                                                                                          &itfArity,
                                                                                          &pItfInstantiation,
                                                                                          &pItfVarianceInfo);
                        }

                        // Retrieve the unified generic instance for the interface we're looking at in the map.
                        // Grab instantiation details for the candidate interface.
                        EETypeRef *      pCurEntryInstantiation;
                        int              curEntryArity;
                        GenericVariance *pCurEntryVarianceInfo;
                        EEType *         pCurEntryGenericType = InternalCalls.RhGetGenericInstantiation(pCurEntryType,
                                                                                                        &curEntryArity,
                                                                                                        &pCurEntryInstantiation,
                                                                                                        &pCurEntryVarianceInfo);

                        // If the generic types aren't the same then the types aren't compatible.
                        if (pItfOpenGenericType != pCurEntryGenericType)
                        {
                            continue;
                        }

                        // The types represent different instantiations of the same generic type. The
                        // arity of both had better be the same.
                        Debug.Assert(itfArity == curEntryArity, "arity mismatch between generic instantiations");

                        if (TypeCast.TypeParametersAreCompatible(itfArity, pCurEntryInstantiation, pItfInstantiation, pItfVarianceInfo, fArrayCovariance))
                        {
                            *pImplSlotNumber = i->_usImplMethodSlot;
                            return(true);
                        }
                    }
                }
            }

            return(false);
        }
Code example #5
 // Retrieve the value type T from a Nullable<T>.
 internal unsafe EEType *GetNullableType()
 {
     fixed(EEType *pThis = &this)
     return(InternalCalls.RhpGetNullableEEType(pThis));
 }
Code example #6
        private static void DispatchEx(ref StackFrameIterator frameIter, ref ExInfo exInfo, uint startIdx)
        {
            Debug.Assert(exInfo._passNumber == 1, "expected asm throw routine to set the pass");
            object exceptionObj = exInfo.ThrownException;

            // ------------------------------------------------
            //
            // First pass
            //
            // ------------------------------------------------
            UIntPtr handlingFrameSP      = MaxSP;
            byte *  pCatchHandler        = null;
            uint    catchingTryRegionIdx = MaxTryRegionIdx;

            bool isFirstRethrowFrame = (startIdx != MaxTryRegionIdx);
            bool isFirstFrame        = true;

            byte *  prevControlPC         = null;
            UIntPtr prevFramePtr          = UIntPtr.Zero;
            bool    unwoundReversePInvoke = false;

            bool isValid = frameIter.Init(exInfo._pExContext);

            Debug.Assert(isValid, "RhThrowEx called with an unexpected context");
            DebuggerNotify.BeginFirstPass(exceptionObj, frameIter.ControlPC, frameIter.SP);
            for (; isValid; isValid = frameIter.Next(out startIdx, out unwoundReversePInvoke))
            {
                // For GC stackwalking, we'll happily walk across native code blocks, but for EH dispatch, we
                // disallow dispatching exceptions across native code.
                if (unwoundReversePInvoke)
                {
                    break;
                }

                prevControlPC = frameIter.ControlPC;

                DebugScanCallFrame(exInfo._passNumber, frameIter.ControlPC, frameIter.SP);

                // A debugger can subscribe to get callbacks at a specific frame of exception dispatch.
                // exInfo._notifyDebuggerSP can be populated by the debugger from out of process
                // at any time.
                if (exInfo._notifyDebuggerSP == frameIter.SP)
                {
                    DebuggerNotify.FirstPassFrameEntered(exceptionObj, frameIter.ControlPC, frameIter.SP);
                }

                UpdateStackTrace(exceptionObj, ref exInfo, ref isFirstRethrowFrame, ref prevFramePtr, ref isFirstFrame);

                byte *pHandler;
                if (FindFirstPassHandler(exceptionObj, startIdx, ref frameIter,
                                         out catchingTryRegionIdx, out pHandler))
                {
                    handlingFrameSP = frameIter.SP;
                    pCatchHandler   = pHandler;

                    DebugVerifyHandlingFrame(handlingFrameSP);
                    break;
                }
            }
            DebuggerNotify.EndFirstPass(exceptionObj, pCatchHandler, handlingFrameSP);

            if (pCatchHandler == null)
            {
                UnhandledExceptionFailFastViaClasslib(
                    RhFailFastReason.PN_UnhandledException,
                    exceptionObj,
                    (IntPtr)prevControlPC, // IP of the last frame that did not handle the exception
                    ref exInfo);
            }

            // We FailFast above if the exception goes unhandled.  Therefore, we cannot run the second pass
            // without a catch handler.
            Debug.Assert(pCatchHandler != null, "We should have a handler if we're starting the second pass");

            DebuggerNotify.BeginSecondPass();
            // ------------------------------------------------
            //
            // Second pass
            //
            // ------------------------------------------------

            // Due to the stackwalker logic, we cannot tolerate triggering a GC from the dispatch code once we
            // are in the 2nd pass.  This is because the stackwalker applies a particular unwind semantic to
            // 'collapse' funclets which gets confused when we walk out of the dispatch code and encounter the
            // 'main body' without first encountering the funclet.  The thunks used to invoke 2nd-pass
            // funclets will always toggle this mode off before invoking them.
            InternalCalls.RhpSetThreadDoNotTriggerGC();

            exInfo._passNumber = 2;
            startIdx           = MaxTryRegionIdx;
            isValid            = frameIter.Init(exInfo._pExContext);
            for (; isValid && ((byte *)frameIter.SP <= (byte *)handlingFrameSP); isValid = frameIter.Next(out startIdx))
            {
                Debug.Assert(isValid, "second-pass EH unwind failed unexpectedly");
                DebugScanCallFrame(exInfo._passNumber, frameIter.ControlPC, frameIter.SP);

                if (frameIter.SP == handlingFrameSP)
                {
                    // invoke only a partial second-pass here...
                    InvokeSecondPass(ref exInfo, startIdx, catchingTryRegionIdx);
                    break;
                }

                InvokeSecondPass(ref exInfo, startIdx);
            }

            // ------------------------------------------------
            //
            // Call the handler and resume execution
            //
            // ------------------------------------------------
            exInfo._idxCurClause = catchingTryRegionIdx;
            InternalCalls.RhpCallCatchFunclet(
                exceptionObj, pCatchHandler, frameIter.RegisterSet, ref exInfo);
            // currently, RhpCallCatchFunclet will resume after the catch
            Debug.Assert(false, "unreachable");
            FallbackFailFast(RhFailFastReason.InternalError, null);
        }
Code example #7
File: ThunkPool.cs Project: wtgodbe/corert
        public unsafe static IntPtr GetNewThunksBlock()
        {
            IntPtr nextThunkMapBlock;

            // Check the most recently mapped thunks block. Each mapping consists of multiple
            // thunk stubs pages, and multiple thunk data pages (typically 8 pages of each in a single mapping)
            if (s_currentlyMappedThunkBlocksIndex < Constants.NumThunkBlocksPerMapping)
            {
                nextThunkMapBlock = s_currentlyMappedThunkBlocks[s_currentlyMappedThunkBlocksIndex++];
#if DEBUG
                s_currentlyMappedThunkBlocks[s_currentlyMappedThunkBlocksIndex - 1] = IntPtr.Zero;
                Debug.Assert(nextThunkMapBlock != IntPtr.Zero);
#endif
            }
            else
            {
                if (s_thunksTemplate == IntPtr.Zero)
                {
                    // First, we use the thunks directly from the thunks template sections in the module until all
                    // thunks in that template are used up.
                    s_thunksTemplate = nextThunkMapBlock = InternalCalls.RhpGetThunksBase();
                }
                else
                {
                    // Compute and cache the current module's base address and the RVA of the thunks template

                    if (s_thunksModuleBaseAddress == IntPtr.Zero)
                    {
                        EEType *pInstanceType = (new ThunkBlocks()).EEType;
                        s_thunksModuleBaseAddress = InternalCalls.RhGetModuleFromPointer(pInstanceType);

                        IntPtr thunkBase = InternalCalls.RhpGetThunksBase();
                        Debug.Assert(thunkBase != IntPtr.Zero);

                        s_thunksTemplateRva = (int)(((nuint)thunkBase) - ((nuint)s_thunksModuleBaseAddress));
                        Debug.Assert(s_thunksTemplateRva % (int)Constants.AllocationGranularity == 0);
                    }

                    // We've already used the thunks template in the module for some previous thunks, and we
                    // cannot reuse it here. Now we need to create a new mapping of the thunks section in order to have
                    // more thunks
                    nextThunkMapBlock = InternalCalls.RhAllocateThunksFromTemplate(
                        s_thunksModuleBaseAddress,
                        s_thunksTemplateRva,
                        (int)(Constants.NumThunkBlocksPerMapping * Constants.PageSize * 2));

                    if (nextThunkMapBlock == IntPtr.Zero)
                    {
                        // We either ran out of memory and can't do any more mappings of the thunks template sections,
                        // or we are using the managed runtime services fallback, which doesn't provide the
                        // file mapping feature (ex: older version of mrt100.dll, or no mrt100.dll at all).

                        // The only option is for the caller to attempt to recycle unused thunks to be able to
                        // find some free entries.

                        InternalCalls.RhpReleaseThunkPoolLock();

                        return(IntPtr.Zero);
                    }
                }

                // Each mapping consists of multiple blocks of thunk stubs/data pairs. Keep track of those
                // so that we do not create a new mapping until all blocks in the sections we just mapped are consumed
                for (int i = 0; i < Constants.NumThunkBlocksPerMapping; i++)
                {
                    s_currentlyMappedThunkBlocks[i] = nextThunkMapBlock + (int)(Constants.PageSize * i * 2);
                }
                s_currentlyMappedThunkBlocksIndex = 1;
            }

            Debug.Assert(nextThunkMapBlock != IntPtr.Zero);

            // Set up the thunks in the new block as a linked list of thunks.
            // Use the first data field of the thunk to build the linked list.
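            // Layout assumed by the loop below (inferred from the offsets used): the stubs page starts
            // at nextThunkMapBlock, the matching data page at nextThunkMapBlock + PageSize, and each
            // thunk owns two pointer-sized data cells: cell 0 holds the next-free-thunk link built
            // here, cell 1 holds a debug marker while the thunk is unallocated.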
            int    numThunksPerBlock = Constants.NumThunksPerBlock;
            IntPtr dataAddress       = nextThunkMapBlock + (int)Constants.PageSize;
            for (int i = 0; i < numThunksPerBlock; i++)
            {
                Debug.Assert(dataAddress == nextThunkMapBlock + (int)(Constants.PageSize + i * 2 * IntPtr.Size));

                if (i == (numThunksPerBlock - 1))
                {
                    *((IntPtr *)(dataAddress)) = IntPtr.Zero;
                }
                else
                {
                    *((IntPtr *)(dataAddress)) = dataAddress + 2 * IntPtr.Size;
                }

#if DEBUG
                // Debug flag in the second data cell indicating the thunk is not used
                *((IntPtr *)(dataAddress + IntPtr.Size)) = new IntPtr(-1);
#endif

                dataAddress += 2 * IntPtr.Size;
            }

            return(nextThunkMapBlock);
        }
Code example #8
        public static void RhThrowHwEx(uint exceptionCode, ref ExInfo exInfo)
        {
            // trigger a GC (only if gcstress) to ensure we can stackwalk at this point
            GCStress.TriggerGC();

            InternalCalls.RhpValidateExInfoStack();

            IntPtr       faultingCodeAddress = exInfo._pExContext->IP;
            bool         instructionFault    = true;
            ExceptionIDs exceptionId         = default(ExceptionIDs);
            Exception?   exceptionToThrow    = null;

            switch (exceptionCode)
            {
            case (uint)HwExceptionCode.STATUS_REDHAWK_NULL_REFERENCE:
                exceptionId = ExceptionIDs.NullReference;
                break;

            case (uint)HwExceptionCode.STATUS_REDHAWK_UNMANAGED_HELPER_NULL_REFERENCE:
                // The write barrier where the actual fault happened has been unwound already.
                // The IP of this fault needs to be treated as return address, not as IP of
                // faulting instruction.
                instructionFault = false;
                exceptionId      = ExceptionIDs.NullReference;
                break;

            case (uint)HwExceptionCode.STATUS_REDHAWK_THREAD_ABORT:
                exceptionToThrow = InternalCalls.RhpGetThreadAbortException();
                break;

            case (uint)HwExceptionCode.STATUS_DATATYPE_MISALIGNMENT:
                exceptionId = ExceptionIDs.DataMisaligned;
                break;

            // N.B. -- AVs that have a read/write address lower than 64k are already transformed to
            //         HwExceptionCode.REDHAWK_NULL_REFERENCE prior to calling this routine.
            case (uint)HwExceptionCode.STATUS_ACCESS_VIOLATION:
                exceptionId = ExceptionIDs.AccessViolation;
                break;

            case (uint)HwExceptionCode.STATUS_INTEGER_DIVIDE_BY_ZERO:
                exceptionId = ExceptionIDs.DivideByZero;
                break;

            case (uint)HwExceptionCode.STATUS_INTEGER_OVERFLOW:
                exceptionId = ExceptionIDs.Overflow;
                break;

            default:
                // We don't wrap SEH exceptions from foreign code like CLR does, so we believe that we
                // know the complete set of HW faults generated by managed code and do not need to handle
                // this case.
                FailFastViaClasslib(RhFailFastReason.InternalError, null, faultingCodeAddress);
                break;
            }

            if (exceptionId != default(ExceptionIDs))
            {
                exceptionToThrow = GetClasslibException(exceptionId, faultingCodeAddress);
            }

            exInfo.Init(exceptionToThrow!, instructionFault);
            DispatchEx(ref exInfo._frameIter, ref exInfo, MaxTryRegionIdx);
            FallbackFailFast(RhFailFastReason.InternalError, null);
        }
Code example #9
File: RuntimeExports.cs Project: tedm/corert
 public static int RhGetThunkSize()
 {
     return(InternalCalls.RhpGetThunkSize());
 }
Code example #10
File: TypeCast.cs Project: gitchomik/corert
        static internal unsafe bool ImplementsInterface(EEType *pObjType, EEType *pTargetType)
        {
            Debug.Assert(!pTargetType->IsParameterizedType, "did not expect parameterized type");
            Debug.Assert(pTargetType->IsInterface, "IsInstanceOfInterface called with non-interface EEType");

            // This can happen with generic interface types
            // Debug.Assert(!pTargetType->IsCloned, "cloned interface types are disallowed");

            // canonicalize target type
            if (pTargetType->IsCloned)
            {
                pTargetType = pTargetType->CanonicalEEType;
            }

            int numInterfaces             = pObjType->NumInterfaces;
            EEInterfaceInfo *interfaceMap = pObjType->InterfaceMap;

            for (int i = 0; i < numInterfaces; i++)
            {
                EEType *pInterfaceType = interfaceMap[i].InterfaceType;

                // canonicalize the interface type
                if (pInterfaceType->IsCloned)
                {
                    pInterfaceType = pInterfaceType->CanonicalEEType;
                }

                if (pInterfaceType == pTargetType)
                {
                    return(true);
                }
            }

            // We did not find the interface type in the list of supported interfaces. There's still one
            // chance left: if the target interface is generic and one or more of its type parameters is co or
            // contra variant then the object can still match if it implements a different instantiation of
            // the interface with type compatible generic arguments.
            //
            // Interfaces which are only variant for arrays have the HasGenericVariance flag set even if they
            // are not variant.
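            // Illustration (not from this method): this is the check that lets an object implementing
            // IEnumerable<string> satisfy a cast to IEnumerable<object>, because IEnumerable<out T> is
            // covariant and string is compatible with object.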
            bool fArrayCovariance = pObjType->IsArray;

            if (pTargetType->HasGenericVariance)
            {
                // Grab details about the instantiation of the target generic interface.
                EETypeRef *      pTargetInstantiation;
                int              targetArity;
                GenericVariance *pTargetVarianceInfo;
                EEType *         pTargetGenericType = InternalCalls.RhGetGenericInstantiation(pTargetType,
                                                                                              &targetArity,
                                                                                              &pTargetInstantiation,
                                                                                              &pTargetVarianceInfo);

                Debug.Assert(pTargetVarianceInfo != null, "did not expect empty variance info");


                for (int i = 0; i < numInterfaces; i++)
                {
                    EEType *pInterfaceType = interfaceMap[i].InterfaceType;

                    // We can ignore interfaces which are not also marked as having generic variance
                    // unless we're dealing with array covariance.
                    //
                    // Interfaces which are only variant for arrays have the HasGenericVariance flag set even if they
                    // are not variant.
                    if (pInterfaceType->HasGenericVariance)
                    {
                        // Grab instantiation details for the candidate interface.
                        EETypeRef *      pInterfaceInstantiation;
                        int              interfaceArity;
                        GenericVariance *pInterfaceVarianceInfo;
                        EEType *         pInterfaceGenericType = InternalCalls.RhGetGenericInstantiation(pInterfaceType,
                                                                                                         &interfaceArity,
                                                                                                         &pInterfaceInstantiation,
                                                                                                         &pInterfaceVarianceInfo);

                        Debug.Assert(pInterfaceVarianceInfo != null, "did not expect empty variance info");

                        // If the generic types aren't the same then the types aren't compatible.
                        if (pInterfaceGenericType != pTargetGenericType)
                        {
                            continue;
                        }

                        // The types represent different instantiations of the same generic type. The
                        // arity of both had better be the same.
                        Debug.Assert(targetArity == interfaceArity, "arity mismatch between generic instantiations");

                        // Compare the instantiations to see if they're compatible taking variance into account.
                        if (TypeParametersAreCompatible(targetArity,
                                                        pInterfaceInstantiation,
                                                        pTargetInstantiation,
                                                        pTargetVarianceInfo,
                                                        fArrayCovariance))
                        {
                            return(true);
                        }
                    }
                }
            }

            return(false);
        }
Code example #11
File: ThunkPool.cs Project: yongweisun/corert
        public static unsafe IntPtr GetNewThunksBlock()
        {
            IntPtr nextThunksBlock;

            // Check the most recently mapped thunks block. Each mapping consists of multiple
            // thunk stubs pages, and multiple thunk data pages (typically 8 pages of each in a single mapping)
            if (s_currentlyMappedThunkBlocksIndex < Constants.NumThunkBlocksPerMapping)
            {
                nextThunksBlock = s_currentlyMappedThunkBlocks[s_currentlyMappedThunkBlocksIndex++];
#if DEBUG
                s_currentlyMappedThunkBlocks[s_currentlyMappedThunkBlocksIndex - 1] = IntPtr.Zero;
                Debug.Assert(nextThunksBlock != IntPtr.Zero);
#endif
            }
            else
            {
                nextThunksBlock = InternalCalls.RhAllocateThunksMapping();

                if (nextThunksBlock == IntPtr.Zero)
                {
                    // We either ran out of memory and can't do any more mappings of the thunks template sections,
                    // or we are using the managed runtime services fallback, which doesn't provide the
                    // file mapping feature (ex: older version of mrt100.dll, or no mrt100.dll at all).

                    // The only option is for the caller to attempt to recycle unused thunks to be able to
                    // find some free entries.

                    return(IntPtr.Zero);
                }

                // Each mapping consists of multiple blocks of thunk stubs/data pairs. Keep track of those
                // so that we do not create a new mapping until all blocks in the sections we just mapped are consumed
                IntPtr currentThunksBlock = nextThunksBlock;
                for (int i = 0; i < Constants.NumThunkBlocksPerMapping; i++)
                {
                    s_currentlyMappedThunkBlocks[i] = currentThunksBlock;
                    currentThunksBlock = InternalCalls.RhpGetNextThunkStubsBlockAddress(currentThunksBlock);
                }
                s_currentlyMappedThunkBlocksIndex = 1;
            }

            Debug.Assert(nextThunksBlock != IntPtr.Zero);

            // Set up the thunks in the new block as a linked list of thunks.
            // Use the first data field of the thunk to build the linked list.
            IntPtr dataAddress = InternalCalls.RhpGetThunkDataBlockAddress(nextThunksBlock);

            for (int i = 0; i < Constants.NumThunksPerBlock; i++)
            {
                if (i == (Constants.NumThunksPerBlock - 1))
                {
                    *((IntPtr *)(dataAddress)) = IntPtr.Zero;
                }
                else
                {
                    *((IntPtr *)(dataAddress)) = dataAddress + Constants.ThunkDataSize;
                }

#if DEBUG
                // Debug flag in the second data cell indicating the thunk is not used
                *((IntPtr *)(dataAddress + IntPtr.Size)) = new IntPtr(-1);
#endif

                dataAddress += Constants.ThunkDataSize;
            }

            return(nextThunksBlock);
        }
Code example #12
        // TODO: temporary to try things out, when working look to see how to refactor with FindFirstPassHandler
        private static bool FindFirstPassHandlerWasm(object exception, uint idxStart, uint idxCurrentBlockStart /* the start IL idx of the current block for the landing pad, will use in place of PC */,
                                                     void *shadowStack, ref EHClauseIterator clauseIter, out uint tryRegionIdx, out byte *pHandler)
        {
            pHandler     = (byte *)0;
            tryRegionIdx = MaxTryRegionIdx;
            uint           lastTryStart = 0, lastTryEnd = 0;
            RhEHClauseWasm ehClause = new RhEHClauseWasm();

            for (uint curIdx = 0; clauseIter.Next(ref ehClause); curIdx++)
            {
                //
                // Skip to the starting try region.  This is used by collided unwinds and rethrows to pick up where
                // the previous dispatch left off.
                //
                if (idxStart != MaxTryRegionIdx)
                {
                    if (curIdx <= idxStart)
                    {
                        lastTryStart = ehClause._tryStartOffset;
                        lastTryEnd   = ehClause._tryEndOffset;
                        continue;
                    }

                    // Now, we continue skipping while the try region is identical to the one that invoked the
                    // previous dispatch.
                    if ((ehClause._tryStartOffset == lastTryStart) && (ehClause._tryEndOffset == lastTryEnd))
                    {
                        continue;
                    }

                    // We are done skipping. This is required to handle empty finally block markers that are used
                    // to separate runs of different try blocks with same native code offsets.
                    idxStart = MaxTryRegionIdx;
                }

                EHClauseIterator.RhEHClauseKindWasm clauseKind = ehClause._clauseKind;
                if (((clauseKind != EHClauseIterator.RhEHClauseKindWasm.RH_EH_CLAUSE_TYPED) &&
                     (clauseKind != EHClauseIterator.RhEHClauseKindWasm.RH_EH_CLAUSE_FILTER)) ||
                    !ehClause.ContainsCodeOffset(idxCurrentBlockStart))
                {
                    continue;
                }

                // Found a containing clause. Because of the order of the clauses, we know this is the
                // most containing.
                if (clauseKind == EHClauseIterator.RhEHClauseKindWasm.RH_EH_CLAUSE_TYPED)
                {
                    if (ShouldTypedClauseCatchThisException(exception, (EEType *)ehClause._typeSymbol))
                    {
                        pHandler     = ehClause._handlerAddress;
                        tryRegionIdx = curIdx;
                        return(true);
                    }
                }
                else
                {
                    tryRegionIdx = 0;
                    bool shouldInvokeHandler = InternalCalls.RhpCallFilterFunclet(exception, ehClause._filterAddress, shadowStack);
                    if (shouldInvokeHandler)
                    {
                        pHandler     = ehClause._handlerAddress;
                        tryRegionIdx = curIdx;
                        return(true);
                    }
                }
            }

            return(false);
        }
Code example #13
        internal static unsafe object GetCastableTargetIfPossible(ICastableObject castableObject, EEType *interfaceType, bool produceException, ref Exception exception)
        {
            CastableObjectCacheEntry <object>[] cache = Unsafe.As <CastableObject>(castableObject).Cache;

            object targetObjectInitial = null;

            if (cache != null)
            {
                targetObjectInitial = CacheLookup(cache, new IntPtr(interfaceType));
                if (targetObjectInitial != null)
                {
                    if (targetObjectInitial != s_castFailCanary)
                    {
                        return(targetObjectInitial);
                    }
                    else if (!produceException)
                    {
                        return(null);
                    }
                }
            }

            // Call into the object to determine if the runtime can perform the cast. This will return null if it fails.
            object targetObject = castableObject.CastToInterface(new EETypePtr(new IntPtr(interfaceType)), produceException, out exception);

            // If the target object is null, and that result has already been cached, just return null now.
            // Otherwise, we need to store the canary in the cache so future failing "is" checks can be fast
            if (targetObject == null)
            {
                if (targetObjectInitial != null)
                {
                    return(null);
                }
                else
                {
                    targetObject = s_castFailCanary;
                }
            }
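            // Note (inferred from the surrounding code): s_castFailCanary acts as a sentinel; caching it
            // records that this interface cast was already attempted and failed, so later non-throwing
            // lookups can return null quickly without calling CastToInterface again.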

            InternalCalls.RhpAcquireCastCacheLock();
            // Assuming we reach here, we should attempt to add the newly discovered targetObject to the per-object cache

            // First, check to see if something is already there

            // we may have replaced the cache object since the earlier acquisition in this method. Re-acquire the cache object
            // here.
            cache = Unsafe.As <CastableObject>(castableObject).Cache;
            object targetObjectInCache = null;

            if (cache != null)
            {
                targetObjectInCache = CacheLookup(cache, new IntPtr(interfaceType));
            }

            if (targetObjectInCache == null)
            {
                // If the target object still isn't in the cache by this point, add it now
                AddToCastableCache(castableObject, interfaceType, targetObject);
                targetObjectInCache = targetObject;
            }
            InternalCalls.RhpReleaseCastCacheLock();

            if (targetObjectInCache != s_castFailCanary)
            {
                return(targetObjectInCache);
            }
            else
            {
                return(null);
            }
        }
Code example #14
 internal bool Next(out uint uExCollideClauseIdx, out bool fUnwoundReversePInvoke)
 {
     return(InternalCalls.RhpSfiNext(ref this, out uExCollideClauseIdx, out fUnwoundReversePInvoke));
 }
Code example #15
        private static unsafe IntPtr RhResolveDispatchWorker(object pObject, void *cell, ref DispatchCellInfo cellInfo)
        {
            // Type of object we're dispatching on.
            EEType *pInstanceType = pObject.EEType;

            if (cellInfo.CellType == DispatchCellType.InterfaceAndSlot)
            {
                // Type whose DispatchMap is used. Usually the same as the above but for types which implement ICastable
                // we may repeat this process with an alternate type.
                EEType *pResolvingInstanceType = pInstanceType;

                IntPtr pTargetCode = DispatchResolve.FindInterfaceMethodImplementationTarget(pResolvingInstanceType,
                                                                                             cellInfo.InterfaceType.ToPointer(),
                                                                                             cellInfo.InterfaceSlot);

                if (pTargetCode == IntPtr.Zero && pInstanceType->IsICastable)
                {
                    // TODO!! BEGIN REMOVE THIS CODE WHEN WE REMOVE ICASTABLE
                    // Dispatch not resolved through normal dispatch map, try using the ICastable
                    // Call the ICastable.IsInstanceOfInterface method directly rather than via an interface
                    // dispatch since we know the method address statically. We ignore any cast error exception
                    // object passed back on failure (result == false) since IsInstanceOfInterface never throws.
                    IntPtr    pfnIsInstanceOfInterface = pInstanceType->ICastableIsInstanceOfInterfaceMethod;
                    Exception castError = null;
                    if (CalliIntrinsics.Call <bool>(pfnIsInstanceOfInterface, pObject, cellInfo.InterfaceType.ToPointer(), out castError))
                    {
                        IntPtr pfnGetImplTypeMethod = pInstanceType->ICastableGetImplTypeMethod;
                        pResolvingInstanceType = (EEType *)CalliIntrinsics.Call <IntPtr>(pfnGetImplTypeMethod, pObject, new IntPtr(cellInfo.InterfaceType.ToPointer()));
                        pTargetCode            = DispatchResolve.FindInterfaceMethodImplementationTarget(pResolvingInstanceType,
                                                                                                         cellInfo.InterfaceType.ToPointer(),
                                                                                                         cellInfo.InterfaceSlot);
                    }
                    else
                    // TODO!! END REMOVE THIS CODE WHEN WE REMOVE ICASTABLE
                    {
                        // Dispatch not resolved through normal dispatch map, using the CastableObject path
                        pTargetCode = InternalCalls.RhpGetCastableObjectDispatchHelper();
                    }
                }

                return(pTargetCode);
            }
            else if (cellInfo.CellType == DispatchCellType.VTableOffset)
            {
                // Dereference VTable
                return(*(IntPtr *)(((byte *)pInstanceType) + cellInfo.VTableOffset));
            }
            else
            {
#if SUPPORTS_NATIVE_METADATA_TYPE_LOADING_AND_SUPPORTS_TOKEN_BASED_DISPATCH_CELLS
                // Attempt to convert dispatch cell to non-metadata form if we haven't acquired a cache for this cell yet
                if (cellInfo.HasCache == 0)
                {
                    cellInfo = InternalTypeLoaderCalls.ConvertMetadataTokenDispatch(InternalCalls.RhGetModuleFromPointer(cell), cellInfo);
                    if (cellInfo.CellType != DispatchCellType.MetadataToken)
                    {
                        return(RhResolveDispatchWorker(pObject, cell, ref cellInfo));
                    }
                }

                // If that failed, go down the metadata resolution path
                return(InternalTypeLoaderCalls.ResolveMetadataTokenDispatch(InternalCalls.RhGetModuleFromPointer(cell), (int)cellInfo.MetadataToken, new IntPtr(pInstanceType)));
#else
                EH.FallbackFailFast(RhFailFastReason.InternalError, null);
                return(IntPtr.Zero);
#endif
            }
        }
Code example #16
File: StackFrameIterator.cs Project: z77ma/runtime
 internal bool Init(EH.PAL_LIMITED_CONTEXT *pStackwalkCtx, bool instructionFault = false)
 {
     return(InternalCalls.RhpSfiInit(ref this, pStackwalkCtx, instructionFault));
 }
Code example #17
 // This is a fail-fast function used by the runtime as a last resort that will terminate the process with
 // as little effort as possible. No guarantee is made about the semantics of this fail-fast.
 internal static void FallbackFailFast(RhFailFastReason reason, object unhandledException)
 {
     InternalCalls.RhpFallbackFailFast();
 }
Code example #18
File: StackFrameIterator.cs Project: z77ma/runtime
 internal bool Next(uint *uExCollideClauseIdx, bool *fUnwoundReversePInvoke)
 {
     return(InternalCalls.RhpSfiNext(ref this, uExCollideClauseIdx, fUnwoundReversePInvoke));
 }
Code example #19
        private static bool FindFirstPassHandler(object exception, uint idxStart,
                                                 ref StackFrameIterator frameIter, out uint tryRegionIdx, out byte *pHandler)
        {
            pHandler     = null;
            tryRegionIdx = MaxTryRegionIdx;

            EHEnum ehEnum;
            byte * pbMethodStartAddress;

            if (!InternalCalls.RhpEHEnumInitFromStackFrameIterator(ref frameIter, &pbMethodStartAddress, &ehEnum))
            {
                return(false);
            }

            byte *pbControlPC = frameIter.ControlPC;

            uint codeOffset = (uint)(pbControlPC - pbMethodStartAddress);

            uint lastTryStart = 0, lastTryEnd = 0;

            // Search the clauses for one that contains the current offset.
            RhEHClause ehClause;

            for (uint curIdx = 0; InternalCalls.RhpEHEnumNext(&ehEnum, &ehClause); curIdx++)
            {
                //
                // Skip to the starting try region.  This is used by collided unwinds and rethrows to pick up where
                // the previous dispatch left off.
                //
                if (idxStart != MaxTryRegionIdx)
                {
                    if (curIdx <= idxStart)
                    {
                        lastTryStart = ehClause._tryStartOffset; lastTryEnd = ehClause._tryEndOffset;
                        continue;
                    }

                    // Now, we continue skipping while the try region is identical to the one that invoked the
                    // previous dispatch.
                    if ((ehClause._tryStartOffset == lastTryStart) && (ehClause._tryEndOffset == lastTryEnd))
                    {
                        continue;
                    }

                    // We are done skipping. This is required to handle empty finally block markers that are used
                    // to separate runs of different try blocks with same native code offsets.
                    idxStart = MaxTryRegionIdx;
                }

                RhEHClauseKind clauseKind = ehClause._clauseKind;
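                // (Illustration, not from the original source: a TYPED clause corresponds to a C#
                // `catch (SomeException)` handler and a FILTER clause to `catch ... when (condition)`;
                // FAULT clauses cover finally/fault regions and are only run during the second pass.)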

                if (((clauseKind != RhEHClauseKind.RH_EH_CLAUSE_TYPED) &&
                     (clauseKind != RhEHClauseKind.RH_EH_CLAUSE_FILTER)) ||
                    !ehClause.ContainsCodeOffset(codeOffset))
                {
                    continue;
                }

                // Found a containing clause. Because of the order of the clauses, we know this is the
                // most containing.
                if (clauseKind == RhEHClauseKind.RH_EH_CLAUSE_TYPED)
                {
                    if (ShouldTypedClauseCatchThisException(exception, (EEType *)ehClause._pTargetType))
                    {
                        pHandler     = ehClause._handlerAddress;
                        tryRegionIdx = curIdx;
                        return(true);
                    }
                }
                else
                {
                    byte *pFilterFunclet      = ehClause._filterAddress;
                    bool  shouldInvokeHandler =
                        InternalCalls.RhpCallFilterFunclet(exception, pFilterFunclet, frameIter.RegisterSet);

                    if (shouldInvokeHandler)
                    {
                        pHandler     = ehClause._handlerAddress;
                        tryRegionIdx = curIdx;
                        return(true);
                    }
                }
            }

            return(false);
        }
Code example #20
File: StackFrameIterator.cs Project: wtgodbe/corert
 internal bool Init(EH.PAL_LIMITED_CONTEXT *pStackwalkCtx)
 {
     return(InternalCalls.RhpSfiInit(ref this, pStackwalkCtx));
 }