public static void UIntPtrArithmeticOverflow()
{
    var value = new UIntPtr(ulong.MaxValue);
    Throws<OverflowException>(() => value.AddChecked(new UIntPtr(1U)));

    value = new UIntPtr(0U);
    Throws<OverflowException>(() => value.SubtractChecked(new UIntPtr(1U)));
    Equal(new UIntPtr(ulong.MaxValue), value.Subtract(new UIntPtr(1U)));
}
public static void Main()
{
    int[] arr = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    UIntPtr ptr = (UIntPtr)arr[arr.GetUpperBound(0)];
    for (int ctr = 0; ctr <= arr.GetUpperBound(0); ctr++)
    {
        UIntPtr newPtr = UIntPtr.Subtract(ptr, ctr);
        Console.Write("{0} ", newPtr);
    }
}
public static void Subtract(UIntPtr ptr, int offset, ulong expected)
{
    UIntPtr p1 = UIntPtr.Subtract(ptr, offset);
    VerifyPointer(p1, expected);

    UIntPtr p2 = ptr - offset;
    VerifyPointer(p2, expected);

    UIntPtr p3 = ptr;
    p3 -= offset;
    VerifyPointer(p3, expected);
}
/// <summary>
/// Decrements the packet's reference count by one and destroys the packet if the count reaches zero.
/// </summary>
public void RemoveRef()
{
    ThrowIfNull();
    var newRefCount = UIntPtr.Subtract(m_Native->ReferenceCount, 1);
    if (newRefCount.ToUInt32() == 0)
    {
        Destroy();
    }
    else
    {
        m_Native->ReferenceCount = newRefCount;
    }
}
public static UIntPtr Subtract(UIntPtr pointer, int offset)
{
#if NET35
    switch (UIntPtr.Size)
    {
        case 4:
            return new UIntPtr(unchecked((uint)((int)pointer - offset)));
        case 8:
            return new UIntPtr(unchecked((ulong)((long)pointer - offset)));
        default:
            throw new NotSupportedException("Not supported platform");
    }
#else
    return UIntPtr.Subtract(pointer, offset);
#endif
}
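A minimal usage sketch for the shim above (not from the original source), assuming it is declared in a hypothetical static class named PtrCompat: on .NET 3.5 the manual switch on UIntPtr.Size runs, while on newer frameworks the call forwards straight to UIntPtr.Subtract, so callers see the same result either way.

using System;

static class PtrCompatDemo
{
    static void Main()
    {
        var p = new UIntPtr(0x2000);

        // PtrCompat is a hypothetical name for the class that holds the shim above.
        UIntPtr q = PtrCompat.Subtract(p, 0x10);

        Console.WriteLine("0x{0:X}", q.ToUInt64()); // 0x1FF0
    }
}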
public unsafe static void Log(byte* textUtf8, int textBytes, LogType type = LogType.Log)
{
    // We have already shut down, so don't try to use this.
    // This won't detect the case where we haven't yet initialized, but that's less likely to
    // happen thanks to the controlled init/shutdown sequence. We can't check for "0" because
    // Baselib_TLS_Alloc() may return slot 0.
    // This fixes an issue where a managed object's destructor wanted to log something
    // and tried to send that log over playerconnection.
    if (bufferTls.Data == UIntPtr.Subtract(UIntPtr.Zero, 1))
    {
        return;
    }

    var stream = MessageStream;
    stream->MessageBegin(EditorMessageIds.kLog);
    stream->WriteData((uint)type);
    stream->WriteRaw(textUtf8, textBytes);
    stream->MessageEnd();
}
/// <summary>
/// Subtracts an offset from the value of an unsigned pointer.
/// </summary>
/// <param name="pointer">The unsigned pointer to subtract the offset from.</param>
/// <param name="offset">The offset to subtract.</param>
/// <returns>A new unsigned pointer that reflects the subtraction of <paramref name="offset"/> from <paramref name="pointer"/>.</returns>
public static UIntPtr Subtract(this UIntPtr pointer, Int32 offset)
{
    return UIntPtr.Subtract(pointer, offset);
}
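A minimal usage sketch (not from the original source) for the extension method above, assuming its containing static class is in scope via a using directive: the extension form reads left to right and simply forwards to UIntPtr.Subtract.

using System;

static class ExtensionUsageDemo
{
    static void Main()
    {
        var ptr = new UIntPtr(0x1000);

        // Extension-method call syntax; forwards to UIntPtr.Subtract(ptr, 0x20).
        UIntPtr moved = ptr.Subtract(0x20);

        Console.WriteLine("0x{0:X}", moved.ToUInt64()); // 0xFE0
    }
}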
public static unsafe void TestBasics()
{
    UIntPtr p;
    uint i;
    ulong l;

    if (sizeof(void*) == 4)
    {
        // Skip UIntPtr tests on 32-bit platforms
        return;
    }

    int size = UIntPtr.Size;
    Assert.Equal(size, sizeof(void*));

    TestPointer(UIntPtr.Zero, 0);

    i = 42;
    TestPointer(new UIntPtr(i), i);
    TestPointer((UIntPtr)i, i);

    i = 42;
    TestPointer(new UIntPtr(i), i);

    l = 0x0fffffffffffffff;
    TestPointer(new UIntPtr(l), l);
    TestPointer((UIntPtr)l, l);

    void* pv = new UIntPtr(42).ToPointer();
    TestPointer(new UIntPtr(pv), 42);
    TestPointer((UIntPtr)pv, 42);

    p = UIntPtr.Add(new UIntPtr(42), 5);
    TestPointer(p, 42 + 5);

    // Add is expected NOT to generate an OverflowException
    p = UIntPtr.Add(new UIntPtr(0xffffffffffffffff), 5);
    unchecked
    {
        TestPointer(p, (long)0x0000000000000004);
    }

    p = UIntPtr.Subtract(new UIntPtr(42), 5);
    TestPointer(p, 42 - 5);

    bool b;
    p = new UIntPtr(42);
    b = p.Equals(null);
    Assert.False(b);
    b = p.Equals((object)42);
    Assert.False(b);
    b = p.Equals((object)(new UIntPtr(42)));
    Assert.True(b);

    int h = p.GetHashCode();
    int h2 = p.GetHashCode();
    Assert.Equal(h, h2);

    p = new UIntPtr(42);
    i = (uint)p;
    Assert.Equal(i, 42u);
    l = (ulong)p;
    Assert.Equal(l, 42u);

    UIntPtr p2;
    p2 = (UIntPtr)i;
    Assert.Equal(p, p2);
    p2 = (UIntPtr)l;
    Assert.Equal(p, p2);
    p2 = (UIntPtr)(p.ToPointer());
    Assert.Equal(p, p2);
    p2 = new UIntPtr(40) + 2;
    Assert.Equal(p, p2);
    p2 = new UIntPtr(44) - 2;
    Assert.Equal(p, p2);

    p = new UIntPtr(0x7fffffffffffffff);
    Assert.Throws<OverflowException>(() => (uint)p);
}
public unsafe static void Shutdown()
{
    Binding.Baselib_TLS_Free(bufferTls.Data);
    bufferTls.Data = UIntPtr.Subtract(UIntPtr.Zero, 1);
}
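A minimal sketch (not from the original source) of the sentinel used by the Shutdown/Log pair above: UIntPtr.Subtract performs unchecked arithmetic, so subtracting 1 from UIntPtr.Zero wraps to the all-ones value rather than throwing, and that value cannot be confused with a valid TLS handle of 0 returned by Baselib_TLS_Alloc().

using System;

static class SentinelDemo
{
    static void Main()
    {
        // Unchecked wrap-around: 0 - 1 becomes the all-ones pointer value.
        UIntPtr sentinel = UIntPtr.Subtract(UIntPtr.Zero, 1);

        // Prints 0xFFFFFFFF in a 32-bit process and 0xFFFFFFFFFFFFFFFF in a 64-bit process.
        Console.WriteLine("0x{0:X}", sentinel.ToUInt64());
    }
}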
private static void Scan(Process process, out List<VMChunkInfo> chunkInfos, out List<VMRegionInfo> mappingInfos, out UIntPtr addressLimit)
{
    IntPtr processHandle = process.Handle;
    MEMORY_BASIC_INFORMATION m = new MEMORY_BASIC_INFORMATION();
    chunkInfos = new List<VMChunkInfo>();

    const UInt64 GB = 1024 * 1024 * 1024;
    UInt64 maxRegionSize = (UInt64)2 * GB;
    UIntPtr memoryLimit;
    if (process.Is64BitProcess())
    {
        memoryLimit = (UIntPtr)((UInt64)6 * GB);
    }
    else
    {
        memoryLimit = UIntPtr.Subtract(UIntPtr.Zero, 1);
    }
    addressLimit = memoryLimit;

    // Use UIntPtr so that we can cope with addresses above 2GB in a /3GB or "4GT" environment, or 64-bit Windows
    UIntPtr address = (UIntPtr)0;
    while ((UInt64)address < (UInt64)memoryLimit)
    {
        int result = VirtualQueryEx(processHandle, address, out m, (uint)Marshal.SizeOf(m));
        if (0 == result || (UInt64)m.RegionSize > maxRegionSize)
        {
            // Record the 'end' of the address scale
            // (Expect 2GB in the case of a Win32 process running under 32-bit Windows, but may be
            // extended to up to 3GB if the OS is configured for "4 GT tuning" with the /3GB switch.
            // Expect 4GB in the case of a Win32 process running under 64-bit Windows.)
            addressLimit = address;
            break;
        }

        VMChunkInfo chunk = new VMChunkInfo();
        chunk.regionStartAddress = (UIntPtr)(UInt64)m.BaseAddress;
        chunk.regionSize = (UInt64)m.RegionSize;
        chunk.type = (PageType)m.Type;
        chunk.state = (PageState)m.State;

        if ((chunk.type == PageType.Image) || (chunk.type == PageType.Mapped))
        {
            // .NET 4 maps assemblies into memory using the memory-mapped file mechanism;
            // they don't show up in the Process.Modules list
            string fileName = GetMappedFileName(processHandle, chunk.regionStartAddress);
            if (fileName.Length > 0)
            {
                fileName = Path.GetFileName(fileName);
                chunk.regionName = fileName;
            }
        }
        chunkInfos.Add(chunk);

        UIntPtr oldAddress = address;
        // It's maddening, but UIntPtr.Add can't cope with 64-bit offsets under Win64!
        address = UIntPtr.Add(address, (int)m.RegionSize);
        if ((UInt64)address <= (UInt64)oldAddress)
        {
            addressLimit = oldAddress;
            break;
        }
    }

    mappingInfos = new List<VMRegionInfo>();
    try
    {
        foreach (ProcessModule module in process.Modules)
        {
            VMRegionInfo mappingInfo = new VMRegionInfo();
            mappingInfo.regionStartAddress = (UIntPtr)(UInt64)module.BaseAddress;
            mappingInfo.regionSize = (UInt64)module.ModuleMemorySize;
            mappingInfo.regionName = Path.GetFileName(module.FileName);
            mappingInfos.Add(mappingInfo);
        }
    }
    catch
    {
    }

    // Sort by address
    mappingInfos.Sort(delegate(VMRegionInfo map1, VMRegionInfo map2)
    {
        return Comparer<UInt64>.Default.Compare((UInt64)map1.regionStartAddress, (UInt64)map2.regionStartAddress);
    });
}
private UIntPtr FindFreeBlockForRegion(UIntPtr baseAddress, uint size)
{
    UIntPtr minAddress = UIntPtr.Subtract(baseAddress, 0x70000000);
    UIntPtr maxAddress = UIntPtr.Add(baseAddress, 0x70000000);

    UIntPtr ret = UIntPtr.Zero;
    UIntPtr tmpAddress = UIntPtr.Zero;

    GetSystemInfo(out SYSTEM_INFO si);

    if (mProc.Is64Bit)
    {
        if ((long)minAddress > (long)si.maximumApplicationAddress ||
            (long)minAddress < (long)si.minimumApplicationAddress)
        {
            minAddress = si.minimumApplicationAddress;
        }

        if ((long)maxAddress < (long)si.minimumApplicationAddress ||
            (long)maxAddress > (long)si.maximumApplicationAddress)
        {
            maxAddress = si.maximumApplicationAddress;
        }
    }
    else
    {
        minAddress = si.minimumApplicationAddress;
        maxAddress = si.maximumApplicationAddress;
    }

    MEMORY_BASIC_INFORMATION mbi;
    UIntPtr current = minAddress;
    UIntPtr previous = current;

    while (VirtualQueryEx(mProc.Handle, current, out mbi).ToUInt64() != 0)
    {
        if ((long)mbi.BaseAddress > (long)maxAddress)
        {
            return UIntPtr.Zero; // No memory found, let Windows handle it
        }

        if (mbi.State == MEM_FREE && mbi.RegionSize > size)
        {
            if ((long)mbi.BaseAddress % si.allocationGranularity > 0)
            {
                // The whole size cannot be used
                tmpAddress = mbi.BaseAddress;
                int offset = (int)(si.allocationGranularity - ((long)tmpAddress % si.allocationGranularity));

                // Check if there is enough left
                if ((mbi.RegionSize - offset) >= size)
                {
                    // Yup, there is enough
                    tmpAddress = UIntPtr.Add(tmpAddress, offset);

                    if ((long)tmpAddress < (long)baseAddress)
                    {
                        tmpAddress = UIntPtr.Add(tmpAddress, (int)(mbi.RegionSize - offset - size));

                        if ((long)tmpAddress > (long)baseAddress)
                        {
                            tmpAddress = baseAddress;
                        }

                        // Decrease tmpAddress until it's aligned properly
                        tmpAddress = UIntPtr.Subtract(tmpAddress, (int)((long)tmpAddress % si.allocationGranularity));
                    }

                    // If the difference is closer then use that
                    if (Math.Abs((long)tmpAddress - (long)baseAddress) < Math.Abs((long)ret - (long)baseAddress))
                    {
                        ret = tmpAddress;
                    }
                }
            }
            else
            {
                tmpAddress = mbi.BaseAddress;

                if ((long)tmpAddress < (long)baseAddress)
                {
                    // Try to get it the closest possible
                    // (so to the end of the region minus size,
                    // aligned by system allocation granularity)
                    tmpAddress = UIntPtr.Add(tmpAddress, (int)(mbi.RegionSize - size));

                    if ((long)tmpAddress > (long)baseAddress)
                    {
                        tmpAddress = baseAddress;
                    }

                    // Decrease until aligned properly
                    tmpAddress = UIntPtr.Subtract(tmpAddress, (int)((long)tmpAddress % si.allocationGranularity));
                }

                if (Math.Abs((long)tmpAddress - (long)baseAddress) < Math.Abs((long)ret - (long)baseAddress))
                {
                    ret = tmpAddress;
                }
            }
        }

        if (mbi.RegionSize % si.allocationGranularity > 0)
        {
            mbi.RegionSize += si.allocationGranularity - (mbi.RegionSize % si.allocationGranularity);
        }

        previous = current;
        current = new UIntPtr((ulong)mbi.BaseAddress + (ulong)mbi.RegionSize);

        if ((long)current >= (long)maxAddress)
        {
            return ret;
        }

        if ((long)previous >= (long)current)
        {
            return ret; // Overflow
        }
    }

    return ret;
}