/// <summary>Performs basic verification on a map.</summary>
/// <param name="mmf">The map.</param>
/// <param name="expectedCapacity">The capacity that was specified to create the map.</param>
/// <param name="expectedAccess">The access specified to create the map.</param>
/// <param name="expectedInheritability">The inheritability specified to create the map.</param>
protected static void ValidateMemoryMappedFile(MemoryMappedFile mmf, long expectedCapacity, MemoryMappedFileAccess expectedAccess = MemoryMappedFileAccess.ReadWrite, HandleInheritability expectedInheritability = HandleInheritability.None)
{
    // The map object must exist and expose a stable, open, valid handle.
    Assert.NotNull(mmf);
    Assert.NotNull(mmf.SafeMemoryMappedFileHandle);
    Assert.Same(mmf.SafeMemoryMappedFileHandle, mmf.SafeMemoryMappedFileHandle);
    Assert.False(mmf.SafeMemoryMappedFileHandle.IsClosed);
    Assert.False(mmf.SafeMemoryMappedFileHandle.IsInvalid);
    AssertInheritability(mmf.SafeMemoryMappedFileHandle, expectedInheritability);

    // Create and validate one or more views from the map, matching the
    // access the map was created with.
    bool canRead = IsReadable(expectedAccess);
    bool canWrite = IsWritable(expectedAccess);
    if (canRead && canWrite)
    {
        CreateAndValidateViews(mmf, expectedCapacity, MemoryMappedFileAccess.Read);
        CreateAndValidateViews(mmf, expectedCapacity, MemoryMappedFileAccess.Write);
        CreateAndValidateViews(mmf, expectedCapacity, MemoryMappedFileAccess.ReadWrite);
    }
    else if (canWrite)
    {
        CreateAndValidateViews(mmf, expectedCapacity, MemoryMappedFileAccess.Write);
    }
    else if (canRead)
    {
        CreateAndValidateViews(mmf, expectedCapacity, MemoryMappedFileAccess.Read);
    }
    else
    {
        // A map with no usable access must reject every kind of view.
        Assert.Throws<UnauthorizedAccessException>(() => mmf.CreateViewAccessor(0, expectedCapacity, MemoryMappedFileAccess.Read));
        Assert.Throws<UnauthorizedAccessException>(() => mmf.CreateViewAccessor(0, expectedCapacity, MemoryMappedFileAccess.Write));
        Assert.Throws<UnauthorizedAccessException>(() => mmf.CreateViewAccessor(0, expectedCapacity, MemoryMappedFileAccess.ReadWrite));
    }
}
// Writes `input` (UTF-8, followed by an 8-byte zero terminator the reader
// scans for) at the start of the shared map, serialized by the shared mutex.
public static void createMemFile(string input)
{
    m_mmf = MemoryMappedFile.CreateInMemoryMap(SharedMapName, MaxMapSize);
    // grab the mutex
    if (!m_mutex.WaitOne(5000, false))
    {
        // FIX: the original fell through after a failed acquisition and later
        // called ReleaseMutex() without owning the mutex, which throws
        // ApplicationException. Abandon the write instead (the original had
        // this `return` commented out).
        Debug.WriteLine("Unable to acquire mutex. create Abandoned");
        return;
    }
    try
    {
        // String to byte array, plus the 8-byte zero terminator.
        byte[] payload = Encoding.UTF8.GetBytes(input);
        byte[] terminator = new byte[8];
        var packet = new byte[payload.Length + terminator.Length];
        payload.CopyTo(packet, 0);
        terminator.CopyTo(packet, payload.Length);
        // Keep the local snapshot buffer in sync with what we write to the map.
        Buffer.BlockCopy(packet, 0, dataBuffer, 0, packet.Length);
        // write the packet at the start
        m_mmf.Seek(0, System.IO.SeekOrigin.Begin);
        m_mmf.Write(packet, 0, packet.Length);
    }
    finally
    {
        // release the mutex even if the write throws
        m_mutex.ReleaseMutex();
    }
}
// Benchmark: measures the cost of disposing anonymous maps of the given
// capacity. Allocation happens outside the measured region.
public void Dispose(int capacity)
{
    const int innerIterations = 1000;
    var files = new MemoryMappedFile[innerIterations];
    foreach (var iteration in Benchmark.Iterations)
    {
        // Set up: create the maps before the stopwatch starts.
        for (int index = 0; index < innerIterations; index++)
        {
            files[index] = MemoryMappedFile.CreateNew(null, capacity);
        }
        // Measure only the Dispose calls.
        using (iteration.StartMeasurement())
        {
            for (int index = 0; index < innerIterations; index++)
            {
                files[index].Dispose();
            }
        }
    }
}
// Opens the typed sensor-data memory-mapped file backed by "sensordata.bin"
// with a 1024-byte capacity. (MemoryMappedFile<T> is a project wrapper type.)
public static void StartMMF()
{
    const string filename = @"sensordata.bin";
    var mmf = new MemoryMappedFile<Data>(filename, 1024);
    mmf.Open();
}
// Process A: creates the shared map, writes the first flag while holding the
// mutex, then waits for processes B and C to append theirs before reading
// all three back.
// FIX: removed an unused `Person` local and an unused `BinaryFormatter`
// (insecure and removed from .NET 9) that were never referenced.
static void Main(string[] args)
{
    using (MemoryMappedFile mmf = MemoryMappedFile.CreateNew("testmap", 10000))
    {
        bool mutexCreated;
        // Own the mutex initially so B/C cannot touch the map before our first write.
        Mutex mutex = new Mutex(true, "testmapmutex", out mutexCreated);
        using (MemoryMappedViewStream stream = mmf.CreateViewStream())
        {
            BinaryWriter writer = new BinaryWriter(stream);
            // Write(1) emits a 4-byte little-endian int; the readers below
            // consume it byte-wise via ReadBoolean (first byte == 1 -> true).
            writer.Write(1);
        }
        //https://stackoverflow.com/questions/1446547/how-to-convert-an-object-to-a-byte-array-in-c-sharp
        //https://www.red-gate.com/simple-talk/dotnet/net-development/sharing-caring-using-memory-mapped-files-net/
        mutex.ReleaseMutex();
        Console.WriteLine("Start Process B and press ENTER to continue.");
        Console.ReadLine();
        Console.WriteLine("Start Process C and press ENTER to continue.");
        Console.ReadLine();
        mutex.WaitOne();
        using (MemoryMappedViewStream stream = mmf.CreateViewStream())
        {
            BinaryReader reader = new BinaryReader(stream);
            Console.WriteLine("Process A says: {0}", reader.ReadBoolean());
            Console.WriteLine("Process B says: {0}", reader.ReadBoolean());
            Console.WriteLine("Process C says: {0}", reader.ReadBoolean());
        }
        mutex.ReleaseMutex();
    }
}
// Maps a 512 MB window of a very large image file, starting 256 MB in, and
// brightens every pixel record in that window in place.
static void Main(string[] args)
{
    long offset = 0x10000000; // 256 megabytes
    long length = 0x20000000; // 512 megabytes

    // Create the memory-mapped file.
    using (var mmf = MemoryMappedFile.CreateFromFile(@"c:\ExtremelyLargeImage.data", FileMode.Open, "ImgA"))
    {
        // Random-access view covering bytes [offset, offset + length),
        // i.e. the 256th through the 768th megabyte.
        using (var view = mmf.CreateViewAccessor(offset, length))
        {
            int stride = Marshal.SizeOf(typeof(MyColor));
            // Walk the view one pixel record at a time, rewriting in place.
            for (long position = 0; position < length; position += stride)
            {
                MyColor pixel;
                view.Read(position, out pixel);
                pixel.Brighten(10);
                view.Write(position, ref pixel);
            }
        }
    }
}
// Reader side of a shared-memory demo: attaches to (or creates) the named
// map, then loops forever copying its contents to the console, guarded by a
// cross-process mutex.
private static void Main(string[] args)
{
    // Prompt (Chinese): "Enter the shared-memory public name (default: testmap):"
    Console.Write("请输入共享内存公用名(默认:testmap):");
    string shareName = Console.ReadLine();
    if (string.IsNullOrEmpty(shareName))
    {
        shareName = "testmap";
    }
    using (MemoryMappedFile mmf = MemoryMappedFile.CreateOrOpen(shareName, 1024, MemoryMappedFileAccess.ReadWrite))
    //using (MemoryMappedFile mmf = MemoryMappedFile.OpenExisting(shareName))
    {
        // Inter-process synchronization (original comment: 进程间同步).
        //Mutex mutex = Mutex.OpenExisting("testmapmutex");
        Mutex mutex = new Mutex(false, "testmapmutex");
        while (true)
        {
            char[] buffer = new char[1024];
            //Console.WriteLine("按【回车】读取共享内存数据:");
            //Console.ReadLine();
            //Thread.Sleep(1000);
            mutex.WaitOne();
            using (MemoryMappedViewStream stream = mmf.CreateViewStream())
            {
                // NOTE(review): BinaryReader.Read(char[], ...) decodes bytes to
                // chars with the reader's default (UTF-8) encoding — assumes the
                // writer used a compatible encoding; confirm against the writer.
                var reader = new BinaryReader(stream);
                reader.Read(buffer, 0, 1024);
                //Console.Write(buffer);
                //Console.WriteLine("stream lenght : " + mmf.CreateViewStream().Length.ToString());
            }
            mutex.ReleaseMutex();
            Console.WriteLine(buffer);
        }
    }
    //Console.ReadKey();
}
// Reads every "reading element" record exposed in HWiNFO's shared-memory
// region and merges it into the matching sensor's element table.
private static void ReadElements(MemoryMappedFile mmf, _HWiNFO_SHARED_MEM hWiNFOMemory)
{
    for (uint index = 0; index < hWiNFOMemory.NumReadingElements; ++index)
    {
        // Each element is a fixed-size record at
        // OffsetOfReadingSection + index * SizeOfReadingElement.
        using (var viewStream = mmf.CreateViewStream(hWiNFOMemory.OffsetOfReadingSection + index * hWiNFOMemory.SizeOfReadingElement, hWiNFOMemory.SizeOfReadingElement, MemoryMappedFileAccess.Read))
        {
            var buffer = new byte[(int)hWiNFOMemory.SizeOfReadingElement];
            viewStream.Read(buffer, 0, (int)hWiNFOMemory.SizeOfReadingElement);
            // Pin the managed buffer so its raw bytes can be reinterpreted as
            // the native _HWiNFO_ELEMENT layout, then unpin immediately.
            var gcHandle = GCHandle.Alloc(buffer, GCHandleType.Pinned);
            var structure = (_HWiNFO_ELEMENT)Marshal.PtrToStructure(gcHandle.AddrOfPinnedObject(), typeof(_HWiNFO_ELEMENT));
            gcHandle.Free();
            var sensor = FullSensorData[(int) structure.SensorIndex];
            // Key is unique per sensor id + instance + element id.
            var elementKey = sensor.SensorId + "-" + sensor.SensorInstance + "-" + structure.ElementId;
            var element = new ElementObj
            {
                ElementKey = elementKey,
                SensorType = structure.SensorType,
                ElementId = structure.ElementId,
                LabelOrig = structure.LabelOrig,
                LabelUser = structure.LabelUser,
                Unit = structure.Unit,
                NumericValue = (float)structure.Value,
                Value = NumberFormat(structure.SensorType, structure.Unit, structure.Value),
                ValueMin = NumberFormat(structure.SensorType, structure.Unit, structure.ValueMin),
                ValueMax = NumberFormat(structure.SensorType, structure.Unit, structure.ValueMax),
                ValueAvg = NumberFormat(structure.SensorType, structure.Unit, structure.ValueAvg)
            };
            // Upsert: overwrites any previous snapshot for this element.
            sensor.Elements[elementKey] = element;
        }
    }
}
// Reads up to (limit - offset) bytes from a view of the file at `path`,
// interprets each byte as a char, echoes the result to the console and
// returns it.
// FIX: the map and view stream were only closed on the success path; an
// exception mid-read leaked both. They are now disposed deterministically.
// NOTE(review): the view already starts AT `offset`, yet the loop counter
// also starts at `offset`, so only (limit - offset) bytes of the
// `limit`-byte view are read — preserved as-is; verify against callers.
public static string ReadInChunks(long offset, long limit, string path)
{
    StringBuilder resultAsString = new StringBuilder();
    using (MemoryMappedFile memoryMappedFile = MemoryMappedFile.CreateFromFile(path))
    using (MemoryMappedViewStream memoryMappedViewStream = memoryMappedFile.CreateViewStream(offset, limit, MemoryMappedFileAccess.Read))
    {
        for (long i = offset; i < limit; i++)
        {
            // Reads a byte from a stream and advances the position within the
            // stream by one byte, or returns -1 if at the end of the stream.
            int result = memoryMappedViewStream.ReadByte();
            if (result == -1)
            {
                break;
            }
            char letter = (char)result;
            resultAsString.Append(letter);
        }
    }
    Console.WriteLine(resultAsString.ToString());
    return resultAsString.ToString();
}
// Verifies that a view starting mid-page (offset 500) aliases the same
// physical bytes as a whole-file view.
// FIX: the map and both view accessors were never disposed (leaked handles
// and unflushed views); they are now wrapped in using blocks.
public void MapAtEdgeOfPage()
{
    // Fill the test file with two pages (8 KB) of 0xAA.
    using (var f = new FileStream(fname, FileMode.Open))
    {
        var b = new byte[4096];
        for (int i = 0; i < 4096; ++i)
        {
            b[i] = 0xAA;
        }
        for (int i = 0; i < 2; ++i)
        {
            f.Write(b, 0, 4096);
        }
    }
    using (var m0 = MemoryMappedFile.CreateFromFile(fname, FileMode.Open))
    using (var v0 = m0.CreateViewAccessor(500, 4096))     // view straddling a page boundary
    using (var v1 = m0.CreateViewAccessor(0, 4096 * 2))   // view of the whole file
    {
        for (int i = 0; i < 4096; ++i)
        {
            // Writes through the offset view must be visible at the same
            // physical position through the full view.
            Assert.AreEqual(0xAA, v1.ReadByte(i + 500));
            v0.Write(i, (byte)0xFF);
            Assert.AreEqual(0xFF, v1.ReadByte(i + 500));
        }
    }
}
// Establishes the two shared-memory channels (one per direction).
// The server creates "<pipeName>_receiver"/"<pipeName>_sender"; the client
// polls until they exist and attaches to them with the roles swapped, then
// both sides spin up reader/writer threads.
public async Task StartInternal(string pipeName, bool isServer)
{
    _cts = new CancellationTokenSource();
    if (isServer)
    {
        _receiver = MemoryMappedFile.CreateOrOpen(pipeName + "_receiver", MemoryMappedFileCapacity);
        _sender = MemoryMappedFile.CreateOrOpen(pipeName + "_sender", MemoryMappedFileCapacity);
    }
    else
    {
        while (true)
        {
            try
            {
                // Opposite direction from the server (original comment: サーバーと逆方向).
                _receiver = MemoryMappedFile.OpenExisting(pipeName + "_sender");
                _sender = MemoryMappedFile.OpenExisting(pipeName + "_receiver");
                break;
            }
            catch (System.IO.FileNotFoundException)
            {
                // Server has not created the maps yet; fall through and retry.
            }
            if (_cts.Token.IsCancellationRequested)
            {
                return;
            }
            // Poll every 100 ms until the server-side maps appear.
            await Task.Delay(100);
        }
    }
    _receiverAccessor = _receiver.CreateViewAccessor();
    _senderAccessor = _sender.CreateViewAccessor();
    new Thread(() => ReadThread()).Start();
    new Thread(() => WriteThread()).Start();
    IsConnected = true;
}
// Waits until the driver signals readiness, then creates (or attaches to)
// the shared memory region and opens a view stream over it. Failures are
// reported to the console rather than thrown.
public void CreateSharedMemory()
{
    //Before
    Console.WriteLine("Wait on the event. Load driver now!!!");
    //You can create custom event to say that your driver is loaded.
    SharedEvent_ready2read.WaitOne(); //here we use SharedEvent_ready2read to say that your driver was loaded.
    // Reset so the event can be reused for subsequent ready signals.
    SharedEvent_ready2read.Reset();
    // Map a view of the file mapping into the address space of the
    // current process.
    try
    {
        //Create Shared memory mapped file
        memoryMappedFile = MemoryMappedFile.CreateOrOpen(FullMapName, ViewSize, MemoryMappedFileAccess.ReadWrite);
        //Create View stream
        stream = memoryMappedFile.CreateViewStream();
        Console.WriteLine("Shared memory created.");
    }
    catch (Exception ex)
    {
        // Best-effort: report and continue; callers must check the fields.
        Console.WriteLine("CreateSharedMemory failed error: {0}", ex.Message);
    }
}
// Reads the shared map and returns its contents as a UTF-8 string, ending at
// the first run of 8 consecutive zero bytes (the terminator createMemFile
// appends). Returns "@ERROR" if the mutex cannot be acquired.
public static string readMemFile()
{
    m_mmf = MemoryMappedFile.CreateInMemoryMap(SharedMapName, MaxMapSize);
    // grab the mutex to prevent concurrency issues
    if (!m_mutex.WaitOne(1000, false))
    {
        Debug.WriteLine("Unable to acquire mutex. read Abandoned");
        return "@ERROR";
    }
    try
    {
        // read the whole map from the start
        m_mmf.Seek(0, System.IO.SeekOrigin.Begin);
        m_mmf.Read(dataBuffer, 0, MaxMapSize);
        // Find the 8-byte zero terminator.
        int i;
        for (i = 0; i < MaxMapSize; ++i)
        {
            if (dataBuffer[i] != 0)
            {
                continue;
            }
            // FIX: the original inspected dataBuffer[i + 1 .. i + 7] without a
            // bounds check and could throw IndexOutOfRangeException when the
            // zero byte sat within 7 bytes of the end of the buffer.
            if (i + 7 < MaxMapSize &&
                (dataBuffer[i + 1] | dataBuffer[i + 2] | dataBuffer[i + 3] | dataBuffer[i + 4] |
                 dataBuffer[i + 5] | dataBuffer[i + 6] | dataBuffer[i + 7]) == 0)
            {
                break;
            }
        }
        string received = Encoding.UTF8.GetString(dataBuffer, 0, i);
        Debug.WriteLine("Received: " + received);
        return received;
    }
    finally
    {
        // release the mutex so any other clients can receive
        m_mutex.ReleaseMutex();
    }
}
// Rolls the store over to a fresh extent: creates the next backing file (or
// an in-memory map when volatile), writes the negated new file id into the
// old extent as an end-of-file marker, closes the old extent, and re-points
// the view, start/free pointers and free-space accounting at the new one.
private void CreateNewExtent()
{
    int newFileId = this.fileId;
    this.fileId++;
    this.extentName = string.Format(FileNameFormat, this.fileName, newFileId);
    // create a new file first, just in case anybody is reading
    MemoryMappedFile newMMF;
    if (!this.IsVolatile)
    {
        this.extentName = System.IO.Path.Combine(this.path, this.extentName);
        // FileShare.Read lets concurrent readers keep old extents open during rollover.
        var file = File.Open(this.extentName, FileMode.Create, FileAccess.ReadWrite, FileShare.Read);
        newMMF = MemoryMappedFile.CreateFromFile(file, null, this.extentSize, MemoryMappedFileAccess.ReadWrite, HandleInheritability.Inheritable, false);
    }
    else
    {
        newMMF = MemoryMappedFile.CreateNew(this.extentName, this.extentSize);
    }
    // store the id of the new file in the old file and close it
    if (this.mappedFile != null)
    {
        *(int *)this.freePointer = -newFileId; // end of file marker
        this.CloseCurrent(false);
    }
    // re-initialize
    this.mappedFile = newMMF;
    // Length 0 maps the whole extent.
    this.view = this.mappedFile.CreateViewAccessor(0, 0, MemoryMappedFileAccess.ReadWrite);
    this.view.SafeMemoryMappedViewHandle.AcquirePointer(ref this.startPointer);
    // Reserve room for the trailing end-of-file marker written above.
    this.freeSpace = this.extentSize - sizeof(int);
    this.freePointer = this.startPointer;
    // Zero marker at the head of the fresh extent.
    *((uint *)this.freePointer) = 0;
}
///// <summary>
///// Get a working view for the current thread
///// </summary>
///// <param name="threadId"></param>
///// <returns></returns>
//public Stream GetView(int threadId)
//{
//    return Mmf.CreateViewStream();
//}

// Creates or attaches to the backing store for the chosen persistence mode,
// serialized by the given cross-process mutex.
private static MemoryMappedFile CreateOrOpenFile(string fileName, long capacity, PersistenceMode persistenceMode, Mutex mutex, bool forGrowth)
{
    try
    {
        mutex.WaitOne();
        switch (persistenceMode)
        {
            case PersistenceMode.TemporaryPersist:
                // we are first to the party since have created the mutex, will create new file instead of previous
                // we could avoid using semaphore here because mutex.Created = semaphore.Created
                // NOTE(review): `Created` is not a BCL Mutex member — presumably a
                // project wrapper over the `createdNew` flag; confirm its semantics.
                if (mutex.Created && !forGrowth)
                {
                    DeleteBackingFileIfExists(fileName);
                }
                goto case PersistenceMode.Persist;
            case PersistenceMode.Persist:
                var fileStream = new FileStream(fileName, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.ReadWrite);
                var mmfs = new MemoryMappedFileSecurity();
                // Never shrink an existing file: grow capacity to at least its current length.
                capacity = Math.Max(fileStream.Length, capacity);
                return(MemoryMappedFile.CreateFromFile(fileStream, Path.GetFileName(fileName), capacity, MemoryMappedFileAccess.ReadWrite, mmfs, HandleInheritability.Inheritable, false));
            case PersistenceMode.Ephemeral:
                // Purely in-memory named map; nothing touches disk.
                return(MemoryMappedFile.CreateOrOpen(Path.GetFileName(fileName), capacity, MemoryMappedFileAccess.ReadWrite));
            default:
                throw new ArgumentOutOfRangeException("persistenceMode");
        }
    }
    finally
    {
        mutex.ReleaseMutex();
    }
}
// Maps the whole PE file and hands PEReader a raw pointer into the view.
// System.Reflection.Metadata has heuristic that tries to save virtual address space. This heuristic does not work
// well for us since it can make IL access very slow (call to OS for each method IL query). We will map the file
// ourselves to get the desired performance characteristics reliably.
private unsafe static PEReader OpenPEFile(string filePath, out MemoryMappedViewAccessor mappedViewAccessor)
{
    MemoryMappedFile mappedFile = null;
    MemoryMappedViewAccessor accessor = null;
    try
    {
        mappedFile = MemoryMappedFile.CreateFromFile(filePath, FileMode.Open, null, 0, MemoryMappedFileAccess.Read);
        accessor = mappedFile.CreateViewAccessor(0, 0, MemoryMappedFileAccess.Read);
        var safeBuffer = accessor.SafeMemoryMappedViewHandle;
        var peReader = new PEReader((byte *)safeBuffer.DangerousGetHandle(), (int)safeBuffer.ByteLength);
        // MemoryMappedFile does not need to be kept around. MemoryMappedViewAccessor is enough.
        // Ownership of the accessor transfers to the caller; nulling the local
        // keeps the finally block from disposing the view the reader points into.
        mappedViewAccessor = accessor;
        accessor = null;
        return(peReader);
    }
    finally
    {
        // On failure (or after ownership transfer), release whatever remains acquired.
        if (accessor != null)
        {
            accessor.Dispose();
        }
        if (mappedFile != null)
        {
            mappedFile.Dispose();
        }
    }
}
// Opens a read view over [offset, offset + length) and feeds it to the
// configured reading delegate, translating every failure mode into an
// OperationStatus instead of letting exceptions escape.
public override OperationStatus Read(MemoryMappedFile file, long offset, long length)
{
    MemoryMappedViewStream stream;
    try
    {
        stream = file.CreateViewStream(offset, length);
    }
    catch (ArgumentOutOfRangeException)
    {
        // The requested range does not fit inside the map.
        return OperationStatus.RequestedLengthIsGreaterThanLogicalAddressSpace;
    }
    catch (IOException)
    {
        // The OS could not materialize a view that large.
        return OperationStatus.RequestedLengthIsGreaterThanVirtualAddressSpace;
    }

    using (stream)
    {
        try
        {
            _readingDelegate(stream);
            return OperationStatus.Completed;
        }
        catch (OperationCanceledException)
        {
            return OperationStatus.Cancelled;
        }
        catch
        {
            // Any other delegate failure is reported, not propagated.
            return OperationStatus.DelegateFailed;
        }
    }
}
// Publishes this window's Win32 class name in a small named memory-mapped
// file so other processes can discover it for IPC. Idempotent: does nothing
// if the accessor is already held.
public void CreateIPCClassNameMMF(IntPtr hWnd)
{
    if (ipcClassNameMMA != null)
    {
        return; // Already holding a handle to MMF file. No need to re-write the data
    }
    try
    {
        StringBuilder wndClassNameStr = new StringBuilder(128);
        // GetClassName is a Win32 P/Invoke; 0 means failure.
        if (GetClassName(hWnd, wndClassNameStr, wndClassNameStr.Capacity) != 0 && wndClassNameStr.Length > 0)
        {
            byte[] buffer = ASCIIEncoding.ASCII.GetBytes(wndClassNameStr.ToString());
            ipcClassNameMMF = MemoryMappedFile.CreateNew("DS4Windows_IPCClassName.dat", 128);
            ipcClassNameMMA = ipcClassNameMMF.CreateViewAccessor(0, buffer.Length);
            ipcClassNameMMA.WriteArray(0, buffer, 0, buffer.Length);
            // The MMF file is alive as long this process holds the file handle open
        }
    }
    catch (Exception)
    {
        /* Eat all exceptions because errors here are not fatal for DS4Win */
    }
}
/// <summary>Creates a memory-mapped file backed by the given path, opened with
/// read/write sharing so other processes can read and write the file too.</summary>
/// <param name="file">Path of the backing file; created (and grown to 1 KB) if missing or empty.</param>
/// <returns>The opened <see cref="MemoryMappedFile"/>, named after the file's base name.</returns>
public static MemoryMappedFile CreateFromFile(String file)
{
    file = file.GetFullPath();
    var name = Path.GetFileNameWithoutExtension(file);
    //var capacity = 4 * 1024 * 1024;
    //if (file.AsFile().Exists) capacity = 0;
    //_mmf = MemoryMappedFile.CreateFromFile(file, FileMode.OpenOrCreate, name, capacity, MemoryMappedFileAccess.ReadWrite);

    // Using a file stream here lets us control sharing, so other processes can
    // also read and write the file. (original comment: 使用文件流可以控制共享读写,让别的进程也可以读写文件)
    var fs = new FileStream(file, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.ReadWrite, 4096, FileOptions.RandomAccess);
    // A map cannot be created over a zero-length file; give it a minimal size.
    if (fs.Length == 0)
    {
        fs.SetLength(1024);
    }
#if __CORE__
    var _mmf = MemoryMappedFile.CreateFromFile(fs, name, 0, MemoryMappedFileAccess.ReadWrite, HandleInheritability.None, false);
#else
    var _mmf = MemoryMappedFile.CreateFromFile(fs, name, 0, MemoryMappedFileAccess.ReadWrite, null, HandleInheritability.None, false);
#endif
    return(_mmf);
}
// Attaches to the named shared-memory section if it already exists,
// otherwise creates it with the requested capacity.
private static MemoryMappedFile attachToMemoryMapedFile(int maxBytes, string name)
{
    var map = MemoryMappedFile.CreateOrOpen(name, maxBytes, MemoryMappedFileAccess.ReadWrite);
    return map;
}
// Extracts one "EX"-container (encrypted Appx/MSIX package or bundle) found
// at `offset` inside the mapped file into root_dir\dir, recursing into
// nested packages when the container is a bundle.
static void Extract(MemoryMappedFile mmap, string root_dir, string dir, long offset = 0)
{
    bool is_bundle = false;
    // Peek at the 6-byte prefix: 4-byte magic + 2-byte header size.
    using var header_view_0 = mmap.CreateViewAccessor(offset, 6);
    var magic = header_view_0.ReadInt32(0);
    if (magic == 0x48425845) // EXBH
    {
        is_bundle = true;
    }
    else if ((magic != 0x48505845) && // EXPH
             (magic != 0x48535845))   // EXSH ??
    {
        Console.WriteLine("\n invalid file\n");
        return;
    }
    var header_size = header_view_0.ReadUInt16(4);
    header_view_0.Dispose();

    // Map the full header and consume it sequentially via closures over `pos`.
    using var header_view = mmap.CreateViewAccessor(offset, header_size);
    long pos = 6;
    Func <Int64> ReadInt64 = () => { var ret = header_view.ReadInt64(pos); pos += 8; return(ret); };
    Func <Int32> ReadInt32 = () => { var ret = header_view.ReadInt32(pos); pos += 4; return(ret); };
    Func <Int16> ReadInt16 = () => { var ret = header_view.ReadInt16(pos); pos += 2; return(ret); };
    Func <int, byte[]> ReadBytes = count => { var data = new byte[count]; header_view.ReadArray(pos, data, 0, count); pos += count; return(data); };
    // Strings in the header are UTF-16.
    Func <int, string> ReadString = count => { return(System.Text.Encoding.Unicode.GetString(ReadBytes(count))); };

    // Header layout. See: https://pastebin.com/zH5tet0b
    var file_version = ReadInt64();
    var footer_offset = ReadInt64();
    var footer_length = ReadInt64();
    var file_count = ReadInt64();
    var sig_offset = ReadInt64();
    var sig_zipped = ReadInt16();
    var sig_length_orig = ReadInt32();
    var sig_length = ReadInt32();
    var coin_offset = ReadInt64();
    var coin_zipped = ReadInt16();
    var coin_length_origin = ReadInt32();
    var coin_length = ReadInt32();
    var block_map_file_ID = ReadInt64();
    var key_len = ReadInt32();
    var key_count = ReadInt16();
    var keys = new List <Guid>();
    for (int i = 0; i < key_len / 16; ++i)
    {
        keys.Add(new Guid(ReadBytes(16)));
    }
    var packname_str_len = ReadInt16();
    var packname_byte_len = ReadInt16();
    var packname = ReadString(packname_byte_len);
    var crypto_algo_len = ReadInt16();
    var crypto_algo = ReadString(crypto_algo_len);
    var diffusion_support_enabled = ReadInt16();
    var block_map_hash_method_len = ReadInt16();
    var block_map_hash_method = ReadString(block_map_hash_method_len);
    var block_map_hash_len = ReadInt16();
    var block_map_hash = ReadBytes(block_map_hash_len);
    // Sanity: true when the whole declared header was consumed.
    bool end = pos == header_size;

    // Dump signature and optional code-integrity catalog.
    // NOTE(review): sig_offset/coin_offset are NOT rebased by `offset`, unlike
    // the part reads below — verify this is correct for nested bundles.
    if ((sig_offset != 0) && (sig_length) != 0)
    {
        WriteFile(root_dir, Path.Combine(dir, "AppxSignature.p7x"),
            ReadData(mmap, sig_offset, sig_length, true)
        );
    }
    if ((coin_offset != 0) && (coin_length) != 0)
    {
        WriteFile(root_dir, Path.Combine(dir, "AppxMetadata\\CodeIntegrity.cat"),
            ReadData(mmap, coin_offset, coin_length, true)
        );
    }

    // The footer is a table of 40-byte part records.
    using var parts_acc = mmap.CreateViewAccessor(
        offset + footer_offset,
        footer_length
    );
    var parts = new List <Part>();
    for (var i = 0; i < (parts_acc.Capacity / 40); ++i)
    {
        long p_offset = i * 40;
        parts.Add(new Part()
        {
            id = parts_acc.ReadInt32(p_offset + 8),
            flags = parts_acc.ReadInt16(p_offset + 4),
            zipped = parts_acc.ReadInt16(p_offset + 6),
            pos = parts_acc.ReadInt64(p_offset + 16),
            len_orig = parts_acc.ReadInt64(p_offset + 24),
            len = parts_acc.ReadInt64(p_offset + 32),
            path = "part" + i.ToString() + ".dat", // placeholder until the block map names it
            isPackage = false,
        });
    }

    if (parts.Count > 0)
    {
        // The block map is assumed to be the last part; its XML assigns real
        // names to the other parts by id.
        var bmap_part = parts[parts.Count - 1]; // Sure ?
        bmap_part.path = "AppxBlockMap.xml";
        using var strm = ReadData(mmap, offset + bmap_part.pos, bmap_part.len, bmap_part.zipped == 1);
        var xml = new XmlDocument();
        xml.Load(strm);
        foreach (XmlElement elem in xml.DocumentElement.ChildNodes)
        {
            var id_str = elem.GetAttribute("Id");
            var name = elem.GetAttribute("Name");
            if (!(string.IsNullOrEmpty(id_str) || string.IsNullOrEmpty(name)))
            {
                var id = int.Parse(id_str, System.Globalization.NumberStyles.HexNumber);
                var part = parts.Find(p => p.id == id);
                if (part is object)
                {
                    part.path = name;
                }
            }
        }
    }

    if (is_bundle)
    {
        // Bundle manifest marks which parts are nested packages (matched by offset).
        var bman_part = parts.Find(p => p.path == "AppxMetadata\\AppxBundleManifest.xml");
        if (bman_part is object)
        {
            using var strm = ReadData(mmap, offset + bman_part.pos, bman_part.len,
                bman_part.zipped == 1
            );
            var xml = new XmlDocument();
            xml.Load(strm);
            foreach (XmlElement elem in xml.DocumentElement.ChildNodes)
            {
                if (elem.Name == "Packages")
                {
                    foreach (XmlElement elem_ in elem.ChildNodes)
                    {
                        var offset_str = elem_.GetAttribute("Offset");
                        var filename = elem_.GetAttribute("FileName");
                        if (!(string.IsNullOrEmpty(offset_str) || string.IsNullOrEmpty(filename)))
                        {
                            var offset_ = int.Parse(offset_str);
                            var part = parts.Find(p => p.pos == offset_);
                            if (part is object)
                            {
                                part.path = filename;
                                part.isPackage = true;
                            }
                        }
                    }
                }
            }
        }
    }

    // Materialize every part: recurse into nested packages, write the rest.
    foreach (var part in parts)
    {
        if (part.isPackage)
        {
            Extract(mmap, root_dir, Path.Combine(dir, Path.ChangeExtension(part.path, null)), part.pos);
        }
        else
        {
            WriteFile(root_dir, Path.Combine(dir, part.path),
                ReadData(mmap, offset + part.pos, part.len, part.zipped == 1),
                part.flags == 0);
        }
    }
}
// Returns a readable stream over `count` bytes of the map starting at
// `offset`; an empty in-memory stream when count is zero. When `unzip` is
// set, the bytes are wrapped in a raw-deflate decompressor.
static Stream ReadData(MemoryMappedFile mmap, long offset, long count, bool unzip = false)
{
    Stream raw;
    if (count > 0)
    {
        raw = mmap.CreateViewStream(offset, count);
    }
    else
    {
        // Zero-length views are not allowed; substitute an empty stream.
        raw = new MemoryStream(0);
    }
    if (unzip)
    {
        return new DeflateStream(raw, CompressionMode.Decompress);
    }
    return raw;
}
// A mapping name that nothing has created cannot be attached to.
public void OpenExistingWithNoExistingThrows()
{
    Assert.Throws<FileNotFoundException>(() => MemoryMappedFile.OpenExisting(MkNamedMapping()));
}
// A null path must be rejected with ArgumentNullException.
public void CreateFromFile_Null()
{
    AssertThrows<ArgumentNullException>(() => MemoryMappedFile.CreateFromFile(null));
}
// Creating a fresh named map must succeed.
// FIX: the created map was never disposed, leaking the handle (and, on some
// platforms, keeping the named section alive for the process lifetime).
public void CreateNew()
{
    // This must succeed
    using (MemoryMappedFile mmf = MemoryMappedFile.CreateNew(MkNamedMapping(), 8192))
    {
    }
}
// Backs the accessor with an anonymous (unnamed) map of the requested size
// and opens a full-range view over it.
public FileMemoryAccessor(long size)
{
    this.size = size;
    mmf = MemoryMappedFile.CreateNew(null, size);
    mmva = mmf.CreateViewAccessor();
}
/// <summary>
/// Attaches to an already-created named map and wraps it in a
/// reference-counted disposable before delegating to the main constructor.
/// </summary>
/// <param name="name">Name of the existing memory-mapped file to open.</param>
/// <param name="offset">Offset of this segment within the map.</param>
/// <param name="size">Size of this segment.</param>
public MemoryMappedInfo(string name, long offset, long size)
    : this(new ReferenceCountedDisposable <MemoryMappedFile>(MemoryMappedFile.OpenExisting(name)), name, offset, size)
{
}
// Values outside the HandleInheritability range must be rejected, naming the
// offending parameter.
public void InvalidArgument_Inheritability(HandleInheritability inheritability)
{
    AssertExtensions.Throws<ArgumentOutOfRangeException>("inheritability", () =>
        MemoryMappedFile.CreateOrOpen(CreateUniqueMapName(), 4096, MemoryMappedFileAccess.ReadWrite, MemoryMappedFileOptions.None, inheritability));
}
/// <summary>
/// Start speed controller: a background loop that, every
/// <c>_speedControllerInterval</c> ms, throttles on outstanding download
/// requests, retries timed-out ones, dequeues new work from the scheduler,
/// and honors an optional file-backed MMF exit signal.
/// </summary>
/// <returns>The task hosting the controller loop.</returns>
private Task StartSpeedControllerAsync()
{
    return(Task.Factory.StartNew(async() =>
    {
        bool @break = false;
        // Optional external kill switch: a 4-byte file-backed map polled each tick.
        var mmf = MmfSignal
            ? MemoryMappedFile.CreateFromFile(Path.Combine("mmf-signal", Id), FileMode.OpenOrCreate, null, 4, MemoryMappedFileAccess.ReadWrite)
            : null;
        using (var accessor = mmf?.CreateViewAccessor())
        {
            _logger.LogInformation($"Task {Id} speed controller starts");
            var paused = 0; // accumulated ms spent waiting on unanswered requests
            while (!@break)
            {
                Thread.Sleep(_speedControllerInterval);
                try
                {
                    switch (Status)
                    {
                        case Status.Running:
                        {
                            try
                            {
                                // Determine if too many download requests have not been answered
                                if (_enqueued.Value - _responded.Value > NonRespondedLimitation)
                                {
                                    if (paused > NonRespondedTimeLimitation)
                                    {
                                        _logger.LogInformation(
                                            $"Task {Id} {NonRespondedTimeLimitation} seconds did not receive a download response");
                                        @break = true;
                                        break;
                                    }
                                    paused += _speedControllerInterval;
                                    _logger.LogInformation($"Task {Id} Speed Controller has not been suspended due to excessive download requests");
                                    continue;
                                }
                                paused = 0;
                                // Retry timeout download request
                                var timeoutRequests = new List <Request>();
                                var now = DateTime.Now;
                                foreach (var kv in _enqueuedRequestDict)
                                {
                                    if (!((now - kv.Value.CreationTime).TotalSeconds > RespondedTimeout))
                                    {
                                        continue;
                                    }
                                    kv.Value.RetriedTimes++;
                                    // Give up entirely once any request exhausts its retries.
                                    if (kv.Value.RetriedTimes > RespondedTimeoutRetryTimes)
                                    {
                                        _logger.LogInformation(
                                            $"Task {Id} Retry Download Request {RespondedTimeoutRetryTimes} Not Received Download Response");
                                        @break = true;
                                        break;
                                    }
                                    timeoutRequests.Add(kv.Value);
                                }
                                // If there is a timeout download, try again, and no timeout download is taken from the dispatch queue.
                                if (timeoutRequests.Count > 0)
                                {
                                    await EnqueueRequests(timeoutRequests.ToArray());
                                }
                                else
                                {
                                    var requests = _scheduler.Dequeue(Id, _dequeueBatchCount);
                                    // Empty scheduler => nothing left to do; leave the Running case.
                                    if (requests == null || requests.Length == 0)
                                    {
                                        break;
                                    }
                                    foreach (var request in requests)
                                    {
                                        foreach (var configureRequestDelegate in _configureRequestDelegates)
                                        {
                                            configureRequestDelegate(request);
                                        }
                                    }
                                    await EnqueueRequests(requests);
                                }
                            }
                            catch (Exception e)
                            {
                                _logger.LogError($"Task {Id} speed controller failed: {e}");
                            }
                            break;
                        }
                        case Status.Paused:
                        {
                            _logger.LogDebug($"Task {Id} speed controller paused");
                            break;
                        }
                        case Status.Exiting:
                        case Status.Exited:
                        {
                            @break = true;
                            break;
                        }
                    }
                    // Byte 0 of the MMF acts as a boolean "please exit" flag.
                    if (!@break && accessor != null && accessor.ReadBoolean(0))
                    {
                        _logger.LogInformation($"Task {Id} received MMF exit signal");
                        Exit();
                    }
                }
                catch (Exception e)
                {
                    _logger.LogError($"Task {Id} speed controller failed: {e}");
                }
            }
        }
        _logger.LogInformation($"Task {Id} speed controller exit");
    }));
}
// Values outside the MemoryMappedFileOptions range must be rejected, naming
// the offending parameter.
public void InvalidArgument_Options(MemoryMappedFileOptions options)
{
    AssertExtensions.Throws<ArgumentOutOfRangeException>("options", () =>
        MemoryMappedFile.CreateOrOpen(CreateUniqueMapName(), 4096, MemoryMappedFileAccess.ReadWrite, options, HandleInheritability.None));
}
// Produces the DocumentText for a source file, using the project's own
// unsafe MemoryMappedFile wrapper (exposing a raw Buffer pointer — NOT
// System.IO.MemoryMappedFiles) as a fast path when the code page permits
// treating 1 byte as 1 char; otherwise falls back to ReadSourceText.
public virtual DocumentText CreateDocumentText(string fileName, CompilerResults results, CompilerParameters options, ErrorNodeList errorNodes, bool canUseMemoryMap){
#if !ROTOR
  if (canUseMemoryMap){
    int applicableCodePage = System.Threading.Thread.CurrentThread.CurrentCulture.TextInfo.ANSICodePage;
    CompilerOptions coptions = options as CompilerOptions;
    if (coptions != null && coptions.CodePage != null) applicableCodePage = (int)coptions.CodePage;
    int asciiCodePage = System.Globalization.CultureInfo.InvariantCulture.TextInfo.ANSICodePage;
    if (applicableCodePage == asciiCodePage){
      //If there is no unicode signature at the start of the file, it seems reasonably safe to assume that 1 byte == 1 char
      //In that case we can bypass the overhead of BCL file classes and use a memory mapped file instead.
      unsafe{
        try{
          MemoryMappedFile mmFile = new MemoryMappedFile(fileName);
          try{
            byte b0 = *mmFile.Buffer;
            if (b0 == 'M' && *(mmFile.Buffer+1) == 'Z'){
              //This is a binary file. Give an appropriate error.
              errorNodes.Add(this.CreateErrorNode(Error.IsBinaryFile, System.IO.Path.GetFullPath(fileName)));
              this.ProcessErrors(options, results, errorNodes);
              mmFile.Dispose();
              return null;
            }else if (b0 != 0xff && b0 != 0xfe && b0 != 0xef){
              // No unicode signature, it seems. Go ahead and compile using the memory mapped file.
              return new DocumentText(mmFile);
            }
            // NOTE(review): when a BOM byte (0xff/0xfe/0xef) IS present, control
            // falls through to the BCL path below and mmFile is never disposed
            // here — it lingers until finalization; confirm this is intended.
          }catch(Exception e){
            errorNodes.Add(this.CreateErrorNode(Error.InternalCompilerError, e.Message));
            this.ProcessErrors(options, results, errorNodes);
            return new DocumentText("");
          }
        }catch{} // mapping failure is non-fatal; fall back to the BCL reader
      }
    }
  }
#endif
  return new DocumentText(this.ReadSourceText(fileName, options, errorNodes));
}
/// <summary>Creates and validates a view accessor and a view stream from the map.</summary>
/// <param name="mmf">The map.</param>
/// <param name="capacity">The capacity to use when creating the view.</param>
/// <param name="access">The access to use when creating the view.</param>
private static void CreateAndValidateViews(MemoryMappedFile mmf, long capacity, MemoryMappedFileAccess access)
{
    // Exercise both view flavors over the full capacity with the requested access.
    using (MemoryMappedViewAccessor viewAccessor = mmf.CreateViewAccessor(0, capacity, access))
    {
        ValidateMemoryMappedViewAccessor(viewAccessor, capacity, access);
    }
    using (MemoryMappedViewStream viewStream = mmf.CreateViewStream(0, capacity, access))
    {
        ValidateMemoryMappedViewStream(viewStream, capacity, access);
    }
}
// Opens (or creates) the named shared-memory channel with the requested
// buffer capacity and remembers that capacity for later reads/writes.
public MemoryMappedFileChannel(string name, int bufferSize)
{
    _bufferSize = bufferSize;
    _file = attachToMemoryMapedFile(bufferSize, name);
}
/// <summary>
/// Entry point: scans an executable image for embedded DB table metadata (DBMeta) and writes one
/// .dbd definition file per discovered table into an output directory named after the build.
/// Usage: file outdir.
/// </summary>
/// <param name="args">args[0] = input binary, args[1] = output directory root.</param>
static void Main(string[] args)
{
    if (args.Length < 2) { throw new ArgumentException("Not enough arguments! Required: file, outdir"); }
    if (!File.Exists(args[0])) { throw new FileNotFoundException("File not found!"); }
    var file = MemoryMappedFile.CreateFromFile(args[0], FileMode.Open);
    var stream = file.CreateViewAccessor();
    #region BinaryMagic
    // Walk the image's load commands to build virtual-memory -> file-offset maps.
    // NOTE(review): command id 25 looks like a 64-bit segment load command (Mach-O LC_SEGMENT_64) — confirm.
    long offset = 0;
    stream.Read(offset, out Header header);
    offset += Marshal.SizeOf(header);
    var maps = new List<Map>();
    for (var i = 0; i < header.commandsCount; i++)
    {
        stream.Read(offset, out Command command);
        if (command.id == 25)
        {
            var segmentOffset = offset + 4 + 4 + 16; // Segment start + id + size + name;
            var vmemOffs = stream.ReadUInt64(segmentOffset);
            var vmemSize = stream.ReadUInt64(segmentOffset + 8);
            var fileOffs = stream.ReadUInt64(segmentOffset + 16);
            var fileSize = stream.ReadUInt64(segmentOffset + 24);
            var map = new Map { memoryStart = vmemOffs, memoryEnd = vmemOffs + vmemSize, fileStart = fileOffs, fileEnd = fileOffs + fileSize };
            maps.Add(map);
        }
        offset += command.size;
    }
    // Translates a virtual address to a file offset using the first segment map that resolves it (0 if none).
    Func<UInt64, UInt64> translate = search => maps.Select(map => map.translateMemoryToFile(search)).FirstOrDefault(r => r != 0x0);
    #endregion
    using (var bin = new BinaryReader(file.CreateViewStream()))
    {
        var chunkSize = 1024;
        // Find version
        // Pattern for "Build NNNNN (PPPPP) (" — wildcards (null) match the digits.
        var buildPattern = new byte?[] { 0x42, 0x75, 0x69, 0x6C, 0x64, 0x20, null, null, null, null, null, 0x20, 0x28, null, null, null, null, null, 0x29, 0x20, 0x28 };
        var buildPatternLength = buildPattern.Length;
        var build = "";
        // Scan the file in fixed-size chunks; on a miss, rewind by the pattern length so a
        // match straddling a chunk boundary is still found on the next read.
        while (true)
        {
            if ((bin.BaseStream.Length - bin.BaseStream.Position) < chunkSize) { break; }
            var posInStack = Search(bin.ReadBytes(chunkSize), buildPattern);
            if (posInStack != chunkSize)
            {
                var matchPos = bin.BaseStream.Position - chunkSize + posInStack;
                bin.BaseStream.Position = matchPos;
                bin.ReadBytes(6);
                var buildNumber = new string(bin.ReadChars(5));
                bin.ReadBytes(2);
                var patch = new string(bin.ReadChars(5));
                build = patch + "." + buildNumber;
            }
            else
            {
                bin.BaseStream.Position = bin.BaseStream.Position - buildPatternLength;
            }
        }
        if (build == "")
        {
            // Retry with backup pattern (crash log output)
            bin.BaseStream.Position = 0;
            buildPattern = new byte?[] { 0x00, 0x3C, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x3E, 0x20 }; // <Version>
            buildPatternLength = buildPattern.Length;
            while (true)
            {
                if ((bin.BaseStream.Length - bin.BaseStream.Position) < chunkSize) { break; }
                var posInStack = Search(bin.ReadBytes(chunkSize), buildPattern);
                if (posInStack != chunkSize)
                {
                    var matchPos = bin.BaseStream.Position - chunkSize + posInStack;
                    bin.BaseStream.Position = matchPos;
                    bin.ReadBytes(11);
                    build = new string(bin.ReadChars(11));
                }
                else
                {
                    bin.BaseStream.Position = bin.BaseStream.Position - buildPatternLength;
                }
            }
        }
        if (build == "") { throw new Exception("Build was not found!"); }
        // Reset position for DBMeta reading
        bin.BaseStream.Position = 0;
        // Extract DBMeta
        var metas = new Dictionary<string, DBMeta>();
        var patternBuilder = new PatternBuilder();
        foreach (var pattern in patternBuilder.patterns)
        {
            // Skip versions of the pattern that aren't for this expansion
            // (compares the first character of the build string with the first character of the pattern name).
            if (build[0] != pattern.name[0]) { continue; }
            var patternBytes = ParsePattern(pattern.cur_pattern).ToArray();
            var patternLength = patternBytes.Length;
            // Each candidate match is validated field-by-field before being accepted as a DBMeta.
            while (true)
            {
                if ((bin.BaseStream.Length - bin.BaseStream.Position) < chunkSize) { break; }
                var posInStack = Search(bin.ReadBytes(chunkSize), patternBytes);
                if (posInStack != chunkSize)
                {
                    var matchPos = bin.BaseStream.Position - chunkSize + posInStack;
                    Console.WriteLine("Pattern " + pattern.name + " matched at " + matchPos);
                    if (pattern.offsets.ContainsKey(Name.FDID))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.FDID];
                        if (bin.ReadUInt32() < 53183) { Console.WriteLine("Invalid filedataid, skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.RECORD_SIZE))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.RECORD_SIZE];
                        if (bin.ReadUInt32() == 0) { Console.WriteLine("Record size is 0, skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.DB_NAME))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.DB_NAME];
                        if (bin.ReadUInt32() < 10) { Console.WriteLine("Name offset is invalid, skipping match.."); continue; }
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.DB_NAME];
                        var targetOffset = (long)translate(bin.ReadUInt64());
                        if (targetOffset > bin.BaseStream.Length) { Console.WriteLine("Name offset is out of range of file, skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.DB_FILENAME))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.DB_FILENAME];
                        if (bin.ReadUInt32() < 10) { Console.WriteLine("Name offset is invalid, skipping match.."); continue; }
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.DB_FILENAME];
                        var targetOffset = (long)translate(bin.ReadUInt64());
                        if (targetOffset > bin.BaseStream.Length) { Console.WriteLine("Name offset is out of range of file, skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.NUM_FIELD_IN_FILE))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.NUM_FIELD_IN_FILE];
                        if (bin.ReadUInt32() > 5000) { Console.WriteLine("Num fields in file is over 5000, skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.FIELD_TYPES_IN_FILE) && pattern.offsets.ContainsKey(Name.FIELD_SIZES_IN_FILE))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.FIELD_TYPES_IN_FILE];
                        var fieldTypesInFile = bin.ReadInt64();
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.FIELD_SIZES_IN_FILE];
                        var fieldSizesInFileOffs = bin.ReadInt64();
                        if (fieldTypesInFile == fieldSizesInFileOffs) { Console.WriteLine("Field types in file offset == field sizes in file offset, skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.DB_CACHE_FILENAME))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.DB_CACHE_FILENAME];
                        var name = bin.ReadCString();
                        if (!name.EndsWith("adb")) { Console.WriteLine("ADB filename does not end in adb, skipping match.."); continue; }
                    }
                    // Match validated; read the full meta struct and register it under its (file)name.
                    bin.BaseStream.Position = matchPos;
                    var meta = ReadMeta(bin, pattern);
                    if (pattern.offsets.ContainsKey(Name.DB_NAME))
                    {
                        bin.BaseStream.Position = (long)translate((ulong)meta.nameOffset);
                        metas.TryAdd(bin.ReadCString(), meta);
                    }
                    else if (pattern.offsets.ContainsKey(Name.DB_FILENAME))
                    {
                        bin.BaseStream.Position = (long)translate((ulong)meta.dbFilenameOffs);
                        var name = bin.ReadCString();
                        metas.TryAdd(Path.GetFileNameWithoutExtension(name), meta);
                    }
                    bin.BaseStream.Position = matchPos + patternLength;
                }
                else
                {
                    bin.BaseStream.Position = bin.BaseStream.Position - patternLength;
                }
            }
            bin.BaseStream.Position = 0;
        }
        var outputDirectory = Path.Combine(args[1], build);
        if (!Directory.Exists(outputDirectory)) { Directory.CreateDirectory(outputDirectory); }
        // Process DBMetas
        foreach (var meta in metas)
        {
            if ((long)translate((ulong)meta.Value.field_offsets_offs) > bin.BaseStream.Length)
            {
                Console.WriteLine("Skipping reading of " + meta.Key + " because first offset is way out of range!");
                continue;
            }
            // NOTE(review): writer is not in a using block; an exception below leaks the file handle.
            var writer = new StreamWriter(Path.Combine(outputDirectory, meta.Key + ".dbd"));
            writer.WriteLine("COLUMNS");
            Console.Write("Writing " + meta.Key + ".dbd..");
            // Read the parallel per-field arrays (offsets, sizes, types, flags — both in-memory and in-file variants).
            var field_offsets = new List<int>();
            bin.BaseStream.Position = (long)translate((ulong)meta.Value.field_offsets_offs);
            for (var i = 0; i < meta.Value.num_fields; i++) { field_offsets.Add(bin.ReadInt32()); }
            var field_sizes = new List<int>();
            bin.BaseStream.Position = (long)translate((ulong)meta.Value.field_sizes_offs);
            for (var i = 0; i < meta.Value.num_fields; i++) { field_sizes.Add(bin.ReadInt32()); }
            var field_types = new List<int>();
            bin.BaseStream.Position = (long)translate((ulong)meta.Value.field_types_offs);
            for (var i = 0; i < meta.Value.num_fields; i++) { field_types.Add(bin.ReadInt32()); }
            var field_flags = new List<int>();
            bin.BaseStream.Position = (long)translate((ulong)meta.Value.field_flags_offs);
            for (var i = 0; i < meta.Value.num_fields; i++) { field_flags.Add(bin.ReadInt32()); }
            var field_sizes_in_file = new List<int>();
            bin.BaseStream.Position = (long)translate((ulong)meta.Value.field_sizes_in_file_offs);
            for (var i = 0; i < meta.Value.num_fields; i++) { field_sizes_in_file.Add(bin.ReadInt32()); }
            var field_types_in_file = new List<int>();
            bin.BaseStream.Position = (long)translate((ulong)meta.Value.field_types_in_file_offs);
            for (var i = 0; i < meta.Value.num_fields; i++) { field_types_in_file.Add(bin.ReadInt32()); }
            // Read field flags in file
            var field_flags_in_file = new List<int>();
            bin.BaseStream.Position = (long)translate((ulong)meta.Value.field_flags_in_file_offs);
            for (var i = 0; i < meta.Value.num_fields; i++) { field_flags_in_file.Add(bin.ReadInt32()); }
            if (meta.Value.id_column == -1) { writer.WriteLine("int ID"); }
            var columnNames = new List<string>();
            var columnTypeFlags = new List<Tuple<int, int>>();
            for (var i = 0; i < meta.Value.num_fields_in_file; i++)
            {
                columnTypeFlags.Add(new Tuple<int, int>(field_types_in_file[i], field_flags_in_file[i]));
            }
            // When the in-memory field count exceeds the in-file count, the extra column is a relation.
            if (meta.Value.num_fields_in_file != meta.Value.num_fields)
            {
                columnTypeFlags.Add(new Tuple<int, int>(field_types[meta.Value.num_fields_in_file], field_flags[meta.Value.num_fields_in_file]));
            }
            for (var i = 0; i < columnTypeFlags.Count; i++)
            {
                // NOTE(review): new Random() per iteration is time-seeded on .NET Framework and can
                // produce duplicate column names; a single shared Random instance would be safer.
                columnNames.Add("field_" + new Random().Next(1, int.MaxValue).ToString().PadLeft(9, '0'));
                var t = TypeToT(columnTypeFlags[i].Item1, (FieldFlags)columnTypeFlags[i].Item2);
                if (t.Item1 == "locstring") { writer.WriteLine(t.Item1 + " " + columnNames[i] + "_lang"); }
                else
                {
                    if (t.Item1 == "uint") { writer.WriteLine("int " + columnNames[i]); }
                    else { writer.WriteLine(t.Item1 + " " + columnNames[i]); }
                }
            }
            writer.WriteLine();
            writer.WriteLine("LAYOUT " + meta.Value.layout_hash.ToString("X8").ToUpper());
            writer.WriteLine("BUILD " + build);
            if (meta.Value.sparseTable == 1) { writer.WriteLine("COMMENT table is sparse"); }
            if (meta.Value.id_column == -1) { writer.WriteLine("$noninline,id$ID<32>"); }
            for (var i = 0; i < meta.Value.num_fields_in_file; i++)
            {
                var typeFlags = TypeToT(field_types_in_file[i], (FieldFlags)field_flags_in_file[i]);
                if (meta.Value.id_column == i) { writer.Write("$id$"); }
                if (build.StartsWith("7.3.5") || build.StartsWith("8.0.1"))
                {
                    if (meta.Value.column_8C == i)
                    {
                        writer.Write("$relation$");
                        if (meta.Value.column_90 != i) { throw new Exception("No column_90 but there is column_8C send help!"); }
                    }
                }
                writer.Write(columnNames[i]);
                if (typeFlags.Item1 == "locstring") { writer.Write("_lang"); }
                if (typeFlags.Item2 > 0)
                {
                    if (typeFlags.Item1 == "uint") { writer.Write("<u" + typeFlags.Item2 + ">"); }
                    else { writer.Write("<" + typeFlags.Item2 + ">"); }
                }
                if (field_sizes_in_file[i] != 1) { writer.Write("[" + field_sizes_in_file[i] + "]"); }
                writer.WriteLine();
            }
            // Trailing non-inline relation column, when present.
            if (meta.Value.num_fields_in_file != meta.Value.num_fields)
            {
                var i = meta.Value.num_fields_in_file;
                var typeFlags = TypeToT(field_types[i], (FieldFlags)field_flags[i]);
                writer.Write("$noninline,relation$" + columnNames[i]);
                if (typeFlags.Item1 == "locstring") { writer.Write("_lang"); }
                if (typeFlags.Item2 > 0)
                {
                    if (typeFlags.Item1 == "uint") { writer.Write("<u" + typeFlags.Item2 + ">"); }
                    else { writer.Write("<" + typeFlags.Item2 + ">"); }
                }
                if (field_sizes[i] != 1) { writer.Write("[" + field_sizes[i] + "]"); }
            }
            writer.Flush();
            writer.Close();
            Console.Write("..done!\n");
        }
    }
    Environment.Exit(0);
}
/// <summary>
/// Window procedure implementing the Pageant SSH-agent protocol: a WM_COPYDATA message carries the
/// name of a client-created memory mapped file; the request is read from that map, handed to the
/// agent, and the response is written back into the same map.
/// </summary>
/// <param name="hWnd">Window handle; non-WM_COPYDATA messages are forwarded to DefWindowProcW.</param>
/// <param name="msg">Message id.</param>
/// <param name="wParam">Unused for WM_COPYDATA.</param>
/// <param name="lParam">Pointer to a COPYDATASTRUCT holding the map name.</param>
/// <returns>IntPtr(1) on a handled request, IntPtr.Zero on any rejection.</returns>
private IntPtr PegeantWndProc(IntPtr hWnd, uint msg, IntPtr wParam, IntPtr lParam)
{
    if (msg != WM_COPYDATA) { return(DefWindowProcW(hWnd, msg, wParam, lParam)); }
    // convert lParam to something usable
    CopyData copyData = Marshal.PtrToStructure<CopyData>(lParam);
    var isCopyDataRequest = IntPtr.Size == 4 ? (copyData.dwData.ToInt32() == AGENT_COPYDATA_ID) : (copyData.dwData.ToInt64() == AGENT_COPYDATA_ID);
    if (!isCopyDataRequest) { return(IntPtr.Zero); }
    string mapName = Marshal.PtrToStringAnsi(copyData.lpData);
    // cbData must be the ANSI name length plus its NUL terminator, else reject.
    if (mapName.Length != copyData.cbData - 1) { return(IntPtr.Zero); }
    using (var fileMap = MemoryMappedFile.OpenExisting(mapName, MemoryMappedFileRights.FullControl))
    {
        if (fileMap.SafeMemoryMappedFileHandle.IsInvalid) { return(IntPtr.Zero); }
        var mapOwner = fileMap.GetAccessControl().GetOwner(typeof(SecurityIdentifier)) as SecurityIdentifier;
        // Maintain backwards compatibility with PuTTY 0.6.0 (and WinSCP)
        // see http://www.chiark.greenend.org.uk/~sgtatham/putty/wishlist/pageant-backwards-compatibility.html
        var processOwner = GetProcessOwner(Process.GetCurrentProcess());
        //Process otherProcess = null;
        //try
        //{
        //    if (RestartManager.StartSession(out var rmSessionHandle, 0, Guid.NewGuid().ToString()) != RmResult.ERROR_SUCCESS)
        //        throw new Exception("Could not start session to determin file locks.");
        //    if(RestartManager.RegisterResources(rmSessionHandle, 1, new string[] { mapName }, 0, null, 0, null) != RmResult.ERROR_SUCCESS)
        //        throw new Exception("Could not register resource");
        //    var processCount = 1u; // There should only be one process locking the file
        //    var processes = new RM_PROCESS_INFO[processCount];
        //    if (RestartManager.GetList(rmSessionHandle, out var foundProcesses, ref processCount, processes, out var rebootReason) != RmResult.ERROR_SUCCESS)
        //        throw new Exception("at least you tried");
        //    otherProcess = Process.GetProcessById(processes[0].Process.dwProcessId);
        //    //herProcess = WinInternals.FindProcessWithMatchingHandle(fileMap);
        //}
        //catch (Exception ex)
        //{
        //    Debug.Fail(ex.ToString());
        //}
        // Only serve requests from maps owned by this user or by the current process owner.
        if (_user == mapOwner || processOwner == mapOwner)
        {
            using (var stream = fileMap.CreateViewStream())
            {
                // Request layout: 4-byte big-endian length prefix followed by the message body.
                // NOTE(review): the return value of stream.Read is ignored — a short read would be
                // silently accepted; verify whether the view stream guarantees full reads here.
                var requestLength = new byte[4];
                stream.Read(requestLength, 0, 4);
                if (BitConverter.IsLittleEndian) { Array.Reverse(requestLength); }
                var message = new byte[BitConverter.ToInt32(requestLength, 0)];
                stream.Read(message, 0, message.Length);
                byte[] response = new byte[] { (byte)SshAgentResult.Failure };
                try
                {
                    // NOTE(review): synchronous block on an async call inside a wndproc — deadlock
                    // risk if a UI synchronization context is involved; confirm the threading model.
                    response = _agent.ProcessRequestAsync(message).ConfigureAwait(true).GetAwaiter().GetResult();
                }
                catch (Exception)
                {
                    response = new byte[] { (byte)SshAgentResult.Failure };
                    // TODO: Retrow this exception without interrupting the SSH client
                    //throw;
                }
                // Response is written back over the request: big-endian length prefix, then body.
                var responseLength = BitConverter.GetBytes(response.Length);
                if (BitConverter.IsLittleEndian) { Array.Reverse(responseLength); }
                stream.Seek(0, SeekOrigin.Begin);
                stream.Write(responseLength, 0, responseLength.Length);
                stream.Write(response, 0, response.Length);
                stream.Flush();
            }
            return(new IntPtr(1));
        }
    }
    return(IntPtr.Zero);
}
/// <summary>
/// Creates a <see cref="MemoryMappedInfo"/> covering <paramref name="size"/> bytes of the map
/// starting at offset 0, by delegating to the designated constructor with a null final argument.
/// </summary>
/// <param name="memoryMappedFile">The underlying memory mapped file.</param>
/// <param name="size">The size, in bytes, of the mapped region.</param>
public MemoryMappedInfo(MemoryMappedFile memoryMappedFile, long size) : this(memoryMappedFile, 0, size, null)
{
}
/// <summary>
/// Entry point: scans an executable image for embedded DB table metadata (DBMeta) and writes one
/// .dbd definition file per discovered table. The build version is taken from the command line or
/// located in the binary via several fallback byte patterns, and a specific pattern may be forced.
/// Usage: file outdir [build x.x.x] [pattern name].
/// </summary>
/// <param name="args">args[0] = input binary, args[1] = output directory root,
/// args[2] = optional build override, args[3] = optional pattern-name override.</param>
static void Main(string[] args)
{
    if (args.Length < 2) { throw new ArgumentException("Not enough arguments! Required: file, outdir, (build in x.x.x format), (pattern name to always use)"); }
    if (!File.Exists(args[0])) { throw new FileNotFoundException("File not found!"); }
    var file = MemoryMappedFile.CreateFromFile(args[0], FileMode.Open);
    var maps = EXEParsing.GenerateMap(file.CreateViewAccessor());
    // Translates a virtual address to a file offset using the first segment map that resolves it (0 if none).
    ulong translate(ulong search) => maps.Select(map => map.translateMemoryToFile(search)).FirstOrDefault(r => r != 0x0);
    using (var bin = new BinaryReader(file.CreateViewStream()))
    {
        var chunkSize = 1024;
        // Find version
        // Pattern for "Build NNNNN (PPPPP) (" — wildcards (null) match the digits.
        var buildPattern = new byte?[] { 0x42, 0x75, 0x69, 0x6C, 0x64, 0x20, null, null, null, null, null, 0x20, 0x28, null, null, null, null, null, 0x29, 0x20, 0x28 };
        var buildPatternLength = buildPattern.Length;
        var build = "";
        var patternOverride = "";
        if (args.Length >= 3)
        {
            build = args[2];
            if (args.Length >= 4) { patternOverride = args[3]; }
        }
        if (build == "")
        {
            // Chunked scan; on a miss, rewind by the pattern length so a match straddling
            // a chunk boundary is still found on the next read.
            while (true)
            {
                if ((bin.BaseStream.Length - bin.BaseStream.Position) < chunkSize) { break; }
                var posInStack = Search(bin.ReadBytes(chunkSize), buildPattern);
                if (posInStack != chunkSize)
                {
                    var matchPos = bin.BaseStream.Position - chunkSize + posInStack;
                    bin.BaseStream.Position = matchPos;
                    bin.ReadBytes(6);
                    var buildNumber = new string(bin.ReadChars(5));
                    bin.ReadBytes(2);
                    var patch = new string(bin.ReadChars(5));
                    build = patch + "." + buildNumber;
                }
                else
                {
                    bin.BaseStream.Position = bin.BaseStream.Position - buildPatternLength;
                }
            }
        }
        if (build == "")
        {
            // Retry with backup pattern (crash log output)
            bin.BaseStream.Position = 0;
            buildPattern = new byte?[] { 0x00, 0x3C, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x3E, 0x20 }; // <Version>
            buildPatternLength = buildPattern.Length;
            while (true)
            {
                if ((bin.BaseStream.Length - bin.BaseStream.Position) < chunkSize) { break; }
                var posInStack = Search(bin.ReadBytes(chunkSize), buildPattern);
                if (posInStack != chunkSize)
                {
                    var matchPos = bin.BaseStream.Position - chunkSize + posInStack;
                    bin.BaseStream.Position = matchPos;
                    bin.ReadBytes(11);
                    build = new string(bin.ReadChars(11));
                }
                else
                {
                    bin.BaseStream.Position = bin.BaseStream.Position - buildPatternLength;
                }
            }
        }
        if (build == "")
        {
            // Retry with RenderService pattern..
            bin.BaseStream.Position = 0;
            buildPattern = new byte?[] { 0x52, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, null, null, null, null, null, 0x00 }; // "RenderService " + 5-char build number + NUL
            buildPatternLength = buildPattern.Length;
            while (true)
            {
                if ((bin.BaseStream.Length - bin.BaseStream.Position) < chunkSize) { break; }
                var posInStack = Search(bin.ReadBytes(chunkSize), buildPattern);
                if (posInStack != chunkSize)
                {
                    var matchPos = bin.BaseStream.Position - chunkSize + posInStack;
                    bin.BaseStream.Position = matchPos;
                    bin.ReadBytes(14);
                    build = new string(bin.ReadChars(5));
                    // This pattern only yields the build number; the x.x.x prefix must come from the user.
                    if (args.Length == 3) { build = args[2]; }
                    else
                    {
                        Console.WriteLine("Expansion, major and minor version not found in binary. Please enter it in this format X.X.X: ");
                        build = Console.ReadLine() + "." + build;
                    }
                }
                else
                {
                    bin.BaseStream.Position = bin.BaseStream.Position - buildPatternLength;
                }
            }
        }
        if (build == "")
        {
            Console.WriteLine("Build was not found! Please enter a build in this format: X.X.X.XXXXX");
            build = Console.ReadLine();
        }
        if (build == "8.0.1.26321")
        {
            Console.WriteLine("Build 8.0.1.26321 has incorrect DBMeta, skipping..");
            return;
        }
        // Reset position for DBMeta reading
        bin.BaseStream.Position = 0;
        // Extract DBMeta
        var metas = new Dictionary<string, DBMeta>();
        var patternBuilder = new PatternBuilder();
        foreach (var pattern in patternBuilder.patterns)
        {
            if (patternOverride == "")
            {
                // Skip versions of the pattern that aren't for this expansion
                // Classic (build "1.x") uses a 6-char patch prefix and build number at index 7;
                // all other expansions use a 5-char prefix and build number at index 6.
                if (build.StartsWith("1"))
                {
                    // NOTE(review): this compatiblePatches check is duplicated verbatim (copy-paste); the second copy is redundant.
                    if (!pattern.compatiblePatches.Contains(build.Substring(0, 6))) { Console.WriteLine("Skipping " + pattern.name + " as it does not list " + build + " as compatible!"); continue; }
                    if (!pattern.compatiblePatches.Contains(build.Substring(0, 6))) { Console.WriteLine("Skipping " + pattern.name + " as it does not list " + build + " as compatible!"); continue; }
                    if (pattern.minBuild != 0 && pattern.minBuild > int.Parse(build.Substring(7))) { Console.WriteLine("Skipping " + pattern.name + " as minimum build " + pattern.minBuild + " exceeds build of " + build.Substring(6)); continue; }
                    if (pattern.maxBuild != 0 && int.Parse(build.Substring(7)) > pattern.maxBuild) { Console.WriteLine("Skipping " + pattern.name + " as maximum build " + pattern.maxBuild + " exceeds build of " + build.Substring(6)); continue; }
                }
                else
                {
                    // NOTE(review): same duplicated check as above.
                    if (!pattern.compatiblePatches.Contains(build.Substring(0, 5))) { Console.WriteLine("Skipping " + pattern.name + " as it does not list " + build + " as compatible!"); continue; }
                    if (!pattern.compatiblePatches.Contains(build.Substring(0, 5))) { Console.WriteLine("Skipping " + pattern.name + " as it does not list " + build + " as compatible!"); continue; }
                    if (pattern.minBuild != 0 && pattern.minBuild > int.Parse(build.Substring(6))) { Console.WriteLine("Skipping " + pattern.name + " as minimum build " + pattern.minBuild + " exceeds build of " + build.Substring(6)); continue; }
                    if (pattern.maxBuild != 0 && int.Parse(build.Substring(6)) > pattern.maxBuild) { Console.WriteLine("Skipping " + pattern.name + " as maximum build " + pattern.maxBuild + " exceeds build of " + build.Substring(6)); continue; }
                }
            }
            else
            {
                if (patternOverride != pattern.name) { continue; }
            }
            var patternBytes = ParsePattern(pattern.cur_pattern).ToArray();
            var patternLength = patternBytes.Length;
            // Each candidate match is validated field-by-field before being accepted as a DBMeta.
            while (true)
            {
                if ((bin.BaseStream.Length - bin.BaseStream.Position) < chunkSize) { break; }
                var posInStack = Search(bin.ReadBytes(chunkSize), patternBytes);
                if (posInStack != chunkSize)
                {
                    var matchPos = bin.BaseStream.Position - chunkSize + posInStack;
                    Console.WriteLine("Pattern " + pattern.name + " matched at " + matchPos);
                    if (pattern.offsets.ContainsKey(Name.FDID))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.FDID];
                        var fdid = bin.ReadUInt32();
                        if (fdid < 53183) { Console.WriteLine("Invalid filedataid " + fdid + ", skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.RECORD_SIZE))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.RECORD_SIZE];
                        if (bin.ReadUInt32() == 0) { Console.WriteLine("Record size is 0, skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.DB_NAME))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.DB_NAME];
                        if (bin.ReadUInt32() < 10) { Console.WriteLine("Name offset is invalid, skipping match.."); continue; }
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.DB_NAME];
                        var targetOffset = (long)translate(bin.ReadUInt64());
                        if (targetOffset > bin.BaseStream.Length) { Console.WriteLine("Name offset is out of range of file, skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.DB_FILENAME))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.DB_FILENAME];
                        if (bin.ReadUInt32() < 10) { Console.WriteLine("Name offset is invalid, skipping match.."); continue; }
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.DB_FILENAME];
                        var targetOffset = (long)translate(bin.ReadUInt64());
                        if (targetOffset > bin.BaseStream.Length) { Console.WriteLine("Name offset is out of range of file, skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.NUM_FIELD_IN_FILE))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.NUM_FIELD_IN_FILE];
                        if (bin.ReadUInt32() > 5000) { Console.WriteLine("Num fields in file is over 5000, skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.FIELD_TYPES_IN_FILE) && pattern.offsets.ContainsKey(Name.FIELD_SIZES_IN_FILE))
                    {
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.FIELD_TYPES_IN_FILE];
                        var fieldTypesInFile = bin.ReadInt64();
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.FIELD_SIZES_IN_FILE];
                        var fieldSizesInFileOffs = bin.ReadInt64();
                        if (fieldTypesInFile == fieldSizesInFileOffs) { Console.WriteLine("Field types in file offset == field sizes in file offset, skipping match.."); continue; }
                    }
                    if (pattern.offsets.ContainsKey(Name.DB_CACHE_FILENAME))
                    {
                        // The cache filename field is a pointer: translate it, read the string, then restore position.
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.DB_CACHE_FILENAME];
                        bin.BaseStream.Position = (long)translate((ulong)bin.ReadInt64());
                        var adbname = bin.ReadCString();
                        bin.BaseStream.Position = matchPos + pattern.offsets[Name.DB_CACHE_FILENAME];
                        if (!adbname.EndsWith("adb")) { Console.WriteLine("ADB filename does not end in adb, skipping match.."); continue; }
                    }
                    // Match validated; read the full meta struct and register it under its (file)name.
                    bin.BaseStream.Position = matchPos;
                    var meta = ReadMeta(bin, pattern);
                    if (pattern.offsets.ContainsKey(Name.DB_NAME))
                    {
                        bin.BaseStream.Position = (long)translate((ulong)meta.nameOffset);
                        var filename = bin.ReadCString();
                        if (filename.Contains("DBFilesClient")) { filename = filename.Substring(filename.IndexOf("\\") + 1); }
                        metas.TryAdd(Path.GetFileNameWithoutExtension(filename), meta);
                    }
                    else if (pattern.offsets.ContainsKey(Name.DB_FILENAME))
                    {
                        bin.BaseStream.Position = (long)translate((ulong)meta.dbFilenameOffs);
                        var name = bin.ReadCString();
                        metas.TryAdd(Path.GetFileNameWithoutExtension(name), meta);
                    }
                    bin.BaseStream.Position = matchPos + patternLength;
                }
                else
                {
                    bin.BaseStream.Position = bin.BaseStream.Position - patternLength;
                }
            }
            bin.BaseStream.Position = 0;
        }
        var outputDirectory = Path.Combine(args[1], build);
        if (!Directory.Exists(outputDirectory)) { Directory.CreateDirectory(outputDirectory); }
        // Process DBMetas
        foreach (var meta in metas)
        {
            if ((long)translate((ulong)meta.Value.field_offsets_offs) > bin.BaseStream.Length)
            {
                Console.WriteLine("Skipping reading of " + meta.Key + " because first offset is way out of range!");
                continue;
            }
            // NOTE(review): writer is not in a using block; an exception below leaks the file handle.
            var writer = new StreamWriter(Path.Combine(outputDirectory, meta.Key + ".dbd"));
            writer.WriteLine("COLUMNS");
            Console.Write("Writing " + meta.Key + ".dbd..");
            var fieldCount = 0;
            if (meta.Value.num_fields == 0 && meta.Value.num_fields_in_file != 0) { fieldCount = meta.Value.num_fields_in_file; }
            else { fieldCount = meta.Value.num_fields; }
            // Read the parallel per-field arrays (offsets, sizes, types, flags — both in-memory and in-file variants).
            var field_offsets = ReadFieldArray(bin, fieldCount, (long)translate((ulong)meta.Value.field_offsets_offs));
            var field_sizes = ReadFieldArray(bin, fieldCount, (long)translate((ulong)meta.Value.field_sizes_offs));
            var field_types = ReadFieldArray(bin, fieldCount, (long)translate((ulong)meta.Value.field_types_offs));
            var field_flags = ReadFieldArray(bin, fieldCount, (long)translate((ulong)meta.Value.field_flags_offs));
            var field_sizes_in_file = ReadFieldArray(bin, fieldCount, (long)translate((ulong)meta.Value.field_sizes_in_file_offs));
            var field_types_in_file = ReadFieldArray(bin, fieldCount, (long)translate((ulong)meta.Value.field_types_in_file_offs));
            var field_flags_in_file = ReadFieldArray(bin, fieldCount, (long)translate((ulong)meta.Value.field_flags_in_file_offs));
            var field_names_in_file = ReadFieldOffsetArray(bin, fieldCount, (long)translate((ulong)meta.Value.namesInFileOffs));
            if (meta.Value.id_column == -1) { writer.WriteLine("int ID"); }
            var columnNames = new List<string>();
            var columnTypeFlags = new List<Tuple<int, int>>();
            for (var i = 0; i < meta.Value.num_fields_in_file; i++)
            {
                // An empty flags array means this layout carries no per-field flags; default to 0.
                if (field_flags_in_file.Count == 0) { columnTypeFlags.Add(new Tuple<int, int>(field_types_in_file[i], 0)); }
                else { columnTypeFlags.Add(new Tuple<int, int>(field_types_in_file[i], field_flags_in_file[i])); }
            }
            // When the in-memory field count exceeds the in-file count, the extra column is a relation.
            if (meta.Value.num_fields != 0 && (meta.Value.num_fields_in_file != meta.Value.num_fields))
            {
                if (meta.Value.num_fields_in_file > field_flags.Count()) { columnTypeFlags.Add(new Tuple<int, int>(field_types[meta.Value.num_fields_in_file], 0)); }
                else { columnTypeFlags.Add(new Tuple<int, int>(field_types[meta.Value.num_fields_in_file], field_flags[meta.Value.num_fields_in_file])); }
            }
            for (var i = 0; i < columnTypeFlags.Count; i++)
            {
                // Use real column names when the binary carries them; otherwise generate placeholders
                // (generated names get a trailing '?' in the output to mark them as guesses).
                if (field_names_in_file.Count > 0)
                {
                    bin.BaseStream.Position = (long)translate(field_names_in_file[i]);
                    columnNames.Add(CleanRealName(bin.ReadCString()));
                }
                else { columnNames.Add(GenerateName(i, build)); }
                var t = TypeToT(columnTypeFlags[i].Item1, (FieldFlags)columnTypeFlags[i].Item2);
                if (field_names_in_file.Count > 0)
                {
                    if (t.Item1 == "locstring") { writer.WriteLine(t.Item1 + " " + columnNames[i] + "_lang"); }
                    else
                    {
                        if (t.Item1 == "uint") { writer.WriteLine("int " + columnNames[i]); }
                        else { writer.WriteLine(t.Item1 + " " + columnNames[i]); }
                    }
                }
                else
                {
                    if (t.Item1 == "locstring") { writer.WriteLine(t.Item1 + " " + columnNames[i] + "_lang?"); }
                    else
                    {
                        if (t.Item1 == "uint") { writer.WriteLine("int " + columnNames[i] + "?"); }
                        else { writer.WriteLine(t.Item1 + " " + columnNames[i] + "?"); }
                    }
                }
            }
            writer.WriteLine();
            if (meta.Value.layout_hash != 0) { writer.WriteLine("LAYOUT " + meta.Value.layout_hash.ToString("X8").ToUpper()); }
            writer.WriteLine("BUILD " + build);
            if (meta.Value.sparseTable == 1) { writer.WriteLine("COMMENT table is sparse"); }
            if (meta.Value.id_column == -1) { writer.WriteLine("$noninline,id$ID<32>"); }
            for (var i = 0; i < meta.Value.num_fields_in_file; i++)
            {
                var typeFlags = ("int", 32);
                if (field_flags_in_file.Count == 0) { typeFlags = TypeToT(field_types_in_file[i], 0); }
                else { typeFlags = TypeToT(field_types_in_file[i], (FieldFlags)field_flags_in_file[i]); }
                if (meta.Value.id_column == i) { writer.Write("$id$"); }
                if (build.StartsWith("7.3.5") || build.StartsWith("8"))
                {
                    if (meta.Value.column_8C == i)
                    {
                        writer.Write("$relation$");
                        if (meta.Value.column_90 != i) { throw new Exception("No column_90 but there is column_8C send help!"); }
                    }
                }
                writer.Write(columnNames[i]);
                if (typeFlags.Item1 == "locstring") { writer.Write("_lang"); }
                if (typeFlags.Item2 > 0)
                {
                    if (typeFlags.Item1 == "uint") { writer.Write("<u" + typeFlags.Item2 + ">"); }
                    else { writer.Write("<" + typeFlags.Item2 + ">"); }
                }
                if (field_sizes_in_file[i] != 1)
                {
                    // 6.0.1 has sizes in bytes
                    if (build.StartsWith("6.0.1"))
                    {
                        var supposedSize = 0;
                        if ((typeFlags.Item1 == "uint" || typeFlags.Item1 == "int") && typeFlags.Item2 != 32) { supposedSize = typeFlags.Item2 / 8; }
                        else { supposedSize = 4; }
                        var fixedSize = field_sizes_in_file[i] / supposedSize;
                        if (fixedSize > 1) { writer.Write("[" + fixedSize + "]"); }
                    }
                    else { writer.Write("[" + field_sizes_in_file[i] + "]"); }
                }
                writer.WriteLine();
            }
            // Trailing non-inline relation column, when present.
            if (meta.Value.num_fields != 0 && (meta.Value.num_fields_in_file != meta.Value.num_fields))
            {
                var i = meta.Value.num_fields_in_file;
                var typeFlags = TypeToT(columnTypeFlags[i].Item1, (FieldFlags)columnTypeFlags[i].Item2);
                writer.Write("$noninline,relation$" + columnNames[i]);
                if (typeFlags.Item1 == "locstring") { writer.Write("_lang"); }
                if (typeFlags.Item2 > 0)
                {
                    if (typeFlags.Item1 == "uint") { writer.Write("<u" + typeFlags.Item2 + ">"); }
                    else if (typeFlags.Item1 == "int") { writer.Write("<" + typeFlags.Item2 + ">"); }
                }
                if (field_sizes[i] != 1) { writer.Write("[" + field_sizes[i] + "]"); }
            }
            writer.Flush();
            writer.Close();
            Console.Write("..done!\n");
        }
    }
    Environment.Exit(0);
}
/// <summary>
/// Creates a new, memory mapped, huge coordinate index backed by the given file.
/// </summary>
/// <param name="file">The memory mapped file backing both the index and the coordinate array.</param>
/// <param name="size">The original size.</param>
public HugeCoordinateCollectionIndex(MemoryMappedFile file, long size)
{
    _index = new MemoryMappedHugeArrayUInt64(file, size);
    _coordinates = new MemoryMappedHugeArraySingle(file, size * 2 * ESTIMATED_SIZE);

    // Zero out every index slot.
    for (long i = 0; i < _index.Length; i++)
    {
        _index[i] = 0;
    }

    // Fill every coordinate slot with the float.MinValue sentinel.
    for (long i = 0; i < _coordinates.Length; i++)
    {
        _coordinates[i] = float.MinValue;
    }
}