/// <summary>
/// Serializes the given header struct into a pooled buffer and writes it to the
/// stream at the stream's current position.
/// </summary>
/// <param name="stream">Destination stream; written at its current position.</param>
/// <param name="header">Header struct to serialize.</param>
public void WriteHeader(Stream stream, ref BigFileHeaderStruct header)
{
    log.Info("Writing header...");

    // Borrow a scratch buffer sized for the header struct from the buffer cache.
    byte[] scratch = buffers[header.StructSize];
    int written = MarshalUtil.StructToBytes<BigFileHeaderStruct>(header, scratch);
    stream.Write(scratch, 0, written);

    log.Info("Header written!");
}
/// <summary>
/// Writes the bigfile header into the backing file at the offset given by the
/// segment header.
/// </summary>
/// <param name="segmentHeader">Segment header providing <c>InfoOffset</c>, where the header is written.</param>
/// <param name="header">Header struct to serialize.</param>
/// <exception cref="IOException">The backing file info is null or the file does not exist.</exception>
public void WriteHeader(ref BigFileSegmentHeader segmentHeader, ref BigFileHeaderStruct header)
{
    if (fileInfo == null || !fileInfo.Exists)
    {
        // Fixed: this is the write path — the old message said "Can't read header!".
        throw new IOException("Can't write header!");
    }

    log.Info("Writing header to file: " + fileInfo.FullName);

    using (FileStream fs = File.OpenWrite(fileInfo.FullName))
    {
        // The header lives at the segment's info offset.
        fs.Seek(segmentHeader.InfoOffset, SeekOrigin.Begin);
        WriteHeader(fs, ref header);
    }
}
/// <summary>
/// Writes the folder info table into the backing file at the calculated folder offset.
/// </summary>
/// <param name="segmentHeader">Segment header used to calculate the folder offset.</param>
/// <param name="header">Bigfile header used to calculate the folder offset.</param>
/// <param name="infos">Folder infos to serialize.</param>
/// <exception cref="IOException">The backing file info is null or the file does not exist.</exception>
public void WriteFolderInfos(ref BigFileSegmentHeader segmentHeader, ref BigFileHeaderStruct header, IBigFileFolderInfo[] infos)
{
    if (fileInfo == null || !fileInfo.Exists)
    {
        // Fixed: was a bare Exception with a message ("File info cannot be null!") that was
        // wrong for the !Exists case; IOException matches the sibling WriteHeader overload.
        throw new IOException("Can't write folder infos: file info is null or the file doesn't exist!");
    }

    log.Debug("Writing folder infos to file {0}", fileInfo.FullName);

    using (FileStream fs = File.OpenWrite(fileInfo.FullName))
    {
        int folderOffset = BigFileUtil.CalculateFolderOffset(version, ref segmentHeader, ref header);
        fs.Seek(folderOffset, SeekOrigin.Begin);
        WriteFolderInfos(fs, infos);
    }
}
/// <summary>
/// Writes the file info table into the backing file, immediately after the bigfile header.
/// </summary>
/// <param name="segmentHeader">Segment header providing <c>InfoOffset</c>.</param>
/// <param name="header">Bigfile header providing <c>StructSize</c>.</param>
/// <param name="infos">File infos to serialize.</param>
/// <exception cref="IOException">The backing file info is null or the file does not exist.</exception>
public void WriteFileInfos(ref BigFileSegmentHeader segmentHeader, ref BigFileHeaderStruct header, IBigFileFileInfo[] infos)
{
    if (fileInfo == null || !fileInfo.Exists)
    {
        // Fixed: was a bare Exception with a message ("File info cannot be null!") that was
        // wrong for the !Exists case; IOException matches the sibling WriteHeader overload.
        throw new IOException("Can't write file infos: file info is null or the file doesn't exist!");
    }

    log.Debug("Writing file infos to file {0}", fileInfo.FullName);

    using (FileStream fs = File.OpenWrite(fileInfo.FullName))
    {
        // File infos start directly after the header struct.
        int fileOffset = segmentHeader.InfoOffset + header.StructSize;
        fs.Seek(fileOffset, SeekOrigin.Begin);
        WriteFileInfos(fs, infos);
    }
}
/// <summary>
/// Reads the bigfile header from the stream at the segment header's info offset.
/// </summary>
/// <param name="stream">Seekable stream to read from.</param>
/// <param name="segmentHeader">Segment header providing <c>InfoOffset</c>.</param>
/// <returns>The deserialized header struct.</returns>
/// <exception cref="EndOfStreamException">The stream ended before a full header could be read.</exception>
public BigFileHeaderStruct ReadHeader(Stream stream, ref BigFileSegmentHeader segmentHeader)
{
    BigFileHeaderStruct header = new BigFileHeaderStruct();

    log.Info("Reading big file header...");
    log.Debug("Header struct size: " + header.StructSize);

    byte[] buffer = buffers[header.StructSize];
    stream.Seek(segmentHeader.InfoOffset, SeekOrigin.Begin);

    // Fixed: Stream.Read may return fewer bytes than requested, so loop until
    // the whole struct has been read instead of trusting a single call.
    int total = 0;
    while (total < header.StructSize)
    {
        int read = stream.Read(buffer, total, header.StructSize - total);
        if (read == 0)
        {
            throw new EndOfStreamException("Unexpected end of stream while reading the bigfile header!");
        }
        total += read;
    }

    header = MarshalUtil.BytesToStruct<BigFileHeaderStruct>(buffer);

    log.Info("Header read!");
    return header;
}
/// <summary>
/// Reads <c>header.Files</c> file info structs from the stream, starting right after
/// the bigfile header.
/// </summary>
/// <param name="stream">Seekable stream to read from.</param>
/// <param name="segmentHeader">Segment header providing <c>InfoOffset</c>.</param>
/// <param name="header">Bigfile header providing <c>StructSize</c> and <c>Files</c>.</param>
/// <returns>One deserialized file info per file.</returns>
/// <exception cref="NullReferenceException">The version field is null.</exception>
/// <exception cref="EndOfStreamException">The stream ended before all infos could be read.</exception>
public IBigFileFileInfo[] ReadFileInfos(Stream stream, ref BigFileSegmentHeader segmentHeader, ref BigFileHeaderStruct header)
{
    if (version == null)
    {
        throw new NullReferenceException("Version cannot be null!");
    }

    log.Info("Reading big file file infos, count: {0}", header.Files);
    BigFileVersions.DebugLogVersion(version, log);

    diag.StartStopwatch();

    IBigFileFileInfo[] infos = new IBigFileFileInfo[header.Files];
    // The version decides the concrete (version-specific) file info layout.
    IBigFileFileInfo tmpInfo = version.CreateFileInfo();

    // File infos start directly after the header struct.
    int fileOffset = segmentHeader.InfoOffset + header.StructSize;
    log.Debug("File info offset: {0:X8}", fileOffset);

    byte[] buffer = buffers[tmpInfo.StructSize];
    stream.Seek(fileOffset, SeekOrigin.Begin);
    for (int i = 0; i < header.Files; i++)
    {
        // Fixed: Stream.Read may return fewer bytes than requested, so loop
        // until a whole struct has been read instead of trusting a single call.
        int total = 0;
        while (total < tmpInfo.StructSize)
        {
            int read = stream.Read(buffer, total, tmpInfo.StructSize - total);
            if (read == 0)
            {
                throw new EndOfStreamException("Unexpected end of stream while reading file infos!");
            }
            total += read;
        }

        infos[i] = tmpInfo.FromBytes(buffer);
        infos[i].DebugLog(log);
    }

    log.Info("File infos read! Time taken: {0}ms", diag.StopwatchTime);
    log.Info("File count: {0}", header.Files);
    return infos;
}
/// <summary>
/// Convenience overload: opens the backing file and reads the file info table from it.
/// </summary>
/// <param name="segmentHeader">Segment header providing <c>InfoOffset</c>.</param>
/// <param name="header">Bigfile header providing <c>StructSize</c> and <c>Files</c>.</param>
/// <returns>One deserialized file info per file.</returns>
/// <exception cref="IOException">The backing file info is null or the file does not exist.</exception>
public IBigFileFileInfo[] ReadFileInfos(ref BigFileSegmentHeader segmentHeader, ref BigFileHeaderStruct header)
{
    if (fileInfo == null || !fileInfo.Exists)
    {
        // Fixed: was a bare Exception with a message ("File info cannot be null!") that was
        // wrong for the !Exists case; IOException matches the WriteHeader overload's style.
        throw new IOException("Can't read file infos: file info is null or the file doesn't exist!");
    }

    log.Debug("Reading file infos from file {0}", fileInfo.FullName);

    IBigFileFileInfo[] infos;
    using (FileStream fs = File.OpenRead(fileInfo.FullName))
    {
        infos = ReadFileInfos(fs, ref segmentHeader, ref header);
    }

    return infos;
}
/// <summary>
/// Reads <c>header.Folders</c> folder info structs from the stream at the calculated
/// folder offset.
/// </summary>
/// <param name="stream">Seekable stream to read from.</param>
/// <param name="segmentHeader">Segment header used to calculate the folder offset.</param>
/// <param name="header">Bigfile header providing <c>Folders</c>.</param>
/// <returns>One deserialized folder info per folder.</returns>
/// <exception cref="NullReferenceException">The version field is null.</exception>
/// <exception cref="EndOfStreamException">The stream ended before all infos could be read.</exception>
public IBigFileFolderInfo[] ReadFolderInfos(Stream stream, ref BigFileSegmentHeader segmentHeader, ref BigFileHeaderStruct header)
{
    if (version == null)
    {
        throw new NullReferenceException("Version cannot be null!");
    }

    log.Info("Reading big file folders, count: {0}", header.Folders);
    BigFileVersions.DebugLogVersion(version, log);

    diag.StartStopwatch();

    IBigFileFolderInfo[] infos = new IBigFileFolderInfo[header.Folders];
    // The version decides the concrete (version-specific) folder info layout.
    IBigFileFolderInfo tmpInfo = version.CreateFolderInfo();

    int folderOffset = BigFileUtil.CalculateFolderOffset(version, ref segmentHeader, ref header);

    byte[] buffer = buffers[tmpInfo.StructSize];
    stream.Seek(folderOffset, SeekOrigin.Begin);
    // Fixed: loop counter widened from short to int (consistent with ReadFileInfos).
    for (int i = 0; i < header.Folders; i++)
    {
        // Fixed: Stream.Read may return fewer bytes than requested, so loop
        // until a whole struct has been read instead of trusting a single call.
        int total = 0;
        while (total < tmpInfo.StructSize)
        {
            int read = stream.Read(buffer, total, tmpInfo.StructSize - total);
            if (read == 0)
            {
                throw new EndOfStreamException("Unexpected end of stream while reading folder infos!");
            }
            total += read;
        }

        infos[i] = tmpInfo.FromBytes(buffer);
        infos[i].DebugLog(log);
    }

    log.Info("Folder infos read! Time taken: {0}ms", diag.StopwatchTime);
    return infos;
}
/// <summary>
/// Calculates the absolute offset of the file data section: the folder offset plus
/// the folder info table, rounded up to the next multiple of 8.
/// </summary>
/// <param name="version">Bigfile version used to size the folder info structs.</param>
/// <param name="segmentHeader">Segment header forwarded to the folder offset calculation.</param>
/// <param name="header">Bigfile header providing <c>Folders</c>.</param>
/// <returns>The 8-byte-aligned data offset.</returns>
/// <exception cref="ArgumentNullException"><paramref name="version"/> is null.</exception>
public static int CalculateDataOffset(IBigFileVersion version, ref BigFileSegmentHeader segmentHeader, ref BigFileHeaderStruct header)
{
    if (version == null)
    {
        // Fixed: never hand-throw NullReferenceException (CA2201); this is an argument check.
        throw new ArgumentNullException(nameof(version), "There's no version! Can't calculate data offset!");
    }

    IBigFileFolderInfo tmpFolderInfo = version.CreateFolderInfo();

    int folderOffset = CalculateFolderOffset(version, ref segmentHeader, ref header);
    int dataOffset = folderOffset + (header.Folders * tmpFolderInfo.StructSize);
    dataOffset = (((dataOffset - 1) / 8) + 1) * 8; // align to 8 bytes
    return dataOffset;
}
/// <summary>
/// Instance convenience overload: forwards to the static data offset calculation
/// using this instance's version.
/// </summary>
/// <param name="segmentHeader">Segment header forwarded to the static overload.</param>
/// <param name="header">Bigfile header forwarded to the static overload.</param>
/// <returns>The 8-byte-aligned data offset.</returns>
public int CalculateDataOffset(ref BigFileSegmentHeader segmentHeader, ref BigFileHeaderStruct header)
{
    int dataOffset = CalculateDataOffset(this.version, ref segmentHeader, ref header);
    return dataOffset;
}
/// <summary>
/// Calculates the absolute offset of the folder info table: info offset plus header
/// plus the file info table, rounded up to the next multiple of 8.
/// </summary>
/// <param name="version">Bigfile version used to size the file info structs.</param>
/// <param name="segmentHeader">Segment header providing <c>InfoOffset</c>.</param>
/// <param name="header">Bigfile header providing <c>StructSize</c> and <c>Files</c>.</param>
/// <returns>The 8-byte-aligned folder offset.</returns>
/// <exception cref="ArgumentNullException"><paramref name="version"/> is null.</exception>
public static int CalculateFolderOffset(IBigFileVersion version, ref BigFileSegmentHeader segmentHeader, ref BigFileHeaderStruct header)
{
    if (version == null)
    {
        // Fixed: never hand-throw NullReferenceException (CA2201); this is an argument check.
        throw new ArgumentNullException(nameof(version), "There's no version! Can't calculate folder offset!");
    }

    IBigFileFileInfo tmpFileInfo = version.CreateFileInfo();

    int baseSize = (segmentHeader.InfoOffset + header.StructSize) + (header.Files * tmpFileInfo.StructSize);
    baseSize = (((baseSize - 1) / 8) + 1) * 8; // align to 8 bytes
    return baseSize;
}
/// <summary>
/// Callback run when the chunking threads report completion: waits for all of them
/// to finish, then collates their per-thread metadata files, writes the segment
/// header, bigfile header, file/folder info tables, and finally concatenates the
/// chunk files into the target bigfile.
/// </summary>
/// <param name="info">Pack job state: options, diagnostics, I/O buffers and the source bigfile.</param>
private void internal_OnPackFinished(BigFilePackInfo info)
{
    // Busy-wait until every chunking thread has finished.
    // NOTE(review): polling with Thread.Sleep — presumably acceptable here since this
    // already runs off the UI path; confirm before changing.
    while (IsPacking)
    {
        Thread.Sleep(500); //wait for all threads to finish
    }

    log.Info("All chunking threads finished their work!");
    log.Info(" >Chunking result:");
    log.Info(" {0,6} {1,6} {2,6} {3,6}", "Thread", "Time", "Start", "Count");
    for (int i = 0; i < info.Options.Threads; i++)
    {
        log.Info(" {0,6} {1,4}s {2,6} {3,6}", packInfos[i].ThreadID, packInfos[i].diag.StopwatchTime / 1000, packInfos[i].startIndex, packInfos[i].count);
    }

    log.Info("Starting packaging");

    // Target bigfile path: <dir>\<name><extension>.
    string targetFileName = info.Options.Directory.FullName + @"\" + info.Options.BigFileName + BigFileConst.BIGFILE_EXTENSION;
    FileInfo targetFileInfo = new FileInfo(targetFileName);
    if (targetFileInfo.Exists)
    {
        // Ask the user before clobbering an existing bigfile.
        WinMessageBoxResult overwriteResult = WinMessageBox.Show("The file\n" + targetFileName + "\n already exists.\n\nOverwrite?", "File already exists", WinMessageBoxFlags.btnYesNo);
        if (overwriteResult != WinMessageBoxResult.Yes)
        {
            log.Error("Target file already exists and the user chose not to overwrite!");

            // Even on abort, clean up the staging files if the options ask for it.
            if (info.Options.DeleteChunks)
            {
                log.Info("Deleting generated chunks!");
                for (int threadID = 0; threadID < info.Options.Threads; threadID++)
                {
                    // Staging files are named <name>.meta<threadID> / <name>.chunk<threadID>.
                    string metadataFilename = Environment.CurrentDirectory + BigFileConst.PACK_STAGING_DIR + info.Options.BigFileName + ".meta" + packInfos[threadID].ThreadID.ToString();
                    string chunkFileName = Environment.CurrentDirectory + BigFileConst.PACK_STAGING_DIR + info.Options.BigFileName + ".chunk" + packInfos[threadID].ThreadID.ToString();
                    File.Delete(metadataFilename);
                    File.Delete(chunkFileName);
                    log.Info("Deleted metadata file {0}", metadataFilename);
                    log.Info("Deleted chunk file {0}", chunkFileName);
                }
            }
            return;
        }
    }

    info.diag.StartStopwatch();

    using (FileStream targetFS = new FileStream(targetFileName, FileMode.Create, FileAccess.Write))
    {
        //Dictionary<int, ChunkedFileMetadata> metadataMap = new Dictionary<int, ChunkedFileMetadata>();
        List<ChunkedFileMetadata> metadataList = new List<ChunkedFileMetadata>();

        // Running total of chunk file sizes, used to rebase each chunk's local file
        // offsets to offsets in the concatenated target bigfile.
        int chunkedFileOffsetInTargetFile = 0;
        for (int threadID = 0; threadID < info.Options.Threads; threadID++)
        {
            string metadataFilename = Environment.CurrentDirectory + BigFileConst.PACK_STAGING_DIR + info.Options.BigFileName + ".meta" + packInfos[threadID].ThreadID.ToString();

            log.Info("Collating metadata from file " + metadataFilename);

            //extract the metadata from the metadata files
            using (FileStream metaFS = new FileStream(metadataFilename, FileMode.Open, FileAccess.Read))
            {
                // Metadata file layout: 8-byte header = fileCount (int32) + chunkFileSize (int32),
                // then fileCount 12-byte records = Number (int32) + Key (int32) + Offset (int32).
                byte[] tmpBuffer = info.IOBuffers[8];
                metaFS.Read(tmpBuffer, 0, 8);
                int fileCount = BitConverter.ToInt32(tmpBuffer, 0);
                int chunkFileSize = BitConverter.ToInt32(tmpBuffer, 4);

                tmpBuffer = info.IOBuffers[12];
                for (int j = 0; j < fileCount; j++)
                {
                    metaFS.Read(tmpBuffer, 0, 12);

                    // Offset -1 is a sentinel and must not be rebased.
                    int offset = BitConverter.ToInt32(tmpBuffer, 8);
                    if (offset != -1)
                    {
                        offset += chunkedFileOffsetInTargetFile;
                    }

                    ChunkedFileMetadata mdata = new ChunkedFileMetadata()
                    {
                        Number = BitConverter.ToInt32(tmpBuffer, 0),
                        Key = BitConverter.ToInt32(tmpBuffer, 4),
                        Offset = offset
                    };
                    metadataList.Add(mdata);
                }

                chunkedFileOffsetInTargetFile += chunkFileSize;
            }

            if (info.Options.DeleteChunks)
            {
                log.Info("Deleting metadata file...");
                File.Delete(metadataFilename);
            }
        }

        log.Info("Metadata collation took {0,4}s", info.diag.StopwatchTime / 1000);

        info.diag.StartStopwatch();

        //write the segment header to the target bigfile
        log.Info("Writing segment header to new bigfile...");
        info.bigFile.Segment.WriteSegmentHeader(targetFS, ref info.bigFile.SegmentHeader);
        log.Info("Segment header written!");

        //create a new header with the number of files we're packing
        log.Info("Writing file header to new bigfile...");
        BigFileHeaderStruct header = new BigFileHeaderStruct()
        {
            Files = metadataList.Count,
            Folders = (short)info.bigFile.RawFolderInfos.Length, //oh boy
            BigFileVersion = info.bigFile.Version.Identifier,
            Unknown_02 = info.bigFile.FileHeader.Unknown_02,
        };
        header.DebugLog(log);
        info.bigFile.Header.WriteHeader(targetFS, ref header);
        log.Info("File header written!");

        //create a list of file infos to write, copying all but the offset and file number from the original file info
        log.Info("Creating new file info list...");
        IBigFileFileInfo[] newFileInfos = new IBigFileFileInfo[metadataList.Count];
        for (int i = 0; i < metadataList.Count; i++)
        {
            newFileInfos[i] = info.bigFile.Version.CreateFileInfo();
            info.bigFile.FileMap[metadataList[i].Key].FileInfo.Copy(newFileInfos[i]);

            if (metadataList[i].Offset == -1)
            {
                // Sentinel offset propagated verbatim; loudly logged because it shouldn't occur.
                newFileInfos[i].Offset = -1;
                log.Error("METATADA FILE OFFSET IS -1");
            }
            else
            {
                // File info offsets are stored divided by 8, so a non-8-aligned
                // source offset indicates a packing bug — hence the sanity log.
                if (metadataList[i].Offset % 8 != 0)
                {
                    log.Error("WAIT WHAT: {0} {1:X4}", metadataList[i].Offset, metadataList[i].Key);
                }
                newFileInfos[i].Offset = metadataList[i].Offset / 8;
            }

            newFileInfos[i].FileNumber = metadataList[i].Number;
            // Keep the original ZIP flag only when compression was requested; otherwise clear it.
            newFileInfos[i].ZIP = (info.Options.Flags & BigFileFlags.Compress) != 0 ? newFileInfos[i].ZIP : 0;
        }
        log.Info("New file info list created!");

        log.Info("Writing file and folder infos to new bigfile...");
        //write file infos to file
        info.bigFile.FilesAndFolders.WriteFileInfos(targetFS, newFileInfos);
        //write folder infos to file
        info.bigFile.FilesAndFolders.WriteFolderInfos(targetFS, info.bigFile.RawFolderInfos);
        log.Info("File and folder infos written!");

        log.Info("File metadata generation took {0,4}s", info.diag.StopwatchTime / 1000);

        info.diag.StartStopwatch();

        //copy chunk file data to target bigfile
        for (int threadID = 0; threadID < info.Options.Threads; threadID++)
        {
            string chunkFileName = Environment.CurrentDirectory + BigFileConst.PACK_STAGING_DIR + info.Options.BigFileName + ".chunk" + packInfos[threadID].ThreadID.ToString();

            log.Info("Copying chunk data from chunk {0}", chunkFileName);
            log.Info(" Current offset: {0:X8}", targetFS.Position);

            // Stream-copy the chunk file into the target in 36 MB slices.
            byte[] buffer = info.IOBuffers[IOBuffers.MB * 36];
            using (FileStream chunkFS = new FileStream(chunkFileName, FileMode.Open, FileAccess.Read))
            {
                int readSize = -1;
                while ((readSize = chunkFS.Read(buffer, 0, IOBuffers.MB * 36)) != 0)
                {
                    targetFS.Write(buffer, 0, readSize);
                }
            }

            log.Info("Chunk data copied! Current offset: {0:X8}", targetFS.Position);

            if (info.Options.DeleteChunks)
            {
                log.Info("Deleting chunk...");
                File.Delete(chunkFileName);
            }
        }
        log.Info("All chunk data written!");

        log.Info("Chunk data copying time taken: {0,4}s", info.diag.StopwatchTime / 1000);
        log.Info("Bigfile packing finished!");
    }
}