public override void Ingress<T>(DryadLinqContext context, IEnumerable<T> source, Uri dataSetUri,
                                DryadLinqMetaData metaData, CompressionScheme compressionScheme,
                                bool isTemp = false)
{
    string account, key, container, blob;
    AzureUtils.FromAzureUri(dataSetUri, out account, out key, out container, out blob);
    if (compressionScheme != CompressionScheme.None)
    {
        throw new DryadLinqException("Not implemented: writing to Azure temporary storage with compression enabled");
    }
    AzureDfsClient client = new AzureDfsClient(account, key, container);
    DryadLinqFactory<T> factory = (DryadLinqFactory<T>)DryadLinqCodeGen.GetFactory(context, typeof(T));
    using (Stream stream = client.GetFileStreamWriterAsync(blob).Result)
    {
        DryadLinqBlockStream nativeStream = new DryadLinqBlockStream(stream);
        DryadLinqRecordWriter<T> writer = factory.MakeWriter(nativeStream);
        foreach (T rec in source)
        {
            writer.WriteRecordSync(rec);
        }
        writer.Close();
    }
}
public void Deserialize(Stream input)
{
    if (input.ReadValueU32(Endian.Big) != 0x44435000) // 'DCP\0'
    {
        throw new FormatException();
    }
    var scheme = input.ReadValueU32(Endian.Big);
    if (Enum.IsDefined(typeof(CompressionScheme), scheme) == false)
    {
        throw new FormatException();
    }
    this.Scheme = (CompressionScheme)scheme;
    var size = input.ReadValueU32(Endian.Big);
    if (size != 32)
    {
        throw new FormatException();
    }
    this.Level = input.ReadValueU8();
    if (this.Level > 9)
    {
        throw new FormatException();
    }
    input.Seek(3, SeekOrigin.Current); // padding?
    this.Unknown1C = input.ReadValueU32(Endian.Big);
    this.Unknown20 = input.ReadValueU32(Endian.Big);
    this.Unknown24 = input.ReadValueU32(Endian.Big);
    this.Flags = input.ReadValueU32(Endian.Big);
}
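// For reference, a minimal mirror-image Serialize for the DCP header above. This is a
// sketch, not from the source: it assumes the same Gibbed.IO-style writer extensions
// (WriteValueU32/WriteValueU8) and that the three bytes skipped on read are zero padding.
public void Serialize(Stream output)
{
    output.WriteValueU32(0x44435000, Endian.Big);        // 'DCP\0' magic
    output.WriteValueU32((uint)this.Scheme, Endian.Big); // compression scheme
    output.WriteValueU32(32, Endian.Big);                // fixed header size
    output.WriteValueU8(this.Level);                     // compression level, 0..9
    output.Write(new byte[3], 0, 3);                     // presumed padding
    output.WriteValueU32(this.Unknown1C, Endian.Big);
    output.WriteValueU32(this.Unknown20, Endian.Big);
    output.WriteValueU32(this.Unknown24, Endian.Big);
    output.WriteValueU32(this.Flags, Endian.Big);
}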
/// <summary>
/// Ingress a .NET collection to a specified store location.
/// </summary>
/// <typeparam name="T">The record type of the collection.</typeparam>
/// <param name="context">An instance of DryadLinqContext.</param>
/// <param name="source">The collection to be ingressed.</param>
/// <param name="dataSetUri">The URI to store the collection.</param>
/// <param name="metaData">The metadata for the collection.</param>
/// <param name="outputScheme">The compression scheme used to store the collection.</param>
/// <param name="isTemp">true to only store the collection temporarily with a time lease.</param>
/// <param name="serializer">A stream-based serializer.</param>
public abstract void Ingress<T>(DryadLinqContext context, IEnumerable<T> source, Uri dataSetUri,
                                DryadLinqMetaData metaData, CompressionScheme outputScheme,
                                bool isTemp, Expression<Action<IEnumerable<T>, Stream>> serializer);
/// <summary>
/// Compress a response with the given compression scheme.
/// </summary>
/// <param name="response">The response to compress.</param>
/// <param name="scheme">The scheme to apply; anything other than Deflate falls back to Gzip.</param>
/// <returns>The same response, with its contents compressed.</returns>
public static Response WithCompression(this Response response, CompressionScheme scheme)
{
    switch (scheme)
    {
        case CompressionScheme.Gzip:
            Internal.Utils.ResponseHelper.CompressGzipResponse(response);
            break;
        case CompressionScheme.Deflate:
            Internal.Utils.ResponseHelper.CompressDeflateResponse(response);
            break;
        default:
            // Unrecognized schemes fall back to gzip.
            Internal.Utils.ResponseHelper.CompressGzipResponse(response);
            break;
    }
    return response;
}
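// Usage sketch for the extension above, written as a hypothetical Nancy-style route
// handler; the module, route, and payload are illustrative, only WithCompression and
// CompressionScheme come from the source.
Get["/report"] = _ =>
{
    Response response = "a large text payload worth compressing...";
    return response.WithCompression(CompressionScheme.Gzip);
};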
internal DryadLinqMultiFileStream(string[] filePathArray, CompressionScheme scheme)
{
    this.m_filePathArray = filePathArray;
    this.m_compressionScheme = scheme;
    this.m_nextIndex = 0;
    this.InitNextStream();
}
// hhhhhhhh hhhhhhhh hhhhhhhh hhhhhhhh
// uuuuuuuu uuuuuuuu uuuuuuuu uuuuuuss
// oooooooo oooooooo oooooooo oooooooo
// oocccccc cccccccc cccccccc cccccccc
//
//   hash               = 32 bits
//   compression scheme =  2 bits
//   uncompressed size  = 30 bits
//   compressed size    = 30 bits
//   offset             = 34 bits
public void Deserialize(Stream input, Endian endian)
{
    var a = input.ReadValueU32(endian);
    var b = input.ReadValueU32(endian);
    var c = input.ReadValueU64(endian);

    this.NameHash = a;
    this.UncompressedSize = (uint)((b & 0xFFFFFFFCu) >> 2);
    this.CompressionScheme = (CompressionScheme)((b & 0x00000003u) >> 0);
    this.Offset = (long)((c & 0xFFFFFFFFC0000000ul) >> 30);
    this.CompressedSize = (uint)((c & 0x000000003FFFFFFFul) >> 0);

    if (this.CompressionScheme == CompressionScheme.None)
    {
        if (this.UncompressedSize != 0)
        {
            throw new FormatException();
        }
    }
    else if (this.CompressionScheme == CompressionScheme.LZO1x)
    {
        if (this.CompressedSize == 0 && this.UncompressedSize > 0)
        {
            throw new FormatException();
        }
    }
    else
    {
        throw new FormatException();
    }
}
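// The inverse packing, for reference: a hypothetical Serialize that rebuilds b and c
// from the decoded fields per the bit layout above. It assumes the same Gibbed.IO-style
// writers and that each field fits its bit width (30/2 bits in b; 34/30 bits in c).
public void Serialize(Stream output, Endian endian)
{
    uint b = ((uint)this.UncompressedSize << 2) | ((uint)this.CompressionScheme & 0x3u);
    ulong c = (((ulong)this.Offset & 0x3FFFFFFFFul) << 30) |
              ((ulong)this.CompressedSize & 0x3FFFFFFFul);
    output.WriteValueU32(this.NameHash, endian);
    output.WriteValueU32(b, endian);
    output.WriteValueU64(c, endian);
}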
internal DryadLinqFileBlockStream(FileStream fstream, CompressionScheme scheme)
{
    this.m_fstream = fstream;
    this.m_fhandle = fstream.SafeFileHandle;
    this.m_compressionScheme = scheme;
    this.m_isClosed = false;
    this.m_compressStream = null;
}
/// <summary>
/// Converts an IEnumerable{T} to a DryadLinq specialized IQueryable{T}.
/// </summary>
/// <typeparam name="T">The type of the records in the table.</typeparam>
/// <param name="data">The source data.</param>
/// <returns>An IQueryable{T} representing the data with DryadLinq query provider.</returns>
/// <remarks>
/// The source data will be serialized to a temp stream.
/// The resulting fileset has an auto-generated name and a temporary lease.
/// </remarks>
public IQueryable<T> FromEnumerable<T>(IEnumerable<T> data)
{
    Uri dataSetName = this.MakeTemporaryStreamUri();
    CompressionScheme compressionScheme = this.OutputDataCompressionScheme;
    DryadLinqMetaData metadata = new DryadLinqMetaData(this, typeof(T), dataSetName, compressionScheme);
    return DataProvider.StoreData(this, data, dataSetName, metadata, compressionScheme, true);
}
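// A minimal usage sketch: materialize a local collection as a DryadLinq query source.
// The context construction is hypothetical (constructor arguments assumed); only
// FromEnumerable is from the snippet above.
var context = new DryadLinqContext(1); // e.g. a single-process local context
IQueryable<int> table = context.FromEnumerable(Enumerable.Range(0, 1000));
var evens = table.Where(x => x % 2 == 0);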
// Placeholder: ingress for this provider is not yet implemented.
public override void Ingress<T>(DryadLinqContext context, IEnumerable<T> source, Uri dataSetUri,
                                DryadLinqMetaData metaData, CompressionScheme outputScheme,
                                bool isTemp = false)
{
    throw new DryadLinqException("TBA");
}
public void CompressFiles(Stream outputStream, string[] sources, CompressionScheme scheme,
                          int compressionLevel, int pathDepth)
{
    // Note: schemes other than None and GZip are silently ignored here; the switch's
    // default case below is unreachable because of this outer check.
    if (scheme == CompressionScheme.None || scheme == CompressionScheme.GZip)
    {
        var innerStream = outputStream;
        try
        {
            switch (scheme)
            {
                case CompressionScheme.None:
                    // Nothing to do...
                    break;
                case CompressionScheme.GZip:
                    innerStream = new GZipOutputStream(outputStream) { IsStreamOwner = false };
                    ((GZipOutputStream)innerStream).SetLevel(compressionLevel);
                    break;
                default:
                    throw new NotImplementedException(String.Format(
                        "Compression Scheme {0} not yet implemented.", scheme));
            }
            using (var output = new TarOutputStream(innerStream))
            {
                output.IsStreamOwner = false;
                foreach (var source in sources)
                {
                    if (String.IsNullOrWhiteSpace(source))
                    {
                        continue;
                    }
                    var fullpath = Context.LocalEnv.CurrentDirectory.Join(source);
                    if (File.Exists(fullpath.ToString()))
                    {
                        TarFile(output, null, fullpath);
                    }
                    else
                    {
                        RecursiveTarDir(output, null, fullpath, pathDepth);
                    }
                }
            }
        }
        finally
        {
            if (!ReferenceEquals(innerStream, outputStream))
            {
                // We own the stream... close it.
                innerStream.Close();
            }
        }
    }
}
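// Usage sketch: produce a gzipped tarball of two source directories, called from within
// the same class (the method relies on instance state). File name, sources, and level
// are illustrative.
using (var fs = File.Create("backup.tar.gz"))
{
    CompressFiles(fs, new[] { "src", "docs" }, CompressionScheme.GZip,
                  compressionLevel: 9, pathDepth: 0);
}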
public override void Ingress<T>(DryadLinqContext context, IEnumerable<T> source, Uri dataSetUri,
                                DryadLinqMetaData metaData, CompressionScheme compressionScheme,
                                bool isTemp, Expression<Action<IEnumerable<T>, Stream>> serializer)
{
    string fileName = dataSetUri.LocalPath;
    if (!String.IsNullOrEmpty(dataSetUri.Host))
    {
        fileName = @"\\" + dataSetUri.Host + fileName;
    }

    // Write the partition:
    string partDir = Path.GetDirectoryName(fileName);
    partDir = Path.Combine(partDir, DryadLinqUtil.MakeUniqueName());
    Directory.CreateDirectory(partDir);
    string uncPath = Path.Combine(partDir, "Part");
    string partitionPath = uncPath + ".00000000";
    DryadLinqFactory<T> factory = (DryadLinqFactory<T>)DryadLinqCodeGen.GetFactory(context, typeof(T));
    using (FileStream fstream = new FileStream(partitionPath, FileMode.CreateNew, FileAccess.Write))
    {
        if (serializer == null)
        {
            DryadLinqFileBlockStream nativeStream = new DryadLinqFileBlockStream(fstream, compressionScheme);
            DryadLinqRecordWriter<T> writer = factory.MakeWriter(nativeStream);
            foreach (T rec in source)
            {
                writer.WriteRecordSync(rec);
            }
            writer.Close();
        }
        else
        {
            Action<IEnumerable<T>, Stream> serializerFunc = serializer.Compile();
            serializerFunc(source, fstream);
        }
    }

    // Write the partfile:
    long partSize = new FileInfo(partitionPath).Length;
    using (StreamWriter writer = File.CreateText(fileName))
    {
        writer.WriteLine(uncPath);
        writer.WriteLine("1");
        writer.WriteLine("{0},{1}", 0, partSize);
    }
}
/// <summary>
/// Stores an IEnumerable{T} at a specified location.
/// </summary>
/// <typeparam name="T">The record type of the data.</typeparam>
/// <param name="context">An instance of <see cref="DryadLinqContext"/>.</param>
/// <param name="source">The data to store.</param>
/// <param name="dataSetUri">The URI of the store location.</param>
/// <param name="metaData">The metadata of the data.</param>
/// <param name="outputScheme">The compression scheme.</param>
/// <param name="isTemp">true if the data is only stored temporarily.</param>
/// <returns>An instance of IQueryable{T} for the data.</returns>
internal static DryadLinqQuery<T> StoreData<T>(DryadLinqContext context, IEnumerable<T> source,
                                               Uri dataSetUri, DryadLinqMetaData metaData,
                                               CompressionScheme outputScheme, bool isTemp = false)
{
    string scheme = DataPath.GetScheme(dataSetUri);
    DataProvider dataProvider = DataProvider.GetDataProvider(scheme);
    dataSetUri = dataProvider.RewriteUri<T>(context, dataSetUri);
    dataProvider.Ingress(context, source, dataSetUri, metaData, outputScheme, isTemp);
    return DataProvider.GetPartitionedTable<T>(context, dataSetUri);
}
internal DryadLinqMetaData(DryadLinqContext context, Type recordType, Uri dataSetUri,
                           CompressionScheme compressionScheme)
{
    this.m_context = context;
    this.m_dataSetUri = dataSetUri;
    this.m_elemType = recordType;
    this.m_compressionScheme = compressionScheme;
    //this.m_version = context.ClientVersion();
    //this.InitializeFlags();
    //this.m_fp = 0UL;
    //this.m_dataSetInfo = node.OutputDataSetInfo;
}
internal MultiBlockStream(List<string[]> srcList, string associatedDscStreamName,
                          FileAccess access, CompressionScheme scheme)
{
    this.m_srcList = srcList;
    this.m_associatedDscStreamName = associatedDscStreamName;
    if (srcList.Count == 0)
    {
        throw new DryadLinqException(DryadLinqErrorCode.MultiBlockEmptyPartitionList,
                                     SR.MultiBlockEmptyPartitionList);
    }
    this.m_compressionScheme = scheme;
    this.m_curIdx = 0;
    this.m_curStream = this.GetStream(this.m_curIdx++, access);
}
public override void Ingress<T>(DryadLinqContext context, IEnumerable<T> source, Uri dataSetUri,
                                DryadLinqMetaData metaData, CompressionScheme compressionScheme,
                                bool isTemp = false)
{
    // Write the partition:
    string partDir = context.PartitionUncPath;
    if (partDir == null)
    {
        partDir = Path.GetDirectoryName(dataSetUri.LocalPath);
    }
    if (!Path.IsPathRooted(partDir))
    {
        partDir = Path.Combine("/", partDir);
    }
    partDir = Path.Combine(partDir, DryadLinqUtil.MakeUniqueName());
    Directory.CreateDirectory(partDir);
    string partPath = Path.Combine(partDir, "Part");
    string partFilePath = partPath + ".00000000";
    DryadLinqFactory<T> factory = (DryadLinqFactory<T>)DryadLinqCodeGen.GetFactory(context, typeof(T));
    using (FileStream fstream = new FileStream(partFilePath, FileMode.CreateNew, FileAccess.Write))
    {
        DryadLinqFileBlockStream nativeStream = new DryadLinqFileBlockStream(fstream, compressionScheme);
        DryadLinqRecordWriter<T> writer = factory.MakeWriter(nativeStream);
        foreach (T rec in source)
        {
            writer.WriteRecordSync(rec);
        }
        writer.Close();
    }

    // Write the partfile:
    FileInfo finfo = new FileInfo(partFilePath);
    using (StreamWriter writer = File.CreateText(dataSetUri.LocalPath))
    {
        writer.WriteLine(partPath);
        writer.WriteLine("1");
        writer.WriteLine("{0},{1},{2}", 0, finfo.Length, Environment.MachineName);
    }
}
/// <summary>
/// Stores an IEnumerable{T} at a specified location.
/// </summary>
/// <typeparam name="T">The record type of the data.</typeparam>
/// <param name="context">An instance of <see cref="DryadLinqContext"/>.</param>
/// <param name="source">The data to store.</param>
/// <param name="dataSetUri">The URI of the store location.</param>
/// <param name="metaData">The metadata of the data.</param>
/// <param name="outputScheme">The compression scheme.</param>
/// <param name="isTemp">true if the data is only stored temporarily.</param>
/// <param name="serializer">A stream-based serializer.</param>
/// <param name="deserializer">A stream-based deserializer.</param>
/// <returns>An instance of IQueryable{T} for the data.</returns>
internal static DryadLinqQuery<T> StoreData<T>(DryadLinqContext context, IEnumerable<T> source,
                                               Uri dataSetUri, DryadLinqMetaData metaData,
                                               CompressionScheme outputScheme, bool isTemp,
                                               Expression<Action<IEnumerable<T>, Stream>> serializer,
                                               Expression<Func<Stream, IEnumerable<T>>> deserializer)
{
    string scheme = DataPath.GetScheme(dataSetUri);
    DataProvider dataProvider = DataProvider.GetDataProvider(scheme);
    dataSetUri = dataProvider.RewriteUri<T>(context, dataSetUri);
    dataProvider.Ingress(context, source, dataSetUri, metaData, outputScheme, isTemp, serializer);
    DryadLinqQuery<T> res = DataProvider.GetPartitionedTable<T>(context, dataSetUri, deserializer);
    res.CheckAndInitialize(); // must initialize
    return res;
}
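// Sketch of supplying custom stream-based serialization to StoreData above. Note that
// Expression<Action<...>> lambdas cannot contain statement bodies in C#, so the work is
// delegated to static helpers; the helper names and the line-per-record format are
// illustrative, not from the source.
static void WriteLines(IEnumerable<string> records, Stream stream)
{
    using (var writer = new StreamWriter(stream))
    {
        foreach (var record in records) writer.WriteLine(record);
    }
}

static IEnumerable<string> ReadLines(Stream stream)
{
    using (var reader = new StreamReader(stream))
    {
        string line;
        while ((line = reader.ReadLine()) != null) yield return line;
    }
}

// ... then, at the call site:
// StoreData(context, data, uri, metaData, CompressionScheme.None, false,
//           (recs, s) => WriteLines(recs, s), s => ReadLines(s));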
public override void Ingress<T>(DryadLinqContext context, IEnumerable<T> source, Uri dataSetUri,
                                DryadLinqMetaData metaData, CompressionScheme outputScheme,
                                bool isTemp, Expression<Action<IEnumerable<T>, Stream>> serializer)
{
    // Note: the serializer parameter is accepted but never used on this code path;
    // records always go through the DryadLinq record writer.
    DryadLinqFactory<T> factory = (DryadLinqFactory<T>)DryadLinqCodeGen.GetFactory(context, typeof(T));
    using (Stream stream = context.GetHdfsClient.GetDfsStreamWriter(dataSetUri))
    {
        DryadLinqBlockStream nativeStream = new DryadLinqBlockStream(stream);
        DryadLinqRecordWriter<T> writer = factory.MakeWriter(nativeStream);
        foreach (T rec in source)
        {
            writer.WriteRecordSync(rec);
        }
        writer.Close();
    }
}
public Region(int x, int y)
{
    Console.WriteLine(Directory.GetCurrentDirectory());
    using (FileStream data = File.Open($"./world/region/r-{x}-{y}.mca", FileMode.Open))
    {
        Console.WriteLine("test");
        BinaryReader reader = new BinaryReader(data);
        // The 4-byte chunk length prefix is read but never used; note that BinaryReader
        // is little-endian, while vanilla region files store this value big-endian.
        int compressedChunkLength = (int)reader.ReadUInt32();
        CompressionScheme compressionScheme = (CompressionScheme)reader.ReadByte();
        Stream dataStream;
        if (compressionScheme == CompressionScheme.Zlib)
        {
            dataStream = new ZlibStream(data, CompressionMode.Decompress);
        }
        else
        {
            throw new NotImplementedException("GZIP not implemented");
        }
        NbtReader nbtReader = new NbtReader(dataStream);
        Console.WriteLine($"Name: {nbtReader.ReadAsTag().Name}");
    }
}
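// A hedged completion for the unhandled branch above: region chunk data may also be
// GZip-compressed (scheme 1) or stored uncompressed (scheme 3 in newer format versions).
// The GZip/Uncompressed enum members are assumptions mirroring the Zlib member used above.
private static Stream OpenChunkStream(Stream data, CompressionScheme scheme)
{
    switch (scheme)
    {
        case CompressionScheme.Zlib:
            return new ZlibStream(data, CompressionMode.Decompress);
        case CompressionScheme.GZip:
            return new System.IO.Compression.GZipStream(
                data, System.IO.Compression.CompressionMode.Decompress);
        case CompressionScheme.Uncompressed:
            return data; // chunk payload stored as-is
        default:
            throw new NotImplementedException($"Compression scheme {scheme} not supported");
    }
}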
internal DryadLinqFileBlockStream(string filePath, FileMode mode, FileAccess access, CompressionScheme scheme)
{
    this.Initialize(filePath, mode, access, scheme);
}
internal DryadLinqFileBlockStream(string filePath, FileAccess access, CompressionScheme scheme)
{
    FileMode mode = (access == FileAccess.Read) ? FileMode.Open : FileMode.OpenOrCreate;
    this.Initialize(filePath, mode, access, scheme);
}
private void Initialize(string filePath, FileMode mode, FileAccess access, CompressionScheme scheme)
{
    try
    {
        FileOptions options = FileOptions.None;
        if (access == FileAccess.Read)
        {
            options |= FileOptions.SequentialScan;
            // options |= (FileOptions)FILE_FLAG_NO_BUFFERING;
        }
        else
        {
            // options |= FileOptions.WriteThrough;
        }
        // options |= FileOptions.Asynchronous;
        this.m_fstream = new FileStream(filePath, mode, access, FileShare.Read, DefaultBuffSize, options);
    }
    catch (Exception e)
    {
        throw new DryadLinqException(DryadLinqErrorCode.CannotAccesFilePath,
                                     String.Format(SR.CannotAccesFilePath, filePath), e);
    }
    this.m_fhandle = m_fstream.SafeFileHandle;
    this.m_isClosed = false;
    this.m_compressionScheme = scheme;
    this.m_compressStream = null;
}
public void Deserialize(Stream input)
{
    var basePosition = input.Position;

    // read as two unsigned longs so we don't have to actually
    // decode the strings
    var version1 = input.ReadValueU64(false);
    var version2 = input.ReadValueU64(false);

    if (version1 == 0x4552462056322E31) // ERF V2.1
    {
        input.Seek(basePosition + 8, SeekOrigin.Begin);
        throw new NotSupportedException();
    }
    else if (version1 == 0x4500520046002000 && version2 == 0x560032002E003000) // ERF V2.0
    {
        input.Seek(basePosition + 16, SeekOrigin.Begin);
        this.Version = 1;

        var fileCount = input.ReadValueU32();
        var unknown14 = input.ReadValueU32();
        var unknown18 = input.ReadValueU32();
        var unknown1C = input.ReadValueU32();

        this.Flags = 0;
        this.Encryption = EncryptionScheme.None;
        this.Compression = CompressionScheme.None;
        this.ContentId = 0;
        this.PasswordDigest = null;

        this.Entries.Clear();
        for (uint i = 0; i < fileCount; i++)
        {
            var entry = new Entry();
            entry.Name = input.ReadString(64, true, Encoding.Unicode);
            entry.CalculateHashes();
            entry.Offset = input.ReadValueU32();
            entry.UncompressedSize = input.ReadValueU32();
            entry.CompressedSize = entry.UncompressedSize;
            this.Entries.Add(entry);
        }
    }
    else if (version1 == 0x4500520046002000 && version2 == 0x560032002E003200) // ERF V2.2
    {
        input.Seek(basePosition + 16, SeekOrigin.Begin);
        this.Version = 2;

        var fileCount = input.ReadValueU32();
        var year = input.ReadValueU32();
        var day = input.ReadValueU32();
        var unknown1C = input.ReadValueU32(); // always 0xFFFFFFFF?
        var flags = input.ReadValueU32();
        var contentId = input.ReadValueU32();
        var passwordDigest = new byte[16];
        input.Read(passwordDigest, 0, passwordDigest.Length);

        if (unknown1C != 0xFFFFFFFF)
        {
            throw new InvalidOperationException();
        }

        this.Flags = (flags & 0x1FFFFF0F) >> 0;
        this.Encryption = (EncryptionScheme)((flags & 0x000000F0) >> 4);
        this.Compression = (CompressionScheme)((flags & 0xE0000000) >> 29);

        if (this.Flags != 0 && this.Flags != 1)
        {
            throw new FormatException("unknown flags value");
        }

        this.ContentId = contentId;
        this.PasswordDigest = passwordDigest;

        this.Entries.Clear();
        for (uint i = 0; i < fileCount; i++)
        {
            var entry = new Entry();
            entry.Name = input.ReadString(64, true, Encoding.Unicode);
            entry.CalculateHashes();
            entry.Offset = input.ReadValueU32();
            entry.CompressedSize = input.ReadValueU32();
            entry.UncompressedSize = input.ReadValueU32();
            this.Entries.Add(entry);
        }
    }
    else if (version1 == 0x4500520046002000 && version2 == 0x560033002E003000) // ERF V3.0
    {
        input.Seek(basePosition + 16, SeekOrigin.Begin);
        this.Version = 3;

        var stringTableSize = input.ReadValueU32();
        var fileCount = input.ReadValueU32();
        var flags = input.ReadValueU32();
        var contentId = input.ReadValueU32();
        var passwordDigest = new byte[16];
        input.Read(passwordDigest, 0, passwordDigest.Length);

        this.Flags = (flags & 0x1FFFFF0F) >> 0;
        this.Encryption = (EncryptionScheme)((flags & 0x000000F0) >> 4);
        this.Compression = (CompressionScheme)((flags & 0xE0000000) >> 29);

        if (this.Flags != 0 && this.Flags != 1)
        {
            throw new FormatException("unknown flags value");
        }

        this.ContentId = contentId;
        this.PasswordDigest = passwordDigest;

        MemoryStream stringTable = stringTableSize == 0
                                       ? null
                                       : input.ReadToMemoryStream(stringTableSize);

        this.Entries.Clear();
        for (uint i = 0; i < fileCount; i++)
        {
            var entry = new Entry();
            uint nameOffset = input.ReadValueU32();
            entry.NameHash = input.ReadValueU64();
            if (nameOffset != 0xFFFFFFFF)
            {
                // Note: stringTable is null here when stringTableSize == 0, so a named
                // entry in a table-less archive would throw a NullReferenceException.
                if (nameOffset + 1 > stringTable.Length)
                {
                    throw new FormatException("file name exceeds string table bounds");
                }
                stringTable.Position = nameOffset;
                entry.Name = stringTable.ReadStringZ(Encoding.ASCII);
                if (entry.Name.HashFNV64() != entry.NameHash)
                {
                    throw new InvalidOperationException("hash mismatch");
                }
            }
            else
            {
                entry.Name = null;
            }
            entry.TypeHash = input.ReadValueU32();
            entry.Offset = input.ReadValueU32();
            entry.CompressedSize = input.ReadValueU32();
            entry.UncompressedSize = input.ReadValueU32();
            this.Entries.Add(entry);
        }
    }
    else
    {
        throw new FormatException("unsupported / unknown ERF format");
    }
}
public static void DecompressEntry(sfarFile entry, Stream input, Stream output, CompressionScheme cScheme)
{
#endif
    // Note: this is the non-GUI half of a #if (WITH_GUI) split signature; `count`,
    // `highPerc`, and `worker` come from the WITH_GUI header shown in a later fragment,
    // so the non-GUI build as excerpted would neither compile nor advance `count`.
    byte[] inputBlock;
    byte[] outputBlock = new byte[DLCBase.MaximumBlockSize];
    var left = entry.uncompressedSize;
    input.Seek(entry.dataOffset[0], SeekOrigin.Begin);
    int numBlocks = (int)(Math.Ceiling(entry.uncompressedSize / (float)DLCBase.MaximumBlockSize));
    if (entry.blockSizeIndex == -1)
    {
        // Unblocked entry: copy straight through.
        output.WriteFromStream(input, entry.uncompressedSize);
    }
    else
    {
        while (left > 0)
        {
            uint compressedBlockSize = entry.blockSizeArray[count];
            if (compressedBlockSize == 0)
            {
                compressedBlockSize = DLCBase.MaximumBlockSize;
            }
            if (cScheme == CompressionScheme.None)
            {
                output.WriteFromStream(input, compressedBlockSize);
                left -= compressedBlockSize;
            }
            else if (cScheme == CompressionScheme.Lzma)
            {
                if (compressedBlockSize == DLCBase.MaximumBlockSize || compressedBlockSize == left)
                {
                    // A "compressed" size equal to the block size (or to what is left)
                    // marks a stored block.
                    output.WriteFromStream(input, compressedBlockSize);
                    left -= compressedBlockSize;
                }
                else
                {
                    var uncompressedBlockSize = (uint)Math.Min(left, DLCBase.MaximumBlockSize);
                    if (compressedBlockSize < 5)
                    {
                        throw new InvalidOperationException();
                    }
                    inputBlock = new byte[compressedBlockSize];
                    //var properties = input.ReadBytes(5);
                    //compressedBlockSize -= 5;
                    if (input.Read(inputBlock, 0, (int)compressedBlockSize) != compressedBlockSize)
                    {
                        throw new EndOfStreamException();
                    }
                    uint actualUncompressedBlockSize = uncompressedBlockSize;
                    uint actualCompressedBlockSize = compressedBlockSize;
                    /*var error = LZMA.Decompress(
                        outputBlock,
                        ref actualUncompressedBlockSize,
                        inputBlock,
                        ref actualCompressedBlockSize,
                        properties,
                        (uint)properties.Length);

                    if (error != LZMA.ErrorCode.Ok ||
                        uncompressedBlockSize != actualUncompressedBlockSize ||
                        compressedBlockSize != actualCompressedBlockSize)
                    {
                        throw new InvalidOperationException();
                    }*/
                    outputBlock = SevenZipHelper.LZMA.Decompress(inputBlock, actualUncompressedBlockSize);
                    if (outputBlock.Length != actualUncompressedBlockSize)
                    {
                        throw new NotImplementedException();
                    }
                    output.Write(outputBlock, 0, (int)actualUncompressedBlockSize);
                    left -= uncompressedBlockSize;
                }
            }
            else
            {
                throw new NotImplementedException();
            }
#if (WITH_GUI)
            if (worker != null)
            {
                int perc = (int)Math.Ceiling(count++ / (float)numBlocks * 100);
                if (perc > highPerc)
                {
                    highPerc = perc;
                    if (perc > 100)
                    {
                        perc = 100;
                    }
                    worker.ReportProgress(perc);
                }
            }
            else
            {
                count++;
            }
#endif
        }
    }
} // end of DecompressEntry
private void getStructure(Stream input)
{
    var magic = input.ReadValueU32(Endian.Little);
    if (magic != sfarHeader && // SFAR
        magic.Swap() != sfarHeader)
    {
        throw new FormatException("Not a valid sfar file.");
    }
    var endian = magic == sfarHeader ? Endian.Little : Endian.Big;

    var version = input.ReadValueU32(endian);
    if (version != 0x00010000)
    {
        throw new FormatException("Not supported version.");
    }

    dataOffset = input.ReadValueU32(endian);
    uint minDataOffset = dataOffset;
    entryOffset = input.ReadValueU32(endian);
    var fileTableCount = numOfFiles = input.ReadValueU32(endian);
    blockTableOffset = input.ReadValueU32(endian);
    MaximumBlockSize = input.ReadValueU32(endian);
    this.CompressionScheme = input.ReadValueEnum<CompressionScheme>(endian);

    if (entryOffset != 0x20)
    {
        throw new FormatException();
    }
    if (MaximumBlockSize != 0x010000)
    {
        throw new FormatException();
    }
    if (this.CompressionScheme != CompressionScheme.None &&
        this.CompressionScheme != CompressionScheme.Lzma &&
        this.CompressionScheme != CompressionScheme.Lzx)
    {
        throw new FormatException();
    }

    input.Seek(entryOffset, SeekOrigin.Begin);
    for (uint i = 0; i < fileTableCount; i++)
    {
        sfarFile entry = new sfarFile();
        entry.entryOffset = input.Position;
        entry.nameHash = input.ReadFileNameHash();
        entry.blockSizeIndex = input.ReadValueS32(endian);
        entry.uncompressedSize = input.ReadValueU32(endian);
        entry.uncompressedSize |= ((long)input.ReadValueU8()) << 32;
        totalUncSize += entry.uncompressedSize;
        if (entry.blockSizeIndex == -1)
        {
            // Unblocked (stored) entry: a single 40-bit data offset.
            entry.dataOffset = new long[1];
            entry.dataOffset[0] = input.ReadValueU32(endian);
            entry.dataOffset[0] |= ((long)input.ReadValueU8()) << 32;
            totalComprSize += entry.uncompressedSize;
        }
        else
        {
            int numBlocks = (int)Math.Ceiling(entry.uncompressedSize / (double)MaximumBlockSize);
            entry.dataOffset = new long[numBlocks];
            entry.blockSizeArray = new ushort[numBlocks];
            entry.dataOffset[0] = input.ReadValueU32(endian);
            entry.dataOffset[0] |= ((long)input.ReadValueU8()) << 32;
            long lastPosition = input.Position;
            input.Seek(getBlockOffset(entry.blockSizeIndex, entryOffset, fileTableCount), SeekOrigin.Begin);
            entry.blockSizeArray[0] = input.ReadValueU16();
            for (int j = 1; j < numBlocks; j++)
            {
                entry.blockSizeArray[j] = input.ReadValueU16();
                // Each block starts where the previous one ended, so offset j adds the
                // size of block j - 1. (The original added blockSizeArray[j], which
                // looks like an off-by-one.) Note block 0's size is never counted into
                // totalComprSize here.
                entry.dataOffset[j] = entry.dataOffset[j - 1] + entry.blockSizeArray[j - 1];
                totalComprSize += entry.blockSizeArray[j];
            }
            input.Seek(lastPosition, SeekOrigin.Begin);
        }
        fileList.Add(entry);
    } // end of file table loop
}
// WITH_GUI variant of the DecompressEntry signature (the shared body is the fragment
// above); this half declares the progress-reporting locals used by the worker callback.
public static void DecompressEntry(sfarFile entry, Stream input, Stream output,
                                   CompressionScheme cScheme, BackgroundWorker worker = null)
{
    int highPerc = 0;
    int count = 0;
private static void Wrap(Stream output, byte[] bytes, CompressionScheme compressionScheme)
{
    byte[] compressedBytes;

    if (bytes.Length <= BlockSize)
    {
        // Single-block payload.
        if (compressionScheme == CompressionScheme.LZO)
        {
            compressedBytes = new byte[bytes.Length + (bytes.Length / 16) + 64 + 3];
            var actualCompressedSize = compressedBytes.Length;
            var result = MiniLZO.LZO.Compress(
                bytes, 0, bytes.Length,
                compressedBytes, 0, ref actualCompressedSize,
                new MiniLZO.CompressWorkBuffer());
            if (result != MiniLZO.ErrorCode.Success)
            {
                throw new SaveCorruptionException($"LZO compression failure ({result})");
            }
            Array.Resize(ref compressedBytes, actualCompressedSize);
        }
        else if (compressionScheme == CompressionScheme.Zlib)
        {
            using (var temp = new MemoryStream())
            {
                var zlib = new DeflaterOutputStream(temp);
                zlib.WriteBytes(bytes);
                zlib.Finish();
                temp.Flush();
                temp.Position = 0;
                compressedBytes = temp.ReadBytes((int)temp.Length);
            }
        }
        else
        {
            throw new InvalidOperationException("unsupported compression scheme");
        }
    }
    else
    {
        // Multi-block payload: a big-endian block count, a table of
        // (compressed size, uncompressed size) pairs, then the block data.
        if (compressionScheme == CompressionScheme.LZO)
        {
            int innerCompressedOffset = 0;
            int innerCompressedSizeLeft = bytes.Length;
            using (var blockData = new MemoryStream())
            {
                var blockCount = (innerCompressedSizeLeft + BlockSize) / BlockSize;
                blockData.WriteValueS32(blockCount, Endian.Big);
                blockData.Position = 4 + (blockCount * 8);
                var blockInfos = new List<Tuple<uint, uint>>();
                while (innerCompressedSizeLeft > 0)
                {
                    var blockUncompressedSize = Math.Min(BlockSize, innerCompressedSizeLeft);
                    compressedBytes = new byte[blockUncompressedSize + (blockUncompressedSize / 16) + 64 + 3];
                    var actualCompressedSize = compressedBytes.Length;
                    var result = MiniLZO.LZO.Compress(
                        bytes, innerCompressedOffset, blockUncompressedSize,
                        compressedBytes, 0, ref actualCompressedSize,
                        new MiniLZO.CompressWorkBuffer());
                    if (result != MiniLZO.ErrorCode.Success)
                    {
                        throw new SaveCorruptionException($"LZO compression failure ({result})");
                    }
                    blockData.Write(compressedBytes, 0, actualCompressedSize);
                    blockInfos.Add(new Tuple<uint, uint>((uint)actualCompressedSize, BlockSize));
                    innerCompressedOffset += blockUncompressedSize;
                    innerCompressedSizeLeft -= blockUncompressedSize;
                }
                blockData.Position = 4;
                foreach (var blockInfo in blockInfos)
                {
                    blockData.WriteValueU32(blockInfo.Item1, Endian.Big);
                    blockData.WriteValueU32(blockInfo.Item2, Endian.Big);
                }
                blockData.Position = 0;
                compressedBytes = blockData.ReadBytes((int)blockData.Length);
            }
        }
        else if (compressionScheme == CompressionScheme.Zlib)
        {
            int innerCompressedOffset = 0;
            int innerCompressedSizeLeft = bytes.Length;
            using (var blockData = new MemoryStream())
            {
                var blockCount = (innerCompressedSizeLeft + BlockSize) / BlockSize;
                blockData.WriteValueS32(blockCount, Endian.Big);
                blockData.Position = 4 + (blockCount * 8);
                var blockInfos = new List<Tuple<uint, uint>>();
                while (innerCompressedSizeLeft > 0)
                {
                    var blockUncompressedSize = Math.Min(BlockSize, innerCompressedSizeLeft);
                    using (var temp = new MemoryStream())
                    {
                        var zlib = new DeflaterOutputStream(temp);
                        zlib.Write(bytes, innerCompressedOffset, blockUncompressedSize);
                        zlib.Finish();
                        temp.Flush();
                        temp.Position = 0;
                        compressedBytes = temp.ReadBytes((int)temp.Length);
                    }
                    blockData.WriteBytes(compressedBytes);
                    blockInfos.Add(new Tuple<uint, uint>((uint)compressedBytes.Length, BlockSize));
                    innerCompressedOffset += blockUncompressedSize;
                    innerCompressedSizeLeft -= blockUncompressedSize;
                }
                blockData.Position = 4;
                foreach (var blockInfo in blockInfos)
                {
                    blockData.WriteValueU32(blockInfo.Item1, Endian.Big);
                    blockData.WriteValueU32(blockInfo.Item2, Endian.Big);
                }
                blockData.Position = 0;
                compressedBytes = blockData.ReadBytes((int)blockData.Length);
            }
        }
        else
        {
            // (The original threw "unsupported platform" here; the message is aligned
            // with the parallel branch above.)
            throw new InvalidOperationException("unsupported compression scheme");
        }
    }

    // Prefix with the uncompressed length, then hash the whole payload.
    byte[] uncompressedBytes;
    using (var uncompressedData = new MemoryStream())
    {
        uncompressedData.WriteValueS32(bytes.Length, Endian.Big);
        uncompressedData.WriteBytes(compressedBytes);
        uncompressedData.Position = 0;
        uncompressedBytes = uncompressedData.ReadBytes((int)uncompressedData.Length);
    }
    byte[] computedHash;
    using (var sha1 = new System.Security.Cryptography.SHA1Managed())
    {
        computedHash = sha1.ComputeHash(uncompressedBytes);
    }
    output.WriteBytes(computedHash);
    output.WriteBytes(uncompressedBytes);
}
private static byte[] Unwrap(Stream input, CompressionScheme compressionScheme, DeserializeSettings settings)
{
    var readSha1Hash = input.ReadBytes(20);
    using (var data = input.ReadToMemoryStream((int)(input.Length - 20)))
    {
        byte[] computedSha1Hash;
        using (var sha1 = new System.Security.Cryptography.SHA1Managed())
        {
            computedSha1Hash = sha1.ComputeHash(data);
        }
        if ((settings & DeserializeSettings.IgnoreSha1Mismatch) == 0 &&
            readSha1Hash.SequenceEqual(computedSha1Hash) == false)
        {
            throw new SaveCorruptionException("invalid SHA1 hash");
        }

        data.Position = 0;
        var uncompressedSize = data.ReadValueU32(Endian.Big);
        var uncompressedBytes = new byte[uncompressedSize];

        if (uncompressedSize <= BlockSize)
        {
            // Single-block payload.
            if (compressionScheme == CompressionScheme.LZO)
            {
                var actualUncompressedSize = (int)uncompressedSize;
                var compressedSize = (int)(data.Length - 4);
                var compressedBytes = data.ReadBytes(compressedSize);
                var result = MiniLZO.LZO.DecompressSafe(
                    compressedBytes, 0, (int)compressedSize,
                    uncompressedBytes, 0, ref actualUncompressedSize);
                if (result != MiniLZO.ErrorCode.Success)
                {
                    throw new SaveCorruptionException($"LZO decompression failure ({result})");
                }
                if (actualUncompressedSize != (int)uncompressedSize)
                {
                    throw new SaveCorruptionException("LZO decompression failure (uncompressed size mismatch)");
                }
            }
            else if (compressionScheme == CompressionScheme.Zlib)
            {
                var compressedSize = (int)(data.Length - 4);
                using (var temp = data.ReadToMemoryStream(compressedSize))
                {
                    var zlib = new InflaterInputStream(temp);
                    try
                    {
                        if (zlib.Read(uncompressedBytes, 0, uncompressedBytes.Length) !=
                            uncompressedBytes.Length)
                        {
                            throw new SaveCorruptionException(
                                "zlib decompression failure (uncompressed size mismatch)");
                        }
                    }
                    catch (ICSharpCode.SharpZipLib.SharpZipBaseException e)
                    {
                        throw new SaveCorruptionException($"zlib decompression failure ({e.Message})", e);
                    }
                }
            }
            else
            {
                throw new InvalidOperationException("unsupported compression scheme");
            }
        }
        else
        {
            // Multi-block payload: a big-endian block count followed by
            // (compressed size, uncompressed size) pairs, then the block data.
            if (compressionScheme == CompressionScheme.LZO)
            {
                var blockCount = data.ReadValueU32(Endian.Big);
                var blockInfos = new List<Tuple<uint, uint>>();
                for (uint i = 0; i < blockCount; i++)
                {
                    var blockCompressedSize = data.ReadValueU32(Endian.Big);
                    var blockUncompressedSize = data.ReadValueU32(Endian.Big);
                    blockInfos.Add(new Tuple<uint, uint>(blockCompressedSize, blockUncompressedSize));
                }
                int uncompressedOffset = 0;
                int uncompressedSizeLeft = (int)uncompressedSize;
                foreach (var blockInfo in blockInfos)
                {
                    var blockUncompressedSize = Math.Min((int)blockInfo.Item2, uncompressedSizeLeft);
                    var actualUncompressedSize = blockUncompressedSize;
                    var compressedSize = (int)blockInfo.Item1;
                    var compressedBytes = data.ReadBytes(compressedSize);
                    var result = MiniLZO.LZO.DecompressSafe(
                        compressedBytes, 0, compressedSize,
                        uncompressedBytes, uncompressedOffset, ref actualUncompressedSize);
                    if (result != MiniLZO.ErrorCode.Success)
                    {
                        throw new SaveCorruptionException($"LZO decompression failure ({result})");
                    }
                    if (actualUncompressedSize != blockUncompressedSize)
                    {
                        throw new SaveCorruptionException(
                            "LZO decompression failure (uncompressed size mismatch)");
                    }
                    uncompressedOffset += blockUncompressedSize;
                    uncompressedSizeLeft -= blockUncompressedSize;
                }
                if (uncompressedSizeLeft != 0)
                {
                    throw new SaveCorruptionException("LZO decompression failure (uncompressed size left != 0)");
                }
            }
            else if (compressionScheme == CompressionScheme.Zlib)
            {
                var blockCount = data.ReadValueU32(Endian.Big);
                var blockInfos = new List<Tuple<uint, uint>>();
                for (uint i = 0; i < blockCount; i++)
                {
                    var blockCompressedSize = data.ReadValueU32(Endian.Big);
                    var blockUncompressedSize = data.ReadValueU32(Endian.Big);
                    blockInfos.Add(new Tuple<uint, uint>(blockCompressedSize, blockUncompressedSize));
                }
                int uncompressedOffset = 0;
                int uncompressedSizeLeft = (int)uncompressedSize;
                foreach (var blockInfo in blockInfos)
                {
                    var blockUncompressedSize = Math.Min((int)blockInfo.Item2, uncompressedSizeLeft);
                    int actualUncompressedSize;
                    var compressedSize = (int)blockInfo.Item1;
                    using (var temp = data.ReadToMemoryStream(compressedSize))
                    {
                        var zlib = new InflaterInputStream(temp);
                        try
                        {
                            actualUncompressedSize = zlib.Read(
                                uncompressedBytes, uncompressedOffset, uncompressedBytes.Length);
                        }
                        catch (ICSharpCode.SharpZipLib.SharpZipBaseException e)
                        {
                            throw new SaveCorruptionException($"zlib decompression failure ({e.Message})", e);
                        }
                    }
                    if (actualUncompressedSize != blockUncompressedSize)
                    {
                        throw new SaveCorruptionException(
                            "zlib decompression failure (uncompressed size mismatch)");
                    }
                    uncompressedOffset += blockUncompressedSize;
                    uncompressedSizeLeft -= blockUncompressedSize;
                }
                if (uncompressedSizeLeft != 0)
                {
                    throw new SaveCorruptionException("zlib decompression failure (uncompressed size left != 0)");
                }
            }
            else
            {
                throw new InvalidOperationException("unsupported compression scheme");
            }
        }
        return uncompressedBytes;
    }
}
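// Round-trip usage sketch for the Wrap/Unwrap pair above. The scheme choice and the
// settings value are illustrative; DeserializeSettings.None is assumed to exist as the
// "no flags" value of the enum.
byte[] payload = System.Text.Encoding.UTF8.GetBytes("save data...");
using (var buffer = new MemoryStream())
{
    Wrap(buffer, payload, CompressionScheme.LZO);
    buffer.Position = 0;
    byte[] restored = Unwrap(buffer, CompressionScheme.LZO, DeserializeSettings.None);
    // restored now holds the same bytes as payload
}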
/// <summary>
/// Ingress a .NET collection to a specified store location.
/// </summary>
/// <typeparam name="T">The record type of the collection.</typeparam>
/// <param name="context">An instance of DryadLinqContext.</param>
/// <param name="source">The collection to be ingressed.</param>
/// <param name="dataSetUri">The URI to store the collection.</param>
/// <param name="metaData">The metadata for the collection.</param>
/// <param name="outputScheme">The compression scheme used to store the collection.</param>
/// <param name="isTemp">true to only store the collection temporarily with a time lease.</param>
public abstract void Ingress<T>(DryadLinqContext context, IEnumerable<T> source, Uri dataSetUri,
                                DryadLinqMetaData metaData, CompressionScheme outputScheme,
                                bool isTemp = false);
internal DLinqOutputNode(DryadLinqContext context, Uri outputUri, bool isTempOutput,
                         CompressionScheme outputScheme, Expression queryExpr, DLinqQueryNode child)
    : base(QueryNodeType.OutputTable, child.QueryGen, queryExpr, child)
{
    if (TypeSystem.IsTypeOrAnyGenericParamsAnonymous(child.OutputTypes[0]))
    {
        throw DryadLinqException.Create(DryadLinqErrorCode.OutputTypeCannotBeAnonymous,
                                        SR.OutputTypeCannotBeAnonymous, queryExpr);
    }
    this.m_opName = "Output";
    this.m_context = context;
    this.m_outputUri = outputUri;
    this.m_outputType = child.OutputTypes[0];
    this.m_outputDataSetInfo = child.OutputDataSetInfo;
    this.m_partitionCount = child.OutputDataSetInfo.partitionInfo.Count;
    this.m_dynamicManager = DynamicManager.Splitter;
    this.m_outputCompressionScheme = outputScheme;
    this.m_isTempOutput = isTempOutput;
}
// Dry-run counterpart of CompressFiles: logs what would be compressed instead of writing.
public void CompressFiles(System.IO.Stream outputStream, string[] sources, CompressionScheme scheme,
                          int compressionLevel, int pathDepth)
{
    LogDryRun("CompressFiles", String.Format(
        "Compressing [{0}] into a stream using the compression scheme {1}",
        String.Join(", ", sources), scheme.ToString()));
}
public void Deserialize(Stream input)
{
    var magic = input.ReadUInt32();
    if (magic != 0x53464152 && // SFAR
        magic.Swap() != 0x53464152)
    {
        throw new FormatException();
    }
    var endian = magic == 0x53464152 ? ByteOrder.LittleEndian : ByteOrder.BigEndian;

    var version = input.ReadUInt32(endian);
    if (version != 0x00010000)
    {
        throw new FormatException();
    }

    var dataOffset = input.ReadUInt32(endian);
    var fileTableOffset = input.ReadUInt32(endian);
    var fileTableCount = input.ReadUInt32(endian);
    var blockSizeTableOffset = input.ReadUInt32(endian);
    MaximumBlockSize = input.ReadUInt32(endian);
    CompressionScheme = input.ReadEnum<CompressionScheme>(endian);

    if (fileTableOffset != 0x20)
    {
        throw new FormatException();
    }
    if (MaximumBlockSize != 0x010000)
    {
        throw new FormatException();
    }
    /*
    if (this.CompressionScheme != SFXArchive.CompressionScheme.None &&
        this.CompressionScheme != SFXArchive.CompressionScheme.LZMA &&
        this.CompressionScheme != SFXArchive.CompressionScheme.LZX)
    {
        throw new FormatException();
    }
    */

    input.Seek(blockSizeTableOffset, SeekOrigin.Begin);
    var blockSizeTableSize = dataOffset - fileTableOffset;
    var blockSizeTableCount = blockSizeTableSize / 2;
    BlockSizes.Clear();
    for (uint i = 0; i < blockSizeTableCount; i++)
    {
        BlockSizes.Add(input.ReadUInt16(endian));
    }

    input.Seek(fileTableOffset, SeekOrigin.Begin);
    for (uint i = 0; i < fileTableCount; i++)
    {
        // ReSharper disable UseObjectOrCollectionInitializer
        var entry = new Entry();
        // ReSharper restore UseObjectOrCollectionInitializer
        entry.NameHash = input.ReadFileNameHash();
        entry.BlockSizeIndex = input.ReadInt32(endian);
        entry.UncompressedSize = input.ReadUInt32(endian);
        entry.UncompressedSize |= ((long)input.ReadUInt8()) << 32;
        entry.Offset = input.ReadUInt32(endian);
        entry.Offset |= ((long)input.ReadUInt8()) << 32;
        Entries.Add(entry);
    }
}