public GZCompressorStream(IDataProcessorStream myInnerStream, CompressionMode mode) {
	this.innerStream = myInnerStream;
	length = 0;
	this.BlockMetadata = new List<IFileBlockMetadata>();
	gz = new GZipStream(myInnerStream, mode, true); // leaveOpen=true: the inner stream is owned by the pipeline
}
/// <summary>
/// Initializes a new instance of the <see cref="QuickLZCompressionStream"/> class.
/// </summary>
/// <param name="targetStream">The target stream.</param>
/// <param name="writeBuffer">The write buffer.</param>
/// <param name="compressionBuffer">The compression buffer.</param>
public QuickLZCompressionStream(IDataProcessorStream targetStream, byte[] writeBuffer, byte[] compressionBuffer) {
	//QuickLZ = Pool.Default.Alloc();
	this.BlockMetadata = new List<IFileBlockMetadata>();
	outputStream = targetStream;
	_writeBuffer = writeBuffer;
	_compressedBuffer = compressionBuffer;
}
// if encrypt is false, we prepare the stream to decrypt
//public EncryptorStream(IDataProcessorStream myInnerStream, bool encrypt, RSACryptoServiceProvider clientKey){
public EncryptorStream(IDataProcessorStream myInnerStream, bool encrypt, byte[] sessionKey, byte[] iv) {
	this.BlockMetadata = new List<IFileBlockMetadata>();
	try{
		this.innerStream = myInnerStream;
		length = 0;
		//GetKeyPair(out keyPair, encrypt);
		//key = clientKey;
		/*aes.KeySize = 256;
		aes.BlockSize = 128;
		aes.Mode = CipherMode.CBC;
		transform = aes.CreateEncryptor();*/
		if (encrypt) {
			AesCryptoServiceProvider aes = new AesCryptoServiceProvider();
			if (sessionKey != null)
				Console.WriteLine("EncryptorStream: session key length=" + sessionKey.Length + ", iv length=" + iv.Length + ", block size=" + aes.BlockSize);
			else
				Console.WriteLine("EncryptorStream: session key IS NULL!");
			aes.Key = sessionKey;
			aes.IV = iv;
			this.EncryptionMetadata = sessionKey;
			transform = aes.CreateEncryptor();
			/* legacy RSA key-exchange code, kept for reference:
			RSAPKCS1KeyExchangeFormatter keyFormatter = new RSAPKCS1KeyExchangeFormatter(key);
			byte[] keyEncrypted = keyFormatter.CreateKeyExchange(aes.Key, aes.GetType());
			// Create byte arrays to contain the length values of the key and IV.
			byte[] LenK = new byte[4];
			byte[] LenIV = new byte[4];
			if(encrypt){
				int lKey = keyEncrypted.Length;
				LenK = BitConverter.GetBytes(lKey);
				int lIV = aes.IV.Length;
				LenIV = BitConverter.GetBytes(lIV);
				EncryptionMetadata = new byte[4+4+lKey+lIV];
				Array.Copy(LenK, EncryptionMetadata, 4);
				Array.Copy(LenIV, 0, EncryptionMetadata, 4, 4);
				Array.Copy(keyEncrypted, 0, EncryptionMetadata, 8, lKey);
				Array.Copy(aes.IV, 0, EncryptionMetadata, 8+lKey, lIV);
			}*/
			outStream = new CryptoStream(innerStream, transform, CryptoStreamMode.Write);
		}
		else {
			// decryption path not implemented yet
			// http://msdn.microsoft.com/fr-fr/library/system.security.cryptography.x509certificates.x509certificate2.aspx
		}
	}
	catch (Exception e) {
		Logger.Append(Severity.ERROR, "Could not initialize encryption: " + e.Message + " --- " + e.StackTrace);
	}
}
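// Hedged sketch (not part of the original code base): the encrypt branch above
// reduces to the standard .NET pattern below — wrap the destination stream in a
// CryptoStream built from an AES encryptor keyed with an externally supplied
// session key and IV. All names here (EncryptSketch, plaintext) are illustrative.
using System;
using System.IO;
using System.Security.Cryptography;

static class EncryptSketch {
	static byte[] Encrypt(byte[] plaintext, byte[] sessionKey, byte[] iv) {
		using (Aes aes = Aes.Create()) {
			aes.Key = sessionKey; // same wiring as EncryptorStream: key/IV come from the caller
			aes.IV = iv;
			using (MemoryStream sink = new MemoryStream())
			using (CryptoStream cs = new CryptoStream(sink, aes.CreateEncryptor(), CryptoStreamMode.Write)) {
				cs.Write(plaintext, 0, plaintext.Length);
				cs.FlushFinalBlock(); // pad and emit the final AES block
				return sink.ToArray();
			}
		}
	}
}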
public ClientDeduplicatorStream(IDataProcessorStream inputStream, uint storageNode, DedupIndex ddb) {
	this.inputStream = inputStream;
	this.currentPos = 0;
	this.length = 0;
	this.DedupedCount = 0;
	storageNodeId = storageNode;
	this.BlockMetadata = new List<IFileBlockMetadata>();
	dedupedBlocks = new List<long>();
	this.ddb = ddb;
}
public LzoCompressorStream(IDataProcessorStream inputStream, CompressorAlgorithm algorithm, int bufferSize) {
	this.inputStream = inputStream;
	this.currentPos = 0;
	this.length = 0;
	//this.compressorBufferSize = bufferSize; // 1k buffer; compression won't work well below 1k
	//tempBuffer = new byte[compressorBufferSize];
	internalOffset = 0;
	compressedSize = 0;
	gatherBuffer = new byte[minBlockSize];
	currentGatherPos = 0;
	this.BlockMetadata = new List<IFileBlockMetadata>();
}
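// Hedged sketch (illustrative names throughout) of the gather-buffer idea behind
// gatherBuffer/currentGatherPos above: small writes are accumulated until at least
// a full block is available, because block compressors waste space on tiny inputs.
using System;
using System.IO;

class GatherBufferSketch {
	private readonly byte[] gather = new byte[4096]; // assumed minimum block size
	private int gathered;
	private readonly Stream sink;

	public GatherBufferSketch(Stream sink) { this.sink = sink; }

	public void Write(byte[] data, int offset, int count) {
		while (count > 0) {
			int n = Math.Min(count, gather.Length - gathered);
			Array.Copy(data, offset, gather, gathered, n);
			gathered += n; offset += n; count -= n;
			if (gathered == gather.Length) {     // a full block is gathered:
				sink.Write(gather, 0, gathered); // hand it downstream (compression call elided)
				gathered = 0;
			}
		}
	}
}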
/// <summary>
/// Initializes a new instance of the <see cref="QuickLZCompressionStream"/> class.
/// </summary>
/// <param name="targetStream">The target.</param>
public QuickLZCompressionStream(IDataProcessorStream targetStream)
	: this(targetStream, 1 << 20) // Yes, this is 1 MB
{
}
/// <summary>
/// Initializes a new instance of the <see cref="QuickLZCompressionStream"/> class.
/// </summary>
/// <param name="targetStream">The target.</param>
/// <param name="bufferSize">Size of the buffer.</param>
public QuickLZCompressionStream(IDataProcessorStream targetStream, int bufferSize)
	: this(targetStream, new byte[bufferSize], new byte[bufferSize + 400]) // QuickLZ requires the destination buffer to be at least size + 400 bytes, for incompressible input
{
}
internal void Init() {
	Logger.Append(Severity.INFO, "Creating data pipeline with mode = " + this.Mode + ", flags = '" + this.Flags.ToString() + "'");
#if DEBUG
	if (ConfigManager.GetValue("BENCHMARK") != null) {
		this.OutputStream = new DummyStream();
	}
#endif
	if (this.Flags.HasFlag(DataProcessingFlags.CChecksum)) {
		counterStream = new NullSinkStream(new ChunkHasherStream(this.OutputStream), this.Mode);
	}
	else {
		counterStream = new NullSinkStream(this.OutputStream, this.Mode);
	}
	finalStream = counterStream;

	// top-of-chain streams
	firstStream = finalStream;
	if (this.Flags.HasFlag(DataProcessingFlags.CEncrypt) || this.Flags.HasFlag(DataProcessingFlags.SEncrypt)) {
		if (this.Mode == PipelineMode.Read) {
			throw new NotImplementedException("Pipeline read mode with decryption not yet implemented");
		}
		else {
			Console.WriteLine("Pipeline.init() : this.CryptoKey=" + this.CryptoKey);
			EncryptorStream encStream = new EncryptorStream(firstStream, true, this.CryptoKey, this.IV);
			this.EncryptionMetaData = encStream.EncryptionMetadata;
			// TODO!! take encryptionMetadata and add it to index
			firstStream = encStream;
		}
	}
	if (this.Flags.HasFlag(DataProcessingFlags.CCompress) || this.Flags.HasFlag(DataProcessingFlags.SCompress)) {
		if (this.Mode == PipelineMode.Read) {
			firstStream = new LZ4Decompressor(firstStream);
			//firstStream = new GZCompressorStream(firstStream, System.IO.Compression.CompressionMode.Decompress);
		}
		else {
			//firstStream = new QuickLZCompressionStream(firstStream);
			//firstStream = new GZCompressorStream(firstStream, System.IO.Compression.CompressionMode.Compress);
			firstStream = new LZ4CompressorStream(firstStream);
		}
	}
	if (this.Flags.HasFlag(DataProcessingFlags.CDedup) || this.Flags.HasFlag(DataProcessingFlags.SDedup)) {
		cdds = new ClientDeduplicatorStream(firstStream, this.StorageNode, ddb /*DedupIndex.Instance(0, true)*/);
		// TODO! remove cksum provider selection from here, find a more elegant solution
		//firstStream = new ChecksummerStream_MHash((ClientDeduplicatorStream)cdds);
		firstStream = new ChecksummerStream((ClientDeduplicatorStream)cdds);
		//firstStream = new TigerTreeHasherStream((ClientDeduplicatorStream)cdds);
	}
}
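// Hedged sketch (framework types only, illustrative names): Init() builds the
// pipeline as a decorator chain — each stage wraps the previous firstStream, so
// a write at the top flows checksum/dedup -> compress -> encrypt -> sink. The
// same chaining with stock streams looks like this:
using System.IO;
using System.IO.Compression;
using System.Security.Cryptography;

static class ChainSketch {
	static byte[] CompressThenEncrypt(byte[] data, Aes aes) {
		using (MemoryStream sink = new MemoryStream()) {
			using (CryptoStream encrypt = new CryptoStream(sink, aes.CreateEncryptor(), CryptoStreamMode.Write))
			using (GZipStream compress = new GZipStream(encrypt, CompressionMode.Compress)) {
				compress.Write(data, 0, data.Length); // compressed first, then encrypted, then written to the sink
			} // disposing flushes the chain in order: compressor, then encryptor
			return sink.ToArray(); // MemoryStream.ToArray() is valid even after the chain is disposed
		}
	}
}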
public LowPriorityStream(IDataProcessorStream innerStream) {
	this.innerStream = innerStream; // keep a reference to the wrapped stream, as the other processor streams do
	this.BlockMetadata = new List<IFileBlockMetadata>();
}
internal DataPipeline_(PipelineMode mode, Session s, Backup b, DataProcessingFlags flags) {
	backup = b;
	storageSession = s;
	BinaryFormatter formatter = new BinaryFormatter();
	BChunkHeader header = new BChunkHeader();
	header.DataFlags = flags;
	header.Version = Utilities.PlatForm.Instance().NodeVersion;
	//header.TaskId = taskId;
	header.TaskId = b.TaskId;

	// end-of-chain stream
	sessionStream = new NetworkStream(storageSession.DataSocket);
	this.Flags = flags;
#if DEBUG
	if (ConfigManager.GetValue("BENCHMARK") != null) {
		sessionStream = new DummyStream();
	}
#endif
	if (flags.HasFlag(DataProcessingFlags.CChecksum)) {
		finalStream = new NullSinkStream(new ChunkHasherStream(sessionStream), mode);
	}
	else {
		finalStream = new NullSinkStream(sessionStream, mode); // dummy dest stream
	}
	//firstStream = new NullSinkStream(); // test and benchmarking

	// top-of-chain streams
	firstStream = finalStream;
	if (flags.HasFlag(DataProcessingFlags.CEncrypt)) {
		// legacy path: no session key/IV is available here; the current EncryptorStream signature takes both
		EncryptorStream encStream = new EncryptorStream(firstStream, true, null, null);
		header.EncryptionMetaData = encStream.EncryptionMetadata;
		// TODO!! take encryptionMetadata and add it to index
		firstStream = encStream;
	}
	if (flags.HasFlag(DataProcessingFlags.CCompress)) {
		//firstStream = new CompressorStream(firstStream, CompressorAlgorithm.Lzo, 1024);
		firstStream = new GZCompressorStream(firstStream, System.IO.Compression.CompressionMode.Compress);
	}
	if (flags.HasFlag(DataProcessingFlags.CDedup)) {
		// third argument assumed: the current ClientDeduplicatorStream signature also takes the DedupIndex
		cdds = new ClientDeduplicatorStream(firstStream, s.ClientId, DedupIndex.Instance());
		// TODO! remove cksum provider selection from here, find a more elegant solution
		//firstStream = new ChecksummerStream_MHash((ClientDeduplicatorStream)cdds);
		firstStream = new ChecksummerStream((ClientDeduplicatorStream)cdds);
		//firstStream = new TigerTreeHasherStream((ClientDeduplicatorStream)cdds);

		// Pre-initialize dedup index (if needed)
		DedupIndex.Instance().Initialize();
	}
	MemoryStream headerStream = new MemoryStream();
	formatter.Serialize(headerStream, header);
	headerData = headerStream.ToArray();
	Logger.Append(Severity.INFO, "Created data pipeline with flags " + flags.ToString());
	//privilegesManager = new Utilities.PrivilegesManager();
}
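// Hedged sketch of the serialize-header-to-bytes step above, using the same
// BinaryFormatter pattern as the constructor. HeaderSketch is an illustrative
// stand-in; the real BChunkHeader carries flags, version and task id.
using System;
using System.IO;
using System.Runtime.Serialization.Formatters.Binary;

[Serializable]
class HeaderSketch {
	public int Version;
	public int TaskId;
}

static class HeaderSerializeSketch {
	static byte[] ToBytes(HeaderSketch h) {
		BinaryFormatter formatter = new BinaryFormatter();
		using (MemoryStream ms = new MemoryStream()) {
			formatter.Serialize(ms, h); // same pattern as headerStream in the constructor
			return ms.ToArray();
		}
	}
}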
internal void Process(BChunk chunk, long maxChunkSize) {
	IDataProcessorStream pipelineStream = pipeline.Stream;
	//pipeline.Stream.SetLength(0);
	pipeline.Reset();
	pipeline.CurrentChunk = chunk.Name;
	try{
		storageSession.AnnounceChunkBeginTransfer(chunk.Name, headerData.Length);
		sessionDataStream.Write(headerData, 0, headerData.Length);
	}
	catch (Exception ioe) {
		storageSession.LoggerInstance.Log(Severity.ERROR, "Network I/O error: " + ioe.Message + " ---- " + ioe.StackTrace);
		backup.AddHubNotificationEvent(904, "", ioe.Message);
		if (ioe.InnerException != null) {
			throw(ioe.InnerException);
		}
	}
	chunk.Size = headerData.Length;
	DateTime startChunkBuild = DateTime.Now;
	Stream fs = null;
	byte[] content = new byte[1024 * 512]; // read 512k at once
	long offset, remaining; // to know chunk final size (after pipeline processing streams)
	int read, itemReallyRead, partialRead;
	foreach (IFSEntry file in chunk.Items) {
		if (token.IsCancellationRequested) {
			Logger.Append(Severity.TRIVIA, "Received cancel order, exiting");
			return;
		}
		if (file.FileSize == 0) {
			continue;
		}
		// TODO!! is that correct?? now that DataLayoutInfos is a flag
		if (file.ChangeStatus == DataLayoutInfos.NoChange || file.ChangeStatus == DataLayoutInfos.Deleted) { // no data change
			continue;
		}
		//Console.WriteLine("\tProcessing/sending item " + file.Name + ", starting at pos " + file.FileStartPos);
		//offset = file.FileStartPos; // if a file is split into multiple chunks, start reading at required filepart pos
		remaining = file.FileSize;
		read = 0;
		itemReallyRead = 0;
		partialRead = 0;
		try{
			fs = file.OpenStream(FileMode.Open);
			long seeked = fs.Seek(file.FileStartPos, SeekOrigin.Begin);
			if (seeked != file.FileStartPos) {
				file.ChangeStatus = DataLayoutInfos.Invalid;
				storageSession.LoggerInstance.Log(Severity.ERROR, "Unable to seek to required position (reached " + seeked + " instead of " + file.FileStartPos + ") in file " + file.SnapFullPath);
				backup.AddHubNotificationEvent(912, file.SnapFullPath, "Seek error: wanted to go to " + file.FileStartPos + " but went to " + seeked);
			}
		}
		catch (Exception e) {
			file.ChangeStatus = DataLayoutInfos.Invalid;
			storageSession.LoggerInstance.Log(Severity.ERROR, "Unable to open file " + file.SnapFullPath + ": " + e.Message);
			backup.AddHubNotificationEvent(912, file.SnapFullPath, e.Message);
			try{ fs.Close(); }catch{}
			continue;
		}
		try{
			//Console.WriteLine("reading item '" + file.Name + "'");
			while ((read = fs.Read(content, partialRead, content.Length - partialRead)) > 0
					&& itemReallyRead <= maxChunkSize && !cancelRequested) {
#if DEBUG
				//sw.Start();
				//sw.Stop();
				//BenchmarkStats.Instance().ReadTime += sw.ElapsedMilliseconds;
				//Console.WriteLine("\t\tread " + read + " in " + sw.ElapsedMilliseconds + "ms");
				//sw.Reset();
#endif
				// if the file has to be split, take care to read no more than maxChunkSize
				if (itemReallyRead + read > maxChunkSize) {
					read = (int)maxChunkSize - itemReallyRead;
					if (read == 0) {
						break;
					}
				}
				remaining -= read;
				partialRead += read;
				itemReallyRead += read;
				if (partialRead == content.Length || remaining == 0) {
					pipelineStream.Write(content, 0, partialRead);
					partialRead = 0;
				}
				if (token.IsCancellationRequested) {
					Logger.Append(Severity.TRIVIA, "Received cancel order while processing '" + file.SnapFullPath + "', giving up");
					return;
				}
			}
			//Console.WriteLine("\tDone reading item '" + file.Name + "', estimated size=" + file.FileSize + ", really read=" + itemReallyRead);
			// Now correct FileSize with the REAL size (which includes Alternate Streams on NT).
			// TODO 2: if total file size is < than expected, file has changed too.
			if (itemReallyRead > file.FileSize && Utilities.PlatForm.IsUnixClient()) {
				Logger.Append(Severity.WARNING, "Item '" + file.SnapFullPath + "': size has changed during backup: expected " + file.FileSize + ", got " + itemReallyRead);
				backup.AddHubNotificationEvent(903, file.SnapFullPath, itemReallyRead.ToString());
			}
			file.FileSize = itemReallyRead;
			// Now that the file has been processed, gather its metadata from the processing streams and add it to the index
			pipelineStream.FlushMetadata();
			foreach (IFileBlockMetadata mtd in pipeline.FinalStream.BlockMetadata) {
				if (mtd is ClientDedupedBlocks) {
					file.BlockMetadata.DedupedBlocks = ((ClientDedupedBlocks)mtd).Ids;
				}
				else {
					file.BlockMetadata.BlockMetadata.Add(mtd);
				}
			}
			pipeline.FinalStream.BlockMetadata = new System.Collections.Generic.List<IFileBlockMetadata>();
			chunk.OriginalSize += itemReallyRead;
		}
		catch (Exception ioe) {
			if (ioe.InnerException is SocketException) {
				storageSession.LoggerInstance.Log(Severity.ERROR, "I/O error, could not process file " + file.SnapFullPath + " of chunk " + chunk.Name + ": " + ioe.Message /*+"---"+ioe.StackTrace*/);
				backup.AddHubNotificationEvent(904, file.SnapFullPath, ioe.Message);
				throw(ioe.InnerException);
			}
			else {
				storageSession.LoggerInstance.Log(Severity.ERROR, "Could not process file " + file.SnapFullPath + " of chunk " + chunk.Name + ": " + ioe.Message + "---" + ioe.StackTrace);
				backup.AddHubNotificationEvent(912, file.SnapFullPath, ioe.Message);
			}
		}
		finally{
			fs.Close();
		}
	} // end foreach file
	DateTime endChunkBuild = DateTime.Now;
	TimeSpan duration = endChunkBuild - startChunkBuild;
	pipeline.Stream.Flush();
#if DEBUG
	if (ConfigManager.GetValue("BENCHMARK") != null) {
		storageSession.AnnounceChunkEndTransfer(chunk.Name, 0);
	}
	else {
		storageSession.AnnounceChunkEndTransfer(chunk.Name, pipeline.FinalSize + headerData.Length);
	}
#else
	storageSession.AnnounceChunkEndTransfer(chunk.Name, chunk.Size);
#endif
	storageSession.LoggerInstance.Log(Severity.DEBUG, "Processed and transferred " + chunk.Name
		+ ", original size=" + chunk.OriginalSize / 1024 + "k, final size=" + chunk.Size / 1024 + "k, "
		+ chunk.Items.Count + " files in " + duration.Seconds + "." + duration.Milliseconds + " s, "
		+ Math.Round((chunk.OriginalSize / 1024) / duration.TotalSeconds, 0) + "Kb/s");
}
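// Hedged, self-contained sketch of the capped read loop in Process(): read a file
// in fixed-size slices, never letting one chunk exceed maxChunkSize, so a large
// file can be split across several chunks. All names below are illustrative.
using System;
using System.IO;

static class CappedReadSketch {
	static long ReadUpTo(Stream fs, Stream pipeline, long maxChunkSize) {
		byte[] buffer = new byte[512 * 1024]; // same 512k read size as Process()
		long total = 0;
		int read;
		while ((read = fs.Read(buffer, 0, buffer.Length)) > 0) {
			if (total + read > maxChunkSize) {        // trim the last read so the
				read = (int)(maxChunkSize - total);   // chunk stays within its cap
			}
			if (read == 0) break;
			pipeline.Write(buffer, 0, read);
			total += read;
			if (total == maxChunkSize) break;
		}
		return total; // the caller records this as the portion of the file stored in this chunk
	}
}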
public LZ4Decompressor(IDataProcessorStream myInnerStream) {
	this.innerStream = myInnerStream;
	this.BlockMetadata = new List<IFileBlockMetadata>();
	lz = new LZ4Stream.LZ4Stream(myInnerStream, false, true, 1 * 1024 * 1024 /* 1 MB buffer */);
}