/// <summary>
/// Copies a local file into the archive's control-file folder
/// </summary>
/// <param name="localfile">Path of the local file to add</param>
/// <param name="hint">A hint to the compressor as to how compressible the file data is</param>
/// <param name="filename">Optional name to use inside the archive; defaults to the local file's name</param>
public void AddControlFile(string localfile, CompressionHint hint, string filename = null)
{
    if (filename == null)
        filename = System.IO.Path.GetFileName(localfile);

    // Create the archive entry first, then stream the local file into it
    using (var target = m_compression.CreateFile(CONTROL_FILES_FOLDER + filename, hint, DateTime.UtcNow))
    using (var source = System.IO.File.OpenRead(localfile))
        Library.Utility.Utility.CopyStream(source, target);
}
/// <summary>
/// Writes a single data block into the archive, tracking block count and source size
/// </summary>
/// <param name="hash">The block hash, used to derive the entry name</param>
/// <param name="data">The buffer holding the block data</param>
/// <param name="offset">The offset into the buffer where the block starts</param>
/// <param name="size">The number of bytes to write</param>
/// <param name="hint">A hint to the compressor as to how compressible the data is</param>
public void AddBlock(string hash, byte[] data, int offset, int size, CompressionHint hint)
{
    m_blocks++;
    m_sourcesize += size;

    // Filenames are encoded with "modified Base64 for URL",
    // https://en.wikipedia.org/wiki/Base64#URL_applications
    var entryname = Library.Utility.Utility.Base64PlainToBase64Url(hash);
    using (var stream = m_compression.CreateFile(entryname, hint, DateTime.UtcNow))
        stream.Write(data, offset, size);
}
/// <summary>
/// Creates a new empty file in the target folder
/// </summary>
/// <param name="file">The name of the file to create</param>
/// <param name="hint">A hint to the compressor as to how compressible the file data is (unused for folder output)</param>
/// <param name="lastWrite">The time the file was last written</param>
/// <returns>The stream used to access the file</returns>
public System.IO.Stream CreateFile(string file, CompressionHint hint, DateTime lastWrite)
{
    var path = System.IO.Path.Combine(m_folder, file);
    var stream = System.IO.File.Create(path);

    //TODO: This should actually be set when closing the stream,
    // as closing the stream may update the last-write time again
    System.IO.File.SetLastWriteTime(path, lastWrite);

    return stream;
}
/// <summary>
/// Creates a file in the zip archive and returns a writeable stream
/// </summary>
/// <param name="file">The name of the file to create</param>
/// <param name="hint">A hint to the compressor as to how compressible the file data is</param>
/// <param name="lastWrite">The time the file was last written</param>
/// <returns>A writeable stream for the file contents</returns>
public virtual Stream CreateFile(string file, CompressionHint hint, DateTime lastWrite)
{
    if (!m_isWriting)
        throw new InvalidOperationException("Cannot write while reading");

    // Account for the central-directory entry this file will add on flush
    m_flushBufferSize += CENTRAL_HEADER_ENTRY_SIZE + System.Text.Encoding.UTF8.GetByteCount(file);

    // Non-compressible data is stored, everything else uses the configured level
    m_compressionInfo.DeflateCompressionLevel =
        hint == CompressionHint.Noncompressible
            ? SharpCompress.Compressor.Deflate.CompressionLevel.None
            : m_defaultCompressionLevel;

    return ((ZipWriter)m_writer).WriteToStream(file, lastWrite, null);
}
/// <summary>
/// Creates a file in the tar archive and returns a writable stream.
/// </summary>
/// <param name="file">The name of the file to create</param>
/// <param name="hint">A hint to the compressor as to how compressible the file data is (unused for tar)</param>
/// <param name="lastWrite">The time the file was last written</param>
/// <returns>A writable stream for the file contents</returns>
public virtual Stream CreateFile(string file, CompressionHint hint, DateTime lastWrite)
{
    if (m_mode != ArchiveMode.Write)
        throw new InvalidOperationException(CannotWriteWhileReading);

    // TODO: update m_flushBufferSize
    return new FileBufferStream(file, (TarWriter)m_writer, lastWrite);
}
/// <summary>
/// Creates a file in the 7z archive and returns a writeable stream
/// </summary>
/// <param name="file">The name of the file to create</param>
/// <param name="hint">A hint to the compressor as to how compressible the file data is</param>
/// <param name="lastWrite">The time the file was last written</param>
/// <returns>A writeable stream for the file contents</returns>
public Stream CreateFile(string file, CompressionHint hint, DateTime lastWrite)
{
    if (string.IsNullOrEmpty(file))
        throw new ArgumentNullException(nameof(file));
    if (m_writer == null)
        throw new InvalidOperationException(Strings.SevenZipCompression.NoWriterError);

    var entry = new WriterEntry(file, lastWrite);

    if (hint == CompressionHint.Noncompressible)
    {
        // Non-compressible data bypasses LZMA and is stored via the plain copy encoder
        if (m_copyEncoder == null)
            m_copyEncoder = new ManagedLzma.LZMA.Master.SevenZip.ArchiveWriter.PlainEncoder();

        if (m_lzma2Encoder != null && m_lzma2Encoder == m_writer.CurrentEncoder)
            m_lzma2Encoder.SetOutputThreshold(kStreamThreshold); // rearm threshold so we can switch back

        if (m_writer.CurrentEncoder != m_copyEncoder)
            m_writer.ConnectEncoder(m_copyEncoder);

        return m_copyEncoder.BeginWriteFile(entry);
    }

    // Compressible data goes through the lazily-created LZMA encoder
    if (m_lzma2Encoder == null)
    {
        if (m_lowOverheadMode)
            m_lzma2Encoder = new ManagedLzma.LZMA.Master.SevenZip.ArchiveWriter.LzmaEncoder();
        else
            m_lzma2Encoder = new ManagedLzma.LZMA.Master.SevenZip.ArchiveWriter.Lzma2Encoder(m_threadCount, m_encoderProps);

        m_lzma2Encoder.OnOutputThresholdReached += mLzma2Encoder_OnOutputThresholdReached;
        m_lzma2Encoder.SetOutputThreshold(kStreamThreshold);
    }

    return m_lzma2Encoder.BeginWriteFile(entry);
}
/// <summary>
/// Creates a file in the zip archive and returns a writeable stream
/// </summary>
/// <param name="file">The name of the file to create</param>
/// <param name="hint">A hint to the compressor as to how compressible the file data is</param>
/// <param name="lastWrite">The time the file was last written</param>
/// <returns>A writeable stream for the file contents</returns>
public virtual Stream CreateFile(string file, CompressionHint hint, DateTime lastWrite)
{
    if (m_mode != ArchiveMode.Write)
        throw new InvalidOperationException(CannotWriteWhileReading);

    // Account for the central-directory entry this file will add on flush
    m_flushBufferSize += CENTRAL_HEADER_ENTRY_SIZE + System.Text.Encoding.UTF8.GetByteCount(file);
    if (m_usingZip64)
        m_flushBufferSize += CENTRAL_HEADER_ENTRY_SIZE_ZIP64_EXTRA;

    // Non-compressible data is stored, everything else uses the configured level
    var options = new ZipWriterEntryOptions()
    {
        DeflateCompressionLevel = hint == CompressionHint.Noncompressible
            ? SharpCompress.Compressors.Deflate.CompressionLevel.None
            : m_defaultCompressionLevel,
        ModificationDateTime = lastWrite,
        CompressionType = m_compressionType
    };

    return ((ZipWriter)m_writer).WriteToStream(file, options);
}
/// <summary>
/// Adds the found file data to the output unless the block already exists
/// </summary>
/// <param name="key">The block hash</param>
/// <param name="data">The data matching the hash</param>
/// <param name="offset">The offset into the data</param>
/// <param name="len">The size of the data</param>
/// <param name="hint">Hint for compression module</param>
/// <param name="isBlocklistData">Indicates if the block is list data</param>
/// <returns>True if the block was new and written to the output, false if it was already known</returns>
private bool AddBlockToOutput(string key, byte[] data, int offset, int len, CompressionHint hint, bool isBlocklistData)
{
    if (m_database.AddBlock(key, len, m_blockvolume.VolumeID, m_transaction))
    {
        m_blockvolume.AddBlock(key, data, offset, len, hint);

        //TODO: In theory a normal data block and blocklist block could be equal.
        // this would cause the index file to not contain all data,
        // if the data file is added before the blocklist data
        // ... highly theoretical ...
        if (m_options.IndexfilePolicy == Options.IndexFileStrategy.Full && isBlocklistData)
            m_indexvolume.WriteBlocklist(key, data, offset, len);

        // Volume is full enough that the next block might overflow it; finish and start a new one
        if (m_blockvolume.Filesize > m_options.VolumeSize - m_options.Blocksize)
        {
            if (m_options.Dryrun)
            {
                m_blockvolume.Close();
                m_result.AddDryrunMessage(string.Format("Would upload block volume: {0}, size: {1}", m_blockvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_blockvolume.LocalFilename).Length)));

                if (m_indexvolume != null)
                {
                    UpdateIndexVolume();
                    m_indexvolume.FinishVolume(Library.Utility.Utility.CalculateHash(m_blockvolume.LocalFilename), new FileInfo(m_blockvolume.LocalFilename).Length);
                    m_result.AddDryrunMessage(string.Format("Would upload index volume: {0}, size: {1}", m_indexvolume.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(m_indexvolume.LocalFilename).Length)));
                    m_indexvolume.Dispose();
                    m_indexvolume = null;
                }

                m_blockvolume.Dispose();
                m_blockvolume = null;

                // BUGFIX: the index volume is already disposed and cleared in the
                // null-guarded block above (or was never created); the previous
                // unconditional m_indexvolume.Dispose() here always threw a
                // NullReferenceException on this path.
            }
            else
            {
                // When uploading a new volume, we register the volumes and then flush the transaction;
                // this ensures that the local database and remote storage are as closely related as possible
                m_database.UpdateRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null, m_transaction);
                m_blockvolume.Close();
                UpdateIndexVolume();

                m_backend.FlushDbMessages(m_database, m_transaction);
                m_backendLogFlushTimer = DateTime.Now.Add(FLUSH_TIMESPAN);

                using (new Logging.Timer("CommitAddBlockToOutputFlush"))
                    m_transaction.Commit();
                m_transaction = m_database.BeginTransaction();

                m_backend.Put(m_blockvolume, m_indexvolume);
                m_blockvolume = null;
                m_indexvolume = null;
            }

            // Start fresh volumes for the subsequent blocks
            m_blockvolume = new BlockVolumeWriter(m_options);
            m_blockvolume.VolumeID = m_database.RegisterRemoteVolume(m_blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary, m_transaction);

            if (m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
            {
                m_indexvolume = new IndexVolumeWriter(m_options);
                m_indexvolume.VolumeID = m_database.RegisterRemoteVolume(m_indexvolume.RemoteFilename, RemoteVolumeType.Index, RemoteVolumeState.Temporary, m_transaction);
            }
        }

        return true;
    }

    return false;
}
/// <summary>
/// Creates a new empty file in the target folder
/// </summary>
/// <param name="file">The name of the file to create</param>
/// <param name="hint">A hint to the compressor as to how compressible the file data is (unused for folder output)</param>
/// <param name="lastWrite">The time the file was last written</param>
/// <returns>The stream used to access the file</returns>
public System.IO.Stream CreateFile(string file, CompressionHint hint, DateTime lastWrite)
{
    var targetpath = System.IO.Path.Combine(m_folder, file);
    System.IO.Stream result = System.IO.File.Create(targetpath);

    //TODO: This should actually be set when closing the stream
    System.IO.File.SetLastWriteTime(targetpath, lastWrite);

    return result;
}
/// <summary>
/// Sends a data block to the processing channel and awaits the processing result
/// </summary>
/// <param name="channel">The channel that consumes data blocks</param>
/// <param name="hash">The block hash</param>
/// <param name="data">The buffer holding the block data</param>
/// <param name="offset">The offset into the buffer</param>
/// <param name="size">The number of bytes in the block</param>
/// <param name="hint">Hint for the compression module</param>
/// <param name="isBlocklistHashes">Indicates if the block contains blocklist hashes</param>
/// <returns>The result reported by the block consumer</returns>
public static async Task<bool> AddBlockToOutputAsync(IWriteChannel<DataBlock> channel, string hash, byte[] data, int offset, long size, CompressionHint hint, bool isBlocklistHashes)
{
    // RunContinuationsAsynchronously prevents the completing thread from
    // executing our continuation inline when it calls TrySetResult
    var tcs = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);

    await channel.WriteAsync(new DataBlock()
    {
        HashKey = hash,
        Data = data,
        Offset = offset,
        Size = size,
        Hint = hint,
        IsBlocklistHashes = isBlocklistHashes,
        TaskCompletion = tcs
    }).ConfigureAwait(false);

    // ConfigureAwait(false): library code, no sync-context affinity needed
    return await tcs.Task.ConfigureAwait(false);
}
/// <summary>
/// Creates a file in the zip archive and returns a writeable stream
/// </summary>
/// <param name="file">The name of the file to create</param>
/// <param name="hint">A hint to the compressor as to how compressible the file data is</param>
/// <param name="lastWrite">The time the file was last written</param>
/// <returns>A writeable stream for the file contents</returns>
public virtual Stream CreateFile(string file, CompressionHint hint, DateTime lastWrite)
{
    if (!m_isWriting)
    {
        throw new InvalidOperationException("Cannot write while reading");
    }

    // Account for the central-directory entry this file will add on flush
    m_flushBufferSize += CENTRAL_HEADER_ENTRY_SIZE + System.Text.Encoding.UTF8.GetByteCount(file);

    // Non-compressible data is stored, everything else uses the configured level
    var level = hint == CompressionHint.Noncompressible
        ? SharpCompress.Compressor.Deflate.CompressionLevel.None
        : m_defaultCompressionLevel;
    m_compressionInfo.DeflateCompressionLevel = level;

    return ((ZipWriter)m_writer).WriteToStream(file, lastWrite, null);
}
/// <summary>
/// Forwards a compression hint to the native IL library
/// </summary>
/// <param name="hint">The compression hint to apply</param>
public static void SetCompressionHint(CompressionHint hint)
    => ilHint((uint)ILDefines.IL_COMPRESSION_HINT, (uint)hint);
/// <summary>
/// Forwards a compression hint to the native IL library
/// </summary>
/// <param name="hint">The compression hint to apply</param>
public static void SetCompressionHint(CompressionHint hint)
{
    // 0x0668 (decimal 1640) is IL_COMPRESSION_HINT in the native DevIL
    // headers; name it instead of passing the bare magic number 1640U
    const uint IL_COMPRESSION_HINT = 0x0668;
    IL.ilHint(IL_COMPRESSION_HINT, (uint)hint);
}
/// <summary>
/// Sends a stream to the processing channel and awaits the processing result
/// </summary>
/// <param name="channel">The channel that consumes stream blocks</param>
/// <param name="path">The path associated with the stream</param>
/// <param name="stream">The stream to process</param>
/// <param name="isMetadata">Indicates if the stream contains metadata</param>
/// <param name="hint">Hint for the compression module</param>
/// <returns>The result reported by the stream consumer</returns>
public static async Task<StreamProcessResult> ProcessStream(IWriteChannel<StreamBlock> channel, string path, Stream stream, bool isMetadata, CompressionHint hint)
{
    // RunContinuationsAsynchronously prevents the completing thread from
    // executing our continuation inline when it calls TrySetResult
    var tcs = new TaskCompletionSource<StreamProcessResult>(TaskCreationOptions.RunContinuationsAsynchronously);

    await channel.WriteAsync(new StreamBlock()
    {
        Path = path,
        Stream = stream,
        IsMetadata = isMetadata,
        Hint = hint,
        Result = tcs
    }).ConfigureAwait(false);

    // ConfigureAwait(false): library code, no sync-context affinity needed
    return await tcs.Task.ConfigureAwait(false);
}
/// <summary>
/// Creates a file in the 7z archive and returns a writeable stream
/// </summary>
/// <param name="file">The name of the file to create</param>
/// <param name="hint">A hint to the compressor as to how compressible the file data is</param>
/// <param name="lastWrite">The time the file was last written</param>
/// <returns>A writeable stream for the file contents</returns>
public Stream CreateFile(string file, CompressionHint hint, DateTime lastWrite)
{
    if (string.IsNullOrEmpty(file))
    {
        throw new ArgumentNullException(nameof(file));
    }

    if (m_writer == null)
    {
        throw new InvalidOperationException(Strings.SevenZipCompression.NoWriterError);
    }

    var entry = new WriterEntry(file, lastWrite);

    if (hint == CompressionHint.Noncompressible)
    {
        // Non-compressible data bypasses LZMA and is stored via the plain copy encoder
        if (m_copyEncoder == null)
        {
            m_copyEncoder = new ManagedLzma.LZMA.Master.SevenZip.ArchiveWriter.PlainEncoder();
        }

        if (m_lzma2Encoder != null && m_lzma2Encoder == m_writer.CurrentEncoder)
        {
            m_lzma2Encoder.SetOutputThreshold(kStreamThreshold); // rearm threshold so we can switch back
        }

        if (m_writer.CurrentEncoder != m_copyEncoder)
        {
            m_writer.ConnectEncoder(m_copyEncoder);
        }

        return m_copyEncoder.BeginWriteFile(entry);
    }

    // Compressible data goes through the lazily-created LZMA encoder
    if (m_lzma2Encoder == null)
    {
        if (m_lowOverheadMode)
        {
            m_lzma2Encoder = new ManagedLzma.LZMA.Master.SevenZip.ArchiveWriter.LzmaEncoder();
        }
        else
        {
            m_lzma2Encoder = new ManagedLzma.LZMA.Master.SevenZip.ArchiveWriter.Lzma2Encoder(m_threadCount);
        }

        m_lzma2Encoder.OnOutputThresholdReached += mLzma2Encoder_OnOutputThresholdReached;
        m_lzma2Encoder.SetOutputThreshold(kStreamThreshold);
    }

    return m_lzma2Encoder.BeginWriteFile(entry);
}
/// <summary>
/// Sends a length-limited stream to the processing channel and awaits the processing result
/// </summary>
/// <param name="channel">The channel that consumes stream blocks</param>
/// <param name="path">The path associated with the stream</param>
/// <param name="stream">The stream to process</param>
/// <param name="isMetadata">Indicates if the stream contains metadata</param>
/// <param name="hint">Hint for the compression module</param>
/// <returns>The result reported by the stream consumer</returns>
public static async Task<StreamProcessResult> ProcessStream(IWriteChannel<StreamBlock> channel, string path, Stream stream, bool isMetadata, CompressionHint hint)
{
    var tcs = new TaskCompletionSource<StreamProcessResult>();

    // Snapshot the stream length now, a fixed point in time, so later
    // growth of the underlying stream does not change what gets processed
    var limitedStream = new Library.Utility.ReadLimitLengthStream(stream, stream.Length);

    await channel.WriteAsync(new StreamBlock
    {
        Path = path,
        Stream = limitedStream,
        IsMetadata = isMetadata,
        Hint = hint,
        Result = tcs
    });

    return await tcs.Task.ConfigureAwait(false);
}