private bool UploadStreamInTrunk(string filePath, Stream fileStream, IDocument remoteDocument)
{
    if (repoinfo.ChunkSize <= 0)
    {
        return false;
    }

    string fileName = remoteDocument.Name;
    for (long offset = fileStream.Position; offset < fileStream.Length; offset += repoinfo.ChunkSize)
    {
        bool isLastTrunk = false;
        if (offset + repoinfo.ChunkSize >= fileStream.Length)
        {
            isLastTrunk = true;
        }

        Logger.Debug(String.Format("Uploading next chunk (size={1}) of {0}: {2} of {3} finished({4}%)",
            fileName, repoinfo.ChunkSize, offset, fileStream.Length, 100 * offset / fileStream.Length));

        using (ChunkedStream chunkstream = new ChunkedStream(fileStream, repoinfo.ChunkSize))
        {
            chunkstream.ChunkPosition = offset;

            ContentStream contentStream = new ContentStream();
            contentStream.FileName = fileName;
            contentStream.MimeType = MimeType.GetMIMEType(fileName);
            contentStream.Length = repoinfo.ChunkSize;
            if (isLastTrunk)
            {
                contentStream.Length = fileStream.Length - offset;
            }

            contentStream.Stream = chunkstream;
            lock (disposeLock)
            {
                if (disposed)
                {
                    throw new ObjectDisposedException("Uploading");
                }

                try
                {
                    remoteDocument.AppendContentStream(contentStream, isLastTrunk);
                    Logger.Debug("Response of the server: " + offset.ToString());
                    database.SetFileServerSideModificationDate(filePath, remoteDocument.LastModificationDate);
                }
                catch (Exception ex)
                {
                    Logger.Fatal("Upload failed: " + ex);
                    return false;
                }
            }
        }
    }

    return true;
}
private Response<PathInfo> UploadInSequence(
    Stream content,
    int blockSize,
    PathHttpHeaders httpHeaders,
    DataLakeRequestConditions conditions,
    IProgress<long> progressHandler,
    CancellationToken cancellationToken)
{
    // Wrap the append and flush calls in an Upload span for
    // distributed tracing
    DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
        _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Files)}.{nameof(DataLake)}.{nameof(DataLakeFileClient)}.{nameof(DataLakeFileClient.Upload)}");
    try
    {
        scope.Start();

        // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
        // progress from being reset with each append file operation.
        if (progressHandler != null)
        {
            progressHandler = new AggregatingProgressIncrementer(progressHandler);
        }

        // Partition the stream into individual blocks and stage them
        IAsyncEnumerator<ChunkedStream> enumerator =
            PartitionedUploadExtensions.GetBlocksAsync(content, blockSize, async: false, _arrayPool, cancellationToken)
            .GetAsyncEnumerator(cancellationToken);

        // We need to keep track of how much data we have appended to
        // calculate offsets for the next appends, and the final
        // position to flush
        long appendedBytes = 0;
        while (enumerator.MoveNextAsync().EnsureCompleted())
        {
            // Dispose the block after the loop iterates and return its
            // memory to our ArrayPool
            using ChunkedStream block = enumerator.Current;

            // Append the next block
            _client.Append(
                new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                offset: appendedBytes,
                leaseId: conditions?.LeaseId,
                progressHandler: progressHandler,
                cancellationToken: cancellationToken);

            appendedBytes += block.Length;
        }

        // Commit the block list after everything has been staged to
        // complete the upload
        return _client.Flush(
            position: appendedBytes,
            httpHeaders: httpHeaders,
            conditions: conditions,
            cancellationToken: cancellationToken);
    }
    catch (Exception ex)
    {
        scope.Failed(ex);
        throw;
    }
    finally
    {
        scope.Dispose();
    }
}
private Response<BlobContentInfo> UploadInSequence(
    Stream content,
    int blockSize,
    BlobHttpHeaders blobHttpHeaders,
    IDictionary<string, string> metadata,
    BlobRequestConditions conditions,
    IProgress<long> progressHandler,
    AccessTier? accessTier,
    CancellationToken cancellationToken)
{
    // Wrap the staging and commit calls in an Upload span for
    // distributed tracing
    DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
        _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Blobs)}.{nameof(BlobClient)}.{nameof(BlobClient.Upload)}");
    try
    {
        scope.Start();

        // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
        // progress from being reset with each stage blob operation.
        if (progressHandler != null)
        {
            progressHandler = new AggregatingProgressIncrementer(progressHandler);
        }

        // The list tracking the block IDs we're going to commit
        List<string> blockIds = new List<string>();

        // Partition the stream into individual blocks and stage them
        IAsyncEnumerator<ChunkedStream> enumerator =
            PartitionedUploadExtensions.GetBlocksAsync(content, blockSize, async: false, _arrayPool, cancellationToken)
            .GetAsyncEnumerator(cancellationToken);

#pragma warning disable AZC0107
        while (enumerator.MoveNextAsync().EnsureCompleted())
#pragma warning restore AZC0107
        {
            // Dispose the block after the loop iterates and return its
            // memory to our ArrayPool
            using ChunkedStream block = enumerator.Current;

            // Stage the next block
            string blockId = GenerateBlockId(block.AbsolutePosition);
            _client.StageBlock(
                blockId,
                new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                conditions: conditions,
                progressHandler: progressHandler,
                cancellationToken: cancellationToken);

            blockIds.Add(blockId);
        }

        // Commit the block list after everything has been staged to
        // complete the upload
        return _client.CommitBlockList(
            blockIds,
            blobHttpHeaders,
            metadata,
            conditions,
            accessTier,
            cancellationToken);
    }
    catch (Exception ex)
    {
        scope.Failed(ex);
        throw;
    }
    finally
    {
        scope.Dispose();
    }
}
public void TestRead()
{
    byte[] content = null;
    using (MemoryStream file = new MemoryStream())
    {
        byte[] buffer = new byte[this.chunkSize];
        this.FillArray<byte>(buffer, (byte)'1');
        file.Write(buffer, 0, this.chunkSize);
        this.FillArray<byte>(buffer, (byte)'2');
        file.Write(buffer, 0, this.chunkSize);
        this.FillArray<byte>(buffer, (byte)'3');
        file.Write(buffer, 0, 3);
        content = file.ToArray();
    }

    using (Stream file = new MemoryStream(content))
    using (ChunkedStream chunked = new ChunkedStream(file, this.chunkSize))
    {
        byte[] buffer = new byte[this.chunkSize];
        byte[] result = new byte[this.chunkSize];

        Assert.AreEqual(0, chunked.ChunkPosition);
        Assert.AreEqual(0, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);

        this.FillArray<byte>(buffer, (byte)'1');
        Assert.AreEqual(1, chunked.Read(result, 0, 1));
        Assert.IsTrue(this.EqualArray(buffer, result, 1));
        Assert.AreEqual(0, chunked.ChunkPosition);
        Assert.AreEqual(1, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);

        Assert.AreEqual(this.chunkSize - 1, chunked.Read(result, 1, this.chunkSize));
        Assert.IsTrue(this.EqualArray(buffer, result, this.chunkSize));
        Assert.AreEqual(0, chunked.ChunkPosition);
        Assert.AreEqual(this.chunkSize, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);

        Assert.AreEqual(0, chunked.Read(result, 0, this.chunkSize));
        Assert.AreEqual(0, chunked.ChunkPosition);
        Assert.AreEqual(this.chunkSize, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);

        chunked.ChunkPosition = 2 * this.chunkSize;
        Assert.AreEqual(2 * this.chunkSize, chunked.ChunkPosition);
        Assert.AreEqual(0, chunked.Position);
        Assert.AreEqual(3, chunked.Length);

        this.FillArray<byte>(buffer, (byte)'3');
        Assert.AreEqual(3, chunked.Read(result, 0, this.chunkSize));
        Assert.IsTrue(this.EqualArray(buffer, result, 3));
        Assert.AreEqual(2 * this.chunkSize, chunked.ChunkPosition);
        Assert.AreEqual(3, chunked.Position);
        Assert.AreEqual(3, chunked.Length);

        chunked.ChunkPosition = this.chunkSize;
        Assert.AreEqual(this.chunkSize, chunked.ChunkPosition);
        Assert.AreEqual(0, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);

        this.FillArray<byte>(buffer, (byte)'2');
        for (int i = 0; i < this.chunkSize; ++i)
        {
            Assert.AreEqual(1, chunked.Read(result, i, 1));
        }

        Assert.IsTrue(this.EqualArray(buffer, result, this.chunkSize));
        Assert.AreEqual(this.chunkSize, chunked.ChunkPosition);
        Assert.AreEqual(this.chunkSize, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);
    }
}
public void TestWrite()
{
    using (MemoryStream file = new MemoryStream())
    using (ChunkedStream chunked = new ChunkedStream(file, this.chunkSize))
    {
        byte[] buffer = new byte[2 * this.chunkSize];
        this.FillArray<byte>(buffer, (byte)'a');

        Assert.AreEqual(0, chunked.ChunkPosition);
        Assert.AreEqual(0, chunked.Position);
        Assert.AreEqual(0, chunked.Length);

        chunked.Write(buffer, 0, 1);
        Assert.AreEqual(1, file.Position);
        Assert.AreEqual(1, chunked.Position);
        Assert.AreEqual(1, chunked.Length);

        System.ArgumentOutOfRangeException e =
            Assert.Catch<System.ArgumentOutOfRangeException>(() => chunked.Write(buffer, 0, this.chunkSize));
        Assert.AreEqual("count", e.ParamName);
        Assert.AreEqual(this.chunkSize, e.ActualValue);
        Assert.AreEqual(1, file.Position);
        Assert.AreEqual(1, chunked.Position);
        Assert.AreEqual(1, chunked.Length);

        chunked.Write(buffer, 1, this.chunkSize - 1);
        Assert.AreEqual(this.chunkSize, file.Position);
        Assert.AreEqual(this.chunkSize, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);

        e = Assert.Catch<System.ArgumentOutOfRangeException>(() => chunked.Write(buffer, 0, 1));
        Assert.AreEqual("count", e.ParamName);
        Assert.AreEqual(1, e.ActualValue);
        Assert.AreEqual(this.chunkSize, file.Position);
        Assert.AreEqual(this.chunkSize, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);

        chunked.ChunkPosition = this.chunkSize;
        Assert.AreEqual(this.chunkSize, chunked.ChunkPosition);
        Assert.AreEqual(this.chunkSize, file.Position);
        Assert.AreEqual(0, chunked.Position);
        Assert.AreEqual(0, chunked.Length);

        chunked.Write(buffer, 0, this.chunkSize);
        Assert.AreEqual(2 * this.chunkSize, file.Position);
        Assert.AreEqual(this.chunkSize, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);

        e = Assert.Catch<System.ArgumentOutOfRangeException>(() => chunked.Write(buffer, 0, 1));
        Assert.AreEqual("count", e.ParamName);
        Assert.AreEqual(1, e.ActualValue);
        Assert.AreEqual(2 * this.chunkSize, file.Position);
        Assert.AreEqual(this.chunkSize, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);

        chunked.ChunkPosition = 4 * this.chunkSize;
        Assert.AreEqual(4 * this.chunkSize, chunked.ChunkPosition);
        Assert.AreEqual(4 * this.chunkSize, file.Position);
        Assert.AreEqual(0, chunked.Position);
        Assert.AreEqual(0, chunked.Length);

        chunked.Write(buffer, 1, this.chunkSize - 1);
        Assert.AreEqual((5 * this.chunkSize) - 1, file.Position);
        Assert.AreEqual(this.chunkSize - 1, chunked.Position);
        Assert.AreEqual(this.chunkSize - 1, chunked.Length);

        e = Assert.Catch<System.ArgumentOutOfRangeException>(() => chunked.Write(buffer, 0, this.chunkSize));
        Assert.AreEqual("count", e.ParamName);
        Assert.AreEqual(this.chunkSize, e.ActualValue);
        Assert.AreEqual((5 * this.chunkSize) - 1, file.Position);
        Assert.AreEqual(this.chunkSize - 1, chunked.Position);
        Assert.AreEqual(this.chunkSize - 1, chunked.Length);

        chunked.Write(buffer, 0, 1);
        Assert.AreEqual(5 * this.chunkSize, file.Position);
        Assert.AreEqual(this.chunkSize, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);

        e = Assert.Catch<System.ArgumentOutOfRangeException>(() => chunked.Write(buffer, 0, 1));
        Assert.AreEqual("count", e.ParamName);
        Assert.AreEqual(1, e.ActualValue);
        Assert.AreEqual(5 * this.chunkSize, file.Position);
        Assert.AreEqual(this.chunkSize, chunked.Position);
        Assert.AreEqual(this.chunkSize, chunked.Length);
    }
}
private bool Write(ChunkedStream outStream, WireMessage[] messages, bool flush = true)
{
    ThrowIfDisposed();

    if (outStream == null)
    {
        return false;
    }

    using (var dataStream = new ChunkedStream())
    {
        try
        {
            var dataSize = (int)_serializer.Serialize(messages, dataStream);
            if (dataSize > RpcMessageSizeOf.MaxAllowedData)
            {
                return false;
            }

            /* Header */
            WriteHeader(outStream, dataSize, out byte[] messageId);

            if (dataSize == 0)
            {
                return true;
            }

            dataStream.Position = 0;
            outStream.ReadFrom(dataStream, dataSize);

            /* var buffer = ByteArrayCache.Default.Acquire();
             * try
             * {
             *     var bufferLen = buffer.Length;
             *
             *     using (var dataReader = dataStream.NewReader(0))
             *     {
             *         var readLen = 0;
             *         while (dataSize > 0)
             *         {
             *             readLen = dataReader.Read(buffer, 0, bufferLen);
             *
             *             readLen = Math.Max(0, readLen);
             *             if (readLen > 0)
             *             {
             *                 dataSize -= readLen;
             *                 bufferLen = Math.Min(bufferLen, dataSize);
             *
             *                 outStream.Write(buffer, 0, readLen);
             *             }
             *         }
             *     }
             * }
             * finally
             * {
             *     ByteArrayCache.Default.Release(buffer);
             * }
             */
        }
        finally
        {
            if (flush)
            {
                outStream.Flush();
            }
        }
    }

    return true;
}
public void TestWrite()
{
    //using (Database database = new Database(DatabasePath))
    using (Stream file = new FileStream(TestFilePath, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.None))
    using (ChunkedStream chunked = new ChunkedStream(file, ChunkSize))
    {
        byte[] buffer = new byte[2 * ChunkSize];
        FillArray<byte>(buffer, (byte)'a');

        Assert.AreEqual(0, chunked.ChunkPosition);
        Assert.AreEqual(0, chunked.Position);
        Assert.AreEqual(0, chunked.Length);

        chunked.Write(buffer, 0, 1);
        Assert.AreEqual(1, file.Position);
        Assert.AreEqual(1, chunked.Position);
        Assert.AreEqual(1, chunked.Length);

        System.ArgumentOutOfRangeException e =
            Assert.Catch<System.ArgumentOutOfRangeException>(() => chunked.Write(buffer, 0, ChunkSize));
        Assert.AreEqual("count", e.ParamName);
        Assert.AreEqual(ChunkSize, e.ActualValue);
        Assert.AreEqual(1, file.Position);
        Assert.AreEqual(1, chunked.Position);
        Assert.AreEqual(1, chunked.Length);

        chunked.Write(buffer, 1, ChunkSize - 1);
        Assert.AreEqual(ChunkSize, file.Position);
        Assert.AreEqual(ChunkSize, chunked.Position);
        Assert.AreEqual(ChunkSize, chunked.Length);

        e = Assert.Catch<System.ArgumentOutOfRangeException>(() => chunked.Write(buffer, 0, 1));
        Assert.AreEqual("count", e.ParamName);
        Assert.AreEqual(1, e.ActualValue);
        Assert.AreEqual(ChunkSize, file.Position);
        Assert.AreEqual(ChunkSize, chunked.Position);
        Assert.AreEqual(ChunkSize, chunked.Length);

        chunked.ChunkPosition = ChunkSize;
        Assert.AreEqual(ChunkSize, chunked.ChunkPosition);
        Assert.AreEqual(ChunkSize, file.Position);
        Assert.AreEqual(0, chunked.Position);
        Assert.AreEqual(0, chunked.Length);

        chunked.Write(buffer, 0, ChunkSize);
        Assert.AreEqual(2 * ChunkSize, file.Position);
        Assert.AreEqual(ChunkSize, chunked.Position);
        Assert.AreEqual(ChunkSize, chunked.Length);

        e = Assert.Catch<System.ArgumentOutOfRangeException>(() => chunked.Write(buffer, 0, 1));
        Assert.AreEqual("count", e.ParamName);
        Assert.AreEqual(1, e.ActualValue);
        Assert.AreEqual(2 * ChunkSize, file.Position);
        Assert.AreEqual(ChunkSize, chunked.Position);
        Assert.AreEqual(ChunkSize, chunked.Length);

        chunked.ChunkPosition = 4 * ChunkSize;
        Assert.AreEqual(4 * ChunkSize, chunked.ChunkPosition);
        Assert.AreEqual(4 * ChunkSize, file.Position);
        Assert.AreEqual(0, chunked.Position);
        Assert.AreEqual(0, chunked.Length);

        chunked.Write(buffer, 1, ChunkSize - 1);
        Assert.AreEqual(5 * ChunkSize - 1, file.Position);
        Assert.AreEqual(ChunkSize - 1, chunked.Position);
        Assert.AreEqual(ChunkSize - 1, chunked.Length);

        e = Assert.Catch<System.ArgumentOutOfRangeException>(() => chunked.Write(buffer, 0, ChunkSize));
        Assert.AreEqual("count", e.ParamName);
        Assert.AreEqual(ChunkSize, e.ActualValue);
        Assert.AreEqual(5 * ChunkSize - 1, file.Position);
        Assert.AreEqual(ChunkSize - 1, chunked.Position);
        Assert.AreEqual(ChunkSize - 1, chunked.Length);

        chunked.Write(buffer, 0, 1);
        Assert.AreEqual(5 * ChunkSize, file.Position);
        Assert.AreEqual(ChunkSize, chunked.Position);
        Assert.AreEqual(ChunkSize, chunked.Length);

        e = Assert.Catch<System.ArgumentOutOfRangeException>(() => chunked.Write(buffer, 0, 1));
        Assert.AreEqual("count", e.ParamName);
        Assert.AreEqual(1, e.ActualValue);
        Assert.AreEqual(5 * ChunkSize, file.Position);
        Assert.AreEqual(ChunkSize, chunked.Position);
        Assert.AreEqual(ChunkSize, chunked.Length);
    }
}
public void TestRead()
{
    //using (Database database = new Database(DatabasePath))
    {
        using (Stream file = File.OpenWrite(TestFilePath))
        {
            byte[] buffer = new byte[ChunkSize];
            FillArray<byte>(buffer, (byte)'1');
            file.Write(buffer, 0, ChunkSize);
            FillArray<byte>(buffer, (byte)'2');
            file.Write(buffer, 0, ChunkSize);
            FillArray<byte>(buffer, (byte)'3');
            file.Write(buffer, 0, 3);
        }

        using (Stream file = new FileStream(TestFilePath, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.None))
        using (ChunkedStream chunked = new ChunkedStream(file, ChunkSize))
        {
            byte[] buffer = new byte[ChunkSize];
            byte[] result = new byte[ChunkSize];

            Assert.AreEqual(0, chunked.ChunkPosition);
            Assert.AreEqual(0, chunked.Position);
            Assert.AreEqual(ChunkSize, chunked.Length);

            FillArray<byte>(buffer, (byte)'1');
            Assert.AreEqual(1, chunked.Read(result, 0, 1));
            Assert.IsTrue(EqualArray(buffer, result, 1));
            Assert.AreEqual(0, chunked.ChunkPosition);
            Assert.AreEqual(1, chunked.Position);
            Assert.AreEqual(ChunkSize, chunked.Length);

            Assert.AreEqual(ChunkSize - 1, chunked.Read(result, 1, ChunkSize));
            Assert.IsTrue(EqualArray(buffer, result, ChunkSize));
            Assert.AreEqual(0, chunked.ChunkPosition);
            Assert.AreEqual(ChunkSize, chunked.Position);
            Assert.AreEqual(ChunkSize, chunked.Length);

            Assert.AreEqual(0, chunked.Read(result, 0, ChunkSize));
            Assert.AreEqual(0, chunked.ChunkPosition);
            Assert.AreEqual(ChunkSize, chunked.Position);
            Assert.AreEqual(ChunkSize, chunked.Length);

            chunked.ChunkPosition = 2 * ChunkSize;
            Assert.AreEqual(2 * ChunkSize, chunked.ChunkPosition);
            Assert.AreEqual(0, chunked.Position);
            Assert.AreEqual(3, chunked.Length);

            FillArray<byte>(buffer, (byte)'3');
            Assert.AreEqual(3, chunked.Read(result, 0, ChunkSize));
            Assert.IsTrue(EqualArray(buffer, result, 3));
            Assert.AreEqual(2 * ChunkSize, chunked.ChunkPosition);
            Assert.AreEqual(3, chunked.Position);
            Assert.AreEqual(3, chunked.Length);

            chunked.ChunkPosition = ChunkSize;
            Assert.AreEqual(ChunkSize, chunked.ChunkPosition);
            Assert.AreEqual(0, chunked.Position);
            Assert.AreEqual(ChunkSize, chunked.Length);

            FillArray<byte>(buffer, (byte)'2');
            for (int i = 0; i < ChunkSize; ++i)
            {
                Assert.AreEqual(1, chunked.Read(result, i, 1));
            }

            Assert.IsTrue(EqualArray(buffer, result, ChunkSize));
            Assert.AreEqual(ChunkSize, chunked.ChunkPosition);
            Assert.AreEqual(ChunkSize, chunked.Position);
            Assert.AreEqual(ChunkSize, chunked.Length);
        }
    }
}
public void Decode_NotGivingPartialDataFromStream()
{
    var expected = Utilities.GetRandom<byte[]>(false);
    using (var chunked = new ChunkedStream(this.Stream, 3))
    {
        var encoder = this.CreateEncoder(chunked);
        encoder.Encode(expected);
        encoder.Flush();

        chunked.Seek(0, SeekOrigin.Begin);

        var decoder = this.CreateDecoder(chunked);
        var actual = decoder.DecodeByteArray();

        Assert.IsTrue(expected.SequenceEqual(actual));
    }
}
/// <summary>
/// Uploads the file.
/// Resumes an upload if the given localFileStream.Position is larger than zero.
/// </summary>
/// <returns>
/// The new CMIS document.
/// </returns>
/// <param name='remoteDocument'>
/// Remote document where the local content should be uploaded to.
/// </param>
/// <param name='localFileStream'>
/// Local file stream.
/// </param>
/// <param name='transmission'>
/// Transmission status where the uploader should report its uploading status.
/// </param>
/// <param name='hashAlg'>
/// Hash algorithm which should be used to calculate a checksum over the uploaded content.
/// </param>
/// <param name='overwrite'>
/// If true, the local content will overwrite the existing content.
/// </param>
/// <param name="update">Is called on every new chunk, if not <c>null</c>.</param>
/// <exception cref="CmisSync.Lib.Tasks.UploadFailedException">
/// Contains the last successful remote document state. This is needed to continue a failed upload.
/// </exception>
public override IDocument UploadFile(
    IDocument remoteDocument,
    Stream localFileStream,
    Transmission transmission,
    HashAlgorithm hashAlg,
    bool overwrite = true,
    UpdateChecksum update = null)
{
    IDocument result = remoteDocument;
    for (long offset = localFileStream.Position; offset < localFileStream.Length; offset += this.ChunkSize)
    {
        bool isFirstChunk = offset == 0;
        bool isLastChunk = (offset + this.ChunkSize) >= localFileStream.Length;
        using (var hashstream = new NonClosingHashStream(localFileStream, hashAlg, CryptoStreamMode.Read))
        using (var chunkstream = new ChunkedStream(hashstream, this.ChunkSize))
        using (var offsetstream = new OffsetStream(chunkstream, offset))
        using (var transmissionStream = transmission.CreateStream(offsetstream))
        {
            transmission.Length = localFileStream.Length;
            transmission.Position = offset;
            chunkstream.ChunkPosition = offset;

            ContentStream contentStream = new ContentStream();
            contentStream.FileName = remoteDocument.Name;
            contentStream.MimeType = Cmis.MimeType.GetMIMEType(remoteDocument.Name);
            if (isLastChunk)
            {
                contentStream.Length = localFileStream.Length - offset;
            }
            else
            {
                contentStream.Length = this.ChunkSize;
            }

            contentStream.Stream = transmissionStream;
            try
            {
                if (isFirstChunk && result.ContentStreamId != null && overwrite)
                {
                    result.DeleteContentStream(true);
                }

                result.AppendContentStream(contentStream, isLastChunk, true);
                HashAlgorithmReuse reuse = hashAlg as HashAlgorithmReuse;
                if (reuse != null && update != null)
                {
                    using (HashAlgorithm hash = (HashAlgorithm)reuse.Clone())
                    {
                        hash.TransformFinalBlock(new byte[0], 0, 0);
                        update(hash.Hash, result.ContentStreamLength.GetValueOrDefault());
                    }
                }
            }
            catch (Exception e)
            {
                if (e is FileTransmission.AbortException)
                {
                    throw;
                }

                if (e.InnerException is FileTransmission.AbortException)
                {
                    throw e.InnerException;
                }

                throw new UploadFailedException(e, result);
            }
        }
    }

    hashAlg.TransformFinalBlock(new byte[0], 0, 0);
    return result;
}
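Since the doc comment above describes a resumable, chunk-wise upload, a minimal usage sketch may help. It assumes the uploader class that defines this UploadFile override (named ChunkedUploader here), plus the Transmission, UploadFailedException, and IDocument types from the surrounding CmisSync.Lib and DotCMIS code; the helper name UploadWithSha1 and the class ChunkedUploadExample are purely illustrative.

// Illustrative sketch only: ChunkedUploader, Transmission, UploadFailedException and
// IDocument are assumed to come from the CmisSync.Lib / DotCMIS namespaces used above.
using System;
using System.IO;
using System.Security.Cryptography;

public static class ChunkedUploadExample
{
    public static IDocument UploadWithSha1(
        ChunkedUploader uploader,     // assumed: the class exposing the UploadFile override above
        IDocument remoteDocument,
        Transmission transmission,
        string localPath)
    {
        using (FileStream localStream = File.OpenRead(localPath))
        using (HashAlgorithm sha1 = SHA1.Create())
        {
            try
            {
                // UploadFile resumes from localStream.Position, so a caller that tracked
                // already-transferred bytes could Seek() here before invoking it.
                return uploader.UploadFile(remoteDocument, localStream, transmission, sha1);
            }
            catch (UploadFailedException ex)
            {
                // Per the doc comment, the exception carries the last successful remote
                // document state; a caller could persist it and resume the upload later.
                Console.WriteLine("Chunked upload failed: " + ex.Message);
                throw;
            }
        }
    }
}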