/// <summary>
/// Marks <paramref name="readBytes"/> bytes as consumed. Bytes beyond the already
/// polled-ahead portion are drained directly from the underlying bucket.
/// </summary>
/// <param name="readBytes">Total number of bytes to consume; must be at least the number already read ahead</param>
/// <exception cref="InvalidOperationException"><paramref name="readBytes"/> is less than the number of bytes already read ahead</exception>
/// <exception cref="BucketEofException">The bucket reached EOF before all bytes could be consumed</exception>
public async ValueTask Consume(int readBytes)
{
    if (readBytes < AlreadyRead)
        throw new InvalidOperationException();

    // Satisfy as much as possible from the portion that was already read while polling.
    int fromPeek = Math.Min(readBytes, AlreadyRead);
    AlreadyRead -= fromPeek;

    // Drain the remainder straight from the underlying bucket.
    for (int remaining = readBytes - fromPeek; remaining > 0; )
    {
        var bb = await Bucket.ReadAsync(remaining).ConfigureAwait(false);

        if (bb.IsEmpty)
            throw new BucketEofException($"EOF during poll consume of {Bucket.Name}");

        remaining -= bb.Length;
    }
}
/// <summary>
/// Read in-memory buckets combined in a single buffer
/// </summary>
/// <param name="bucket">The bucket to read from</param>
/// <param name="bufferSize">Number of bytes requested through the multi-buffer interface</param>
/// <param name="requested">Maximum number of bytes for the fallback single read</param>
/// <returns>The combined buffered data, or the result of a plain read when no buffered data is available</returns>
/// <exception cref="ArgumentNullException"><paramref name="bucket"/> is null</exception>
public static async ValueTask<BucketBytes> ReadCombinedAsync(this Bucket bucket, int bufferSize, int requested = int.MaxValue)
{
    if (bucket is null)
        throw new ArgumentNullException(nameof(bucket));

    if (bucket is IBucketReadBuffers iov)
    {
        var (buffers, _) = await iov.ReadBuffersAsync(bufferSize).ConfigureAwait(false);

        int total = 0;
        foreach (var b in buffers)
            total += b.Length;

        if (total > 0)
        {
            // Flatten all returned segments into one freshly allocated array.
            byte[] combined = new byte[total];
            int written = 0;

            foreach (var b in buffers)
            {
                b.CopyTo(new Memory<byte>(combined, written, b.Length));
                written += b.Length;
            }

            return combined;
        }
    }

    // No multi-buffer data available; perform a regular read instead.
    return await bucket.ReadAsync(requested).ConfigureAwait(false);
}
/// <summary>
/// Writes the bucket to <paramref name="stream"/> and then closes <paramref name="bucket"/>
/// </summary>
/// <param name="bucket">The bucket to copy; disposed when this call completes</param>
/// <param name="stream">The destination stream</param>
/// <param name="cancellationToken">Token for <see cref="Stream.WriteAsync(byte[], int, int, CancellationToken)"/></param>
/// <returns></returns>
/// <exception cref="ArgumentNullException"><paramref name="bucket"/> or <paramref name="stream"/> is null</exception>
public static async ValueTask WriteToAsync(this Bucket bucket, Stream stream, CancellationToken cancellationToken = default)
{
    if (bucket is null)
        throw new ArgumentNullException(nameof(bucket));

    if (stream is null)
        throw new ArgumentNullException(nameof(stream));

    using (bucket)
    {
#if NET6_0_OR_GREATER
        // Fast path: seekable file target + gather-capable source. Write the buffer
        // list directly via RandomAccess, bypassing the Stream layer entirely.
        if (stream is FileStream fs && fs.CanSeek && bucket is IBucketReadBuffers rb)
        {
            var handle = fs.SafeFileHandle;
            long pos = fs.Position;

            while (true)
            {
                cancellationToken.ThrowIfCancellationRequested();

                var (buffers, done) = await bucket.ReadBuffersAsync().ConfigureAwait(false);

                if (buffers.Length > 0)
                {
                    long len = buffers.Sum(x => (long)x.Length);

                    await RandomAccess.WriteAsync(handle, buffers, pos, cancellationToken).ConfigureAwait(false);
                    pos += len;
                }

                if (done)
                {
                    fs.Position = pos; // Keep the stream's own cursor in sync with what we wrote.
                    return;
                }
            }
        }
#endif
        // Generic path: pump one read at a time into the stream until EOF.
        while (true)
        {
            cancellationToken.ThrowIfCancellationRequested();

            var bb = await bucket.ReadAsync().ConfigureAwait(false);

            if (bb.IsEof)
                break;

            await stream.WriteAsync(bb, cancellationToken).ConfigureAwait(false);
        }
    }
}
/// <summary>
/// Reads from <paramref name="bucket"/> as a list of memory buffers, delegating to the
/// bucket's native multi-buffer support when it implements <see cref="IBucketReadBuffers"/>.
/// </summary>
/// <param name="bucket">The bucket to read from</param>
/// <param name="maxRequested">Maximum number of bytes to request</param>
/// <returns>The buffers read and whether the bucket reported completion</returns>
/// <exception cref="ArgumentNullException"><paramref name="bucket"/> is null</exception>
public static async ValueTask<(ReadOnlyMemory<byte>[] Buffers, bool Done)> ReadBuffersAsync(this Bucket bucket, int maxRequested = int.MaxValue)
{
    if (bucket is null)
        throw new ArgumentNullException(nameof(bucket));

    // Native multi-buffer implementation available? Use it directly.
    if (bucket is IBucketReadBuffers iov)
        return await iov.ReadBuffersAsync(maxRequested).ConfigureAwait(false);

    // Otherwise fall back to a single read wrapped as a one-element buffer list.
    var bb = await bucket.ReadAsync(maxRequested).ConfigureAwait(false);

    return bb.IsEof
        ? (Array.Empty<ReadOnlyMemory<byte>>(), true)
        : (new[] { bb.Memory }, false);
}
/// <summary>
/// Polls <paramref name="bucket"/> for at least <paramref name="minRequested"/> bytes,
/// returning a <see cref="BucketPollBytes"/> that tracks how many bytes were physically
/// read ahead (and therefore still need to be accounted for on consume).
/// </summary>
/// <param name="bucket">The bucket to poll</param>
/// <param name="minRequested">Minimum number of bytes wanted before falling back to a real read</param>
/// <returns>A poll result wrapping the available data</returns>
/// <exception cref="ArgumentNullException"><paramref name="bucket"/> is null</exception>
public static async ValueTask<BucketPollBytes> PollReadAsync(this Bucket bucket, int minRequested = 1)
{
    if (bucket is null)
        throw new ArgumentNullException(nameof(bucket));

    BucketBytes data;
    if (bucket is IBucketPoll bucketPoll)
    {
        data = await bucketPoll.PollAsync(minRequested).ConfigureAwait(false);

        // Data (or EOF) obtained without consuming anything: nothing was read ahead.
        if (!data.IsEmpty || data.IsEof)
            return new BucketPollBytes(bucket, data, 0);
    }
    else
        data = bucket.Peek();

    if (data.Length >= minRequested)
        return new BucketPollBytes(bucket, data, 0); // Nice peek, move along

    // Peek couldn't satisfy the request; do a real read. These bytes are now
    // consumed from the bucket, so the result must remember 'alreadyRead'.
    data = await bucket.ReadAsync(minRequested).ConfigureAwait(false);

    if (data.IsEmpty)
        return new BucketPollBytes(bucket, BucketBytes.Eof, 0); // Nothing to optimize

    byte byte0 = data[0];
    byte[]? dataBytes = (data.Length > 0) ? data.ToArray() : null;
    int alreadyRead = data.Length;

    // Now the special trick, we might just have triggered a much longer read and in
    // that case we want to provide more data
    data = bucket.Peek();

    var (arr, offset) = data;

    if (arr is not null && offset > alreadyRead)
    {
        if ((alreadyRead == 1 && arr[offset - 1] == byte0)
            || arr.Skip(offset - alreadyRead).Take(alreadyRead).SequenceEqual(dataBytes!))
        {
            // The very lucky, but common case. The peek buffer starts with what we already read
            return new BucketPollBytes(bucket, new BucketBytes(arr, offset - alreadyRead, data.Length + alreadyRead), alreadyRead);
        }
    }

    if (data.Length > 0)
    {
        // We have original data and peeked data. Let's copy some data to help our caller
        byte[] result = new byte[alreadyRead + data.Length];

        if (alreadyRead == 1)
            result[0] = byte0;
        else
            dataBytes!.CopyTo(result, 0);

        data.CopyTo(result, alreadyRead);
        dataBytes = result;
    }
    else if (dataBytes == null)
    {
        // Exactly one byte was read and nothing is peekable behind it.
        // (Fixed: original had the terminating semicolon outside the block,
        // leaving the assignment statement unterminated.)
        dataBytes = new[] { byte0 };
    }

    return new BucketPollBytes(bucket, dataBytes, alreadyRead);
}
/// <summary>
/// Reads up to <paramref name="readBytes"/> bytes, first serving the bytes that were
/// already read ahead during the poll (held in <c>Data</c>/<c>AlreadyRead</c>) and then,
/// when needed, reading the remainder from the underlying bucket.
/// After this call the local peek buffer is always released (see finally).
/// </summary>
/// <param name="readBytes">Number of bytes requested; must not be less than <c>AlreadyRead</c> unless it is zero there</param>
/// <returns>The bytes read, possibly fewer than requested</returns>
/// <exception cref="InvalidOperationException">A partial read of the already-read-ahead portion was requested, or the bucket returned more than asked for</exception>
public async ValueTask<BucketBytes> ReadAsync(int readBytes)
{
    try
    {
        if (AlreadyRead == 0)
        {
            // Nothing was read ahead; plain passthrough to the bucket.
            return (await Bucket.ReadAsync(readBytes).ConfigureAwait(false));
        }
        else if (readBytes <= AlreadyRead)
        {
            // Only an exact match with the read-ahead count is supported;
            // a smaller read would leave partially-consumed poll data behind.
            if (readBytes < AlreadyRead)
            {
                throw new InvalidOperationException();
            }

            AlreadyRead = 0;
            var r = Data.Slice(0, readBytes);
            Data = Data.Slice(readBytes);
            return (r);
        }
        else if (readBytes > Data.Length)
        {
            // Need more than the whole peeked buffer: keep a private copy of it,
            // then read the remainder from the bucket.
            byte[] returnData;

            // NOTE(review): this branch condition is unreachable here — we are inside
            // 'readBytes > Data.Length', so 'readBytes < Data.Length' is always false
            // and 'returnData = Data.ToArray()' is what actually runs.
            if (readBytes < Data.Length)
            {
                returnData = Data.Slice(0, readBytes).ToArray();
            }
            else
            {
                returnData = Data.ToArray();
            }

            int consume = readBytes - AlreadyRead; // bytes still owed by the bucket
            int copy = AlreadyRead;                // bytes already accounted as read ahead
            AlreadyRead = 0;                       // No errors in Dispose please

            var bb = await Bucket.ReadAsync(consume).ConfigureAwait(false);

            if (bb.IsEof)
            {
                // Bucket is done; return just the read-ahead portion.
                return (new BucketBytes(returnData, 0, copy));
            }

            // presumably the peek buffer copy already contains the bytes the bucket
            // just returned (poll semantics) — TODO confirm against Bucket.Peek contract
            if (copy + bb.Length <= returnData.Length)
            {
                return (new BucketBytes(returnData, 0, copy + bb.Length)); // Data already available from peek buffer
            }

            // We got new and old data, but how can we return that?
            var (arr, offset) = bb;

            // Unlikely, but cheap: The return buffer is what we need
            if (arr is not null && offset >= copy && new ReadOnlySpan<byte>(arr, offset - copy, copy).SequenceEqual(returnData))
            {
                return (new BucketBytes(arr, offset - copy, bb.Length + copy));
            }

            // Fall back to concatenating read-ahead bytes + freshly read bytes.
            byte[] ret = new byte[bb.Length + copy];
            Array.Copy(returnData, ret, copy);
            bb.Span.CopyTo(new Span<byte>(ret, copy, bb.Length));
            return (ret);
        }
        else
        {
            // AlreadyRead < readBytes <= Data.Length: the peeked buffer covers the
            // request, but part of it still has to be consumed from the bucket.
            int consume = readBytes - AlreadyRead;
            BucketBytes slicedDataCopy = Data.Slice(0, Math.Min(readBytes, Data.Length)).ToArray();

            var bb = await Bucket.ReadAsync(consume).ConfigureAwait(false);

            AlreadyRead = Math.Max(0, AlreadyRead - bb.Length);

            if (bb.Length == consume)
            {
                return (slicedDataCopy);
            }
            else if (bb.Length < consume)
            {
                // Short read: trim the copy so we only hand out bytes actually consumed.
                return (slicedDataCopy.Slice(0, slicedDataCopy.Length - (consume - bb.Length)));
            }
            else
            {
                // Bucket returned more than requested — contract violation.
                throw new InvalidOperationException();
            }
        }
    }
    finally
    {
        // Release the peek buffer in all cases; it is only valid for one read.
        Data = BucketBytes.Empty;
    }
}