//-------------------------------------------------
// Write pages to a page blob
//-------------------------------------------------
private static void WriteToPageBlob(PageBlobClient pageBlobClient, Stream dataStream)
{
    // The write begins at the second 512-byte page of the blob.
    const long startingOffset = 512;

    // <Snippet_WriteToPageBlob>
    pageBlobClient.UploadPages(dataStream, startingOffset);
    // </Snippet_WriteToPageBlob>
}
/// <summary>
/// Encodes <paramref name="message"/> with the configured encoding and writes it
/// to the page blob starting at offset 0.
/// </summary>
/// <remarks>
/// The Put Page operation requires the payload length to be a multiple of 512
/// bytes, so the encoded message is zero-padded up to the next 512-byte boundary
/// (a no-op when it is already aligned). NOTE(review): unlike the other Send*
/// overloads, this one does not honor the <c>overwrite</c> flag — confirm that
/// is intentional.
/// </remarks>
protected void SendString(PageBlobClient blob, string message)
{
    byte[] msgBytes = encoding.GetBytes(message);

    // Nothing to write: avoid issuing an invalid zero-length Put Page request.
    if (msgBytes.Length == 0)
    {
        return;
    }

    // Pad to a 512-byte boundary; Put Page rejects ranges that are not 512-aligned.
    int paddedLength = (msgBytes.Length + 511) & ~511;
    if (paddedLength != msgBytes.Length)
    {
        Array.Resize(ref msgBytes, paddedLength);
    }

    using (MemoryStream ms = new MemoryStream(msgBytes))
    {
        blob.UploadPages(ms, 0);
    }
}
/// <summary>
/// Uploads <paramref name="stream"/> to the page blob at offset 0, unless the
/// blob already exists and overwriting is disabled.
/// </summary>
protected void SendStream(PageBlobClient blob, Stream stream)
{
    // Respect the overwrite flag: skip the upload when the blob is already present.
    bool skipExistingBlob = !overwrite && blob.Exists();
    if (skipExistingBlob)
    {
        return;
    }

    // NOTE(review): Put Page requires the stream length to be a multiple of
    // 512 bytes — confirm callers guarantee this.
    blob.UploadPages(stream, 0);
}
/// <summary>
/// Writes <paramref name="msgBytes"/> to the page blob starting at offset 0,
/// unless the blob already exists and overwriting is disabled.
/// </summary>
/// <remarks>
/// The Put Page operation requires the payload length to be a multiple of 512
/// bytes, so the data is zero-padded up to the next 512-byte boundary when
/// necessary. The caller's array is never mutated.
/// </remarks>
protected void SendBytes(PageBlobClient blob, byte[] msgBytes)
{
    if (!overwrite && blob.Exists())
    {
        return;
    }

    // Nothing to write: avoid issuing an invalid zero-length Put Page request.
    if (msgBytes.Length == 0)
    {
        return;
    }

    // Pad to a 512-byte boundary; Array.Resize reassigns the local parameter to
    // a fresh copy, leaving the caller's buffer untouched.
    int paddedLength = (msgBytes.Length + 511) & ~511;
    if (paddedLength != msgBytes.Length)
    {
        Array.Resize(ref msgBytes, paddedLength);
    }

    using (MemoryStream ms = new MemoryStream(msgBytes))
    {
        blob.UploadPages(ms, 0);
    }
}
public void TestCanUseLeaseIdForBlobOperations()
{
    using var provider = new TestingAzureBlobLeaseDistributedLockProvider();
    var blobName = provider.GetUniqueSafeName();
    var pageBlob = new PageBlobClient(
        AzureCredentials.ConnectionString,
        AzureCredentials.DefaultBlobContainerName,
        blobName);

    const int BlobSize = 512;
    pageBlob.Create(size: BlobSize);

    var distributedLock = new AzureBlobLeaseDistributedLock(pageBlob);
    using var handle = distributedLock.Acquire();

    // While the lease is held, a write that omits the lease id must be rejected.
    Assert.Throws<RequestFailedException>(
            () => pageBlob.UploadPages(new MemoryStream(new byte[BlobSize]), offset: 0))
        .ErrorCode.ShouldEqual(AzureErrors.LeaseIdMissing);

    // Supplying the lease id through request conditions allows the write.
    Assert.DoesNotThrow(
        () => pageBlob.UploadPages(
            new MemoryStream(new byte[BlobSize]),
            offset: 0,
            conditions: new PageBlobRequestConditions { LeaseId = handle.LeaseId }));

    // Once the handle is disposed, its lease id is no longer accessible.
    handle.Dispose();
    Assert.Throws<ObjectDisposedException>(() => handle.LeaseId.ToString());
}
/// <summary>
/// Uploads the contents of the file at <paramref name="fileNameAndPath"/> to the
/// page blob starting at offset 0, unless the blob already exists and
/// overwriting is disabled.
/// </summary>
/// <exception cref="CantSendFileDataWhenFileDoesNotExistException">
/// Thrown when the file does not exist.
/// </exception>
protected void SendFile(PageBlobClient blob, string fileNameAndPath)
{
    if (!File.Exists(fileNameAndPath))
    {
        throw new CantSendFileDataWhenFileDoesNotExistException(fileNameAndPath);
    }

    if (!overwrite && blob.Exists())
    {
        return;
    }

    // Open the file directly as a binary stream. The previous implementation
    // wrapped it in a StreamReader (a *text* reader) only to reach BaseStream.
    using (FileStream fs = File.OpenRead(fileNameAndPath))
    {
        // NOTE(review): Put Page requires the stream length to be a multiple of
        // 512 bytes — confirm callers only send page-aligned files.
        blob.UploadPages(fs, 0);
    }
}
/// <summary>
/// Flushes <paramref name="length"/> bytes of <paramref name="b"/> (starting at
/// <paramref name="offset"/>) to the backing page blob in 512-byte pages.
/// Oversized payloads are written in batches of <c>MaxLength</c>; a trailing
/// partial page is either zero-padded (small writes) or carried back into
/// <c>_buffer</c> for a later flush (large unaligned writes).
/// </summary>
/// <remarks>
/// NOTE(review): two suspected bugs in this method — confirm against callers:
/// 1. When <c>length &gt; 512</c> and <c>length % 512 == 0</c>, the first branch
///    copies <c>length</c> bytes into a 512-byte scratch array, which should
///    throw from Buffer.BlockCopy (destination too small).
/// 2. In the else branch, the BlockCopy that saves the excess bytes reads from
///    source index <c>length - excessLength</c> and ignores <paramref name="offset"/>;
///    it presumably should read from <c>offset + length - excessLength</c>.
/// Semantics of <c>_currPageOffset</c>/<c>SetCurrentPageOffset()</c> are defined
/// elsewhere in the class and are not visible here.
/// </remarks>
protected virtual void FlushBuffer(byte[] b, int offset, int length)
{
    // reset the buffer.
    _bufferPosition = 0;

    // is the length greater than max allowed size?
    if (length > MaxLength)
    {
        // flush the buffer in batches of max size.
        do
        {
            // ensure the page blob is large enough.
            EnsureSizeOrResize(MaxLength);
            using MemoryStream stream = new MemoryStream(b, offset, MaxLength);
            _pageBlobClient.UploadPages(stream, _currPageOffset); // write to the page blob.
            _currPagePosition += MaxLength; // reflects the size of the page blob after write.
            SetCurrentPageOffset();
            offset += MaxLength; // advance the offset by max length.
            length -= MaxLength; // reduce length by max length.
        } while (length > MaxLength);
    }

    // would all the "length" bytes fit exactly into pages of 512 bytes each?
    int excessLength = length % 512;
    if (length <= 512 || excessLength == 0)
    {
        // ensure the page is large enough.
        EnsureSizeOrResize(length);
        // upload the pages: copy into a zero-padded 512-byte scratch page.
        // NOTE(review): if length > 512 here (i.e. excessLength == 0 with multiple
        // pages), this BlockCopy overflows the 512-byte scratch — see remarks.
        byte[] scratch = new byte[512];
        Buffer.BlockCopy(b, offset, scratch, 0, length);
        using MemoryStream stream = new MemoryStream(scratch, 0, 512);
        PageInfo pageInfo = _pageBlobClient.UploadPages(stream, _currPageOffset);
        // Position advances by the logical byte count, not the padded page size.
        _currPagePosition += length;
        SetCurrentPageOffset();
    }
    else
    {
        // ensure the page is large enough (whole pages only; excess handled below).
        EnsureSizeOrResize(length - excessLength + 512);
        using (MemoryStream stream = new MemoryStream(b, offset, length - excessLength))
        {
            PageInfo pageInfo = _pageBlobClient.UploadPages(stream, _currPageOffset);
            _currPagePosition += length - excessLength;
            SetCurrentPageOffset();
        }
        // don't yet write the excess bytes.
        // add them back to the buffer.
        // NOTE(review): source index ignores `offset` — likely should be
        // `offset + length - excessLength`; see remarks.
        Buffer.BlockCopy(b, length - excessLength, _buffer, _bufferPosition, excessLength);
        _bufferPosition += excessLength;

        // TODO: test this path more.
        // mostly will fail, and requires to flush.
        // ensure the page is large enough.
        //EnsureSizeOrResize(excessLength);
        //using (MemoryStream stream = new MemoryStream(b, length - excessLength, excessLength))
        //{
        //    PageInfo pageInfo = _pageBlobClient.UploadPages(stream, _currPageOffset);
        //    // we don't update the blob offset yet because we still have the same data in the buffer.
        //    // this is an optimization to avoid sparse pages.
        //    // the next time a flush operation is executed, we resume writing on the same page
        //    // and possibly would overwrite a few bytes (up to a max of 511 bytes).
        //    // on the other hand, if the flush buffer were to never be called again, we are still
        //    // safe since we have already committed these excess bytes to the blob.
        //}
    }
}