/// <summary>
/// Streams the product's thumbnail photo bytes into <paramref name="strm"/> and
/// returns the thumbnail file name, or "" when the product has no thumbnail.
/// </summary>
/// <param name="id">Product ID to look up.</param>
/// <param name="strm">Destination stream; left open for the caller.</param>
/// <returns>The thumbnail file name, or an empty string if none exists.</returns>
public string GetThumbnail(int id, System.IO.Stream strm)
{
    // Look up the file name first; an empty result short-circuits the blob read.
    string fileName = this.DB.Products.Where(a => a.ProductID == id).Select(a => a.ThumbnailPhotoFileName).FirstOrDefault();
    if (string.IsNullOrEmpty(fileName))
    {
        return "";
    }

    System.Transactions.TransactionOptions top = new System.Transactions.TransactionOptions();
    top.Timeout = TimeSpan.FromSeconds(60);
    // Serializable keeps the blob row stable while it is streamed out in chunks.
    top.IsolationLevel = System.Transactions.IsolationLevel.Serializable;

    using (TransactionScope scope = new TransactionScope(TransactionScopeOption.Required, top))
    using (DbConnection conn = DBConnectionFactory.GetRIAppDemoConnection())
    {
        byte[] bytes = new byte[64 * 1024];
        string fldname = "ThumbNailPhoto";
        // id is an int, so the formatted WHERE clause cannot carry injected SQL.
        BlobStream bstrm = new BlobStream(conn as SqlConnection, "[SalesLT].[Product]", fldname,
            string.Format("WHERE [ProductID]={0}", id));
        // BUGFIX: Close() previously ran only on the happy path, leaking the
        // blob reader whenever a Read or Write threw. Close is now in finally.
        try
        {
            bstrm.Open();
            int cnt = bstrm.Read(bytes, 0, bytes.Length);
            while (cnt > 0)
            {
                strm.Write(bytes, 0, cnt);
                cnt = bstrm.Read(bytes, 0, bytes.Length);
            }
        }
        finally
        {
            bstrm.Close();
        }
        scope.Complete();
    }
    return fileName;
}
// Get a page of a page blob.
// Return true on success, false if the blob/container is missing (404),
// throw exception on any other storage error.
public bool GetPage(string containerName, string blobName, int pageOffset, int pageSize, out string content)
{
    content = null;
    try
    {
        CloudBlobContainer container = BlobClient.GetContainerReference(containerName);
        CloudPageBlob blob = container.GetPageBlobReference(blobName);
        byte[] data = new byte[pageSize];
        // BUGFIX: the stream was only Close()d on success, leaking it whenever
        // Seek/Read threw; a using block disposes it on every path.
        using (BlobStream stream = blob.OpenRead())
        {
            stream.Seek(pageOffset, SeekOrigin.Begin);
            // BUGFIX: Stream.Read may return fewer bytes than requested; loop
            // until the page is complete or the stream ends.
            int totalRead = 0;
            while (totalRead < pageSize)
            {
                int read = stream.Read(data, totalRead, pageSize - totalRead);
                if (read <= 0)
                {
                    break;
                }
                totalRead += read;
            }
            // BUGFIX: decode only the bytes actually read — decoding the whole
            // buffer previously appended NUL padding to short reads.
            content = new UTF8Encoding().GetString(data, 0, totalRead);
        }
        return true;
    }
    catch (StorageClientException ex)
    {
        if ((int)ex.StatusCode == 404)
        {
            return false;
        }
        throw;
    }
}
/// <summary>
/// RecalculateCentroids with zero processed points should move the centroid
/// to the origin while preserving its identity.
/// </summary>
public void RecalculateCentroidsTest2()
{
    // Arrange: a job with N = 0 points and a single centroid (K = 1).
    KMeansJobData jobData = new KMeansJobData(Guid.NewGuid(), 0, null, 1, 10, DateTime.Now);
    KMeansJob_Accessor target = new KMeansJob_Accessor(jobData, "server");
    target.InitializeStorage();

    // Snapshot the centroid as written by InitializeStorage.
    byte[] cBytes = new byte[Centroid.Size];
    using (BlobStream cStream = target.Centroids.OpenRead())
    {
        cStream.Read(cBytes, 0, cBytes.Length);
    }
    Centroid cOriginal = Centroid.FromByteArray(cBytes);

    // Act: recalculate with an empty points-processed record for this centroid.
    target.totalPointsProcessedDataByCentroid[cOriginal.ID] = new PointsProcessedData();
    target.RecalculateCentroids();

    // Re-read the centroid after recalculation.
    byte[] cBytesNew = new byte[Centroid.Size];
    using (BlobStream cStreamNew = target.Centroids.OpenRead())
    {
        cStreamNew.Read(cBytesNew, 0, cBytesNew.Length);
    }
    Centroid cNew = Centroid.FromByteArray(cBytesNew);

    Assert.AreEqual(cOriginal.ID, cNew.ID);
    // BUGFIX: Assert.AreEqual takes (expected, actual); the coordinate checks
    // had the arguments reversed, which garbles the failure message.
    Assert.AreEqual(0, cNew.X);
    Assert.AreEqual(0, cNew.Y);
}
/// <summary>
/// Downloads a VHD page blob to the local file named in <paramref name="config"/>,
/// fetching only the blob's valid page ranges (sparse regions stay zeroed).
/// </summary>
/// <param name="config">Account credentials, container/blob names and target file.</param>
private static void DownloadVHDFromCloud(Config config)
{
    StorageCredentialsAccountAndKey creds = new StorageCredentialsAccountAndKey(config.Account, config.Key);
    CloudBlobClient blobStorage = new CloudBlobClient(config.AccountUrl, creds);
    // Disable read-ahead: the ranged Seek/Read pattern below manages its own windows.
    blobStorage.ReadAheadInBytes = 0;
    CloudBlobContainer container = blobStorage.GetContainerReference(config.Container);
    CloudPageBlob pageBlob = container.GetPageBlobReference(config.Blob);

    // Get the length of the blob
    pageBlob.FetchAttributes();
    long vhdLength = pageBlob.Properties.Length;
    long totalDownloaded = 0;
    Console.WriteLine("Vhd size: " + Megabytes(vhdLength));

    // BUGFIX: neither the FileStream nor the BlobStream was ever disposed,
    // leaking both handles when any step threw. Both now sit in using blocks.
    using (FileStream fileStream = new FileStream(config.Vhd.FullName, FileMode.Create, FileAccess.Write))
    using (BlobStream blobStream = pageBlob.OpenRead())
    {
        // Pre-size the file so unwritten (sparse) ranges read back as zeros.
        fileStream.SetLength(vhdLength);

        // Download the valid ranges of the blob, and write them to the file
        IEnumerable<PageRange> pageRanges = pageBlob.GetPageRanges();
        foreach (PageRange range in pageRanges)
        {
            // EndOffset is inclusive... so need to add 1
            int rangeSize = (int)(range.EndOffset + 1 - range.StartOffset);

            // Chop range into 4MB chunks, if needed
            for (int subOffset = 0; subOffset < rangeSize; subOffset += FourMegabyteAsBytes)
            {
                int subRangeSize = Math.Min(rangeSize - subOffset, FourMegabyteAsBytes);
                blobStream.Seek(range.StartOffset + subOffset, SeekOrigin.Begin);
                fileStream.Seek(range.StartOffset + subOffset, SeekOrigin.Begin);

                Console.WriteLine("Range: ~" + Megabytes(range.StartOffset + subOffset)
                    + " + " + PrintSize(subRangeSize));

                byte[] buffer = new byte[subRangeSize];

                // BUGFIX: Stream.Read may return fewer bytes than requested;
                // loop until the chunk is complete (or the stream ends).
                int filled = 0;
                while (filled < subRangeSize)
                {
                    int got = blobStream.Read(buffer, filled, subRangeSize - filled);
                    if (got <= 0)
                    {
                        break;
                    }
                    filled += got;
                }
                fileStream.Write(buffer, 0, filled);
                totalDownloaded += filled;
            }
        }
    }
    Console.WriteLine("Downloaded " + Megabytes(totalDownloaded) + " of " + Megabytes(vhdLength));
}
/// <summary>
/// Reads one block of up to <paramref name="blockSize"/> bytes from the user's
/// blob, starting at <paramref name="offSet"/>.
/// </summary>
/// <param name="fileName">Blob name to read from.</param>
/// <param name="offSet">Byte offset at which the block starts.</param>
/// <param name="blockSize">Number of bytes to read.</param>
/// <param name="userId">Owner used to resolve the blob reference.</param>
/// <returns>
/// A buffer of exactly <paramref name="blockSize"/> bytes (zero-padded past the
/// end of the blob, matching the original behavior), or <c>null</c> when the
/// blob does not exist.
/// </returns>
public byte[] DownLoadBlock(string fileName, long offSet, int blockSize, int userId)
{
    var blob = GetBlockBlob(fileName, userId);
    if (!blob.Exists())
    {
        return null;
    }

    // BUGFIX: the BlobStream was never disposed, and the single Read call's
    // return value was ignored — a partial read would silently hand back a
    // buffer whose tail was never filled by this request. Loop until the
    // block is complete or the stream ends.
    using (BlobStream reader = blob.OpenRead())
    {
        reader.Seek(offSet, SeekOrigin.Begin);
        byte[] bufferBytes = new byte[blockSize];
        int filled = 0;
        while (filled < blockSize)
        {
            int got = reader.Read(bufferBytes, filled, blockSize - filled);
            if (got <= 0)
            {
                break;
            }
            filled += got;
        }
        return bufferBytes;
    }
}
/// <summary>
/// Downloads a block blob to a local file in 4 KB chunks, optionally reporting
/// percentage progress and optionally deleting the blob afterwards. Does
/// nothing if a cancellation is already pending.
/// </summary>
private void DownloadBlob(string connectionString, string containerName, string blobFilename, string filename, bool deleteAfterDownload, bool updateProgress)
{
    // Honor a pending cancellation before touching storage at all.
    if (this.cancelled)
    {
        return;
    }

    CloudStorageAccount account = CloudStorageAccount.Parse(connectionString);
    CloudBlobClient client = account.CreateCloudBlobClient();
    CloudBlobContainer container = client.GetContainerReference(containerName.ToLower());
    CloudBlockBlob blob = container.GetBlockBlobReference(blobFilename);

    // Open a stream for writing
    using (BlobStream source = blob.OpenRead())
    using (FileStream target = new FileStream(filename, FileMode.Create))
    {
        // The blob's total size is needed so progress can be a percentage.
        blob.FetchAttributes();
        long blobLength = blob.Properties.Length;

        byte[] chunk = new byte[4096];
        long bytesCopied = 0;
        int bytesRead;
        while ((bytesRead = source.Read(chunk, 0, chunk.Length)) > 0)
        {
            target.Write(chunk, 0, bytesRead);
            bytesCopied += bytesRead;
            if (updateProgress)
            {
                int percent = (int)(((double)bytesCopied / (double)blobLength) * 100);
                OnProgress(percent);
            }
        }
    }

    if (deleteAfterDownload)
    {
        blob.Delete();
    }
}
/// <summary>
/// Copies the BLOB data into the specified stream.
/// </summary>
///
/// <param name="stream">
/// The destination stream to write the data to.
/// </param>
///
/// <remarks>
/// The destination stream is not closed by this method.
/// </remarks>
///
/// <exception cref="ArgumentNullException">
/// <paramref name="stream"/> is <b>null</b>.
/// </exception>
///
/// <exception cref="NotSupportedException">
/// If the Blob doesn't have data that was retrieved from HealthVault.
/// </exception>
///
/// <exception cref="HealthServiceException">
/// If there is a failure reading the data from HealthVault.
/// </exception>
///
public void SaveToStream(Stream stream)
{
    Validator.ThrowIfArgumentNull(stream, nameof(stream), Resources.ArgumentNull);

    using (BlobStream source = GetReaderStream())
    {
        // Buffer is the smaller of the known content length (when available)
        // and the default buffer size.
        int chunkSize = Math.Min((int)(ContentLength ?? int.MaxValue), DefaultStreamBufferSize);
        byte[] chunk = new byte[chunkSize];

        for (int read = source.Read(chunk, 0, chunkSize);
             read > 0;
             read = source.Read(chunk, 0, chunkSize))
        {
            stream.Write(chunk, 0, read);
        }
    }
}
/// <summary>
/// Downloads <paramref name="blob"/> to <paramref name="fileName"/> by fetching
/// fixed-size blocks in parallel into temporary "*.dat" files next to the
/// destination, then stitching the pieces together in order.
/// </summary>
/// <param name="blob">Source block blob; its attributes are refreshed first.</param>
/// <param name="fileName">Destination path; also used to name the temp block files.</param>
/// <param name="maxBlockSize">Maximum number of bytes fetched per parallel block.</param>
public static void ParallelDownloadToFile(CloudBlockBlob blob, string fileName, int maxBlockSize)
{
    // refresh the values
    blob.FetchAttributes();
    long fileSize = blob.Attributes.Properties.Length;
    var filePath = Path.GetDirectoryName(fileName);
    var fileNameWithoutPath = Path.GetFileNameWithoutExtension(fileName);

    long leftToRead = fileSize;
    // NOTE(review): int offsets limit this helper to blobs < 2 GB, as before —
    // widening StartPosition would require changing BlockTransferDetail.
    int startPosition = 0;

    // One block for every maxBlockSize bytes plus one for the remainder
    // (the trailing block has BytesToRead == 0 when fileSize divides evenly).
    var blockCount = ((int)Math.Floor((double)(fileSize / maxBlockSize))) + 1;

    BlockTransferDetail[] transferDetails = new BlockTransferDetail[blockCount];

    // Populate the control array: each entry records where its block starts,
    // how much to read, and the temp file it lands in.
    for (int j = 0; j < transferDetails.Length; j++)
    {
        int toRead = (int)(maxBlockSize < leftToRead ? maxBlockSize : leftToRead);
        string blockId = Path.Combine(filePath,
            string.Format("{0}_{1}.dat", fileNameWithoutPath, j.ToString("00000000000")));

        transferDetails[j] = new BlockTransferDetail()
        {
            StartPosition = startPosition,
            BytesToRead = toRead,
            BlockId = blockId
        };

        startPosition += toRead;
        leftToRead -= toRead;
    }

    // Parallel download: each worker opens its own read stream and writes
    // its block to its own temp file.
    // BUGFIX: removed a leftover Thread.Sleep(10000) that stalled every
    // parallel worker for ten seconds.
    Parallel.For(0, transferDetails.Length, j =>
    {
        using (BlobStream stream = blob.OpenRead())
        {
            stream.Seek(transferDetails[j].StartPosition, SeekOrigin.Begin);

            byte[] buff = new byte[transferDetails[j].BytesToRead];

            // BUGFIX: Stream.Read may return fewer bytes than requested;
            // loop until the whole block is buffered or the stream ends.
            int filled = 0;
            while (filled < buff.Length)
            {
                int got = stream.Read(buff, filled, buff.Length - filled);
                if (got <= 0)
                {
                    break;
                }
                filled += got;
            }

            using (Stream fileStream = new FileStream(transferDetails[j].BlockId,
                       FileMode.Create, FileAccess.Write, FileShare.None))
            using (BinaryWriter bw = new BinaryWriter(fileStream))
            {
                bw.Write(buff, 0, filled);
            }
        }
    });

    // Assemble the temp files into the destination, in block order.
    // BUGFIX: FileMode.Append would have appended to a pre-existing destination
    // file; the download must start from an empty file.
    using (Stream fileStream = new FileStream(fileName, FileMode.Create, FileAccess.Write, FileShare.None))
    using (BinaryWriter bw = new BinaryWriter(fileStream))
    {
        for (int j = 0; j < transferDetails.Length; j++)
        {
            // read them into the file (append) and then delete the temp piece
            bw.Write(System.IO.File.ReadAllBytes(transferDetails[j].BlockId));
            System.IO.File.Delete(transferDetails[j].BlockId);
        }
    }
}
/// <summary>
/// InitializeStorage must create the job container with "points" and
/// "centroids" blobs of the expected sizes, with points in [-50, 50) and an
/// unassigned (empty) centroid ID.
/// </summary>
public void InitializeStorageTest()
{
    KMeansJobData jobData = new KMeansJobData(Guid.NewGuid(), 2, null, 4, 10, DateTime.Now);
    KMeansJob target = new KMeansJob(jobData, "server");
    target.InitializeStorage();

    // Verify that the created containers and blobs actually exist
    CloudBlobClient client = AzureHelper.StorageAccount.CreateCloudBlobClient();
    CloudBlobContainer c = null;
    try
    {
        c = client.GetContainerReference(jobData.JobID.ToString());
        c.FetchAttributes();
    }
    catch (StorageClientException e)
    {
        if (e.ErrorCode == StorageErrorCode.ResourceNotFound)
        {
            Assert.Fail();
        }
        else
        {
            throw;
        }
    }

    CloudBlob points = null, centroids = null;
    try
    {
        points = c.GetBlobReference("points");
        points.FetchAttributes();
        centroids = c.GetBlobReference("centroids");
        centroids.FetchAttributes();
    }
    catch (StorageClientException e)
    {
        if (e.ErrorCode == StorageErrorCode.ResourceNotFound)
        {
            Assert.Fail();
        }
        else
        {
            throw;
        }
    }

    // Verify that unpacking a ClusterPoint actually yields a point with
    // coordinates [-50, 50) and a null centroidID
    byte[] pointBytes;
    using (BlobStream pointsStream = points.OpenRead())
    {
        pointBytes = new byte[ClusterPoint.Size];
        pointsStream.Read(pointBytes, 0, ClusterPoint.Size);
    }
    ClusterPoint p = ClusterPoint.FromByteArray(pointBytes);
    Assert.IsTrue(p.X >= -50 && p.X < 50);
    Assert.IsTrue(p.Y >= -50 && p.Y < 50);
    // BUGFIX: Assert.AreEqual takes (expected, actual); this call and the two
    // length checks below had the arguments reversed, which garbles the
    // failure message when the assertion trips.
    Assert.AreEqual(Guid.Empty, p.CentroidID);

    // Verify that the blobs are the correct length
    Assert.AreEqual(ClusterPoint.Size * jobData.N, points.Properties.Length);
    Assert.AreEqual(Centroid.Size * jobData.K, centroids.Properties.Length);
}