/// <summary>
/// Uploads <paramref name="content"/> by appending it to the blob via the backing store.
/// Runs synchronously and returns a completed task.
/// </summary>
/// <param name="content">Text to append to the blob.</param>
/// <param name="encoding">Encoding used to serialize <paramref name="content"/>; UTF-8 when null.</param>
/// <param name="accessCondition">Ignored by this in-memory/fake implementation.</param>
/// <param name="options">Ignored by this in-memory/fake implementation.</param>
/// <param name="operationContext">Ignored by this in-memory/fake implementation.</param>
/// <param name="cancellationToken">Observed before any work is done.</param>
public Task UploadTextAsync(string content, Encoding encoding = null, AccessCondition accessCondition = null, BlobRequestOptions options = null, OperationContext operationContext = null, CancellationToken cancellationToken = default(CancellationToken))
{
    cancellationToken.ThrowIfCancellationRequested();
    using (CloudBlobStream stream = _store.OpenWriteAppend(_containerName, _blobName, _metadata))
    {
        // FIX: honor the caller-supplied encoding (it was previously ignored in
        // favor of a hard-coded Encoding.UTF8); UTF-8 remains the default.
        byte[] buffer = (encoding ?? Encoding.UTF8).GetBytes(content);
        stream.Write(buffer, 0, buffer.Length);
        stream.Commit();
    }
    return Task.FromResult(0);
}
/// <summary>
/// Uploads <paramref name="content"/> as the full content of a block blob via the backing store.
/// Runs synchronously and returns a completed task.
/// </summary>
/// <param name="content">Text to write to the blob.</param>
/// <param name="encoding">Encoding used to serialize <paramref name="content"/>; UTF-8 when null.</param>
/// <param name="accessCondition">Ignored by this in-memory/fake implementation.</param>
/// <param name="options">Ignored by this in-memory/fake implementation.</param>
/// <param name="operationContext">Ignored by this in-memory/fake implementation.</param>
/// <param name="cancellationToken">Observed before any work is done.</param>
public override Task UploadTextAsync(string content, Encoding encoding, AccessCondition accessCondition, BlobRequestOptions options, OperationContext operationContext, CancellationToken cancellationToken)
{
    cancellationToken.ThrowIfCancellationRequested();
    using (CloudBlobStream stream = _store.OpenWriteBlock(_containerName, _blobName, _metadata))
    {
        // FIX: honor the caller-supplied encoding (it was previously ignored in
        // favor of a hard-coded Encoding.UTF8); UTF-8 remains the default.
        byte[] buffer = (encoding ?? Encoding.UTF8).GetBytes(content);
        stream.Write(buffer, 0, buffer.Length);
        // FIX: use the synchronous Commit() instead of CommitAsync().Wait() —
        // blocking on an async call risks deadlock/thread-pool starvation, and
        // the sibling append-blob overload already commits synchronously.
        stream.Commit();
    }
    return Task.FromResult(0);
}
/// <summary>
/// Writes a slice of <paramref name="b"/> to the underlying stream and folds the
/// same bytes into the running CRC accumulator.
/// </summary>
/// <param name="b">Source buffer.</param>
/// <param name="offset">Index of the first byte to write.</param>
/// <param name="length">Number of bytes to write.</param>
public override void WriteBytes(byte[] b, int offset, int length)
{
    // Write first, then update the checksum: if the write throws, the CRC
    // is left covering only the bytes that actually reached the stream.
    _stream.Write(b, offset, length);
    _crc.Update(b, offset, length);
}
/// <summary>
/// Pass-through write: forwards the requested buffer slice unchanged to the
/// wrapped inner stream.
/// </summary>
/// <param name="buffer">Source buffer.</param>
/// <param name="offset">Index of the first byte to write.</param>
/// <param name="count">Number of bytes to write.</param>
public override void Write(byte[] buffer, int offset, int count) => _inner.Write(buffer, offset, count);
/// <summary>
/// Callback that processes one worker-job queue message: dispatches on the embedded
/// RMS command (fetch templates, or publish a file with a template), then deletes the
/// message on success. On failure the message is left on the queue for retry; messages
/// dequeued more than 5 times are treated as poison and deleted.
/// </summary>
/// <param name="state">Expected to be the <see cref="CloudQueueMessage"/> to process.</param>
private void ProcessQueueMessage(object state)
{
    CloudQueueMessage msg = state as CloudQueueMessage;
    try
    {
        // FIX: guard against a null/mistyped state object — the original code
        // dereferenced msg.DequeueCount without checking the 'as' cast result.
        if (msg == null)
        {
            Trace.TraceError("ProcessQueueMessage invoked with state that is not a CloudQueueMessage.");
            return;
        }

        // Log and delete if this is a "poison" queue message (repeatedly processed
        // and always causes an error that prevents processing from completing).
        // Production applications should move the "poison" message to a "dead message"
        // queue for analysis rather than deleting the message.
        if (msg.DequeueCount > 5)
        {
            Trace.TraceError("Deleting poison message: message {0} Role Instance {1}.", msg.ToString(), GetRoleInstance());
            DataModel.StorageFactory.Instance.IpcAzureAppWorkerJobQueue.DeleteMessage(msg);
            return;
        }

        RmsCommand rmsCommand = new RmsCommand(msg.AsString);
        switch (rmsCommand.RmsOperationCommand)
        {
            case RmsCommand.Command.GetTemplate:
            {
                // Pull the tenant's RMS templates and persist them to table storage.
                ServicePrincipalModel sp = ServicePrincipalModel.GetFromStorage(rmsCommand.Parameters.First<object>().ToString());
                RMS.RmsContentPublisher rmsPublisher = RMS.RmsContentPublisher.Create(sp.TenantId, sp.AppId, sp.Key);
                var templates = rmsPublisher.GetRmsTemplates();
                List<TemplateModel> templateEntityList = new List<TemplateModel>();
                foreach (var temp in templates)
                {
                    TemplateModel templateEntity = new TemplateModel();
                    templateEntity.TenantId = sp.TenantId;
                    templateEntity.TemplateId = temp.TemplateId;
                    templateEntity.TemplateName = temp.Name;
                    templateEntity.TemplateDescription = temp.Description;
                    templateEntityList.Add(templateEntity);
                }
                TemplateModel.SaveToStorage(templateEntityList);
                break;
            }
            case RmsCommand.Command.PublishTemplate:
            {
                PublishModel publishJob = PublishModel.GetFromStorage(rmsCommand.Parameters[0].ToString(), rmsCommand.Parameters[1].ToString());
                ServicePrincipalModel sp = ServicePrincipalModel.GetFromStorage(rmsCommand.Parameters[0].ToString());
                CloudBlockBlob originalFileBlob = DataModel.StorageFactory.Instance.IpcAzureAppFileBlobContainer.GetBlockBlobReference(publishJob.OriginalFileBlobRef);
                Stream sinkStream = null;
                string tempFilePath = null;
                try
                {
                    // If the file is smaller than 100,000 bytes, keep it in memory;
                    // otherwise spill it to a scratch file on disk.
                    if (publishJob.OriginalFileSizeInBytes < 100000)
                    {
                        sinkStream = new MemoryStream();
                    }
                    else
                    {
                        // FIX: anchor the scratch file in the temp directory —
                        // Path.GetRandomFileName() alone is a relative name and
                        // would land in the process working directory.
                        tempFilePath = Path.Combine(Path.GetTempPath(), Path.GetRandomFileName());
                        sinkStream = new FileStream(tempFilePath, FileMode.Create);
                    }
                    using (Stream sourceStream = originalFileBlob.OpenRead())
                    using (sinkStream)
                    {
                        RMS.RmsContent rmsContent = new RMS.RmsContent(sourceStream, sinkStream);
                        rmsContent.RmsTemplateId = publishJob.TemplateId;
                        rmsContent.OriginalFileNameWithExtension = publishJob.OriginalFileName;
                        RMS.RmsContentPublisher rmsContentPublisher = RMS.RmsContentPublisher.Create(sp.TenantId, sp.AppId, sp.Key);
                        rmsContentPublisher.PublishContent(rmsContent);
                        publishJob.PublishedFileName = rmsContent.PublishedFileNameWithExtension;
                        sinkStream.Flush();
                        sinkStream.Seek(0, SeekOrigin.Begin);
                        // Published file is uploaded to blob storage.
                        // Note: This sample code doesn't manage lifetime of this original and published file blob.
                        // Actual code must manage the lifetime as appropriate.
                        CloudBlockBlob destFileBlob = DataModel.StorageFactory.Instance.IpcAzureAppFileBlobContainer.GetBlockBlobReference(publishJob.PublishedFileBlobRef);
                        using (CloudBlobStream blobStream = destFileBlob.OpenWrite())
                        {
                            byte[] tempBuffer = new byte[1024];
                            int readSize;
                            while ((readSize = sinkStream.Read(tempBuffer, 0, tempBuffer.Length)) > 0)
                            {
                                blobStream.Write(tempBuffer, 0, readSize);
                            }
                            blobStream.Flush();
                        }
                    }
                    publishJob.JState = PublishModel.JobState.Completed.ToString();
                    publishJob.SaveToStorage();
                    break;
                }
                finally
                {
                    // Best-effort cleanup of the on-disk scratch file, if one was created.
                    if (!string.IsNullOrWhiteSpace(tempFilePath) && File.Exists(tempFilePath))
                    {
                        File.Delete(tempFilePath);
                    }
                }
            }
        }

        // Processing succeeded — remove the message from the queue.
        DataModel.StorageFactory.Instance.IpcAzureAppWorkerJobQueue.DeleteMessage(msg);
    }
    catch (Exception ex)
    {
        // Log and swallow: the message stays on the queue and will be retried,
        // eventually hitting the poison-message guard above.
        // FIX: removed dead Process/ProcessName/MainModule locals that were never
        // used and could themselves throw inside this error handler.
        string err = ex.Message;
        if (ex.InnerException != null)
        {
            err += " Inner Exception: " + ex.InnerException.Message;
        }
        if (msg != null)
        {
            err += " Last queue message retrieved: " + msg.AsString;
        }
        Trace.TraceError(err);
    }
}