public TEntity AddOrUpdate(TKey key, Func<TEntity> addViewFactory, Func<TEntity, TEntity> updateViewFactory, AddOrUpdateHint hint)
{
    // Optimistic-concurrency upsert of an atomic entity stored as a single blob:
    // the ETag captured on read guards the subsequent write.
    var blob = GetBlobReference(key);

    string etag = null;
    TEntity view;
    try
    {
        // Atomic entities are expected to be small, so the whole blob is
        // downloaded in one call rather than streamed.
        var bytes = blob.DownloadByteArray();
        using (var stream = new MemoryStream(bytes))
        {
            view = _strategy.Deserialize<TEntity>(stream);
        }
        view = updateViewFactory(view);
        etag = blob.Attributes.Properties.ETag;
    }
    catch (StorageClientException ex)
    {
        switch (ex.ErrorCode)
        {
            case StorageErrorCode.ContainerNotFound:
                var message = string.Format(
                    "Container '{0}' does not exist. You need to initialize this atomic storage and ensure that '{1}' is known to '{2}'.",
                    blob.Container.Name,
                    typeof(TEntity).Name,
                    _strategy.GetType().Name);
                throw new InvalidOperationException(message, ex);
            case StorageErrorCode.BlobNotFound:
            case StorageErrorCode.ResourceNotFound:
                // First write for this key: build a fresh view instead.
                view = addViewFactory();
                break;
            default:
                throw;
        }
    }

    // Atomic entities should be small, so we can use the simple upload method.
    // http://toolheaven.net/post/Azure-and-blob-write-performance.aspx
    using (var memory = new MemoryStream())
    {
        _strategy.Serialize(view, memory);

        // If we read an existing blob, require it to be unchanged (IfMatch);
        // otherwise require that nobody created it meanwhile (IfNoneMatch "*").
        // Note that upload-from-stream does weird things, hence the byte array.
        var options = etag != null
            ? new BlobRequestOptions { AccessCondition = AccessCondition.IfMatch(etag) }
            : new BlobRequestOptions { AccessCondition = AccessCondition.IfNoneMatch("*") };

        // Make sure that the upload is not rejected due to a cached content MD5.
        // http://social.msdn.microsoft.com/Forums/hu-HU/windowsazuredata/thread/4764e38f-b200-4efe-ada2-7de442dc4452
        blob.Properties.ContentMD5 = null;
        blob.UploadByteArray(memory.ToArray(), options);
    }
    return view;
}
public bool TrySet(T obj)
{
    // Conditional write: only succeeds if the blob still carries the ETag we
    // captured earlier. An ETag requires a prior FetchAttributes/download.
    var currentEtag = blob.Attributes.Properties.ETag;
    if (currentEtag == null)
    {
        throw new EtagNotSet();
    }

    var options = new BlobRequestOptions
    {
        AccessCondition = AccessCondition.IfMatch(currentEtag)
    };

    try
    {
        using (var stream = new MemoryStream())
        {
            Serializer.Serialize(stream, obj);
            stream.Seek(0, SeekOrigin.Begin);
            blob.UploadFromStream(stream, options);
        }
        return true;
    }
    catch
    {
        // Best-effort semantics: any failure (typically a 412 precondition
        // failure caused by a concurrent writer) is reported as false.
        return false;
    }
}
private BlobRequestOptions CreateBlobRequestOptions()
{
    // IfMatch when we have seen the blob before (update must not race a
    // concurrent writer); IfNoneMatch("*") when we have not (create must
    // not silently overwrite an existing blob).
    var currentEtag = this.Properties.ETag;
    if (currentEtag == null)
    {
        return new BlobRequestOptions { AccessCondition = AccessCondition.IfNoneMatch("*") };
    }
    return new BlobRequestOptions { AccessCondition = AccessCondition.IfMatch(currentEtag) };
}
protected override string DoGet(string objId, out OptimisticConcurrencyContext context)
{
    // Fetch the blob's attributes first so the returned context carries the
    // ETag needed for a later conditional (IfMatch) update.
    var blob = this.Container.GetBlobReference(objId);
    blob.FetchAttributes();

    context = new OptimisticConcurrencyContext
    {
        ObjectId = objId,
        AccessCondition = AccessCondition.IfMatch(blob.Properties.ETag)
    };

    return blob.DownloadText();
}
// The mutex must already exist (a blob containing a single byte:
// 0 = free, non-zero = taken). This constructor blocks until the mutex is
// acquired. Acquisition is an ETag-guarded conditional write, so only one
// contender can flip the byte per observed version; a lock whose blob has
// not changed for more than 3 seconds is considered stale and is stolen.
// Any storage failure during the read phase is surfaced as the
// caller-supplied exception 'e'.
public Mutex(CloudBlobContainer container, string mutexName, Exception e)
{
    blob = container.GetBlobReference(mutexName);
    byte[] b1 = { 1 }; // payload meaning "taken"
    BlobRequestOptions requestOpt = new BlobRequestOptions();
    bool keepGoing = true;
    string oldEtag = "";
    int lastChange = 0; // tick count of the last observed ETag change
    do
    {
        byte[] b;
        string eTag;
        try
        {
            blob.FetchAttributes();
            eTag = blob.Attributes.Properties.ETag;
            if (eTag != oldEtag)
            {
                // Blob changed since we last looked: restart the staleness timer.
                lastChange = Environment.TickCount;
                oldEtag = eTag;
            }
            b = blob.DownloadByteArray();
        }
        catch (Exception)
        {
            // NOTE(review): every storage error is mapped to the injected
            // exception 'e', discarding the original cause.
            throw e;
        }
        // Only acquire if nobody changed the blob between our read and write.
        requestOpt.AccessCondition = AccessCondition.IfMatch(eTag);
        if (b[0] == 0 || Environment.TickCount - lastChange > 3000) // a lock cannot be held for more than 3 s
        {
            try
            {
                blob.UploadByteArray(b1, requestOpt);
                keepGoing = false; // acquired
            }
            catch (StorageClientException ex)
            {
                // Lost the race: a concurrent writer got there first. Keep polling.
                if (ex.ErrorCode != StorageErrorCode.ConditionFailed)
                {
                    throw;
                }
            }
        }
        else
        {
            Thread.Sleep(50); // arbitrary poll interval
        }
    } while (keepGoing);
}
private string UploadBlob(CloudBlob blob, Stream data, bool overwrite, string eTag)
{
    // Choose the access condition for the write:
    //  - no overwrite: IfNotModifiedSince(DateTime.MinValue) trick, which
    //    fails with 412 whenever the blob already exists;
    //  - overwrite without an ETag: unconditional write;
    //  - overwrite with an ETag: only replace an unchanged blob (IfMatch).
    BlobRequestOptions options;
    if (!overwrite)
    {
        options = new BlobRequestOptions { AccessCondition = AccessCondition.IfNotModifiedSince(DateTime.MinValue) };
    }
    else if (String.IsNullOrEmpty(eTag))
    {
        options = new BlobRequestOptions { AccessCondition = AccessCondition.None };
    }
    else
    {
        options = new BlobRequestOptions { AccessCondition = AccessCondition.IfMatch(eTag) };
    }

    try
    {
        if (data.CanSeek)
        {
            // Seekable stream: rewind before every attempt so the retry
            // policy can safely re-send the same bytes.
            this.retryPolicy.ExecuteAction(() =>
            {
                data.Seek(0, SeekOrigin.Begin);
                blob.UploadFromStream(data, options);
            });
        }
        else
        {
            // Stream is not seekable, cannot use retry logic as data
            // consistency cannot be guaranteed: single attempt only.
            blob.UploadFromStream(data, options);
        }
        return blob.Properties.ETag;
    }
    catch (StorageClientException ex)
    {
        if (ex.ErrorCode != StorageErrorCode.ConditionFailed)
        {
            throw;
        }
    }

    // Precondition failed: the caller's view of the blob was stale.
    return null;
}
// Translates the storage-agnostic streaming condition into the Azure blob
// client's AccessCondition equivalent. Throws InvalidOperationException-style
// failure from ExposeException when an ETag-based condition lacks its ETag,
// and ArgumentOutOfRangeException for an unrecognized condition type.
static AccessCondition MapCondition(StreamingCondition condition)
{
    switch (condition.Type)
    {
        case StreamingConditionType.None:
            return AccessCondition.None;
        case StreamingConditionType.IfMatch:
            var matchTag = ExposeException(condition.ETag, "'ETag' should be present");
            return AccessCondition.IfMatch(matchTag);
        case StreamingConditionType.IfNoneMatch:
            var noneMatchTag = ExposeException(condition.ETag, "'ETag' should be present");
            return AccessCondition.IfNoneMatch(noneMatchTag);
        default:
            // Fix: the original threw a bare ArgumentOutOfRangeException with
            // no parameter name or offending value, making failures
            // undiagnosable. Same exception type, richer message.
            throw new ArgumentOutOfRangeException("condition",
                string.Format("Unsupported streaming condition type: '{0}'.", condition.Type));
    }
}
private static void SetReadOnlySharedAccessPolicy(CloudBlobContainer container)
{
    var permissions = container.GetPermissions();

    // Fail if someone else has already changed the container before we do.
    var options = new BlobRequestOptions
    {
        AccessCondition = AccessCondition.IfMatch(container.Properties.ETag)
    };

    // Read-only SAS policy whose lifetime (in days) comes from configuration.
    var expiry = TimeSpan.FromDays(StorageServicesContext.Current.Configuration.BlobsSasExpirationTime);
    var sharedAccessPolicy = new SharedAccessPolicy
    {
        Permissions = SharedAccessPermissions.Read,
        SharedAccessExpiryTime = DateTime.UtcNow + expiry
    };

    // Replace any existing "readonly" policy with the fresh one.
    permissions.SharedAccessPolicies.Remove("readonly");
    permissions.SharedAccessPolicies.Add("readonly", sharedAccessPolicy);
    container.SetPermissions(permissions, options);
}
// We can't use the ETag "*" with IfMatch because it only works with
// IfNoneMatch, so we fall back to a fetch-ETag-and-retry loop.
// TODO: find something better.
public bool SetIfExists(T obj)
{
    var options = new BlobRequestOptions();
    try
    {
        while (true)
        {
            // Capture the current ETag, then write only if it is still current.
            blob.FetchAttributes();
            options.AccessCondition = AccessCondition.IfMatch(blob.Attributes.Properties.ETag);
            try
            {
                using (var stream = new MemoryStream())
                {
                    Serializer.Serialize(stream, obj);
                    stream.Seek(0, SeekOrigin.Begin);
                    blob.UploadFromStream(stream, options);
                }
                return true;
            }
            catch (StorageClientException e)
            {
                // A concurrent writer beat us: loop and retry with a fresh ETag.
                if (e.ErrorCode != StorageErrorCode.ConditionFailed)
                {
                    throw;
                }
            }
        }
    }
    catch (StorageClientException e)
    {
        // The blob does not exist at all, so "set if exists" is a no-op.
        if (e.ErrorCode == StorageErrorCode.ResourceNotFound)
        {
            return false;
        }
        throw;
    }
}
private static void SetReadOnlySharedAccessPolicy(CloudBlobContainer container)
{
    // SAS lifetime (in days) comes from configuration; parsed invariantly
    // so machine-written config is not culture-sensitive.
    var expirationDays = int.Parse(
        ConfigReader.GetConfigValue("BlobSASExperiationTime"),
        NumberStyles.Integer,
        CultureInfo.InvariantCulture);

    var permissions = container.GetPermissions();

    // Fail if someone else has already changed the container before we do.
    var options = new BlobRequestOptions
    {
        AccessCondition = AccessCondition.IfMatch(container.Properties.ETag)
    };

    var sharedAccessPolicy = new SharedAccessPolicy
    {
        Permissions = SharedAccessPermissions.Read,
        SharedAccessExpiryTime = DateTime.UtcNow + TimeSpan.FromDays(expirationDays)
    };

    // Replace any existing "readonly" policy with the fresh one.
    permissions.SharedAccessPolicies.Remove("readonly");
    permissions.SharedAccessPolicies.Add("readonly", sharedAccessPolicy);
    container.SetPermissions(permissions, options);
}
// Put (create or update) a blob conditionally based on an expected ETag value.
// Returns true on success; false when the blob is missing (404) or the ETag no
// longer matches (412); any other storage error propagates.
public bool PutBlobIfUnchanged(string containerName, string blobName, string content, string ExpectedETag)
{
    try
    {
        var container = BlobClient.GetContainerReference(containerName);
        var blob = container.GetBlobReference(blobName);
        var options = new BlobRequestOptions
        {
            AccessCondition = AccessCondition.IfMatch(ExpectedETag)
        };
        blob.UploadText(content, new UTF8Encoding(), options);
        return true;
    }
    catch (StorageClientException ex)
    {
        var status = (int)ex.StatusCode;
        if (status == 404 || status == 412)
        {
            return false;
        }
        throw;
    }
}
public bool TryUploadMetadata()
{
    // A conditional metadata write needs an ETag captured by an earlier
    // FetchAttributes/download; fail fast when none is available.
    var currentEtag = blob.Attributes.Properties.ETag;
    if (currentEtag == null)
    {
        throw new EtagNotSet();
    }

    var options = new BlobRequestOptions
    {
        AccessCondition = AccessCondition.IfMatch(currentEtag)
    };

    try
    {
        blob.SetMetadata(options);
        return true;
    }
    catch
    {
        // Best-effort: any failure (typically a 412 from a concurrent
        // writer) is reported as false.
        return false;
    }
}
// Initializes a concurrency context whose access condition requires the
// stored entity to still carry the given ETag (IfMatch) when it is written.
internal OptimisticConcurrencyContext(string entityTag) { this.AccessCondition = AccessCondition.IfMatch(entityTag); }
// Background commit loop: drains _commitList and flushes each BlockInfo to
// its page blob. Each write is guarded by the page blob's current ETag
// (IfMatch); when the write fails with ConditionFailed (412) the blob is
// re-fetched and the write retried. Data is padded up to a 512-byte multiple
// before WritePages, and the blob's logical data length is tracked in blob
// metadata under AzureConstants.BlobDataLengthPropertyName, updated after a
// successful write. The loop runs until _stopCommitThread is set and sleeps
// 1 second whenever the queue is empty; unhandled exceptions are logged and
// the affected block's update is abandoned.
// NOTE(review): the guard "dataLength <= blockInfo.Offset + blockInfo.Length"
// writes only when the recorded length does NOT already exceed the block's
// end offset — confirm this intended append-monotonic semantic with callers.
// NOTE(review): non-StorageClientException failures and attribute-fetch
// failures skip the block without re-queuing it, so that data is dropped.
private void RunCommitThread() { while (!_stopCommitThread) { BlockInfo blockInfo = null; try { if (_commitList.TryDequeue(out blockInfo)) { //Trace.TraceInformation("AzureBlockStore.CommitThread: Attempting to commit block {0}", blockInfo); if (blockInfo != null) { //Trace.TraceInformation("AzureBlockStore.CommitThread: Got block to commit {0}", blockInfo); bool retry; do { retry = false; var pageBlob = GetPageBlob(blockInfo.StoreName); if (pageBlob != null) { Trace.TraceInformation("AzureBlockStore.CommitThread: Got target page blob: {0}", pageBlob.Uri); try { pageBlob.FetchAttributes(); } catch (StorageClientException ex) { Trace.TraceWarning( "AzureBlockStore.CommitThread: Could not retrieve attributes for page blob: {0} due to {1}. Aborting update", pageBlob.Uri, ex); continue; } var dataLength = long.Parse( pageBlob.Attributes.Metadata[AzureConstants.BlobDataLengthPropertyName]); if (dataLength <= (blockInfo.Offset + blockInfo.Length)) { try { Trace.TraceInformation( "AzureBlockStore.CommitThread: Attempting to write pages to page blob"); var requestOptions = new BlobRequestOptions { AccessCondition = AccessCondition.IfMatch( pageBlob.Attributes.Properties.ETag) }; byte[] paddedBuffer; if (blockInfo.Data.Length % 512 == 0) { paddedBuffer = blockInfo.Data; } else { paddedBuffer = new byte[blockInfo.Data.Length + (512 - (blockInfo.Data.Length % 512)) ]; Buffer.BlockCopy(blockInfo.Data, 0, paddedBuffer, 0, blockInfo.Data.Length); } using (var ms = new MemoryStream(paddedBuffer, false)) { pageBlob.WritePages(ms, blockInfo.Offset, requestOptions); } pageBlob.Metadata[AzureConstants.BlobDataLengthPropertyName] = (blockInfo.Offset + blockInfo.Length).ToString( CultureInfo.InvariantCulture); Trace.TraceInformation("AzureBlockStore.CommitThread: Block data written. 
Now updating metadata."); pageBlob.SetMetadata(); Trace.TraceInformation( "AzureBlockStore.CommitThread: Block written and pageBlob datalength updated to {0}", pageBlob.Metadata[AzureConstants.BlobDataLengthPropertyName]); } catch (StorageClientException ex) { if (ex.ErrorCode == StorageErrorCode.ConditionFailed) { Trace.TraceWarning( "AzureBlockStore.CommitThread: page blob update failed with a ConditionFailed storage exception. Attempting retry."); retry = true; } else { Trace.TraceError( "Commit of block {0} failed with StorageClientException {1} ({2}). Operation will not be retried.", blockInfo, ex, ex.ErrorCode); } } catch (Exception ex) { Trace.TraceError("Commit of block {0} failed with exception {1}. Operation will not be retried.", blockInfo, ex); } } else { Trace.TraceInformation( "AzureBlockStore.CommitThread: page blob data length {0} already exceeeds commit block end offset of {1}. Update aborted.", dataLength, blockInfo.Offset + blockInfo.Length); } } } while (retry); } } if (_commitList.IsEmpty) { //Trace.WriteLine("AzureBlockStore.CommitThread: Commit list is currently empty. Sleeping"); Thread.Sleep(1000); } } catch (Exception ex) { if (blockInfo != null) { // TODO : This should go into event logs too. Trace.TraceError( "AzureBlockStore.CommitThread: Unhandled exception {0}. Aborting update for block: {1}", ex, blockInfo); } else { Trace.TraceError( "AzureBlockStore.CommitThread: Unhandled exception {0}", ex); } } } }