/// <inheritdoc/>
protected override async Task<ResponseBaseDTO> DoWorkAsync(StorageBlobCreatedEventData eventData, string eventType)
{
    _ = eventData ?? throw new ArgumentNullException(nameof(eventData));

    var context = new StorageClientProviderContext(eventData.ClientRequestId);

    // Build the success DTO up front; metadata is filled in below if it can be fetched.
    var result = new ResponseBlobCreatedSuccessDTO
    {
        BlobUri = new Uri(eventData.Url),
        BlobMetadata = null,
        OperationContext = context.ClientRequestIdAsJObject,
        DoNotPublish = context.IsMuted,
    };

    try
    {
        JObject metadata = await _storageService.GetBlobMetadataAsync(result.BlobUri, context).ConfigureAwait(false);
        if (metadata != null)
        {
            result.BlobMetadata = metadata;
        }
    }
    catch (Exception e)
    {
        // This is considered a non-critical error, just log it.
        Log.LogException(
            LogEventIds.FailedToGetBlobMetadataInBlobCreatedHandler,
            e,
            $"GetBlobMetadataAsync failed while getting {result?.BlobUri}");
    }

    return result;
}
/// <inheritdoc/>
protected override async Task<ResponseBaseDTO> DoWorkAsync(RequestBlobCopyDTO eventData, string eventType)
{
    _ = eventData ?? throw new ArgumentNullException(nameof(eventData));

    // 0. Validate the inputs carried in the request payload.
    _ = eventData.SourceUri ?? throw new ArgumentException("Source uri cannot be null");
    _ = eventData.DestinationUri ?? throw new ArgumentException("Destination uri cannot be null");

    var context = new StorageClientProviderContext(eventData.OperationContext);

    // 1. Schedule the copy.
    await StartBlobCopy(eventData, context).ConfigureAwait(false);

    // 2. Craft the "Scheduled" response event.
    // TODO: Do we want to check the copy result and tell the Requestor in the "Scheduled" event?
    //       e.g. bool done = copyResult.HasCompleted; var response = copyResult.UpdateStatus();
    // SE: The sequence needs to stay as Request/Scheduled/Done (RSD). Confirmed that the
    // Requestor is fine with receiving RSD or RDS -- the latter if the copy completes
    // synchronously. It may be worth checking whether the Requestor could benefit from an
    // extra flag in the scheduled response indicating the copy is already done.
    var metadata = await _storageService.GetBlobMetadataAsync(eventData.SourceUri, context).ConfigureAwait(false);

    return new ResponseBlobCopyScheduledDTO
    {
        SourceUri = eventData.SourceUri,
        BlobMetadata = metadata,
        DestinationUri = eventData.DestinationUri,
        OperationContext = eventData.OperationContext,
    };
}
/// <param name="context">The storage context; used only for error reporting.</param>
/// <inheritdoc/>
public IMediaInfoService GetMediaInfoLib(StorageClientProviderContext context)
{
    _ = context ?? throw new ArgumentNullException(nameof(context));

    IMediaInfoService lib;
    string version;

    try
    {
        lib = new MediaInfoServiceWrapper(new MI.MediaInfo());
        version = lib.GetOption("Info_Version", "0.7.0.0;MediaInfoDLL_Example_CS;0.7.0.0");
    }
    catch (Exception e)
    {
        _logger.LogException(LogEventIds.MediaInfoLibFailedToLoad, e, "Could not load MediaInfo.dll");
        throw new GridwichMediaInfoLibException("An exception was found when loading MediaInfoLib",
            LogEventIds.MediaInfoLibFailedToLoad, context.ClientRequestIdAsJObject, e);
    }

    // The native library signals a load failure either with an empty version
    // or with this specific sentinel string.
    if (string.IsNullOrEmpty(version) || version == "Unable to load MediaInfo library")
    {
        throw new GridwichMediaInfoLibException("Unable to load MediaInfo library.",
            LogEventIds.MediaInfoLibFailedToLoad, context.ClientRequestIdAsJObject);
    }

    return lib;
}
/// <inheritdoc/>
public IStorageContainerClientSleeve GetBlobContainerSleeveForUri(Uri uri, StorageClientProviderContext context)
{
    _ = uri ?? throw new ArgumentNullException(nameof(uri));
    if (context == null)
    {
        throw new ArgumentNullException(
            nameof(context),
            $"Use {nameof(StorageClientProviderContext.None)} instead of null for 'empty' context");
    }

    // Cache policy notes -- see commentary in Gridwich.Core/Models/BlobBaseClientProvider.cs
    var cacheKey = GetCacheKeyForBlobContainerClient(uri, context);
    var cached = _blobContainerClientCache.Get<IStorageContainerClientSleeve>(cacheKey);

    if (cached == null)
    {
        // Cache miss: build a fresh sleeve and cache it for the expiry window.
        cached = CreateBlobContainerClientForUri(uri, context);
        _blobContainerClientCache.Add(cacheKey, cached, ClientExpirationTime);
    }
    else
    {
        // Cache hit: the ClientRequestID already matches (it is part of the key), but the
        // ETag & flag information may be stale, thus the ResetTo().
        cached.Context.ResetTo(context);
    }

    return cached;
}
/// <summary>
/// Gets the connection string for a storage account.
/// </summary>
/// <param name="storageAccountName">Name of the storage account.</param>
/// <param name="context">The storage context.</param>
/// <returns>The connection string to <paramref name="storageAccountName"/>.</returns>
/// <remarks>
/// This method creates a connection string based on the Storage account key.
/// Since the connection string is never sent outside of Gridwich, and the key itself
/// is obtained from AzureStorageManagement.GetAccountKey (which, in turn, obtains the
/// key information using the MSI identity), this sequence does not represent any new
/// authorization exposure.
/// </remarks>
public string GetConnectionStringForAccount(string storageAccountName, StorageClientProviderContext context)
{
    _ = StringHelpers.NullIfNullOrWhiteSpace(storageAccountName) ?? throw new ArgumentNullException(nameof(storageAccountName));
    _ = context ?? throw new ArgumentNullException(nameof(context));

    var accountKey = _azureStorageManagement.GetAccountKey(storageAccountName);
    if (accountKey == null)
    {
        throw new GridwichStorageServiceException($"Failed to get account key for account named '{storageAccountName}'",
            LogEventIds.FailedToGetAccountKey, context.ClientRequestIdAsJObject);
    }

    // Assemble the standard Azure Storage connection string from its parts.
    return "DefaultEndpointsProtocol=" + StorageServiceConstants.AzureStorageProtocol +
           ";AccountName=" + storageAccountName +
           ";AccountKey=" + accountKey +
           ";EndpointSuffix=" + StorageServiceConstants.AzureStorageDnsSuffix +
           ";";
}
/// <inheritdoc/>
public async Task<bool> BlobDelete(Uri blobUri, StorageClientProviderContext context)
{
    _ = blobUri ?? throw new ArgumentNullException(nameof(blobUri));
    _ = context ?? throw new ArgumentNullException(nameof(context));

    // 0. Get the SDK storage client
    IStorageBlobClientSleeve blobSleeve = _blobBaseClientProvider.GetBlobClientSleeveForUri(blobUri, context);
    Response<bool> resp;
    try
    {
        // FIX: a SAS URL used to be generated here (GetSasUrlForBlob) but its result was
        // never used. The SDK client authenticates on its own, so the dead call only cost
        // a needless round-trip for a user-delegation key and, on failure, mislabeled the
        // error as a delete failure. It has been removed.

        // 1. Request the deletion
        // From the SDK source (see below), there are 3 possible responses:
        //   1.  true  -- blob existed and was deleted from index.
        //   2.  false -- blob did not exist
        //   3a. RequestFailedException -- some storage problem, other than the blob not existing.
        //   3b. Some other exception -- unlikely but possible.
        // Azure SDK Source for Delete: https://github.com/Azure/azure-sdk-for-net/blob/master/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs
        // DeleteBlob REST Call: https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob
        resp = await blobSleeve.Client.DeleteIfExistsAsync(DeleteSnapshotsOption.IncludeSnapshots, null).ConfigureAwait(false);
    }
    catch (Exception e)
    {
        // something other than an expected Storage problem.
        _log.LogExceptionObject(LogEventIds.FailedToDeleteDueToStorageExceptionInStorageService, e, blobUri);
        throw new GridwichStorageServiceException(blobUri, "Failed to delete the blob.",
            LogEventIds.FailedToDeleteDueToStorageExceptionInStorageService, context.ClientRequestIdAsJObject, e);
    }

    // Response<bool> converts implicitly to its Value (true iff the blob existed).
    return resp;
}
public async Task BlobCopy_ShouldThrow_When_CreateContainerThrows()
{
    // Arrange
    var srcUri = new Uri("https://gridwichinbox00sasb.blob.core.windows.net/test00/alexa_play_despacito.mp4");
    var dstUri = new Uri("https://gridwichinbox00sasb.blob.core.windows.net/test00/alexa_play_despacito_again.mp4");
    var ctx = new StorageClientProviderContext("{ \"xxx\" : 1232 }", false, string.Empty);

    var destClient = Mock.Of<BlobBaseClient>();
    var destSleeve = MockSleeve(destClient, ctx);
    var contClient = Mock.Of<BlobContainerClient>();
    var contSleeve = MockContainerSleeve(contClient, ctx);

    // Arrange Mocks: hand back the sleeves, then make container creation blow up.
    Mock.Get(_blobBaseClientProvider)
        .Setup(x => x.GetBlobClientSleeveForUri(It.IsAny<Uri>(), It.IsAny<StorageClientProviderContext>()))
        .Returns(destSleeve);
    Mock.Get(_blobContainerClientProvider)
        .Setup(x => x.GetBlobContainerSleeveForUri(It.IsAny<Uri>(), It.IsAny<StorageClientProviderContext>()))
        .Returns(contSleeve);
    Mock.Get(contClient)
        .Setup(x => x.CreateIfNotExistsAsync(
            It.IsAny<PublicAccessType>(),
            It.IsAny<IDictionary<string, string>>(),
            It.IsAny<BlobContainerEncryptionScopeOptions>(),
            It.IsAny<CancellationToken>()))
        .Throws<ArgumentException>();

    // Act & Assert: the copy wraps the failure, and logs it exactly once.
    await Assert.ThrowsAsync<GridwichStorageServiceException>(
        () => _storageService.BlobCopy(srcUri, dstUri, ctx)).ConfigureAwait(false);
    Mock.Get(_logger).Verify(
        x => x.LogExceptionObject(
            LogEventIds.StorageServiceFailedCreatingTargetContainerForBlobCopy,
            It.IsAny<Exception>(),
            It.IsAny<object>()),
        Times.Once);
}
public void GetBlobBaseClientForUriWithId_ShouldReturnValidClient_WhenCalledAsExpected()
{
    // Arrange: build the blob URI from its expected parts.
    var clientRequestId = "{\"somehappykey\" : \"somehappyvalue\"}";
    var expectedScheme = "https";
    var expectedAccount = "gridwichasset00sasb";
    var expectedEndpointSuffix = "core.windows.net";
    var expectedContainer = "container1";
    var expectedBlob = "path1/blob1.mp4";
    var uri1 = new Uri($"{expectedScheme}://{expectedAccount}.blob.{expectedEndpointSuffix}/{expectedContainer}/{expectedBlob}");

    // Ensure we don't pick up a cached object:
    _blobBaseClientCache.Remove(BlobBaseClientProvider.GetCacheKeyForBlobBaseClient(uri1));

    // Act
    var ctx = new StorageClientProviderContext(clientRequestId, false, string.Empty);
    var sleeve = _blobBaseClientProvider.GetBlobClientSleeveForUri(uri1, ctx);
    var client = sleeve.Client;

    // Assert: every URI component round-trips through the client.
    client.AccountName.ShouldBe(expectedAccount);
    client.BlobContainerName.ShouldBe(expectedContainer);
    client.Name.ShouldBe(expectedBlob);
    client.Uri.ShouldBe(uri1);
    client.ShouldBeOfType<BlobBaseClient>();
}
public void GetBlobContainerClientForUri_ShouldThrow_WhenNullUri()
{
    // Arrange
    var ctx = new StorageClientProviderContext("{ \"something\" : \"good\" }", false, string.Empty);

    // Act & Assert: a null URI must be rejected outright.
    Assert.Throws<ArgumentNullException>(
        () => _blobContainerClientProvider.GetBlobContainerSleeveForUri(null, ctx));
}
/// <inheritdoc/>
protected override JObject ParseOperationContext(StorageBlobDeletedEventData eventData)
{
    _ = eventData ?? throw new ArgumentNullException(nameof(eventData));

    // The operation context is carried in the event's ClientRequestId, as JSON.
    return new StorageClientProviderContext(eventData.ClientRequestId).ClientRequestIdAsJObject;
}
/// <summary>
/// Returns a unique cache key for the blob/context combination.
/// </summary>
/// <param name="blobUri">The blob URI.</param>
/// <param name="context">The storage context (containing the clientRequestId); null falls back to the empty context.</param>
/// <returns>A unique cache key.</returns>
internal static string GetCacheKeyForBlobBaseClient(Uri blobUri, StorageClientProviderContext context)
{
    if (context == null)
    {
        context = StorageClientProviderContext.None;
    }

    // FIX: previously keyed on GetHashCode() of the URI and request id. Hash codes are not
    // unique, so a collision could hand a client primed for one blob/context to a caller
    // working on another ("crossing the streams"). Use the full string values instead --
    // keys are opaque to the cache, so only uniqueness matters.
    return $"{CacheKeyPrefix}-{blobUri.AbsoluteUri}-{context.ClientRequestID}";
}
public async Task GetMediaInfoCompleteInformForUriAsync_Should_Throw_When_BlobUriIsNull()
{
    // Arrange: any context will do; the null blob URI is what must trigger the throw.
    var context = StorageClientProviderContext.None;

    // Act & Assert
    await Assert.ThrowsAsync<ArgumentNullException>(
            async () => await _service.GetMediaInfoCompleteInformForUriAsync(null, context).ConfigureAwait(true))
        .ConfigureAwait(true);
}
/// <summary>
/// Create simple guid-based storage contexts for Test cases.
/// </summary>
/// <param name="guidToUse">The unique identifier to use; when null, a fresh GUID is generated.</param>
/// <param name="trackETag">if set to <c>true</c>, track the ETag.</param>
/// <param name="eTagValue">The initial ETag value.</param>
/// <returns>
/// <see cref="StorageClientProviderContext"/>
/// </returns>
public static StorageClientProviderContext CreateGUIDContext(Guid? guidToUse = null, bool trackETag = false, string eTagValue = null)
{
    // BUGFIX: the old condition `(guidToUse == null && guidToUse.HasValue)` was always false,
    // so a caller-supplied GUID was silently ignored and a random one generated instead.
    Guid g = guidToUse ?? Guid.NewGuid();

    var opContext = $"{{\"guid\":\"{g}\"}}"; // some simple JSON to house the GUID
    var ret = new StorageClientProviderContext(opContext, muted: false, trackETag: trackETag, initialETag: eTagValue);
    return ret;
}
public void GetBlobBaseClientForUriWithRequestId_ShouldThrow_WhenNullContext()
{
    // Arrange
    var uri = new Uri("https://gridwichasset00sasb.blob.core.windows.net/container1/blob1.mp4");
    StorageClientProviderContext nullContext = null;

    // Act & Assert: a null context must be rejected.
    Assert.Throws<ArgumentNullException>(
        () => _blobBaseClientProvider.GetBlobClientSleeveForUri(uri, nullContext));
}
/// <inheritdoc/>
public IStorageBlobClientSleeve GetBlobClientSleeveForUri(Uri blobUri, StorageClientProviderContext context)
{
    _ = blobUri ?? throw new ArgumentNullException(nameof(blobUri));
    _ = context ?? throw new ArgumentNullException(nameof(context));

    // Cache policy
    //
    // Sequences of related storage operations must be supported. For example, deleting a blob
    // in Gridwich takes two sequential steps: retrieve metadata, then delete. Caching keyed
    // solely on blob URI risks "crossing the streams" -- two request streams targeting the
    // same blob at overlapping times. A client primed for a different Operation Context/ETag
    // combination causes failures.
    //
    // Also, the Storage Service (the consumer of these cache items) is not structured to pass
    // clients from method to method; when one of its methods calls another (e.g., DeleteBlob
    // calls GetMetadata) it passes only the URI and context. Those two values must therefore
    // be sufficient to retrieve a "hot" client from the cache, avoiding per-call
    // authentication and connection-establishment costs.
    //
    // Hence two policies/conventions:
    //   1. The client cache is keyed solely on the blobURI/Context combination. Should some
    //      valid circumstance arise where a context is not available, code falls back to
    //      StorageClientProviderContext.NO_CONTEXT.
    //   2. The cache item expiry time is much lower than the original 10 minutes -- items
    //      should persist for roughly the duration of a request. Since Storage service methods
    //      cannot know when callers are done with them, expiry (rather than explicit removal)
    //      keeps the cache size down.
    var cacheKey = GetCacheKeyForBlobBaseClient(blobUri, context);
    var cachedSleeve = _blobBaseClientCache.Get<IStorageBlobClientSleeve>(cacheKey);

    if (cachedSleeve == null)
    {
        // Cache miss: create and register a fresh sleeve.
        cachedSleeve = CreateBlobClientSleeveForUri(blobUri, context);
        _blobBaseClientCache.Add(cacheKey, cachedSleeve, ClientExpirationTime);
    }
    else
    {
        // Cache hit: the ClientRequestID already matches (it's part of the key), but the
        // ETag & flag information may be stale, thus the ResetTo().
        cachedSleeve.Context.ResetTo(context);
    }

    return cachedSleeve;
}
// FIX: was `async void` -- exceptions thrown in an async void test are unobservable by the
// test framework and can crash the process. Async tests must return Task.
public async Task HandleAsync_ShouldReturnNullOperationContext_WhenItIsAGuid(bool muteContext)
{
    // Arrange
    const string BLOB_URL = _expectedInboxUrl;
    var topicEndpointUri = new Uri("https://www.topichost.com");
    var reqId = new StorageClientProviderContext("9d87e668-0000-0000-0000-b84ff6a53784");
    reqId.IsMuted = muteContext;

    var testEvent = new EventGridEvent
    {
        EventTime = DateTime.UtcNow,
        EventType = EventTypes.StorageBlobDeletedEvent,
        DataVersion = "1.0",
        Data = JsonConvert.SerializeObject(new StorageBlobDeletedEventData
        {
            Url = BLOB_URL,
            ClientRequestId = reqId.ClientRequestID,
        })
    };
    EventGridEvent publishedEvent = null;

    // Arrange Mocks
    Mock.Get(_settingsProvider)
        .Setup(x => x.GetAppSettingsValue(Publishing.TopicOutboundEndpointSettingName))
        .Returns(topicEndpointUri.ToString());
    Mock.Get(_eventGridPublisher)
        .Setup(x => x.PublishEventToTopic(It.IsAny<EventGridEvent>()))
        .Callback<EventGridEvent>((eventGridEvent) => publishedEvent = eventGridEvent)
        .ReturnsAsync(true);

    // Act
    var handleAsyncResult = await _handler.HandleAsync(testEvent).ConfigureAwait(true);

    // Assert
    handleAsyncResult.ShouldBe(true, "handleAsync should always return true");

    // Assert publishedEvent:
    if (muteContext)
    {
        Mock.Get(_eventGridPublisher).Verify(x => x.PublishEventToTopic(It.IsAny<EventGridEvent>()),
            Times.Never,
            "Muted context results should not be published.");
        publishedEvent.ShouldBeNull();
    }
    else
    {
        publishedEvent.Data.ShouldBeOfType(typeof(ResponseBlobDeleteSuccessDTO));
        var data = (ResponseBlobDeleteSuccessDTO)publishedEvent.Data;
        data.OperationContext.ShouldNotBeNull();
        data.OperationContext.ShouldBeEquivalentTo(reqId.ClientRequestIdAsJObject);
    }
}
/// <summary>
/// Initializes a new instance of the <see cref="StorageContainerClientSleeve"/> class.
/// </summary>
/// <param name="client">The container client.</param>
/// <param name="service">The blob service client.</param>
/// <param name="context">The storage context.</param>
/// <exception cref="ArgumentNullException">When any argument is null.</exception>
/// <exception cref="ArgumentException">When <paramref name="client"/>.AccountName is blank.</exception>
public StorageContainerClientSleeve(BlobContainerClient client, BlobServiceClient service, StorageClientProviderContext context)
{
    if (client == null)
    {
        throw new ArgumentNullException(nameof(client));
    }

    // A client with no account name cannot have come from a valid storage URI.
    if (string.IsNullOrWhiteSpace(client.AccountName))
    {
        throw new ArgumentException($@"client.AccountName is invalid", nameof(client));
    }

    Client = client;
    Context = context ?? throw new ArgumentNullException(nameof(context));
    Service = service ?? throw new ArgumentNullException(nameof(service));
}
/// <inheritdoc/>
public string GetSasUrlForBlob(Uri blobUri, TimeSpan ttl, StorageClientProviderContext context)
{
    _ = blobUri ?? throw new ArgumentNullException(nameof(blobUri));
    _ = context ?? throw new ArgumentNullException(nameof(context));
    try
    {
        var blobUriBuilder = new BlobUriBuilder(blobUri);

        // Create a SAS token that's valid for the TimeSpan, plus a back-off start for clock skew.
        var timeRange = StorageHelpers.CreateTimeRangeForUrl(ttl);

        BlobSasBuilder sasBuilder = new BlobSasBuilder
        {
            BlobContainerName = blobUriBuilder.BlobContainerName,
            BlobName = blobUriBuilder.BlobName,
            Resource = "b", // "b" is for blob
            StartsOn = timeRange.StartTime,
            ExpiresOn = timeRange.EndTime,
        }.UnescapeTargetPath(); // Important adjustment(s) for SAS computation
        sasBuilder.SetPermissions(BlobSasPermissions.Read); // read permissions only for the SAS.

        // A user-delegation key (MSI-based) is used to sign the SAS, rather than an account key.
        var sleeve = _blobBaseClientProvider.GetBlobClientSleeveForUri(blobUri, context);
        var userDelegation = sleeve.Service.GetUserDelegationKey(sasBuilder.StartsOn, sasBuilder.ExpiresOn)?.Value;
        if (userDelegation == null)
        {
            var msg = $@"Unable to get a user delegation key from the Storage service for blob {blobUri}";
            _log.LogEvent(LogEventIds.StorageServiceMissingConnectionString, msg);
            throw new GridwichStorageServiceException(blobUri, msg,
                LogEventIds.StorageServiceMissingConnectionString, context.ClientRequestIdAsJObject);
        }

        var sasToken = sasBuilder.ToSasQueryParameters(userDelegation, blobUriBuilder.AccountName);
        blobUriBuilder.Sas = sasToken;

        // Construct the full URI, including the SAS token. AbsoluteUri (vs. ToString) is to ensure the %HH escaping is used.
        return blobUriBuilder.ToUri().AbsoluteUri;
    }
    catch (RequestFailedException e)
    {
        // NOTE(review): this catch reuses the "user delegation key" message/event id for ANY
        // RequestFailedException in the try block, which can mislabel unrelated storage
        // failures -- consider a distinct message/event id. Left as-is (behavior unchanged).
        var msg = $@"Unable to get a user delegation key from the Storage service for blob {blobUri}";
        _log.LogEvent(LogEventIds.StorageServiceMissingConnectionString, msg);
        throw new GridwichStorageServiceException(blobUri, msg,
            LogEventIds.StorageServiceMissingConnectionString, context.ClientRequestIdAsJObject, e);
    }
    catch (Exception e)
    {
        // Any other failure (bad URI parts, SAS computation, etc.) is wrapped uniformly.
        _log.LogExceptionObject(LogEventIds.FailedToCreateBlobSasUriInStorageService, e, blobUri);
        throw new GridwichStorageServiceException(blobUri, "Failed to generate the SAS url.",
            LogEventIds.FailedToCreateBlobSasUriInStorageService, context.ClientRequestIdAsJObject, e);
    }
}
/// <inheritdoc/>
protected override async Task<ResponseBaseDTO> DoWorkAsync(RequestContainerDeleteDTO eventData, string eventType)
{
    _ = eventData ?? throw new ArgumentNullException(nameof(eventData));

    var context = new StorageClientProviderContext(eventData.OperationContext);

    await _storageService.ContainerDeleteAsync(eventData.StorageAccountName, eventData.ContainerName, context).ConfigureAwait(false);

    return new ResponseContainerDeleteSuccessDTO
    {
        StorageAccountName = eventData.StorageAccountName,
        ContainerName = eventData.ContainerName,
        // Consistency fix: sibling handlers (blob copy, SAS-url, tier-change) echo the
        // request's OperationContext on their success responses so the Requestor can
        // correlate; this handler previously omitted it.
        OperationContext = eventData.OperationContext,
    };
}
/// <inheritdoc/>
public async Task<ServiceOperationResultEncodeDispatched> EncodeCreateAsync(RequestFlipEncodeCreateDTO requestorFlipEncodeCreateDTO)
{
    _ = requestorFlipEncodeCreateDTO ?? throw new ArgumentNullException(nameof(requestorFlipEncodeCreateDTO));

    // A zero SecToLive means "use the default TTL".
    TimeSpan ttl = requestorFlipEncodeCreateDTO.SecToLive == 0
        ? _defaultTTL
        : TimeSpan.FromSeconds(requestorFlipEncodeCreateDTO.SecToLive);

    // ROBUSTNESS FIX: previously `Inputs.ToArray()[0]` would raise NullReferenceException /
    // IndexOutOfRangeException on a null or empty Inputs list; fail with a clear argument error.
    var inputs = requestorFlipEncodeCreateDTO.Inputs?.ToArray();
    if (inputs == null || inputs.Length == 0)
    {
        throw new ArgumentException("Inputs must contain at least one entry", nameof(requestorFlipEncodeCreateDTO));
    }

    var context = new StorageClientProviderContext(requestorFlipEncodeCreateDTO.OperationContext);
    var input = new Uri(inputs[0].BlobUri);

    // EncodeAsync is broken into 2 parts:
    //   1. Configure any storage needs for the encoder
    //   2. Call the encoder

    // 1a. Input must exist.
    var exists = await _storageService.GetBlobExistsAsync(input, context).ConfigureAwait(false);
    if (!exists)
    {
        throw new GridwichFlipMissingInputException(
            $"Attempt to use nonexistent blob as input: {input}",
            input.AbsoluteUri, context.ClientRequestIdAsJObject);
    }

    // 1b. SAS URI needed for input.
    string sasUri;
    try
    {
        sasUri = _storageService.GetSasUrlForBlob(input, ttl, context);
        if (string.IsNullOrEmpty(sasUri))
        {
            throw new GridwichFlipSASException($"Failed to generate SAS for: {input}",
                requestorFlipEncodeCreateDTO.OperationContext);
        }
    }
    catch (Exception e)
    {
        throw new GridwichFlipSASException($"Failed to generate SAS for: {input}",
            requestorFlipEncodeCreateDTO.OperationContext, e);
    }

    // 2. Execute Encode
    var result = await CreateVideoAsync(sasUri, requestorFlipEncodeCreateDTO).ConfigureAwait(false);

    return new ServiceOperationResultEncodeDispatched(
        workflowJobName: result.Id,
        null,
        requestorFlipEncodeCreateDTO.OperationContext);
}
/// <inheritdoc/>
protected override Task<ResponseBaseDTO> DoWorkAsync(RequestBlobSasUrlCreateDTO eventData, string eventType)
{
    _ = eventData ?? throw new ArgumentNullException(nameof(eventData));

    // Read SecToLive as a time span.
    var ttl = TimeSpan.FromSeconds(eventData.SecToLive);
    var context = new StorageClientProviderContext(eventData.OperationContext);
    var sasUrl = _storageService.GetSasUrlForBlob(eventData.BlobUri, ttl, context);

    // FIX: the method has no real awaits, so the `async` + `await Task.FromResult(...)`
    // round-trip was pure overhead; return the completed Task directly (same pattern as the
    // blob-deleted handler).
    return Task.FromResult<ResponseBaseDTO>(new ResponseBlobSasUrlSuccessDTO
    {
        SasUrl = new Uri(sasUrl),
        OperationContext = eventData.OperationContext,
    });
}
// Lists the container's blobs, locates the .ism manifest, downloads it, and extracts the
// audio-track ids/names from the manifest's <audio> entries.
// Throws GridwichPublicationMissingManifestFileException when the container has no .ism file.
private async Task<List<TrackInfo>> GetAudioTracks(Uri containerUri)
{
    // Used to correlate storage service calls that Braniac doesn't need to know about.
    var internalTracker = new JObject { { "~AMS-V3-Audio-Track", $"G:{Guid.NewGuid()}" } };
    var internalContext = new StorageClientProviderContext(internalTracker, muted: true);

    // Find the .ism manifest among the container's blobs (case-insensitive extension match).
    var blobs = await _storageService.ListBlobsAsync(containerUri, internalContext).ConfigureAwait(false);
    var ismFile = blobs.Where(b => b.Name.EndsWith(".ism", StringComparison.InvariantCultureIgnoreCase)).FirstOrDefault();
    if (ismFile is null)
    {
        var msg = $"Attempt to get audio tracks for container {containerUri} but this container doesn't contain an ism manifest file.";
        throw new GridwichPublicationMissingManifestFileException(
            containerUri,
            msg);
    }

    // Download the manifest content as a stream.
    var ismUri = new BlobUriBuilder(containerUri) { BlobName = ismFile.Name }.ToUri();
    var ismStream = await _storageService.DownloadHttpRangeAsync(ismUri, internalContext).ConfigureAwait(false);

    var audioTracks = new List<TrackInfo>();
    using (var sr = new StreamReader(ismStream.Content))
    {
        // Deserialize the SMIL-style XML manifest.
        using var reader = XmlReader.Create(sr);
        var serializer = new XmlSerializer(typeof(IsmFile));
        var manifest = (IsmFile)serializer.Deserialize(reader);

        // NOTE(review): if Body/Switch/Audio is absent, `manifest?...?.Audio` yields null and
        // this foreach throws NullReferenceException -- TODO confirm whether manifests always
        // contain at least one <audio> element.
        foreach (var audio in manifest?.Body?.Switch?.Audio)
        {
            // NOTE(review): FirstOrDefault().Value assumes the trackID/trackName params are
            // always present; a manifest missing either would NRE here -- verify against
            // real manifests.
            var trackId = audio.Param.Where(p => p.Name.Equals("trackID", StringComparison.InvariantCultureIgnoreCase)).FirstOrDefault().Value;
            var trackName = audio.Param.Where(p => p.Name.Equals("trackName", StringComparison.InvariantCultureIgnoreCase)).FirstOrDefault().Value;
            audioTracks.Add(new TrackInfo()
            {
                TrackID = int.Parse(trackId, NumberFormatInfo.InvariantInfo),
                TrackName = trackName,
                TrackType = "Audio"
            });
        }
    }
    return audioTracks;
}
/// <summary>
/// Creates a BlobContainerClient. Called when one does not exist yet.
/// </summary>
/// <param name="uri">The target Uri.</param>
/// <param name="context">The context.</param>
/// <returns>
/// A BlobContainerClient object.
/// </returns>
internal StorageContainerClientSleeve CreateBlobContainerClientForUri(
    Uri uri,
    StorageClientProviderContext context)
{
    // set up a copy of the Context...
    var ctxCopy = new StorageClientProviderContext(context);
    BlobContainerClient blobContainerClient;
    BlobUriBuilder containerUriBuilder;
    BlobClientOptions clientOptions;

    try
    {
        containerUriBuilder = new BlobUriBuilder(uri);
        clientOptions = new BlobClientOptions();
        // NOTE(review): this pipeline policy is constructed but never attached to
        // clientOptions (no AddPolicy call), so the client request id is presumably NOT
        // being injected into outgoing requests -- TODO confirm whether an
        // `clientOptions.AddPolicy(clientRequestIdPolicy, ...)` call is missing here.
        var clientRequestIdPolicy = new BlobClientPipelinePolicy(ctxCopy);

        // Rebuild a clean container URI from account + container name, then create the client
        // using the MSI identity.
        var uriC = StorageHelpers.BuildBlobStorageUri(containerUriBuilder.AccountName, containerUriBuilder.BlobContainerName);
        blobContainerClient = new BlobContainerClient(uriC, _identity, clientOptions);
    }
    catch (Exception fe) when (fe is ArgumentNullException || fe is UriFormatException)
    {
        // Malformed/incomplete URI: surface as ArgumentException with the specific event id.
        var aex = new ArgumentException(LogEventIds.BlobContainerClientProviderUriMissingAccountName.Name, fe);
        _log.LogExceptionObject(LogEventIds.BlobContainerClientProviderUriMissingAccountName, aex, uri);
        throw aex;
    }
    catch (Exception e)
    {
        _log.LogExceptionObject(LogEventIds.BlobContainerClientProviderFailedToCreateBlobContainerClient, e, uri);
        throw;
    }

    try
    {
        // Pair the container client with a service client for the same account in one sleeve.
        var accountUri = StorageHelpers.BuildStorageAccountUri(containerUriBuilder.AccountName, buildForBlobService: true);
        var sleeve = new StorageContainerClientSleeve(blobContainerClient,
            new BlobServiceClient(accountUri, _identity, clientOptions),
            ctxCopy);
        return sleeve;
    }
    catch (ArgumentException aex)
    {
        _log.LogExceptionObject(LogEventIds.BlobContainerClientProviderUriMissingAccountName, aex, uri);
        throw;
    }
}
public void GetBlobContainerClientForUri_ShouldThrow_WhenUnknownStorageAccount()
{
    // Arrange: a URI whose host does not parse into a storage account name.
    var badUri = new Uri("https://gridwichasset00sasb.com/container1");
    var ctx = new StorageClientProviderContext("{ \"something\" : \"good\" }", false, string.Empty);

    // Ensure we don't pick up a cached object:
    _blobContainerClientCache.Remove(BlobContainerClientProvider.GetCacheKeyForBlobContainerClient(badUri, ctx));

    // Act & Assert
    Assert.ThrowsAny<ArgumentException>(
        () => _blobContainerClientProvider.GetBlobContainerSleeveForUri(badUri, ctx));
    Mock.Get(_logger).Verify(
        x => x.LogExceptionObject(
            LogEventIds.BlobContainerClientProviderUriMissingAccountName,
            It.IsAny<ArgumentException>(),
            It.IsAny<object>()),
        Times.Once,
        "A critical error should be logged when the storage account is unknown.");
}
/// <summary>
/// Create a mock sleeve, properly set up so that the two main getters work.
/// </summary>
private static IStorageBlobClientSleeve MockSleeve(BlobBaseClient client, StorageClientProviderContext context)
{
    // The provider validates AccountName, so the mock client must report one.
    Mock.Get(client)
        .SetupGet(x => x.AccountName)
        .Returns(@"ut");

    var mockedSleeve = Mock.Of<IStorageBlobClientSleeve>();
    var sleeveSetup = Mock.Get(mockedSleeve);
    sleeveSetup.SetupGet(x => x.Client).Returns(client);
    sleeveSetup.SetupGet(x => x.Context).Returns(context);

    return mockedSleeve;
}
/// <inheritdoc/>
protected override async Task<ResponseBaseDTO> DoWorkAsync(RequestBlobTierChangeDTO eventData, string eventType)
{
    _ = eventData ?? throw new ArgumentNullException(nameof(eventData));

    var context = new StorageClientProviderContext(eventData.OperationContext);

    // Ask the storage service to move the blob to the requested access tier.
    await _storageService.ChangeBlobTierAsync(
        new Uri(eventData.BlobUri),
        eventData.AccessTier,
        eventData.RehydratePriority,
        context).ConfigureAwait(false);

    // Echo the request details back in the success response.
    return new ResponseBlobTierChangeSuccessDTO()
    {
        AccessTier = eventData.AccessTier,
        BlobUri = eventData.BlobUri,
        OperationContext = eventData.OperationContext,
        RehydratePriority = eventData.RehydratePriority
    };
}
/// <inheritdoc/>
public async Task<bool> GetBlobExistsAsync(Uri blobUri, StorageClientProviderContext context)
{
    _ = blobUri ?? throw new ArgumentNullException(nameof(blobUri));
    _ = context ?? throw new ArgumentNullException(nameof(context));

    IStorageBlobClientSleeve blobSleeve = _blobBaseClientProvider.GetBlobClientSleeveForUri(blobUri, context);
    try
    {
        // ExistsAsync returns false (without throwing) for a nonexistent blob, so reaching
        // the catch below means the *check itself* failed (storage/communication error).
        return await blobSleeve.Client.ExistsAsync().ConfigureAwait(false);
    }
    catch (Exception e)
    {
        _log.LogExceptionObject(LogEventIds.FailedToCheckBlobExistenceDueToStorageExceptionInStorageService, e, blobUri);

        // FIX: the old message claimed "the requested blob does not exist", which is wrong --
        // a missing blob yields `false`, not an exception. Report the actual failure mode,
        // matching the LogEventId's meaning.
        throw new GridwichStorageServiceException(blobUri, "Failed to check whether the blob exists.",
            LogEventIds.FailedToCheckBlobExistenceDueToStorageExceptionInStorageService,
            context.ClientRequestIdAsJObject, e);
    }
}
/// <inheritdoc/>
public async Task<JObject> GetBlobMetadataAsync(Uri blobUri, StorageClientProviderContext context)
{
    _ = blobUri ?? throw new ArgumentNullException(nameof(blobUri));
    _ = context ?? throw new ArgumentNullException(nameof(context));

    var blobProperties = await GetBlobPropertiesAsync(blobUri, context).ConfigureAwait(false);
    var metadata = blobProperties?.Metadata;

    // TODO: is this intended? Shouldn't we use JsonHelpers.ConvertFromNative(metadata) instead?
    return metadata == null ? null : JObject.FromObject(metadata);
}
public void GetBlobContainerClientForUri_ShouldReturnNewClient_WhenNotInDictionary()
{
    // Arrange: two container URIs on the same account; only the first gets pre-cached.
    var ctx = new StorageClientProviderContext("{ \"something\" : \"good\" }", false, string.Empty);
    var expectedScheme = "https";
    var expectedAccount = "gridwichasset00sasb";
    var expectedEndpointSuffix = "blob.core.windows.net";
    var expectedContainer = "container1";
    var cachedUri = new Uri($"{expectedScheme}://{expectedAccount}.{expectedEndpointSuffix}/{expectedContainer}");
    var uncachedUri = new Uri($"{expectedScheme}://{expectedAccount}.{expectedEndpointSuffix}/container2");
    var cachedKey = BlobContainerClientProvider.GetCacheKeyForBlobContainerClient(cachedUri, ctx);
    var uncachedKey = BlobContainerClientProvider.GetCacheKeyForBlobContainerClient(uncachedUri, ctx);

    // Ensure we don't pick up a cached object:
    _blobContainerClientCache.Remove(cachedKey);
    _blobContainerClientCache.Remove(uncachedKey);

    // Seed the cache for the first URI only.
    var mockClient = Mock.Of<BlobContainerClient>();
    var seededSleeve = MockSleeve(mockClient, ctx);
    _blobContainerClientCache.Add(cachedKey, seededSleeve);

    // Act
    var existingSleeve = _blobContainerClientProvider.GetBlobContainerSleeveForUri(cachedUri, ctx);
    var newSleeve = _blobContainerClientProvider.GetBlobContainerSleeveForUri(uncachedUri, ctx);

    // Assert: the cached URI returns the seeded sleeve; the other URI gets a fresh one.
    newSleeve.Client.ShouldBeOfType<BlobContainerClient>();
    newSleeve.Client.ShouldNotBeSameAs(mockClient);
    newSleeve.Context.ShouldBeEquivalentTo(ctx);
    existingSleeve.Client.ShouldBeAssignableTo<BlobContainerClient>();
    existingSleeve.Client.ShouldBeSameAs(mockClient);
    existingSleeve.Context.ShouldBeEquivalentTo(ctx);
    newSleeve.ShouldNotBe(seededSleeve);
    newSleeve.ShouldNotBe(existingSleeve);
    existingSleeve.ShouldNotBe(newSleeve);
    existingSleeve.ShouldBe(seededSleeve);
}
/// <inheritdoc/>
protected override Task<ResponseBaseDTO> DoWorkAsync(StorageBlobDeletedEventData eventData, string eventType)
{
    _ = eventData ?? throw new ArgumentNullException(nameof(eventData));

    // CreateSafe tolerates malformed ClientRequestId payloads.
    var context = StorageClientProviderContext.CreateSafe(eventData.ClientRequestId);

    // The event's Url must be a well-formed absolute URI.
    if (!Uri.TryCreate(eventData.Url, UriKind.Absolute, out Uri outputUri))
    {
        throw new GridwichArgumentException(nameof(eventData.Url), $"Invalid uri {eventData.Url}",
            LogEventIds.FailedToCreateBlobDeletedDataWithEventDataInBlobDeletedHandler,
            context.ClientRequestIdAsJObject);
    }

    ResponseBaseDTO dto = new ResponseBlobDeleteSuccessDTO
    {
        BlobUri = outputUri,
        OperationContext = context.ClientRequestIdAsJObject,
        DoNotPublish = context.IsMuted,
    };
    return Task.FromResult(dto);
}