/// <summary>
/// Deletes every blob container (optionally restricted to those whose name starts with
/// <paramref name="prefix"/>), paging through the listing until exhausted.
/// Bounded to 10 seconds via a self-cancelling token; the token is passed to each
/// storage call, so a timeout surfaces as a cancellation from those calls.
/// </summary>
public async Task ClearAsync(string prefix = null)
{
    var credentials = new AzureBlobStorageCredentials(ConnectionString);
    var client = credentials.CreateCloudBlobClient();

    using var timeout = new CancellationTokenSource(TimeSpan.FromSeconds(10));
    var cancellation = timeout.Token;

    BlobContinuationToken continuationToken = null;
    do
    {
        var segment = await client.ListContainersSegmentedAsync(
            prefix: prefix,
            detailsIncluded: ContainerListingDetails.None,
            maxResults: null,
            continuationToken,
            options: null,
            operationContext: null,
            cancellationToken: cancellation);

        continuationToken = segment.ContinuationToken;

        foreach (var container in segment.Results)
        {
            await container.DeleteIfExistsAsync(
                accessCondition: null,
                options: null,
                operationContext: null,
                cancellationToken: cancellation);
        }
    }
    while (continuationToken != null && !timeout.IsCancellationRequested);
}
/// <summary>
/// Resolves blob credentials through the host's secrets provider and spins up
/// (startup included) an <see cref="AzureBlobStorageLog"/> from the JSON-provided settings.
/// </summary>
private static async Task<AzureBlobStorageLog> CreateAzureBlobStorageLogAsync(OperationContext operationContext, LoggerFactoryArguments arguments)
{
    var publicConfiguration = arguments.LoggingSettings.Configuration;
    Contract.AssertNotNull(publicConfiguration);

    // There is a big issue here: on the one hand, we'd like to be able to configure everything from the XML
    // instead of our JSON configuration, simply because the XML is self-contained. On the other hand, the XML
    // will likely be shared across all stamps, so there's no "stamp-specific" configuration in there. That
    // means all stamp-level configuration must be done through the JSON.
    var credentials = await arguments.SecretsProvider.GetBlobCredentialsAsync(
        publicConfiguration.SecretName,
        publicConfiguration.UseSasTokens,
        operationContext.Token);

    var log = new AzureBlobStorageLog(
        configuration: ToInternalConfiguration(publicConfiguration),
        context: operationContext,
        clock: SystemClock.Instance,
        fileSystem: new PassThroughFileSystem(),
        telemetryFieldsProvider: arguments.TelemetryFieldsProvider,
        credentials: credentials,
        additionalBlobMetadata: null);

    await log.StartupAsync().ThrowIfFailure();

    return log;
}
/// <summary>
/// Creates an <see cref="AzureBlobStorageLog"/> from raw credentials: resolves the blob
/// container named by <see cref="AzureBlobStorageLogConfiguration.ContainerName"/> via a
/// freshly created blob client and delegates to the container-based constructor.
/// </summary>
public AzureBlobStorageLog( AzureBlobStorageLogConfiguration configuration, OperationContext context, IClock clock, IAbsFileSystem fileSystem, ITelemetryFieldsProvider telemetryFieldsProvider, AzureBlobStorageCredentials credentials, IReadOnlyDictionary <string, string> additionalBlobMetadata)
    : this(configuration, context, clock, fileSystem, telemetryFieldsProvider,
        credentials.CreateCloudBlobClient().GetContainerReference(configuration.ContainerName),
        additionalBlobMetadata)
{
}
/// <summary>
/// Wraps an <see cref="UpdatingSasToken"/> into <see cref="AzureBlobStorageCredentials"/>,
/// keeping the underlying <see cref="StorageCredentials"/> in sync as the SAS token refreshes.
/// </summary>
private static AzureBlobStorageCredentials CreateAzureBlobCredentialsFromSasToken(UpdatingSasToken updatingSasToken)
{
    var storageCredentials = new StorageCredentials(sasToken: updatingSasToken.Token.Token);

    // Propagate future token refreshes into the live credentials object.
    updatingSasToken.TokenUpdated += (_, refreshedToken) => storageCredentials.UpdateSASToken(refreshedToken.Token);

    // The account name should never actually be updated, so its OK to take it from the initial token
    return new AzureBlobStorageCredentials(storageCredentials, updatingSasToken.Token.StorageAccount);
}
/// <summary>
/// Wraps an <see cref="UpdatingSasToken"/> into <see cref="AzureBlobStorageCredentials"/>,
/// logging and applying every subsequent token refresh to the live credentials.
/// </summary>
private AzureBlobStorageCredentials CreateAzureBlobCredentialsFromSasToken(string secretName, UpdatingSasToken updatingSasToken)
{
    var storageCredentials = new StorageCredentials(sasToken: updatingSasToken.Token.Token);

    // Propagate future token refreshes into the live credentials object, with a trace for diagnostics.
    updatingSasToken.TokenUpdated += (_, refreshedToken) =>
    {
        _logger.Debug($"Updating SAS token for Azure Storage secret {secretName}");
        storageCredentials.UpdateSASToken(refreshedToken.Token);
    };

    // The account name should never actually be updated, so its OK to take it from the initial token
    return new AzureBlobStorageCredentials(storageCredentials, updatingSasToken.Token.StorageAccount);
}
/// <summary>
/// Builds a standard test harness (temp workspace, emulator credentials, clock, telemetry mock)
/// and runs <paramref name="action"/> against it.
/// </summary>
/// <remarks>
/// BUG FIX: the original returned the callback's <see cref="Task"/> without awaiting it, so the
/// <c>using var workspace</c> declaration disposed the temporary directory as soon as this method
/// returned — i.e. potentially while the callback was still running. Making the method
/// <c>async</c> and awaiting the callback keeps the workspace alive for its full duration.
/// The signature callers see (<c>Task WithConfiguration(...)</c>) is unchanged.
/// </remarks>
public async Task WithConfiguration(Func<AzureBlobStorageLogConfiguration, OperationContext, IClock, IAbsFileSystem, ITelemetryFieldsProvider, AzureBlobStorageCredentials, Task> action)
{
    var fileSystem = new PassThroughFileSystem();
    using var workspace = new DisposableDirectory(fileSystem);

    // See: https://docs.microsoft.com/en-us/azure/storage/common/storage-use-emulator#connect-to-the-emulator-account-using-a-shortcut
    var credentials = new AzureBlobStorageCredentials(connectionString: "UseDevelopmentStorage=true");

    var tracingContext = new Context(Logger);
    var context = new OperationContext(tracingContext);
    var configuration = new AzureBlobStorageLogConfiguration(workspace.Path);
    var clock = SystemClock.Instance;
    var telemetryFieldsProvider = new MockTelemetryFieldsProvider();

    // Await (instead of returning the Task) so the workspace outlives the callback.
    await action(configuration, context, clock, fileSystem, telemetryFieldsProvider, credentials);
}
/// <summary>
/// Retrieves the storage secret (SAS token or plain-text, per
/// <see cref="AzureBlobStorageLogPublicConfiguration.UseSasTokens"/>) from the host,
/// then creates and starts an <see cref="AzureBlobStorageLog"/>.
/// </summary>
/// <remarks>
/// Refactor: the original duplicated the entire secret-retrieval call in both branches of the
/// <c>UseSasTokens</c> check; only the <see cref="SecretKind"/> and the secret cast differed.
/// The retrieval is now performed once.
/// </remarks>
private static async Task<AzureBlobStorageLog> CreateAzureBlobStorageLogAsync(OperationContext operationContext, DistributedCacheServiceArguments arguments, AzureBlobStorageLogPublicConfiguration configuration)
{
    Contract.RequiresNotNull(configuration);

    // There is a big issue here: on the one hand, we'd like to be able to configure everything from the XML
    // instead of our JSON configuration, simply because the XML is self-contained. On the other hand, the XML
    // will likely be shared across all stamps, so there's no "stamp-specific" configuration in there. That
    // means all stamp-level configuration must be done through the JSON.
    var secretKind = configuration.UseSasTokens ? SecretKind.SasToken : SecretKind.PlainText;
    var secrets = await arguments.Host.RetrieveSecretsAsync(
        new List<RetrieveSecretsRequest>() { new RetrieveSecretsRequest(configuration.SecretName, secretKind) },
        token: operationContext.Token);
    var secret = secrets[configuration.SecretName];

    AzureBlobStorageCredentials credentials = configuration.UseSasTokens
        ? new AzureBlobStorageCredentials((UpdatingSasToken)secret)
        : new AzureBlobStorageCredentials((PlainTextSecret)secret);

    var azureBlobStorageLogConfiguration = ToInternalConfiguration(configuration);

    var azureBlobStorageLog = new AzureBlobStorageLog(
        configuration: azureBlobStorageLogConfiguration,
        context: operationContext,
        clock: SystemClock.Instance,
        fileSystem: new PassThroughFileSystem(),
        telemetryFieldsProvider: arguments.TelemetryFieldsProvider,
        credentials: credentials);

    await azureBlobStorageLog.StartupAsync().ThrowIfFailure();

    return azureBlobStorageLog;
}
/// <summary>
/// Launches the local blob-storage emulator (Azurite) process and blocks until it accepts a
/// connection, retrying with a fresh port and workspace on storage failures, up to 10 attempts.
/// Publishes the resulting emulator connection string via <c>ConnectionString</c>.
/// </summary>
/// <remarks>
/// FIX: the original source contained a raw line break inside the regular (non-verbatim) string
/// literal "Process or either wait handle timed out. ...", which does not compile; it is kept
/// here as an explicit <c>\n</c> escape so the emitted message is preserved.
/// </remarks>
private void Start()
{
    // Can reuse an existing process only when this instance successfully created a connection to it.
    // Otherwise the test will fail with NRE.
    if (_process != null)
    {
        _logger.Debug("Storage process is already running. Reusing an existing instance.");
        return;
    }

    _logger.Debug("Starting a storage server.");

    // Pick the platform-specific emulator binary.
    var storageName = (OperatingSystemHelper.IsWindowsOS
        ? "tools/win-x64/blob.exe"
        : (OperatingSystemHelper.IsLinuxOS ? "tools/linux-x64/blob" : "tools/osx-x64/blob"));
    string storageServerPath = Path.GetFullPath(Path.Combine("azurite", storageName));
    if (!File.Exists(storageServerPath))
    {
        throw new InvalidOperationException($"Could not find {storageName} at {storageServerPath}");
    }

    _portNumber = 0;
    const int maxRetries = 10;
    for (int i = 0; i < maxRetries; i++)
    {
        // Each attempt gets its own workspace directory and port so a failed previous
        // attempt cannot interfere with the next one.
        var storageServerWorkspacePath = _tempDirectory.CreateRandomFileName();
        _fileSystem.CreateDirectory(storageServerWorkspacePath);
        _portNumber = PortExtensions.GetNextAvailablePort();

        var args = $"--blobPort {_portNumber} --location {storageServerWorkspacePath}";
        _logger.Debug($"Running cmd=[{storageServerPath} {args}]");

        _process = new ProcessUtility(storageServerPath, args, createNoWindow: true, workingDirectory: Path.GetDirectoryName(storageServerPath));
        _process.Start();

        string processOutput;
        if (_process == null)
        {
            // Defensive check; construction above should never yield null.
            processOutput = "[Process could not start]";
            throw new InvalidOperationException(processOutput);
        }

        if (_process.HasExited)
        {
            // The server died immediately: surface its logs (waiting briefly for exit so they flush).
            if (_process.WaitForExit(5000))
            {
                throw new InvalidOperationException(_process.GetLogs());
            }

            throw new InvalidOperationException("Process or either wait handle timed out. \n" + _process.GetLogs());
        }

        processOutput = $"[Process {_process.Id} is still running]";
        _logger.Debug("Process output: " + processOutput);

        // Well-known development-storage account key; only the blob endpoint port varies per attempt.
        ConnectionString = $"DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:{_portNumber}/devstoreaccount1;";

        AzureBlobStorageCredentials creds = new AzureBlobStorageCredentials(ConnectionString);
        var client = creds.CreateCloudBlobClient();

        try
        {
            // Probe the server with a cheap call; its boolean result is irrelevant —
            // the call completing without throwing is what proves the server is up.
            bool exists = client.GetContainerReference("test").ExistsAsync(DefaultBlobStorageRequestOptions, null).GetAwaiter().GetResult();
            break;
        }
        catch (StorageException ex)
        {
            // Storage-level failure: kill this attempt and retry (unless this was the last one).
            SafeKillProcess();
            _logger.Debug($"Retrying for exception connecting to storage process {_process.Id} with port {_portNumber}: {ex.ToString()}. Has process exited {_process.HasExited} with output {_process.GetLogs()}");

            if (i != maxRetries - 1)
            {
                Thread.Sleep(300);
            }
            else
            {
                throw;
            }
        }
        catch (Exception ex)
        {
            // Any other failure is not retryable: clean up and rethrow.
            SafeKillProcess();
            _logger.Error(
                $"Exception connecting to storage process {_process.Id} with port {_portNumber}: {ex.ToString()}. Has process exited {_process.HasExited} with output {_process.GetLogs()}");
            throw;
        }
    }

    _logger.Debug($"Storage server {_process.Id} is up and running at port {_portNumber}.");
}
/// <summary>
/// Creates the log: captures the collaborators, resolves the target blob container from the
/// credentials, and pre-builds (unstarted) the write and upload Nagle queues.
/// </summary>
public AzureBlobStorageLog(AzureBlobStorageLogConfiguration configuration, OperationContext context, IClock clock, IAbsFileSystem fileSystem, ITelemetryFieldsProvider telemetryFieldsProvider, AzureBlobStorageCredentials credentials)
{
    _configuration = configuration;
    _context = context;
    _clock = clock;
    _fileSystem = fileSystem;
    _telemetryFieldsProvider = telemetryFieldsProvider;

    _container = credentials
        .CreateCloudBlobClient()
        .GetContainerReference(configuration.ContainerName);

    _writeQueue = NagleQueue<string>.CreateUnstarted(
        configuration.WriteMaxDegreeOfParallelism,
        configuration.WriteMaxInterval,
        configuration.WriteMaxBatchSize);

    // Upload queue batches a single file at a time.
    _uploadQueue = NagleQueue<LogFile>.CreateUnstarted(
        configuration.UploadMaxDegreeOfParallelism,
        configuration.UploadMaxInterval,
        1);

    // TODO: this component doesn't have a quota, which could potentially be useful. If Azure Blob Storage
    // becomes unavailable for an extended period of time, we might cause disk space issues.
}
/// <summary>
/// Convenience overload for a single credential: wraps it in a one-element array and
/// delegates to the multi-credential constructor.
/// </summary>
public BlobCentralStoreConfiguration(AzureBlobStorageCredentials credentials, string containerName, string checkpointsKey)
    : this(new[] { credentials }, containerName, checkpointsKey)
{
}