/// <summary>
/// Runs one job iteration: ensures the archive and dead-letter containers exist,
/// leases the next batch of raw CDN log files matching the configured prefix,
/// and processes each one into package statistics (or aggregates only, when configured).
/// </summary>
public override async Task Run()
{
    // Get the target blob container (for archiving decompressed log files)
    var targetBlobContainer = _cloudBlobClient.GetContainerReference(
        _configuration.AzureCdnCloudStorageContainerName + "-archive");
    await targetBlobContainer.CreateIfNotExistsAsync();

    // Get the dead-letter table (corrupted or failed blobs will end up there)
    var deadLetterBlobContainer = _cloudBlobClient.GetContainerReference(
        _configuration.AzureCdnCloudStorageContainerName + "-deadletter");
    await deadLetterBlobContainer.CreateIfNotExistsAsync();

    // Create a parser
    var warehouse = new Warehouse(
        LoggerFactory,
        OpenSqlConnectionAsync<StatisticsDbConfiguration>,
        _applicationInsightsHelper);
    var statisticsBlobContainerUtility = new StatisticsBlobContainerUtility(
        targetBlobContainer,
        deadLetterBlobContainer,
        LoggerFactory,
        _applicationInsightsHelper);

    var logProcessor = new LogFileProcessor(
        statisticsBlobContainerUtility,
        LoggerFactory,
        warehouse,
        _applicationInsightsHelper);

    // Get the next to-be-processed raw log file using the cdn raw log file name prefix
    var prefix = string.Format(
        CultureInfo.InvariantCulture,
        "{0}_{1}_",
        _azureCdnPlatform.GetRawLogFilePrefix(),
        _configuration.AzureCdnAccountNumber);

    // Get next raw log file to be processed
    IReadOnlyCollection<string> alreadyAggregatedLogFiles = null;
    if (_configuration.AggregatesOnly)
    {
        // We only want to process aggregates for the log files.
        // Get the list of files we already processed so we can skip them.
        alreadyAggregatedLogFiles = await warehouse.GetAlreadyAggregatedLogFilesAsync();
    }

    var leasedLogFiles = await _blobLeaseManager.LeaseNextLogFilesToBeProcessedAsync(
        prefix, alreadyAggregatedLogFiles);

    foreach (var leasedLogFile in leasedLogFiles)
    {
        // A fresh parser per file preserves the original behavior
        // (NOTE(review): PackageStatisticsParser may be stateless; hoisting not verified).
        var packageTranslator = new PackageTranslator();
        var packageStatisticsParser = new PackageStatisticsParser(packageTranslator, LoggerFactory);

        try
        {
            await logProcessor.ProcessLogFileAsync(
                leasedLogFile, packageStatisticsParser, _configuration.AggregatesOnly);

            if (_configuration.AggregatesOnly)
            {
                _blobLeaseManager.TrackLastProcessedBlobUri(leasedLogFile.Uri);
            }
        }
        finally
        {
            // Fix: previously the lease leaked when ProcessLogFileAsync threw;
            // always release it so the blob becomes available again promptly.
            leasedLogFile.Dispose();
        }
    }
}
/// <summary>
/// Runs one job iteration: ensures the archive and dead-letter containers exist,
/// leases the next batch of raw CDN log files matching the configured prefix,
/// and processes each one into package statistics (or aggregates only, when configured).
/// </summary>
/// <returns><c>true</c> when the run completed; <c>false</c> when it failed with an exception.</returns>
public override async Task<bool> Run()
{
    try
    {
        // Get the target blob container (for archiving decompressed log files)
        var targetBlobContainer = _cloudBlobClient.GetContainerReference(
            _cloudStorageContainerName + "-archive");
        await targetBlobContainer.CreateIfNotExistsAsync();

        // Get the dead-letter table (corrupted or failed blobs will end up there)
        var deadLetterBlobContainer = _cloudBlobClient.GetContainerReference(
            _cloudStorageContainerName + "-deadletter");
        await deadLetterBlobContainer.CreateIfNotExistsAsync();

        // Create a parser
        var warehouse = new Warehouse(_loggerFactory, _targetDatabase);
        var statisticsBlobContainerUtility = new StatisticsBlobContainerUtility(
            targetBlobContainer,
            deadLetterBlobContainer,
            _loggerFactory);

        var logProcessor = new LogFileProcessor(statisticsBlobContainerUtility, _loggerFactory, warehouse);

        // Get the next to-be-processed raw log file using the cdn raw log file name prefix
        var prefix = string.Format(
            CultureInfo.InvariantCulture,
            "{0}_{1}_",
            _azureCdnPlatform.GetRawLogFilePrefix(),
            _azureCdnAccountNumber);

        // Get next raw log file to be processed
        IReadOnlyCollection<string> alreadyAggregatedLogFiles = null;
        if (_aggregatesOnly)
        {
            // We only want to process aggregates for the log files.
            // Get the list of files we already processed so we can skip them.
            alreadyAggregatedLogFiles = await warehouse.GetAlreadyAggregatedLogFilesAsync();
        }

        var leasedLogFiles = await _blobLeaseManager.LeaseNextLogFilesToBeProcessedAsync(
            prefix, alreadyAggregatedLogFiles);

        foreach (var leasedLogFile in leasedLogFiles)
        {
            // A fresh parser per file preserves the original behavior
            // (NOTE(review): PackageStatisticsParser may be stateless; hoisting not verified).
            var packageTranslator = new PackageTranslator("packagetranslations.json");
            var packageStatisticsParser = new PackageStatisticsParser(packageTranslator);

            try
            {
                await logProcessor.ProcessLogFileAsync(leasedLogFile, packageStatisticsParser, _aggregatesOnly);

                if (_aggregatesOnly)
                {
                    _blobLeaseManager.TrackLastProcessedBlobUri(leasedLogFile.Uri);
                }
            }
            finally
            {
                // Fix: previously an exception jumped straight to the catch block
                // and leaked the lease; always release it.
                leasedLogFile.Dispose();
            }
        }
    }
    catch (Exception exception)
    {
        _logger.LogCritical(LogEvents.JobRunFailed, exception, "Job run failed!");
        return false;
    }

    return true;
}
/// <summary>
/// Runs one job iteration: builds a blob client with retry, ensures the archive and
/// dead-letter containers exist, leases the next raw CDN log files matching the
/// configured prefix, and processes each one.
/// </summary>
/// <returns><c>true</c> when the run completed; <c>false</c> when it failed with an exception.</returns>
public override async Task<bool> Run()
{
    try
    {
        // construct a cloud blob client for the configured storage account
        var cloudBlobClient = _cloudStorageAccount.CreateCloudBlobClient();
        cloudBlobClient.DefaultRequestOptions.RetryPolicy = new ExponentialRetry(TimeSpan.FromSeconds(10), 5);

        // Get the source blob container (containing compressed log files)
        // and construct a log source (fetching raw logs from the source blob container)
        var sourceBlobContainer = cloudBlobClient.GetContainerReference(_cloudStorageContainerName);
        var blobLeaseManager = new LogFileProvider(sourceBlobContainer);

        // Get the target blob container (for archiving decompressed log files)
        var targetBlobContainer = cloudBlobClient.GetContainerReference(_cloudStorageContainerName + "-archive");
        await targetBlobContainer.CreateIfNotExistsAsync();

        // Get the dead-letter table (corrupted or failed blobs will end up there)
        var deadLetterBlobContainer = cloudBlobClient.GetContainerReference(_cloudStorageContainerName + "-deadletter");
        // Fix: the dead-letter container was never created, unlike the archive
        // container above, so dead-lettering a blob could fail on a fresh account.
        await deadLetterBlobContainer.CreateIfNotExistsAsync();

        // Create a parser
        var logProcessor = new LogFileProcessor(targetBlobContainer, deadLetterBlobContainer, _targetDatabase);

        // Get the next to-be-processed raw log file using the cdn raw log file name prefix
        var prefix = string.Format(
            CultureInfo.InvariantCulture,
            "{0}_{1}_",
            _azureCdnPlatform.GetRawLogFilePrefix(),
            _azureCdnAccountNumber);

        // Get next raw log file to be processed
        var leasedLogFiles = await blobLeaseManager.LeaseNextLogFilesToBeProcessedAsync(prefix);
        foreach (var leasedLogFile in leasedLogFiles)
        {
            try
            {
                await logProcessor.ProcessLogFileAsync(leasedLogFile);
            }
            finally
            {
                // Fix: previously an exception jumped straight to the catch block
                // and leaked the lease; always release it.
                leasedLogFile.Dispose();
            }
        }

        return true;
    }
    catch (Exception exception)
    {
        Trace.TraceError(exception.ToString());
        return false;
    }
}
/// <summary>
/// Runs one job iteration: builds a blob client with retry, ensures the archive and
/// dead-letter containers exist, leases the next raw CDN log files matching the
/// configured prefix, and processes each one.
/// </summary>
/// <returns><c>true</c> when the run completed; <c>false</c> when it failed with an exception.</returns>
public override async Task<bool> Run()
{
    try
    {
        // construct a cloud blob client for the configured storage account
        var cloudBlobClient = _cloudStorageAccount.CreateCloudBlobClient();
        cloudBlobClient.DefaultRequestOptions.RetryPolicy = new ExponentialRetry(TimeSpan.FromSeconds(10), 5);

        // Get the source blob container (containing compressed log files)
        // and construct a log source (fetching raw logs from the source blob container)
        var sourceBlobContainer = cloudBlobClient.GetContainerReference(_cloudStorageContainerName);
        var blobLeaseManager = new LogFileProvider(sourceBlobContainer);

        // Get the target blob container (for archiving decompressed log files)
        var targetBlobContainer = cloudBlobClient.GetContainerReference(_cloudStorageContainerName + "-archive");
        await targetBlobContainer.CreateIfNotExistsAsync();

        // Get the dead-letter table (corrupted or failed blobs will end up there)
        var deadLetterBlobContainer = cloudBlobClient.GetContainerReference(_cloudStorageContainerName + "-deadletter");
        // Fix: the dead-letter container was never created, unlike the archive
        // container above, so dead-lettering a blob could fail on a fresh account.
        await deadLetterBlobContainer.CreateIfNotExistsAsync();

        // Create a parser
        var logProcessor = new LogFileProcessor(targetBlobContainer, deadLetterBlobContainer, _targetDatabase);

        // Get the next to-be-processed raw log file using the cdn raw log file name prefix
        var prefix = string.Format(
            CultureInfo.InvariantCulture,
            "{0}_{1}_",
            _azureCdnPlatform.GetRawLogFilePrefix(),
            _azureCdnAccountNumber);

        // Get next raw log file to be processed
        var leasedLogFiles = await blobLeaseManager.LeaseNextLogFilesToBeProcessedAsync(prefix);
        foreach (var leasedLogFile in leasedLogFiles)
        {
            try
            {
                await logProcessor.ProcessLogFileAsync(leasedLogFile);
            }
            finally
            {
                // Fix: previously an exception jumped straight to the catch block
                // and leaked the lease; always release it.
                leasedLogFile.Dispose();
            }
        }

        return true;
    }
    catch (Exception exception)
    {
        Trace.TraceError(exception.ToString());
        return false;
    }
}