public ConsumerInitializationParameters(
    string applicationInstanceId,
    string sectionName,
    string fabricNodeId,
    string fabricNodeInstanceName,
    string logDirectory,
    string workDirectory,
    DiskSpaceManager diskSpaceManager)
{
    if (string.IsNullOrEmpty(sectionName))
    {
        throw new ArgumentNullException("sectionName");
    }

    if (string.IsNullOrEmpty(logDirectory))
    {
        throw new ArgumentNullException("logDirectory");
    }

    if (diskSpaceManager == null)
    {
        throw new ArgumentNullException("diskSpaceManager");
    }

    this.applicationInstanceId = applicationInstanceId;
    this.sectionName = sectionName;
    this.fabricNodeId = fabricNodeId;
    this.fabricNodeInstanceName = fabricNodeInstanceName;
    this.logDirectory = logDirectory;
    this.workDirectory = workDirectory;
    this.diskSpaceManager = diskSpaceManager;
}
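// Usage sketch (hypothetical values; NUnit-style assertion assumed): the
// constructor validates sectionName, logDirectory and diskSpaceManager, so
// omitting any of them throws, while the other arguments are accepted as-is.
Assert.Throws<ArgumentNullException>(() => new ConsumerInitializationParameters(
    "AppInstance1",           // applicationInstanceId (not validated)
    null,                     // sectionName -- required, so this throws
    "NodeId1",                // fabricNodeId
    "NodeName1",              // fabricNodeInstanceName
    @"C:\Logs",               // logDirectory -- required
    @"C:\Work",               // workDirectory (not validated)
    new DiskSpaceManager())); // diskSpaceManager -- required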
public LttProducer(
    DiskSpaceManager diskSpaceManager,
    ITraceEventSourceFactory traceEventSourceFactory,
    ProducerInitializationParameters initParam)
{
    this.logSourceId = string.Concat(initParam.ApplicationInstanceId, "_", initParam.SectionName);
    this.AppInstanceId = initParam.ApplicationInstanceId;
    this.traceSource = traceEventSourceFactory.CreateTraceEventSource(FabricEvents.Tasks.FabricDCA);
    this.serviceConfigSections = new List<string>();
    this.consumerSinks = initParam.ConsumerSinks;

    // Read the timer config value from dca section
    var configReader = new ConfigReader(initParam.ApplicationInstanceId);
    long lttReadIntervalMinutes = configReader.GetUnencryptedConfigValue(
        initParam.SectionName,
        LttProducerConstants.LttReadIntervalParamName,
        LttProducerConstants.DefaultLttReadIntervalMinutes);

    if (initParam.ApplicationInstanceId == Utility.WindowsFabricApplicationInstanceId)
    {
        this.CreateWindowsFabricLttProducerWorkerInfo(initParam, lttReadIntervalMinutes);
    }
    else
    {
        this.CreateAppProducerWorkerInfo(initParam, lttReadIntervalMinutes);
    }
}
public void CleanupIfNeeded_DiskSpaceLow_PerformsCleanupAndReportsState(double numGigabytes)
{
    _fakeDriveInfo.AvailableFreeSpace.Returns(ConvertGBToBytes(numGigabytes));
    var manager = new DiskSpaceManager(_fakeDriveInfo, _logger, _issueReporter);

    var wasCleaned = manager.CleanupIfNeeded();

    Assert.That(wasCleaned, Is.True);
    _issueReporter.ReceivedWithAnyArgs(1).ReportError(default, default, default, default);
}
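// The ConvertGBToBytes helper referenced above is not shown; a minimal
// sketch of what it presumably does (an assumption, not the original):
private static long ConvertGBToBytes(double numGigabytes)
{
    // 1 GB = 1024^3 bytes; truncate any fractional bytes.
    return (long)(numGigabytes * 1024L * 1024L * 1024L);
}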
internal AppInstance(
    string applicationInstanceId,
    AppConfig appConfig,
    string servicePackageName,
    ServiceConfig serviceConfig,
    DiskSpaceManager diskSpaceManager)
{
    this.applicationInstanceId = applicationInstanceId;
    this.diskSpaceManager = diskSpaceManager;

    // Make the configuration available to the application
    ConfigReader.AddAppConfig(this.applicationInstanceId, appConfig);
    if (null != servicePackageName)
    {
        ConfigReader.AddServiceConfig(this.applicationInstanceId, servicePackageName, serviceConfig);
    }

    // Create the data collector for the application instance
    this.dataCollector = new FabricDCA(this.applicationInstanceId, diskSpaceManager);
}
internal CsvToUploadFolderWriter(
    string logSourceId,
    string fabricNodeId,
    string csvFolder,
    string sourceFolder,
    DiskSpaceManager diskSpaceManager,
    bool dtrCompressionDisabled)
    : this(
        logSourceId,
        fabricNodeId,
        csvFolder,
        sourceFolder,
        diskSpaceManager,
        dtrCompressionDisabled,
        new EtlToCsvFileWriterConfigReader())
{
}
internal EtlToCsvFileWriter(
    ITraceEventSourceFactory traceEventSourceFactory,
    string logSourceId,
    string fabricNodeId,
    string etwCsvFolder,
    bool dtrCompressionDisabled,
    DiskSpaceManager diskSpaceManager)
    : this(
        traceEventSourceFactory,
        logSourceId,
        fabricNodeId,
        etwCsvFolder,
        dtrCompressionDisabled,
        diskSpaceManager,
        new EtlToCsvFileWriterConfigReader())
{
}
internal EtlInMemoryProducer(
    DiskSpaceManager diskSpaceManager,
    IEtlInMemoryProducerConfigReaderFactory configReaderFactory,
    ITraceFileEventReaderFactory traceFileEventReaderFactory,
    ITraceEventSourceFactory traceEventSourceFactory,
    ProducerInitializationParameters initParam)
{
    this.diskSpaceManager = diskSpaceManager;
    this.traceFileEventReaderFactory = traceFileEventReaderFactory;

    // Initialization
    this.traceSource = traceEventSourceFactory.CreateTraceEventSource(FabricEvents.Tasks.FabricDCA);
    this.logSourceId = string.Concat(initParam.ApplicationInstanceId, "_", initParam.SectionName);
    this.consumerSinks = initParam.ConsumerSinks;

    // Read settings
    var configReader = configReaderFactory.CreateEtlInMemoryProducerConfigReader(this.traceSource, this.logSourceId);
    this.etlInMemoryProducerSettings = configReader.GetSettings();

    // ETL in-memory file processing is not enabled or we are not processing
    // winfab etl files, so return immediately
    if (false == this.etlInMemoryProducerSettings.Enabled ||
        false == this.etlInMemoryProducerSettings.ProcessingWinFabEtlFiles)
    {
        return;
    }

    // Create a new worker object
    var newWorkerParam = new EtlInMemoryProducerWorker.EtlInMemoryProducerWorkerParameters()
    {
        TraceSource = this.traceSource,
        LogDirectory = initParam.LogDirectory,
        ProducerInstanceId = this.logSourceId,
        EtlInMemoryProducer = this,
        LatestSettings = this.etlInMemoryProducerSettings
    };

    var newWorker = new EtlInMemoryProducerWorker(
        newWorkerParam,
        this.diskSpaceManager,
        this.traceFileEventReaderFactory);

    this.producerWorker = newWorker;
}
private bool CreateMdsUploader(string sectionName, out MdsEtwEventUploader uploader)
{
    // Once Uploader is constructed DiskSpaceManager is no longer needed.
    using (var dsm = new DiskSpaceManager())
    {
        ConfigReader.AddAppConfig(Utility.WindowsFabricApplicationInstanceId, null);
        var initParam = new ConsumerInitializationParameters(
            Utility.WindowsFabricApplicationInstanceId,
            sectionName,
            TestFabricNodeId,
            TestFabricNodeName,
            Utility.LogDirectory,
            Utility.DcaWorkFolder,
            dsm);
        uploader = new MdsEtwEventUploader(initParam);
        EtwCsvFolder = uploader.EtwCsvFolder;
        return true;
    }
}
private AzureTableQueryableEventUploader CreateAndInitializeUploader()
{
    const string TestFabricNodeInstanceName = "test";
    const string TestLogDirectory = "Logs";
    const string TestWorkDirectory = "Work";

    // Once Uploader is constructed DiskSpaceManager is no longer needed.
    using (var testDiskSpaceManager = new DiskSpaceManager())
    {
        ConfigReader.AddAppConfig(Utility.WindowsFabricApplicationInstanceId, null);
        ConsumerInitializationParameters initParam = new ConsumerInitializationParameters(
            Utility.WindowsFabricApplicationInstanceId,
            TestConfigSectionName,
            TestFabricNodeId,
            TestFabricNodeInstanceName,
            TestLogDirectory,
            TestWorkDirectory,
            testDiskSpaceManager);
        return new AzureTableQueryableEventUploader(initParam);
    }
}
internal AppInstanceManager()
{
    this.applicationInstances = new Dictionary<string, AppInstance>();
    this.diskSpaceManager = new DiskSpaceManager();
}
internal EtlProducerWorker(
    EtlProducerWorkerParameters initParam,
    DiskSpaceManager diskSpaceManager,
    ITraceFileEventReaderFactory traceFileEventReaderFactory)
{
    this.logSourceId = initParam.ProducerInstanceId;
    this.traceSource = initParam.TraceSource;
    this.isReadingFromApplicationManifest = initParam.IsReadingFromApplicationManifest;
    this.perfHelper = new EtlPerformance(this.traceSource);
    this.cancellationTokenSource = new CancellationTokenSource();
    this.diskSpaceManager = diskSpaceManager;

    // Initialize the settings
    this.etlProducerSettings = EtlProducerWorkerSettingsHelper.InitializeSettings(initParam);
    if (WinFabricEtlType.DefaultEtl == this.etlProducerSettings.WindowsFabricEtlType)
    {
        // If we're processing the default ETL files, we should keep track of
        // whether or not we're on the FMM node. This information is used by
        // some other plugin types.
        Utility.LastFmmEventTimestamp = DateTime.MinValue;
    }

    // Initialize the sink list
    this.sinks = EtlProducerWorkerSettingsHelper.InitializeSinks(
        initParam.EtlProducers,
        message => this.traceSource.WriteError(this.logSourceId, message)).AsReadOnly();
    this.etlToCsvFileWriters = EtlProducerWorkerSettingsHelper.InitializeFileWriters(this.sinks, this).AsReadOnly();
    this.bufferedEtwEventProviders = EtlProducerWorkerSettingsHelper.InitializeBufferedEtwEventProviders(this.sinks, this).AsReadOnly();

    // Figure out where the ETL files are located
    this.traceDirectory = EtlProducerWorkerSettingsHelper.InitializeTraceDirectory(
        initParam.IsReadingFromApplicationManifest,
        this.etlProducerSettings.EtlPath,
        initParam.LogDirectory,
        this.etlProducerSettings.WindowsFabricEtlType);

    this.markerFileDirectory = EtlProducerWorkerSettingsHelper.InitializeMarkerFileDirectory(
        initParam.IsReadingFromApplicationManifest,
        this.etlProducerSettings.EtlPath,
        initParam.LogDirectory,
        this.etlProducerSettings.WindowsFabricEtlType,
        this.traceDirectory,
        initParam.ProducerInstanceId);

    if (initParam.IsReadingFromApplicationManifest)
    {
        lock (InternalMarkerFileDirectoriesForApps)
        {
            InternalMarkerFileDirectoriesForApps.Add(this.markerFileDirectory);
        }
    }

    this.providers = EtlProducerWorkerSettingsHelper.InitializeProviders(
        initParam.IsReadingFromApplicationManifest,
        this.etlProducerSettings.EtlPath,
        this.etlProducerSettings.WindowsFabricEtlType,
        this.etlProducerSettings.EtlFilePatterns,
        message => this.traceSource.WriteError(this.logSourceId, message)).AsReadOnly();

    if (0 == this.providers.Count)
    {
        // No ETL files to read, so return immediately
        this.traceSource.WriteWarning(
            this.logSourceId,
            "No ETL files have been specified for processing.");
    }

    this.checkpointManager = new CheckpointManager(
        initParam.IsReadingFromApplicationManifest,
        this.etlProducerSettings.EtlPath,
        initParam.LogDirectory,
        this.traceDirectory,
        initParam.ProducerInstanceId,
        this.traceSource,
        this.logSourceId);

    if (false == initParam.IsReadingFromApplicationManifest)
    {
        // Ensure that no other instance of EtlProducerWorker is processing the
        // same ETL files
        var patternsAdded = new List<string>();
        var isUnique = VerifyEtlFilesUniqueToCurrentInstance(
            this.traceDirectory,
            this.providers,
            patternsAdded,
            message => this.traceSource.WriteError(this.logSourceId, message));
        this.patternsAddedToKnownEtlFileSet = patternsAdded.AsReadOnly();

        if (!isUnique)
        {
            throw new InvalidOperationException(
                string.Format(
                    "{0} is already being monitored for files matching one of the file patterns.",
                    this.traceDirectory));
        }
    }

    this.etlReadInterval = this.etlProducerSettings.EtlReadInterval;
    if (this.etlReadInterval > TimeSpan.Zero)
    {
        // Create the directory that contains the marker files.
        this.CreateDirectoriesForEtlProcessing();

        if (false == initParam.IsReadingFromApplicationManifest)
        {
            // We need to collect bootstrap traces
            this.bootstrapTraceProcessor = new BootstrapTraceProcessor(
                this.traceDirectory,
                this.markerFileDirectory,
                this.etlToCsvFileWriters,
                this.etlProducerSettings.EtlReadInterval,
                this.traceSource,
                this.logSourceId);
            this.bootstrapTraceProcessor.Start();
        }

        // Create the ETL processor
        this.etlProcessor = new EtlProcessor(
            false == initParam.IsReadingFromApplicationManifest,
            this.IsProcessingWindowsFabricEtlFilesFromDefaultLocation(),
            this.etlProducerSettings.CustomManifestPaths,
            initParam.ApplicationType,
            this.markerFileDirectory,
            this.etlProducerSettings.WindowsFabricEtlType,
            this.traceSource,
            this.logSourceId,
            this.perfHelper,
            this.sinks,
            this.etlToCsvFileWriters,
            this.bufferedEtwEventProviders,
            this.etlProducerSettings.AppEtwGuids,
            traceFileEventReaderFactory);

        // Create a periodic timer to read ETL files
        var timerId = string.Concat(
            this.logSourceId,
            EtlReadTimerIdSuffix);
        this.etlReadTimer = new DcaTimer(
            timerId,
            state => this.EtlReadCallback(this.cancellationTokenSource.Token),
            this.etlReadInterval);
        this.etlReadTimer.Start();

        // If there is a huge backlog of ETL files to process, we limit the
        // amount of time that we spend on processing ETL files in each
        // pass. Figure out how much processing time is available to each
        // provider.
        this.ComputePerProviderEtlProcessingTimeSeconds();
    }

    foreach (var provider in this.providers)
    {
        var capturedProvider = provider;
        if (Directory.Exists(this.traceDirectory))
        {
            diskSpaceManager.RegisterFolder(
                this.logSourceId,
                () => new DirectoryInfo(this.traceDirectory).EnumerateFiles(capturedProvider.EtlFileNamePattern),
                f => FabricFile.Exists(Path.Combine(this.markerFileDirectory, f.Name)), // Safe to delete once marker file exists
                f => f.LastWriteTimeUtc >= DateTime.UtcNow.Add(-initParam.LatestSettings.EtlDeletionAgeMinutes));
        }
    }

    if (Directory.Exists(this.markerFileDirectory))
    {
        diskSpaceManager.RegisterFolder(
            this.logSourceId,
            () => new DirectoryInfo(this.markerFileDirectory).EnumerateFiles(),
            f => !FabricFile.Exists(Path.Combine(this.traceDirectory, f.Name)), // Safe to delete once original has been cleaned up
            f => f.LastWriteTimeUtc >= DateTime.UtcNow.Add(-initParam.LatestSettings.EtlDeletionAgeMinutes));
    }
}
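// For reference, a stripped-down sketch of the four-argument RegisterFolder
// overload used above (the folder path, file pattern and retention window are
// hypothetical): the third argument decides whether a file is safe to delete,
// and the fourth whether it should be retained regardless of disk pressure.
diskSpaceManager.RegisterFolder(
    "ExampleSourceId",
    () => new DirectoryInfo(@"C:\Example\Traces").EnumerateFiles("*.etl"),
    f => true,                                               // always safe to delete
    f => f.LastWriteTimeUtc >= DateTime.UtcNow.AddDays(-7)); // retain if newer than 7 days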
internal CsvUploadWorker(CsvUploadWorkerParameters initParam, DiskSpaceManager diskSpaceManager)
{
    // Initialization
    this.FlushDataOnDispose = false;
    this.initParam = initParam;
    this.logSourceId = this.initParam.UploaderInstanceId;
    this.traceSource = this.initParam.TraceSource;
    this.blobUploadSettings = this.initParam.Settings;
    this.azureUtility = new AzureUtility(this.traceSource, this.logSourceId);
    this.destinationKey = string.Join(
        "_",
        new string[]
        {
            StandardPluginTypes.AzureBlobCsvUploader,
            this.blobUploadSettings.StorageAccountFactory.Connection.UseDevelopmentStorage
                ? AzureConstants.DevelopmentStorageConnectionString
                : this.blobUploadSettings.StorageAccountFactory.Connection.AccountName,
            this.blobUploadSettings.LttTraceContainerName
        });
    this.disposed = false;

    // Create a sub-directory for ourselves under the log directory
    bool success = GetCsvSubDirectory();

    string sourceDirectory = Path.Combine(initParam.LogDirectory, "Traces");

    // Create the helper object that writes events delivered from the LTT
    // files into CSV files.
    if (success)
    {
        this.csvToUploadFolderWriter = new CsvToUploadFolderWriter(
            this.logSourceId,
            this.initParam.FabricNodeId,
            this.csvFolder,
            sourceDirectory,
            diskSpaceManager,
            false);
        if (null == this.csvToUploadFolderWriter)
        {
            this.traceSource.WriteError(
                this.logSourceId,
                "Failed to create CSV to upload folder writer helper object.");
            success = false;
        }
    }

    if (success)
    {
        // Create a sub-directory for the uploader under the log directory
        success = GetUploaderWorkSubDirectory();
    }

    if (this.blobUploadSettings.Enabled)
    {
        // Create and initialize the uploader
        //
        // NOTE: By specifying 'true' for the 'filterDeletionByNodeId' parameter,
        // we only delete those blobs that were uploaded by the current node. We
        // identify this via the Fabric node ID that the ETL-to-CSV writer prefixed
        // to the file name before uploading. This is done so that all nodes don't
        // wastefully try to delete all blobs.
        try
        {
            var destinationPath = string.Concat(
                this.blobUploadSettings.StorageAccountFactory.Connection.UseDevelopmentStorage
                    ? AzureConstants.DevelopmentStorageConnectionString
                    : this.blobUploadSettings.StorageAccountFactory.Connection.AccountName,
                ";", // This separator cannot occur in account name or container name
                this.blobUploadSettings.LttTraceContainerName);
            this.uploader = new AzureFileUploader(
                this.traceSource,
                this.logSourceId,
                this.csvFolder,
                destinationPath,
                this.workFolder,
                this.blobUploadSettings.StorageAccountFactory,
                this.blobUploadSettings.LttTraceContainerName,
                this.blobUploadSettings.UploadInterval,
                this.blobUploadSettings.FileSyncInterval,
                this.blobUploadSettings.BlobDeletionAge,
                this.initParam.FabricNodeInstanceName,
                this.blobUploadSettings.DeploymentId);

            this.uploader.Start();
        }
        catch (Exception ex)
        {
            throw new InvalidOperationException("AzureFileUploader could not be constructed.", ex);
        }

        this.traceSource.WriteInfo(
            this.logSourceId,
            "Upload to blob storage is configured. Storage account: {0}, Trace container: {1}, Local trace path: {2}, " +
            "Upload interval (minutes): {3}",
            this.blobUploadSettings.StorageAccountFactory.Connection.AccountName,
            this.blobUploadSettings.LttTraceContainerName,
            this.csvFolder,
            this.blobUploadSettings.UploadInterval);
    }
    else
    {
        this.traceSource.WriteInfo(
            this.logSourceId,
            "Upload to blob storage is disabled (storage key not available). Only log age management is enabled. " +
            "Local trace path: {0}",
            this.csvFolder);
    }
}
private static Dictionary<string, IDcaConsumer> CreateConsumers(
    ConsumerFactory consumerFactory,
    IDictionary<string, List<object>> producerConsumerMap,
    DCASettings settings,
    DiskSpaceManager diskSpaceManager,
    string applicationInstanceId,
    IList<string> errorEvents)
{
    // Initialize consumer instance list
    var consumers = new Dictionary<string, IDcaConsumer>();
    foreach (string consumerInstance in settings.ConsumerInstances.Keys)
    {
        // Get the consumer instance information
        DCASettings.ConsumerInstanceInfo consumerInstanceInfo =
            settings.ConsumerInstances[consumerInstance];

        // Prepare the consumer initialization parameters
        var initParam = new ConsumerInitializationParameters(
            applicationInstanceId,
            consumerInstanceInfo.SectionName,
            Utility.FabricNodeId,
            Utility.FabricNodeName,
            Utility.LogDirectory,
            Utility.DcaWorkFolder,
            diskSpaceManager);

        // If the application is a container, switch to the container log folder.
        if (ContainerEnvironment.IsContainerApplication(applicationInstanceId))
        {
            initParam = new ConsumerInitializationParameters(
                applicationInstanceId,
                consumerInstanceInfo.SectionName,
                Utility.FabricNodeId,
                Utility.FabricNodeName,
                ContainerEnvironment.GetContainerLogFolder(applicationInstanceId),
                Utility.DcaWorkFolder,
                diskSpaceManager);
        }

        // Create consumer instance
        IDcaConsumer consumerInterface;
        try
        {
            consumerInterface = consumerFactory.CreateConsumer(
                consumerInstance,
                initParam,
                consumerInstanceInfo.TypeInfo.AssemblyName,
                consumerInstanceInfo.TypeInfo.TypeName);
        }
        catch (Exception e)
        {
            // We should continue trying to create other consumers.
            errorEvents.Add(e.Message);
            continue;
        }

        // Get the consumer's data sink
        object sink = consumerInterface.GetDataSink();
        if (null == sink)
        {
            // The consumer does not wish to provide a data sink.
            // One situation in which this might happen is if the consumer has
            // been disabled. This is not an error, so just move on to the next
            // consumer.
            continue;
        }

        // Add the data sink to the corresponding producer's consumer sink list
        string producerInstance = consumerInstanceInfo.ProducerInstance;
        Debug.Assert(
            false == string.IsNullOrEmpty(producerInstance),
            "Consumers must be tied to a producer");
        if (false == producerConsumerMap.ContainsKey(producerInstance))
        {
            producerConsumerMap[producerInstance] = new List<object>();
        }

        producerConsumerMap[producerInstance].Add(sink);

        // Add the consumer to the consumer list
        consumers[consumerInstance] = consumerInterface;
    }

    return consumers;
}
internal EtlInMemoryProducerWorker(
    EtlInMemoryProducerWorkerParameters initParam,
    DiskSpaceManager diskSpaceManager,
    ITraceFileEventReaderFactory traceFileEventReaderFactory)
{
    this.logSourceId = initParam.ProducerInstanceId;
    this.traceSource = initParam.TraceSource;
    this.cancellationTokenSource = new CancellationTokenSource();
    this.perfHelper = new EtlInMemoryPerformance(this.traceSource);
    this.diskSpaceManager = diskSpaceManager;

    // Initialize the settings
    this.etlInMemoryProducerWorkerSettings = EtlInMemoryProducerWorkerSettingsHelper.InitializeSettings(initParam);
    if (WinFabricEtlType.DefaultEtl == this.etlInMemoryProducerWorkerSettings.WindowsFabricEtlType)
    {
        // If we're processing the default ETL files, we should keep track of
        // whether or not we're on the FMM node. This information is used by
        // some other plugin types.
        Utility.LastFmmEventTimestamp = DateTime.MinValue;
    }

    // Initialize the sink list
    this.sinks = initParam.EtlInMemoryProducer.ConsumerSinks.Cast<IEtlInMemorySink>().ToList().AsReadOnly();
    this.etlToInMemoryBufferWriters = initParam.EtlInMemoryProducer.ConsumerSinks.OfType<EtlToInMemoryBufferWriter>().ToList().AsReadOnly();
    this.etlToInMemoryBufferWriters.ForEach(e => e.SetEtlProducer(this));

    // Figure out where the ETL files are located
    this.traceDirectory = EtlInMemoryProducerWorkerSettingsHelper.InitializeTraceDirectory(
        this.etlInMemoryProducerWorkerSettings.EtlPath,
        initParam.LogDirectory,
        this.etlInMemoryProducerWorkerSettings.WindowsFabricEtlType);

    this.markerFileDirectory = EtlInMemoryProducerWorkerSettingsHelper.InitializeMarkerFileDirectory(
        this.etlInMemoryProducerWorkerSettings.EtlPath,
        initParam.LogDirectory,
        this.traceDirectory,
        this.etlInMemoryProducerWorkerSettings.WindowsFabricEtlType);

    this.providers = EtlInMemoryProducerWorkerSettingsHelper.InitializeProviders(
        this.etlInMemoryProducerWorkerSettings.EtlPath,
        this.etlInMemoryProducerWorkerSettings.EtlFilePatterns,
        this.etlInMemoryProducerWorkerSettings.WindowsFabricEtlType,
        message => this.traceSource.WriteError(this.logSourceId, message)).AsReadOnly();

    if (0 == this.providers.Count)
    {
        // No ETL files to read, so return immediately
        this.traceSource.WriteWarning(
            this.logSourceId,
            "No ETL files have been specified for processing.");
    }

    this.checkpointManager = new CheckpointManager(
        this.etlInMemoryProducerWorkerSettings.EtlPath,
        initParam.LogDirectory,
        this.traceDirectory,
        this.traceSource,
        this.logSourceId);

    this.etlReadInterval = this.etlInMemoryProducerWorkerSettings.EtlReadInterval;
    if (this.etlReadInterval > TimeSpan.Zero)
    {
        // Create the directory that contains the marker files.
        this.CreateDirectoriesForEtlProcessing();

        // We need to collect bootstrap traces
        this.bootstrapTraceProcessor = new BootstrapTraceProcessor(
            this.traceDirectory,
            this.markerFileDirectory,
            this.etlToInMemoryBufferWriters,
            this.etlInMemoryProducerWorkerSettings.EtlReadInterval,
            this.traceSource,
            this.logSourceId);
        this.bootstrapTraceProcessor.Start();

        // Create the ETL processor
        this.etlProcessor = new EtlProcessor(
            true,
            this.IsProcessingWindowsFabricEtlFilesFromDefaultLocation(),
            this.markerFileDirectory,
            this.etlInMemoryProducerWorkerSettings.WindowsFabricEtlType,
            this.traceSource,
            this.logSourceId,
            this.perfHelper,
            this.sinks,
            this.etlToInMemoryBufferWriters,
            traceFileEventReaderFactory);

        // Create a periodic timer to read ETL files
        var timerId = string.Concat(
            this.logSourceId,
            EtlReadTimerIdSuffix);
        this.etlReadTimer = new DcaTimer(
            timerId,
            state => this.EtlReadCallback(this.cancellationTokenSource.Token),
            this.etlReadInterval);
        this.etlReadTimer.Start();

        // Figure out how much processing time is available to each provider.
        this.ComputePerProviderEtlProcessingTimeSeconds();
    }

    // Disk manager set up for traces
    foreach (var provider in this.providers)
    {
        var capturedProvider = provider;
        this.diskSpaceManager.RegisterFolder(
            this.logSourceId,
            () => new DirectoryInfo(this.traceDirectory).EnumerateFiles(capturedProvider.EtlFileNamePattern),
            f => FabricFile.Exists(Path.Combine(this.markerFileDirectory, f.Name)), // Safe to delete once marker file exists
            f => f.LastWriteTimeUtc >= DateTime.UtcNow.Add(-initParam.LatestSettings.EtlDeletionAgeMinutes));
    }

    // Disk manager set up for marker files
    this.diskSpaceManager.RegisterFolder(
        this.logSourceId,
        () => new DirectoryInfo(this.markerFileDirectory).EnumerateFiles(),
        f => !FabricFile.Exists(Path.Combine(this.traceDirectory, f.Name)), // Safe to delete once original has been cleaned up
        f => f.LastWriteTimeUtc >= DateTime.UtcNow.Add(-initParam.LatestSettings.EtlDeletionAgeMinutes));
}
internal FabricDCA(string applicationInstanceId, DiskSpaceManager diskSpaceManager)
{
    this.registeredAppConfigSections = new HashSet<string>();
    this.registeredServiceConfigSections = new HashSet<string>();

    // Retrieve DCA settings
    this.settings = new DCASettings(applicationInstanceId);

    // Get the names of sections in settings.xml that contain DCA-related
    // configuration information
    this.registeredAppConfigSections.UnionWith(GetConfigurationSections(this.settings));

    // Dictionary that represents the mapping of producers and consumers.
    // Key is the producer instance and value is the list of consumer instances
    // that are interested in receiving data from that producer instance.
    var producerConsumerMap = new Dictionary<string, List<object>>();

    var errorEvents = new List<string>();
    this.consumers = CreateConsumers(
        new ConsumerFactory(),
        producerConsumerMap,
        this.settings,
        diskSpaceManager,
        applicationInstanceId,
        errorEvents);

    // Create the telemetry consumer and map it to an ETL file producer if one exists
    if (applicationInstanceId == Utility.WindowsFabricApplicationInstanceId)
    {
        CreateTelemetryConsumer(
            this.consumers,
            producerConsumerMap,
            this.settings,
            applicationInstanceId);
    }

    // Create the producers
    this.producers = CreateProducers(
        new ProducerFactory(diskSpaceManager),
        producerConsumerMap,
        this.settings,
        applicationInstanceId,
        errorEvents);

    // Send all errors found during initialization of plugins as a single report.
    if (Utility.IsSystemApplicationInstanceId(applicationInstanceId))
    {
        if (errorEvents.Any())
        {
            var message = string.Join(Environment.NewLine, errorEvents);
            HealthClient.SendNodeHealthReport(message, HealthState.Error);
        }
        else
        {
            HealthClient.ClearNodeHealthReport();
        }
    }

    // Get additional configuration sections that the producers are
    // interested in
    this.registeredAppConfigSections.UnionWith(GetAdditionalProducerAppSections(this.producers));
    this.RegisteredServiceConfigSections.UnionWith(GetAdditionalProducerServiceSections(this.producers));
}
internal CsvToUploadFolderWriter(
    string logSourceId,
    string fabricNodeId,
    string csvFolder,
    string sourceFolder,
    DiskSpaceManager diskSpaceManager,
    bool dtrCompressionDisabled,
    IEtlToCsvFileWriterConfigReader configReader)
{
    this.traceSource = new FabricEvents.ExtensionsEvents(FabricEvents.Tasks.FabricDCA);
    this.logSourceId = logSourceId;
    this.organizeWindowsFabricTracesByType = true;
    this.fabricNodeId = fabricNodeId;
    this.dtrCompressionDisabledByConsumer = dtrCompressionDisabled;
    this.configReader = configReader;
    this.disposed = false;
    this.stopping = false;
    this.compressCsvFiles = FileCompressor.CompressionEnabled &&
                            (false == this.dtrCompressionDisabledByConsumer) &&
                            (false == this.configReader.IsDtrCompressionDisabledGlobally());
    this.diskSpaceManager = diskSpaceManager;

    try
    {
        string currentAssemblyLocation = typeof(CsvToUploadFolderWriter).GetTypeInfo().Assembly.Location;
        string versionFile = Path.Combine(Path.GetDirectoryName(currentAssemblyLocation), ClusterVersionFile);
        this.fabricVersion = File.ReadAllText(versionFile);
    }
    catch (Exception e)
    {
        this.traceSource.WriteExceptionAsError(
            this.logSourceId,
            e,
            "Could not find the version of the current Service Fabric code");
        throw;
    }

    // Create the directory that contains filtered traces, in case it
    // doesn't already exist
    this.csvFolder = csvFolder;
    FabricDirectory.CreateDirectory(this.csvFolder);
    CreateWindowsFabricTraceSubFolders();

    this.sourceFolder = sourceFolder;

    this.traceSource.WriteInfo(
        this.logSourceId,
        "Directory containing trace files: {0} Directory containing dtr traces: {1}",
        this.sourceFolder,
        this.csvFolder);

    diskSpaceManager.RegisterFolder(
        this.logSourceId,
        () => new DirectoryInfo(this.csvFolder).EnumerateFiles("*.dtr*", SearchOption.AllDirectories),
        null,
        f => f.LastWriteTimeUtc > (DateTime.UtcNow - configReader.GetDtrDeletionAge()));

    string timerIdMove = string.Concat(
        this.logSourceId,
        moveFilesTimerIdSuffix);
    this.csvMoveFilesTimer = new DcaTimer(
        timerIdMove,
        this.CsvMoveFilesHandler,
        1 * 60 * 1000); // 1 minute, in milliseconds
    this.csvMoveFilesTimer.Start();
}
internal EtlToCsvFileWriter(
    ITraceEventSourceFactory traceEventSourceFactory,
    string logSourceId,
    string fabricNodeId,
    string etwCsvFolder,
    bool dtrCompressionDisabled,
    DiskSpaceManager diskSpaceManager,
    IEtlToCsvFileWriterConfigReader configReader)
    : base(traceEventSourceFactory, logSourceId)
{
    this.organizeWindowsFabricTracesByType = true;
    this.fabricNodeId = fabricNodeId;
    this.dtrCompressionDisabledByConsumer = dtrCompressionDisabled;
    this.diskSpaceManager = diskSpaceManager;
    this.configReader = configReader;
#if !DotNetCoreClr
    this.perfHelper = new EtlToCsvPerformance(this.TraceSource, this.LogSourceId);
#endif

    // Create the directory that contains filtered traces, in case it
    // doesn't already exist
    this.filteredTraceDirName = etwCsvFolder;
    FabricDirectory.CreateDirectory(this.filteredTraceDirName);
    this.TraceSource.WriteInfo(
        this.LogSourceId,
        "Directory containing filtered ETW traces: {0}",
        this.filteredTraceDirName);

    // Register for deletion of old logs.
    // The retention time for the CSV files is the time after which a CSV file
    // on disk becomes a candidate for deletion. Read this value from config
    // every time. Do not cache it. That's how we pick up the latest value
    // when an update happens.
    //
    // From the enumerated files, delete only the ones whose corresponding ETL
    // files have already been fully processed. All other files should be kept
    // around because their file name gives us the bookmark up to which we
    // have processed events.
    var deletionAge = configReader.GetDtrDeletionAge();
    diskSpaceManager.RegisterFolder(
        logSourceId,
        () =>
        {
            // Get the filtered ETW trace files that are old enough to be deleted
            var dirInfo = new DirectoryInfo(this.filteredTraceDirName);
            return dirInfo.EnumerateFiles(EtlConsumerConstants.FilteredEtwTraceSearchPattern, SearchOption.AllDirectories);
        },
        f => f.LastWriteTimeUtc < DateTime.UtcNow - deletionAge,
        // We don't have any indication whether work is done currently; use the timer to estimate
        f => f.LastWriteTimeUtc >= DateTime.UtcNow - deletionAge,
        f =>
        {
            DateTime lastEventTimeStamp;
            GetLastEventTimestamp(f.Name, out lastEventTimeStamp);
            this.lastDeletedDtrName = f.Name;
            this.lastDeletedDtrEventTime = lastEventTimeStamp;

            try
            {
                FabricFile.Delete(f.FullName);
                return true;
            }
            catch (Exception)
            {
                return false;
            }
        });

    diskSpaceManager.RetentionPassCompleted += () =>
    {
        this.TraceSource.WriteInfo(
            this.LogSourceId,
            "The last dtr file deleted during the disk space manager pass was {0}, with events up till {1}.",
            this.lastDeletedDtrName,
            this.lastDeletedDtrEventTime);
    };

    diskSpaceManager.RegisterFolder(
        logSourceId,
        () => new DirectoryInfo(this.filteredTraceDirName)
            .EnumerateFiles(EtlConsumerConstants.BootstrapTraceSearchPattern, SearchOption.AllDirectories),
        f => true,
        f => f.LastWriteTimeUtc >= DateTime.UtcNow - deletionAge);
}
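// For reference, the five-argument RegisterFolder overload used above also
// takes a delete callback, so the caller can observe or veto each deletion.
// A stripped-down sketch with a hypothetical path and retention window:
diskSpaceManager.RegisterFolder(
    "ExampleSourceId",
    () => new DirectoryInfo(@"C:\Example\Filtered").EnumerateFiles("*.dtr"),
    f => f.LastWriteTimeUtc < DateTime.UtcNow - TimeSpan.FromDays(3),  // safe to delete
    f => f.LastWriteTimeUtc >= DateTime.UtcNow - TimeSpan.FromDays(3), // should be retained
    f =>
    {
        try
        {
            FabricFile.Delete(f.FullName);
            return true;  // deleted
        }
        catch (Exception)
        {
            return false; // leave the file for a later pass
        }
    });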
public void TestCsvNotDeletedOnDiskFull()
{
    var mockTraceSourceFactory = new Mock<ITraceEventSourceFactory>(MockBehavior.Strict);
    mockTraceSourceFactory
        .Setup(tsf => tsf.CreateTraceEventSource(FabricEvents.Tasks.FabricDCA))
        .Returns(new ErrorAndWarningFreeTraceEventSource());

    var mockConfigReader = new Mock<IEtlToCsvFileWriterConfigReader>(MockBehavior.Strict);
    mockConfigReader
        .Setup(cr => cr.IsDtrCompressionDisabledGlobally())
        .Returns(false);
    mockConfigReader
        .Setup(cr => cr.GetDtrDeletionAge())
        .Returns(TimeSpan.FromMinutes(1));

    var mockEtlProducer = new Mock<IEtlProducer>(MockBehavior.Strict);
    mockEtlProducer
        .Setup(ep => ep.IsProcessingWindowsFabricEtlFiles())
        .Returns(true);
    mockEtlProducer
        .Setup(ep => ep.HasEtlFileBeenFullyProcessed(EtlToCsvFileWriter.GetEtlFileNameFromTraceFileName(this.testCsvFileNames[0])))
        .Returns(true);
    mockEtlProducer
        .Setup(ep => ep.HasEtlFileBeenFullyProcessed(EtlToCsvFileWriter.GetEtlFileNameFromTraceFileName(this.testCsvFileNames[1])))
        .Returns(true);
    mockEtlProducer
        .Setup(ep => ep.HasEtlFileBeenFullyProcessed(EtlToCsvFileWriter.GetEtlFileNameFromTraceFileName(this.testCsvFileNames[2])))
        .Returns(false);

    var diskSpaceManager = new DiskSpaceManager(
        () => 1 << 30,
        () => 0,
        () => 80,
        TimeSpan.FromMilliseconds(5));

    var passCompletedEvent = new ManualResetEvent(false);
    var fileDeletedEvent = new ManualResetEvent(false);
    var testSetupEvent = new ManualResetEvent(false);
    diskSpaceManager.GetAvailableSpace = d =>
    {
        testSetupEvent.WaitOne();
        passCompletedEvent.Reset();
        fileDeletedEvent.Set();
        return 1 << 30; // 1 GB
    };
    diskSpaceManager.RetentionPassCompleted += () => { passCompletedEvent.Set(); };

    var writer = new EtlToCsvFileWriter(
        mockTraceSourceFactory.Object,
        TestLogSourceId,
        TestNodeId,
        TestCsvFolder,
        false,
        diskSpaceManager,
        mockConfigReader.Object);
    writer.SetEtlProducer(mockEtlProducer.Object);

    File.WriteAllText(Path.Combine(TestCsvFolder, this.testCsvFileNames[0]), TestContents);
    File.SetLastWriteTimeUtc(Path.Combine(TestCsvFolder, this.testCsvFileNames[0]), DateTime.UtcNow - TimeSpan.FromHours(2));
    File.WriteAllText(Path.Combine(TestCsvFolder, this.testCsvFileNames[1]), TestContents);
    File.WriteAllText(Path.Combine(TestCsvFolder, this.testCsvFileNames[2]), TestContents);
    testSetupEvent.Set();

    Assert.IsTrue(fileDeletedEvent.WaitOne(TestTimeout), "File delete should happen within timeout.");
    Assert.IsTrue(passCompletedEvent.WaitOne(TestTimeout), "A retention pass should happen within timeout.");

    // Wait for a second completion to ensure both local and global policies get applied.
    passCompletedEvent.Reset();
    Assert.IsTrue(passCompletedEvent.WaitOne(TestTimeout), "A retention pass should happen within timeout.");

    Assert.IsFalse(File.Exists(Path.Combine(TestCsvFolder, this.testCsvFileNames[0])), "File should be deleted by local policy.");
    Assert.IsTrue(File.Exists(Path.Combine(TestCsvFolder, this.testCsvFileNames[1])), "File is not old enough to be deleted.");
    Assert.IsTrue(File.Exists(Path.Combine(TestCsvFolder, this.testCsvFileNames[2])), "File is not old enough to be deleted.");
}
public FolderProducer(DiskSpaceManager diskSpaceManager, ProducerInitializationParameters initializationParameters)
{
    this.diskSpaceManager = diskSpaceManager;

    // Initialization
    this.initParam = initializationParameters;
    this.logSourceId = string.Concat(this.initParam.ApplicationInstanceId, "_", this.initParam.SectionName);
    this.traceSource = new FabricEvents.ExtensionsEvents(FabricEvents.Tasks.FabricDCA);
    this.configReader = new ConfigReader(this.initParam.ApplicationInstanceId);
    this.additionalAppConfigSections = new List<string>();
    this.serviceConfigSections = new List<string>();

    // Read instance-specific settings from settings.xml
    this.GetSettings();
    if (false == this.folderProducerSettings.Enabled)
    {
        // Producer is not enabled, so return immediately
        return;
    }

    if (this.configReader.IsReadingFromApplicationManifest &&
        FolderProducerType.WindowsFabricCrashDumps == this.folderProducerSettings.Type)
    {
        this.serviceConfigSections.Add(ServiceConfig.ExeHostElement);
    }

    var additionalFoldersToTrim = new List<string>();

#if !DotNetCoreClr
    if (FolderProducerType.WindowsFabricPerformanceCounters == this.folderProducerSettings.Type)
    {
        // We will need information from the <PerformanceCounterLocalStore> section of the
        // service manifest.
        this.additionalAppConfigSections.Add(PerformanceCounterCommon.PerformanceCounterSectionName);

        // The performance counter binary files cannot be read while the OS is still
        // writing to them. Therefore, we make the files available to the consumer only
        // when the OS has finished writing to them. Hence we need a special processor
        // for these files.
        List<string> additionalPerfCounterFoldersToTrim;
        bool perfCounterCollectionEnabled;

        // There should be only one path in the path list for performance counters
        string perfCounterPath = this.folderProducerSettings.Paths[0];
        this.perfCounterFolderProcessor = PerfCounterFolderProcessor.Create(
            this.traceSource,
            this.logSourceId,
            this.configReader,
            this.initParam.LogDirectory,
            perfCounterPath,
            out perfCounterCollectionEnabled,
            out additionalPerfCounterFoldersToTrim);
        if (null == this.perfCounterFolderProcessor)
        {
            return;
        }

        if (false == perfCounterCollectionEnabled)
        {
            return;
        }

        if (null != additionalPerfCounterFoldersToTrim)
        {
            additionalFoldersToTrim.AddRange(additionalPerfCounterFoldersToTrim);
        }
    }
#endif

    if (null != this.initParam.ConsumerSinks)
    {
        foreach (object sinkAsObject in this.initParam.ConsumerSinks)
        {
            IFolderSink folderSink = null;
            try
            {
                folderSink = (IFolderSink)sinkAsObject;
            }
            catch (InvalidCastException e)
            {
                this.traceSource.WriteError(
                    this.logSourceId,
                    "Exception occurred while casting a sink object of type {0} to interface IFolderSink. Exception information: {1}.",
                    sinkAsObject.GetType(),
                    e);
            }

            if (null == folderSink)
            {
                continue;
            }

            folderSink.RegisterFolders(this.folderProducerSettings.Paths);
        }
    }

#if DotNetCoreClrLinux
    if (FolderProducerType.WindowsFabricCrashDumps == this.folderProducerSettings.Type)
    {
        const int filePermissions =
            Helpers.LINUX_USER_READ | Helpers.LINUX_USER_WRITE | Helpers.LINUX_USER_EXECUTE |
            Helpers.LINUX_GROUP_READ | Helpers.LINUX_GROUP_WRITE | Helpers.LINUX_GROUP_EXECUTE |
            Helpers.LINUX_OTHER_READ | Helpers.LINUX_OTHER_WRITE | Helpers.LINUX_OTHER_EXECUTE;

        Helpers.UpdateFilePermission(this.folderProducerSettings.Paths[0], filePermissions);

        using (FileStream fs = new FileStream("/proc/sys/kernel/core_pattern", FileMode.Open))
        {
            using (StreamWriter sw = new StreamWriter(fs))
            {
                sw.Write(Path.Combine(this.folderProducerSettings.Paths[0], "%e.%p.dmp"));
            }
        }
    }
#endif

    var foldersToTrim = this.folderProducerSettings.Paths.Concat(additionalFoldersToTrim).ToArray();
    if (this.IsDiskSpaceManagementEnabled())
    {
        foreach (string folderPath in foldersToTrim)
        {
            // Figure out the timestamp before which all files will be deleted
            this.diskSpaceManager.RegisterFolder(
                this.logSourceId,
                () => new DirectoryInfo(folderPath).EnumerateFiles("*", SearchOption.AllDirectories),
                f => f.LastWriteTimeUtc < DateTime.UtcNow - MinimumFileRetentionTime, // isSafeToDelete
                f => !Utility.IgnoreUploadFileList.Exists(x => x.Equals(f)) &&
                     f.LastWriteTimeUtc >= DateTime.UtcNow - this.folderProducerSettings.DataDeletionAge); // shouldBeRetained
        }
    }
}
internal EtlProducer(
    DiskSpaceManager diskSpaceManager,
    IEtlProducerConfigReaderFactory configReaderFactory,
    ITraceFileEventReaderFactory traceFileEventReaderFactory,
    ITraceEventSourceFactory traceEventSourceFactory,
    ProducerInitializationParameters initParam)
{
    this.diskSpaceManager = diskSpaceManager;
    this.traceFileEventReaderFactory = traceFileEventReaderFactory;

    // Initialization
    this.logSourceId = string.Concat(initParam.ApplicationInstanceId, "_", initParam.SectionName);
    this.traceSource = traceEventSourceFactory.CreateTraceEventSource(FabricEvents.Tasks.FabricDCA);
    this.serviceConfigSections = new List<string>();
    this.logDirectory = initParam.LogDirectory;
    this.consumerSinks = initParam.ConsumerSinks;

    // Read instance-specific settings from the settings file
    var configReader = configReaderFactory.CreateEtlProducerConfigReader(this.traceSource, this.logSourceId);
    this.etlProducerSettings = configReader.GetSettings();
    if (false == this.etlProducerSettings.Enabled)
    {
        // ETL file processing is not enabled, so return immediately
        return;
    }

    if (!this.etlProducerSettings.ProcessingWinFabEtlFiles)
    {
        // If we are collecting ETW events on behalf of an app, then we will
        // need information from the <ETW> section of the service manifest.
        this.serviceConfigSections.Add(ServiceConfig.EtwElement);

        // Check if we can use an existing worker object
        string applicationType = this.etlProducerSettings.ApplicationType;
        lock (ProducerWorkers)
        {
            EtlProducerWorkerInfo workerInfo = ProducerWorkers.FirstOrDefault(
                w => w.ApplicationType.Equals(applicationType, StringComparison.Ordinal));
            if (null != workerInfo)
            {
                // Existing worker object is available.
                this.traceSource.WriteInfo(
                    this.logSourceId,
                    "Existing ETL producer worker object for application type {0} is available. Restarting the worker object now.",
                    applicationType);

                // Restart the worker object
                workerInfo.ProducerWorker.Dispose();
                workerInfo.ProducerWorker = null;

                List<EtlProducer> etlProducers = new List<EtlProducer>(workerInfo.EtlProducers) { this };
                EtlProducerWorker.EtlProducerWorkerParameters newWorkerParam =
                    new EtlProducerWorker.EtlProducerWorkerParameters()
                    {
                        TraceSource = this.traceSource,
                        IsReadingFromApplicationManifest = !this.etlProducerSettings.ProcessingWinFabEtlFiles,
                        ApplicationType = applicationType,
                        LogDirectory = initParam.LogDirectory,
                        ProducerInstanceId = applicationType,
                        EtlProducers = etlProducers,
                        LatestSettings = this.etlProducerSettings
                    };
                try
                {
                    EtlProducerWorker newWorker = new EtlProducerWorker(
                        newWorkerParam,
                        this.diskSpaceManager,
                        this.traceFileEventReaderFactory);
                    workerInfo.EtlProducers.Add(this);
                    workerInfo.ProducerWorker = newWorker;
                }
                catch (InvalidOperationException)
                {
                    this.traceSource.WriteError(
                        this.logSourceId,
                        "Failed to restart ETL producer worker object for application type {0}.",
                        applicationType);
                }
            }
            else
            {
                // Create a new worker object
                this.traceSource.WriteInfo(
                    this.logSourceId,
                    "Creating ETL producer worker object for application type {0} ...",
                    applicationType);
                List<EtlProducer> etlProducers = new List<EtlProducer> { this };
                EtlProducerWorker.EtlProducerWorkerParameters newWorkerParam =
                    new EtlProducerWorker.EtlProducerWorkerParameters()
                    {
                        TraceSource = this.traceSource,
                        IsReadingFromApplicationManifest = !this.etlProducerSettings.ProcessingWinFabEtlFiles,
                        ApplicationType = applicationType,
                        LogDirectory = initParam.LogDirectory,
                        ProducerInstanceId = applicationType,
                        EtlProducers = etlProducers,
                        LatestSettings = this.etlProducerSettings
                    };
                try
                {
                    EtlProducerWorker newWorker = new EtlProducerWorker(
                        newWorkerParam,
                        this.diskSpaceManager,
                        this.traceFileEventReaderFactory);
                    workerInfo = new EtlProducerWorkerInfo()
                    {
                        ApplicationType = applicationType,
                        EtlProducers = new List<EtlProducer>(),
                        ProducerWorker = newWorker
                    };
                    workerInfo.EtlProducers.Add(this);
                    ProducerWorkers.Add(workerInfo);
                }
                catch (InvalidOperationException)
                {
                    this.traceSource.WriteError(
                        this.logSourceId,
                        "Failed to create ETL producer worker object for application type {0}.",
                        applicationType);
                }
            }
        }
    }
    else
    {
        // Create a new worker object
        List<EtlProducer> etlProducers = new List<EtlProducer> { this };
        EtlProducerWorker.EtlProducerWorkerParameters newWorkerParam =
            new EtlProducerWorker.EtlProducerWorkerParameters()
            {
                TraceSource = this.traceSource,
                IsReadingFromApplicationManifest = !this.etlProducerSettings.ProcessingWinFabEtlFiles,
                ApplicationType = string.Empty,
                LogDirectory = initParam.LogDirectory,
                ProducerInstanceId = this.logSourceId,
                EtlProducers = etlProducers,
                LatestSettings = this.etlProducerSettings
            };
        try
        {
            EtlProducerWorker newWorker = new EtlProducerWorker(
                newWorkerParam,
                this.diskSpaceManager,
                this.traceFileEventReaderFactory);
            this.producerWorker = newWorker;
        }
        catch (InvalidOperationException)
        {
        }
    }
}
internal ProducerFactory(DiskSpaceManager diskSpaceManager)
{
    this.diskSpaceManager = diskSpaceManager;
}
public AzureBlobCsvUploader(ConsumerInitializationParameters initParam)
{
    // Initialization
    this.logSourceId = string.Concat(initParam.ApplicationInstanceId, "_", initParam.SectionName);
    this.traceSource = new FabricEvents.ExtensionsEvents(FabricEvents.Tasks.FabricDCA);
    this.configReader = new AzureBlobConfigReader(
        new ConfigReader(initParam.ApplicationInstanceId),
        initParam.SectionName,
        this.traceSource,
        this.logSourceId);
    this.diskSpaceManager = initParam.DiskSpaceManager;
    this.disposed = false;

    // Read blob-specific settings from settings.xml
    this.blobUploadSettings = this.GetSettings();

    if (this.configReader.IsReadingFromApplicationManifest)
    {
        // Check if we can use an existing upload worker object
        UploadWorkerKey key = new UploadWorkerKey()
        {
            // Destination path is a concatenation of storage account name and container name
            DestinationPath = string.Concat(
                this.blobUploadSettings.StorageAccountFactory.Connection.UseDevelopmentStorage
                    ? AzureConstants.DevelopmentStorageConnectionString
                    : this.blobUploadSettings.StorageAccountFactory.Connection.AccountName,
                ";", // This separator cannot occur in account name or container name
                this.blobUploadSettings.LttTraceContainerName),
            ApplicationType = this.configReader.GetApplicationType(),
        };
        lock (UploadWorkers)
        {
            UploadWorkerInfo workerInfo = UploadWorkers.FirstOrDefault(w => w.Matches(key));
            if (null != workerInfo)
            {
                // Existing upload worker object is available. Increment its
                // reference count
                this.traceSource.WriteInfo(
                    this.logSourceId,
                    "Existing upload worker object for application type {0}, Azure storage account {1} and container {2} is available and will be used.",
                    key.ApplicationType,
                    this.blobUploadSettings.StorageAccountFactory.Connection.AccountName,
                    this.blobUploadSettings.LttTraceContainerName);
                workerInfo.RefCount++;
                workerInfo.UploadWorker.UpdateSettings(this.blobUploadSettings);
                this.uploadWorker = workerInfo.UploadWorker;
            }
            else
            {
                // Create a new upload worker object
                this.traceSource.WriteInfo(
                    this.logSourceId,
                    "Creating upload worker object for application type {0}, Azure storage account {1} and container {2} ...",
                    key.ApplicationType,
                    this.blobUploadSettings.StorageAccountFactory.Connection.AccountName,
                    this.blobUploadSettings.LttTraceContainerName);
                CsvUploadWorker.CsvUploadWorkerParameters param = new CsvUploadWorker.CsvUploadWorkerParameters()
                {
                    TraceSource = this.traceSource,
                    FabricNodeId = initParam.FabricNodeId,
                    FabricNodeInstanceName = initParam.FabricNodeInstanceName,
                    IsReadingFromApplicationManifest = this.configReader.IsReadingFromApplicationManifest,
                    LogDirectory = initParam.LogDirectory,
                    WorkDirectory = initParam.WorkDirectory,
                    UploaderInstanceId = key.ApplicationType,
                    ParentWorkFolderName = key.ApplicationType,
                    Settings = this.blobUploadSettings
                };
                CsvUploadWorker newWorker;
                try
                {
                    newWorker = new CsvUploadWorker(param, this.diskSpaceManager);
                }
                catch (Exception)
                {
                    this.traceSource.WriteError(
                        this.logSourceId,
                        "Failed to create upload worker object for application type {0}, Azure storage account {1} and container {2}.",
                        key.ApplicationType,
                        this.blobUploadSettings.StorageAccountFactory.Connection.AccountName,
                        this.blobUploadSettings.LttTraceContainerName);
                    throw;
                }

                workerInfo = new UploadWorkerInfo()
                {
                    Key = key,
                    RefCount = 1,
                    UploadWorker = newWorker
                };
                UploadWorkers.Add(workerInfo);
                this.uploadWorker = workerInfo.UploadWorker;
            }
        }
    }
    else
    {
        // Create a new upload worker object
        CsvUploadWorker.CsvUploadWorkerParameters param = new CsvUploadWorker.CsvUploadWorkerParameters()
        {
            TraceSource = this.traceSource,
            FabricNodeId = initParam.FabricNodeId,
            FabricNodeInstanceName = initParam.FabricNodeInstanceName,
            IsReadingFromApplicationManifest = this.configReader.IsReadingFromApplicationManifest,
            LogDirectory = initParam.LogDirectory,
            WorkDirectory = initParam.WorkDirectory,
            UploaderInstanceId = this.logSourceId,
            ParentWorkFolderName = Utility.ShortWindowsFabricIdForPaths,
            Settings = this.blobUploadSettings
        };
        this.uploadWorker = new CsvUploadWorker(param, this.diskSpaceManager);
    }
}
private static void ValidateMultipleInstances(
    TestInstance[] testInstances,
    long diskSpaceRemaining,
    long maxDiskQuota = TestMaxDiskQuota,
    long diskFullSafetySpace = TestDiskFullSafetySpace,
    double diskQuotaUsageTargetPercent = TestDiskQuotaUsageTargetPercent)
{
    const string User = "******";
    var beginPreparationEvent = new ManualResetEvent(false);
    var endPreparationEvent = new ManualResetEvent(false);

    // Use a big enough timespan to prevent spinning before folders are registered.
    using (var diskSpaceManager = new DiskSpaceManager(
        () => maxDiskQuota,
        () => diskFullSafetySpace,
        () => diskQuotaUsageTargetPercent,
        TimeSpan.FromMilliseconds(5)))
    {
        diskSpaceManager.RetentionPassCompleted += () =>
        {
            beginPreparationEvent.Set();
            endPreparationEvent.WaitOne();
        };
        beginPreparationEvent.WaitOne();

        var guid = Guid.NewGuid();
        var traceFolder = Directory.CreateDirectory(guid.ToString());
        for (var i = 0; i < testInstances.Length; ++i)
        {
            Directory.CreateDirectory(Path.Combine(traceFolder.FullName, i.ToString()));
            Assert.IsTrue(
                testInstances[i].DiskSpaceUsed % DefaultBytesPerFile == 0,
                "Must allocate in blocks of 1MB");
            for (var j = 0; j < testInstances[i].DiskSpaceUsed / DefaultBytesPerFile; ++j)
            {
                // Create under a folder unique to this call and test instance.
                var file = new FileInfo(Path.Combine(traceFolder.FullName, i.ToString(), j.ToString()));
                testInstances[i].CreateFileCallback(file);
            }
        }

        // Make sure files to remove are unique
        var filesToRemove = testInstances.Select(t => new HashSet<FileInfo>()).ToArray();
        Func<FileInfo, bool> testDeleteFunc = f =>
        {
            var testFolderIndex = int.Parse(Path.GetFileName(Path.GetDirectoryName(f.FullName)));
            filesToRemove[testFolderIndex].Add(f);
            f.Delete();
            return true;
        };
        diskSpaceManager.GetAvailableSpace = d =>
            diskSpaceRemaining + filesToRemove.Sum(u => u.Count) * DefaultBytesPerFile;

        for (var i = 0; i < testInstances.Length; ++i)
        {
            diskSpaceManager.RegisterFolder(
                string.Format("{0}{1}", User, i),
                new DirectoryInfo(Path.Combine(traceFolder.FullName, i.ToString())),
                null,
                testInstances[i].SafeToDeleteCallback,
                testInstances[i].FileRetentionPolicy,
                testDeleteFunc);
        }

        var finishedEvent = new ManualResetEvent(false);
        diskSpaceManager.RetentionPassCompleted += () => { finishedEvent.Set(); };
        endPreparationEvent.Set();

        // Need to wait twice to ensure a full pass of both local and global policy
        var success = finishedEvent.WaitOne(TimeSpan.FromSeconds(10));
        Assert.IsTrue(success, "Initial pass should finish.");
        finishedEvent.Reset();
        success = finishedEvent.WaitOne(TimeSpan.FromSeconds(10));
        Assert.IsTrue(success, "Second pass should finish");

        for (var i = 0; i < testInstances.Length; ++i)
        {
            Assert.AreEqual(
                testInstances[i].ExpectedDiskSpaceToFree,
                filesToRemove[i].Count * DefaultBytesPerFile,
                "Actual and expected bytes differ for testInstance {0}",
                i);
        }
    }
}
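// TestInstance is not shown; a sketch of its shape inferred from the member
// accesses in the helper above (the property types are assumptions):
private class TestInstance
{
    public long DiskSpaceUsed { get; set; }                        // bytes to allocate, in DefaultBytesPerFile blocks
    public long ExpectedDiskSpaceToFree { get; set; }              // bytes the retention passes are expected to reclaim
    public Action<FileInfo> CreateFileCallback { get; set; }       // creates a test file at the given location
    public Func<FileInfo, bool> SafeToDeleteCallback { get; set; } // per-file safe-to-delete predicate
    public Func<FileInfo, bool> FileRetentionPolicy { get; set; }  // per-file retention predicate
}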
public LttProducer(
    DiskSpaceManager diskSpaceManager,
    ITraceEventSourceFactory traceEventSourceFactory,
    ProducerInitializationParameters initParam)
{
    this.logSourceId = string.Concat(initParam.ApplicationInstanceId, "_", initParam.SectionName);
    this.traceSource = traceEventSourceFactory.CreateTraceEventSource(FabricEvents.Tasks.FabricDCA);
    this.serviceConfigSections = new List<string>();
    this.consumerSinks = initParam.ConsumerSinks;

    // Read the timer config value from dca section
    var configReader = new ConfigReader(initParam.ApplicationInstanceId);
    long newLttReadIntervalMinutes = configReader.GetUnencryptedConfigValue(
        initParam.SectionName,
        LttProducerConstants.LttReadIntervalParamName,
        LttProducerConstants.DefaultLttReadIntervalMinutes);

    lock (ProducerWorkers)
    {
        LttProducerWorkerInfo workerInfo = ProducerWorkers.FirstOrDefault();
        if (null != workerInfo)
        {
            // Existing worker object is available.
            this.traceSource.WriteInfo(
                this.logSourceId,
                "Existing Ltt producer worker object. Restarting the worker object now.");

            // Save the old value for comparison
            long oldLttReadIntervalMinutes = workerInfo.ProducerWorker.LttReadIntervalMinutes;

            // Restart the worker object
            workerInfo.ProducerWorker.Dispose();
            workerInfo.ProducerWorker = null;

            // Keep the smaller value intact, as this worker handles both
            // producers: Ltt trace conversion and table events
            if (oldLttReadIntervalMinutes < newLttReadIntervalMinutes)
            {
                newLttReadIntervalMinutes = oldLttReadIntervalMinutes;
            }

            List<LttProducer> LttProducers = new List<LttProducer>(workerInfo.LttProducers) { this };
            LttProducerWorker.LttProducerWorkerParameters newWorkerParam =
                new LttProducerWorker.LttProducerWorkerParameters()
                {
                    TraceSource = this.traceSource,
                    LogDirectory = initParam.LogDirectory,
                    ProducerInstanceId = this.logSourceId,
                    LttProducers = LttProducers,
                    LatestSettings = initParam,
                    LttReadIntervalMinutes = newLttReadIntervalMinutes
                };
            try
            {
                LttProducerWorker newWorker = new LttProducerWorker(newWorkerParam);
                workerInfo.LttProducers.Add(this);
                workerInfo.ProducerWorker = newWorker;
            }
            catch (InvalidOperationException)
            {
                this.traceSource.WriteError(
                    this.logSourceId,
                    "Failed to restart Ltt producer worker object.");
            }
        }
        else
        {
            // Create a new worker object
            this.traceSource.WriteInfo(
                this.logSourceId,
                "Creating Ltt producer worker object ...");
            List<LttProducer> LttProducers = new List<LttProducer> { this };
            LttProducerWorker.LttProducerWorkerParameters newWorkerParam =
                new LttProducerWorker.LttProducerWorkerParameters()
                {
                    TraceSource = this.traceSource,
                    LogDirectory = initParam.LogDirectory,
                    ProducerInstanceId = this.logSourceId,
                    LttProducers = LttProducers,
                    LatestSettings = initParam,
                    LttReadIntervalMinutes = newLttReadIntervalMinutes
                };
            try
            {
                LttProducerWorker newWorker = new LttProducerWorker(newWorkerParam);
                workerInfo = new LttProducerWorkerInfo()
                {
                    LttProducers = new List<LttProducer>(),
                    ProducerWorker = newWorker
                };
                workerInfo.LttProducers.Add(this);
                ProducerWorkers.Add(workerInfo);
            }
            catch (InvalidOperationException)
            {
                this.traceSource.WriteError(
                    this.logSourceId,
                    "Failed to create Ltt producer worker object.");
            }
        }
    }
}