[Fact]
public async Task TableRead_ManyRows_Succeeds()
{
    var monitor1 = new TestScaleMonitor1();
    var monitors = new IScaleMonitor[] { monitor1 };

    var metricsTable = _repository.GetMetricsTable();
    await _repository.CreateIfNotExistsAsync(metricsTable);

    TableBatchOperation batch = new TableBatchOperation();
    int numRows = 500;
    for (int i = 0; i < numRows; i++)
    {
        var sample = new TestScaleMetrics1 { Count = i };

        // space the samples out so timestamps are strictly increasing
        await Task.Delay(5);

        batch.Add(TableStorageScaleMetricsRepository.CreateMetricsInsertOperation(sample, TestHostId, monitor1.Descriptor));

        // Azure Table batch operations are limited to 100 entities, so flush in chunks
        if (batch.Count % 100 == 0)
        {
            await metricsTable.ExecuteBatchAsync(batch);
            batch = new TableBatchOperation();
        }
    }
    if (batch.Count > 0)
    {
        await metricsTable.ExecuteBatchAsync(batch);
    }

    var results = await _repository.ReadMetricsAsync(monitors);
    Assert.Equal(1, results.Count);
    Assert.Equal(numRows, results[monitor1].Count);

    // verify results are returned in the order they were inserted (ascending
    // time order) and the timestamps are monotonically increasing
    var metrics = results[monitor1].ToArray();
    for (int i = 0; i < numRows - 1; i++)
    {
        for (int j = i + 1; j < numRows; j++)
        {
            var m1 = (TestScaleMetrics1)metrics[i];
            var m2 = (TestScaleMetrics1)metrics[j];
            Assert.True(m1.Count < m2.Count);
            Assert.True(metrics[i].Timestamp < metrics[j].Timestamp);
        }
    }
}
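// The ascending read order asserted above depends on the repository's row key
// scheme. A minimal sketch of one plausible scheme (hypothetical helper, not
// necessarily what TableStorageScaleMetricsRepository.GetRowKey actually does):
// zero-padding the timestamp ticks makes Azure Table storage's lexicographic
// row key sort coincide with chronological order, and a GUID suffix keeps keys
// unique when two samples land on the same tick.
private static string GetChronologicalRowKey(DateTime timestampUtc)
{
    // 19 digits is enough for DateTime.MaxValue.Ticks (3155378975999999999)
    return string.Format("{0:D19}-{1:N}", timestampUtc.Ticks, Guid.NewGuid());
}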
public TableStorageScaleMetricsRepositoryTests()
{
    var configuration = new ConfigurationBuilder().AddEnvironmentVariables().Build();

    _hostIdProviderMock = new Mock<IHostIdProvider>(MockBehavior.Strict);
    _hostIdProviderMock.Setup(p => p.GetHostIdAsync(It.IsAny<CancellationToken>())).ReturnsAsync(TestHostId);
    _scaleOptions = new ScaleOptions
    {
        MetricsPurgeEnabled = false
    };

    _loggerProvider = new TestLoggerProvider();
    ILoggerFactory loggerFactory = new LoggerFactory();
    loggerFactory.AddProvider(_loggerProvider);

    // Allow for up to 30 seconds of creation retries for tests due to slow table deletes
    _repository = new TableStorageScaleMetricsRepository(configuration, _hostIdProviderMock.Object, new OptionsWrapper<ScaleOptions>(_scaleOptions), loggerFactory, 60, new DefaultDelegatingHandlerProvider(new TestEnvironment()));

    EmptyMetricsTableAsync().GetAwaiter().GetResult();
}
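// The trailing constructor arguments above configure table creation retries.
// Azure Storage can briefly return 409 Conflict when a table that was just
// deleted is recreated, so creation needs a retry loop. A hedged sketch of
// such a loop (hypothetical helper; the real retry logic is internal to the
// repository and its exact backoff may differ):
private static async Task CreateTableWithRetriesAsync(CloudTable table, int maxRetries, TimeSpan delay)
{
    for (int attempt = 0; ; attempt++)
    {
        try
        {
            await table.CreateIfNotExistsAsync();
            return;
        }
        catch (StorageException) when (attempt < maxRetries)
        {
            // the delete is still being processed server side; back off and retry
            await Task.Delay(delay);
        }
    }
}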
[Fact]
public async Task InvalidStorageConnection_Handled()
{
    var configuration = new ConfigurationBuilder().Build();
    Assert.Null(configuration.GetWebJobsConnectionString(ConnectionStringNames.Storage));

    var options = new ScaleOptions();
    ILoggerFactory loggerFactory = new LoggerFactory();
    loggerFactory.AddProvider(_loggerProvider);
    var localRepository = new TableStorageScaleMetricsRepository(configuration, _hostIdProviderMock.Object, new OptionsWrapper<ScaleOptions>(options), loggerFactory, new TestEnvironment());

    var monitor1 = new TestScaleMonitor1();
    var monitor2 = new TestScaleMonitor2();
    var monitor3 = new TestScaleMonitor3();
    var monitors = new IScaleMonitor[] { monitor1, monitor2, monitor3 };

    // reads should return empty results rather than throw
    var result = await localRepository.ReadMetricsAsync(monitors);
    Assert.Empty(result);

    var logs = _loggerProvider.GetAllLogMessages();
    Assert.Single(logs);
    Assert.Equal("Azure Storage connection string is empty or invalid. Unable to read/write scale metrics.", logs[0].FormattedMessage);

    _loggerProvider.ClearAllLogMessages();

    // writes should no-op rather than throw
    Dictionary<IScaleMonitor, ScaleMetrics> metricsMap = new Dictionary<IScaleMonitor, ScaleMetrics>();
    metricsMap.Add(monitor1, new TestScaleMetrics1 { Count = 10 });
    metricsMap.Add(monitor2, new TestScaleMetrics2 { Num = 50 });
    metricsMap.Add(monitor3, new TestScaleMetrics3 { Length = 100 });
    await localRepository.WriteMetricsAsync(metricsMap);
}
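// The test above pins down the degradation contract: with no storage
// connection string, reads return empty and writes no-op after a single
// warning. A minimal sketch of a guard that produces that behavior
// (hypothetical shape and table name; the repository's internals may differ):
private static bool TryGetMetricsTable(string connectionString, ILogger logger, out CloudTable table)
{
    table = null;
    if (string.IsNullOrEmpty(connectionString) || !CloudStorageAccount.TryParse(connectionString, out CloudStorageAccount account))
    {
        logger.LogWarning("Azure Storage connection string is empty or invalid. Unable to read/write scale metrics.");
        return false;
    }

    table = account.CreateCloudTableClient().GetTableReference("scalemetrics");
    return true;
}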
[Fact]
public async Task ReadWriteMetrics_IntegerConversion_HandlesLongs()
{
    var monitor1 = new TestScaleMonitor1();
    var monitors = new IScaleMonitor[] { monitor1 };

    // first write a couple entities manually to the table to simulate
    // the change in entity property type (int -> long)
    // this shows that the table can have entities of both formats with
    // no versioning issues

    // add an entity with Count property of type int
    var entity = new DynamicTableEntity
    {
        RowKey = TableStorageScaleMetricsRepository.GetRowKey(DateTime.UtcNow),
        PartitionKey = TestHostId,
        Properties = new Dictionary<string, EntityProperty>()
    };
    var expectedIntCountValue = int.MaxValue;
    entity.Properties.Add("Timestamp", new EntityProperty(DateTime.UtcNow));
    entity.Properties.Add("Count", new EntityProperty(expectedIntCountValue));
    entity.Properties.Add(TableStorageScaleMetricsRepository.MonitorIdPropertyName, EntityProperty.GeneratePropertyForString(monitor1.Descriptor.Id));
    var batch = new TableBatchOperation();
    batch.Add(TableOperation.Insert(entity));

    // add an entity with Count property of type long
    entity = new DynamicTableEntity
    {
        RowKey = TableStorageScaleMetricsRepository.GetRowKey(DateTime.UtcNow),
        PartitionKey = TestHostId,
        Properties = new Dictionary<string, EntityProperty>()
    };
    var expectedLongCountValue = long.MaxValue;
    entity.Properties.Add("Timestamp", new EntityProperty(DateTime.UtcNow));
    entity.Properties.Add("Count", new EntityProperty(expectedLongCountValue));
    entity.Properties.Add(TableStorageScaleMetricsRepository.MonitorIdPropertyName, EntityProperty.GeneratePropertyForString(monitor1.Descriptor.Id));
    batch.Add(TableOperation.Insert(entity));

    await _repository.ExecuteBatchSafeAsync(batch);

    // push a long max value through serialization
    var metricsMap = new Dictionary<IScaleMonitor, ScaleMetrics>();
    metricsMap.Add(monitor1, new TestScaleMetrics1 { Count = long.MaxValue });
    await _repository.WriteMetricsAsync(metricsMap);

    // add one more
    metricsMap = new Dictionary<IScaleMonitor, ScaleMetrics>();
    metricsMap.Add(monitor1, new TestScaleMetrics1 { Count = 12345 });
    await _repository.WriteMetricsAsync(metricsMap);

    // read the metrics back
    var result = await _repository.ReadMetricsAsync(monitors);
    Assert.Equal(1, result.Count);
    var monitorMetricsList = result[monitor1];
    Assert.Equal(4, monitorMetricsList.Count);

    // verify the explicitly written int record was read correctly
    var currSample = (TestScaleMetrics1)monitorMetricsList[0];
    Assert.Equal(expectedIntCountValue, currSample.Count);

    // verify the explicitly written long record was read correctly
    currSample = (TestScaleMetrics1)monitorMetricsList[1];
    Assert.Equal(expectedLongCountValue, currSample.Count);

    // verify the final roundtripped values
    currSample = (TestScaleMetrics1)monitorMetricsList[2];
    Assert.Equal(long.MaxValue, currSample.Count);
    currSample = (TestScaleMetrics1)monitorMetricsList[3];
    Assert.Equal(12345, currSample.Count);
}
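// The mixed int/long rows above deserialize cleanly because the reader
// normalizes the "Count" property regardless of whether it was persisted as
// Edm.Int32 (old format) or Edm.Int64 (new format). A minimal sketch of that
// normalization (hypothetical helper; the repository's actual deserialization
// may differ):
private static long ReadCountProperty(DynamicTableEntity entity)
{
    EntityProperty property = entity.Properties["Count"];
    switch (property.PropertyType)
    {
        case EdmType.Int32:
            return property.Int32Value.Value; // legacy rows written as int
        case EdmType.Int64:
            return property.Int64Value.Value; // current rows written as long
        default:
            throw new InvalidOperationException($"Unexpected Count property type: {property.PropertyType}");
    }
}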