// Benchmark/test harness: wires up TSDB storage backends from configuration, starts a
// background write batcher, then pumps synthetic entries forever (one batch per second).
// NOTE(review): a second Program(IConfiguration) constructor with an identical signature
// appears later in this file; both cannot compile in one class — confirm which is current.
public Program(IConfiguration config)
{
#if !COREFX
    // High-throughput HTTP tuning for table-storage traffic (full framework only).
    ServicePointManager.DefaultConnectionLimit = 500;
    ServicePointManager.Expect100Continue = false;
    ServicePointManager.UseNagleAlgorithm = false;
#endif

    var ats = config.GetSection("AtsStorage");
    var sql = config.GetSection("SqlStorage");
    var redis = config.GetSection("RedisCache"); // read but not wired up below — presumably for redis publication; confirm before removing

    // 80 synthetic data sources, each emitting one entry every 10 ms from 'startTime'.
    var startTime = DateTime.UtcNow;
    _dataSources = new List<DataSource>();
    for (int i = 0; i < 80; i++)
    {
        _dataSources.Add(new DataSource(
            new BasicKey { Id = Guid.NewGuid(), Sampling = Sampling.Daily },
            startTime,
            TimeSpan.FromMilliseconds(10)));
    }

    // Dynamic Azure-table storage with yearly partitioning/tables.
    var dats = new AtsDynamicStorage<BasicKey, BasicEntry>(
        "VatsTables1",
        ats.GetSection("ConnectionString").Value,
        new ConcurrencyControl(
            AtsDynamicStorage<BasicKey, BasicEntry>.DefaultReadParallelism,
            AtsDynamicStorage<BasicKey, BasicEntry>.DefaultWriteParallelism),
        new YearlyPartitioningProvider<BasicKey>(),
        new YearlyTableProvider(),
        this);

    // Dynamic SQL storage with modest fixed parallelism.
    var dsql = new SqlDynamicStorage<BasicKey, BasicEntry>(
        "SqlTable3",
        sql.GetSection("ConnectionString").Value,
        new ConcurrencyControl(5, 5),
        this);

    // Routing: SQL handles data from 'switchDate' onward, ATS handles everything before it.
    var switchDate = new DateTime(2016, 10, 20, 18, 25, 0, DateTimeKind.Utc);
    var selector = new TestDynamicStorageSelector(new StorageSelection<BasicKey, BasicEntry, IDynamicStorage<BasicKey, BasicEntry>>[]
    {
        new StorageSelection<BasicKey, BasicEntry, IDynamicStorage<BasicKey, BasicEntry>>(dsql, switchDate, null),
        new StorageSelection<BasicKey, BasicEntry, IDynamicStorage<BasicKey, BasicEntry>>(dats, null, switchDate),
    });

    // Volume storage plus a bounded local temporary-file cache.
    var vats = new AtsVolumeStorage<BasicKey, BasicEntry>(
        "DatsTable1",
        ats.GetSection("ConnectionString").Value,
        new ConcurrencyControl(
            AtsVolumeStorage<BasicKey, BasicEntry>.DefaultReadParallelism,
            AtsVolumeStorage<BasicKey, BasicEntry>.DefaultWriteParallelism),
        new YearlyPartitioningProvider<BasicKey>(),
        this);

    var tfs = new TemporaryFileStorage<BasicKey, BasicEntry>(
        @"C:\tsdb\cache",
        TemporaryFileStorage<BasicKey, BasicEntry>.DefaultMaxFileSize,
        TemporaryFileStorage<BasicKey, BasicEntry>.DefaultMaxStorageSize,
        this);

    var client = new TsdbClient<BasicKey, BasicEntry>(selector, vats, tfs, this);

    // Batcher flushes every 5 s or at 20000 queued entries; no publication.
    // Handle() is its long-running pump, pushed to the thread pool.
    var batcher = new TsdbWriteBatcher<BasicKey, BasicEntry>(client, PublicationType.None, TimeSpan.FromSeconds(5), 20000, this);
    ThreadPool.QueueUserWorkItem(obj => batcher.Handle());

    Console.WriteLine("Info: Writing entries...");

    // Intentional infinite loop: this harness runs until the process is killed.
    while (true)
    {
        var now = DateTime.UtcNow;
        foreach (var ds in _dataSources)
        {
            var serie = ds.GetEntries(now);
            batcher.Write(serie);
        }

        Thread.Sleep(1000);
    }
}
// Benchmark/test harness variant: wires up static (non-dynamic) TSDB storages, writes
// synthetic entries for one hour via a batcher, then reads a window around the storage
// switch date back and prints the result.
// NOTE(review): another Program(IConfiguration) constructor with an identical signature
// appears earlier in this file; both cannot compile in one class — confirm which is current.
public Program(IConfiguration config)
{
#if !COREFX
    // High-throughput HTTP tuning for table-storage traffic (full framework only).
    ServicePointManager.DefaultConnectionLimit = 500;
    ServicePointManager.Expect100Continue = false;
    ServicePointManager.UseNagleAlgorithm = false;
#endif

    var ats = config.GetSection("AtsStorage");
    var sql = config.GetSection("SqlStorage");
    var redis = config.GetSection("RedisCache"); // read but not wired up below — presumably for redis publication; confirm before removing

    // 15 synthetic data sources, each emitting one entry every 1 ms from 'startTime'.
    var startTime = DateTime.UtcNow;
    _dataSources = new List<DataSource>();
    for (int i = 0; i < 15; i++)
    {
        _dataSources.Add(new DataSource(
            new BasicKey { Id = Guid.NewGuid(), Sampling = Sampling.Daily },
            startTime,
            TimeSpan.FromMilliseconds(1)));
    }

    // Azure-table storage with yearly partitioning and a named yearly table provider.
    var dats = new AtsStorage<BasicKey, BasicEntry>(
        ats.GetSection("ConnectionString").Value,
        new ConcurrencyControl(
            AtsStorage<BasicKey, BasicEntry>.DefaultReadParallelism,
            AtsStorage<BasicKey, BasicEntry>.DefaultWriteParallelism),
        new YearlyPartitioningProvider<BasicKey>(),
        new YearlyTableProvider<BasicKey>("DatsTables13"),
        this);

    // SQL storage with modest fixed parallelism.
    var dsql = new SqlStorage<BasicKey, BasicEntry>(
        "SqlTable3",
        sql.GetSection("ConnectionString").Value,
        new ConcurrencyControl(5, 5),
        this);

    // Routing: ATS handles data from 'switchDate' onward, SQL handles everything before it.
    var switchDate = new DateTime(2018, 9, 27, 15, 40, 0, DateTimeKind.Utc);
    var selector = new TestStorageSelector(new StorageSelection<BasicKey, BasicEntry, IStorage<BasicKey, BasicEntry>>[]
    {
        new StorageSelection<BasicKey, BasicEntry, IStorage<BasicKey, BasicEntry>>(dats, switchDate, null),
        new StorageSelection<BasicKey, BasicEntry, IStorage<BasicKey, BasicEntry>>(dsql, null, switchDate),
    });

    // NOTE(review): 'vats' is constructed but never passed to the client below — retained
    // in case construction has side effects (e.g. table creation); confirm and remove if not.
    var vats = new AtsVolumeStorage<BasicKey, BasicEntry>(
        "VatsTable1",
        ats.GetSection("ConnectionString").Value,
        new ConcurrencyControl(
            AtsVolumeStorage<BasicKey, BasicEntry>.DefaultReadParallelism,
            AtsVolumeStorage<BasicKey, BasicEntry>.DefaultWriteParallelism),
        new YearlyPartitioningProvider<BasicKey>(),
        this);

    var tfs = new TemporaryFileStorage<BasicKey, BasicEntry>(
        @"C:\tsdb\cache",
        TemporaryFileStorage<BasicKey, BasicEntry>.DefaultMaxFileSize,
        TemporaryFileStorage<BasicKey, BasicEntry>.DefaultMaxStorageSize,
        this);

    var typeStorage = new TestTypedKeyStorage(_dataSources.Select(x => x.Id));
    var client = new TsdbClient<BasicKey, BasicEntry>(selector, tfs, this);

    // NOTE(review): 'aggregationFunctions' is never used after construction here — it backed
    // sample grouping queries that were removed; confirm before deleting the construction.
    var aggregationFunctions = new AggregationTsdbClient<BasicKey, BasicEntry, MeasureType>(dats, typeStorage, this);

    // Batcher flushes every 5 s or at 20000 queued entries; publishes locally only.
    // Handle() is its long-running pump, pushed to the thread pool.
    var batcher = new TsdbWriteBatcher<BasicKey, BasicEntry>(client, PublicationType.None, Publish.Locally, false, TimeSpan.FromSeconds(5), 20000, this);
    ThreadPool.QueueUserWorkItem(obj => batcher.Handle());

    Console.WriteLine("Info: Writing entries...");

    // Write phase: one batch per second for 3600 iterations (~1 hour).
    for (int i = 0; i < 3600; i++)
    {
        var now = DateTime.UtcNow;
        foreach (var ds in _dataSources)
        {
            var serie = ds.GetEntries(now);
            batcher.Write(serie);
        }

        Thread.Sleep(1000);
    }

    // Read-back phase: fetch a 10-second window straddling the storage switch date for a
    // fixed key. NOTE(review): sync-over-async (.Result) — acceptable in this console
    // harness since constructors cannot be async, but it wraps failures in
    // AggregateException; consider moving this flow to an async entry point.
    var from = switchDate.AddSeconds(-5);
    var to = switchDate.AddSeconds(5);
    var id = DefaultKeyConverter<Guid>.Current.ConvertAsync("fsmVxkSDCkKOcdV9t52R4A").Result;
    var result = client.ReadAsync(new BasicKey { Id = id, Sampling = Sampling.Daily }, from, to).Result;
    Console.WriteLine(result);
}