/// <summary>
/// Initializes a new instance of the <see cref="SimulationViewModel"/> class.
/// </summary>
/// <param name="runtime">The runtime service used to interact with the host application.</param>
public SimulationViewModel(IRuntimeService runtime)
{
    Runtime = runtime;

    // Seed the backing model with the plug-in version so new simulations
    // are stamped with the version that created them.
    var model = new Models.Simulation();
    model.Version = _pluginVersion;
    Model = model;
}
/// <summary>
/// Starts a WITSML 1.3.1 log simulation that periodically appends generated
/// log data to the configured log until cancellation is requested.
/// </summary>
/// <param name="model">The simulation configuration (well, wellbore, log identifiers and channels).</param>
/// <param name="token">Token used to stop the simulation loop.</param>
/// <param name="messages">The view model that receives simulation messages.</param>
/// <param name="interval">The delay, in milliseconds, between store updates.</param>
public override async Task Start(Models.Simulation model, CancellationToken token, TextEditorViewModel messages, int interval = 5000)
{
    _messages = messages;

    var generator = new Log131Generator();
    var index = 0d;

    // Query template identifying the log header to read back from the store.
    var logList = new Log()
    {
        UidWell = model.WellUid,
        NameWell = model.WellName,
        UidWellbore = model.WellboreUid,
        NameWellbore = model.WellboreName,
        Uid = model.LogUid,
        Name = model.LogName,
        IndexType = Convert(model.LogIndexType)
    }
    .AsList();

    while (!token.IsCancellationRequested)
    {
        var result = Connection.Read(new LogList() { Log = logList }, OptionsIn.ReturnElements.HeaderOnly);

        if (!result.Log.Any())
        {
            Runtime.Invoke(() => Runtime.ShowError("Log not found."));
            break;
        }

        var log = result.Log[0];

        // Resume numeric indexes from the end of any existing log data.
        if (log.IndexType != LogIndexType.datetime && log.EndIndex != null)
        {
            index = log.EndIndex.Value;
        }

        log.Direction = LogIndexDirection.increasing;
        log.IndexCurve = new IndexCurve(model.Channels.Select(x => x.ChannelName).FirstOrDefault());
        log.LogCurveInfo = model.Channels.Select(ToLogCurveInfo).ToList();

        // Generate the next batch of rows and remember where the index left off.
        index = generator.GenerateLogData(log, startIndex: index, interval: 0.1);

        Connection.Update(result);

        try
        {
            // BUG FIX: pass the cancellation token to Task.Delay so the
            // simulation stops promptly instead of waiting out the full
            // interval (matches the 1.4.1 simulation's behavior).
            await Task.Delay(interval, token);
        }
        catch (TaskCanceledException)
        {
            break;
        }
    }
}
// Remove every partition previously written for the simulation; the device
// count determines how many partitions must exist given the partition size.
private async Task DeletePartitionsAsync(Models.Simulation sim, int deviceCount)
{
    var partitionsToDelete = (int)Math.Ceiling(deviceCount / (double)this.maxPartitionSize);

    this.log.Debug("Deleting incomplete partitions", () => new { SimulationId = sim.Id, this.maxPartitionSize, partitionCount = partitionsToDelete });

    // Partition ids are 1-based.
    var partitionNumber = 1;
    while (partitionNumber <= partitionsToDelete)
    {
        await this.partitionsStorage.DeleteAsync(this.GetPartitionId(sim.Id, partitionNumber));
        partitionNumber++;
    }
}
/// <summary>
/// Splits the full list of devices into small chunks and saves each chunk as
/// a partition. A partition may span multiple device models; each device ends
/// up in exactly one partition.
/// </summary>
/// <param name="sim">The simulation the partitions belong to.</param>
/// <param name="allDeviceIdsByModel">All device IDs, grouped by device model ID.</param>
private async Task CreatePartitionsInternalAsync(Models.Simulation sim, Dictionary<string, List<string>> allDeviceIdsByModel)
{
    var deviceCount = allDeviceIdsByModel.Select(x => x.Value.Count).Sum();

    this.log.Debug("Creating partitions for the simulation...",
        () => new { Simulation = sim.Id, deviceCount, this.maxPartitionSize });

    var partitionCount = 0;
    var currentSize = 0;

    // A partition contains multiple devices, organized by model ID
    var partitionContent = new Dictionary<string, List<string>>();

    foreach (KeyValuePair<string, List<string>> deviceIdsInModel in allDeviceIdsByModel)
    {
        var modelId = deviceIdsInModel.Key;

        if (!partitionContent.ContainsKey(modelId))
        {
            partitionContent[modelId] = new List<string>();
        }

        foreach (var deviceId in deviceIdsInModel.Value)
        {
            partitionContent[modelId].Add(deviceId);
            currentSize++;

            if (currentSize < this.maxPartitionSize)
            {
                continue;
            }

            partitionCount++;
            await this.CreatePartitionAsync(sim.Id, partitionCount, partitionContent);

            // BUG FIX: clear the device lists of *every* model, not just the
            // current one. A partition can contain leftover devices from
            // previously processed models; clearing only the current model's
            // list caused those devices to be duplicated into every
            // subsequent partition.
            foreach (var devices in partitionContent.Values)
            {
                devices.Clear();
            }

            currentSize = 0;
        }
    }

    // Save last data
    if (currentSize > 0)
    {
        partitionCount++;
        await this.CreatePartitionAsync(sim.Id, partitionCount, partitionContent);
    }

    this.log.Debug("Partitions created", () => new { Simulation = sim.Id, partitionCount, this.maxPartitionSize });
}
/// <summary>
/// Starts the ETP simulation: opens a web socket session, wires up the
/// channel streaming producer handlers and keeps the session alive until
/// cancellation is requested.
/// </summary>
/// <param name="model">The simulation configuration.</param>
/// <param name="token">Token used to stop the simulation.</param>
/// <param name="interval">The polling delay, in milliseconds, between cancellation checks.</param>
public override async Task Start(Models.Simulation model, CancellationToken token, int interval = 5000)
{
    Model = model;
    Simulator = new Etp12Simulator(model);

    _log.Debug($"Establishing ETP connection for {Model.EtpConnection}");

    using (Client = Model.EtpConnection.CreateEtpClient(Model.Name, Model.Version))
    {
        Client.Register<IChannelStreamingProducer, ChannelStreamingProducerHandler>();

        var producer = Client.Handler<IChannelStreamingProducer>();
        producer.OnStartStreaming += OnStartStreaming;
        producer.OnStopStreaming += OnStopStreaming;

        Client.SocketClosed += OnClientSocketClosed;
        Client.Output = Log;

        var opened = await Client.OpenAsync();
        if (!opened)
        {
            Log("Error opening web socket connection");
            return;
        }

        // Keep the session alive, waking periodically until cancelled.
        while (!token.IsCancellationRequested)
        {
            try
            {
                await Task.Delay(interval, token);
            }
            catch (TaskCanceledException)
            {
                break;
            }
        }

        TaskRunner.Stop();
        Client.Handler<ICoreClient>().CloseSession("Streaming stopped.");
    }
}
/// <summary>
/// Starts the simulation and runs it until the supplied token requests cancellation.
/// </summary>
/// <param name="model">The simulation configuration.</param>
/// <param name="token">Token used to stop the simulation.</param>
/// <param name="interval">The delay, in milliseconds, between simulation updates.</param>
/// <returns>A task that completes when the simulation stops.</returns>
public abstract Task Start(Models.Simulation model, CancellationToken token, int interval = 5000);
/// <summary>
/// Initializes a new instance of the <see cref="SimulationChannelStreamingProvider"/> class.
/// </summary>
/// <param name="simulation">The simulation configuration to stream channels for.</param>
public SimulationChannelStreamingProvider(Models.Simulation simulation)
{
    // Simulated producers always act as simple streamers.
    IsSimpleStreamer = true;
    Simulation = simulation;
}
/// <summary>
/// Starts the simulation and runs it until the supplied token requests cancellation.
/// </summary>
/// <param name="model">The simulation configuration.</param>
/// <param name="token">Token used to stop the simulation.</param>
/// <param name="messages">The view model that receives simulation messages.</param>
/// <param name="interval">The delay, in milliseconds, between simulation updates.</param>
/// <returns>A task that completes when the simulation stops.</returns>
public abstract Task Start(Models.Simulation model, CancellationToken token, TextEditorViewModel messages, int interval = 5000);
/// <summary>
/// Initializes a new instance of the <see cref="SimulationDiscoveryProvider"/> class.
/// </summary>
/// <param name="simulation">The simulation whose resources can be discovered.</param>
public SimulationDiscoveryProvider(Models.Simulation simulation)
{
    Simulation = simulation;
}
/// <summary>
/// Initializes a new instance of the <see cref="Etp11Simulator"/> class.
/// </summary>
/// <param name="model">The simulation configuration driving the simulator.</param>
public Etp11Simulator(Models.Simulation model)
{
    Model = model;
}
/// <summary>
/// Starts a WITSML 1.4.1 log simulation that periodically appends generated
/// log data (one row per elapsed second) to the configured log until
/// cancellation is requested.
/// </summary>
/// <param name="model">The simulation configuration (well, wellbore, log identifiers and channels).</param>
/// <param name="token">Token used to stop the simulation loop.</param>
/// <param name="messages">The view model that receives simulation messages.</param>
/// <param name="interval">The delay, in milliseconds, between store updates.</param>
/// <param name="increment">Optional index step: used as the depth step for depth logs
/// (default 0.1) and as the row spacing in milliseconds for time logs (default 1000).</param>
public override async Task Start(Models.Simulation model, CancellationToken token, TextEditorViewModel messages, int interval = 5000, double? increment = null)
{
    _messages = messages;
    _counter = 0;
    Model = model;

    var generator = new Log141Generator();
    var index = 0d;

    var log = GetLogToUpdate();
    if (log == null)
    {
        Runtime.Invoke(() => Runtime.ShowError("Log not found."));
        return;
    }

    // Resume numeric indexes from the end of any existing log data.
    if (log.IndexType != LogIndexType.datetime && log.EndIndex != null)
    {
        index = log.EndIndex.Value;
    }

    // Step downward for decreasing-direction logs.
    var depthIncrement = increment ?? 0.1;
    if (log.Direction.HasValue && log.Direction == LogIndexDirection.decreasing)
    {
        depthIncrement *= -1;
    }

    var logCurveInfo = model.Channels.Select(ToLogCurveInfo).ToList();

    // Start one interval in the past so the first iteration produces rows.
    // NOTE(review): increment is interpreted as a depth step above and as
    // milliseconds here — confirm this dual use is intended.
    var previousTimestamp = (DateTimeOffset.UtcNow - TimeSpan.FromMinutes(model.DateTimeIndexOffsetInMinutes) - TimeSpan.FromMilliseconds(interval)).TruncateToSeconds();
    var timeIncrement = TimeSpan.FromMilliseconds(increment ?? 1000);

    while (true)
    {
        // Times the whole iteration so the delay below can compensate
        // for how long the store update took.
        var swOuter = new Stopwatch();
        swOuter.Start();

        if (token.IsCancellationRequested)
        {
            break;
        }

        _counter++;

        var currentTimestamp = (DateTimeOffset.UtcNow - TimeSpan.FromMinutes(model.DateTimeIndexOffsetInMinutes)).TruncateToSeconds();

        // One generated row per elapsed second since the last update.
        var rows = (int)(currentTimestamp - previousTimestamp).TotalSeconds;

        List<string> indexes;
        if (log.IndexType == LogIndexType.datetime)
        {
            indexes = generator.GenerateDateTimeIndexes(rows, previousTimestamp, timeIncrement);
        }
        else
        {
            indexes = generator.GenerateNumericIndexes(rows, index, depthIncrement);
            index += depthIncrement * rows;
        }

        var logData = generator.GenerateLogData(logCurveInfo, indexes, Model.GenerateNulls);

        // Create minimal log object
        var update = new LogList
        {
            Log = new List<Log>
            {
                new Log
                {
                    Uid = Model.LogUid,
                    UidWell = Model.WellUid,
                    UidWellbore = Model.WellboreUid,
                    IndexType = log.IndexType,
                    LogData = new List<LogData>() { logData }
                }
            }
        };

        // Times only the UpdateInStore call, for separate reporting.
        var swInner = new Stopwatch();
        try
        {
            swInner.Start();
            Connection.Update(update);
            swInner.Stop();
            swOuter.Stop();
            Log($"Update #{_counter} was successful. Added {rows} rows. Time taken : {swOuter.ElapsedMilliseconds} ms. UpdateInStore time : {swInner.ElapsedMilliseconds} ms.");
        }
        catch (Exception ex)
        {
            swInner.Stop();
            swOuter.Stop();
            Log($"Update #{_counter} was unsuccessful. Time taken : {swOuter.ElapsedMilliseconds} ms. UpdateInStore time : {swInner.ElapsedMilliseconds} ms.\n{ex.Message}");
        }

        previousTimestamp = currentTimestamp;

        // Compensate for the time it took to send the update in store
        var delayInterval = swOuter.ElapsedMilliseconds > interval ? 0 : interval - swOuter.ElapsedMilliseconds;
        try
        {
            await Task.Delay((int)delayInterval, token);
        }
        catch (TaskCanceledException)
        {
            break;
        }
    }
}