/// <summary>
/// Creates a logger on the board for each producer in <paramref name="producers"/>, draining the queue.
/// Producers wider than <c>LOG_ENTRY_SIZE</c> bytes are split across multiple log ids, one per slice.
/// </summary>
/// <param name="producers">Data producers to attach loggers to; consumed (dequeued) by this call.</param>
/// <returns>Queue of the successfully created loggers, in creation order.</returns>
/// <exception cref="TimeoutException">
/// Rethrown when the board does not return a logger id within <c>bridge.TimeForResponse</c> ms; all
/// loggers created so far — including the partially configured one — are removed from the board first.
/// </exception>
internal async Task<Queue<LoggedDataConsumer>> CreateLoggersAsync(Queue<DataTypeBase> producers) {
    LoggedDataConsumer nextLogger = null;
    var result = new Queue<LoggedDataConsumer>();

    try {
        while (producers.Count != 0) {
            nextLogger = new LoggedDataConsumer(producers.Dequeue());
            byte[] eventConfig = nextLogger.source.eventConfig;

            // Number of log ids needed: one per LOG_ENTRY_SIZE-byte slice of the data attributes.
            var nReqLogIds = (byte)((nextLogger.source.attributes.length() - 1) / LOG_ENTRY_SIZE + 1);
            int remainder = nextLogger.source.attributes.length();

            for (byte i = 0; i < nReqLogIds; i++, remainder -= LOG_ENTRY_SIZE) {
                int entrySize = Math.Min(remainder, LOG_ENTRY_SIZE),
                    entryOffset = LOG_ENTRY_SIZE * i + nextLogger.source.attributes.offset;

                byte[] command = new byte[6];
                command[0] = (byte)LOGGING;
                command[1] = TRIGGER;
                // Trailing byte packs (entrySize - 1) into the upper 3 bits, entryOffset into the lower 5.
                command[2 + eventConfig.Length] = (byte)(((entrySize - 1) << 5) | entryOffset);
                Array.Copy(eventConfig, 0, command, 2, eventConfig.Length);

                var id = await createLoggerTask.Execute("Did not receive logger id within {0}ms", bridge.TimeForResponse,
                    () => bridge.sendCommand(command));
                nextLogger.addId(id);
            }

            nextLogger.register(dataLoggers);
            result.Enqueue(nextLogger);
        }
        return result;
    } catch (TimeoutException) {
        // Roll back every logger created so far, plus the partially configured one.
        while (result.Count != 0) {
            result.Dequeue().remove(bridge, true);
        }
        nextLogger?.remove(bridge, true);
        // Bare 'throw' preserves the original stack trace; the original 'throw e' reset it (CA2200).
        throw;
    }
}
// Legacy (callback/timer based) logger creation path: processes ONE producer from
// 'pendingProducers' per invocation, sends one TRIGGER command per required log id, then arms
// a Timer that faults 'createLoggerTask' with a TimeoutException if the board's responses do
// not arrive before nReqLogIds * COMMS_TIMEOUT ms elapse. When the producer queue is empty,
// completes 'createLoggerTask' with the accumulated 'successfulLoggers'.
// NOTE(review): mutates instance fields declared elsewhere in this class (nextLogger,
// nReqLogIds, timeoutFuture, pendingProducers, successfulLoggers, createLoggerTask) — the
// timer callback captures them, so statement order here matters; presumably a response
// handler elsewhere cancels 'timeoutFuture' and re-invokes this method — confirm.
private void createLogger() {
    if (pendingProducers.Count != 0) {
        nextLogger = new LoggedDataConsumer(pendingProducers.Dequeue());
        byte[] eventConfig = nextLogger.source.eventConfig;
        // One log id per LOG_ENTRY_SIZE-byte slice of the producer's data attributes.
        nReqLogIds = (byte)((nextLogger.source.attributes.length() - 1) / LOG_ENTRY_SIZE + 1);
        int remainder = nextLogger.source.attributes.length();
        for (byte i = 0; i < nReqLogIds; i++, remainder -= LOG_ENTRY_SIZE) {
            int entrySize = Math.Min(remainder, LOG_ENTRY_SIZE), entryOffset = LOG_ENTRY_SIZE * i + nextLogger.source.attributes.offset;
            byte[] command = new byte[6];
            command[0] = (byte)LOGGING;
            command[1] = TRIGGER;
            // Trailing byte packs (entrySize - 1) into the upper 3 bits, entryOffset into the lower 5.
            command[2 + eventConfig.Length] = (byte)(((entrySize - 1) << 5) | entryOffset);
            Array.Copy(eventConfig, 0, command, 2, eventConfig.Length);
            bridge.sendCommand(command);
        }
        // On timeout: remove every logger created so far (and the in-flight one) from the
        // board, drop the remaining work, and fault the pending task.
        timeoutFuture = new Timer(e => {
            while (successfulLoggers.Count != 0) {
                successfulLoggers.Dequeue().remove(bridge, true);
            }
            nextLogger.remove(bridge, true);
            pendingProducers = null;
            createLoggerTask.SetException(new TimeoutException("Creating logger timed out"));
        }, null, nReqLogIds * MetaWearBoard.COMMS_TIMEOUT, Timeout.Infinite);
    } else {
        // All producers processed; hand the created loggers back to the awaiter.
        createLoggerTask.SetResult(successfulLoggers);
    }
}
/// <summary>
/// Queries the board for its active log entries and reconstructs the corresponding
/// <see cref="LoggedDataConsumer"/> objects, matching each entry back to a known data producer
/// (rebuilding data-processor chains when the entry points at the DATA_PROCESSOR module).
/// </summary>
/// <returns>Active loggers, ordered by their lowest log id, with duplicates collapsed.</returns>
internal async Task<ICollection<LoggedDataConsumer>> queryActiveLoggersAsync() {
    dataLoggers.Clear();
    // How many more log entries are still expected for a multi-entry (wide) source.
    var nRemainingLoggers = new Dictionary<DataTypeBase, byte>();
    var placeholder = new Dictionary<Tuple<byte, byte, byte>, byte>();
    ICollection<DataTypeBase> producers = bridge.aggregateDataSources();

    // Best-effort match of a (module, register, id) triple + slice to a known producer or component.
    DataTypeBase guessLogSource(Tuple<byte, byte, byte> key, byte offset, byte length) {
        List<DataTypeBase> possible = new List<DataTypeBase>();
        foreach (DataTypeBase it in producers) {
            if (it.eventConfig[0] == key.Item1 && it.eventConfig[1] == key.Item2 && it.eventConfig[2] == key.Item3) {
                possible.Add(it);
                if (it.components != null) {
                    possible.AddRange(it.components);
                }
            }
        }

        // A source wider than 4 bytes is split across multiple log entries.
        DataTypeBase original = null;
        bool multipleEntries = false;
        foreach (DataTypeBase it in possible) {
            if (it.attributes.length() > 4) {
                original = it;
                multipleEntries = true;
            }
        }

        if (multipleEntries) {
            if (offset == 0 && length > LOG_ENTRY_SIZE) {
                return original;
            }
            if (!placeholder.ContainsKey(key)) {
                // First slice of a wide source: start accumulating its covered length.
                if (length == LOG_ENTRY_SIZE) {
                    placeholder.Add(key, length);
                    return original;
                }
            } else {
                placeholder[key] += length;
                if (placeholder[key] == original.attributes.length()) {
                    placeholder.Remove(key);
                }
                return original;
            }
        }

        // Fall back to an exact offset/length match on a single-entry source.
        foreach (DataTypeBase it in possible) {
            if (it.attributes.offset == offset && it.attributes.length() == length) {
                return it;
            }
        }
        return null;
    }

    for (byte i = 0; i < bridge.lookupModuleInfo(LOGGING).extra[0]; i++) {
        var response = await queryLogConfigTask.Execute("Querying log configuration (id = " + i + ") timed out after {0}ms", bridge.TimeForResponse,
            () => bridge.sendCommand(new byte[] { (byte)LOGGING, Util.setRead(TRIGGER), i }));

        // Responses longer than 2 bytes describe an active log entry.
        if (response.Length > 2) {
            // response[5] packs offset (lower 5 bits) and (length - 1) (bits 5-6).
            byte offset = (byte)(response[5] & 0x1f), length = (byte)(((response[5] >> 5) & 0x3) + 1);
            var source = guessLogSource(Tuple.Create(response[2], response[3], response[4]), offset, length);

            var dataprocessor = bridge.GetModule<IDataProcessor>() as DataProcessor;
            var state = Util.clearRead(response[3]) == DataProcessor.STATE;
            if (response[2] == (byte)DATA_PROCESSOR && (response[3] == DataProcessor.NOTIFY || state)) {
                // The entry logs processed data: pull the processor chain from the board and
                // rebuild any processors not already tracked locally.
                var chain = await dataprocessor.pullChainAsync(response[4]);
                var first = chain.First();
                var type = first.source != null ?
                    guessLogSource(Tuple.Create(first.source[0], first.source[1], first.source[2]), first.offset, first.length) :
                    dataprocessor.activeProcessors[first.id].Item2.source;

                while (chain.Count() != 0) {
                    var current = chain.Pop();
                    var currentConfigObj = DataProcessorConfig.from(bridge.getFirmware(), bridge.lookupModuleInfo(DATA_PROCESSOR).revision, current.config);
                    var next = type.transform(currentConfigObj);

                    next.Item1.eventConfig[2] = current.id;
                    if (next.Item2 != null) {
                        next.Item2.eventConfig[2] = current.id;
                    }
                    if (!dataprocessor.activeProcessors.ContainsKey(current.id)) {
                        dataprocessor.activeProcessors.Add(current.id, Tuple.Create(next.Item2, new NullEditor(currentConfigObj, type, bridge) as EditorImplBase));
                    }
                    type = next.Item1;
                }
                source = state ? dataprocessor.lookupProcessor(response[4]).Item1 : type;
            }

            if (!nRemainingLoggers.ContainsKey(source) && source.attributes.length() > LOG_ENTRY_SIZE) {
                // Cast BEFORE dividing so Ceiling sees the fractional part; the original cast the
                // already-truncated integer quotient, under-counting entries for non-multiple lengths.
                nRemainingLoggers.Add(source, (byte)Math.Ceiling(source.attributes.length() / (float)LOG_ENTRY_SIZE));
            }

            // Reuse an existing logger for the same source when one is already registered.
            LoggedDataConsumer logger = null;
            foreach (LoggedDataConsumer it in dataLoggers.Values) {
                if (it.source.eventConfig.SequenceEqual(source.eventConfig) && it.source.attributes.Equals(source.attributes)) {
                    logger = it;
                    break;
                }
            }
            if (logger == null || (offset != 0 && !nRemainingLoggers.ContainsKey(source))) {
                logger = new LoggedDataConsumer(source);
            }
            logger.addId(i);
            dataLoggers.Add(i, logger);

            if (nRemainingLoggers.TryGetValue(source, out var count)) {
                // Decrement in int: C#'s 'byte' is unsigned, so the original
                // '(byte)(count - 1) < 0' check could never fire (the value wrapped to 255
                // instead) and exhausted sources were never removed from the map.
                int remaining = count - 1;
                if (remaining < 0) {
                    nRemainingLoggers.Remove(source);
                } else {
                    nRemainingLoggers[source] = (byte)remaining;
                }
            }
        }
    }

    // Return loggers ordered by their smallest log id, each logger listed once.
    List<LoggedDataConsumer> orderedLoggers = new List<LoggedDataConsumer>();
    foreach (var k in dataLoggers.Keys.OrderBy(d => d)) {
        if (!orderedLoggers.Contains(dataLoggers[k])) {
            orderedLoggers.Add(dataLoggers[k]);
        }
    }
    return orderedLoggers;
}