public async Task PullConfigAsync() {
    var response = await readValueTask.Execute("Did not receive sensor fusion config within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)SENSOR_FUSION, Util.setRead(MODE) }));
    mode = (Mode)response[2];
}
public async Task<BleAdvertisementConfig> ReadBleAdConfigAsync() {
    var response = await readAdConfigTask.Execute("Did not receive device name within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)SETTINGS, Util.setRead(DEVICE_NAME) }));
    var deviceName = Encoding.ASCII.GetString(response, 2, response.Length - 2);

    ushort interval;
    byte timeout;
    response = await readAdConfigTask.Execute("Did not receive ad interval within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)SETTINGS, Util.setRead(AD_INTERVAL) }));
    if (bridge.lookupModuleInfo(SETTINGS).revision >= CONN_PARAMS_REVISION) {
        // newer firmware reports the interval in AD_INTERVAL_STEP units
        interval = (ushort)(BitConverter.ToUInt16(response, 2) * AD_INTERVAL_STEP);
        timeout = response[4];
    } else {
        interval = BitConverter.ToUInt16(response, 2);
        timeout = response[4];
    }

    response = await readAdConfigTask.Execute("Did not receive scan response within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)SETTINGS, Util.setRead(SCAN_RESPONSE) }));
    var scanResponse = new byte[response.Length - 2];
    Array.Copy(response, 2, scanResponse, 0, scanResponse.Length);

    return new BleAdvertisementConfig(deviceName, interval, timeout, scanResponse);
}
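// Decoding illustration (not part of the SDK code above): how the ad-interval payload consumed by
// ReadBleAdConfigAsync unpacks. The sample bytes and the AD_INTERVAL_STEP value of 0.625 ms are
// assumptions chosen for round numbers; bytes [2..3] hold the little-endian interval and byte [4]
// the timeout.
static void DecodeAdIntervalExample() {
    byte[] response = { 0x11, 0x82, 0x80, 0x02, 0x3C };   // hypothetical SETTINGS / AD_INTERVAL reply
    const float AD_INTERVAL_STEP = 0.625f;                 // assumed step size (BLE advertising units)
    ushort raw = BitConverter.ToUInt16(response, 2);       // 0x0280 = 640
    byte timeout = response[4];                            // 0x3C = 60
    Console.WriteLine($"interval = {raw * AD_INTERVAL_STEP} ms, timeout = {timeout} s");   // 400 ms, 60 s
}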
public async Task PullConfigAsync() {
    byte[] response = await readConfigTask.Execute("Did not receive gyro config within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)GYRO, Util.setRead(CONFIG) }));
    Array.Copy(response, 2, gyrDataConfig, 0, gyrDataConfig.Length);
}
public override async Task PullConfigAsync() {
    byte[] response = await readConfigTask.Execute("Did not receive accelerometer config within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)ACCELEROMETER, Util.setRead(DATA_CONFIG) }));
    Array.Copy(response, 2, accDataConfig, 0, accDataConfig.Length);
}
internal async Task<Queue<byte>> queueEvents(Queue<Tuple<DataTypeBase, Action>> eventCodeBlocks) {
    var successfulEvents = new Queue<byte>();
    try {
        while (eventCodeBlocks.Count != 0) {
            ActiveDataType = eventCodeBlocks.Peek().Item1;
            recordedCommands = new Queue<byte[]>();
            eventCodeBlocks.Peek().Item2();
            ActiveDataType = null;

            // each recorded event is programmed as a pair of commands: the event entry followed by its command payload
            while (recordedCommands.Count != 0) {
                var id = await createEventTask.Execute("Programming command timed out after {0}ms", bridge.TimeForResponse * 2,
                    () => {
                        bridge.sendCommand(recordedCommands.Dequeue());
                        bridge.sendCommand(recordedCommands.Dequeue());
                    });
                successfulEvents.Enqueue(id);
            }
            eventCodeBlocks.Dequeue();
        }
    } catch (TimeoutException) {
        // roll back any events that were successfully programmed before the timeout
        foreach (byte id in successfulEvents) {
            remove(id);
        }
        throw;
    }
    return successfulEvents;
}
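// Sketch of the timeout-wrapped response pattern that these methods rely on. This is an assumption
// about how a TimedTask<T>-style helper could be built (TaskCompletionSource plus Task.Delay), not
// the SDK's actual implementation: Execute fires the command, then either completes with the value
// handed to SetResult by the response handler or throws a TimeoutException with the formatted message.
class TimedTaskSketch<T> {
    private TaskCompletionSource<T> tcs;

    // called by whatever code parses the incoming BLE notification
    public void SetResult(T value) => tcs?.TrySetResult(value);

    public async Task<T> Execute(string msgFormat, int timeoutMs, Action sendCommand) {
        tcs = new TaskCompletionSource<T>();
        sendCommand();
        var completed = await Task.WhenAny(tcs.Task, Task.Delay(timeoutMs));
        if (completed != tcs.Task) {
            throw new TimeoutException(string.Format(msgFormat, timeoutMs));
        }
        return await tcs.Task;
    }
}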
public async Task<int> ReadTmpValueAsync() {
    var result = await readTmpValueTask.Execute("Did not receive response from tmp register within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)DEBUG, Util.setRead(TMP_VALUE) }));
    return Util.bytesLeToInt(result, 2);
}
public async Task<T> ReadAsync() {
    var cmd = dataTypeBase.createReadStateCmd();
    return await readTask.Execute("Did not receive a response for command " + Util.ArrayToHexString(cmd) + " within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(cmd));
}
internal async Task QueryTimeAsync() {
    queryTimeTask = new TimedTask<bool>();
    await queryTimeTask.Execute("Failed to receive current time tick within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)LOGGING, Util.setRead(TIME) }));
}
internal async Task<Stack<ProcessorEntry>> pullChainAsync(byte id) {
    var entries = new Stack<ProcessorEntry>();
    var terminate = false;
    var readId = id;

    while (!terminate) {
        if (activeProcessors.TryGetValue(readId, out var processor)) {
            var entry = new ProcessorEntry {
                id = readId,
                config = processor.Item2.config
            };
            entries.Push(entry);

            if (processor.Item2.source.eventConfig[0] == (byte)DATA_PROCESSOR) {
                readId = processor.Item2.source.eventConfig[2];
            } else {
                terminate = true;
            }
        } else {
            var config = await pullProcessorConfigTask.Execute("Did not receive data processor config within {0}ms", bridge.TimeForResponse,
                () => bridge.sendCommand(new byte[] { (byte)DATA_PROCESSOR, Util.setRead(ADD), readId }));

            // byte 5 packs the input offset (low 5 bits) and length - 1 (next 3 bits)
            var entry = new ProcessorEntry {
                id = readId,
                offset = (byte)(config[5] & 0x1f),
                length = (byte)(((config[5] >> 5) & 0x7) + 1),
                source = new byte[3],
                config = new byte[config.Length - 6]
            };
            Array.Copy(config, 2, entry.source, 0, entry.source.Length);
            Array.Copy(config, 6, entry.config, 0, entry.config.Length);
            entries.Push(entry);

            if (config[2] == (byte)DATA_PROCESSOR) {
                readId = config[4];
            } else {
                terminate = true;
            }
        }
    }
    return entries;
}
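// Bit-unpacking illustration (standalone, not SDK code): how config[5] above splits into offset and
// length. For a packed value of 0x65 (binary 0110 0101), the low five bits give offset 5 and the top
// three bits give length 3 + 1 = 4.
static void UnpackProcessorAttributesExample() {
    byte packed = 0x65;
    byte offset = (byte)(packed & 0x1f);              // 0x05
    byte length = (byte)(((packed >> 5) & 0x7) + 1);  // 3 + 1 = 4
    Console.WriteLine($"offset = {offset}, length = {length}");
}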
internal async Task<Queue<byte>> queueDataProcessors(LinkedList<Tuple<DataTypeBase, EditorImplBase>> pendingProcessors) {
    var successfulProcessors = new Queue<byte>();
    try {
        while (pendingProcessors.Count != 0) {
            Tuple<DataTypeBase, EditorImplBase> current = pendingProcessors.First.Value;
            DataTypeBase input = current.Item2.source.input;

            if (current.Item2.configObj is DataProcessorConfig.FuserConfig fuserConfig) {
                fuserConfig.SyncFilterIds(this);
            }

            // filter config = event config of the input, a packed offset/length byte, then the processor config
            byte[] filterConfig = new byte[input.eventConfig.Length + 1 + current.Item2.config.Length];
            filterConfig[input.eventConfig.Length] = (byte)(((input.attributes.length() - 1) << 5) | input.attributes.offset);
            Array.Copy(input.eventConfig, 0, filterConfig, 0, input.eventConfig.Length);
            Array.Copy(current.Item2.config, 0, filterConfig, input.eventConfig.Length + 1, current.Item2.config.Length);

            var id = await createProcessorTask.Execute("Did not receive data processor id within {0}ms", bridge.TimeForResponse,
                () => bridge.sendCommand(DATA_PROCESSOR, ADD, filterConfig));
            pendingProcessors.RemoveFirst();

            current.Item2.source.eventConfig[2] = id;
            if (current.Item2.source.components != null) {
                foreach (var c in current.Item2.source.components) {
                    c.eventConfig[2] = id;
                }
            }
            if (current.Item1 != null) {
                current.Item1.eventConfig[2] = id;
            }

            activeProcessors[id] = current;
            successfulProcessors.Enqueue(id);
        }
    } catch (TimeoutException) {
        // undo processors that were created before the failure
        foreach (byte it in successfulProcessors) {
            removeProcessor(true, it);
        }
        throw;
    }
    return successfulProcessors;
}
internal async Task<DataTypeBase> create(uint period, ushort repititions, bool delay) {
    // [0] module, [1] register, [2-5] period (little endian), [6-7] repetitions (little endian), [8] delay flag
    byte[] cmd = new byte[9];
    cmd[0] = (byte)TIMER;
    cmd[1] = TIMER_ENTRY;
    cmd[8] = (byte)(delay ? 0 : 1);
    Array.Copy(Util.uintToBytesLe(period), 0, cmd, 2, 4);
    Array.Copy(Util.ushortToBytesLe(repititions), 0, cmd, 6, 2);

    var id = await createTimerTask.Execute("Did not receive timer ID within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(cmd));
    return new IntegralDataType(TIMER, NOTIFY, id, new DataAttributes(new byte[] { }, 0, 0, false));
}
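// Worked example (standalone illustration): the 9-byte timer payload produced above for a period of
// 1000 ms and 10 repetitions with delay = false. The TIMER and TIMER_ENTRY constants below are
// hypothetical stand-ins for the module constants referenced in the snippet.
static void PrintTimerPayloadExample() {
    const byte TIMER = 0x0C, TIMER_ENTRY = 0x02;   // assumed values, for illustration only
    uint period = 1000;                            // ms
    ushort repetitions = 10;
    bool delay = false;

    byte[] cmd = new byte[9];
    cmd[0] = TIMER;
    cmd[1] = TIMER_ENTRY;
    cmd[8] = (byte)(delay ? 0 : 1);
    Array.Copy(BitConverter.GetBytes(period), 0, cmd, 2, 4);       // little endian on common platforms
    Array.Copy(BitConverter.GetBytes(repetitions), 0, cmd, 6, 2);
    Console.WriteLine(BitConverter.ToString(cmd));                  // 0C-02-E8-03-00-00-0A-00-01
}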
internal async Task<Queue<LoggedDataConsumer>> CreateLoggersAsync(Queue<DataTypeBase> producers) {
    LoggedDataConsumer nextLogger = null;
    var result = new Queue<LoggedDataConsumer>();

    try {
        while (producers.Count != 0) {
            nextLogger = new LoggedDataConsumer(producers.Dequeue());
            byte[] eventConfig = nextLogger.source.eventConfig;

            // a producer wider than one log entry needs multiple log IDs, one per LOG_ENTRY_SIZE chunk
            var nReqLogIds = (byte)((nextLogger.source.attributes.length() - 1) / LOG_ENTRY_SIZE + 1);
            int remainder = nextLogger.source.attributes.length();

            for (byte i = 0; i < nReqLogIds; i++, remainder -= LOG_ENTRY_SIZE) {
                int entrySize = Math.Min(remainder, LOG_ENTRY_SIZE),
                    entryOffset = LOG_ENTRY_SIZE * i + nextLogger.source.attributes.offset;

                byte[] command = new byte[6];
                command[0] = (byte)LOGGING;
                command[1] = TRIGGER;
                command[2 + eventConfig.Length] = (byte)(((entrySize - 1) << 5) | entryOffset);
                Array.Copy(eventConfig, 0, command, 2, eventConfig.Length);

                var id = await createLoggerTask.Execute("Did not receive logger id within {0}ms", bridge.TimeForResponse,
                    () => bridge.sendCommand(command));
                nextLogger.addId(id);
            }

            nextLogger.register(dataLoggers);
            result.Enqueue(nextLogger);
        }
        return result;
    } catch (TimeoutException) {
        // tear down everything created so far, including the partially configured logger
        while (result.Count != 0) {
            result.Dequeue().remove(bridge, true);
        }
        nextLogger?.remove(bridge, true);
        throw;
    }
}
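// Worked illustration of the chunking math above, assuming LOG_ENTRY_SIZE = 4. A 6-byte producer at
// offset 0 needs ceil(6 / 4) = 2 log IDs: one entry covering 4 bytes at offset 0 and a second covering
// the remaining 2 bytes at offset 4. This helper only mirrors the arithmetic; it does not talk to a board.
static void PrintLogEntrySplit(int sourceLength, int sourceOffset) {
    const int LOG_ENTRY_SIZE = 4;   // assumed log entry width
    int nReqLogIds = (sourceLength - 1) / LOG_ENTRY_SIZE + 1;
    int remainder = sourceLength;
    for (int i = 0; i < nReqLogIds; i++, remainder -= LOG_ENTRY_SIZE) {
        int entrySize = Math.Min(remainder, LOG_ENTRY_SIZE);
        int entryOffset = LOG_ENTRY_SIZE * i + sourceOffset;
        Console.WriteLine($"log id #{i}: size = {entrySize}, offset = {entryOffset}");
    }
}
// PrintLogEntrySplit(6, 0) prints "log id #0: size = 4, offset = 0" then "log id #1: size = 2, offset = 4".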
public async Task<byte> EndRecordAsync() {
    isRecording = false;
    await Task.Delay(WRITE_MACRO_DELAY);

    var id = await beginMacroTask.Execute("Did not receive macro id within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)MACRO, BEGIN, (byte)(execOnBoot ? 1 : 0) }));
    foreach (var it in commands) {
        foreach (var cmd in convertToMacroCommand(it)) {
            bridge.sendCommand(cmd);
        }
    }
    bridge.sendCommand(new byte[] { (byte)MACRO, END });

    return id;
}
public async Task<byte[]> ReadSPIAsync(byte length, byte slaveSelectPin, byte clockPin, byte mosiPin, byte misoPin,
        byte mode, SpiFrequency frequency, byte[] data = null, bool lsbFirst = true, bool useNativePins = true) {
    SpiParameterBuilder builder = new SpiParameterBuilder((byte)((length - 1) | (DIRECT_SPI_READ_ID << 4)));
    builder.slaveSelectPin(slaveSelectPin)
        .clockPin(clockPin)
        .mosiPin(mosiPin)
        .misoPin(misoPin)
        .mode(mode)
        .frequency(frequency);
    if (lsbFirst) {
        builder.lsbFirst();
    }
    if (useNativePins) {
        builder.useNativePins();
    }
    if (data != null) {
        builder.data(data);
    }

    var response = await spiReadTask.Execute("Did not receive SPI data within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(SERIAL_PASSTHROUGH, Util.setRead(SPI_RW), builder.build()));
    if (response.Length > 3) {
        byte[] dataInner = new byte[response.Length - 3];
        Array.Copy(response, 3, dataInner, 0, dataInner.Length);
        return dataInner;
    }
    throw new InvalidOperationException("Error reading SPI data from device or register address. Response: " + Util.arrayToHexString(response));
}
public async Task<Configuration> ReadConfigAsync() {
    var response = await readConfigTask.Execute("Did not receive ibeacon ad UUID within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)IBEACON, Util.setRead(AD_UUID) }));
    // the payload carries the UUID in reversed byte order; undo that, then flip the first three Guid
    // fields into the little-endian layout expected by new Guid(byte[])
    byte[] copy = new byte[response.Length - 2];
    Array.Copy(response, 2, copy, 0, copy.Length);
    Array.Reverse(copy);
    Array.Reverse(copy, 0, 4);
    Array.Reverse(copy, 4, 2);
    Array.Reverse(copy, 6, 2);
    var adUuid = new Guid(copy);

    response = await readConfigTask.Execute("Did not receive ibeacon major ID within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)IBEACON, Util.setRead(MAJOR) }));
    var major = BitConverter.ToUInt16(response, 2);

    response = await readConfigTask.Execute("Did not receive ibeacon minor ID within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)IBEACON, Util.setRead(MINOR) }));
    var minor = BitConverter.ToUInt16(response, 2);

    response = await readConfigTask.Execute("Did not receive ibeacon rx power within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)IBEACON, Util.setRead(RX) }));
    var rxPower = (sbyte)response[2];

    response = await readConfigTask.Execute("Did not receive ibeacon tx power within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)IBEACON, Util.setRead(TX) }));
    var txPower = (sbyte)response[2];

    response = await readConfigTask.Execute("Did not receive ibeacon ad period within {0}ms", bridge.TimeForResponse,
        () => bridge.sendCommand(new byte[] { (byte)IBEACON, Util.setRead(PERIOD) }));
    var period = BitConverter.ToUInt16(response, 2);

    return new Configuration(adUuid, major, minor, period, rxPower, txPower);
}
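// Standalone illustration (not SDK code) of the Guid byte shuffling performed in ReadConfigAsync above.
// The sample payload is hypothetical: it is the UUID 00112233-4455-6677-8899-aabbccddeeff with its 16
// bytes fully reversed, which is how the snippet above assumes the device reports it.
static void GuidByteOrderExample() {
    byte[] payload = { 0xFF, 0xEE, 0xDD, 0xCC, 0xBB, 0xAA, 0x99, 0x88,
                       0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 };
    Array.Reverse(payload);        // back to big-endian textual order
    Array.Reverse(payload, 0, 4);  // Data1, Data2, and Data3 are little endian in new Guid(byte[])
    Array.Reverse(payload, 4, 2);
    Array.Reverse(payload, 6, 2);
    Console.WriteLine(new Guid(payload));   // 00112233-4455-6677-8899-aabbccddeeff
}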
internal async Task<ICollection<LoggedDataConsumer>> queryActiveLoggersAsync() {
    dataLoggers.Clear();

    var nRemainingLoggers = new Dictionary<DataTypeBase, byte>();
    var placeholder = new Dictionary<Tuple<byte, byte, byte>, byte>();
    ICollection<DataTypeBase> producers = bridge.aggregateDataSources();

    // find the data producer that best matches a logged (module, register, id) triple plus offset/length
    DataTypeBase guessLogSource(Tuple<byte, byte, byte> key, byte offset, byte length) {
        List<DataTypeBase> possible = new List<DataTypeBase>();
        foreach (DataTypeBase it in producers) {
            if (it.eventConfig[0] == key.Item1 && it.eventConfig[1] == key.Item2 && it.eventConfig[2] == key.Item3) {
                possible.Add(it);
                if (it.components != null) {
                    possible.AddRange(it.components);
                }
            }
        }

        DataTypeBase original = null;
        bool multipleEntries = false;
        foreach (DataTypeBase it in possible) {
            if (it.attributes.length() > 4) {
                original = it;
                multipleEntries = true;
            }
        }

        if (multipleEntries) {
            if (offset == 0 && length > LOG_ENTRY_SIZE) {
                return original;
            }
            if (!placeholder.ContainsKey(key)) {
                if (length == LOG_ENTRY_SIZE) {
                    placeholder.Add(key, length);
                    return original;
                }
            } else {
                placeholder[key] += length;
                if (placeholder[key] == original.attributes.length()) {
                    placeholder.Remove(key);
                }
                return original;
            }
        }

        foreach (DataTypeBase it in possible) {
            if (it.attributes.offset == offset && it.attributes.length() == length) {
                return it;
            }
        }
        return null;
    }

    for (byte i = 0; i < bridge.lookupModuleInfo(LOGGING).extra[0]; i++) {
        var response = await queryLogConfigTask.Execute("Querying log configuration (id = " + i + ") timed out after {0}ms", bridge.TimeForResponse,
            () => bridge.sendCommand(new byte[] { (byte)LOGGING, Util.setRead(TRIGGER), i }));

        if (response.Length > 2) {
            byte offset = (byte)(response[5] & 0x1f),
                 length = (byte)(((response[5] >> 5) & 0x3) + 1);
            var source = guessLogSource(Tuple.Create(response[2], response[3], response[4]), offset, length);

            var dataprocessor = bridge.GetModule<IDataProcessor>() as DataProcessor;
            var state = Util.clearRead(response[3]) == DataProcessor.STATE;

            if (response[2] == (byte)DATA_PROCESSOR && (response[3] == DataProcessor.NOTIFY || state)) {
                // rebuild the processor chain feeding this logger so its data types are known locally
                var chain = await dataprocessor.pullChainAsync(response[4]);
                var first = chain.First();
                var type = first.source != null
                    ? guessLogSource(Tuple.Create(first.source[0], first.source[1], first.source[2]), first.offset, first.length)
                    : dataprocessor.activeProcessors[first.id].Item2.source;

                while (chain.Count != 0) {
                    var current = chain.Pop();
                    var currentConfigObj = DataProcessorConfig.from(bridge.getFirmware(), bridge.lookupModuleInfo(DATA_PROCESSOR).revision, current.config);
                    var next = type.transform(currentConfigObj);

                    next.Item1.eventConfig[2] = current.id;
                    if (next.Item2 != null) {
                        next.Item2.eventConfig[2] = current.id;
                    }

                    if (!dataprocessor.activeProcessors.ContainsKey(current.id)) {
                        dataprocessor.activeProcessors.Add(current.id, Tuple.Create(next.Item2, new NullEditor(currentConfigObj, type, bridge) as EditorImplBase));
                    }
                    type = next.Item1;
                }
                source = state ? dataprocessor.lookupProcessor(response[4]).Item1 : type;
            }

            if (!nRemainingLoggers.ContainsKey(source) && source.attributes.length() > LOG_ENTRY_SIZE) {
                // cast before dividing so the ceiling acts on a fractional value rather than an already truncated integer
                nRemainingLoggers.Add(source, (byte)Math.Ceiling((float)source.attributes.length() / LOG_ENTRY_SIZE));
            }

            LoggedDataConsumer logger = null;
            foreach (LoggedDataConsumer it in dataLoggers.Values) {
                if (it.source.eventConfig.SequenceEqual(source.eventConfig) && it.source.attributes.Equals(source.attributes)) {
                    logger = it;
                    break;
                }
            }
            if (logger == null || (offset != 0 && !nRemainingLoggers.ContainsKey(source))) {
                logger = new LoggedDataConsumer(source);
            }
            logger.addId(i);
            dataLoggers.Add(i, logger);

            if (nRemainingLoggers.TryGetValue(source, out var count)) {
                // use a signed value so the "all ids seen" check can actually go negative (byte is unsigned in C#)
                sbyte remaining = (sbyte)(count - 1);
                nRemainingLoggers[source] = (byte)remaining;
                if (remaining < 0) {
                    nRemainingLoggers.Remove(source);
                }
            }
        }
    }

    List<LoggedDataConsumer> orderedLoggers = new List<LoggedDataConsumer>();
    foreach (var k in dataLoggers.Keys.OrderBy(d => d)) {
        if (!orderedLoggers.Contains(dataLoggers[k])) {
            orderedLoggers.Add(dataLoggers[k]);
        }
    }
    return orderedLoggers;
}