// Decompresses the given buffer based on the compression style and algorithm currently used by the parser.
// The result is placed back in the buffer that was sent to this method.
private void Decompress(ref byte[] buffer)
{
    // Nothing to do unless both a compression algorithm and a compression style are in effect
    if (CompressionAlgorithm == CompressionAlgorithm.None || CompressionStyle == CompressionStyle.None)
        return;

    byte[] block = new byte[65536];

    using (BlockAllocatedMemoryStream inflatedData = new BlockAllocatedMemoryStream())
    {
        // Faster here to use provided buffer as non-expandable memory stream
        using (MemoryStream compressedData = new MemoryStream(buffer))
        using (ZlibStream inflater = new ZlibStream(compressedData, CompressionMode.Decompress))
        {
            int bytesRead = inflater.Read(block, 0, block.Length);

            // Accumulate decompressed chunks until the inflater is exhausted
            while (bytesRead != 0)
            {
                inflatedData.Write(block, 0, bytesRead);
                bytesRead = inflater.Read(block, 0, block.Length);
            }
        }

        buffer = inflatedData.ToArray();
    }
}
/// <summary>
/// Decompress a byte array.
/// </summary>
/// <param name="source">The <see cref="Byte"/> array to decompress.</param>
/// <param name="startIndex">An <see cref="Int32"/> representing the start index of the byte array.</param>
/// <param name="length">The number of bytes to read into the byte array for compression.</param>
/// <returns>A decompressed <see cref="Byte"/> array.</returns>
public static byte[] Decompress(this byte[] source, int startIndex, int length)
{
    const byte CompressionStrengthMask = (byte)(Bits.Bit00 | Bits.Bit01);

    // First buffer byte is a header: the two lowest bits carry the compression strength
    CompressionStrength strength = (CompressionStrength)(source[startIndex] & CompressionStrengthMask);

    // When no compression was applied, simply return the requested portion of the buffer (skipping the header byte)
    if (strength == CompressionStrength.NoCompression)
        return source.BlockCopy(startIndex + 1, length - 1);

    // Remaining header bits carry the multi-pass compression depth
    int compressionDepth = (source[startIndex] & ~CompressionStrengthMask) >> 2;
    byte[] inflated;

    // Create a new decompression deflater over the compressed payload (header byte excluded)
    using (BlockAllocatedMemoryStream compressedData = new BlockAllocatedMemoryStream(source, startIndex + 1, length - 1))
    {
        using (DeflateStream inflater = new DeflateStream(compressedData, CompressionMode.Decompress))
            inflated = inflater.ReadStream();
    }

    // When user requests multi-pass compression, there may be multiple compression passes on a buffer,
    // so we recurse through the needed decompressions to get back to the original data
    if (strength == CompressionStrength.MultiPass && compressionDepth > 0)
        return inflated.Decompress();

    return inflated;
}
/// <summary>
/// Creates an XML string containing an unmerged view of the <see cref="ConfigurationSection"/> object as a single section to write to a file.
/// </summary>
/// <returns>
/// An XML string containing an unmerged view of the <see cref="ConfigurationSection"/> object.
/// </returns>
/// <param name="parentElement">The <see cref="ConfigurationElement"/> instance to use as the parent when performing the un-merge.</param>
/// <param name="name">The name of the section to create.</param>
/// <param name="saveMode">The <see cref="ConfigurationSaveMode"/> instance to use when writing to a string.</param>
protected override string SerializeSection(ConfigurationElement parentElement, string name, ConfigurationSaveMode saveMode)
{
    // Custom serialization only applies when categorized sections have been defined;
    // otherwise serialization falls through to the base class implementation below
    if ((object)m_sections != null)
    {
        const string tempRoot = "__tempRoot__";

        using (BlockAllocatedMemoryStream stream = new BlockAllocatedMemoryStream())
        {
            // NOTE(review): writer is flushed but not disposed; the underlying stream must remain
            // readable for ToArray() below, and the stream itself is disposed by the using block
            XmlTextWriter writer = new XmlTextWriter(stream, Encoding.UTF8);
            writer.Indentation = 2;
            writer.Formatting = Formatting.Indented;

            // Add a temporary root so that indentation is at the desired level
            writer.WriteStartElement(tempRoot);
            writer.WriteStartElement(name);

            foreach (string section in m_sections.Keys)
            {
                CategorizedSettingsElementCollection categorySettings = this[section];

                // Add category section
                writer.WriteStartElement(section);

                // Write each category value
                foreach (CategorizedSettingsElement categorySetting in categorySettings)
                {
                    // <add name="TestConfigParam" value="-1.0" description="Parameter description." encrypted="false" scope="User"/>
                    writer.WriteStartElement("add");
                    writer.WriteAttributeString("name", categorySetting.Name);
                    writer.WriteAttributeString("value", categorySetting.SerializedValue);
                    writer.WriteAttributeString("description", categorySetting.Description ?? "");
                    writer.WriteAttributeString("encrypted", categorySetting.Encrypted.ToString());

                    // Scope attribute is only emitted for user-scoped settings
                    if (categorySetting.Scope == SettingScope.User)
                    {
                        writer.WriteAttributeString("scope", categorySetting.Scope.ToString());
                    }

                    writer.WriteEndElement();
                }

                writer.WriteEndElement();
            }

            writer.WriteEndElement();
            writer.WriteEndElement();
            writer.Flush();

            string settings = Encoding.UTF8.GetString(stream.ToArray());

            // Remove temporary root
            return(settings.Replace(string.Format("<{0}>", tempRoot), "").Replace(string.Format("</{0}>", tempRoot), ""));
        }
    }

    return(base.SerializeSection(parentElement, name, saveMode));
}
// Rotates base time offsets
private void RotateBaseTimes()
{
    // Rotation requires both a parent publisher and an active rotation timer
    if ((object)m_parent == null || (object)m_baseTimeRotationTimer == null)
        return;

    long rotationOffset = (long)m_baseTimeRotationTimer.Interval * Ticks.PerMillisecond;

    if ((object)m_baseTimeOffsets == null)
    {
        // First rotation: establish the pair of base time offsets
        m_baseTimeOffsets = new long[2];
        m_baseTimeOffsets[0] = RealTime;
        m_baseTimeOffsets[1] = RealTime + rotationOffset;
        m_timeIndex = 0;
    }
    else
    {
        // Subsequent rotations: switch to the newer timestamp, then refresh the stale slot
        int oldIndex = m_timeIndex;
        m_timeIndex ^= 1;
        m_baseTimeOffsets[oldIndex] = RealTime + rotationOffset;
    }

    // Since this function will only be called periodically, there is no real benefit
    // to maintaining this memory stream at a member level
    using (BlockAllocatedMemoryStream responsePacket = new BlockAllocatedMemoryStream())
    {
        responsePacket.Write(BigEndian.GetBytes(m_timeIndex), 0, 4);
        responsePacket.Write(BigEndian.GetBytes(m_baseTimeOffsets[0]), 0, 8);
        responsePacket.Write(BigEndian.GetBytes(m_baseTimeOffsets[1]), 0, 8);

        m_parent.SendClientResponse(m_clientID, ServerResponse.UpdateBaseTimes, ServerCommand.Subscribe, responsePacket.ToArray());
    }
}
/// <summary>
/// Queues a sequence of bytes, from the specified data source, onto the stream for parsing.
/// </summary>
/// <param name="source">Identifier of the data source.</param>
/// <param name="buffer">An array of bytes. This method copies count bytes from buffer to the queue.</param>
/// <param name="offset">The zero-based byte offset in buffer at which to begin copying bytes to the current stream.</param>
/// <param name="count">The number of bytes to be written to the current stream.</param>
public override void Parse(SourceChannel source, byte[] buffer, int offset, int count)
{
    // When SEL Fast Message is transmitted over Ethernet it is embedded in a Telnet stream. As a result
    // any 0xFF will be encoded for Telnet compliance as a duplicate, i.e., 0xFF 0xFF. We remove these
    // duplications when encountered to make sure check-sums and parsing work as expected.

    // Search pattern is loop-invariant - allocate it once instead of once per iteration
    byte[] doubleFFPattern = { 0xFF, 0xFF };

    int doubleFFPosition = buffer.IndexOfSequence(doubleFFPattern, offset, count);

    while (doubleFFPosition > -1)
    {
        using (BlockAllocatedMemoryStream newBuffer = new BlockAllocatedMemoryStream())
        {
            // Write buffer before repeated byte
            newBuffer.Write(buffer, offset, doubleFFPosition - offset + 1);

            int nextByte = doubleFFPosition + 2;

            // Write buffer after repeated byte, if any
            if (nextByte < offset + count)
                newBuffer.Write(buffer, nextByte, offset + count - nextByte);

            buffer = newBuffer.ToArray();
        }

        offset = 0;
        count = buffer.Length;

        // Find next 0xFF 0xFF sequence
        doubleFFPosition = buffer.IndexOfSequence(doubleFFPattern, offset, count);
    }

    base.Parse(source, buffer, offset, count);
}
/// <summary>
/// Reads entire <see cref="Stream"/> contents, and returns <see cref="byte"/> array of data.
/// </summary>
/// <param name="source">The <see cref="Stream"/> to be converted to <see cref="byte"/> array.</param>
/// <returns>An array of <see cref="byte"/>.</returns>
public static byte[] ReadStream(this Stream source)
{
    // Drain the source into a block-allocated buffer, then snapshot it as a single array
    using BlockAllocatedMemoryStream destination = new BlockAllocatedMemoryStream();

    source.CopyTo(destination);

    return destination.ToArray();
}
/// <summary>
/// Returns a binary array of decrypted data for the given parameters.
/// </summary>
/// <param name="algorithm"><see cref="SymmetricAlgorithm"/> to use for decryption.</param>
/// <param name="data">Source buffer containing data to decrypt.</param>
/// <param name="startIndex">Offset into <paramref name="data"/> buffer.</param>
/// <param name="length">Number of bytes in <paramref name="data"/> buffer to decrypt starting from <paramref name="startIndex"/> offset.</param>
/// <param name="key">The secret key to use for the symmetric algorithm.</param>
/// <param name="iv">The initialization vector to use for the symmetric algorithm.</param>
/// <returns>Decrypted version of <paramref name="data"/> buffer.</returns>
public static byte[] Decrypt(this SymmetricAlgorithm algorithm, byte[] data, int startIndex, int length, byte[] key, byte[] iv)
{
    // Fastest to use existing buffer in non-expandable memory stream for source and large block allocated memory stream for destination
    using MemoryStream cipherStream = new MemoryStream(data, startIndex, length);
    using BlockAllocatedMemoryStream plainStream = new BlockAllocatedMemoryStream();

    algorithm.Decrypt(cipherStream, plainStream, key, iv);

    return plainStream.ToArray();
}
// When user requests multi-pass compression, we allow multiple compression passes on a buffer because
// this can often produce better compression results
private static byte[] Compress(this byte[] source, int startIndex, int length, CompressionStrength strength, int compressionDepth)
{
    if (strength == CompressionStrength.NoCompression)
    {
        // No compression requested, return specified portion of the buffer prefixed with a
        // single header byte carrying the strength flag.
        // Use Buffer.BlockCopy instead of a manual byte-at-a-time loop.
        byte[] outBuffer = new byte[length + 1];
        outBuffer[0] = (byte)strength;
        Buffer.BlockCopy(source, startIndex, outBuffer, 1, length);
        return outBuffer;
    }

    // Create a new compression deflater
    using (BlockAllocatedMemoryStream compressedData = new BlockAllocatedMemoryStream())
    {
        using (DeflateStream deflater = new DeflateStream(compressedData, CompressionMode.Compress, true))
        {
            // Provide data for compression
            deflater.Write(source, startIndex, length);
        }

        byte[] destination = compressedData.ToArray();

        // Prepend header byte and copy compressed payload after it.
        // First two bits are reserved for compression strength - this leaves 6 bits for a maximum of 64 compressions
        byte[] outBuffer = new byte[destination.Length + 1];
        outBuffer[0] = (byte)((compressionDepth << 2) | (int)strength);
        Buffer.BlockCopy(destination, 0, outBuffer, 1, destination.Length);

        // outBuffer.Length equals compressed size plus header byte, matching the original
        // "destinationLength < length" comparison after its increment
        if (strength == CompressionStrength.MultiPass && outBuffer.Length < length && compressionDepth < 64)
        {
            // See if another pass would help the compression...
            byte[] testBuffer = outBuffer.Compress(0, outBuffer.Length, strength, compressionDepth + 1);

            if (testBuffer.Length < outBuffer.Length)
                return testBuffer;
        }

        return outBuffer;
    }
}
/// <summary>
/// Creates a new <see cref="UnsynchronizedClientSubscription"/>.
/// </summary>
/// <param name="parent">Reference to parent.</param>
/// <param name="clientID"><see cref="Guid"/> based client connection ID.</param>
/// <param name="subscriberID"><see cref="Guid"/> based subscriber ID.</param>
public UnsynchronizedClientSubscription(DataPublisher parent, Guid clientID, Guid subscriberID)
{
    m_parent = parent;
    m_clientID = clientID;
    m_subscriberID = subscriberID;

    // Signal index cache is keyed to this subscriber
    m_signalIndexCache = new SignalIndexCache { SubscriberID = subscriberID };

    m_workingBuffer = new BlockAllocatedMemoryStream();
    m_bufferBlockCache = new List<byte[]>();
    m_bufferBlockCacheLock = new object();
}
private void ProcessBinaryMeasurements(IEnumerable <IBinaryMeasurement> measurements, long frameLevelTimestamp, bool useCompactMeasurementFormat, bool usePayloadCompression)
{
    // Materialize measurement set once - the source may be a deferred LINQ query and was
    // previously enumerated multiple times (Count(), compression pass, serialization pass)
    IBinaryMeasurement[] measurementSet = measurements as IBinaryMeasurement[] ?? measurements.ToArray();

    // Create working buffer
    using (BlockAllocatedMemoryStream workingBuffer = new BlockAllocatedMemoryStream())
    {
        // Serialize data packet flags into response
        DataPacketFlags flags = DataPacketFlags.Synchronized;

        if (useCompactMeasurementFormat)
            flags |= DataPacketFlags.Compact;

        workingBuffer.WriteByte((byte)flags);

        // Serialize frame timestamp into data packet - this only occurs in synchronized data packets,
        // unsynchronized subscriptions always include timestamps in the serialized measurements
        workingBuffer.Write(BigEndian.GetBytes(frameLevelTimestamp), 0, 8);

        // Serialize total number of measurement values to follow
        workingBuffer.Write(BigEndian.GetBytes(measurementSet.Length), 0, 4);

        if (usePayloadCompression && m_compressionModes.HasFlag(CompressionModes.TSSC))
            throw new InvalidOperationException("TSSC must be processed at the frame level. Please check call stack - this is considered an error.");

        // Attempt compression when requested - encoding of compressed buffer only happens if size would be smaller than normal serialization
        if (!usePayloadCompression || !measurementSet.Cast <CompactMeasurement>().CompressPayload(workingBuffer, m_compressionStrength, false, ref flags))
        {
            // Serialize measurements to data buffer
            foreach (IBinaryMeasurement measurement in measurementSet)
                measurement.CopyBinaryImageToStream(workingBuffer);
        }

        // Update data packet flags if it has updated compression flags
        if ((flags & DataPacketFlags.Compressed) > 0)
        {
            workingBuffer.Seek(0, SeekOrigin.Begin);
            workingBuffer.WriteByte((byte)flags);
        }

        // Publish data packet to client
        if ((object)m_parent != null)
            m_parent.SendClientResponse(m_clientID, ServerResponse.DataPacket, ServerCommand.Subscribe, workingBuffer.ToArray());
    }
}
// Verifies that BlockAllocatedMemoryStream matches MemoryStream for random-position
// reads, including reads positioned at or beyond the end of the stream.
public void Test4()
{
    MemoryStream ms = new MemoryStream();
    BlockAllocatedMemoryStream ms2 = new BlockAllocatedMemoryStream();

    // Populate both streams with identical random content
    for (int x = 0; x < 10000; x++)
    {
        int value = Random.Int32;
        ms.Write(value);
        ms.Write((byte)value);
        ms2.Write(value);
        ms2.Write((byte)value);
    }

    // Random-position int/byte reads must agree
    for (int x = 0; x < 10000; x++)
    {
        long position = Random.Int64Between(0, ms.Length - 5);
        ms.Position = position;
        ms2.Position = position;

        if (ms.ReadInt32() != ms2.ReadInt32())
            throw new Exception();

        if (ms.ReadNextByte() != ms2.ReadNextByte())
            throw new Exception();
    }

    // Random-position block reads, possibly past end-of-stream, must agree
    for (int x = 0; x < 10000; x++)
    {
        byte[] buffer1 = new byte[100];
        byte[] buffer2 = new byte[100];
        long position = Random.Int64Between(0, (long)(ms.Length * 1.1));
        int readLength = Random.Int32Between(0, 100);
        ms.Position = position;
        ms2.Position = position;

        // BUG FIX: the original only compared buffer contents when the read counts
        // differed - meaning a count mismatch never failed the test, and matching-length
        // reads were never validated. Fail on count mismatch and always compare contents.
        if (ms.Read(buffer1, 99 - readLength, readLength) != ms2.Read(buffer2, 99 - readLength, readLength))
            throw new Exception();

        CompareBytes(buffer1, buffer2);
    }

    Compare(ms, ms2);
}
private void ProcessBinaryMeasurements(IEnumerable <IBinaryMeasurement> measurements, bool useCompactMeasurementFormat, bool usePayloadCompression)
{
    // Materialize measurement set once - the source may be a deferred LINQ query and was
    // previously enumerated multiple times (Count(), compression pass, serialization pass)
    IBinaryMeasurement[] measurementSet = measurements as IBinaryMeasurement[] ?? measurements.ToArray();

    // Create working buffer
    using (BlockAllocatedMemoryStream workingBuffer = new BlockAllocatedMemoryStream())
    {
        // Serialize data packet flags into response
        DataPacketFlags flags = DataPacketFlags.NoFlags; // No flags means bit is cleared, i.e., unsynchronized

        if (useCompactMeasurementFormat)
            flags |= DataPacketFlags.Compact;

        workingBuffer.WriteByte((byte)flags);

        // No frame level timestamp is serialized into the data packet since all data is unsynchronized and essentially
        // published upon receipt, however timestamps are optionally included in the serialized measurements.

        // Serialize total number of measurement values to follow
        workingBuffer.Write(BigEndian.GetBytes(measurementSet.Length), 0, 4);

        // Attempt compression when requested - encoding of compressed buffer only happens if size would be smaller than normal serialization
        if (!usePayloadCompression || !measurementSet.Cast <CompactMeasurement>().CompressPayload(workingBuffer, m_compressionStrength, m_includeTime, ref flags))
        {
            // Serialize measurements to data buffer
            foreach (IBinaryMeasurement measurement in measurementSet)
                measurement.CopyBinaryImageToStream(workingBuffer);
        }

        // Update data packet flags if it has updated compression flags
        if ((flags & DataPacketFlags.Compressed) > 0)
        {
            workingBuffer.Seek(0, SeekOrigin.Begin);
            workingBuffer.WriteByte((byte)flags);
        }

        // Publish data packet to client
        if ((object)m_parent != null)
            m_parent.SendClientResponse(m_clientID, ServerResponse.DataPacket, ServerCommand.Subscribe, workingBuffer.ToArray());

        // Track last publication time
        m_lastPublishTime = DateTime.UtcNow.Ticks;
    }
}
/// <summary>
/// Serializes an <see cref="Object"/>.
/// </summary>
/// <typeparam name="T"><see cref="Type"/> of the <paramref name="serializableObject"/>.</typeparam>
/// <param name="serializableObject"><see cref="Object"/> to be serialized.</param>
/// <param name="serializationFormat"><see cref="SerializationFormat"/> in which the <paramref name="serializableObject"/> is to be serialized.</param>
/// <returns>An <see cref="Array"/> of <see cref="Byte"/> of the serialized <see cref="Object"/>.</returns>
/// <exception cref="ArgumentNullException"><paramref name="serializableObject"/> is null.</exception>
/// <exception cref="NotSupportedException">Specified <paramref name="serializationFormat"/> is not supported.</exception>
public static byte[] Serialize <T>(T serializableObject, SerializationFormat serializationFormat)
{
    // FYI, using statement will not work here as this creates a read-only variable that cannot be passed by reference
    Stream buffer = null;

    try
    {
        buffer = new BlockAllocatedMemoryStream();
        Serialize(serializableObject, serializationFormat, buffer);

        return ((BlockAllocatedMemoryStream)buffer).ToArray();
    }
    finally
    {
        buffer?.Dispose();
    }
}
// Verifies that sequential writes to BlockAllocatedMemoryStream produce the
// same content, length and position as an equivalent MemoryStream.
public void Test()
{
    MemoryStream expected = new MemoryStream();
    BlockAllocatedMemoryStream actual = new BlockAllocatedMemoryStream();

    for (int i = 0; i < 10000; i++)
    {
        int value = Random.Int32;

        expected.Write(value);
        expected.Write((byte)value);

        actual.Write(value);
        actual.Write((byte)value);
    }

    Compare(expected, actual);
}
// Asserts that both streams agree on position, length and full content.
private static void Compare(MemoryStream ms, BlockAllocatedMemoryStream ms2)
{
    if (ms.Position != ms2.Position)
        throw new Exception();

    if (ms.Length != ms2.Length)
        throw new Exception();

    CompareBytes(ms.ToArray(), ms2.ToArray());
}
/// <summary>
/// Initiates inter-process synchronized save of <see cref="DataSet"/>.
/// </summary>
public override void Save()
{
    byte[] serializedDataSet;

    // Wait for thread level lock on data set
    lock (m_dataSetLock)
    {
        using (BlockAllocatedMemoryStream serializationBuffer = new BlockAllocatedMemoryStream())
        {
            m_dataSet.SerializeToStream(serializationBuffer);
            serializedDataSet = serializationBuffer.ToArray();
        }
    }

    // File data is the serialized data set, assignment will initiate auto-save if needed
    FileData = serializedDataSet;
}
// Open wave reader - either from memory loaded WAV or directly from disk
private WaveDataReader OpenWaveDataReader()
{
    // No caching requested - read straight from disk
    if (!MemoryCache)
        return WaveDataReader.FromFile(WavFileName);

    if ((object)m_dataCache == null)
    {
        // Lazily populate in-memory cache from the WAV file on first access
        m_dataCache = new BlockAllocatedMemoryStream();

        using (FileStream stream = File.OpenRead(WavFileName))
            stream.CopyTo(m_dataCache);
    }

    // Rewind cache so reader always starts at the beginning of the WAV data
    m_dataCache.Position = 0;

    return WaveDataReader.FromStream(m_dataCache);
}
// Serializes the user data cache to a byte array: header bytes, entry count, then each
// entry's login ID followed by its user data fields, role list and group list.
// NOTE(review): BinaryWriter uses Encoding.Default, which is machine/locale dependent -
// presumably acceptable because the cache is read back on the same machine; confirm.
private static byte[] SerializeCache(Dictionary <string, UserData> cache)
{
    using (BlockAllocatedMemoryStream buffer = new BlockAllocatedMemoryStream())
    using (BinaryWriter output = new BinaryWriter(buffer, Encoding.Default))
    {
        output.Write(CacheHeaderBytes);
        output.Write(cache.Count);

        foreach (KeyValuePair <string, UserData> entry in cache)
        {
            UserData userData = entry.Value;

            // Cache key (login ID) followed by user identity fields
            output.Write(entry.Key);
            output.Write(userData.Username);
            output.Write(userData.FirstName);
            output.Write(userData.LastName);
            output.Write(userData.CompanyName);
            output.Write(userData.PhoneNumber);
            output.Write(userData.EmailAddress);

            // Account state flags and timestamps (as ticks)
            output.Write(userData.IsLockedOut);
            output.Write(userData.IsDisabled);
            output.Write(userData.PasswordChangeDateTime.Ticks);
            output.Write(userData.AccountCreatedDateTime.Ticks);

            // Length-prefixed role list
            output.Write(userData.Roles.Count);

            foreach (string role in userData.Roles)
                output.Write(role);

            // Length-prefixed group list
            output.Write(userData.Groups.Count);

            foreach (string group in userData.Groups)
                output.Write(group);
        }

        return buffer.ToArray();
    }
}
/// <summary>
/// Converts a <see cref="Bitmap"/> image to the specified <see cref="ImageFormat"/>.
/// </summary>
/// <param name="originalImage">The <see cref="Bitmap"/> image to be converted.</param>
/// <param name="newFormat">The new <see cref="ImageFormat"/> of the image.</param>
/// <param name="disposeOriginal">true if the original image is to be disposed after converting it; otherwise false.</param>
/// <returns>A <see cref="Bitmap"/> instance.</returns>
/// <example>
/// This example shows how to convert the format of an image and dispose the original image that was converted:
/// <code>
/// using System;
/// using System.Drawing;
/// using System.Drawing.Imaging;
/// using GSF.Drawing;
///
/// class Program
/// {
///     static void Main(string[] args)
///     {
///         // Load original, convert it, and dispose original.
///         using (Bitmap converted = ((Bitmap)Bitmap.FromFile("Original.jpg")).ConvertTo(ImageFormat.Gif))
///         {
///             // Save the converted image to file.
///             converted.Save("OriginalGif.gif");
///         }
///
///         Console.ReadLine();
///     }
/// }
/// </code>
/// </example>
public static Bitmap ConvertTo(this Image originalImage, ImageFormat newFormat, bool disposeOriginal)
{
    Bitmap newImage;

    using (BlockAllocatedMemoryStream newImageStream = new BlockAllocatedMemoryStream())
    {
        // Save image to memory stream in the specified format.
        originalImage.Save(newImageStream, newFormat);

        // BUG FIX: GDI+ requires the source stream to remain open for the lifetime of a
        // Bitmap constructed from it, but the original code returned a Bitmap bound to a
        // stream that was disposed on method exit. Clone the decoded image into a detached
        // Bitmap so the stream can be safely disposed.
        using (Bitmap streamBoundImage = new Bitmap(newImageStream))
        {
            newImage = new Bitmap(streamBoundImage);
        }

        // Dispose original if indicated.
        if (disposeOriginal)
            originalImage.Dispose();

        return newImage;
    }
}
// Verifies that BlockAllocatedMemoryStream matches MemoryStream behavior under
// random repositioning, sparse writes and length changes.
public void Test3()
{
    MemoryStream expected = new MemoryStream();
    BlockAllocatedMemoryStream actual = new BlockAllocatedMemoryStream();

    for (int i = 0; i < 10000; i++)
    {
        // Jump both streams to the same random position, possibly past current length
        long position = Random.Int64Between(0, 100000);
        expected.Position = position;
        actual.Position = position;

        int value = Random.Int32;
        expected.Write(value);
        actual.Write(value);

        // Randomly grow or truncate both streams identically
        long length = Random.Int64Between(100000 >> 1, 100000);
        expected.SetLength(length);
        actual.SetLength(length);
    }

    Compare(expected, actual);
}
// Verifies that BlockAllocatedMemoryStream matches MemoryStream behavior when
// writes are interleaved with small random relative seeks.
public void Test2()
{
    MemoryStream expected = new MemoryStream();
    BlockAllocatedMemoryStream actual = new BlockAllocatedMemoryStream();

    for (int i = 0; i < 10000; i++)
    {
        int value = Random.Int32;
        expected.Write(value);
        actual.Write(value);

        // Apply the same random relative seek to both streams,
        // flipping direction if it would move before the stream start
        int seek = Random.Int16Between(-10, 20);

        if (expected.Position + seek < 0)
            seek = -seek;

        expected.Position += seek;
        actual.Position += seek;
    }

    Compare(expected, actual);
}
/// <summary>
/// Combines an array of buffers together as a single image.
/// </summary>
/// <param name="buffers">Array of byte buffers.</param>
/// <returns>Combined buffers.</returns>
/// <exception cref="InvalidOperationException">Cannot create a byte array with more than 2,147,483,591 elements.</exception>
/// <remarks>
/// Only use this function if you need a copy of the combined buffers, it will be optimal
/// to use the Linq function <see cref="Enumerable.Concat{T}"/> if you simply need to
/// iterate over the combined buffers.
/// </remarks>
public static byte[] Combine(this byte[][] buffers)
{
    if (buffers is null)
        throw new ArgumentNullException(nameof(buffers));

    using BlockAllocatedMemoryStream destination = new BlockAllocatedMemoryStream();

    // Append each source buffer in order, rejecting null entries
    for (int i = 0; i < buffers.Length; i++)
    {
        byte[] segment = buffers[i];

        if (segment is null)
            throw new ArgumentNullException($"buffers[{i}]");

        destination.Write(segment, 0, segment.Length);
    }

    // return combined data buffers
    return destination.ToArray();
}
/// <summary>
/// Combines an array of buffers together as a single image.
/// </summary>
/// <param name="buffers">Array of byte buffers.</param>
/// <returns>Combined buffers.</returns>
/// <exception cref="InvalidOperationException">Cannot create a byte array with more than 2,147,483,591 elements.</exception>
/// <remarks>
/// Only use this function if you need a copy of the combined buffers, it will be optimal
/// to use the Linq function <see cref="Enumerable.Concat{T}"/> if you simply need to
/// iterate over the combined buffers.
/// </remarks>
public static byte[] Combine(this byte[][] buffers)
{
    // Idiom fix: nameof/interpolation instead of hard-coded parameter name strings
    // (produces identical runtime messages, but survives renames)
    if ((object)buffers == null)
        throw new ArgumentNullException(nameof(buffers));

    using (BlockAllocatedMemoryStream combinedBuffer = new BlockAllocatedMemoryStream())
    {
        // Combine all currently queued buffers
        for (int x = 0; x < buffers.Length; x++)
        {
            if ((object)buffers[x] == null)
                throw new ArgumentNullException($"buffers[{x}]");

            combinedBuffer.Write(buffers[x], 0, buffers[x].Length);
        }

        // return combined data buffers
        return combinedBuffer.ToArray();
    }
}
/// <summary>
/// Attempts to compress payload of <see cref="CompactMeasurement"/> values onto the <paramref name="destination"/> stream.
/// </summary>
/// <param name="compactMeasurements">Payload of <see cref="CompactMeasurement"/> values.</param>
/// <param name="destination">Memory based <paramref name="destination"/> stream to hold compressed payload.</param>
/// <param name="compressionStrength">Compression strength to use.</param>
/// <param name="includeTime">Flag that determines if time should be included in the compressed payload.</param>
/// <param name="flags">Current <see cref="DataPacketFlags"/>.</param>
/// <returns><c>true</c> if payload was compressed and encoded onto <paramref name="destination"/> stream; otherwise <c>false</c>.</returns>
/// <remarks>
/// <para>
/// Compressed payload will only be encoded onto <paramref name="destination"/> stream if compressed size would be smaller
/// than normal serialized size.
/// </para>
/// <para>
/// As an optimization this function uses a compression method that uses pointers to native structures, as such the
/// endian order encoding of the compressed data will always be in the native-endian order of the operating system.
/// This will be an important consideration when writing a endian order neutral payload decompressor. To help with
/// this the actual endian order used during compression is marked in the data flags. However, measurements values
/// are consistently encoded in big-endian order prior to buffer compression.
/// </para>
/// </remarks>
public static bool CompressPayload(this IEnumerable <CompactMeasurement> compactMeasurements, BlockAllocatedMemoryStream destination, byte compressionStrength, bool includeTime, ref DataPacketFlags flags)
{
    // Instantiate a buffer that is larger than we'll need
    // NOTE(review): scratch buffer is fixed at 64KB; with IDs + values + timestamps each
    // measurement consumes up to 16 bytes, so very large measurement sets could overrun
    // this buffer - presumably bounded upstream; confirm against callers
    byte[] buffer = new byte[ushort.MaxValue];

    // Go ahead an enumerate all the measurements - this will cast all values to compact measurements
    // NOTE(review): measurements[0] below assumes a non-empty set - confirm callers never pass empty
    CompactMeasurement[] measurements = compactMeasurements.ToArray();
    int measurementCount = measurements.Length;
    int sizeToBeat = measurementCount * measurements[0].BinaryLength;
    int index = 0;

    // Encode compact state flags and runtime IDs together --
    // Together these are three bytes, so we pad with a zero byte.
    // The zero byte and state flags are considered to be more compressible
    // than the runtime ID, so these are stored in the higher order bytes.
    for (int i = 0; i < measurementCount; i++)
    {
        uint value = ((uint)measurements[i].CompactStateFlags << 16) | measurements[i].RuntimeID;
        index += NativeEndianOrder.Default.CopyBytes(value, buffer, index);
    }

    // Encode values
    for (int i = 0; i < measurementCount; i++)
    {
        // Encode using adjusted value (accounts for adder and multiplier)
        index += NativeEndianOrder.Default.CopyBytes((float)measurements[i].AdjustedValue, buffer, index);
    }

    if (includeTime)
    {
        // Encode timestamps
        for (int i = 0; i < measurementCount; i++)
        {
            // Since large majority of 8-byte tick values will be repeated, they should compress well
            index += NativeEndianOrder.Default.CopyBytes((long)measurements[i].Timestamp, buffer, index);
        }
    }

    // Attempt to compress buffer
    int compressedSize = PatternCompressor.CompressBuffer(buffer, 0, index, ushort.MaxValue, compressionStrength);

    // Only encode compressed buffer if compression actually helped payload size
    if (compressedSize <= sizeToBeat)
    {
        // Set payload compression flag
        flags |= DataPacketFlags.Compressed;

        // Make sure decompressor knows original endian encoding order
        if (BitConverter.IsLittleEndian)
        {
            flags |= DataPacketFlags.LittleEndianCompression;
        }
        else
        {
            flags &= ~DataPacketFlags.LittleEndianCompression;
        }

        // Copy compressed payload onto destination stream
        destination.Write(buffer, 0, compressedSize);
        return(true);
    }

    // Clear payload compression flag
    flags &= ~DataPacketFlags.Compressed;
    return(false);
}
/// <summary>
/// Decompress a byte array.
/// </summary>
/// <param name="source">The <see cref="Byte"/> array to decompress.</param>
/// <param name="startIndex">An <see cref="Int32"/> representing the start index of the byte array.</param>
/// <param name="length">The number of bytes to read into the byte array for compression.</param>
/// <returns>A decompressed <see cref="Byte"/> array.</returns>
public static byte[] Decompress(this byte[] source, int startIndex, int length)
{
    const byte CompressionStrengthMask = (byte)(Bits.Bit00 | Bits.Bit01);

    // Header byte: two lowest bits carry compression strength, remaining bits carry depth
    byte header = source[startIndex];
    CompressionStrength strength = (CompressionStrength)(header & CompressionStrengthMask);

    // No compression applied to original buffer - return requested portion (minus header byte)
    if (strength == CompressionStrength.NoCompression)
        return source.BlockCopy(startIndex + 1, length - 1);

    int compressionDepth = (header & ~CompressionStrengthMask) >> 2;
    byte[] uncompressed;

    // Inflate the compressed payload that follows the header byte
    using (BlockAllocatedMemoryStream compressedData = new BlockAllocatedMemoryStream(source, startIndex + 1, length - 1))
    using (DeflateStream inflater = new DeflateStream(compressedData, CompressionMode.Decompress))
    {
        uncompressed = inflater.ReadStream();
    }

    // Multi-pass compression may have been applied repeatedly - recurse until fully restored
    if (strength == CompressionStrength.MultiPass && compressionDepth > 0)
        return uncompressed.Decompress();

    return uncompressed;
}
/// <summary>
/// Writes a sequence of bytes onto the stream for parsing.
/// </summary>
/// <param name="source">Defines the source channel for the data.</param>
/// <param name="buffer">An array of bytes. This method copies count bytes from buffer to the current stream.</param>
/// <param name="offset">The zero-based byte offset in buffer at which to begin copying bytes to the current stream.</param>
/// <param name="count">The number of bytes to be written to the current stream.</param>
public override void Parse(SourceChannel source, byte[] buffer, int offset, int count)
{
    // Since the Macrodyne implementation supports both 0xAA and 0xBB as sync-bytes, we must manually check for both during stream initialization;
    // the base class handles this only when there is a consistently defined set of sync-bytes, not variable.
    if (Enabled)
    {
        // See if there are any 0xAA 0xAA sequences - these must be removed
        // (a repeated 0xAA is treated as a stuffed byte, so each pair is collapsed to a single 0xAA)
        int syncBytePosition = buffer.IndexOfSequence(new byte[] { 0xAA, 0xAA }, offset, count);

        while (syncBytePosition > -1)
        {
            // Rebuild the buffer with the duplicated byte removed; note this copies the
            // entire working region each time a pair is found
            using (BlockAllocatedMemoryStream newBuffer = new BlockAllocatedMemoryStream())
            {
                // Write buffer before repeated byte (inclusive of the first 0xAA of the pair)
                newBuffer.Write(buffer, offset, syncBytePosition - offset + 1);

                // Skip over the second (repeated) 0xAA
                int nextByte = syncBytePosition + 2;

                // Write buffer after repeated byte, if any
                if (nextByte < offset + count)
                {
                    newBuffer.Write(buffer, nextByte, offset + count - nextByte);
                }

                buffer = newBuffer.ToArray();
            }

            // Working region is now the whole rebuilt buffer
            offset = 0;
            count = buffer.Length;

            // Find next 0xAA 0xAA sequence
            syncBytePosition = buffer.IndexOfSequence(new byte[] { 0xAA, 0xAA }, offset, count);
        }

        if (StreamInitialized)
        {
            // Stream already aligned to a frame boundary - parse as-is
            base.Parse(source, buffer, offset, count);
        }
        else
        {
            // Initial stream may be anywhere in the middle of a frame, so we attempt to locate sync-bytes to "line-up" data stream.
            // First we look for data frame sync-byte:
            syncBytePosition = buffer.IndexOfSequence(new byte[] { 0xAA }, offset, count);

            if (syncBytePosition > -1)
            {
                StreamInitialized = true;
                base.Parse(source, buffer, syncBytePosition, count - (syncBytePosition - offset));
            }
            else
            {
                // Second we look for command frame response sync-byte:
                syncBytePosition = buffer.IndexOfSequence(new byte[] { 0xBB }, offset, count);

                if (syncBytePosition > -1)
                {
                    StreamInitialized = true;
                    base.Parse(source, buffer, syncBytePosition, count - (syncBytePosition - offset));
                }
                // NOTE(review): if neither sync-byte is found, the data is silently dropped until
                // a later call locates one - presumably intentional for mid-frame startup
            }
        }
    }
}
/// <summary>
/// Reads XML from the configuration file.
/// </summary>
/// <param name="reader">The <see cref="System.Xml.XmlReader"/> object, which reads from the configuration file.</param>
protected override void DeserializeSection(XmlReader reader)
{
    // Buffer the section XML so it can optionally be replayed to the base class below
    using (BlockAllocatedMemoryStream configSectionStream = new BlockAllocatedMemoryStream())
    {
        XmlDocument configSection = new XmlDocument();
        configSection.Load(reader);
        configSection.Save(configSectionStream);

        // Adds all the categories that are under the categorizedSettings section of the configuration file
        // to the property collection. Again, this is essentially doing what marking a property with the
        // <ConfigurationProperty()> attribute does. If this is not done, then an exception will be raised
        // when the category elements are being deserialized.
        if ((object)configSection.DocumentElement != null)
        {
            XmlNodeList categories = configSection.DocumentElement.SelectNodes("*");

            if ((object)categories != null)
            {
                foreach (XmlNode category in categories)
                {
                    // Register the category so the framework will accept it during deserialization
                    ConfigurationProperty configProperty = new ConfigurationProperty(category.Name, typeof(CategorizedSettingsElementCollection));

                    base.Properties.Add(configProperty);

                    // When a sections dictionary exists, this class deserializes the settings itself;
                    // otherwise the buffered XML is replayed to the base class at the bottom
                    if ((object)m_sections != null)
                    {
                        CategorizedSettingsElementCollection settingsCategory = new CategorizedSettingsElementCollection
                        {
                            Name = category.Name,
                            Section = this,
                        };

                        settingsCategory.SetCryptoKey(m_cryptoKey);

                        m_sections.Add(category.Name, settingsCategory);

                        // Read all elements within this category section
                        XmlNodeList elements = category.SelectNodes("*");
                        SettingScope scope;

                        if ((object)elements != null)
                        {
                            foreach (XmlNode element in elements)
                            {
                                // Build each setting from its XML attributes, applying safe defaults:
                                // missing description -> "", missing encrypted -> false, missing scope -> Application
                                CategorizedSettingsElement categorySetting = new CategorizedSettingsElement(settingsCategory);

                                categorySetting.Name = element.GetAttributeValue("name");
                                categorySetting.Value = element.GetAttributeValue("value");
                                categorySetting.Description = element.GetAttributeValue("description") ?? "";
                                categorySetting.Encrypted = element.GetAttributeValue("encrypted").ToNonNullNorWhiteSpace("false").ParseBoolean();

                                if (Enum.TryParse(element.GetAttributeValue("scope").ToNonNullNorWhiteSpace("Application"), out scope))
                                {
                                    categorySetting.Scope = scope;
                                }
                                else
                                {
                                    categorySetting.Scope = SettingScope.Application;
                                }

                                settingsCategory.Add(categorySetting);
                            }
                        }
                    }
                }
            }
        }

        m_sectionLoaded = true;

        // No sections dictionary - let the base class deserialize the buffered XML instead
        if ((object)m_sections == null)
        {
            configSectionStream.Seek(0, SeekOrigin.Begin);
            base.DeserializeSection(XmlReader.Create(configSectionStream));
        }
    }
}
/// <summary>
/// Writes the given record to the PQDIF file.
/// </summary>
/// <param name="record">The record to be written to the file.</param>
/// <param name="lastRecord">Indicates whether this record is the last record in the file.</param>
public void WriteRecord(Record record, bool lastRecord = false)
{
    byte[] bodyImage;
    Adler32 checksum;

    if (m_disposed)
    {
        throw new ObjectDisposedException(GetType().Name);
    }

    using (BlockAllocatedMemoryStream bodyStream = new BlockAllocatedMemoryStream())
    using (BinaryWriter bodyWriter = new BinaryWriter(bodyStream))
    {
        // Write the record body to the memory stream
        if ((object)record.Body != null)
        {
            WriteCollection(bodyWriter, record.Body.Collection);
        }

        // Read and compress the body to a byte array
        bodyImage = bodyStream.ToArray();

        if (m_compressionAlgorithm == CompressionAlgorithm.Zlib && m_compressionStyle == CompressionStyle.RecordLevel)
        {
            bodyImage = ZlibStream.CompressBuffer(bodyImage);
        }

        // Create the checksum after compression (Adler-32 over the bytes as they appear on disk)
        checksum = new Adler32();
        checksum.Update(bodyImage);

        // Store the computed checksum on the record body
        if ((object)record.Body != null)
        {
            record.Body.Checksum = checksum.Value;
        }
    }

    // Fix the pointer to the next record before writing this record.
    // For seekable streams the write position was previously left at the prior record's
    // NextRecordPosition field (see the seek-back at the end of this method), so write
    // this record's starting offset there, then return to the end of the stream.
    if (m_stream.CanSeek && m_stream.Length > 0)
    {
        m_writer.Write((int)m_stream.Length);
        m_stream.Seek(0L, SeekOrigin.End);
    }

    // Make sure the header points to the correct location based on the size of the body
    record.Header.HeaderSize = 64;
    record.Header.BodySize = bodyImage.Length;
    record.Header.NextRecordPosition = (int)m_stream.Length + record.Header.HeaderSize + record.Header.BodySize;
    record.Header.Checksum = checksum.Value;

    // Write up to the next record position
    m_writer.Write(record.Header.RecordSignature.ToByteArray());
    m_writer.Write(record.Header.RecordTypeTag.ToByteArray());
    m_writer.Write(record.Header.HeaderSize);
    m_writer.Write(record.Header.BodySize);

    // The PQDIF standard defines the NextRecordPosition to be 0 for the last record in the file
    // We treat seekable streams differently because we can go back and fix the pointers later
    if (m_stream.CanSeek || lastRecord)
    {
        m_writer.Write(0);
    }
    else
    {
        m_writer.Write(record.Header.NextRecordPosition);
    }

    // Write the rest of the header as well as the body
    m_writer.Write(record.Header.Checksum);
    m_writer.Write(record.Header.Reserved);
    m_writer.Write(bodyImage);

    // If the stream is seekable, seek to the next record
    // position so we can fix the pointer if we end up
    // writing another record to the file
    // (24 = checksum (4) + reserved (16) + the 4-byte NextRecordPosition field itself)
    if (m_stream.CanSeek)
    {
        m_stream.Seek(-(24 + record.Header.BodySize), SeekOrigin.Current);
    }

    // Dispose of the writer if this is the last record
    if (!m_stream.CanSeek && lastRecord)
    {
        Dispose();
    }
}
/// <summary>
/// Rotates or initializes the crypto keys for this <see cref="ClientConnection"/>.
/// </summary>
public bool RotateCipherKeys()
{
    // Make sure at least a second has passed before next key rotation
    if ((DateTime.UtcNow.Ticks - m_lastCipherKeyUpdateTime).ToMilliseconds() >= 1000.0D)
    {
        try
        {
            // Since this function cannot be called more than once per second there
            // is no real benefit to maintaining these memory streams at a member level
            using (BlockAllocatedMemoryStream response = new BlockAllocatedMemoryStream())
            {
                byte[] bytes, bufferLen;

                // Create or update cipher keys and initialization vectors
                UpdateKeyIVs();

                // Add current cipher index to response
                response.WriteByte((byte)m_cipherIndex);

                // Serialize new keys - wire format is a big-endian length prefix followed by
                // the raw bytes for each of: even key, even IV, odd key, odd IV (in that order)
                using (BlockAllocatedMemoryStream buffer = new BlockAllocatedMemoryStream())
                {
                    // Write even key
                    bufferLen = BigEndian.GetBytes(m_keyIVs[EvenKey][KeyIndex].Length);
                    buffer.Write(bufferLen, 0, bufferLen.Length);
                    buffer.Write(m_keyIVs[EvenKey][KeyIndex], 0, m_keyIVs[EvenKey][KeyIndex].Length);

                    // Write even initialization vector
                    bufferLen = BigEndian.GetBytes(m_keyIVs[EvenKey][IVIndex].Length);
                    buffer.Write(bufferLen, 0, bufferLen.Length);
                    buffer.Write(m_keyIVs[EvenKey][IVIndex], 0, m_keyIVs[EvenKey][IVIndex].Length);

                    // Write odd key
                    bufferLen = BigEndian.GetBytes(m_keyIVs[OddKey][KeyIndex].Length);
                    buffer.Write(bufferLen, 0, bufferLen.Length);
                    buffer.Write(m_keyIVs[OddKey][KeyIndex], 0, m_keyIVs[OddKey][KeyIndex].Length);

                    // Write odd initialization vector
                    bufferLen = BigEndian.GetBytes(m_keyIVs[OddKey][IVIndex].Length);
                    buffer.Write(bufferLen, 0, bufferLen.Length);
                    buffer.Write(m_keyIVs[OddKey][IVIndex], 0, m_keyIVs[OddKey][IVIndex].Length);

                    // Get bytes from serialized buffer
                    bytes = buffer.ToArray();
                }

                // Encrypt keys using private keys known only to current client and server
                if (m_authenticated && !string.IsNullOrWhiteSpace(m_sharedSecret))
                {
                    bytes = bytes.Encrypt(m_sharedSecret, CipherStrength.Aes256);
                }

                // Add serialized key response
                response.Write(bytes, 0, bytes.Length);

                // Send cipher key updates
                m_parent.SendClientResponse(m_clientID, ServerResponse.UpdateCipherKeys, ServerCommand.Subscribe, response.ToArray());
            }

            // Send success message
            m_parent.SendClientResponse(m_clientID, ServerResponse.Succeeded, ServerCommand.RotateCipherKeys, "New cipher keys established.");
            m_parent.OnStatusMessage(MessageLevel.Info, $"{ConnectionID} cipher keys rotated.");
            return(true);
        }
        catch (Exception ex)
        {
            // Send failure message
            m_parent.SendClientResponse(m_clientID, ServerResponse.Failed, ServerCommand.RotateCipherKeys, "Failed to establish new cipher keys: " + ex.Message);
            m_parent.OnStatusMessage(MessageLevel.Warning, $"Failed to establish new cipher keys for {ConnectionID}: {ex.Message}");
            return(false);
        }
    }

    // Rotation requested too soon after the previous one - refuse and report
    m_parent.SendClientResponse(m_clientID, ServerResponse.Failed, ServerCommand.RotateCipherKeys, "Cipher key rotation skipped, keys were already rotated within last second.");
    m_parent.OnStatusMessage(MessageLevel.Warning, $"Cipher key rotation skipped for {ConnectionID}, keys were already rotated within last second.");
    return(false);
}
// Serializes a DataSet to a binary wire format:
//   dataset name, table count, then per table: name, column count, column metadata
//   (name + type byte, high bit set when nullable markers are in use), row count,
//   then per row a type-specific binary value for each serializable column.
// Columns with unrecognized data types (DataType.Object) are skipped entirely.
// NOTE: the BinaryWriter is intentionally not disposed so the caller's stream stays open.
public static void SerializeToStream(this DataSet source, Stream destination, bool assumeStringForUnknownTypes = true, bool useNullableDataTypes = true)
{
    if (source == null)
    {
        throw new ArgumentNullException(nameof(source));
    }

    if (destination == null)
    {
        throw new ArgumentNullException(nameof(destination));
    }

    if (!destination.CanWrite)
    {
        throw new InvalidOperationException("Cannot write to a read-only stream");
    }

    BinaryWriter output = new BinaryWriter(destination);

    // Serialize dataset name and table count
    output.Write(source.DataSetName);
    output.Write(source.Tables.Count);

    // Serialize tables
    foreach (DataTable table in source.Tables)
    {
        List<int> columnIndices = new List<int>();
        List<DataType> columnDataTypes = new List<DataType>();

        // Serialize column metadata
        using (BlockAllocatedMemoryStream columnMetaDataStream = new BlockAllocatedMemoryStream())
        {
            BinaryWriter columnMetaData = new BinaryWriter(columnMetaDataStream);

            foreach (DataColumn column in table.Columns)
            {
                // Get column data type, unknown types will be represented as object
                DataType dataType = GetDataType(column.DataType, assumeStringForUnknownTypes);

                // Only objects of a known type can be properly serialized
                if (dataType == DataType.Object)
                {
                    continue;
                }

                // High bit of the type byte flags that rows carry a per-value null marker
                byte dtByte = (byte)dataType;

                if (useNullableDataTypes)
                {
                    dtByte |= 0x80;
                }

                // Serialize column name and type
                columnMetaData.Write(column.ColumnName);
                columnMetaData.Write(dtByte);

                // Track data types and column indices in parallel lists for faster DataRow serialization
                columnIndices.Add(column.Ordinal);
                columnDataTypes.Add(dataType);
            }

            // Serialize table name and column count (count of serializable columns only)
            output.Write(table.TableName);
            output.Write(columnIndices.Count);

            // Write column metadata
            output.Write(columnMetaDataStream.ToArray(), 0, (int)columnMetaDataStream.Length);
        }

        // Serialize row count
        output.Write(table.Rows.Count);

        // Serialize rows
        foreach (DataRow row in table.Rows)
        {
            // Serialize column data
            for (int i = 0; i < columnIndices.Count; i++)
            {
                object value = row[columnIndices[i]];

                if (useNullableDataTypes)
                {
                    // One marker byte per value: 1 = DBNull (no value follows), 0 = value follows
                    output.Write((byte)(value == DBNull.Value ? 1 : 0));

                    if (value == DBNull.Value)
                    {
                        continue;
                    }
                }

                // Write the value in its type-specific binary form; DateTime/TimeSpan
                // are serialized as ticks, Guid as its 16-byte array
                switch (columnDataTypes[i])
                {
                    case DataType.Boolean:
                        output.Write(value.NotDBNull<bool>());
                        break;
                    case DataType.Byte:
                        output.Write(value.NotDBNull<byte>());
                        break;
                    case DataType.Char:
                        output.Write(value.NotDBNull<char>());
                        break;
                    case DataType.DateTime:
                        output.Write(value.NotDBNull<DateTime>().Ticks);
                        break;
                    case DataType.Decimal:
                        output.Write(value.NotDBNull<decimal>());
                        break;
                    case DataType.Double:
                        output.Write(value.NotDBNull<double>());
                        break;
                    case DataType.Guid:
                        output.Write(value.NotDBNull<Guid>().ToByteArray());
                        break;
                    case DataType.Int16:
                        output.Write(value.NotDBNull<short>());
                        break;
                    case DataType.Int32:
                        output.Write(value.NotDBNull<int>());
                        break;
                    case DataType.Int64:
                        output.Write(value.NotDBNull<long>());
                        break;
                    case DataType.SByte:
                        output.Write(value.NotDBNull<sbyte>());
                        break;
                    case DataType.Single:
                        output.Write(value.NotDBNull<float>());
                        break;
                    case DataType.String:
                        output.Write(value.NotDBNullString());
                        break;
                    case DataType.TimeSpan:
                        output.Write(value.NotDBNull<TimeSpan>().Ticks);
                        break;
                    case DataType.UInt16:
                        output.Write(value.NotDBNull<ushort>());
                        break;
                    case DataType.UInt32:
                        output.Write(value.NotDBNull<uint>());
                        break;
                    case DataType.UInt64:
                        output.Write(value.NotDBNull<ulong>());
                        break;
                    case DataType.Blob:
                        // Blobs are length-prefixed; null or empty blobs are written as length 0
                        byte[] blob = value.NotDBNull<byte[]>();

                        if (blob == null || blob.Length == 0)
                        {
                            output.Write(0);
                        }
                        else
                        {
                            output.Write(blob.Length);
                            output.Write(blob);
                        }

                        break;
                }
            }
        }
    }
}
// When user requests multi-pass compression, we allow multiple compression passes on a buffer because // this can often produce better compression results private static byte[] Compress(this byte[] source, int startIndex, int length, CompressionStrength strength, int compressionDepth) { if (strength == CompressionStrength.NoCompression) { // No compression requested, return specified portion of the buffer byte[] outBuffer = new byte[++length]; outBuffer[0] = (byte)strength; for (int x = 1; x < length; x++) outBuffer[x] = source[startIndex + x - 1]; return outBuffer; } // Create a new compression deflater using (BlockAllocatedMemoryStream compressedData = new BlockAllocatedMemoryStream()) { using (DeflateStream deflater = new DeflateStream(compressedData, CompressionMode.Compress, true)) { // Provide data for compression deflater.Write(source, startIndex, length); } byte[] destination = compressedData.ToArray(); int destinationLength = destination.Length; // Prepend compression depth and extract only used part of compressed buffer byte[] outBuffer = new byte[++destinationLength]; // First two bits are reserved for compression strength - this leaves 6 bits for a maximum of 64 compressions outBuffer[0] = (byte)((compressionDepth << 2) | (int)strength); for (int x = 1; x < destinationLength; x++) outBuffer[x] = destination[x - 1]; if (strength == CompressionStrength.MultiPass && destinationLength < length && compressionDepth < 64) { // See if another pass would help the compression... byte[] testBuffer = outBuffer.Compress(0, outBuffer.Length, strength, compressionDepth + 1); if (testBuffer.Length < outBuffer.Length) return testBuffer; return outBuffer; } return outBuffer; } }