/// <summary>Extracts the sub-range of a segment list delimited by two stream positions,
/// returning it as a new list of segments that alias the original buffers.</summary>
/// <param name="src">The segment list to slice.</param>
/// <param name="from">The inclusive start position (segment index + offset).</param>
/// <param name="to">The exclusive end position (segment index + offset).</param>
/// <returns>A new list holding the segments between <paramref name="from"/> and
/// <paramref name="to"/>; no bytes are copied.</returns>
internal static List<ArraySegment<byte>> Slice(
    this IList<ArraySegment<byte>> src,
    OutputStream.Position from,
    OutputStream.Position to)
{
    var result = new List<ArraySegment<byte>>();

    if (from.Segment == to.Segment)
    {
        // Both positions fall inside the same segment: a single slice covers the range.
        result.Add(src[from.Segment].Slice(from.Offset, to.Offset - from.Offset));
    }
    else
    {
        // Tail of the first segment, skipped when empty.
        ArraySegment<byte> head = src[from.Segment].Slice(from.Offset);
        if (head.Count > 0)
        {
            result.Add(head);
        }

        // Every segment strictly between the endpoints is taken whole.
        for (int index = from.Segment + 1; index < to.Segment; index++)
        {
            result.Add(src[index]);
        }

        // Leading part of the last segment, skipped when empty.
        ArraySegment<byte> tail = src[to.Segment].Slice(0, to.Offset);
        if (tail.Count > 0)
        {
            result.Add(tail);
        }
    }

    return result;
}
// Sends a Stream (or StreamLast, when fin is true) frame carrying the given outgoing
// request or response frame for the stream identified by streamId.
// NOTE(review): this method appears truncated in this view — the remainder of its body
// (and its closing brace) lies outside the visible chunk.
internal override async ValueTask SendAsync(
    long streamId, OutgoingFrame frame, bool fin, CancellationToken cancel)
{
    var data = new List<ArraySegment<byte>>();
    var ostr = new OutputStream(Encoding, data);

    // StreamLast marks the final frame of the stream.
    FrameType frameType = fin ? FrameType.StreamLast : FrameType.Stream;
    ostr.WriteByte((byte)frameType);

    // Reserve a 4-byte placeholder for the frame size, to be patched once known.
    OutputStream.Position sizePos = ostr.StartFixedLengthSize(4);
    ostr.WriteVarLong(streamId);

    // Remember where the ice2 header starts so its size can be computed below.
    OutputStream.Position ice2HeaderPos = ostr.Tail;
    if (frame is OutgoingRequestFrame requestFrame)
    {
        ostr.WriteByte((byte)Ice2Definitions.FrameType.Request);
    }
    else if (frame is OutgoingResponseFrame responseFrame)
    {
        ostr.WriteByte((byte)Ice2Definitions.FrameType.Response);
    }
    else
    {
        // Only request and response frames are expected here.
        Debug.Assert(false);
        return;
    }
    ostr.WriteSize(frame.Size);

    int ice2HeaderSize = ostr.Tail.Offset - ice2HeaderPos.Offset;

    // Trim the last segment down to the bytes actually written.
    data[^1] = data[^1].Slice(0, ostr.Tail.Offset); // TODO: Shouldn't this be the job of ostr.Finish()?
/// <summary>Writes this frame's binary context dictionary to the given output stream:
/// first the override entries, then the initial entries that no override replaced.
/// Requires the ice2 protocol and the 2.0 encoding.</summary>
/// <param name="ostr">The output stream to write the binary context into.</param>
private protected void WriteBinaryContext(OutputStream ostr)
{
    Debug.Assert(Protocol == Protocol.Ice2);
    Debug.Assert(ostr.Encoding == Encoding.V20);

    // Upper bound on the entry count (overridden keys may be counted twice);
    // used only to size the fixed-length placeholder, which is patched below.
    int sizeLength = OutputStream.GetSizeLength20(
        InitialBinaryContext.Count + (_binaryContextOverride?.Count ?? 0));

    int entryCount = 0;
    OutputStream.Position sizeStart = ostr.StartFixedLengthSize(sizeLength);

    // Overrides go first.
    if (_binaryContextOverride is Dictionary<int, Action<OutputStream>> overrides)
    {
        foreach ((int key, Action<OutputStream> writeValue) in overrides)
        {
            ostr.WriteVarInt(key);
            // Each value gets a 2-byte fixed-length size patched once written.
            OutputStream.Position valueStart = ostr.StartFixedLengthSize(2);
            writeValue(ostr);
            ostr.EndFixedLengthSize(valueStart, 2);
            entryCount++;
        }
    }

    // Then the initial entries whose keys were not overridden above.
    foreach ((int key, ReadOnlyMemory<byte> value) in InitialBinaryContext)
    {
        if (_binaryContextOverride != null && _binaryContextOverride.ContainsKey(key))
        {
            continue; // replaced by an override, already written
        }
        ostr.WriteVarInt(key);
        ostr.WriteSize(value.Length);
        ostr.WriteByteSpan(value.Span);
        entryCount++;
    }

    // Patch the actual entry count into the placeholder reserved above.
    ostr.RewriteFixedLengthSize20(entryCount, sizeStart, sizeLength);
}
/// <summary>Compresses the encapsulation payload using GZip compression. Compressed encapsulation payload is
/// only supported with the 2.0 encoding.</summary>
/// <returns>A <see cref="CompressionResult"/> value indicating the result of the compression operation.
/// </returns>
/// <exception cref="InvalidOperationException">Thrown when the frame is sealed or the payload is already
/// compressed.</exception>
/// <exception cref="NotSupportedException">Thrown when the encoding is not 2.0.</exception>
public CompressionResult CompressPayload()
{
    if (IsSealed)
    {
        throw new InvalidOperationException("cannot modify a sealed frame");
    }
    if (Encoding != Encoding.V20)
    {
        throw new NotSupportedException("payload compression is only supported with 2.0 encoding");
    }
    else
    {
        IList<ArraySegment<byte>> payload = Payload;
        // Responses carry one extra byte before the encapsulation; requests do not.
        int encapsulationOffset = this is OutgoingResponseFrame ? 1 : 0;

        // The encapsulation always starts in the first segment of the payload (at position 0 or 1).
        Debug.Assert(encapsulationOffset < payload[0].Count);

        // ice2 uses a variable-length (2.0 encoding) size; ice1 uses a fixed 4-byte size.
        int sizeLength = Protocol == Protocol.Ice2 ?
            payload[0][encapsulationOffset].ReadSizeLength20() : 4;

        // The compression status byte sits after the size and the 2 encoding bytes.
        byte compressionStatus = payload.GetByte(encapsulationOffset + sizeLength + 2);
        if (compressionStatus != 0)
        {
            throw new InvalidOperationException("payload is already compressed");
        }

        int encapsulationSize = payload.GetByteCount() - encapsulationOffset; // this includes the size length
        if (encapsulationSize < _compressionMinSize)
        {
            return (CompressionResult.PayloadTooSmall);
        }

        // Reserve memory for the compressed data, this should never be greater than the uncompressed data
        // otherwise we will just send the uncompressed data.
        byte[] compressedData = new byte[encapsulationOffset + encapsulationSize];

        // Copy the byte before the encapsulation, if any
        if (encapsulationOffset == 1)
        {
            compressedData[0] = payload[0][0];
        }

        // Write the encapsulation header
        int offset = encapsulationOffset + sizeLength;
        compressedData[offset++] = Encoding.Major;
        compressedData[offset++] = Encoding.Minor;

        // Set the compression status to '1' GZip compressed
        compressedData[offset++] = 1;

        // Write the size of the uncompressed data
        OutputStream.WriteFixedLengthSize20(
            encapsulationSize - sizeLength,
            compressedData.AsSpan(offset, sizeLength));
        offset += sizeLength;

        // Fixed-size MemoryStream over the remaining reserved bytes: if GZip needs more room
        // than the uncompressed data occupied, the write below throws NotSupportedException.
        using var memoryStream = new MemoryStream(compressedData, offset, compressedData.Length - offset);
        using var gzipStream = new GZipStream(
            memoryStream,
            _compressionLevel == CompressionLevel.Fastest ?
                System.IO.Compression.CompressionLevel.Fastest :
                System.IO.Compression.CompressionLevel.Optimal);
        try
        {
            // The data to compress starts after the compression status byte, + 3 corresponds to (Encoding 2
            // bytes, Compression status 1 byte)
            foreach (ArraySegment<byte> segment in payload.Slice(encapsulationOffset + sizeLength + 3))
            {
                gzipStream.Write(segment);
            }
            gzipStream.Flush();
        }
        catch (NotSupportedException)
        {
            // If the data doesn't fit in the memory stream NotSupportedException is thrown when GZipStream
            // try to expand the fixed size MemoryStream.
            return (CompressionResult.PayloadNotCompressible);
        }

        int binaryContextLastSegmentOffset = -1;
        if (_binaryContextOstr is OutputStream ostr)
        {
            // If there is a binary context, we make sure it uses its own segment(s).
            OutputStream.Position binaryContextEnd = ostr.Tail;
            binaryContextLastSegmentOffset = binaryContextEnd.Offset;

            // When we have a _binaryContextOstr, we wrote at least the size placeholder for the binary context
            // dictionary.
            Debug.Assert(binaryContextEnd.Segment > PayloadEnd.Segment ||
                binaryContextEnd.Offset > PayloadEnd.Offset);

            // The first segment of the binary context is immediately after the payload
            ArraySegment<byte> segment = Data[PayloadEnd.Segment].Slice(PayloadEnd.Offset);
            if (segment.Count > 0)
            {
                Data.Insert(PayloadEnd.Segment + 1, segment);
                if (binaryContextEnd.Segment == PayloadEnd.Segment)
                {
                    // The binary context now starts at offset 0 of its own segment.
                    binaryContextLastSegmentOffset -= PayloadEnd.Offset;
                }
            }
            // else the binary context already starts with its own segment
        }

        int start = PayloadStart.Segment;
        if (PayloadStart.Offset > 0)
        {
            // There is non payload bytes in the first payload segment: we move them to their own segment.
            ArraySegment<byte> segment = Data[PayloadStart.Segment];
            Data[PayloadStart.Segment] = segment.Slice(0, PayloadStart.Offset);
            start += 1;
        }

        // Replace the uncompressed payload segments with the single compressed segment.
        Data.RemoveRange(start, PayloadEnd.Segment - start + 1);
        offset += (int)memoryStream.Position;
        Data.Insert(start, new ArraySegment<byte>(compressedData, 0, offset));
        PayloadStart = new OutputStream.Position(start, 0);
        PayloadEnd = new OutputStream.Position(start, offset);
        Size = Data.GetByteCount();

        if (_binaryContextOstr != null)
        {
            // Recreate binary context OutputStream
            _binaryContextOstr = new OutputStream(
                _binaryContextOstr.Encoding,
                Data,
                new OutputStream.Position(Data.Count - 1, binaryContextLastSegmentOffset));
        }

        // Rewrite the encapsulation size
        OutputStream.WriteEncapsulationSize(
            offset - sizeLength - encapsulationOffset,
            compressedData.AsSpan(encapsulationOffset, sizeLength),
            Protocol.GetEncoding());

        _payload = null; // reset cache
        return (CompressionResult.Success);
    }
}
/// <summary>Parses a stringified endpoint and creates the corresponding endpoint via the
/// registered factory for its transport. Unknown transports stringified as "opaque" are
/// round-tripped through marshaling when a matching factory exists for their type.</summary>
/// <param name="endpointString">The stringified endpoint, e.g. "tcp -h host -p 10000".</param>
/// <param name="oaEndpoint">True when the endpoint is an object-adapter endpoint.</param>
/// <returns>The endpoint, or null when no factory handles the transport and it is not opaque.</returns>
/// <exception cref="FormatException">Thrown for mismatched quotes, empty input, malformed or
/// duplicate options, or options the factory did not consume.</exception>
public Endpoint? CreateEndpoint(string endpointString, bool oaEndpoint)
{
    // SplitString returns null when a quote is left unbalanced.
    string[]? args = IceUtilInternal.StringUtil.SplitString(endpointString, " \t\r\n");
    if (args == null)
    {
        throw new FormatException($"mismatched quote in endpoint `{endpointString}'");
    }
    if (args.Length == 0)
    {
        throw new FormatException("no non-whitespace character in endpoint string");
    }

    string transport = args[0];
    if (transport == "default")
    {
        transport = DefaultTransport;
    }

    // Collect "-name [value]" pairs following the transport token.
    var options = new Dictionary<string, string?>();
    int n = 1;
    while (n < args.Length)
    {
        string option = args[n++];

        // Any option with < 2 characters or that does not start with - is illegal.
        if (option.Length < 2 || option[0] != '-')
        {
            throw new FormatException($"invalid option `{option}' in endpoint `{endpointString}'");
        }

        // The next token is this option's argument unless it is itself an option.
        string? argument = null;
        if (n < args.Length && args[n][0] != '-')
        {
            argument = args[n++];
        }

        try
        {
            options.Add(option, argument);
        }
        catch (ArgumentException)
        {
            throw new FormatException($"duplicate option `{option}' in endpoint `{endpointString}'");
        }
    }

    // Look up the factory registered for this transport. The scan deliberately keeps
    // scanning after a match (last registration wins), mirroring the original lookup.
    IEndpointFactory? factory = null;
    lock (this)
    {
        foreach (IEndpointFactory candidate in _endpointFactories)
        {
            if (candidate.Transport() == transport)
            {
                factory = candidate;
            }
        }
    }

    if (factory != null)
    {
        // The factory removes the options it consumes; leftovers are an error.
        Endpoint endpoint = factory.Create(endpointString, options, oaEndpoint);
        if (options.Count > 0)
        {
            throw new FormatException(
                $"unrecognized option(s) `{ToString(options)}' in endpoint `{endpointString}'");
        }
        return endpoint;
    }

    // If the stringified endpoint is opaque, create an unknown endpoint, then see
    // whether the type matches one of the known endpoints.
    if (transport == "opaque")
    {
        var opaqueEndpoint = new OpaqueEndpoint(endpointString, options);
        if (options.Count > 0)
        {
            throw new FormatException(
                $"unrecognized option(s) `{ToString(options)}' in endpoint `{endpointString}'");
        }

        if (opaqueEndpoint.Encoding.IsSupported && GetEndpointFactory(opaqueEndpoint.Type) != null)
        {
            // We may be able to unmarshal this endpoint, so we first marshal it into a byte
            // buffer and then unmarshal it from this buffer.
            var bufferList = new List<ArraySegment<byte>>
            {
                // 8 = size of short + size of encapsulation header
                new byte[8 + opaqueEndpoint.Bytes.Length]
            };

            var ostr = new OutputStream(Ice1Definitions.Encoding, bufferList);
            ostr.WriteEndpoint(opaqueEndpoint);
            OutputStream.Position tail = ostr.Save();
            Debug.Assert(bufferList.Count == 1);
            Debug.Assert(tail.Segment == 0 && tail.Offset == 8 + opaqueEndpoint.Bytes.Length);

            return new InputStream(this, bufferList[0]).ReadEndpoint();
        }
        return opaqueEndpoint;
    }

    return null;
}