/// <summary>Creates a new writer whose buffer is pre-filled with this subspace's key prefix.</summary>
/// <param name="self">Subspace whose prefix seeds the buffer</param>
/// <param name="extra">Extra capacity to reserve past the prefix (default 32)</param>
/// <returns>Writer positioned just after the prefix, ready for the caller to append key parts</returns>
internal static SliceWriter OpenWriter(this IKeySubspace self, int extra = 32)
{
    var prefix = self.GetPrefix();
    // Reserve prefix + headroom up front so typical appends do not reallocate.
    var writer = new SliceWriter(prefix.Count + extra); //TODO: BufferPool ?
    writer.WriteBytes(prefix);
    return writer;
}
/// <summary>Encodes a value as a length-prefixed, self-terminating term.</summary>
/// <param name="output">Buffer that receives the varint length followed by the payload</param>
/// <param name="value">Value to encode</param>
void IUnorderedTypeCodec <TDocument> .EncodeUnorderedSelfTerm(ref SliceWriter output, TDocument value)
{
    var encoded = EncodeInternal(value);
    Contract.Assert(encoded.Count >= 0);
    // Prefix the payload with its byte length so a reader knows where the term ends.
    output.WriteVarInt32((uint) encoded.Count);
    output.WriteBytes(in encoded);
}
/// <summary>Encodes a single value into a key appended after the given prefix.</summary>
/// <param name="encoder">Encoder used to serialize <paramref name="value"/></param>
/// <param name="prefix">Bytes copied verbatim at the start of the key</param>
/// <param name="value">Value encoded after the prefix</param>
public static Slice EncodeKey <T1>([NotNull] this IKeyEncoder <T1> encoder, Slice prefix, T1 value)
{
    // ~16 extra bytes covers common cases such as T1 = Guid.
    var writer = new SliceWriter(prefix.Count + 16);
    writer.WriteBytes(in prefix);
    encoder.WriteKeyTo(ref writer, value);
    return writer.ToSlice();
}
/// <summary>Increments the etag counter and renders the new value as a key slice.</summary>
/// <returns>Big-endian encoding of the freshly allocated etag (big-endian keeps keys sorted in etag order)</returns>
private Slice GetNextEtagKey()
{
    var etag = ++lastEtag;
    var writer = new SliceWriter(keyBuffer);
    writer.WriteBigEndian(etag);
    return writer.CreateSlice();
}
/// <summary>Encodes a pair of values into a key appended after the given prefix.</summary>
/// <param name="encoder">Composite encoder used to serialize both items</param>
/// <param name="prefix">Bytes copied verbatim at the start of the key</param>
/// <param name="item1">First component of the key</param>
/// <param name="item2">Second component of the key</param>
public static Slice EncodeKey <T1, T2>(this ICompositeKeyEncoder <T1, T2> encoder, Slice prefix, T1 item1, T2 item2)
{
    // 24 extra bytes is a heuristic for two typical components; the writer grows if needed.
    var writer = new SliceWriter(prefix.Count + 24);
    writer.WriteBytes(in prefix);
    encoder.WriteKeyTo(ref writer, item1, item2);
    return writer.ToSlice();
}
/// <summary>Packs every item of a tuple at the current end of the buffer.</summary>
/// <param name="writer">Buffer that receives the packed tuple</param>
/// <param name="items">Tuple whose items are encoded in order</param>
public void PackKey <TTuple>(ref SliceWriter writer, TTuple items) where TTuple : IVarTuple
{
    // Wrap the slice writer, encode the tuple, then hand the (possibly resized) buffer back.
    var tupleWriter = new TupleWriter(writer);
    TupleEncoder.WriteTo(ref tupleWriter, items);
    writer = tupleWriter.Output;
}
/// <summary>Serialize a <typeparamref name="T"/> into a binary buffer</summary>
/// <param name="writer">Target buffer (does not need to be preallocated)</param>
/// <param name="value">Value that will be serialized</param>
/// <remarks>
/// This method DOES NOT support embedded tuples, and assumes we are serializing a top-level Tuple!
/// If you need support for embedded tuples, use <see cref="SerializeTo(ref TupleWriter,T)"/> instead!
/// </remarks>
public static void SerializeTo(ref SliceWriter writer, T value)
{
    var tupleWriter = new TupleWriter(writer);
    Encoder(ref tupleWriter, value);
    writer = tupleWriter.Output; //REVIEW: we lose the depth information here! :(
}
/// <summary>Encodes a single value as a tuple element at the end of the buffer.</summary>
/// <param name="writer">Buffer that receives the encoded item</param>
/// <param name="item1">Value to encode</param>
public void EncodeKey <T1>(ref SliceWriter writer, T1 item1)
{
    var tupleWriter = new TupleWriter(writer);
    FdbTuplePacker <T1> .SerializeTo(ref tupleWriter, item1);
    writer = tupleWriter.Output;
}
/// <summary>Builds a key of the form [bucket (big-endian int32)][etag bytes].</summary>
/// <param name="bucket">Bucket number, written big-endian so keys sort numerically</param>
/// <param name="id">Etag whose raw bytes terminate the key</param>
private Slice CreateBucketAndEtagKey(int bucket, Etag id)
{
    var writer = new SliceWriter(20); // 4 bytes bucket + 16 bytes etag
    writer.WriteBigEndian(bucket);
    writer.Write(id.ToByteArray());
    return writer.CreateSlice();
}
/// <summary>Encodes the value using the tuple encoding at the end of the buffer.</summary>
/// <param name="output">Buffer that receives the encoded value</param>
/// <param name="value">Value to encode</param>
public override void EncodeOrderedSelfTerm(ref SliceWriter output, T value)
{
    //HACKHACK: we lose the current depth!
    var tupleWriter = new TupleWriter(output);
    TuplePackers.SerializeTo(ref tupleWriter, value);
    output = tupleWriter.Output;
}
/// <summary>Pack a tuple into a key, using the specified encoder</summary>
/// <param name="encoder">Encoder that performs the actual packing</param>
/// <param name="tuple">Tuple whose items make up the key</param>
public static Slice Pack <TTuple>(this IDynamicKeyEncoder encoder, TTuple tuple) where TTuple : IVarTuple
{
    // Rough capacity guess of ~8 bytes per item; the writer grows on demand.
    var writer = new SliceWriter(checked (tuple.Count * 8));
    encoder.PackKey(ref writer, tuple);
    return writer.ToSlice();
}
/// <summary>Appends the packed representation of the three items to the writer, in order.</summary>
/// <param name="writer">Target buffer that receives Item1, Item2 then Item3</param>
public void PackTo(ref SliceWriter writer)
{
    FdbTuplePacker <T1> .Encoder(ref writer, this.Item1);
    FdbTuplePacker <T2> .Encoder(ref writer, this.Item2);
    FdbTuplePacker <T3> .Encoder(ref writer, this.Item3);
}
/// <summary>Appends every slice of this segment to the writer, in order.</summary>
/// <param name="writer">Target buffer that receives the raw bytes of each slice</param>
public void PackTo(ref SliceWriter writer)
{
    var slices = m_slices;
    // Walk the [m_offset, m_offset + m_count) window of the backing array.
    int end = m_offset + m_count;
    for (int i = m_offset; i < end; i++)
    {
        writer.WriteBytes(slices[i]);
    }
}
/// <summary>Encodes a key using the configured packing delegate.</summary>
/// <param name="writer">Buffer that receives the encoded key</param>
/// <param name="value">Value to encode</param>
/// <exception cref="InvalidOperationException">If no packing delegate has been configured</exception>
public void WriteKeyTo(ref SliceWriter writer, TKey value)
{
    var pack = this.Pack as Func <TKey, Slice>;
    if (pack == null)
    {
        throw new InvalidOperationException();
    }
    writer.WriteBytes(pack(value));
}
/// <summary>Queues a delete marker for the given document id in the "deletes" batch.</summary>
/// <param name="id">Document id, written big-endian as the key</param>
public void DeleteIndexEntry(long id)
{
    var writer = new SliceWriter(sizeof(long));
    writer.WriteBigEndian(id);
    _currentDocumentIdSlice = writer.CreateSlice();
    // An empty stream is enough: the key itself carries the deletion.
    _writeBatch.Add(_currentDocumentIdSlice, Stream.Null, "deletes");
}
/// <summary>Builds a key of the form [view (int32 BE)][xxhash64(reduceKey) (BE)].</summary>
/// <param name="view">View id, big-endian so keys sort numerically</param>
/// <param name="reduceKey">Reduce key, stored as its 64-bit XXHash rather than raw text</param>
private Slice CreateMappedResultKey(int view, string reduceKey)
{
    var writer = new SliceWriter(12); // 4 bytes view + 8 bytes hash
    writer.WriteBigEndian(view);
    writer.WriteBigEndian(Hashing.XXHash64.CalculateRaw(reduceKey));
    return writer.CreateSlice();
}
/// <summary>Builds a key of the form [view][level][xxhash64(reduceKey)], all big-endian.</summary>
/// <param name="view">View id</param>
/// <param name="level">Reduction level</param>
/// <param name="reduceKey">Reduce key, stored as its 64-bit XXHash rather than raw text</param>
private Slice CreateScheduleReductionKey(int view, int level, string reduceKey)
{
    var writer = new SliceWriter(16); // 4 + 4 + 8 bytes
    writer.WriteBigEndian(view);
    writer.WriteBigEndian(level);
    writer.WriteBigEndian(Hashing.XXHash64.CalculateRaw(reduceKey));
    return writer.CreateSlice();
}
/// <summary>Encodes only the first component of a two-part composite key, after the given prefix.</summary>
/// <param name="encoder">Composite encoder; only its first part is invoked (count = 1)</param>
/// <param name="prefix">Bytes copied verbatim at the start of the key</param>
/// <param name="item1">First (and only encoded) component</param>
public static Slice EncodePartialKey <T1, T2>(this ICompositeKeyEncoder <T1, T2> encoder, Slice prefix, T1 item1)
{
    var writer = new SliceWriter(prefix.Count + 16);
    writer.WriteBytes(in prefix);
    // The second slot is a placeholder; count = 1 tells the encoder to skip it.
    var items = (item1, default(T2));
    encoder.WriteKeyPartsTo(ref writer, 1, ref items);
    return writer.ToSlice();
}
/// <summary>Builds a key of the form [view][xxhash64(reduceKey)][level][bucket], all big-endian.</summary>
/// <param name="view">View id</param>
/// <param name="reduceKey">Reduce key, stored as its 64-bit XXHash rather than raw text</param>
/// <param name="level">Reduction level</param>
/// <param name="bucket">Bucket number</param>
private Slice CreateReduceResultsWithBucketKey(int view, string reduceKey, int level, int bucket)
{
    var writer = new SliceWriter(20); // 4 + 8 + 4 + 4 bytes
    writer.WriteBigEndian(view);
    writer.WriteBigEndian(Hashing.XXHash64.CalculateRaw(reduceKey));
    writer.WriteBigEndian(level);
    writer.WriteBigEndian(bucket);
    return writer.CreateSlice();
}
/// <summary>Writes a 64-bit UUID</summary>
/// <param name="writer">Target buffer</param>
/// <param name="value">UUID to append after the <c>Uuid64</c> type marker</param>
public static void WriteUuid64(ref SliceWriter writer, Uuid64 value)
{
    // 1 type byte + 8 payload bytes; reserving up front makes the Unsafe* calls below valid.
    writer.EnsureBytes(9);
    writer.UnsafeWriteByte(FdbTupleTypes.Uuid64);
    unsafe
    {
        // Render the UUID into a small stack buffer, then copy it into the writer.
        byte *ptr = stackalloc byte[8];
        value.WriteTo(ptr);
        writer.UnsafeWriteBytes(ptr, 8);
    }
}
/// <summary>Writes a binary string</summary>
/// <param name="writer">Target buffer</param>
/// <param name="value">Bytes to write; null is encoded as the Nil marker</param>
public static void WriteBytes(ref SliceWriter writer, byte[] value)
{
    if (value != null)
    {
        // Payload bytes go behind the Bytes type marker, with embedded NULs escaped.
        WriteNulEscapedBytes(ref writer, FdbTupleTypes.Bytes, value);
    }
    else
    {
        writer.WriteByte(FdbTupleTypes.Nil);
    }
}
/// <summary>Writes an UInt8 at the end, and advance the cursor</summary>
/// <param name="writer">Target buffer</param>
/// <param name="value">Unsigned BYTE, 8 bits</param>
public static void WriteInt8(ref SliceWriter writer, byte value)
{
    if (value == 0)
    { // zero
        writer.WriteByte(FdbTupleTypes.IntZero);
    }
    else
    { // 1..255: frequent for array index
        writer.WriteByte2(FdbTupleTypes.IntPos1, value);
    }
}
/// <summary>Writes a RFC 4122 encoded 128-bit UUID</summary>
/// <param name="writer">Target buffer</param>
/// <param name="value">UUID to append after the <c>Uuid128</c> type marker</param>
public static void WriteUuid128(ref SliceWriter writer, Uuid128 value)
{
    // 1 type byte + 16 payload bytes; reserving up front makes the Unsafe* calls below valid.
    writer.EnsureBytes(17);
    writer.UnsafeWriteByte(FdbTupleTypes.Uuid128);
    unsafe
    {
        // Render the UUID into a small stack buffer, then copy it into the writer.
        byte *ptr = stackalloc byte[16];
        value.WriteTo(ptr);
        writer.UnsafeWriteBytes(ptr, 16);
    }
}
/// <summary>Encodes three values as consecutive tuple elements at the end of the buffer.</summary>
/// <param name="writer">Buffer that receives the encoded items, in order</param>
public void EncodeKey <T1, T2, T3>(ref SliceWriter writer, T1 item1, T2 item2, T3 item3)
{
    var tupleWriter = new TupleWriter(writer);
    TuplePacker <T1> .SerializeTo(ref tupleWriter, item1);
    TuplePacker <T2> .SerializeTo(ref tupleWriter, item2);
    TuplePacker <T3> .SerializeTo(ref tupleWriter, item3);
    writer = tupleWriter.Output;
}
/// <summary>Encodes the first <paramref name="count"/> components of the pair.</summary>
/// <param name="writer">Buffer that receives the encoded components</param>
/// <param name="count">Number of components to encode (must be &gt; 0)</param>
/// <param name="items">Pair holding the component values</param>
public override void WriteKeyPartsTo(ref SliceWriter writer, int count, ref (T1, T2) items)
{
    Contract.Requires(count > 0);
    if (count < 1) return;
    m_codec1.EncodeOrderedSelfTerm(ref writer, items.Item1);
    if (count < 2) return;
    m_codec2.EncodeOrderedSelfTerm(ref writer, items.Item2);
}
/// <summary>Writes a RFC 4122 encoded 16-byte Microsoft GUID</summary>
/// <param name="writer">Target buffer</param>
/// <param name="value">GUID to append after the <c>Uuid128</c> type marker</param>
public static void WriteGuid(ref SliceWriter writer, Guid value)
{
    // 1 type byte + 16 payload bytes; reserving up front makes the Unsafe* calls below valid.
    writer.EnsureBytes(17);
    writer.UnsafeWriteByte(FdbTupleTypes.Uuid128);
    unsafe
    {
        // UUIDs are stored using the RFC 4122 standard, so we need to swap some parts of the System.Guid
        byte *ptr = stackalloc byte[16];
        Uuid128.Write(value, ptr);
        writer.UnsafeWriteBytes(ptr, 16);
    }
}
/// <summary>Persists the next-id counter (only if it changed) and commits the transaction.</summary>
public void Commit()
{
    ThrowIfDisposed();
    if (_hasIdChanged)
    {
        // Store the updated counter as an 8-byte big-endian value under the metadata key.
        var idWriter = new SliceWriter(sizeof(long));
        idWriter.WriteBigEndian(_nextId);
        MetadataTree.Add(_nextIdKey, idWriter.CreateSlice(_storage.ByteStringContext));
    }
    _tx.Commit();
}
/// <summary>Starts a new index entry, flushing the write batch first if it grew past the threshold.</summary>
public void NewIndexEntry()
{
    if (AutoFlush && _writeBatch.Size() > FlushThresholdBytes)
    {
        Flush();
    }
    CurrentDocumentId = _parent.NextDocumentId();
    // The big-endian encoded document id is the key of the new entry.
    var writer = new SliceWriter(sizeof(long));
    writer.WriteBigEndian(CurrentDocumentId);
    _currentDocumentIdSlice = writer.CreateSlice();
    // An empty stream is enough: the entry's presence is what matters.
    _writeBatch.Add(_currentDocumentIdSlice, Stream.Null, "docs");
}
/// <summary>Encodes five values as consecutive tuple elements at the end of the buffer.</summary>
/// <param name="writer">Buffer that receives the encoded items, in order</param>
public void EncodeKey <T1, T2, T3, T4, T5>(ref SliceWriter writer, T1 item1, T2 item2, T3 item3, T4 item4, T5 item5)
{
    var tupleWriter = new TupleWriter(writer);
    FdbTuplePacker <T1> .SerializeTo(ref tupleWriter, item1);
    FdbTuplePacker <T2> .SerializeTo(ref tupleWriter, item2);
    FdbTuplePacker <T3> .SerializeTo(ref tupleWriter, item3);
    FdbTuplePacker <T4> .SerializeTo(ref tupleWriter, item4);
    FdbTuplePacker <T5> .SerializeTo(ref tupleWriter, item5);
    writer = tupleWriter.Output;
}
/// <summary>Encodes a sequence of values into keys, reusing a single scratch writer.</summary>
/// <param name="writer">Writer already positioned past the shared key prefix</param>
/// <param name="values">Values to encode, one key per value</param>
/// <param name="handler">Callback that writes one value's key bytes into the writer</param>
/// <param name="state">Opaque state forwarded to <paramref name="handler"/></param>
/// <remarks>
/// Performance note: each key is rendered into <paramref name="writer"/> (rewound to just
/// after the prefix for every value) and then copied into a shared slice buffer. The extra
/// memcpy trades off against far fewer byte[] allocations, reducing GC overhead.
/// </remarks>
public static Slice[] Convert(SliceWriter writer, [NotNull, ItemNotNull] IEnumerable <TValue> values, Handler handler, TState state)
{
    Contract.Requires(values != null && handler != null);

    int prefixEnd = writer.Position;
    var pool = new SliceBuffer();

    if (values is ICollection <TValue> coll)
    {
        // The count is known up front: fill a pre-sized array.
        var keys = new Slice[coll.Count];
        int i = 0;
        foreach (var value in coll)
        {
            writer.Position = prefixEnd; // rewind to just after the shared prefix
            handler(ref writer, value, state);
            keys[i++] = pool.Intern(writer.ToSlice());
        }
        Contract.Assert(i == keys.Length);
        return keys;
    }

    // Size unknown until fully enumerated: accumulate in a list, then convert.
    var results = new List <Slice>();
    foreach (var value in values)
    {
        writer.Position = prefixEnd;
        handler(ref writer, value, state);
        results.Add(pool.Intern(writer.ToSlice()));
    }
    return results.ToArray();
}
/// <summary>Builds a key of the form [view][xxhash64(reduceKey)][level], all big-endian.</summary>
/// <param name="view">View id</param>
/// <param name="reduceKey">Reduce key, stored as its 64-bit XXHash rather than raw text</param>
/// <param name="level">Reduction level</param>
private Slice CreateReduceResultsKey(int view, string reduceKey, int level)
{
    var writer = new SliceWriter(16); // 4 + 8 + 4 bytes
    writer.WriteBigEndian(view);
    writer.WriteBigEndian(Hashing.XXHash64.CalculateRaw(reduceKey));
    writer.WriteBigEndian(level);
    return writer.CreateSlice();
}
/// <summary>Composes the [bucket][etag] key used to address an entry inside a bucket.</summary>
/// <param name="bucket">Bucket number, big-endian for numeric sort order</param>
/// <param name="id">Etag appended as raw bytes</param>
private Slice CreateBucketAndEtagKey(int bucket, Etag id)
{
    var keyWriter = new SliceWriter(20);
    keyWriter.WriteBigEndian(bucket);
    keyWriter.Write(id.ToByteArray());
    return keyWriter.CreateSlice();
}
/// <summary>Builds a key of the form [view][xxhash64(reduceKey)][bucket], all big-endian.</summary>
/// <param name="view">View id</param>
/// <param name="reduceKey">Reduce key, stored as its 64-bit XXHash rather than raw text</param>
/// <param name="bucket">Bucket number</param>
private Slice CreateMappedResultWithBucketKey(int view, string reduceKey, int bucket)
{
    var writer = new SliceWriter(16); // 4 + 8 + 4 bytes
    writer.WriteBigEndian(view);
    writer.WriteBigEndian(Hashing.XXHash64.CalculateRaw(reduceKey));
    writer.WriteBigEndian(bucket);
    return writer.CreateSlice();
}