/// <summary>
/// Writes a <c>RectOffset</c> as four fixed-size Int32 values.
/// Field order is left, right, top, bottom; the matching reader must consume them in the same order.
/// </summary>
public void Serialize(ref byte[] buffer, ref int offset, RectOffset value)
{
    SerializerBinary.WriteInt32Fixed(ref buffer, ref offset, value.left);
    SerializerBinary.WriteInt32Fixed(ref buffer, ref offset, value.right);
    SerializerBinary.WriteInt32Fixed(ref buffer, ref offset, value.top);
    SerializerBinary.WriteInt32Fixed(ref buffer, ref offset, value.bottom);
}
/// <summary>
/// Use this overload whenever you can. The intention is that you reuse the serialization buffer so the serializer
/// only has to resize/reallocate a newer (larger) one if there really is not enough space; instead of allocating
/// an array for every Serialize() call, this lets you avoid GC-pressure.
/// You *can* pass in null for 'buffer' and let the serializer allocate one for you.
/// Returns the number of bytes written (including the embedded checksum, if enabled).
/// </summary>
// NOTE(review): another Serialize<T>(T, ref byte[], int = 0) overload with an identical signature appears later
// in this file — two such members cannot coexist in one class; this looks like two versions of the same method
// pasted together. Confirm which one is current and remove the other.
public int Serialize <T>(T obj, ref byte[] buffer, int offset = 0)
{
    // Guard recursion state for this thread; must be balanced by LeaveRecursive in the finally below.
    EnterRecursive(RecursionMode.Serialization);

    if (buffer == null)
    {
        buffer = new byte[0x4000]; // 16k
    }

    try
    {
        //
        // Root object is the IExternalObject we're serializing (if any)
        // We have to keep track of it so the CacheFormatter knows what NOT to skip
        // otherwise we'd obviously only write one byte lol (the external ID) and nothing else.
        InstanceData.CurrentRoot = obj as IExternalRootObject;

        //
        // The actual serialization
        // The snapshot is taken BEFORE the checksum write, so the returned size covers
        // every byte this call produced (checksum + payload).
        int offsetBeforeWrite = offset;
        {
            if (Config.Advanced.EmbedChecksum)
            {
                SerializerBinary.WriteInt32Fixed(ref buffer, ref offset, ProtocolChecksum.Checksum);
            }

            var formatter = (IFormatter <T>)GetReferenceFormatter(typeof(T));
            formatter.Serialize(ref buffer, ref offset, obj);
        }
        int offsetAfterWrite = offset;

        //
        // After we're done, we have to clear all our caches!
        // Only very rarely can we avoid that
        // todo: implement check-pointing inside the TypeDictionary itself
        if (!Config.Advanced.PersistTypeCache)
        {
            InstanceData.TypeCache.ResetSerializationCache();
        }

        // NOTE(review): the object cache is cleared unconditionally here, while the sibling overload
        // clears it only when !Config.PersistObjectCache — confirm which behavior is intended.
        InstanceData.ObjectCache.ClearSerializationCache();

        int dataSize = offsetAfterWrite - offsetBeforeWrite;
        return (dataSize);
    }
    finally
    {
        //
        // Clear the root object again
        //InstanceData.WrittenSchemata.Clear();
        InstanceData.EncounteredSchemaTypes.Clear();
        InstanceData.CurrentRoot = null;

        LeaveRecursive(RecursionMode.Serialization);
    }
}
/// <summary>
/// Use this overload whenever you can. The intention is that you reuse the serialization buffer so the serializer
/// only has to resize/reallocate a newer (larger) one if there really is not enough space; instead of allocating
/// an array for every Serialize() call, this lets you avoid GC-pressure.
/// You *can* pass in null for 'targetByteArray' and let the serializer allocate one for you.
/// Returns the number of bytes written (including the embedded checksum, if enabled).
/// </summary>
// NOTE(review): this overload has the same signature as the other Serialize<T>(T, ref byte[], int = 0) in this
// file — two such members cannot coexist in one class; likely two versions pasted together. Confirm and dedupe.
public int Serialize <T>(T obj, ref byte[] targetByteArray, int offset = 0)
{
    // Guard recursion state; balanced by LeaveRecursive in the finally below.
    EnterRecursive(RecursionMode.Serialization);

    // The doc comment promises null is accepted — allocate the same 16k default as the sibling overload.
    if (targetByteArray == null)
    {
        targetByteArray = new byte[0x4000]; // 16k
    }

    try
    {
        //
        // Root object is the IExternalObject we're serializing (if any)
        // We have to keep track of it so the CacheFormatter knows what NOT to skip
        // otherwise we'd obviously only write one byte lol (the external ID) and nothing else.
        InstanceData.CurrentRoot = obj as IExternalRootObject;

        var formatter = (IFormatter <T>)GetGenericFormatter(typeof(T));

        //
        // The actual serialization.
        // FIX: snapshot the offset BEFORE the checksum write (it used to be written earlier,
        // outside the try and outside this measurement), so that:
        //  (a) the returned size counts every byte written — callers sending buffer[offset..offset+size]
        //      no longer truncate the checksum, and
        //  (b) an exception while writing the checksum still runs LeaveRecursive via the finally.
        int offsetBeforeWrite = offset;

        if (Config.EmbedChecksum)
        {
            SerializerBinary.WriteInt32Fixed(ref targetByteArray, ref offset, ProtocolChecksum.Checksum);
        }

        formatter.Serialize(ref targetByteArray, ref offset, obj);
        int offsetAfterWrite = offset;

        //
        // After we're done, we probably have to clear all our caches!
        // Only very rarely can we avoid that
        // todo: would it be more efficient to have one static and one dynamic dictionary?
        if (!Config.PersistTypeCache)
        {
            InstanceData.TypeCache.ClearSerializationCache();
            foreach (var t in Config.KnownTypes)
            {
                InstanceData.TypeCache.RegisterObject(t);
            }
        }

        if (!Config.PersistObjectCache)
        {
            InstanceData.ObjectCache.ClearSerializationCache();
        }

        return (offsetAfterWrite - offsetBeforeWrite);
    }
    finally
    {
        //
        // Clear the root object again
        // FIX: the comment was here before but the assignment was missing (the sibling overload has it).
        InstanceData.CurrentRoot = null;

        LeaveRecursive(RecursionMode.Serialization);
    }
}
/// <summary>
/// Writes a <c>LayerMask</c> as one fixed-size Int32 (its raw <c>value</c> field).
/// </summary>
public void Serialize(ref byte[] buffer, ref int offset, LayerMask value)
{
    SerializerBinary.WriteInt32Fixed(ref buffer, ref offset, value.value);
}