Example #1
        public void ReadWritePackedUIntWithDeferred()
        {
            using (var dataStream = new DataStreamWriter(300 * 4, Allocator.Persistent))
                using (var compressionModel = new NetworkCompressionModel(Allocator.Persistent))
                {
                    uint base_val = 2000;
                    uint count    = 277;
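                    // Write a placeholder int; the returned deferred handle lets the value be patched in place later via Update()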
                    var  def      = dataStream.Write((int)0);
                    for (uint i = 0; i < count; ++i)
                    {
                        dataStream.WritePackedUInt(base_val + i, compressionModel);
                    }

                    dataStream.Flush();
                    def.Update(1979);
                    def = dataStream.Write((int)0);
                    def.Update(1979);
                    dataStream.Flush();
                    var reader = new DataStreamReader(dataStream, 0, dataStream.Length);
                    var ctx    = default(DataStreamReader.Context);
                    Assert.AreEqual(1979, reader.ReadInt(ref ctx));
                    for (uint i = 0; i < count; ++i)
                    {
                        var val = reader.ReadPackedUInt(ref ctx, compressionModel);
                        Assert.AreEqual(base_val + i, val);
                    }
                    Assert.AreEqual(1979, reader.ReadInt(ref ctx));
                }
        }
Example #2
        public void ReadWritePackedUIntWithDeferred()
        {
            using (var compressionModel = new NetworkCompressionModel(Allocator.Persistent))
            {
                var  dataStream = new DataStreamWriter(300 * 4, Allocator.Temp);
                uint base_val   = 2000;
                uint count      = 277;
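                // DataStreamWriter is a struct over a shared buffer, so copying it captures the current write
                // position; a later WriteInt through the copy overwrites the placeholder written through the original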
                var  def        = dataStream;
                dataStream.WriteInt((int)0);
                for (uint i = 0; i < count; ++i)
                {
                    dataStream.WritePackedUInt(base_val + i, compressionModel);
                }

                dataStream.Flush();
                def.WriteInt(1979);
                def = dataStream;
                dataStream.WriteInt((int)0);
                def.WriteInt(1979);
                dataStream.Flush();
                var reader = new DataStreamReader(dataStream.AsNativeArray());
                Assert.AreEqual(1979, reader.ReadInt());
                for (uint i = 0; i < count; ++i)
                {
                    var val = reader.ReadPackedUInt(compressionModel);
                    Assert.AreEqual(base_val + i, val);
                }
                Assert.AreEqual(1979, reader.ReadInt());
            }
        }
Example #3
            public void Execute([ReadOnly] ref NetworkStreamConnection connection,
                                [ReadOnly] ref NetworkSnapshotAckComponent ack, [ReadOnly] ref CommandTargetComponent state)
            {
                if (isNullCommandData && state.targetEntity != Entity.Null)
                {
                    return;
                }
                if (!isNullCommandData && !inputFromEntity.Exists(state.targetEntity))
                {
                    return;
                }
                DataStreamWriter writer = driver.BeginSend(unreliablePipeline, connection.Value);

                if (!writer.IsCreated)
                {
                    return;
                }
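                // Command packet header: protocol id, snapshot ack state, and timing/tick info, followed by the serialized inputs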
                writer.WriteByte((byte)NetworkStreamProtocol.Command);
                writer.WriteUInt(ack.LastReceivedSnapshotByLocal);
                writer.WriteUInt(ack.ReceivedSnapshotByLocalMask);
                writer.WriteUInt(localTime);
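                // Echo the last received remote timestamp, adjusted by how long it was held locally,
                // so the peer can estimate round-trip time (see the sign discussion in Example #5)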
                uint returnTime = ack.LastReceivedRemoteTime;

                if (returnTime != 0)
                {
                    returnTime -= (localTime - ack.LastReceiveTimestamp);
                }
                writer.WriteUInt(returnTime);
                writer.WriteUInt(interpolationDelay);
                writer.WriteUInt(inputTargetTick);
                if (state.targetEntity != Entity.Null)
                {
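                    // Serialize the input at the target tick in full as the baseline, then the preceding
                    // inputs as deltas against it, so a dropped command packet can be covered by a later one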
                    var          input = inputFromEntity[state.targetEntity];
                    TCommandData baselineInputData;
                    input.GetDataAtTick(inputTargetTick, out baselineInputData);
                    baselineInputData.Serialize(ref writer);
                    for (uint inputIndex = 1; inputIndex < k_InputBufferSendSize; ++inputIndex)
                    {
                        TCommandData inputData;
                        input.GetDataAtTick(inputTargetTick - inputIndex, out inputData);
                        inputData.Serialize(ref writer, baselineInputData, compressionModel);
                    }

                    writer.Flush();
                }
#if UNITY_EDITOR || DEVELOPMENT_BUILD
                netStats[0] = inputTargetTick;
                netStats[1] = (uint)writer.Length;
#endif

                driver.EndSend(writer);
            }
Example #4
        public void ReadWritePackedStringDelta()
        {
            var dataStream       = new DataStreamWriter(300 * 4, Allocator.Temp);
            var compressionModel = new NetworkCompressionModel(Allocator.Temp);

            NativeString64 src      = new NativeString64("This is a string");
            NativeString64 baseline = new NativeString64("This is another string");

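            // Encode src as a delta against the baseline; the reader must supply the same baseline to reconstruct it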
            dataStream.WritePackedStringDelta(src, baseline, compressionModel);
            dataStream.Flush();

            //Assert.LessOrEqual(dataStream.Length, src.LengthInBytes+2);

            var reader = new DataStreamReader(dataStream.AsNativeArray());
            var dst    = reader.ReadPackedStringDelta(baseline, compressionModel);

            Assert.AreEqual(src, dst);
        }
Example #5
            public unsafe void Execute()
            {
                var snapshotAck = ackFromEntity[connectionEntity];
                var ackTick     = snapshotAck.LastReceivedSnapshotByRemote;

                DataStreamWriter dataStream = new DataStreamWriter(2048, Allocator.Temp);

                dataStream.Clear();
                dataStream.Write((byte)NetworkStreamProtocol.Snapshot);

                dataStream.Write(localTime);

                // dataStream.Write(snapshotAck.LastReceivedRemoteTime - (localTime - snapshotAck.LastReceiveTimestamp));
                // TODO: LZ:
                //      to be confirmed
                //      we should send "t0 + (T1 - T0)", not "t0 - (T1 - T0)",
                //
                // because:
                //      RTT should equal (t1 - t0) - (T1 - T0) = t1 - [t0 + (T1 - T0)]
                //      t0: A send time         // snapshotAck.LastReceivedRemoteTime
                //      T0: B receive time      // snapshotAck.LastReceiveTimestamp
                //      T1: B send time         // localTime
                //      t1: A receive time
                dataStream.Write(snapshotAck.LastReceivedRemoteTime + (localTime - snapshotAck.LastReceiveTimestamp));

                dataStream.Write(currentTick);

                int entitySize = UnsafeUtility.SizeOf <Entity>();

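                // Reserve two placeholder length fields; the returned deferred writers are patched with the
                // final despawn and update counts once serialization is done (see the Update calls at the end)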
                var  despawnLenWriter = dataStream.Write((uint)0);
                var  updateLenWriter  = dataStream.Write((uint)0);
                uint despawnLen       = 0;

                // TODO: if not all despawns fit, sort them based on age and maybe time since last send
                // TODO: only resend despawn on nack
                // FIXME: the TargetPacketSize cannot be used since CleanupGhostJob relies on all ghosts being sent every frame
                for (var chunk = 0; chunk < despawnChunks.Length /*&& dataStream.Length < TargetPacketSize*/; ++chunk)
                {
                    var entities = despawnChunks[chunk].GetNativeArray(entityType);
                    var ghosts   = despawnChunks[chunk].GetNativeArray(ghostSystemStateType);
                    for (var ent = 0; ent < entities.Length /*&& dataStream.Length < TargetPacketSize*/; ++ent)
                    {
                        if (ackTick == 0 || SequenceHelpers.IsNewer(ghosts[ent].despawnTick, ackTick))
                        {
                            dataStream.WritePackedUInt((uint)ghosts[ent].ghostId, compressionModel);
                            ++despawnLen;
                        }
                    }
                }

                uint updateLen    = 0;
                var  serialChunks = new NativeList <PrioChunk>(ghostChunks.Length + serialSpawnChunks.Length, Allocator.Temp);

                serialChunks.AddRange(serialSpawnChunks);
                var existingChunks = new NativeHashMap <ArchetypeChunk, int>(ghostChunks.Length, Allocator.Temp);
                // TODO: LZ:
                //      temp hack, fix me
                int maxCount = serialSpawnChunks.Length;

                for (int chunk = 0; chunk < ghostChunks.Length; ++chunk)
                {
                    SerializationState chunkState;
                    var addNew = !chunkSerializationData.TryGetValue(ghostChunks[chunk], out chunkState);
                    // FIXME: should be using chunk sequence number instead of this hack
                    if (!addNew && chunkState.arch != ghostChunks[chunk].Archetype)
                    {
                        UnsafeUtility.Free(chunkState.snapshotData, Allocator.Persistent);
                        chunkSerializationData.Remove(ghostChunks[chunk]);
                        addNew = true;
                    }
                    if (addNew)
                    {
                        chunkState.lastUpdate = currentTick - 1;
                        chunkState.startIndex = 0;
                        chunkState.ghostType  = serializers.FindSerializer(ghostChunks[chunk].Archetype);
                        chunkState.arch       = ghostChunks[chunk].Archetype;

                        chunkState.snapshotWriteIndex = 0;
                        int serializerDataSize = serializers.GetSnapshotSize(chunkState.ghostType);
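                        // Snapshot history buffer layout: SnapshotHistorySize tick indices, followed by one slot
                        // per history entry holding Capacity entities plus Capacity snapshot data records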
                        chunkState.snapshotData = (byte *)UnsafeUtility.Malloc(UnsafeUtility.SizeOf <int>() * GhostSystemConstants.SnapshotHistorySize + GhostSystemConstants.SnapshotHistorySize * ghostChunks[chunk].Capacity * (UnsafeUtility.SizeOf <Entity>() + serializerDataSize), 16, Allocator.Persistent);

                        // Just clear snapshot index
                        UnsafeUtility.MemClear(chunkState.snapshotData, UnsafeUtility.SizeOf <int>() * GhostSystemConstants.SnapshotHistorySize);

                        chunkSerializationData.TryAdd(ghostChunks[chunk], chunkState);
                    }

                    existingChunks.TryAdd(ghostChunks[chunk], 1);
                    // FIXME: only if modified or force sync
                    var ghostType = chunkState.ghostType;
                    var pc        = new PrioChunk
                    {
                        chunk      = ghostChunks[chunk],
                        ghostState = null,
                        priority   = serializers.CalculateImportance(ghostType, ghostChunks[chunk]) * (int)(currentTick - chunkState.lastUpdate),
                        startIndex = chunkState.startIndex,
                        ghostType  = ghostType
                    };
                    serialChunks.Add(pc);
                    if (ghostChunks[chunk].Count > maxCount)
                    {
                        maxCount = ghostChunks[chunk].Count;
                    }
                }

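                // Free cached snapshot history for chunks that are no longer present in ghostChunks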
                var oldChunks = chunkSerializationData.GetKeyArray(Allocator.Temp);

                for (int i = 0; i < oldChunks.Length; ++i)
                {
                    int val;
                    if (!existingChunks.TryGetValue(oldChunks[i], out val))
                    {
                        SerializationState chunkState;
                        chunkSerializationData.TryGetValue(oldChunks[i], out chunkState);
                        UnsafeUtility.Free(chunkState.snapshotData, Allocator.Persistent);
                        chunkSerializationData.Remove(oldChunks[i]);
                    }
                }

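                // Sort the chunks by priority so the most important ghosts are serialized first within the TargetPacketSize budget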
                NativeArray <PrioChunk> serialChunkArray = serialChunks;

                serialChunkArray.Sort();
                var availableBaselines = new NativeList <SnapshotBaseline>(GhostSystemConstants.SnapshotHistorySize, Allocator.Temp);
                var baselinePerEntity  = new NativeArray <int>(maxCount * 3, Allocator.Temp);

                for (int pc = 0; pc < serialChunks.Length && dataStream.Length < TargetPacketSize; ++pc)
                {
                    var chunk     = serialChunks[pc].chunk;
                    var ghostType = serialChunks[pc].ghostType;

                    Entity *           currentSnapshotEntity = null;
                    byte *             currentSnapshotData   = null;
                    SerializationState chunkState;
                    int dataSize = 0;
                    availableBaselines.Clear();
                    if (chunkSerializationData.TryGetValue(chunk, out chunkState))
                    {
                        dataSize = serializers.GetSnapshotSize(chunkState.ghostType);

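                        // Walk the snapshot history ring buffer backwards from the write index and collect
                        // every snapshot the remote has acked as a potential delta baseline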
                        uint *snapshotIndex = (uint *)chunkState.snapshotData;
                        snapshotIndex[chunkState.snapshotWriteIndex] = currentTick;
                        int baseline = (GhostSystemConstants.SnapshotHistorySize + chunkState.snapshotWriteIndex - 1) % GhostSystemConstants.SnapshotHistorySize;
                        while (baseline != chunkState.snapshotWriteIndex)
                        {
                            if (snapshotAck.IsReceivedByRemote(snapshotIndex[baseline]))
                            {
                                byte *dataBase = chunkState.snapshotData +
                                                 UnsafeUtility.SizeOf <int>() * GhostSystemConstants.SnapshotHistorySize +
                                                 baseline * (dataSize + entitySize) * chunk.Capacity;
                                availableBaselines.Add(new SnapshotBaseline
                                {
                                    tick     = snapshotIndex[baseline],
                                    snapshot = dataBase + entitySize * chunk.Capacity,
                                    entity   = (Entity *)(dataBase)
                                });
                            }

                            baseline = (GhostSystemConstants.SnapshotHistorySize + baseline - 1) % GhostSystemConstants.SnapshotHistorySize;
                        }
                        // Find the acked snapshot to delta against, setup pointer to current and previous entity* and data*
                        // Remember to bump writeIndex when done
                        currentSnapshotData   = chunkState.snapshotData + UnsafeUtility.SizeOf <int>() * GhostSystemConstants.SnapshotHistorySize;
                        currentSnapshotData  += chunkState.snapshotWriteIndex * (dataSize + entitySize) * chunk.Capacity;
                        currentSnapshotEntity = (Entity *)currentSnapshotData;
                        currentSnapshotData  += entitySize * chunk.Capacity;
                    }

                    var ghosts = serialChunks[pc].ghostState;
                    if (ghosts == null)
                    {
                        ghosts = (GhostSystemStateComponent *)chunk.GetNativeArray(ghostSystemStateType).GetUnsafeReadOnlyPtr();
                    }

                    var ghostEntities = chunk.GetNativeArray(entityType);
                    int ent;
                    if (serialChunks[pc].startIndex < chunk.Count)
                    {
                        dataStream.WritePackedUInt((uint)ghostType, compressionModel);
                        dataStream.WritePackedUInt((uint)(chunk.Count - serialChunks[pc].startIndex), compressionModel);
                    }

                    // First figure out the baselines to use per entity so they can be sent as baseline + maxCount instead of one per entity
                    int targetBaselines = serializers.WantsPredictionDelta(ghostType) ? 3 : 1;
                    for (ent = serialChunks[pc].startIndex; ent < chunk.Count; ++ent)
                    {
                        int foundBaselines = 0;
                        for (int baseline = 0; baseline < availableBaselines.Length; ++baseline)
                        {
                            if (availableBaselines[baseline].entity[ent] == ghostEntities[ent])
                            {
                                baselinePerEntity[ent * 3 + foundBaselines] = baseline;
                                ++foundBaselines;
                                if (foundBaselines == targetBaselines)
                                {
                                    break;
                                }
                            }
                            // Only way an entity can be missing from a snapshot but be available in an older is if last snapshot was partial
                            else if (availableBaselines[baseline].entity[ent] != Entity.Null)
                            {
                                break;
                            }
                        }

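                        // Per entity, deltas use either 1 or 3 baselines: two found baselines are treated as one,
                        // and unused slots are marked with -1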
                        if (foundBaselines == 2)
                        {
                            foundBaselines = 1;
                        }
                        while (foundBaselines < 3)
                        {
                            baselinePerEntity[ent * 3 + foundBaselines] = -1;
                            ++foundBaselines;
                        }
                    }
                    ent = serializers.Serialize(ghostType, chunk, serialChunks[pc].startIndex, currentTick,
                                                currentSnapshotEntity, currentSnapshotData, ghosts, ghostEntities,
                                                baselinePerEntity, availableBaselines, dataStream, compressionModel);
                    updateLen += (uint)(ent - serialChunks[pc].startIndex);

                    // Spawn chunks are temporary and should not be added to the state data cache
                    if (serialChunks[pc].ghostState == null)
                    {
                        // Only append chunks which contain data
                        if (ent > serialChunks[pc].startIndex)
                        {
                            if (serialChunks[pc].startIndex > 0)
                            {
                                UnsafeUtility.MemClear(currentSnapshotEntity, entitySize * serialChunks[pc].startIndex);
                            }
                            if (ent < chunk.Capacity)
                            {
                                UnsafeUtility.MemClear(currentSnapshotEntity + ent, entitySize * (chunk.Capacity - ent));
                            }
                            chunkState.snapshotWriteIndex = (chunkState.snapshotWriteIndex + 1) % GhostSystemConstants.SnapshotHistorySize;
                        }

                        if (ent >= chunk.Count)
                        {
                            chunkState.lastUpdate = currentTick;
                            chunkState.startIndex = 0;
                        }
                        else
                        {
                            // TODO: should this always be run or should partial chunks only be allowed for the highest priority chunk?
                            //if (pc == 0)
                            chunkState.startIndex = ent;
                        }
                        chunkSerializationData.Remove(chunk);
                        chunkSerializationData.TryAdd(chunk, chunkState);
                    }
                }

                dataStream.Flush();
                despawnLenWriter.Update(despawnLen);
                updateLenWriter.Update(updateLen);

                driver.Send(unreliablePipeline, connectionFromEntity[connectionEntity].Value, dataStream);
            }