static NetworkConnection ProcessSingleConnection(NetworkDriver.Concurrent driver, NetworkConnection connection)
{
    DataStreamReader strm;
    NetworkEvent.Type cmd;
    // Pop all events for the connection
    while ((cmd = driver.PopEventForConnection(connection, out strm)) != NetworkEvent.Type.Empty)
    {
        if (cmd == NetworkEvent.Type.Data)
        {
            // For ping requests we reply with a pong message
            int id = strm.ReadInt();
            // Create a temporary DataStreamWriter to hold our serialized pong message
            if (driver.BeginSend(connection, out var pongData) == 0)
            {
                pongData.WriteInt(id);
                // Send the pong message with the same id as the ping
                driver.EndSend(pongData);
            }
        }
        else if (cmd == NetworkEvent.Type.Disconnect)
        {
            // When disconnected we make sure the connection returns false from IsCreated,
            // so the next frame's DriverUpdateJob will remove it
            return default(NetworkConnection);
        }
    }

    return connection;
}
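A helper like this is typically driven from a parallel job that walks a connection list maintained by a separate driver-update job. The sketch below is an illustrative assumption, not part of the sample above: the PongJob name, its placement in the same type as ProcessSingleConnection, and the connections array are all hypothetical.

struct PongJob : IJobParallelFor
{
    public NetworkDriver.Concurrent driver;
    public NativeArray<NetworkConnection> connections;

    public void Execute(int i)
    {
        // Write the result back so a later cleanup job can drop connections
        // that ProcessSingleConnection reset to default on disconnect.
        connections[i] = ProcessSingleConnection(driver, connections[i]);
    }
}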
public unsafe int Send(NetworkDriver.Concurrent driver, NetworkPipeline pipeline, NetworkConnection connection, NetworkInterfaceSendHandle sendHandle, int headerSize)
{
    var p = m_Pipelines[pipeline.Id - 1];
    var connectionId = connection.m_NetworkId;
    int startStage = 0;

    // TODO: not really read-only, just hacking the safety system
    NativeArray<byte> tmpBuffer = sendBuffer;
    int* sendBufferLock = (int*)tmpBuffer.GetUnsafeReadOnlyPtr();
    sendBufferLock += connectionId * sizePerConnection[SendSizeOffset] / 4;

    if (Interlocked.CompareExchange(ref *sendBufferLock, 1, 0) != 0)
    {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        throw new InvalidOperationException("The parallel network driver needs to process a single unique connection per job, processing a single connection multiple times in a parallel for is not supported.");
#else
        return -1;
#endif
    }

    NativeList<UpdatePipeline> currentUpdates = new NativeList<UpdatePipeline>(128, Allocator.Temp);
    int retval = ProcessPipelineSend(driver, startStage, pipeline, connection, sendHandle, headerSize, currentUpdates);
    Interlocked.Exchange(ref *sendBufferLock, 0);

    // Move the updates requested in this iteration to the concurrent queue so they can be read/parsed in the update routine
    for (int i = 0; i < currentUpdates.Length; ++i)
        m_SendStageNeedsUpdateWrite.Enqueue(currentUpdates[i]);

    return retval;
}
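The guard at the top of Send is a plain compare-and-swap try-lock on an int stored at the start of each connection's region of the send buffer: 0 means free, 1 means held, and contention is treated as an error rather than waited out. A standalone illustration of the same pattern, with made-up names since the real flag lives inside the native send buffer:

using System;
using System.Threading;

static class ConnectionSendLockSketch
{
    // One flag per connection: 0 = unlocked, 1 = locked.
    static readonly int[] sendLocks = new int[64];

    // Runs body only if this caller wins the lock; returns false instead of blocking.
    public static bool TrySend(int connectionId, Action body)
    {
        if (Interlocked.CompareExchange(ref sendLocks[connectionId], 1, 0) != 0)
            return false; // another job is already sending on this connection

        try
        {
            body();
        }
        finally
        {
            Interlocked.Exchange(ref sendLocks[connectionId], 0); // always release, even on exceptions
        }
        return true;
    }
}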
public bool Listen(NetworkEndPoint endpoint)
{
    LastDriverWriter.Complete();
    // Switching to server mode
    if (m_Driver.Bind(endpoint) != 0)
        return false;
    if (m_Driver.Listen() != 0)
        return false;
    m_DriverListening = true;
    m_ConcurrentDriver = m_Driver.ToConcurrent();
    return true;
}
public bool Listen(NetworkEndPoint endpoint)
{
    LastDriverWriter.Complete();
    // Switching to server mode
    if (m_Driver.Bind(endpoint) != 0)
        return false;
    if (m_Driver.Listen() != 0)
        return false;
    m_DriverListening = true;
    // FIXME: Bind breaks all copies of the driver and makes them send to the wrong socket
    m_ConcurrentDriver = m_Driver.ToConcurrent();
    return true;
}
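A caller of either Listen variant would normally build the endpoint from NetworkEndPoint.AnyIpv4 plus a port before handing it over. A minimal usage sketch; the port number and the receiveSystem variable are placeholders, not taken from the snippets above:

var endpoint = NetworkEndPoint.AnyIpv4; // listen on all interfaces
endpoint.Port = 7979;                   // placeholder port
if (!receiveSystem.Listen(endpoint))
    UnityEngine.Debug.LogError("Failed to bind and listen on port 7979");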
internal unsafe void UpdateSend(NetworkDriver.Concurrent driver, out int updateCount)
{
    // Clear the send lock since it cannot be kept here and can be lost if there are exceptions in send
    NativeArray<byte> tmpBuffer = m_SendBuffer;
    int* sendBufferLock = (int*)tmpBuffer.GetUnsafePtr();
    for (int connectionOffset = 0; connectionOffset < m_SendBuffer.Length; connectionOffset += sizePerConnection[SendSizeOffset])
        sendBufferLock[connectionOffset / 4] = 0;

    NativeArray<UpdatePipeline> sendUpdates = new NativeArray<UpdatePipeline>(m_SendStageNeedsUpdateRead.Count + m_SendStageNeedsUpdate.Length, Allocator.Temp);

    UpdatePipeline updateItem;
    updateCount = 0;
    while (m_SendStageNeedsUpdateRead.TryDequeue(out updateItem))
    {
        if (driver.GetConnectionState(updateItem.connection) == NetworkConnection.State.Connected)
            sendUpdates[updateCount++] = updateItem;
    }

    int startLength = updateCount;
    for (int i = 0; i < m_SendStageNeedsUpdate.Length; i++)
    {
        if (driver.GetConnectionState(m_SendStageNeedsUpdate[i].connection) == NetworkConnection.State.Connected)
            sendUpdates[updateCount++] = m_SendStageNeedsUpdate[i];
    }

    NativeList<UpdatePipeline> currentUpdates = new NativeList<UpdatePipeline>(128, Allocator.Temp);
    // Move the updates requested in this iteration to the concurrent queue so they can be read/parsed in the update routine
    for (int i = 0; i < updateCount; ++i)
    {
        updateItem = sendUpdates[i];
        ToConcurrent().ProcessPipelineSend(driver, updateItem.stage, updateItem.pipeline, updateItem.connection, default, 0, currentUpdates);
protected override JobHandle OnUpdate(JobHandle inputDeps)
{
    EntityCommandBuffer.ParallelWriter parallelWriter = _barrier.CreateCommandBuffer().AsParallelWriter();
    NetworkDriver.Concurrent concurrent = _receiveSystem.Driver.ToConcurrent();
    NetworkPipeline networkPipeline = _receiveSystem.ReliablePipeline;

    RpcExecJob job = new RpcExecJob
    {
        commandBuffer = parallelWriter,
        entityType = GetEntityTypeHandle(),
        connectionType = GetComponentTypeHandle<NetworkStreamConnection>(),
        InBufferType = GetBufferTypeHandle<IncomingRpcDataStreamBufferComponent>(),
        OutBufferType = GetBufferTypeHandle<OutgoingRpcDataStreamBufferComponent>(),
        execute = _rpcData,
        driver = concurrent,
        reliablePipeline = networkPipeline
    };

    var jobHandle = job.ScheduleParallel(rpcExecuteGroup, inputDeps);
    _barrier.AddJobHandleForProducer(jobHandle);
    return jobHandle;
}
protected override void OnCreate()
{
    if (World.GetExistingSystem<ServerSimulationSystemGroup>() != null)
        DriverConstructor.CreateServerDriver(World, out m_Driver, out m_UnreliablePipeline, out m_ReliablePipeline);
    else
        DriverConstructor.CreateClientDriver(World, out m_Driver, out m_UnreliablePipeline, out m_ReliablePipeline);

    m_ConcurrentDriver = m_Driver.ToConcurrent();
    m_DriverListening = false;
    m_Barrier = World.GetOrCreateSystem<BeginSimulationEntityCommandBufferSystem>();
    numNetworkIds = new NativeArray<int>(1, Allocator.Persistent);
    freeNetworkIds = new NativeQueue<int>(Allocator.Persistent);
    rpcQueue = World.GetOrCreateSystem<RpcSystem>().GetRpcQueue<RpcSetNetworkId>();
    m_NetworkStreamConnectionQuery = EntityManager.CreateEntityQuery(typeof(NetworkStreamConnection));
#if UNITY_EDITOR || DEVELOPMENT_BUILD
    m_NetStats = new NativeArray<uint>(1, Allocator.Persistent);
    m_GhostStatsCollectionSystem = World.GetOrCreateSystem<GhostStatsCollectionSystem>();
#endif
}
protected override void OnUpdate()
{
    var commandBuffer = m_Barrier.CreateCommandBuffer();

    // Destroy drivers if the PingDriverComponents were removed
    if (!m_DestroyedDriverGroup.IsEmptyIgnoreFilter)
    {
        Dependency.Complete();
        var destroyedDriverEntity = m_DestroyedDriverGroup.ToEntityArray(Allocator.TempJob);
        var destroyedDriverList = m_DestroyedDriverGroup.ToComponentDataArray<PingDriverComponentData>(Allocator.TempJob);
        for (int i = 0; i < destroyedDriverList.Length; ++i)
        {
            if (destroyedDriverList[i].isServer != 0)
            {
                var serverConnectionList = m_ServerConnectionGroup.ToEntityArray(Allocator.TempJob);
                // Also destroy all active connections when the driver dies
                for (int con = 0; con < serverConnectionList.Length; ++con)
                    commandBuffer.DestroyEntity(serverConnectionList[con]);
                serverConnectionList.Dispose();
                ServerDriver.Dispose();
            }
            else
            {
                ClientDriver.Dispose();
            }

            commandBuffer.RemoveComponent<PingDriverStateComponent>(destroyedDriverEntity[i]);
        }

        destroyedDriverList.Dispose();
        destroyedDriverEntity.Dispose();
    }

    // Create drivers if new PingDriverComponents were added
    if (!m_NewDriverGroup.IsEmptyIgnoreFilter)
    {
        Dependency.Complete();
        var newDriverEntity = m_NewDriverGroup.ToEntityArray(Allocator.TempJob);
        var newDriverList = m_NewDriverGroup.ToComponentDataArray<PingDriverComponentData>(Allocator.TempJob);
        for (int i = 0; i < newDriverList.Length; ++i)
        {
            if (newDriverList[i].isServer != 0)
            {
                if (ServerDriver.IsCreated)
                    throw new InvalidOperationException("Cannot create multiple server drivers");
                var drv = NetworkDriver.Create();
                var addr = NetworkEndPoint.AnyIpv4;
                addr.Port = 9000;
                if (drv.Bind(addr) != 0)
                    throw new Exception("Failed to bind to port 9000");
                else
                    drv.Listen();
                ServerDriver = drv;
                ConcurrentServerDriver = ServerDriver.ToConcurrent();
            }
            else
            {
                if (ClientDriver.IsCreated)
                    throw new InvalidOperationException("Cannot create multiple client drivers");
                ClientDriver = NetworkDriver.Create();
                ConcurrentClientDriver = ClientDriver.ToConcurrent();
            }

            commandBuffer.AddComponent(newDriverEntity[i], new PingDriverStateComponent
            {
                isServer = newDriverList[i].isServer
            });
        }

        newDriverList.Dispose();
        newDriverEntity.Dispose();
    }

    JobHandle clientDep = default(JobHandle);
    JobHandle serverDep = default(JobHandle);

    // Go through and update all drivers, also accept all incoming connections for server drivers
    if (ServerDriver.IsCreated)
    {
        // Schedule a chain with driver update, a job to accept all connections and finally a job to delete all invalid connections
        serverDep = ServerDriver.ScheduleUpdate(Dependency);

        var acceptJob = new DriverAcceptJob
        {
            driver = ServerDriver,
            commandBuffer = commandBuffer
        };
        serverDep = acceptJob.Schedule(serverDep);

        var cleanupCommandBuffer = m_Barrier.CreateCommandBuffer().ToConcurrent();
        serverDep = Entities.ForEach((Entity entity, int nativeThreadIndex, in PingServerConnectionComponentData connection) =>
        {
            // Cleanup old connections
            if (!connection.connection.IsCreated)
                cleanupCommandBuffer.DestroyEntity(nativeThreadIndex, entity);
        }).ScheduleParallel(serverDep);

        Dependency = serverDep;
        m_Barrier.AddJobHandleForProducer(Dependency);
    }

    if (ClientDriver.IsCreated)
    {
        clientDep = ClientDriver.ScheduleUpdate(Dependency);
        Dependency = clientDep;
    }

    Dependency = JobHandle.CombineDependencies(clientDep, serverDep);
}
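The DriverAcceptJob referenced above is not included in this snippet. A plausible shape for it, with only the component and field names taken from the sample and everything else assumed, is a single-threaded IJob that drains Accept() and records each new connection as an entity:

struct DriverAcceptJob : IJob
{
    public NetworkDriver driver;              // full (non-concurrent) server driver
    public EntityCommandBuffer commandBuffer; // command buffer created from the barrier system

    public void Execute()
    {
        // Accept every pending connection and track it with a connection entity
        NetworkConnection connection;
        while ((connection = driver.Accept()).IsCreated)
        {
            var entity = commandBuffer.CreateEntity();
            commandBuffer.AddComponent(entity, new PingServerConnectionComponentData { connection = connection });
        }
    }
}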
// -- lifetime --
public UpdateJob(NetworkDriver.Concurrent driver, NativeArray<NetworkConnection> connections)
{
    mDriver = driver;
    mConnections = connections;
}
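How this constructor gets used depends on which job interface UpdateJob implements, which is not shown here. Assuming it is an IJobParallelFor over mConnections, a scheduling call might look like this (purely hypothetical):

var updateJob = new UpdateJob(driver.ToConcurrent(), connections);
// Chain the job after the driver's own update so events are fresh when it runs.
JobHandle handle = updateJob.Schedule(connections.Length, 1, driver.ScheduleUpdate());
handle.Complete();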
internal unsafe int ProcessPipelineSend(NetworkDriver.Concurrent driver, int startStage, NetworkPipeline pipeline,
    NetworkConnection connection, NetworkInterfaceSendHandle sendHandle, int headerSize,
    NativeList<UpdatePipeline> currentUpdates)
{
    int retval = sendHandle.size;
    NetworkPipelineContext ctx = default(NetworkPipelineContext);
    ctx.timestamp = m_timestamp[0];
    var p = m_Pipelines[pipeline.Id - 1];
    var connectionId = connection.m_NetworkId;

    var resumeQ = new NativeList<int>(16, Allocator.Temp);
    int resumeQStart = 0;

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    if (headerSize != p.headerCapacity + UnsafeUtility.SizeOf<UdpCHeader>() + 1 && sendHandle.data != IntPtr.Zero)
        throw new InvalidOperationException("Invalid header size.");
#endif

    var inboundBuffer = default(InboundSendBuffer);
    if (sendHandle.data != IntPtr.Zero)
    {
        inboundBuffer.bufferWithHeaders = (byte*)sendHandle.data + UnsafeUtility.SizeOf<UdpCHeader>() + 1;
        inboundBuffer.bufferWithHeadersLength = sendHandle.size - UnsafeUtility.SizeOf<UdpCHeader>() - 1;
        inboundBuffer.buffer = inboundBuffer.bufferWithHeaders + p.headerCapacity;
        inboundBuffer.bufferLength = inboundBuffer.bufferWithHeadersLength - p.headerCapacity;
    }

    while (true)
    {
        headerSize = p.headerCapacity;

        int internalBufferOffset = p.sendBufferOffset + sizePerConnection[SendSizeOffset] * connectionId;
        int internalSharedBufferOffset = p.sharedBufferOffset + sizePerConnection[SharedSizeOffset] * connectionId;

        bool needsUpdate = false;
        // If this is not the first stage we need to fast forward the buffer offset to the correct place
        if (startStage > 0)
        {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
            if (inboundBuffer.bufferWithHeadersLength > 0)
                throw new InvalidOperationException("Can't start from a stage with a buffer");
#endif
            for (int i = 0; i < startStage; ++i)
            {
                internalBufferOffset += (m_StageCollection[m_StageList[p.FirstStageIndex + i]].SendCapacity + AlignmentMinusOne) & (~AlignmentMinusOne);
                internalSharedBufferOffset += (m_StageCollection[m_StageList[p.FirstStageIndex + i]].SharedStateCapacity + AlignmentMinusOne) & (~AlignmentMinusOne);
                headerSize -= m_StageCollection[m_StageList[p.FirstStageIndex + i]].HeaderCapacity;
            }
        }

        for (int i = startStage; i < p.NumStages; ++i)
        {
            int stageHeaderCapacity = m_StageCollection[m_StageList[p.FirstStageIndex + i]].HeaderCapacity;
#if ENABLE_UNITY_COLLECTIONS_CHECKS
            if (stageHeaderCapacity > headerSize)
                throw new InvalidOperationException("Not enough header space");
#endif
            inboundBuffer.headerPadding = headerSize;
            headerSize -= stageHeaderCapacity;
            if (stageHeaderCapacity > 0 && inboundBuffer.bufferWithHeadersLength > 0)
            {
                var headerArray = NativeArrayUnsafeUtility.ConvertExistingDataToNativeArray<byte>(inboundBuffer.bufferWithHeaders + headerSize, stageHeaderCapacity, Allocator.Invalid);
#if ENABLE_UNITY_COLLECTIONS_CHECKS
                NativeArrayUnsafeUtility.SetAtomicSafetyHandle(ref headerArray, AtomicSafetyHandle.GetTempMemoryHandle());
#endif
                ctx.header = new DataStreamWriter(headerArray);
            }
            else
            {
                ctx.header = new DataStreamWriter(stageHeaderCapacity, Allocator.Temp);
            }

            var prevInbound = inboundBuffer;
            ProcessSendStage(i, internalBufferOffset, internalSharedBufferOffset, p, ref resumeQ, ref ctx, ref inboundBuffer, ref needsUpdate);

            if (needsUpdate)
                AddSendUpdate(connection, i, pipeline, currentUpdates);
            if (inboundBuffer.bufferWithHeadersLength == 0)
                break;

#if ENABLE_UNITY_COLLECTIONS_CHECKS
            if (inboundBuffer.headerPadding != prevInbound.headerPadding)
                throw new InvalidOperationException("Changing the header padding in a pipeline is not supported");
#endif
            if (inboundBuffer.buffer != prevInbound.buffer)
            {
#if ENABLE_UNITY_COLLECTIONS_CHECKS
                if (inboundBuffer.buffer != inboundBuffer.bufferWithHeaders + inboundBuffer.headerPadding ||
                    inboundBuffer.bufferLength + inboundBuffer.headerPadding > inboundBuffer.bufferWithHeadersLength)
                    throw new InvalidOperationException("When creating an internal buffer in pipelines the buffer must be a subset of the buffer with headers");
#endif
                // Copy header to new buffer so it is part of the payload
                UnsafeUtility.MemCpy(inboundBuffer.bufferWithHeaders + headerSize, ctx.header.AsNativeArray().GetUnsafeReadOnlyPtr(), ctx.header.Length);
            }
#if ENABLE_UNITY_COLLECTIONS_CHECKS
            else
            {
                if (inboundBuffer.bufferWithHeaders != prevInbound.bufferWithHeaders)
                    throw new InvalidOperationException("Changing the send buffer with headers without changing the buffer is not supported");
            }
#endif

            if (ctx.header.Length < stageHeaderCapacity)
            {
                int wastedSpace = stageHeaderCapacity - ctx.header.Length;
                // Remove wasted space in the header
                UnsafeUtility.MemMove(inboundBuffer.buffer - wastedSpace, inboundBuffer.buffer, inboundBuffer.bufferLength);
            }

            // Update the inbound buffer for next iteration
            inboundBuffer.buffer = inboundBuffer.bufferWithHeaders + headerSize;
            inboundBuffer.bufferLength = ctx.header.Length + inboundBuffer.bufferLength;

            needsUpdate = false;

            internalBufferOffset += (ctx.internalProcessBufferLength + AlignmentMinusOne) & (~AlignmentMinusOne);
            internalSharedBufferOffset += (ctx.internalSharedProcessBufferLength + AlignmentMinusOne) & (~AlignmentMinusOne);
        }

        if (inboundBuffer.bufferLength != 0)
        {
            if (sendHandle.data != IntPtr.Zero && inboundBuffer.bufferWithHeaders == (byte*)sendHandle.data + UnsafeUtility.SizeOf<UdpCHeader>() + 1)
            {
                // Actually send the data - after collapsing it again
                if (inboundBuffer.buffer != inboundBuffer.bufferWithHeaders)
                {
                    UnsafeUtility.MemMove(inboundBuffer.bufferWithHeaders, inboundBuffer.buffer, inboundBuffer.bufferLength);
                    inboundBuffer.buffer = inboundBuffer.bufferWithHeaders;
                }
                ((byte*)sendHandle.data)[UnsafeUtility.SizeOf<UdpCHeader>()] = (byte)pipeline.Id;
                int sendSize = UnsafeUtility.SizeOf<UdpCHeader>() + 1 + inboundBuffer.bufferLength;
#if ENABLE_UNITY_COLLECTIONS_CHECKS
                if (sendSize > sendHandle.size)
                    throw new InvalidOperationException("Pipeline increased the data in the buffer, this is not allowed");
#endif
                sendHandle.size = sendSize;
                retval = driver.CompleteSend(connection, sendHandle);
                sendHandle = default;
            }
            else
            {
                // Sending without pipeline, the correct pipeline will be added by the default flags when this is called
                var writer = driver.BeginSend(connection);
                writer.WriteByte((byte)pipeline.Id);
                writer.WriteBytes(inboundBuffer.buffer, inboundBuffer.bufferLength);
                if (writer.HasFailedWrites)
                    driver.AbortSend(writer);
                else
                    driver.EndSend(writer);
            }
        }

        if (resumeQStart >= resumeQ.Length)
            break;

        startStage = resumeQ[resumeQStart++];

        inboundBuffer = default(InboundSendBuffer);
    }

    if (sendHandle.data != IntPtr.Zero)
        driver.AbortSend(sendHandle);
    return retval;
}