public void Dispose()
{
    // Release every native container owned by this instance. The
    // null-conditional calls make disposal safe even when a container
    // was never allocated (or was already torn down).
    codes?.Dispose();
    quantizedValues?.Dispose();
    codeWords?.Dispose();
    boundingBoxes?.Dispose();
    quantizers?.Dispose();
}
// Integration test: boots one server and five clients over loopback UDP,
// verifies each client completes the connect handshake, then closes every
// connection and checks the server observes a Disconnect per client.
// Uses the fixture fields `server_driver`, `ev` and `stream` declared
// elsewhere in this file. The ScheduleUpdate().Complete() calls between
// steps pump each driver so queued packets are actually sent/received;
// their ordering is significant.
public IEnumerator ServerAnd5Clients_Connect_Successfully()
{
    int numberOfClients = 5;
    NativeArray<NetworkConnection> connectionToClientArray;
    UdpNetworkDriver[] client_driversArray = new UdpNetworkDriver[numberOfClients];
    NativeArray<NetworkConnection> clientToServerConnectionsArray;

    // setup server: bind to loopback:1337 and start listening
    connectionToClientArray = new NativeArray<NetworkConnection>(numberOfClients, Allocator.Persistent);
    server_driver = new UdpNetworkDriver(new NetworkDataStreamParameter { size = 0 });
    NetworkEndPoint server_endpoint = NetworkEndPoint.LoopbackIpv4;
    server_endpoint.Port = 1337;
    server_driver.Bind(server_endpoint);
    server_driver.Listen();

    // setup clients: one driver per client, each initiating a connect
    clientToServerConnectionsArray = new NativeArray<NetworkConnection>(numberOfClients, Allocator.Persistent);
    for (int i = 0; i < numberOfClients; i++)
    {
        client_driversArray[i] = new UdpNetworkDriver(new NetworkDataStreamParameter { size = 0 });
        clientToServerConnectionsArray[i] = client_driversArray[i].Connect(server_endpoint);
    }

    // update drivers so the connection requests go out on the wire
    for (int i = 0; i < numberOfClients; i++)
    {
        client_driversArray[i].ScheduleUpdate().Complete();
    }
    server_driver.ScheduleUpdate().Complete();

    // accept connections: the server should see no pending event for a freshly
    // accepted connection, while each client should pop a Connect event
    for (int i = 0; i < numberOfClients; i++)
    {
        connectionToClientArray[i] = server_driver.Accept();
        server_driver.ScheduleUpdate().Complete();
        ev = server_driver.PopEventForConnection(connectionToClientArray[i], out stream);
        Assert.IsTrue(ev == NetworkEvent.Type.Empty, "Not empty NetworkEvent on the server appeared");
        client_driversArray[i].ScheduleUpdate().Complete();
        ev = clientToServerConnectionsArray[i].PopEvent(client_driversArray[i], out stream);
        Assert.IsTrue(ev == NetworkEvent.Type.Connect, "NetworkEvent should have Type.Connect on the client");
    }

    // close connections: each close must surface as a Disconnect on the server
    for (int i = 0; i < numberOfClients; i++)
    {
        clientToServerConnectionsArray[i].Close(client_driversArray[i]);
        // update drivers
        client_driversArray[i].ScheduleUpdate().Complete();
        server_driver.ScheduleUpdate().Complete();
        ev = server_driver.PopEventForConnection(connectionToClientArray[i], out stream);
        Assert.IsTrue(ev == NetworkEvent.Type.Disconnect, "NetworkEvent.Type.Disconnect was expected to appear, but " + ev + "appeared");
    }

    // tear down drivers and native arrays (leaks otherwise if asserts pass)
    server_driver.Dispose();
    for (int i = 0; i < numberOfClients; i++)
    {
        client_driversArray[i].Dispose();
    }
    connectionToClientArray.Dispose();
    clientToServerConnectionsArray.Dispose();
    yield return(null);
}
public void Dispose()
{
    // Tear down the mesh buffers owned by this object; each call is
    // guarded so a never-created container is silently skipped.
    Indices?.Dispose();
    Vertices?.Dispose();
    Uvs?.Dispose();
}
private void OnDestroy()
{
    // Free the native containers when the component is destroyed;
    // skipping this would leak the unmanaged allocations.
    m_particles.Dispose();
    m_grid.Dispose();
}
public void Dispose()
{
    // Release all three native containers backing the queue state.
    m_MaxItems.Dispose();
    m_Queue.Dispose();
    m_QueueHeadTail.Dispose();
}
protected override void OnDestroy()
{
    // System teardown: free the persistent neighbour-offset container.
    NeighbourOffset.Dispose();
}
protected override void OnDestroy()
{
    // System teardown: free the spring-data container.
    SpringDatas.Dispose();
}
protected override void OnDestroy()
{
    // Let the base system clean up first, then free our own container.
    base.OnDestroy();
    m_strings.Dispose();
}
// Per-frame sprite render pipeline: culls entities against the camera,
// buckets them into POSITION_SLICES horizontal bands, sorts each band by
// position, flattens everything into matrix/UV arrays, and draws them with
// Graphics.DrawMeshInstanced in batches of DRAW_MESH_INSTANCED_SLICE_COUNT.
// The 20 yTop_N locals mirror the 20 individually named fields on
// CullAndSortNativeQueueJob, so the unrolling cannot be collapsed into an
// array without changing that job's (unseen) definition.
protected override void OnUpdate()
{
    // Clear each slice's queue from last frame, in parallel.
    for (int i = 0; i < POSITION_SLICES; i++)
    {
        ClearQueueJob clearQueueJob = new ClearQueueJob { nativeQueue = nativeQueueArray[i] };
        jobHandleArray[i] = clearQueueJob.Schedule();
    }
    JobHandle.CompleteAll(jobHandleArray);

    // Derive the camera's visible rectangle, with a 10% margin on each axis
    // so sprites don't pop at the screen edge.
    Camera camera = Camera.main;
    float cameraWidth = camera.aspect * camera.orthographicSize;
    float3 cameraPosition = camera.transform.position;
    float marginX = cameraWidth / 10f;
    float xMin = cameraPosition.x - cameraWidth - marginX;
    float xMax = cameraPosition.x + cameraWidth + marginX;
    // Height of one horizontal band; yTop_N is the top edge of band N.
    float cameraSliceSize = camera.orthographicSize * 2f / POSITION_SLICES;
    float yBottom = cameraPosition.y - camera.orthographicSize; // Bottom cull position
    float yTop_1 = cameraPosition.y + camera.orthographicSize;  // Top most cull position
    float yTop_2 = yTop_1 - cameraSliceSize * 1f;
    float yTop_3 = yTop_1 - cameraSliceSize * 2f;
    float yTop_4 = yTop_1 - cameraSliceSize * 3f;
    float yTop_5 = yTop_1 - cameraSliceSize * 4f;
    float yTop_6 = yTop_1 - cameraSliceSize * 5f;
    float yTop_7 = yTop_1 - cameraSliceSize * 6f;
    float yTop_8 = yTop_1 - cameraSliceSize * 7f;
    float yTop_9 = yTop_1 - cameraSliceSize * 8f;
    float yTop_10 = yTop_1 - cameraSliceSize * 9f;
    float yTop_11 = yTop_1 - cameraSliceSize * 10f;
    float yTop_12 = yTop_1 - cameraSliceSize * 11f;
    float yTop_13 = yTop_1 - cameraSliceSize * 12f;
    float yTop_14 = yTop_1 - cameraSliceSize * 13f;
    float yTop_15 = yTop_1 - cameraSliceSize * 14f;
    float yTop_16 = yTop_1 - cameraSliceSize * 15f;
    float yTop_17 = yTop_1 - cameraSliceSize * 16f;
    float yTop_18 = yTop_1 - cameraSliceSize * 17f;
    float yTop_19 = yTop_1 - cameraSliceSize * 18f;
    float yTop_20 = yTop_1 - cameraSliceSize * 19f;
    // Margin is applied only to the outermost cull bounds, not to the
    // interior band boundaries.
    float marginY = camera.orthographicSize / 10f;
    yTop_1 += marginY;
    yBottom -= marginY;

    // Cull every entity and push visible ones into the queue of the band
    // containing their y position.
    CullAndSortNativeQueueJob cullAndSortNativeQueueJob = new CullAndSortNativeQueueJob
    {
        xMin = xMin, xMax = xMax, yBottom = yBottom,
        yTop_1 = yTop_1, yTop_2 = yTop_2, yTop_3 = yTop_3, yTop_4 = yTop_4,
        yTop_5 = yTop_5, yTop_6 = yTop_6, yTop_7 = yTop_7, yTop_8 = yTop_8,
        yTop_9 = yTop_9, yTop_10 = yTop_10, yTop_11 = yTop_11, yTop_12 = yTop_12,
        yTop_13 = yTop_13, yTop_14 = yTop_14, yTop_15 = yTop_15, yTop_16 = yTop_16,
        yTop_17 = yTop_17, yTop_18 = yTop_18, yTop_19 = yTop_19, yTop_20 = yTop_20,
        nativeQueue_1 = nativeQueueArray[0].AsParallelWriter(),
        nativeQueue_2 = nativeQueueArray[1].AsParallelWriter(),
        nativeQueue_3 = nativeQueueArray[2].AsParallelWriter(),
        nativeQueue_4 = nativeQueueArray[3].AsParallelWriter(),
        nativeQueue_5 = nativeQueueArray[4].AsParallelWriter(),
        nativeQueue_6 = nativeQueueArray[5].AsParallelWriter(),
        nativeQueue_7 = nativeQueueArray[6].AsParallelWriter(),
        nativeQueue_8 = nativeQueueArray[7].AsParallelWriter(),
        nativeQueue_9 = nativeQueueArray[8].AsParallelWriter(),
        nativeQueue_10 = nativeQueueArray[9].AsParallelWriter(),
        nativeQueue_11 = nativeQueueArray[10].AsParallelWriter(),
        nativeQueue_12 = nativeQueueArray[11].AsParallelWriter(),
        nativeQueue_13 = nativeQueueArray[12].AsParallelWriter(),
        nativeQueue_14 = nativeQueueArray[13].AsParallelWriter(),
        nativeQueue_15 = nativeQueueArray[14].AsParallelWriter(),
        nativeQueue_16 = nativeQueueArray[15].AsParallelWriter(),
        nativeQueue_17 = nativeQueueArray[16].AsParallelWriter(),
        nativeQueue_18 = nativeQueueArray[17].AsParallelWriter(),
        nativeQueue_19 = nativeQueueArray[18].AsParallelWriter(),
        nativeQueue_20 = nativeQueueArray[19].AsParallelWriter(),
    };
    JobHandle cullAndSortNativeQueueJobHandle = cullAndSortNativeQueueJob.Schedule(this);
    cullAndSortNativeQueueJobHandle.Complete();

    // Total visible count = sum of all band queue counts.
    int visibleEntityTotal = 0;
    for (int i = 0; i < POSITION_SLICES; i++)
    {
        visibleEntityTotal += nativeQueueArray[i].Count;
    }
    // Allocate a TempJob array per band (disposed after the fill jobs below).
    for (int i = 0; i < POSITION_SLICES; i++)
    {
        NativeArray<RenderData> nativeArray = new NativeArray<RenderData>(nativeQueueArray[i].Count, Allocator.TempJob);
        nativeArrayArray[i] = nativeArray;
    }
    // Drain each band's queue into its array, in parallel.
    for (int i = 0; i < POSITION_SLICES; i++)
    {
        NativeQueueToArrayJob nativeQueueToArrayJob = new NativeQueueToArrayJob
        {
            nativeQueue = nativeQueueArray[i],
            nativeArray = nativeArrayArray[i],
        };
        jobHandleArray[i] = nativeQueueToArrayJob.Schedule();
    }
    JobHandle.CompleteAll(jobHandleArray);

    // Sort by position within each band so draw order is back-to-front.
    for (int i = 0; i < POSITION_SLICES; i++)
    {
        SortByPositionJob sortByPositionJob = new SortByPositionJob { sortArray = nativeArrayArray[i], comparer = positionComparer };
        jobHandleArray[i] = sortByPositionJob.Schedule();
    }
    JobHandle.CompleteAll(jobHandleArray);

    // Flatten all bands into single matrix/UV arrays; startingIndex keeps each
    // band's output contiguous and in band order.
    NativeArray<Matrix4x4> matrixArray = new NativeArray<Matrix4x4>(visibleEntityTotal, Allocator.TempJob);
    NativeArray<Vector4> uvArray = new NativeArray<Vector4>(visibleEntityTotal, Allocator.TempJob);
    int startingIndex = 0;
    for (int i = 0; i < POSITION_SLICES; i++)
    {
        FillArraysParallelJob fillArraysParallelJob = new FillArraysParallelJob
        {
            nativeArray = nativeArrayArray[i],
            matrixArray = matrixArray,
            uvArray = uvArray,
            startingIndex = startingIndex
        };
        startingIndex += nativeArrayArray[i].Length;
        jobHandleArray[i] = fillArraysParallelJob.Schedule(nativeArrayArray[i].Length, 10);
    }
    JobHandle.CompleteAll(jobHandleArray);
    // Per-band TempJob arrays are no longer needed.
    for (int i = 0; i < POSITION_SLICES; i++)
    {
        nativeArrayArray[i].Dispose();
    }

    // Slice Arrays and Draw: DrawMeshInstanced caps batch size, so copy into
    // the reusable instanced arrays chunk by chunk.
    InitDrawMeshInstancedSlicedData();
    for (int i = 0; i < visibleEntityTotal; i += DRAW_MESH_INSTANCED_SLICE_COUNT)
    {
        int sliceSize = math.min(visibleEntityTotal - i, DRAW_MESH_INSTANCED_SLICE_COUNT);
        NativeArray<Matrix4x4>.Copy(matrixArray, i, matrixInstancedArray, 0, sliceSize);
        NativeArray<Vector4>.Copy(uvArray, i, uvInstancedArray, 0, sliceSize);
        materialPropertyBlock.SetVectorArray(shaderMainTexUVid, uvInstancedArray);
        Graphics.DrawMeshInstanced(mesh, 0, material, matrixInstancedArray, sliceSize, materialPropertyBlock);
    }
    matrixArray.Dispose();
    uvArray.Dispose();
}
public void Dispose()
{
    // Free the instancing buffers owned by this object.
    matrices.Dispose();
    propertyParams.Dispose();
}
// Runs one Game-of-Life generation as two chained jobs:
//  1) count each cell's live neighbors and write its next-cycle status;
//  2) commit the next status and set the cell's render scale from a lookup
//     table (dead cells get the "invisible" scale).
// Returns the handle of the second job so callers can chain further work.
public JobHandle PerformJob(JobHandle inputDeps)
{
    // Read-only random access to every cell's current life status.
    ComponentDataFromEntity<LifeStatus> lifeStatusLookup = GetComponentDataFromEntity<LifeStatus>(true);
    JobHandle jobHandle = Entities
        .WithReadOnly(lifeStatusLookup)
        .ForEach((Entity cell, ref LifeStatusNextCycle next, in Neighbors neighbors) =>
        {
            byte numLiveNeighbors = 0;
            // Check current life status of all 8 neighbors; Entity.Null marks
            // a missing neighbor (grid edge), which contributes nothing.
            if (neighbors.nw != Entity.Null)
            {
                numLiveNeighbors += lifeStatusLookup[neighbors.nw].isAlive;
            }
            if (neighbors.n != Entity.Null)
            {
                numLiveNeighbors += lifeStatusLookup[neighbors.n].isAlive;
            }
            if (neighbors.ne != Entity.Null)
            {
                numLiveNeighbors += lifeStatusLookup[neighbors.ne].isAlive;
            }
            if (neighbors.w != Entity.Null)
            {
                numLiveNeighbors += lifeStatusLookup[neighbors.w].isAlive;
            }
            if (neighbors.e != Entity.Null)
            {
                numLiveNeighbors += lifeStatusLookup[neighbors.e].isAlive;
            }
            if (neighbors.sw != Entity.Null)
            {
                numLiveNeighbors += lifeStatusLookup[neighbors.sw].isAlive;
            }
            if (neighbors.s != Entity.Null)
            {
                numLiveNeighbors += lifeStatusLookup[neighbors.s].isAlive;
            }
            if (neighbors.se != Entity.Null)
            {
                numLiveNeighbors += lifeStatusLookup[neighbors.se].isAlive;
            }
            if (lifeStatusLookup[cell].isAlive == 1) // the cell is currently alive
            {
                // Dies from under- (<2) or over-population (>3); otherwise survives.
                // math.select(a, b, cond) yields b when cond is true.
                next.isAlive = (byte)math.select(1, 0, numLiveNeighbors < 2 || numLiveNeighbors > 3);
            }
            else // the cell is currently dead
            {
                // Becomes alive through reproduction with exactly 3 live neighbors.
                next.isAlive = (byte)math.select(0, 1, numLiveNeighbors == 3);
            }
        }).Schedule(inputDeps);

    // Get the scaling constants and save them for the job below. This way we
    // can do a lookup (scaleConsts[isAlive]) rather than a conditional.
    EntityQuery scaleConstQuery = EntityManager.CreateEntityQuery(typeof(ScaleConst), typeof(Scale));
    NativeArray<Scale> consts = scaleConstQuery.ToComponentDataArray<Scale>(Allocator.TempJob);
    NativeArray<float> scaleConsts = new NativeArray<float>(consts.Length, Allocator.TempJob);
    for (int i = 0; i < consts.Length; i++)
    {
        scaleConsts[i] = consts[i].Value;
    }
    consts.Dispose();

    // Update the current life status of all the cells with this job.
    // It is scheduled against jobHandle, so the neighbor-counting job above
    // is guaranteed to finish first; scaleConsts is auto-freed on completion.
    jobHandle = Entities
        .WithReadOnly(scaleConsts)
        .WithDeallocateOnJobCompletion(scaleConsts)
        .ForEach((Entity entity, int entityInQueryIndex, ref Scale scale, ref LifeStatus status, in LifeStatusNextCycle nextStatus) =>
        {
            status.isAlive = nextStatus.isAlive;
            // dead cells are invisible (scale 0)
            scale.Value = scaleConsts[status.isAlive];
        }).Schedule(jobHandle);
    return(jobHandle);
}
// Start is called before the first frame update.
// End-to-end demo pipeline: sample points from the tester mesh, estimate
// normals for a random subset (classical max-bin or CNN), score the result
// against the mesh's true normals, feed the reconstructed cloud to a
// TCParticleSystem for visualization, and optionally dump training images.
void Start()
{
    Mesh mesh = Tester.GetComponent<MeshFilter>().sharedMesh;
    Material mat = Tester.GetComponent<MeshRenderer>().sharedMaterial;
    var tex = mat.GetTexture("_MainTex") as Texture2D;
    var normalTex = mat.GetTexture("_BumpMap") as Texture2D;
    // Fall back to a plain white albedo when the material has no main texture.
    if (tex == null)
    {
        tex = Texture2D.whiteTexture;
    }
    // Get points on the mesh
    var meshPoints = MeshSampler.SampleRandomPointsOnMesh(mesh, tex, normalTex, PointCount, NoiseLevel);
    // Pick what particles we're going to actually calculate normals for.
    // Fixed seed keeps the subset reproducible across runs.
    var sampleIndices = new NativeList<int>(Allocator.TempJob);
    var rand = new Random(8976543);
    for (int i = 0; i < meshPoints.Length; ++i)
    {
        if (rand.NextFloat() <= SampleRate)
        {
            sampleIndices.Add(i);
        }
    }
    // Gather position / true normal / albedo for each sampled particle.
    var queryPositions = new NativeArray<float3>(sampleIndices.Length, Allocator.TempJob);
    var trueNormals = new NativeArray<float3>(sampleIndices.Length, Allocator.TempJob);
    var queryColors = new NativeArray<Color32>(sampleIndices.Length, Allocator.TempJob);
    for (int i = 0; i < sampleIndices.Length; ++i)
    {
        int index = sampleIndices[i];
        queryPositions[i] = meshPoints[index].Position;
        trueNormals[i] = meshPoints[index].Normal;
        queryColors[i] = meshPoints[index].Albedo;
    }
    var histograms = PointCloudNormals.CalculateHistograms(meshPoints, queryPositions, trueNormals);
    // Now that we have the hough histograms, we can estimate normals!
    NativeArray<float3> reconstructedNormals;
    if (!UseCNN)
    {
        // Use classical methods
        reconstructedNormals = PointCloudNormals.EstimateNormals(histograms, trueNormals);
    }
    else
    {
        // Pass to CNN!
        reconstructedNormals = PointCloudNormals.EstimateNormalsCNN("TrainingCode/saved_models/tf_model.pb", histograms, trueNormals);
    }
    // Measure how well we did against the ground-truth normals.
    NativeArray<float> reconstructionError = PointCloudNormals.CalculateScore(reconstructedNormals, trueNormals, out float rms, out float pgp);
    // Log some info about how well we did
    string methodName = UseCNN ? "CNN" : "MaxBin";
    Debug.Log($"{name} finished using {methodName}. Total RMS: {rms}, PGP: {pgp}.");
    // Now visualize it using TC Particles
    var pointCloudData = PointCloudNormals.ConstructPointCloudData(queryPositions, reconstructedNormals, queryColors, ShowErrors, reconstructionError);
    // Write hough textures to disk if requested
    if (WriteData)
    {
        PointCloudNormals.WriteTrainingImages(Folder, CloudName, histograms, trueNormals);
    }
    // Release every native buffer allocated above.
    queryPositions.Dispose();
    trueNormals.Dispose();
    sampleIndices.Dispose();
    queryColors.Dispose();
    reconstructedNormals.Dispose();
    reconstructionError.Dispose();
    histograms.Dispose();
    // Hand the cloud to the particle system and hide the original mesh.
    var system = GetComponent<TCParticleSystem>();
    system.Emitter.PointCloud = pointCloudData;
    system.Emitter.Emit(pointCloudData.PointCount);
    GetComponent<MeshRenderer>().enabled = false;
}
public void Dispose()
{
    // Release the three native containers owned by this structure.
    links.Dispose();
    slices.Dispose();
    indices.Dispose();
}
/// <summary>
/// Creates and returns the pokemon's PhysicsCollider. Known pokemon get a
/// hand-built shape ("Cubone" = 5-part compound, "Electrode" = sphere);
/// anything else falls back to a sphere sized from the pokemon's height and
/// logs an error.
/// </summary>
/// <param name="pokemonName">Name of the pokemon</param>
/// <param name="ped">Pokemon data; only Height is read here (for sphere radius)</param>
/// <param name="collisionFilter">Optional filter; default-equal values are replaced with a pokemon filter</param>
/// <param name="scale">Uniform scale applied to sphere radii; compound shapes ignore it</param>
/// <param name="material">Optional material; default-equal values are replaced per pokedex entry</param>
/// <param name="groupIndex">Collision group used when building the default filter</param>
/// <returns>PhysicsCollider</returns>
public static PhysicsCollider getPokemonPhysicsCollider(string pokemonName, PokemonEntityData ped, CollisionFilter collisionFilter = new CollisionFilter(), float scale = 1f, Unity.Physics.Material material = new Unity.Physics.Material(), int groupIndex = 1)
{
    // FUTURE UPDATE: allow specific colliders to receive specific filters and
    // materials (needs collision groups).
    PhysicsCollider physicsCollider = new PhysicsCollider { };
    // NOTE(review): declared as UnityEngine.Quaternion but initialized from a
    // Unity.Mathematics quaternion (implicit conversion) — verify intended.
    Quaternion rotation = new quaternion();
    // If the caller passed the default filter, build a pokemon-specific one.
    if (collisionFilter.Equals(new CollisionFilter()))
    {
        Debug.Log("Creating new Collision Filter");
        collisionFilter = new CollisionFilter
        {
            BelongsTo = TriggerEventClass.Pokemon | TriggerEventClass.Collidable,
            CollidesWith = TriggerEventClass.Collidable,
            GroupIndex = groupIndex
        };
    }
    // Likewise, a default material is replaced with the pokedex-derived one.
    if (material.Equals(new Unity.Physics.Material()))
    {
        material = GetPokemonColliderMaterial(StringToPokedexEntry(pokemonName));
    }
    switch (pokemonName)
    {
    case "Cubone":
        // Compound of: head sphere, two arm capsules, body sphere, bone capsule.
        var colliders = new NativeArray<CompoundCollider.ColliderBlobInstance>(5, Allocator.Temp);
        colliders[0] = new CompoundCollider.ColliderBlobInstance
        {
            Collider = Unity.Physics.SphereCollider.Create(new SphereGeometry { Center = new float3(0, 0.27f, 0.03f), Radius = 0.225f }, collisionFilter, material),
            CompoundFromChild = new RigidTransform { pos = new float3 { x = 0, y = 0, z = 0 }, rot = quaternion.identity }
        };
        var a = GenerateCapsuleData(float3.zero, Vector3.right, 0.1f, 0.3f);
        rotation.SetFromToRotation(Vector3.right, new Vector3(0, 90f, 0));
        colliders[1] = new CompoundCollider.ColliderBlobInstance
        {
            Collider = Unity.Physics.CapsuleCollider.Create(new CapsuleGeometry { Vertex0 = a.pointA, Vertex1 = a.pointB, Radius = 0.1f }, collisionFilter, material),
            CompoundFromChild = new RigidTransform { pos = new float3(-0.17f, 0.19f, 0), rot = rotation }
        };
        colliders[2] = new CompoundCollider.ColliderBlobInstance
        {
            Collider = Unity.Physics.CapsuleCollider.Create(new CapsuleGeometry { Vertex0 = a.pointA, Vertex1 = a.pointB, Radius = 0.1f }, collisionFilter, material),
            CompoundFromChild = new RigidTransform { pos = new float3(0.17f, 0.19f, 0), rot = rotation }
        };
        colliders[3] = new CompoundCollider.ColliderBlobInstance
        {
            Collider = Unity.Physics.SphereCollider.Create(new SphereGeometry { Center = float3.zero, Radius = 0.23f }, collisionFilter, material),
            CompoundFromChild = new RigidTransform { pos = new float3(0, 0.75f, 0.03f), rot = rotation }
        };
        a = GenerateCapsuleData(float3.zero, Vector3.right, 0.1f, 0.3f);
        rotation = Quaternion.Euler(0, 90f, 26f);
        colliders[4] = new CompoundCollider.ColliderBlobInstance
        {
            Collider = Unity.Physics.CapsuleCollider.Create(new CapsuleGeometry { Vertex0 = a.pointA, Vertex1 = a.pointB, Radius = 0.1f }, collisionFilter, material),
            CompoundFromChild = new RigidTransform { pos = new float3(0, 0.63f, 0.33f), rot = rotation }
        };
        physicsCollider = new PhysicsCollider { Value = CompoundCollider.Create(colliders) };
        // Compound shapes are built from fixed offsets, so scaling is unsupported.
        if (scale > 1f)
        {
            Debug.LogWarning("Cannot scale Cubone");
        }
        colliders.Dispose();
        break;
    case "Electrode":
        // (typo in log string preserved — it is runtime output)
        Debug.Log("Creating PHysicwsCollider for Electrode");
        physicsCollider = new PhysicsCollider
        {
            Value = Unity.Physics.SphereCollider.Create(new SphereGeometry { Center = float3.zero, Radius = ped.Height / 2 * scale }, collisionFilter, material )
        };
        break;
    default:
        // Unknown pokemon: log and fall back to a height-derived sphere.
        Debug.LogError("Failed to find collider for pokemon \"" + pokemonName + "\"");
        physicsCollider = new PhysicsCollider
        {
            Value = Unity.Physics.SphereCollider.Create(new SphereGeometry { Center = float3.zero, Radius = ped.Height / 2 * scale }, collisionFilter, material )
        };
        break;
    }
    return(physicsCollider);
}
/// <summary>
/// Completes construction of the blob asset and returns a reference to the asset in unmanaged memory.
/// </summary>
/// <remarks>Use the <see cref="BlobAssetReference{T}"/> to access the blob asset. When the asset is no longer
/// needed, call<see cref="BlobAssetReference{T}.Dispose()"/> to destroy the blob asset and free its allocated
/// memory.</remarks>
/// <param name="allocator">The type of memory to allocate. Unless the asset has a very short life span, use
/// <see cref="Allocator.Persistent"/>.</param>
/// <typeparam name="T">The data type of the struct used to construct the asset's root. Use the same struct type
/// that you used when calling <see cref="ConstructRoot{T}"/>.</typeparam>
/// <returns>A reference to the finished blob asset.</returns>
public BlobAssetReference<T> CreateBlobAssetReference<T>(Allocator allocator) where T : struct
{
    // offsets[i] is where chunk i starts in the final contiguous buffer;
    // offsets[i+1] is its exclusive end, so offsets[last] is the total size.
    var offsets = new NativeArray<int>(m_allocations.Length + 1, Allocator.Temp);
    var sortedAllocs = new NativeArray<SortedIndex>(m_allocations.Length, Allocator.Temp);
    offsets[0] = 0;
    for (int i = 0; i < m_allocations.Length; ++i)
    {
        offsets[i + 1] = offsets[i] + m_allocations[i].size;
        sortedAllocs[i] = new SortedIndex { p = m_allocations[i].p, index = i };
    }
    int dataSize = offsets[m_allocations.Length];
    sortedAllocs.Sort();
    // Sort patch locations by pointer so they can be walked in lock-step with
    // the pointer-sorted allocations below.
    var sortedPatches = new NativeArray<SortedIndex>(m_patches.Length, Allocator.Temp);
    for (int i = 0; i < m_patches.Length; ++i)
    {
        // BUGFIX: the statement terminator was misplaced after the loop's
        // closing brace ("} } ;"), which does not compile (CS1002); it
        // belongs directly after the object initializer.
        sortedPatches[i] = new SortedIndex { p = (byte *)m_patches[i].offsetPtr, index = i };
    }
    sortedPatches.Sort();
    // Allocate header + payload in one block and concatenate all chunks.
    byte *buffer = (byte *)UnsafeUtility.Malloc(sizeof(BlobAssetHeader) + dataSize, 16, allocator);
    byte *data = buffer + sizeof(BlobAssetHeader);
    for (int i = 0; i < m_allocations.Length; ++i)
    {
        UnsafeUtility.MemCpy(data + offsets[i], m_allocations[i].p, m_allocations[i].size);
    }
    // Rewrite every patch location as a relative offset inside the buffer.
    // Both lists are pointer-sorted, so iAlloc only ever advances.
    int iAlloc = 0;
    var allocStart = m_allocations[sortedAllocs[0].index].p;
    var allocEnd = allocStart + m_allocations[sortedAllocs[0].index].size;
    for (int i = 0; i < m_patches.Length; ++i)
    {
        int patchIndex = sortedPatches[i].index;
        int *offsetPtr = (int *)sortedPatches[i].p;
        // Advance to the chunk containing this patch location.
        while (offsetPtr > allocEnd)
        {
            ++iAlloc;
            allocStart = m_allocations[sortedAllocs[iAlloc].index].p;
            allocEnd = allocStart + m_allocations[sortedAllocs[iAlloc].index].size;
        }
        var patch = m_patches[patchIndex];
        int offsetPtrInData = offsets[sortedAllocs[iAlloc].index] + (int)((byte *)offsetPtr - allocStart);
        int targetPtrInData = offsets[patch.target.allocIndex] + patch.target.offset;
        // Store a self-relative offset; a non-zero length (BlobArray) is
        // written into the 4 bytes that follow.
        *(int *)(data + offsetPtrInData) = targetPtrInData - offsetPtrInData;
        if (patch.length != 0)
        {
            *(int *)(data + offsetPtrInData + 4) = patch.length;
        }
    }
    sortedPatches.Dispose();
    sortedAllocs.Dispose();
    offsets.Dispose();
    // Fill in the header and point the reference just past it.
    BlobAssetHeader *header = (BlobAssetHeader *)buffer;
    *header = new BlobAssetHeader();
    header->Length = (int)dataSize;
    header->Allocator = allocator;
    // @TODO use 64bit hash
    header->Hash = math.hash(buffer + sizeof(BlobAssetHeader), dataSize);
    BlobAssetReference<T> blobAssetReference;
    blobAssetReference.m_data.m_Align8Union = 0;
    header->ValidationPtr = blobAssetReference.m_data.m_Ptr = buffer + sizeof(BlobAssetHeader);
    return(blobAssetReference);
}

// Resolves a blob-relative data reference to an absolute pointer in its chunk.
void *AllocationToPointer(BlobDataRef blobDataRef)
{
    return(m_allocations[blobDataRef.allocIndex].p + blobDataRef.offset);
}

// Returns the current chunk if it can hold `size` bytes at `alignment`,
// zeroing the alignment padding; otherwise starts a fresh chunk.
BlobAllocation EnsureEnoughRoomInChunk(int size, int alignment)
{
    if (m_currentChunkIndex == -1)
    {
        return(AllocateNewChunk());
    }
    var alloc = m_allocations[m_currentChunkIndex];
    int startOffset = CollectionHelper.Align(alloc.size, alignment);
    if (startOffset + size > m_chunkSize)
    {
        return(AllocateNewChunk());
    }
    UnsafeUtility.MemClear(alloc.p + alloc.size, startOffset - alloc.size);
    alloc.size = startOffset;
    return(alloc);
}

// Reserves `size` zeroed bytes. Oversized requests get a dedicated
// allocation; everything else is bump-allocated from the current chunk.
BlobDataRef Allocate(int size, int alignment)
{
    if (size > m_chunkSize)
    {
        size = CollectionHelper.Align(size, 16);
        var allocIndex = m_allocations.Length;
        var mem = (byte *)UnsafeUtility.Malloc(size, alignment, m_allocator);
        UnsafeUtility.MemClear(mem, size);
        m_allocations.Add(new BlobAllocation { p = mem, size = size });
        return(new BlobDataRef { allocIndex = allocIndex, offset = 0 });
    }
    BlobAllocation alloc = EnsureEnoughRoomInChunk(size, alignment);
    var offset = alloc.size;
    UnsafeUtility.MemClear(alloc.p + alloc.size, size);
    alloc.size += size;
    m_allocations[m_currentChunkIndex] = alloc;
    return(new BlobDataRef { allocIndex = m_currentChunkIndex, offset = offset });
}

// Starts a new bump-allocation chunk and makes it current.
BlobAllocation AllocateNewChunk()
{
    // align size of last chunk to 16 bytes so chunks can be concatenated without breaking alignment
    if (m_currentChunkIndex != -1)
    {
        var currentAlloc = m_allocations[m_currentChunkIndex];
        currentAlloc.size = CollectionHelper.Align(currentAlloc.size, 16);
        m_allocations[m_currentChunkIndex] = currentAlloc;
    }
    m_currentChunkIndex = m_allocations.Length;
    var alloc = new BlobAllocation { p = (byte *)UnsafeUtility.Malloc(m_chunkSize, 16, m_allocator), size = 0 };
    m_allocations.Add(alloc);
    return(alloc);
}
void OnDestroy()
{
    // Free the vertex container when this component goes away.
    vertices.Dispose();
}
// Per-frame trigger bookkeeping: for every detector entity, diff its current
// collider-id buffer against the previously seen set (_detectorTriggers) and
// raise enter/exit handling for added/removed triggers.
protected override void OnUpdate()
{
    var detectors = _entityQuery.ToComponentDataArray <DetectorComponent>(Allocator.TempJob);
    var entities = _entityQuery.ToEntityArray(Allocator.TempJob);
    for (var i = 0; i < detectors.Length; i++)
    {
        var detector = detectors[i];
        // Current-frame trigger ids: cached in _triggersCache by index and
        // mirrored into a hash map for O(1) membership checks.
        var triggersSet = new NativeHashMap <int, bool>(MaxColliders, Allocator.Temp);
        DynamicBuffer <ColliderId> buffer = EntityManager.GetBuffer <ColliderId>(entities[i]);
        for (var index = 0; index < buffer.Length; index++)
        {
            ColliderId colliderId = buffer[index];
            _triggersCache[index] = colliderId.Value;
            triggersSet.Add(colliderId.Value, false);
        }
        if (_detectorTriggers.TryGetValue(entities[i], out HashSet <int> triggerIds))
        {
            // Exit: previously seen triggers that are absent this frame.
            int removeOldTriggersCount = 0;
            var removeOldTriggers = new NativeArray <int>(MaxColliders, Allocator.Temp);
            foreach (var id in triggerIds)
            {
                // in new triggers there is no old
                if (!triggersSet.ContainsKey(id))
                {
                    removeOldTriggers[removeOldTriggersCount++] = id;
#if UNITY_EDITOR
                    Debug.Log("Trigger exit::" + id);
#endif
                    TryRemoveComponentsFromTriggers(EntityManager, entities[i], id);
                }
            }
            // Removal is deferred because triggerIds cannot be mutated while
            // it is being enumerated above.
            for (var k = 0; k < removeOldTriggersCount; k++)
            {
                triggerIds.Remove(removeOldTriggers[k]);
            }
            // Enter: current triggers not yet tracked for this detector.
            for (int j = 0; j < detector.TriggersCount; j++)
            {
                var triggerId = _triggersCache[j];
                if (!triggerIds.Contains(triggerId))
                {
                    triggerIds.Add(triggerId);
#if UNITY_EDITOR
                    Debug.Log("Trigger enter::" + triggerId);
#endif
                    TryAddComponentsFromTriggers(EntityManager, entities[i]);
                }
            }
            removeOldTriggers.Dispose();
        }
        else if (detector.TriggersCount > 0)
        {
            // First time this detector reports triggers: every current trigger
            // is an "enter".
            // BUGFIX: the original called _detectorTriggers.Add(...) inside
            // this loop, which throws ArgumentException (duplicate key) for
            // any detector with more than one trigger. Build the set once and
            // register it a single time instead.
            var newTriggerIds = new HashSet <int>();
            for (int j = 0; j < detector.TriggersCount; j++)
            {
                var triggerId = _triggersCache[j];
                newTriggerIds.Add(triggerId);
#if UNITY_EDITOR
                Debug.Log("Trigger enter::" + triggerId);
#endif
                TryAddComponentsFromTriggers(EntityManager, entities[i]);
            }
            _detectorTriggers.Add(entities[i], newTriggerIds);
        }
        triggersSet.Dispose();
        detectors[i] = detector;
    }
    _entityQuery.CopyFromComponentDataArray(detectors);
    detectors.Dispose();
    entities.Dispose();
}
// Exercises virtual-memory commit/decommit from Burst jobs: reserves an
// address range, commits 100 pages in parallel, reads them back, then
// decommits and frees the range, reporting any baselib errors.
public unsafe void VirtualMemory_AllocateAndFreeFromBurst()
{
    BaselibErrorState errorState = default;
    // Reserve 1GB (256K pages at the 4KB default page size)
    var addressSpace = VirtualMemoryUtility.ReserveAddressSpace(1024ul * 256ul, VirtualMemoryUtility.DefaultPageSizeInBytes, out errorState);
    {
        // 100 pages of ints
        const int allocationCount = 100;
        var errorStates = new NativeArray <BaselibErrorState>(allocationCount, Allocator.Persistent);
        {
            for (int i = 0; i < allocationCount; i++)
            {
                errorStates[i] = default;
            }
            // Commit one page per job iteration; each iteration records its
            // own error state for inspection after completion.
            var commitJob = new CommitJob
            {
                jobAddressRangePtr = addressSpace.ptr,
                jobLog2PageSize = addressSpace.log2PageSize,
                jobPageCount = 1,
                jobErrorStates = errorStates
            };
            commitJob.Schedule(allocationCount, 1).Complete();
            for (int i = 0; i < allocationCount; i++)
            {
                VirtualMemoryUtility.ReportWrappedBaselibError(errorStates[i]);
            }
            // for each page allocated
            for (int i = 0; i < allocationCount; i++)
            {
                // NOTE(review): `page` is computed but never used — the
                // scratch allocator below always starts from addressSpace.ptr,
                // so every iteration re-reads the same region; verify intent.
                var page = (void *)((ulong)addressSpace.ptr + (ulong)i * VirtualMemoryUtility.DefaultPageSizeInBytes);
                var allocator = new UnsafeScratchAllocator((void *)addressSpace.ptr, (int)VirtualMemoryUtility.DefaultPageSizeInBytes * allocationCount);
                var intCount = ((int)VirtualMemoryUtility.DefaultPageSizeInBytes / sizeof(int));
                var numbersInPage = (int *)allocator.Allocate <int>(intCount);
                // for each int in the allocated page
                for (int j = 0; j < intCount; j++)
                {
                    // NOTE(review): nothing in this test writes j into the
                    // page before this assert — presumably it relies on a
                    // writer elsewhere or expects zeroed pages; confirm.
                    Assert.AreEqual(j, numbersInPage[j]);
                }
            }
            // Return the pages to the OS (mirrors the commit above).
            var decommitJob = new DecommitJob
            {
                jobAddressRangePtr = addressSpace.ptr,
                jobLog2PageSize = addressSpace.log2PageSize,
                jobPageCount = 1,
                jobErrorStates = errorStates
            };
            decommitJob.Schedule(allocationCount, 1).Complete();
        }
        errorStates.Dispose();
    }
    VirtualMemoryUtility.FreeAddressSpace(addressSpace, out errorState);
}
// ORCA (Optimal Reciprocal Collision Avoidance) chunk job: for every moving
// agent in the chunk, gathers its nearest neighbors from the quadrant hash
// map, builds one half-plane constraint (Line) per neighbor, and solves the
// resulting linear program for a new collision-free velocity.
// Reads fields of the enclosing job struct: maxNeighbors, radius,
// neighborsDist, dt, invTimeHorizon, quadrantMap, MAX_QUADRANT_NEIGHBORS.
public void Execute(ArchetypeChunk chunk, int chunkIndex, int firstEntityIndex)
{
    NativeArray <Translation> translations = chunk.GetNativeArray(translationType);
    NativeArray <Velocity> velocities = chunk.GetNativeArray(velocityType);
    // Scratch buffers reused across every entity in the chunk.
    NativeArray <Line> orcaLines = new NativeArray <Line>(maxNeighbors, Allocator.Temp);
    NativeArray <KeyValuePair <float, AgentNeighbor> > agentNeighbors = new NativeArray <KeyValuePair <float, AgentNeighbor> >(maxNeighbors, Allocator.Temp);
    NativeArray <int> quadrantKeys = new NativeArray <int>(MAX_QUADRANT_NEIGHBORS, Allocator.Temp);
    float invTimeStep = 1.0f / dt;
    float combinedRadius = radius * 2.0f;   // all agents share one radius
    float combinedRadiusSqr = math.pow(combinedRadius, 2);
    float rangeSqr = neighborsDist * neighborsDist;
    for (int entityIdx = 0; entityIdx < chunk.ChunkEntityCount; entityIdx++)
    {
        float2 velocity = velocities[entityIdx].Value;
        // Early exit if the agent is not moving
        if (math.lengthsq(velocity) < 0.001f)
        {
            continue;
        }
        float2 position = translations[entityIdx].Value.xz;
        int countNeighborQuadrant = 0;
        QuadrantSystem.GetCurrentCellAndNeighborsKeys(position, ref quadrantKeys, ref countNeighborQuadrant);
        // ORCA setup
        int neighborsCount = 0;
        int nbObstacleLine = 0;
        // Gather up to maxNeighbors nearest agents, kept sorted by squared
        // distance (insertion sort into agentNeighbors).
        for (int i = 0; i < countNeighborQuadrant; i++)
        {
            if (!quadrantMap.TryGetFirstValue(quadrantKeys[i], out var neighbor, out var nativeMultiHashMapIterator))
            {
                continue;
            }
            do
            {
                float2 dir = position - neighbor.position;
                float distSqr = math.dot(dir, dir);
                // Condition to avoid self
                if (distSqr > 0.001f)
                {
                    // Skip agents beyond the current search range.
                    if (!(distSqr < rangeSqr))
                    {
                        continue;
                    }
                    // If there is a free slot, append immediately.
                    if (neighborsCount < maxNeighbors)
                    {
                        agentNeighbors[neighborsCount] = new KeyValuePair <float, AgentNeighbor>(distSqr, new AgentNeighbor()
                        {
                            position = neighbor.position,
                            velocity = neighbor.velocity
                        });
                        neighborsCount++;
                    }
                    // Shift further entries up to keep the list sorted.
                    int j = neighborsCount - 1;
                    while (j != 0 && distSqr < agentNeighbors[j - 1].Key)
                    {
                        agentNeighbors[j] = agentNeighbors[j - 1];
                        j--;
                    }
                    // Once a nearer slot is found, place the new neighbor.
                    agentNeighbors[j] = new KeyValuePair <float, AgentNeighbor>(distSqr, new AgentNeighbor()
                    {
                        position = neighbor.position,
                        velocity = neighbor.velocity
                    });
                    // If the list is full, shrink the search range to the
                    // farthest kept neighbor.
                    if (neighborsCount == maxNeighbors)
                    {
                        rangeSqr = agentNeighbors[maxNeighbors - 1].Key;
                    }
                }
            } while (quadrantMap.TryGetNextValue(out neighbor, ref nativeMultiHashMapIterator));
        }
        // Build one ORCA half-plane per neighbor.
        for (int neighborIdx = 0; neighborIdx < neighborsCount; neighborIdx++)
        {
            AgentNeighbor otherAgent = agentNeighbors[neighborIdx].Value;
            float2 relativePosition = otherAgent.position - position;
            float2 relativeVelocity = velocity - otherAgent.velocity;
            float distSqr = math.lengthsq(relativePosition);
            Line line;
            float2 u;
            if (distSqr > combinedRadiusSqr)
            {
                // No collision within the time horizon: project the relative
                // velocity out of the velocity-obstacle cone.
                float2 w = relativeVelocity - invTimeHorizon * relativePosition;
                // Vector from center to relative velocity
                float wLengthSqr = math.lengthsq(w);
                float dotProduct1 = math.dot(w, relativePosition);
                if (dotProduct1 < 0.0f && math.pow(dotProduct1, 2) > combinedRadiusSqr * wLengthSqr)
                {
                    // Project on circle (cut-off disc of the cone)
                    float wLength = math.sqrt(wLengthSqr);
                    float2 unitW = w / wLength;
                    line.direction = new float2(unitW.y, -unitW.x);
                    u = (combinedRadius * invTimeHorizon - wLength) * unitW;
                }
                else
                {
                    // Projection on legs (left or right edge of the cone,
                    // chosen by the sign of the 2D determinant)
                    float leg = math.sqrt(distSqr - combinedRadiusSqr);
                    if (Det(relativePosition, w) > 0.0f)
                    {
                        line.direction = new float2(
                            relativePosition.x * leg - relativePosition.y * combinedRadius,
                            relativePosition.x * combinedRadius + relativePosition.y * leg) / distSqr;
                    }
                    else
                    {
                        line.direction = -new float2(
                            relativePosition.x * leg - relativePosition.y * combinedRadius,
                            -relativePosition.x * combinedRadius + relativePosition.y * leg) / distSqr;
                    }
                    float dotProduct2 = math.dot(relativeVelocity, line.direction);
                    u = dotProduct2 * line.direction - relativeVelocity;
                }
            }
            else
            {
                // Collision: push apart within a single time step.
                float2 w = relativeVelocity - invTimeStep * relativePosition;
                float wLength = math.length(w);
                float2 wUnit = w / wLength;
                line.direction = new float2(wUnit.y, -wUnit.x);
                u = (combinedRadius * invTimeStep - wLength) * wUnit;
            }
            // Reciprocity: each agent takes half of the required correction.
            line.point = velocity + 0.5f * u;
            orcaLines[neighborIdx] = line;
        }
        // Solve the 2D linear program; fall back to the 3D solver if it fails.
        float2 optimalVel = velocity;
        float2 vel = float2.zero;
        float maxSpeed = velocities[entityIdx].maxSpeed;
        int lineFail = LinearProgram2(orcaLines, neighborsCount, maxSpeed, optimalVel, false, ref vel);
        if (lineFail < neighborsCount)
        {
            LinearProgram3(orcaLines, neighborsCount, nbObstacleLine, lineFail, maxSpeed, ref vel);
        }
        velocities[entityIdx] = new Velocity() { Value = vel, maxSpeed = maxSpeed };
    }
    quadrantKeys.Dispose();
    orcaLines.Dispose();
    agentNeighbors.Dispose();
}
protected override void OnDestroy()
{
    // System teardown: free the random-state container.
    _randoms.Dispose();
}
private void OnDestroy()
{
    // Wait for the in-flight network job before tearing anything down —
    // disposing the driver while the job still uses it is not safe.
    mJob.Complete();
    mDriver.Dispose();
    mConnection.Dispose();
}
/// <summary>
/// Performs a raycast against all currently tracked planes.
/// </summary>
/// <param name="ray">The ray, in Unity world space, to cast.</param>
/// <param name="trackableTypeMask">A mask of raycast types to perform.</param>
/// <param name="allocator">The <c>Allocator</c> to use when creating the returned <c>NativeArray</c>.</param>
/// <returns>
/// A new <c>NativeArray</c> of raycast results allocated with <paramref name="allocator"/>.
/// The caller owns the memory and is responsible for calling <c>Dispose</c> on the <c>NativeArray</c>.
/// </returns>
/// <seealso cref="ARRaycastManager.Raycast(Ray, List{ARRaycastHit}, TrackableType)"/>
/// <seealso cref="ARRaycastManager.Raycast(Vector2, List{ARRaycastHit}, TrackableType)"/>
public NativeArray<XRRaycastHit> Raycast(
    Ray ray,
    TrackableType trackableTypeMask,
    Allocator allocator)
{
    // No plane types requested; early out with an empty (but valid) array.
    if ((trackableTypeMask & TrackableType.Planes) == TrackableType.None)
    {
        return new NativeArray<XRRaycastHit>(0, allocator);
    }

    var trackableCollection = trackables;

    // Allocate a buffer that is at least large enough to contain a hit
    // against every plane.
    var hitBuffer = new NativeArray<XRRaycastHit>(trackableCollection.count, Allocator.Temp);
    try
    {
        int count = 0;
        foreach (var plane in trackableCollection)
        {
            var trackableTypes = TrackableType.None;

            // Build an infinite mathematical plane from the trackable's
            // session-space pose, then intersect the ray with it.
            var normal = plane.transform.localRotation * Vector3.up;
            var infinitePlane = new Plane(normal, plane.transform.localPosition);
            if (!infinitePlane.Raycast(ray, out float distance))
            {
                continue;
            }

            // Pose in session space.
            var pose = new Pose(
                ray.origin + ray.direction * distance,
                plane.transform.localRotation);

            if ((trackableTypeMask & TrackableType.PlaneWithinInfinity) != TrackableType.None)
            {
                trackableTypes |= TrackableType.PlaneWithinInfinity;
            }

            // To test the rest, we need the intersection point in plane space.
            var hitPositionPlaneSpace3d = Quaternion.Inverse(plane.transform.localRotation)
                * (pose.position - plane.transform.localPosition);
            var hitPositionPlaneSpace = new Vector2(hitPositionPlaneSpace3d.x, hitPositionPlaneSpace3d.z);

            // Bounds / estimated hits: axis-aligned extents test in plane space.
            var estimatedOrWithinBounds = TrackableType.PlaneWithinBounds | TrackableType.PlaneEstimated;
            if ((trackableTypeMask & estimatedOrWithinBounds) != TrackableType.None)
            {
                var differenceFromCenter = hitPositionPlaneSpace - plane.centerInPlaneSpace;
                if ((Mathf.Abs(differenceFromCenter.x) <= plane.extents.x) &&
                    (Mathf.Abs(differenceFromCenter.y) <= plane.extents.y))
                {
                    trackableTypes |= (estimatedOrWithinBounds & trackableTypeMask);
                }
            }

            // Polygon hit: non-zero winding number means the point is inside
            // the plane's boundary polygon.
            if ((trackableTypeMask & TrackableType.PlaneWithinPolygon) != TrackableType.None)
            {
                if (WindingNumber(hitPositionPlaneSpace, plane.boundary) != 0)
                {
                    trackableTypes |= TrackableType.PlaneWithinPolygon;
                }
            }

            if (trackableTypes != TrackableType.None)
            {
                hitBuffer[count++] = new XRRaycastHit(
                    plane.trackableId,
                    pose,
                    distance,
                    trackableTypes);
            }
        }

        // Finally, copy only the hits actually produced into the
        // caller-owned return array.
        var hitResults = new NativeArray<XRRaycastHit>(count, allocator);
        NativeArray<XRRaycastHit>.Copy(hitBuffer, hitResults, count);
        return hitResults;
    }
    finally
    {
        // The temp buffer is released even if allocation or copy throws.
        hitBuffer.Dispose();
    }
}
/// <summary>
/// Reads a serialized entity world from <paramref name="reader"/> into an
/// empty <paramref name="manager"/>: validates the file-format version,
/// reads the type and archetype tables, then loads raw chunks and fixes up
/// their internal pointers (archetype, shared-component array, change
/// versions, and heap-overflow buffers).
/// </summary>
/// <param name="manager">Transaction for a completely empty EntityManager; anything else throws.</param>
/// <param name="reader">Binary source positioned at the start of the serialized world.</param>
/// <param name="numSharedComponents">Number of shared component values available, used to validate stored indices.</param>
/// <exception cref="ArgumentException">
/// Manager is not empty, the stored file-format version mismatches, or a
/// chunk references a shared-component index beyond <paramref name="numSharedComponents"/>.
/// </exception>
public static unsafe void DeserializeWorld(ExclusiveEntityTransaction manager, BinaryReader reader, int numSharedComponents)
{
    // Loading relies on AllocateConsecutiveEntitiesForLoading, which is
    // only valid when the manager holds no entities at all.
    if (manager.ArchetypeManager.CountEntities() != 0)
    {
        throw new ArgumentException(
            $"DeserializeWorld can only be used on completely empty EntityManager. Please create a new empty World and use EntityManager.MoveEntitiesFrom to move the loaded entities into the destination world instead.");
    }

    // Reject files written by a different serializer version.
    int storedVersion = reader.ReadInt();
    if (storedVersion != CurrentFileFormatVersion)
    {
        throw new ArgumentException(
            $"Attempting to read a entity scene stored in an old file format version (stored version : {storedVersion}, current version : {CurrentFileFormatVersion})");
    }

    // The type table comes first; the archetype table references it.
    var types = ReadTypeArray(reader);
    int totalEntityCount;
    var archetypes = ReadArchetypes(reader, types, manager, out totalEntityCount);

    manager.AllocateConsecutiveEntitiesForLoading(totalEntityCount);

    int totalChunkCount = reader.ReadInt();
    for (int i = 0; i < totalChunkCount; ++i)
    {
        // Each chunk was written as a raw kChunkSize blob; read it into
        // freshly malloc'd memory that the manager will take ownership of.
        var chunk = (Chunk*) UnsafeUtility.Malloc(Chunk.kChunkSize, 64, Allocator.Persistent);
        reader.ReadBytes(chunk, Chunk.kChunkSize);

        // On disk the Archetype field stores an index into the archetype
        // table; replace it with the live archetype pointer.
        chunk->Archetype = archetypes[(int)chunk->Archetype].Archetype;

        // Fixup the pointer to the shared component values
        // todo: more generic way of fixing up pointers?
        chunk->SharedComponentValueArray = (int*)((byte*)(chunk) + Chunk.GetSharedComponentOffset(chunk->Archetype->NumSharedComponents));

        var numSharedComponentsInArchetype = chunk->Archetype->NumSharedComponents;
        for (int j = 0; j < numSharedComponentsInArchetype; ++j)
        {
            // The shared component 0 is not part of the array, so an index
            // equal to the array size is valid.
            if (chunk->SharedComponentValueArray[j] > numSharedComponents)
            {
                throw new ArgumentException(
                    $"Archetype uses shared component at index {chunk->SharedComponentValueArray[j]} but only {numSharedComponents} are available, check if the shared scene has been properly loaded.");
            }
        }

        // The change-version array lives at a computed offset inside the
        // chunk itself; recompute the pointer for this address space.
        chunk->ChangeVersion = (uint*) ((byte*) chunk + Chunk.GetChangedComponentOffset(chunk->Archetype->TypesCount, chunk->Archetype->NumSharedComponents));

        // Allocate additional heap memory for buffers that have overflown
        // into the heap, and read their data.
        int bufferAllocationCount = reader.ReadInt();
        if (bufferAllocationCount > 0)
        {
            var bufferPatches = new NativeArray<BufferPatchRecord>(bufferAllocationCount, Allocator.Temp);
            reader.ReadArray(bufferPatches, bufferPatches.Length);

            // TODO: PERF: Batch malloc interface.
            for (int pi = 0; pi < bufferAllocationCount; ++pi)
            {
                // Each patch record locates a BufferHeader inside the chunk
                // whose payload spilled to the heap when serialized.
                var target = (BufferHeader*)OffsetFromPointer(chunk->Buffer, bufferPatches[pi].ChunkOffset);

                // TODO: Alignment
                target->Pointer = (byte*) UnsafeUtility.Malloc(bufferPatches[pi].AllocSizeBytes, 8, Allocator.Persistent);
                reader.ReadBytes(target->Pointer, bufferPatches[pi].AllocSizeBytes);
            }

            bufferPatches.Dispose();
        }

        manager.AddExistingChunk(chunk);
    }

    archetypes.Dispose();
}
/// <summary>
/// Computes how many particles the emitter should spawn this frame (burst
/// or rate-based mode), instantiates them from the emitter's disabled
/// template entity, and initializes their transform, velocity, scale,
/// color, rotation, and source-shape placement.
/// </summary>
/// <param name="mgr">Entity manager used for all component reads/writes.</param>
/// <param name="deltaTime">Frame delta time driving cooldowns and lifetimes.</param>
/// <param name="emitter">The emitter entity whose configuration is read.</param>
private void SpawnParticles(EntityManager mgr, float deltaTime, Entity emitter)
{
    var particleEmitter = mgr.GetComponentData<ParticleEmitter>(emitter);
    var particleEmitterInternal = mgr.GetComponentData<ParticleEmitterInternal>(emitter);

    uint particlesToSpawn = 0;
    if (mgr.HasComponent<BurstEmissionInternal>(emitter) && mgr.HasComponent<BurstEmission>(emitter))
    {
        // Burst emission mode.
        var burstEmission = mgr.GetComponentData<BurstEmission>(emitter);
        var burstEmissionInternal = mgr.GetComponentData<BurstEmissionInternal>(emitter);
        if (burstEmissionInternal.cycle < burstEmission.cycles)
        {
            burstEmissionInternal.cooldown -= deltaTime;
            if (burstEmissionInternal.cooldown < 0.0f)
            {
                // Cooldown elapsed: spawn a random burst and schedule the
                // next one with a random interval.
                particlesToSpawn = (uint)m_rand.NextInt((int)burstEmission.count.start, (int)burstEmission.count.end);
                burstEmissionInternal.cycle++;
                burstEmissionInternal.cooldown = m_rand.NextFloat(burstEmission.interval.start, burstEmission.interval.end);
            }
            mgr.SetComponentData(emitter, burstEmissionInternal);
        }
    }
    else
    {
        // Normal emission mode.
        if (particleEmitter.emitRate > 0.0f)
        {
            // Accumulate time and emit one particle per 1/emitRate seconds,
            // carrying the remainder over to the next frame.
            particleEmitterInternal.particleSpawnCooldown += deltaTime;
            float particleSpawnDelay = 1.0f / particleEmitter.emitRate;
            particlesToSpawn = (uint)(particleEmitterInternal.particleSpawnCooldown / particleSpawnDelay);
            if (particlesToSpawn > 0)
            {
                particleEmitterInternal.particleSpawnCooldown -= particleSpawnDelay * particlesToSpawn;
                // NOTE(review): if numParticles can exceed maxParticles this
                // uint subtraction wraps to a huge value and the clamp below
                // never triggers — verify that invariant upstream.
                uint maxParticlesToSpawn = particleEmitter.maxParticles - particleEmitterInternal.numParticles;
                if (particlesToSpawn > maxParticlesToSpawn)
                {
                    particlesToSpawn = maxParticlesToSpawn;
                }
            }
            mgr.SetComponentData(emitter, particleEmitterInternal);
        }
    }

    if (particlesToSpawn == 0)
    {
        return;
    }

    var newParticles = new NativeArray<Entity>((int)particlesToSpawn, Collections.Allocator.Persistent);

    // Before the new particles will spawn, Disabled component needs to be
    // removed from the template particle; it is re-added right after the
    // Instantiate so the template itself stays inactive.
    mgr.RemoveComponent<Disabled>(particleEmitterInternal.particleTemplate);
    mgr.Instantiate(particleEmitterInternal.particleTemplate, newParticles);
    mgr.AddComponentData(particleEmitterInternal.particleTemplate, new Disabled());

    InitTime(mgr, deltaTime, particleEmitter.lifetime, newParticles);

    if (particleEmitter.attachToEmitter)
    {
        // Parent each new particle to the emitter and zero its local position.
        foreach (var particle in newParticles)
        {
            var node = new Parent() { Value = emitter };
            if (mgr.HasComponent<Parent>(particle))
            {
                mgr.SetComponentData(particle, node);
            }
            else
            {
                mgr.AddComponentData(particle, node);
            }
            var position = mgr.GetComponentData<Translation>(particle);
            position.Value = float3.zero;
            mgr.SetComponentData(particle, position);
        }
    }
    else
    {
        // Detached particles start at the emitter's world position, taken
        // from the translation column of its LocalToWorld matrix.
        if (mgr.HasComponent<LocalToWorld>(emitter))
        {
            var toWorld = mgr.GetComponentData<LocalToWorld>(emitter);
            var emitterWorldPos = new float3(toWorld.Value[3][0], toWorld.Value[3][1], toWorld.Value[3][2]);
            foreach (var particle in newParticles)
            {
                var localPos = mgr.GetComponentData<Translation>(particle);
                localPos.Value = emitterWorldPos;
                mgr.SetComponentData(particle, localPos);
            }
        }
    }

    if (mgr.HasComponent<EmitterInitialVelocity>(emitter))
    {
        // All new particles share the emitter's configured initial velocity.
        var velocity = mgr.GetComponentData<EmitterInitialVelocity>(emitter).velocity;
        var particleVelocity = new ParticleVelocity();
        foreach (var particle in newParticles)
        {
            particleVelocity.velocity = velocity;
            mgr.AddComponentData(particle, particleVelocity);
        }
    }
    else if (mgr.HasComponent<LifetimeVelocity>(emitter))
    {
        // Lifetime-curve velocity: start at zero, the curve drives it later.
        foreach (var particle in newParticles)
        {
            mgr.AddComponentData(particle, new ParticleVelocity() { velocity = float3.zero });
        }
    }

    InitScale(mgr, emitter, newParticles);
    InitColor(mgr, emitter, newParticles);
    InitRotation(mgr, emitter, newParticles);

    // Init particle's position and the velocity based on the source.
    if (mgr.HasComponent<EmitterBoxSource>(emitter))
    {
        ParticlesSource.InitEmitterBoxSource(mgr, emitter, newParticles);
    }
    else if (mgr.HasComponent<EmitterCircleSource>(emitter))
    {
        ParticlesSource.InitEmitterCircleSource(mgr, emitter, newParticles);
    }
    else if (mgr.HasComponent<EmitterConeSource>(emitter))
    {
        ParticlesSource.InitEmitterConeSource(mgr, emitter, newParticles, particleEmitter.attachToEmitter);
    }

    newParticles.Dispose();
}
/// <summary>
/// Allocates the interleaved vertex buffer and schedules one conversion job
/// per supplied vertex attribute (positions, then optionally normals,
/// tangents, UV sets, colors, and bone weights/joints).
/// </summary>
/// <param name="posInput">Position attribute source (required).</param>
/// <param name="nrmInput">Optional normal attribute source.</param>
/// <param name="tanInput">Optional tangent attribute source.</param>
/// <param name="uvInputs">Optional UV attribute sources (one per set).</param>
/// <param name="colorInput">Optional vertex-color attribute source.</param>
/// <param name="weightsInput">Optional bone-weight attribute source.</param>
/// <param name="jointsInput">Optional bone-joint attribute source.</param>
/// <returns>
/// The combined handle of all scheduled jobs, or null when a required job
/// could not be created.
/// </returns>
public override unsafe JobHandle? ScheduleVertexJobs(
    VertexInputData posInput,
    VertexInputData? nrmInput = null,
    VertexInputData? tanInput = null,
    VertexInputData[] uvInputs = null,
    VertexInputData? colorInput = null,
    VertexInputData? weightsInput = null,
    VertexInputData? jointsInput = null
)
{
    Profiler.BeginSample("ScheduleVertexJobs");
    Profiler.BeginSample("AllocateNativeArray");
    vData = new NativeArray<VType>(posInput.count, defaultAllocator);
    var vDataPtr = (byte*)vData.GetUnsafeReadOnlyPtr();
    Profiler.EndSample();

    bounds = posInput.bounds;

    // Count the jobs to schedule and the interleaved output stride as each
    // attribute is discovered.
    int jobCount = 1;
    int outputByteStride = 12; // sizeof Vector3

    hasNormals = nrmInput.HasValue || calculateNormals;
    if (hasNormals)
    {
        if (nrmInput.HasValue)
        {
            jobCount++;
        }
        outputByteStride += 12;
    }

    hasTangents = tanInput.HasValue || calculateTangents;
    if (hasTangents)
    {
        if (tanInput.HasValue)
        {
            jobCount++;
        }
        outputByteStride += 16;
    }

    if (uvInputs != null && uvInputs.Length > 0)
    {
        // One job per UV set; the buffer type depends on how many sets exist.
        jobCount += uvInputs.Length;
        switch (uvInputs.Length)
        {
        case 1:
            texCoords = new VertexBufferTexCoords<VTexCoord1>();
            break;
        default:
            texCoords = new VertexBufferTexCoords<VTexCoord2>();
            break;
        }
    }

    hasColors = colorInput.HasValue;
    if (hasColors)
    {
        jobCount++;
        colors = new VertexBufferColors();
    }

    hasBones = weightsInput.HasValue && jointsInput.HasValue;
    if (hasBones)
    {
        jobCount += 2;
        bones = new VertexBufferBones();
    }

    NativeArray<JobHandle> handles = new NativeArray<JobHandle>(jobCount, defaultAllocator);
    int handleIndex = 0;

    // Positions are mandatory; failure aborts the whole schedule.
    fixed (void* input = &(posInput.buffer[posInput.startOffset]))
    {
        var h = GetVector3sJob(
            input,
            posInput.count,
            posInput.type,
            posInput.byteStride,
            (Vector3*)vDataPtr,
            outputByteStride,
            posInput.normalize
            );
        if (h.HasValue)
        {
            handles[handleIndex] = h.Value;
            handleIndex++;
        }
        else
        {
            // FIX: release the handles array on the failure path (was leaked).
            handles.Dispose();
            Profiler.EndSample();
            return null;
        }
    }

    if (nrmInput.HasValue)
    {
        fixed (void* input = &(nrmInput.Value.buffer[nrmInput.Value.startOffset]))
        {
            var h = GetVector3sJob(
                input,
                nrmInput.Value.count,
                nrmInput.Value.type,
                nrmInput.Value.byteStride,
                (Vector3*)(vDataPtr + 12),
                outputByteStride,
                nrmInput.Value.normalize
                );
            if (h.HasValue)
            {
                handles[handleIndex] = h.Value;
                handleIndex++;
            }
            else
            {
                // FIX: release the handles array on the failure path (was leaked).
                handles.Dispose();
                Profiler.EndSample();
                return null;
            }
        }
    }

    if (tanInput.HasValue)
    {
        fixed (void* input = &(tanInput.Value.buffer[tanInput.Value.startOffset]))
        {
            var h = GetTangentsJob(
                input,
                tanInput.Value.count,
                tanInput.Value.type,
                tanInput.Value.byteStride,
                (Vector4*)(vDataPtr + 24),
                outputByteStride,
                tanInput.Value.normalize
                );
            if (h.HasValue)
            {
                handles[handleIndex] = h.Value;
                handleIndex++;
            }
            else
            {
                // FIX: release the handles array on the failure path (was leaked).
                handles.Dispose();
                Profiler.EndSample();
                return null;
            }
        }
    }

    if (texCoords != null)
    {
        texCoords.ScheduleVertexUVJobs(uvInputs, new NativeSlice<JobHandle>(handles, handleIndex, uvInputs.Length));
        // FIX: the UV scheduler fills uvInputs.Length slots of the slice;
        // advancing by only 1 (the old code) made the color/bone handles
        // below overwrite UV handles when more than one UV set was present,
        // silently dropping those job dependencies.
        handleIndex += uvInputs.Length;
    }

    if (hasColors)
    {
        colors.ScheduleVertexColorJob(colorInput.Value, new NativeSlice<JobHandle>(handles, handleIndex, 1));
        handleIndex++;
    }

    if (hasBones)
    {
        bones.ScheduleVertexBonesJob(weightsInput.Value, jointsInput.Value, new NativeSlice<JobHandle>(handles, handleIndex, 2));
        handleIndex += 2;
    }

    var handle = (jobCount > 1) ? JobHandle.CombineDependencies(handles) : handles[0];
    handles.Dispose();
    Profiler.EndSample();
    return handle;
}
/// <summary>
/// Runs an A* search on a hard-coded 4x4 grid from startingPostion to
/// endPostion and logs the resulting path, or a failure message when the
/// target is unreachable.
/// </summary>
private void FindPath(int2 startingPostion, int2 endPostion)
{
    int2 gridSize = new int2(4, 4);

    // Build the node grid: every cell starts unvisited (gCost = MaxValue)
    // with its heuristic precomputed toward the target.
    NativeArray<PathNode> nodes = new NativeArray<PathNode>(gridSize.x * gridSize.y, Allocator.Temp);
    for (int col = 0; col < gridSize.x; col++)
    {
        for (int row = 0; row < gridSize.y; row++)
        {
            PathNode node = new PathNode
            {
                x = col,
                y = row,
                Index = CalculateIndex(col, row, gridSize.x),
                gCost = int.MaxValue,
                hCost = CalculateDistanceCost(new int2(col, row), endPostion),
                isWalkble = true,
                cameFromNodeIndex = -1,
            };
            node.CalculateFcost();
            nodes[node.Index] = node;
        }
    }

    // The four cardinal neighbour offsets.
    NativeArray<int2> offsets = new NativeArray<int2>(new int2[]
    {
        new int2(-1, 0),
        new int2(+1, 0),
        new int2(0, +1),
        new int2(0, -1),
    }, Allocator.Temp);

    int endNodeIndex = CalculateIndex(endPostion.x, endPostion.y, gridSize.x);

    // Seed the search with the start node at zero cost.
    PathNode startNode = nodes[CalculateIndex(startingPostion.x, startingPostion.y, gridSize.x)];
    startNode.gCost = 0;
    startNode.CalculateFcost();
    nodes[startNode.Index] = startNode;

    NativeList<int> open = new NativeList<int>(Allocator.Temp);
    NativeList<int> closed = new NativeList<int>(Allocator.Temp);
    open.Add(startNode.Index);

    while (open.Length > 0)
    {
        int currentIndex = GetLowestCostFNodeIndex(open, nodes);
        if (currentIndex == endNodeIndex)
        {
            // Reached the goal.
            break;
        }

        PathNode current = nodes[currentIndex];

        // Move the current node from the open list to the closed list.
        for (int i = 0; i < open.Length; i++)
        {
            if (open[i] == currentIndex)
            {
                open.RemoveAtSwapBack(i);
                break;
            }
        }
        closed.Add(currentIndex);

        for (int o = 0; o < offsets.Length; o++)
        {
            int2 neighborPosition = new int2(current.x + offsets[o].x, current.y + offsets[o].y);
            if (!IsPositionInsiderGrid(neighborPosition, gridSize))
            {
                // Off the grid.
                continue;
            }

            int neighborIndex = CalculateIndex(neighborPosition.x, neighborPosition.y, gridSize.x);
            if (closed.Contains(neighborIndex))
            {
                // Already expanded.
                continue;
            }

            PathNode neighbor = nodes[neighborIndex];
            if (!neighbor.isWalkble)
            {
                // Blocked cell.
                continue;
            }

            int candidateGCost = current.gCost + CalculateDistanceCost(new int2(current.x, current.y), neighborPosition);
            if (candidateGCost < neighbor.gCost)
            {
                // Cheaper route to this neighbour found; record it and
                // (re)open the node.
                neighbor.cameFromNodeIndex = currentIndex;
                neighbor.gCost = candidateGCost;
                neighbor.CalculateFcost();
                nodes[neighborIndex] = neighbor;
                if (!open.Contains(neighbor.Index))
                {
                    open.Add(neighbor.Index);
                }
            }
        }
    }

    PathNode endNode = nodes[endNodeIndex];
    if (endNode.cameFromNodeIndex == -1)
    {
        // No route was found.
        Debug.Log("didnt Find it");
    }
    else
    {
        // Walk the cameFrom chain back into a path and log each step.
        NativeList<int2> path = CalculatePath(nodes, endNode);
        foreach (int2 pathPosition in path)
        {
            Debug.Log(pathPosition);
        }
        path.Dispose();
    }

    nodes.Dispose();
    offsets.Dispose();
    open.Dispose();
    closed.Dispose();
}
/// <summary>
/// System teardown: runs the base teardown, then releases the random
/// container once the system's outstanding jobs are done.
/// </summary>
protected override void OnDestroy()
{
    base.OnDestroy();

    // Dispose is chained on Dependency so any in-flight jobs still using
    // the container complete before the memory is freed.
    _rand.Dispose(Dependency);
}
/// <summary>
/// Releases the native containers owned by this system when the manager
/// is destroyed.
/// </summary>
protected override void OnDestroyManager()
{
    // Free both native collections so no persistent memory leaks.
    m_pendingPings.Dispose();
    m_pingStats.Dispose();
}
/// <summary>
/// Per-frame update: copies renderer component data into temp arrays,
/// buckets animation play data per animation type via jobs, then issues one
/// GPU-instanced indirect draw per animation type.
/// </summary>
/// <param name="inputDeps">Incoming job dependency.</param>
/// <returns>The final job handle of the scheduled chain.</returns>
protected override JobHandle OnUpdate(JobHandle inputDeps)
{
    this.DisposeBuffers();
    var handle = inputDeps;

    // ----------------------------------------------
    // Allocate memory for this frame's snapshot of the renderer group.
    var groupLength = this._rendererGroup.CalculateLength();
    this._localToWorlds = new NativeArray<LocalToWorld>(groupLength, Allocator.TempJob, NativeArrayOptions.UninitializedMemory);
    this._animationPlayData = new NativeArray<AnimationPlayData>(groupLength, Allocator.TempJob, NativeArrayOptions.UninitializedMemory);

    // ----------------------------------------------
    // CopyComponentData: snapshot transforms and play data.
    handle = new CopyComponentData<LocalToWorld>
    {
        Source = this._rendererGroup.GetComponentDataArray<LocalToWorld>(),
        Results = this._localToWorlds,
    }.Schedule(groupLength, 32, handle);
    handle = new CopyComponentData<AnimationPlayData>
    {
        Source = this._rendererGroup.GetComponentDataArray<AnimationPlayData>(),
        Results = this._animationPlayData,
    }.Schedule(groupLength, 32, handle);

    // ----------------------------------------------
    // Bucket the play data by animation type.
    // FIXME: the NativeMultiHashMap capacity here is a sample-quality guess;
    // it should be sized from the real maximum draw count.
    var playDataMap = new NativeMultiHashMap<int, SendPlayData>(1000000, Allocator.TempJob);
    handle = new MapAnimationPlayDataJob
    {
        LocalToWorlds = this._localToWorlds,
        AnimationPlayData = this._animationPlayData,
        SendPlayDataMap = playDataMap.ToConcurrent(),
    }.Schedule(groupLength, 32, handle);

    // ----------------------------------------------
    // Advance the playback state.
    handle = new PlayAnimationJob
    {
        DeltaTime = Time.deltaTime,
        AnimationLengthList = this._animationLengthList,
        MaxAnimationType = this._maxAnimationType,
    }.Schedule(this, handle);
    handle.Complete();

    // ----------------------------------------------
    // GPU instancing: one indirect draw per animation type (loop index i
    // corresponds to the AnimationType value).
    for (int i = 0; i < this._maxAnimationType; ++i)
    {
        SendPlayData sendPlayData;
        NativeMultiHashMapIterator<int> it;

        // FIX: check for entries BEFORE allocating the temp buffer — the
        // old code allocated first and `continue`d past its Dispose,
        // leaking a Temp NativeArray for every unused animation type.
        if (!playDataMap.TryGetFirstValue(i, out sendPlayData, out it))
        {
            continue;
        }

        // Collect the play data of every instance using this animation.
        var buffer = new NativeArray<SendPlayData>(groupLength, Allocator.Temp);
        int instanceCount = 0;
        do
        {
            buffer[instanceCount] = sendPlayData;
            ++instanceCount;
        } while (playDataMap.TryGetNextValue(out sendPlayData, ref it));

        // Bind the per-type playback data (ComputeBuffers) to the material.
        var renderer = this._animationMeshes[i];
        var computeBuffers = this._sendBuffers[i];

        // (Re)create the buffers on first use or when the instance count
        // for this animation type changed.
        if (computeBuffers.CurrentInstance <= 0 || computeBuffers.CurrentInstance != instanceCount)
        {
            if (computeBuffers.SendPlayBuffer != null)
            {
                computeBuffers.SendPlayBuffer.Release();
            }
            computeBuffers.SendPlayBuffer = new ComputeBuffer(instanceCount, Marshal.SizeOf(typeof(SendPlayData)));

            if (computeBuffers.GPUInstancingArgsBuffer != null)
            {
                computeBuffers.GPUInstancingArgsBuffer.Release();
            }
            computeBuffers.GPUInstancingArgsBuffer = new ComputeBuffer(1, this._GPUInstancingArgs.Length * sizeof(uint), ComputeBufferType.IndirectArguments);
            computeBuffers.CurrentInstance = instanceCount;
        }

        // Upload the playback data. A NativeArray can be passed directly
        // to ComputeBuffer.SetData.
        var bufferSlice = buffer.Slice(0, instanceCount);
        var copyArray = new NativeArray<SendPlayData>(bufferSlice.Length, Allocator.Temp);
        bufferSlice.CopyTo(copyArray);
        computeBuffers.SendPlayBuffer.SetData(copyArray);
        renderer.AnimationMaterial.SetBuffer(this._playDataBufferID, computeBuffers.SendPlayBuffer);

        // Fill the indirect-draw arguments for DrawMeshInstancedIndirect.
        this._GPUInstancingArgs[0] = (uint)renderer.Mesh.GetIndexCount(0);
        this._GPUInstancingArgs[1] = (uint)instanceCount;
        this._GPUInstancingArgs[2] = (uint)renderer.Mesh.GetIndexStart(0);
        this._GPUInstancingArgs[3] = (uint)renderer.Mesh.GetBaseVertex(0);
        computeBuffers.GPUInstancingArgsBuffer.SetData(this._GPUInstancingArgs);

        // Draw.
        Graphics.DrawMeshInstancedIndirect(
            renderer.Mesh,
            0,
            renderer.AnimationMaterial,
            new Bounds(Vector3.zero, 1000000 * Vector3.one),
            computeBuffers.GPUInstancingArgsBuffer);

        buffer.Dispose();
        copyArray.Dispose();
    }

    playDataMap.Dispose();
    return handle;
}
/// <summary>
/// Captures one full 360° lidar sweep by rotating the sensor camera in
/// HorizontalAngleLimit steps, reading each slice back from the GPU, then
/// writes all non-zero points to a PCD file at <paramref name="path"/>.
/// </summary>
/// <param name="path">Destination file for the PCD writer.</param>
/// <returns>True on success; false (after logging) if writing the file throws.</returns>
public bool Save(string path)
{
    // Number of camera orientations needed to cover 360 degrees.
    int rotationCount = Mathf.CeilToInt(360.0f / HorizontalAngleLimit);

    // Measurements per slice, derived from the per-rotation resolution.
    float minAngle = 360.0f / CurrentMeasurementsPerRotation;
    int count = (int)(HorizontalAngleLimit / minAngle);

    float angle = HorizontalAngleLimit / 2.0f;

    var jobs = new NativeArray<JobHandle>(rotationCount, Allocator.Persistent);
    var active = new ReadRequest[rotationCount];

    try
    {
        // Kick off one render + async GPU readback per orientation.
        for (int i = 0; i < rotationCount; i++)
        {
            var rotation = Quaternion.AngleAxis(angle, Vector3.up);
            Camera.transform.localRotation = rotation;

            if (BeginReadRequest(count, ref active[i]))
            {
                active[i].Readback = AsyncGPUReadback.Request(active[i].RenderTexture, 0);
            }

            angle += HorizontalAngleLimit;
            if (angle >= 360.0f)
            {
                angle -= 360.0f;
            }
        }

        // Block on each readback and schedule its processing job.
        for (int i = 0; i < rotationCount; i++)
        {
            active[i].Readback.WaitForCompletion();
            jobs[i] = EndReadRequest(active[i], active[i].Readback.GetData<byte>());
        }

        JobHandle.CompleteAll(jobs);
    }
    finally
    {
        // Return render textures to the pool and free the job array.
        // NOTE(review): this also pushes entries whose BeginReadRequest
        // returned false — confirm their RenderTexture is valid/poolable.
        Array.ForEach(active, req => AvailableRenderTextures.Push(req.RenderTexture));
        jobs.Dispose();
    }

    // Transform points into the lidar's local frame (optionally undoing
    // the vehicle's world transform when compensation is enabled).
    var worldToLocal = LidarTransform;
    if (Compensated)
    {
        worldToLocal = worldToLocal * transform.worldToLocalMatrix;
    }

    try
    {
        using (var writer = new PcdWriter(path))
        {
            // Vector4.zero marks an empty slot; w carries the intensity.
            for (int p = 0; p < Points.Length; p++)
            {
                var point = Points[p];
                if (point != Vector4.zero)
                {
                    writer.Write(worldToLocal.MultiplyPoint3x4(point), point.w);
                }
            } ;
        }
        return(true);
    }
    catch (Exception ex)
    {
        Debug.LogException(ex);
        return(false);
    }
}