/************************************************************************************************************************/

/// <summary>Resizes the `values` array if necessary and copies the value of each property into it.</summary>
public void GetValues(ref TValue[] values)
{
    var count = _Values.Length;
    if (values == null || values.Length != count)
    {
        values = new TValue[count];
    }
    _Values.CopyTo(values);
}
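// A minimal usage sketch (hypothetical caller; `wrapper` and TValue = float are assumptions,
// not part of the original source). The buffer is allocated on the first call and then reused,
// since GetValues only reallocates when the length changes.
float[] buffer = null;
wrapper.GetValues(ref buffer); // allocates buffer to match the internal NativeArray
wrapper.GetValues(ref buffer); // reuses the same managed array on later calls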
public static void Copy(NativeArray<T> input, NativeArray<T> output, Int2 inputSize, Int2 outputSize, IntRect inputBounds, IntRect outputBounds)
{
    Assert(input.Length == inputSize.x * inputSize.y);
    Assert(output.Length == outputSize.x * outputSize.y);
    Assert(inputBounds.xmin >= 0 && inputBounds.ymin >= 0 && inputBounds.xmax < inputSize.x && inputBounds.ymax < inputSize.y);
    Assert(outputBounds.xmin >= 0 && outputBounds.ymin >= 0 && outputBounds.xmax < outputSize.x && outputBounds.ymax < outputSize.y);
    Assert(inputBounds.Width == outputBounds.Width && inputBounds.Height == outputBounds.Height);

    if (inputSize == outputSize && inputBounds.Width == inputSize.x && inputBounds.Height == inputSize.y)
    {
        // One contiguous chunk
        input.CopyTo(output);
    }
    else
    {
        // Copy row-by-row
        for (int z = 0; z < outputBounds.Height; z++)
        {
            var rowOffsetInput = (z + inputBounds.ymin) * inputSize.x + inputBounds.xmin;
            var rowOffsetOutput = (z + outputBounds.ymin) * outputSize.x + outputBounds.xmin;
            // Using a raw MemCpy call is a bit faster, but that requires unsafe code.
            // Using a for loop is *a lot* slower (except for very small arrays, in which case
            // it is about the same or very slightly faster).
            NativeArray<T>.Copy(input, rowOffsetInput, output, rowOffsetOutput, outputBounds.Width);
        }
    }
}
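// Hedged usage sketch: Int2 and IntRect are types from the surrounding codebase, and the
// IntRect(xmin, ymin, xmax, ymax) constructor shown here is an assumption, as is T = float
// for the enclosing generic type. Copies a 4x4 region out of a 10x10 grid into the whole
// of a 4x4 grid (inclusive bounds, so Width of (2,2,5,5) is 4).
var src = new NativeArray<float>(10 * 10, Allocator.Temp);
var dst = new NativeArray<float>(4 * 4, Allocator.Temp);
Copy(src, dst, new Int2(10, 10), new Int2(4, 4), new IntRect(2, 2, 5, 5), new IntRect(0, 0, 3, 3));
src.Dispose();
dst.Dispose();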
void ExecuteMeshDeformationJob(JellyBody _jellyBody)
{
    // Copy the managed vertex data into temporary native arrays for the job.
    NativeArray<Vector3> initialVertsAccess = new NativeArray<Vector3>(_jellyBody.InitialVerts, Allocator.TempJob);
    NativeArray<Vector3> displacedVertsAccess = new NativeArray<Vector3>(_jellyBody.DisplacedVerts, Allocator.TempJob);
    NativeArray<Vector3> vertVelocitiesAccess = new NativeArray<Vector3>(_jellyBody.VertVelocities, Allocator.TempJob);

    MeshDeformationJob meshDeformationJob = new MeshDeformationJob
    {
        initialVerts = initialVertsAccess,
        displacedVerts = displacedVertsAccess,
        vertVelocities = vertVelocitiesAccess,
        springforce = useManualSettings ? _jellyBody.Stiffness : stiffness,
        dampening = useManualSettings ? _jellyBody.Attenuation : attenuation,
        unitformScale = _jellyBody.UnitformScale,
        time = Time.deltaTime
    };

    // Note: the batch size equals the array length, so the whole job runs as a single batch
    // (no parallelism). A smaller batch size (e.g. 64) would let it spread across worker threads.
    JobHandle meshDeformationJobHandle = meshDeformationJob.Schedule(_jellyBody.DisplacedVerts.Length, _jellyBody.DisplacedVerts.Length);
    meshDeformationJobHandle.Complete();

    // Copy the results back to the managed arrays and release the native memory.
    initialVertsAccess.CopyTo(_jellyBody.InitialVerts);
    initialVertsAccess.Dispose();
    displacedVertsAccess.CopyTo(_jellyBody.DisplacedVerts);
    displacedVertsAccess.Dispose();
    vertVelocitiesAccess.CopyTo(_jellyBody.VertVelocities);
    vertVelocitiesAccess.Dispose();

    _jellyBody.JellyMesh.vertices = _jellyBody.DisplacedVerts;
}
private MeshData CopyChunkMeshData(ChunkMarchData chunkData)
{
    MeshData meshData;
    if (chunkData.Verticies.Count > 0)
    {
        meshData = new MeshData();

        meshData.Vertices = new Vector3[chunkData.Verticies.Count];
        NativeArray<float3> tmpVertices = chunkData.Verticies.ToArray(Allocator.Temp);
        tmpVertices.Reinterpret<Vector3>().CopyTo(meshData.Vertices);
        tmpVertices.Dispose();

        meshData.Triangles = new int[chunkData.Indicies.Count];
        NativeArray<int> tmpIndicies = chunkData.Indicies.ToArray(Allocator.Temp);
        tmpIndicies.CopyTo(meshData.Triangles);
        tmpIndicies.Dispose();
    }
    else
    {
        meshData = new MeshData(0);
    }
    chunkData.Clear();
    return meshData;
}
public static void Copy(NativeArray<T> input, NativeArray<T> output, int3 inputSize, int3 outputSize, IntBounds inputBounds, IntBounds outputBounds)
{
    Assert(input.Length == inputSize.x * inputSize.y * inputSize.z);
    Assert(output.Length == outputSize.x * outputSize.y * outputSize.z);
    Assert(math.all(inputBounds.min >= 0 & inputBounds.max <= inputSize));
    Assert(math.all(outputBounds.min >= 0 & outputBounds.max <= outputSize));
    Assert(math.all(inputBounds.size == outputBounds.size));

    if (math.all(inputSize == outputSize & inputBounds.size == inputSize))
    {
        // One contiguous chunk
        input.CopyTo(output);
    }
    else
    {
        // Copy row-by-row
        var inputStrides = new int3(1, inputSize.x * inputSize.z, inputSize.x);
        var outputStrides = new int3(1, outputSize.x * outputSize.z, outputSize.x);
        for (int y = 0; y < outputBounds.size.y; y++)
        {
            for (int z = 0; z < outputBounds.size.z; z++)
            {
                var rowOffsetInput = math.csum((new int3(0, y, z) + inputBounds.min) * inputStrides);
                var rowOffsetOutput = math.csum((new int3(0, y, z) + outputBounds.min) * outputStrides);
                // Using a raw MemCpy call is a bit faster, but that requires unsafe code.
                // Using a for loop is *a lot* slower (except for very small arrays, in which case
                // it is about the same or very slightly faster).
                NativeArray<T>.Copy(input, rowOffsetInput, output, rowOffsetOutput, outputBounds.size.x);
            }
        }
    }
}
public void CopyToCache(PointCloudData target)
{
    target.Name = Name;
    target.Frame = Frame;
    target.Time = Time;
    target.Sequence = Sequence;
    target.LaserCount = LaserCount;
    target.Transform = Transform;
    target.PointCount = PointCount;

    if (target.Points == null || target.Points.Length < PointCount)
    {
        target.Points = new Vector4[PointCount];
    }

    // The final target is always a managed array - native arrays can't be accessed outside of
    // the main thread and have to be properly disposed. Since the copy has to happen anyway,
    // it's easier to just target a managed array.
    if (Points != null)
    {
        Array.Copy(Points, target.Points, PointCount);
    }
    else if (NativePoints.IsCreated)
    {
        NativePoints.CopyTo(target.Points);
    }
}
public static JobHandle PrepareData<T>(NativeArray<T> source, int length, NativeList<T> output, JobHandle inputDeps)
    where T : struct, IComparable<T>
{
    inputDeps = output.Resize(length, inputDeps);
    inputDeps = source.CopyTo(output.AsDeferredJobArray(), 0, 0, inputDeps);
    return inputDeps;
}
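// Hedged sketch (an assumption, not the original extension code): `Resize` and the jobified
// `CopyTo` used above are custom extensions that aren't shown. A deferred copy like that can
// be implemented as a small IJob so the copy itself joins the dependency chain:
[BurstCompile]
struct CopyJob<T> : IJob where T : struct
{
    [ReadOnly] public NativeArray<T> Source;
    public NativeArray<T> Destination; // typically a list's AsDeferredJobArray()

    public void Execute()
    {
        // Copies Source.Length elements; the destination is resolved when the job runs.
        NativeArray<T>.Copy(Source, Destination, Source.Length);
    }
}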
private void ExecuteJob(Vector3[] vertices)
{
    var jobHandles = new List<JobHandle>();
    var vertexArray = new NativeArray<Vector3>(vertices, Allocator.TempJob);

    for (int i = 0; i < waterProfile.wavesAttributes.Count; i++)
    {
        var job = new NoiseJob
        {
            v = vertexArray,
            nType = waterProfile.wavesAttributes[i].noiseType,
            curPos = transform.position,
            waveScale = waterProfile.wavesAttributes[i].waveScale,
            waveSpeed = waterProfile.wavesAttributes[i].waveSpeed,
            waveIntensity = waterProfile.wavesAttributes[i].waveIntensity,
            heightOffset = waterProfile.wavesAttributes[i].heightOffset,
            t = Time.timeSinceLevelLoad
        };

        // Chain each wave layer's job onto the previous one so they run in order.
        if (i == 0)
        {
            jobHandles.Add(job.Schedule(vertices.Length, 250));
        }
        else
        {
            jobHandles.Add(job.Schedule(vertices.Length, 250, jobHandles[i - 1]));
        }
    }

    jobHandles.Last().Complete();
    vertexArray.CopyTo(vertices);
    vertexArray.Dispose();
}
// Does a rough apply to work around synchronization problems when loading a chunk before
// applying a structure to it
public static void RoughApply(ushort[] cacheVoxdata, ushort[] cacheHP, ushort[] cacheState, Chunk st)
{
    NativeArray<ushort> blockIn = new NativeArray<ushort>(st.data.GetData(), Allocator.TempJob);
    NativeArray<ushort> hpIn = new NativeArray<ushort>(st.metadata.GetHPData(), Allocator.TempJob);
    NativeArray<ushort> stateIn = new NativeArray<ushort>(st.metadata.GetStateData(), Allocator.TempJob);
    NativeArray<ushort> blockOut = new NativeArray<ushort>(cacheVoxdata, Allocator.TempJob);
    NativeArray<ushort> hpOut = new NativeArray<ushort>(cacheHP, Allocator.TempJob);
    NativeArray<ushort> stateOut = new NativeArray<ushort>(cacheState, Allocator.TempJob);

    RoughApplyJob raJob = new RoughApplyJob
    {
        blockIn = blockIn,
        hpIn = hpIn,
        stateIn = stateIn,
        blockOut = blockOut,
        hpOut = hpOut,
        stateOut = stateOut
    };
    JobHandle job = raJob.Schedule(Chunk.chunkWidth, 2);
    job.Complete();

    // Copy the results back into the managed caches.
    blockOut.CopyTo(cacheVoxdata);
    hpOut.CopyTo(cacheHP);
    stateOut.CopyTo(cacheState);

    // Dispose Bin
    blockIn.Dispose();
    hpIn.Dispose();
    stateIn.Dispose();
    blockOut.Dispose();
    hpOut.Dispose();
    stateOut.Dispose();
}
private void ExecutePerlinNoiseJobs(Vector3[] vertices)
{
    var jobHandles = new List<JobHandle>();
    var vertexArray = new NativeArray<Vector3>(vertices, Allocator.TempJob);

    for (int i = 0; i < _perlinNoiseLayers.Count; i++)
    {
        var job = new AddPerlinNoiseJob
        {
            Vertices = vertexArray,
            Layer = _perlinNoiseLayers[i],
            Time = Time.timeSinceLevelLoad
        };

        if (i == 0)
        {
            jobHandles.Add(job.Schedule(vertices.Length, 250));
        }
        else
        {
            jobHandles.Add(job.Schedule(vertices.Length, 250, jobHandles[i - 1]));
        }
    }

    jobHandles.Last().Complete();
    vertexArray.CopyTo(vertices);
    vertexArray.Dispose();
}
protected JobHandle Render(JobHandle inputDeps)
{
    if (injectRendererDatas.Length != rendererData.Length)
    {
        DestroyRendererArrays();
        CreateRendererArrays(injectRendererDatas.Length);
        rendererData = new RendererData(injectRendererDatas.Length);
    }

    var handle = new UpdateDataJob
    {
        matrices = matrices,
        yPositions = yPositions,
        units = injectRendererDatas.stateDatas
    }.Schedule(injectRendererDatas.Length, 64, inputDeps);
    handle.Complete();

    matrices.CopyTo(rendererData.matrices);
    yPositions.CopyTo(rendererData.yPositions);

    // DrawMeshInstanced accepts at most 1023 instances per call, so draw in batches.
    for (int i = 0; i < injectRendererDatas.Length; i += 1023)
    {
        var len = Mathf.Min(injectRendererDatas.Length - i, 1023);
        Array.Copy(rendererData.matrices, i, tempMatrices, 0, len);
        Array.Copy(rendererData.yPositions, i, tempYPositions, 0, len);
        materialPropertyBlock.SetFloatArray("_YPos", tempYPositions);
        for (int j = 0; j < StateGraph.rendererData.SubMeshCount; j++)
        {
            Graphics.DrawMeshInstanced(StateGraph.rendererData.Mesh, j, StateGraph.rendererData.Materials[j],
                tempMatrices, len, materialPropertyBlock, StateGraph.rendererData.ShadowCastingMode,
                StateGraph.rendererData.ReceivesShadows);
        }
    }
    return handle;
}
// Writes Chunk c's state metadata into the given buffer
// and returns the number of bytes written
public static int CompressMetadataState(Chunk c, byte[] buffer)
{
    List<ushort> palleteList = Compression.GetPallete(Pallete.METADATA);
    int bytes;

    NativeArray<int> writtenBytes = new NativeArray<int>(new int[1] { 0 }, Allocator.TempJob);
    NativeArray<ushort> chunkData = new NativeArray<ushort>(c.metadata.GetStateData(), Allocator.TempJob);
    NativeArray<byte> buff = new NativeArray<byte>(buffer, Allocator.TempJob);
    NativeArray<ushort> palleteArray = new NativeArray<ushort>(palleteList.ToArray(), Allocator.TempJob);

    CompressionJob cmdJob = new CompressionJob
    {
        chunkData = chunkData,
        buffer = buff,
        palleteArray = palleteArray,
        writtenBytes = writtenBytes
    };
    JobHandle handle = cmdJob.Schedule();
    handle.Complete();

    // NativeArray to Array conversion
    buff.CopyTo(buffer);
    bytes = writtenBytes[0];

    chunkData.Dispose();
    palleteArray.Dispose();
    buff.Dispose();
    writtenBytes.Dispose();

    return bytes;
}
private void InitTreeInstances()
{
    CleanUp();
    if (Terrain == null)
    {
        return;
    }
    if (Terrain.TerrainData == null)
    {
        return;
    }

    List<GTreeInstance> instances = Terrain.TerrainData.Foliage.TreeInstances;
    nativeTreeInstances = new NativeArray<GTreeInstance>(instances.ToArray(), Allocator.Persistent);
    treeInstances = new GTreeInstance[instances.Count];
    nativeCullResults = new NativeArray<bool>(instances.Count, Allocator.Persistent);
    cullResults = new bool[instances.Count];

    Vector3 terrainSize = Terrain.TerrainData.Geometry.Size;
    GTransformTreesToLocalSpaceJob job = new GTransformTreesToLocalSpaceJob()
    {
        instances = nativeTreeInstances,
        terrainSize = terrainSize
    };
    JobHandle handle = job.Schedule(nativeTreeInstances.Length, 100);
    handle.Complete();
    nativeTreeInstances.CopyTo(treeInstances);
}
public void RefreshVoxels()
{
    Utils.Profiler.BeginSample("VoxelChunk.RefreshVoxels");
    if (voxelArray == null)
    {
        return;
    }

    var heights = new NativeArray<float>(heightArray, Allocator.TempJob);
    var voxels = new NativeArray<Voxel>(voxelArray, Allocator.TempJob);

    // Set up the job data
    var jobData = new VoxelRefreshJob()
    {
        ChunkAltitude = voxelPosition.y,
        Heights = heights,
        Voxels = voxels,
        SizeVox = sizeVox,
        SizeVox2 = sizeVox * sizeVox
    };

    // Schedule the job
    var handle = jobData.Schedule(voxels.Length, 64);

    // Wait for the job to complete
    handle.Complete();

    voxels.CopyTo(voxelArray);
    heights.Dispose();
    voxels.Dispose();
    Utils.Profiler.EndSample();
}
public NativeArray<byte> GetNewBlackboard()
{
    // The caller owns the returned Persistent array and is responsible for disposing it.
    var output = new NativeArray<byte>(_cachedTotalSize, Allocator.Persistent);
    _template.CopyTo(output);
    return output;
}
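// Hedged usage sketch (`provider` is a hypothetical caller, not from the original source):
// the Persistent allocation above outlives the frame, so pair every GetNewBlackboard()
// with a Dispose().
var blackboard = provider.GetNewBlackboard();
try
{
    // ... read/write blackboard ...
}
finally
{
    blackboard.Dispose();
}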
public static byte[] EndianSwap(byte[] inputBytes)
{
    NativeArray<byte> a = new NativeArray<byte>(inputBytes.Length, Allocator.TempJob);
    NativeArray<byte> result = new NativeArray<byte>(inputBytes.Length, Allocator.TempJob);
    a.CopyFrom(inputBytes);

    MyParallelJob jobData = new MyParallelJob();
    jobData.a = a;
    jobData.result = result;

    // Schedule the job with one Execute per index in the results array and only 1 item per processing batch
    JobHandle handle = jobData.Schedule(result.Length, 1);

    // Wait for the job to complete
    handle.Complete();

    // Note: this copies the swapped bytes back over the input array, mutating and returning it.
    result.CopyTo(inputBytes);

    a.Dispose();
    result.Dispose();
    return inputBytes;
}
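// The definition of MyParallelJob is not shown above. A minimal sketch of what it might look
// like, assuming the swap operates on 32-bit words (so the input length would need to be a
// multiple of 4); within each 4-byte word, index i maps to i ^ 3:
[BurstCompile]
struct MyParallelJob : IJobParallelFor
{
    [ReadOnly] public NativeArray<byte> a;
    public NativeArray<byte> result;

    public void Execute(int i)
    {
        result[i] = a[i ^ 3]; // reverse byte order within each 4-byte word
    }
}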
private void ExecutePerlinNoiseJob(Vector3[] vertices)
{
    // Let's make a list of job handles to keep track of them
    var jobHandles = new List<JobHandle>();
    var vertexArray = new NativeArray<Vector3>(vertices, Allocator.TempJob);

    for (int i = 0; i < _perlinNoiseLayers.Count; i++)
    {
        var job = new PerlinNoiseLayerJob()
        {
            vertices = vertexArray,
            layer = _perlinNoiseLayers[i],
            time = Time.timeSinceLevelLoad
        };

        // If this is the first job, just schedule it; otherwise chain it onto the previous handle
        if (i == 0)
        {
            jobHandles.Add(job.Schedule(vertices.Length, 4));
        }
        else
        {
            jobHandles.Add(job.Schedule(vertices.Length, 4, jobHandles[i - 1]));
        }
    }

    jobHandles.Last().Complete();
    vertexArray.CopyTo(vertices);
    vertexArray.Dispose();
}
internal NativeArray<bool> GetVertexMarkerFromMeshUV(int lod)
{
    int dimension = GGeometryJobUtilities.VERTEX_MARKER_DIMENSION;
    // The returned TempJob array must be disposed by the caller.
    NativeArray<bool> markers = new NativeArray<bool>(dimension * dimension, Allocator.TempJob);
    Mesh m = GetMesh(lod);
    Vector2[] uvs = m.uv;
    int x = 0;
    int y = 0;
    Vector2 uv = Vector2.zero;
    for (int i = 0; i < uvs.Length; ++i)
    {
        uv = uvs[i];
        x = (int)(uv.x * (dimension - 1));
        y = (int)(uv.y * (dimension - 1));
        markers[GGeometryJobUtilities.To1DIndex(ref x, ref y, ref dimension)] = true;
    }

    GUtilities.EnsureArrayLength(ref vertexMarker_Cache, markers.Length);
    markers.CopyTo(vertexMarker_Cache);
    return markers;
}
public static void SetTriangles(this Mesh mesh, NativeArray<int> triangles)
{
    var array = new int[triangles.Length];
    triangles.CopyTo(array);
    mesh.SetTriangles(array, 0);
}
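// Design note: the extension above allocates a temporary managed array on every call. On
// Unity 2019.3+, Mesh.SetIndexBufferData can consume the NativeArray directly and avoid the
// copy. A hedged alternative sketch (assumes a single submesh of triangles and no 16-bit
// index fallback; IndexFormat, SubMeshDescriptor and MeshTopology live in UnityEngine.Rendering):
public static void SetTrianglesDirect(this Mesh mesh, NativeArray<int> triangles)
{
    mesh.SetIndexBufferParams(triangles.Length, IndexFormat.UInt32);
    mesh.SetIndexBufferData(triangles, 0, 0, triangles.Length);
    mesh.SetSubMesh(0, new SubMeshDescriptor(0, triangles.Length, MeshTopology.Triangles));
}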
public static NativeArray<T> CreateCopy<T>(NativeArray<T> source, Allocator targetAllocator) where T : struct
{
    var copy = new NativeArray<T>(source.Length, targetAllocator, NativeArrayOptions.UninitializedMemory);
    source.CopyTo(copy);
    return copy;
}
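// Hedged usage sketch (`liveData` is a hypothetical existing NativeArray<T>):
// UninitializedMemory skips the clear because CopyTo overwrites every element anyway.
// The caller owns the copy and must dispose it.
var snapshot = CreateCopy(liveData, Allocator.Persistent);
// ... use snapshot across frames ...
snapshot.Dispose();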
internal void CacheVertexMarker()
{
    if (vertexMarkerNativeArray.IsCreated)
    {
        GUtilities.EnsureArrayLength(ref vertexMarker_Cache, vertexMarkerNativeArray.Length);
        vertexMarkerNativeArray.CopyTo(vertexMarker_Cache);
    }
}
public PathfindingGrid Copy(Allocator allocator = Allocator.TempJob)
{
    PathfindingGrid newGrid = this;
    newGrid.grid = new NativeArray<Node>(grid.Length, allocator);
    grid.CopyTo(newGrid.grid);
    return newGrid;
}
public void LateUpdate()
{
    if (m_useJobSystem)
    {
        m_transJobHandle.Complete();
        m_perlins.CopyTo(m_flowFields);
    }
}
public NativeArray<BoneLocation> GetBonesLastFrameCopy()
{
    var copy = new NativeArray<BoneLocation>(bonesLastFrame.Length, Allocator.Persistent, NativeArrayOptions.ClearMemory);
    bonesLastFrame.CopyTo(copy);
    return copy;
}
void IEcsRunSystem.Run()
{
    if (JobMoveSystem.CanRead)
    {
        _nativeMatrices.CopyTo(_matrices);
    }
    Graphics.DrawMeshInstanced(mesh, 0, material, _matrices, _filter.EntitiesCount);
}
public void Execute()
{
    if (!m_recompute)
    {
        return;
    }
    m_referenceObstacles.CopyTo(m_inputObstacles);
}
internal void OnProbesBakeCompleted()
{
    if (this.gameObject == null || !this.gameObject.activeInHierarchy)
    {
        return;
    }

    int numProbes = parameters.resolutionX * parameters.resolutionY * parameters.resolutionZ;

    var sh = new NativeArray<SphericalHarmonicsL2>(numProbes, Allocator.Temp, NativeArrayOptions.UninitializedMemory);
    var validity = new NativeArray<float>(numProbes, Allocator.Temp, NativeArrayOptions.UninitializedMemory);

    // TODO: Currently, we need to always allocate and pass this octahedralDepth array into GetAdditionalBakedProbes().
    // In the future, we should add an API call for GetAdditionalBakedProbes() without octahedralDepth required.
    var octahedralDepth = new NativeArray<float>(numProbes * 8 * 8, Allocator.Temp, NativeArrayOptions.UninitializedMemory);

    if (UnityEditor.Experimental.Lightmapping.GetAdditionalBakedProbes(GetID(), sh, validity, octahedralDepth))
    {
        if (!probeVolumeAsset || GetID() != probeVolumeAsset.instanceID)
        {
            probeVolumeAsset = ProbeVolumeAsset.CreateAsset(GetID());
        }

        probeVolumeAsset.instanceID = GetID();
        probeVolumeAsset.resolutionX = parameters.resolutionX;
        probeVolumeAsset.resolutionY = parameters.resolutionY;
        probeVolumeAsset.resolutionZ = parameters.resolutionZ;

        ProbeVolumePayload.Ensure(ref probeVolumeAsset.payload, numProbes);

        // Always serialize L0, L1 and L2 coefficients, even if atlas is configured to only store L1.
        // In the future we will strip the L2 coefficients from the project at build time if L2 is never used.
        for (int i = 0, iLen = sh.Length; i < iLen; ++i)
        {
            ProbeVolumePayload.SetSphericalHarmonicsL2FromIndex(ref probeVolumeAsset.payload, sh[i], i);
        }

        validity.CopyTo(probeVolumeAsset.payload.dataValidity);

        if (ShaderConfig.s_ProbeVolumesBilateralFilteringMode == ProbeVolumesBilateralFilteringModes.OctahedralDepth)
        {
            octahedralDepth.CopyTo(probeVolumeAsset.payload.dataOctahedralDepth);
        }

        if (UnityEditor.Lightmapping.giWorkflowMode != UnityEditor.Lightmapping.GIWorkflowMode.Iterative)
        {
            UnityEditor.EditorUtility.SetDirty(probeVolumeAsset);
        }

        UnityEditor.AssetDatabase.Refresh();
        dataUpdated = true;
    }

    sh.Dispose();
    validity.Dispose();
    octahedralDepth.Dispose();
}
/// <summary>
///     Add an item to the <see cref="NativeSimpleList{T}" />, checking if <see cref="Array" /> needs to be resized.
/// </summary>
/// <param name="item">A typed <see cref="object" /> to add.</param>
/// <param name="allocator">The <see cref="Unity.Collections.Allocator" /> type to use.</param>
/// <param name="nativeArrayOptions">Should the memory be cleared on allocation?</param>
public void AddWithExpandCheck(T item, Allocator allocator, NativeArrayOptions nativeArrayOptions)
{
    int arrayLength = Array.Length;

    if (Count == arrayLength)
    {
        arrayLength = arrayLength == 0 ? 1 : arrayLength;
        NativeArray<T> newArray = new NativeArray<T>(arrayLength * 2, allocator, nativeArrayOptions);
        // NativeArray<T>.CopyTo requires both arrays to have the same length, so copy only the
        // existing elements into the larger array.
        NativeArray<T>.Copy(Array, newArray, Count);
        Array.Dispose();
        Array = newArray;
    }

    Array[Count] = item;
    ++Count;
}
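// A minimal standalone sketch of the grow-and-copy step (assumptions: int payload, Temp
// allocator), showing why NativeArray<T>.Copy with an explicit count is used instead of CopyTo:
var old = new NativeArray<int>(4, Allocator.Temp);
var grown = new NativeArray<int>(8, Allocator.Temp, NativeArrayOptions.UninitializedMemory);
// old.CopyTo(grown) would throw: CopyTo requires both arrays to have the same length.
NativeArray<int>.Copy(old, grown, old.Length); // copies the 4 existing elements
old.Dispose();
// ... grown now has room for more elements ...
grown.Dispose();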
private void LateUpdate()
{
    m_RGBComplementBurstJobHandle.Complete();
    m_NativeColors.CopyTo(m_Data);
    m_Texture.SetPixels32(0, 0, m_WebcamTextureSize.x, m_WebcamTextureSize.y, m_Data);
    m_Texture.Apply(false);
}
void ComputeSmoothedNormalByJob(Mesh smoothedMesh, Mesh originalMesh, int maxOverlapvertices = 20)
{
    int svc = smoothedMesh.vertexCount, ovc = originalMesh.vertexCount;

    // CollectNormalJob Data
    NativeArray<Vector3> normals = new NativeArray<Vector3>(smoothedMesh.normals, Allocator.Persistent),
                         vertrx = new NativeArray<Vector3>(smoothedMesh.vertices, Allocator.Persistent),
                         smoothedNormals = new NativeArray<Vector3>(svc, Allocator.Persistent);
    var result = new NativeArray<UnsafeHashMap<Vector3, Vector3>>(maxOverlapvertices, Allocator.Persistent);
    var resultParallel = new NativeArray<UnsafeHashMap<Vector3, Vector3>.ParallelWriter>(result.Length, Allocator.Persistent);

    // NormalBakeJob Data
    NativeArray<Vector3> normalsO = new NativeArray<Vector3>(originalMesh.normals, Allocator.Persistent),
                         vertrxO = new NativeArray<Vector3>(originalMesh.vertices, Allocator.Persistent);
    var tangents = new NativeArray<Vector4>(originalMesh.tangents, Allocator.Persistent);
    var colors = new NativeArray<Color>(ovc, Allocator.Persistent);
    var uv2 = new NativeArray<Vector2>(ovc, Allocator.Persistent);

    for (int i = 0; i < result.Length; i++)
    {
        result[i] = new UnsafeHashMap<Vector3, Vector3>(svc, Allocator.Persistent);
        resultParallel[i] = result[i].AsParallelWriter();
    }

    bool existColors = originalMesh.colors.Length == ovc;
    bool isFace = originalMesh.name.Contains("face") || originalMesh.name.Contains("Face");

    if (existColors)
    {
        colors.CopyFrom(originalMesh.colors);
    }

    // Bake the collected smoothed normals after the collect job finishes.
    CollectNormalJob collectNormalJob = new CollectNormalJob(normals, vertrx, resultParallel);
    BakeNormalJob normalBakeJob = new BakeNormalJob(vertrxO, normalsO, tangents, result, existColors, isFace, colors, uv2);
    normalBakeJob.Schedule(ovc, 100, collectNormalJob.Schedule(svc, 100)).Complete();

    var c = new Color[ovc];
    colors.CopyTo(c);
    originalMesh.colors = c;
    if (isFace)
    {
        var _uv2 = new Vector2[ovc];
        uv2.CopyTo(_uv2);
        originalMesh.uv2 = _uv2;
    }

    // Note: disposing `result` releases the array itself, but not the UnsafeHashMaps stored
    // in it; those are never disposed here.
    normals.Dispose();
    vertrx.Dispose();
    result.Dispose();
    smoothedNormals.Dispose();
    resultParallel.Dispose();
    normalsO.Dispose();
    vertrxO.Dispose();
    tangents.Dispose();
    colors.Dispose();
    uv2.Dispose();
}