/// <inheritdoc cref="IDisposable.Dispose"/>
public void Dispose()
{
    _mapped?.Dispose();
    _mapped = null;
    _memory.Dispose();
    _memory = null;
}
public void Dispose()
{
    ThrowIfDisposed();
    memory.Dispose();
    disposed = true;
}
public void Resize(ref DeviceMemory<float> buffer, int length)
{
    if (buffer.Length >= length)
    {
        return;
    }
    buffer.Dispose();
    buffer = _worker.Malloc<float>(length);
}
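A minimal usage sketch of the grow-only pattern above; the `_buffer` field, the `Process` method, and the copy/launch steps are assumptions for illustration, not part of the original code:

// Assumes _buffer was pre-allocated once (Resize above expects a non-null buffer):
//   _buffer = _worker.Malloc<float>(1);
private DeviceMemory<float> _buffer;

public void Process(float[] batch)
{
    // Grows _buffer only when the batch is larger than the current capacity.
    Resize(ref _buffer, batch.Length);
    // ... copy batch into _buffer and launch kernels against it (library-specific) ...
}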
public void Dispose()
{
    Fence?.Dispose();
    Memory.Dispose();
    Buffer.Dispose();
    if (WriteUsingStagingBuffer)
    {
        StagingMemory?.Dispose();
        StagingBuffer?.Dispose();
        StagingCommandBuffer?.Dispose();
    }
}
protected virtual void Dispose(bool disposing)
{
    if (disposing && !_disposed)
    {
        _blas.Dispose();
        _cuda.Dispose();
        _cache.Dispose();
        //if (_solver.IsValueCreated)
        //    _solver.Value.Dispose();
        _numerics.Dispose();
        _disposed = true;
    }
}
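For reference, the public entry point that conventionally accompanies a `Dispose(bool)` override looks like the sketch below (standard .NET dispose pattern; not part of the snippet above):

public void Dispose()
{
    Dispose(true);
    // No finalizer work is needed once managed resources have been released.
    GC.SuppressFinalize(this);
}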
public static VulkanBuffer Vertex<T>(VulkanContext ctx, T[] vertices) where T : struct
{
    long size = vertices.Length * Interop.SizeOf<T>();

    // Create a staging buffer that is writable by host.
    Buffer stagingBuffer = ctx.Device.CreateBuffer(new BufferCreateInfo(size, BufferUsages.TransferSrc));
    MemoryRequirements stagingReq = stagingBuffer.GetMemoryRequirements();
    int stagingMemoryTypeIndex = ctx.MemoryProperties.MemoryTypes.IndexOf(
        stagingReq.MemoryTypeBits,
        MemoryProperties.HostVisible | MemoryProperties.HostCoherent);
    DeviceMemory stagingMemory = ctx.Device.AllocateMemory(new MemoryAllocateInfo(stagingReq.Size, stagingMemoryTypeIndex));
    IntPtr vertexPtr = stagingMemory.Map(0, stagingReq.Size);
    Interop.Write(vertexPtr, vertices);
    stagingMemory.Unmap();
    stagingBuffer.BindMemory(stagingMemory);

    // Create a device local buffer where the vertex data will be copied and which will be used for rendering.
    Buffer buffer = ctx.Device.CreateBuffer(new BufferCreateInfo(size, BufferUsages.VertexBuffer | BufferUsages.TransferDst));
    MemoryRequirements req = buffer.GetMemoryRequirements();
    int memoryTypeIndex = ctx.MemoryProperties.MemoryTypes.IndexOf(
        req.MemoryTypeBits,
        MemoryProperties.DeviceLocal);
    DeviceMemory memory = ctx.Device.AllocateMemory(new MemoryAllocateInfo(req.Size, memoryTypeIndex));
    buffer.BindMemory(memory);

    // Copy the data from the staging buffer to the device local buffer.
    CommandBuffer cmdBuffer = ctx.GraphicsCommandPool.AllocateBuffers(new CommandBufferAllocateInfo(CommandBufferLevel.Primary, 1))[0];
    cmdBuffer.Begin(new CommandBufferBeginInfo(CommandBufferUsages.OneTimeSubmit));
    cmdBuffer.CmdCopyBuffer(stagingBuffer, buffer, new BufferCopy(size));
    cmdBuffer.End();

    // Submit.
    Fence fence = ctx.Device.CreateFence();
    ctx.GraphicsQueue.Submit(new SubmitInfo(commandBuffers: new[] { cmdBuffer }), fence);
    fence.Wait();

    // Cleanup.
    fence.Dispose();
    cmdBuffer.Dispose();
    stagingBuffer.Dispose();
    stagingMemory.Dispose();

    return new VulkanBuffer(buffer, memory, vertices.Length);
}
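A hypothetical call site for the helper above; the `Vertex` struct, its constructor, and the initialized `VulkanContext` named `ctx` are assumptions for illustration:

// Upload three vertices once at load time; the staging resources are created
// and destroyed inside Vertex<T>, so only the returned buffer needs disposing later.
Vertex[] triangle =
{
    new Vertex(new Vector3( 0.0f, -0.5f, 0.0f)),
    new Vertex(new Vector3( 0.5f,  0.5f, 0.0f)),
    new Vertex(new Vector3(-0.5f,  0.5f, 0.0f))
};
VulkanBuffer vertexBuffer = VulkanBuffer.Vertex(ctx, triangle);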
internal static VKBuffer Index(Context ctx, int[] indices)
{
    long size = indices.Length * sizeof(int);

    // Create staging buffer.
    VulkanCore.Buffer stagingBuffer = ctx.Device.CreateBuffer(new BufferCreateInfo(size, BufferUsages.TransferSrc));
    MemoryRequirements stagingReq = stagingBuffer.GetMemoryRequirements();
    int stagingMemoryTypeIndex = ctx.MemoryProperties.MemoryTypes.IndexOf(
        stagingReq.MemoryTypeBits,
        MemoryProperties.HostVisible | MemoryProperties.HostCoherent);
    DeviceMemory stagingMemory = ctx.Device.AllocateMemory(new MemoryAllocateInfo(stagingReq.Size, stagingMemoryTypeIndex));
    IntPtr indexPtr = stagingMemory.Map(0, stagingReq.Size);
    Interop.Write(indexPtr, indices);
    stagingMemory.Unmap();
    stagingBuffer.BindMemory(stagingMemory);

    // Create a device local buffer.
    VulkanCore.Buffer buffer = ctx.Device.CreateBuffer(new BufferCreateInfo(size, BufferUsages.IndexBuffer | BufferUsages.TransferDst));
    MemoryRequirements req = buffer.GetMemoryRequirements();
    int memoryTypeIndex = ctx.MemoryProperties.MemoryTypes.IndexOf(
        req.MemoryTypeBits,
        MemoryProperties.DeviceLocal);
    DeviceMemory memory = ctx.Device.AllocateMemory(new MemoryAllocateInfo(req.Size, memoryTypeIndex));
    buffer.BindMemory(memory);

    // Copy the data from the staging buffer to the device local buffer.
    CommandBuffer cmdBuffer = ctx.GraphicsCommandPool.AllocateBuffers(new CommandBufferAllocateInfo(CommandBufferLevel.Primary, 1))[0];
    cmdBuffer.Begin(new CommandBufferBeginInfo(CommandBufferUsages.OneTimeSubmit));
    cmdBuffer.CmdCopyBuffer(stagingBuffer, buffer, new BufferCopy(size));
    cmdBuffer.End();

    // Submit.
    Fence fence = ctx.Device.CreateFence();
    ctx.GraphicsQueue.Submit(new SubmitInfo(commandBuffers: new[] { cmdBuffer }), fence);
    fence.Wait();

    // Cleanup.
    fence.Dispose();
    cmdBuffer.Dispose();
    stagingBuffer.Dispose();
    stagingMemory.Dispose();

    return new VKBuffer(ctx, buffer, memory, null, indices.Length, size);
}
public void Cleanup()
{
    /*
     * Clean up all Vulkan resources.
     */
    bufferMemory.Dispose();
    buffer.Dispose();
    computeShaderModule.Dispose();
    descriptorPool.Dispose();
    descriptorSetLayout.Dispose();
    pipelineLayout.Dispose();
    foreach (Pipeline pipeline in pipelines)
    {
        pipeline.Dispose();
    }
    commandPool.Dispose();
    device.Dispose();
    instance.Dispose();
}
public void Dispose()
{
    if (Interlocked.Exchange(ref _disposed, 1) != 0)
    {
        return;
    }

    switch (MemoryType)
    {
        case CuMemoryType.Device:
            DeviceMemory.Dispose();
            DeviceMemory = CuDeviceMemory.Empty;
            break;
        case CuMemoryType.Host:
            Marshal.FreeHGlobal(Bytes);
            Bytes = IntPtr.Zero;
            break;
    }
}
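For context, the host branch above releases memory that would have been allocated with `Marshal.AllocHGlobal`; a sketch of that pairing, where `sizeInBytes` is an illustrative variable and the settability of `MemoryType` is assumed:

// Allocation that the CuMemoryType.Host case undoes with Marshal.FreeHGlobal.
Bytes = Marshal.AllocHGlobal(sizeInBytes);
MemoryType = CuMemoryType.Host;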
double[] ExecuteGPU(int size)
{
    // Prepare the input data in main memory first.
    List<double> a = new List<double>();
    List<double> b = new List<double>();
    for (int i = 0; i < size * size; i++)
    {
        a.Add(i + 1);
        b.Add((i + 1) * 10);
    }

    // Transfer the data to device memory.
    DeviceMemory memory = new DeviceMemory();
    memory.Add<double>("a", a);
    memory.Add<double>("b", b);
    memory.Alloc<double>("c", size * size);

    // Run the kernel.
    CallMethod(
        "matrixDot",
        size, size,
        memory["a"],
        memory["b"],
        memory["c"],
        size,
        size
    );

    // Wait until all threads have finished.
    context.Synchronize();

    // Read back the result.
    double[] result = memory.Read<double>("c", size * size);

    // Release resources.
    memory.Dispose();

    return result;
}
//[overrideFuctions]
protected override void Dispose(bool disposing)
{
    foreach (var resource in _resources)
    {
        CUDAInterop.cuSafeCall(CUDAInterop.cuGraphicsUnregisterResource(resource));
    }
    foreach (var buffer in _buffers)
    {
        CUDAInterop.cuSafeCall(CUDAInterop.cuGLUnregisterBufferObject(buffer));
    }
    if (_buffers.Length > 0)
    {
        GL.DeleteBuffers(_buffers.Length, _buffers);
    }
    if (disposing)
    {
        _vel.Dispose();
        _disposeSimulators();
        _worker.Dispose();
    }
    base.Dispose(disposing);
}
public void Dispose()
{
    DeviceResource.Dispose();
    DeviceMemory.Dispose();
}
internal static VulkanImage Texture2D(VulkanContext ctx, TextureData tex2D)
{
    Buffer stagingBuffer = ctx.Device.CreateBuffer(
        new BufferCreateInfo(tex2D.Mipmaps[0].Size, BufferUsages.TransferSrc));
    MemoryRequirements stagingMemReq = stagingBuffer.GetMemoryRequirements();
    int heapIndex = ctx.MemoryProperties.MemoryTypes.IndexOf(
        stagingMemReq.MemoryTypeBits, MemoryProperties.HostVisible);
    DeviceMemory stagingMemory = ctx.Device.AllocateMemory(
        new MemoryAllocateInfo(stagingMemReq.Size, heapIndex));
    stagingBuffer.BindMemory(stagingMemory);

    IntPtr ptr = stagingMemory.Map(0, stagingMemReq.Size);
    Interop.Write(ptr, tex2D.Mipmaps[0].Data);
    stagingMemory.Unmap();

    // Setup buffer copy regions for each mip level.
    var bufferCopyRegions = new BufferImageCopy[tex2D.Mipmaps.Length];
    int offset = 0;
    for (int i = 0; i < bufferCopyRegions.Length; i++)
    {
        // Fill the region for mip level i instead of overwriting the whole array each iteration.
        bufferCopyRegions[i] = new BufferImageCopy
        {
            ImageSubresource = new ImageSubresourceLayers(ImageAspects.Color, i, 0, 1),
            ImageExtent = tex2D.Mipmaps[i].Extent,
            BufferOffset = offset
        };
        offset += tex2D.Mipmaps[i].Size;
    }

    // Create optimal tiled target image.
    Image image = ctx.Device.CreateImage(new ImageCreateInfo
    {
        ImageType = ImageType.Image2D,
        Format = tex2D.Format,
        MipLevels = tex2D.Mipmaps.Length,
        ArrayLayers = 1,
        Samples = SampleCounts.Count1,
        Tiling = ImageTiling.Optimal,
        SharingMode = SharingMode.Exclusive,
        InitialLayout = ImageLayout.Undefined,
        Extent = tex2D.Mipmaps[0].Extent,
        Usage = ImageUsages.Sampled | ImageUsages.TransferDst
    });
    MemoryRequirements imageMemReq = image.GetMemoryRequirements();
    int imageHeapIndex = ctx.MemoryProperties.MemoryTypes.IndexOf(
        imageMemReq.MemoryTypeBits, MemoryProperties.DeviceLocal);
    DeviceMemory memory = ctx.Device.AllocateMemory(new MemoryAllocateInfo(imageMemReq.Size, imageHeapIndex));
    image.BindMemory(memory);

    var subresourceRange = new ImageSubresourceRange(ImageAspects.Color, 0, tex2D.Mipmaps.Length, 0, 1);

    // Copy the data from the staging buffer to the device local image.
    CommandBuffer cmdBuffer = ctx.GraphicsCommandPool.AllocateBuffers(new CommandBufferAllocateInfo(CommandBufferLevel.Primary, 1))[0];
    cmdBuffer.Begin(new CommandBufferBeginInfo(CommandBufferUsages.OneTimeSubmit));
    cmdBuffer.CmdPipelineBarrier(PipelineStages.TopOfPipe, PipelineStages.TopOfPipe,
        imageMemoryBarriers: new[]
        {
            new ImageMemoryBarrier(
                image, subresourceRange,
                0, Accesses.TransferWrite,
                ImageLayout.Undefined, ImageLayout.TransferDstOptimal)
        });
    cmdBuffer.CmdCopyBufferToImage(stagingBuffer, image, ImageLayout.TransferDstOptimal, bufferCopyRegions);
    cmdBuffer.CmdPipelineBarrier(PipelineStages.TopOfPipe, PipelineStages.TopOfPipe,
        imageMemoryBarriers: new[]
        {
            new ImageMemoryBarrier(
                image, subresourceRange,
                Accesses.TransferWrite, Accesses.ShaderRead,
                ImageLayout.TransferDstOptimal, ImageLayout.ShaderReadOnlyOptimal)
        });
    cmdBuffer.End();

    // Submit.
    Fence fence = ctx.Device.CreateFence();
    ctx.GraphicsQueue.Submit(new SubmitInfo(commandBuffers: new[] { cmdBuffer }), fence);
    fence.Wait();

    // Cleanup staging resources.
    fence.Dispose();
    stagingMemory.Dispose();
    stagingBuffer.Dispose();

    // Create image view.
    ImageView view = image.CreateView(new ImageViewCreateInfo(tex2D.Format, subresourceRange));

    return new VulkanImage(image, memory, view, tex2D.Format);
}
private void TestCompile()
{
    // Compile the program (from .cu source to PTX).
    RuntimeCompiler compiler = new RuntimeCompiler();
    //compiler.AddHeader("curand_kernel.h", @"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v8.0\include\curand_kernel.h");
    compiler.AddOptions(
        RuntimeCompiler.OPTION_TARGET_30,
        RuntimeCompiler.OPTION_FMAD_FALSE,
        RuntimeCompiler.OPTION_LINE_INFO,
        RuntimeCompiler.OPTION_DEVICE_AS_DEFAULT_EXECUTION_SPACE
        //RuntimeCompiler.OPTION_INCLUDE_PATH_ + @"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v8.0\include\"
    );

    string ptx = compiler.Compile("addKernel.cu", addKernelString);
    if (ptx == null)
    {
        Console.WriteLine("Compile Error:");
        Console.WriteLine();
        Console.WriteLine(compiler.Log);
        return;
    }

    // Print the compile log to the output window.
    Console.WriteLine("----- <Compile Log>");
    Console.WriteLine(compiler.Log);
    Console.WriteLine("----- </Compile Log>");

    // Print the compiled program (PTX) to the output window.
    Console.WriteLine("----- <PTX>");
    Console.WriteLine(ptx);
    Console.WriteLine("----- </PTX>");

    // Prepare to run the program.
    Device device = new Device(0);
    Context context = device.CreateContext();
    Module module = new Module();

    Console.WriteLine(device.Name);
    Console.WriteLine(device.PCIBusId);
    Console.WriteLine(device.TotalMem);
    //Console.WriteLine(device.GetProperties());
    Console.WriteLine(context.ApiVersion);
    //return;

    // Load the PTX data.
    module.LoadData(ptx);

    // Prepare the input data in main memory first.
    const int arraySize = 5;
    List<int> a = new List<int>();
    List<int> b = new List<int>();
    for (int i = 0; i < arraySize; i++)
    {
        a.Add(i + 1);
        b.Add((i + 1) * 10);
    }

    // Transfer the data to device memory.
    DeviceMemory memory = new DeviceMemory();
    memory.Add<int>("a", a);
    memory.Add<int>("b", b);
    memory.Alloc<int>("c", arraySize);

    // Run the kernel.
    module.SetBlockCount(1, 1, 1);
    module.SetThreadCount(arraySize, 1, 1);
    module.Excecute(
        "addKernel",
        memory["c"],
        memory["a"],
        memory["b"]
    );

    // Wait until all threads have finished.
    context.Synchronize();

    // Read back the results and print them to the output window.
    int[] results = memory.Read<int>("c", arraySize);
    Console.WriteLine("----- <Execute Log>");
    for (int i = 0; i < arraySize; i++)
    {
        Console.WriteLine("{0} + {1} = {2}", a[i], b[i], results[i]);
    }
    Console.WriteLine("----- </Execute Log>");

    // Release resources.
    memory.Dispose();
    module.Dispose();
    context.Dispose();
}