An opaque structure holding the description of a convolution operation.
Inheritance: IDisposable
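Before any of the calls below can be made, the convolution descriptor has to be created and configured, typically via SetConvolution2dDescriptor. A minimal setup sketch, assuming the cuDNN v3-era ManagedCuda.CudaDNN wrapper used in these examples; the exact parameter list of SetConvolution2dDescriptor and the enum member names may differ between wrapper versions:

 using ManagedCuda;
 using ManagedCuda.BasicTypes;
 using ManagedCuda.CudaDNN;

 // Describe a 2D convolution with 1-pixel zero padding, unit stride and no upscaling.
 // The argument order is assumed here; newer wrapper versions also take a compute data type.
 ConvolutionDescriptor convDesc = new ConvolutionDescriptor();
 convDesc.SetConvolution2dDescriptor(1, 1,   // pad_h, pad_w
                                     1, 1,   // vertical and horizontal stride
                                     1, 1,   // upscale in x and y
                                     cudnnConvolutionMode.CrossCorrelation);

The descriptor implements IDisposable, so it should be disposed (or wrapped in a using block) once the convolution has been set up and executed.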
Example #1
 public void Im2Col(float alpha,
                    TensorDescriptor srcDesc,
                    CudaDeviceVariable<float> srcData,
                    FilterDescriptor filterDesc,
                    ConvolutionDescriptor convDesc,
                    CudaDeviceVariable<byte> colBuffer
                    )
 {
     res = CudaDNNNativeMethods.cudnnIm2Col(_handle, ref alpha, srcDesc.Desc, srcData.DevicePointer, filterDesc.Desc, convDesc.Desc, colBuffer.DevicePointer);
     Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnIm2Col", res));
     if (res != cudnnStatus.Success)
     {
         throw new CudaDNNException(res);
     }
 }
Example #2
 public void GetConvolutionForwardAlgorithm(TensorDescriptor srcDesc,
                                            FilterDescriptor filterDesc,
                                            ConvolutionDescriptor convDesc,
                                            TensorDescriptor destDesc,
                                            cudnnConvolutionFwdPreference preference,
                                            SizeT memoryLimitInbytes,
                                            ref cudnnConvolutionFwdAlgo algo
                                            )
 {
     res = CudaDNNNativeMethods.cudnnGetConvolutionForwardAlgorithm(_handle, srcDesc.Desc, filterDesc.Desc, convDesc.Desc, destDesc.Desc, preference, memoryLimitInbytes, ref algo);
     Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnGetConvolutionForwardAlgorithm", res));
     if (res != cudnnStatus.Success)
     {
         throw new CudaDNNException(res);
     }
 }
Example #3
        public SizeT GetConvolutionForwardWorkspaceSize(TensorDescriptor srcDesc,
                                                        FilterDescriptor filterDesc,
                                                        ConvolutionDescriptor convDesc,
                                                        TensorDescriptor destDesc,
                                                        cudnnConvolutionFwdAlgo algo
                                                        )
        {
            SizeT sizeInBytes = 0;

            res = CudaDNNNativeMethods.cudnnGetConvolutionForwardWorkspaceSize(_handle, srcDesc.Desc, filterDesc.Desc, convDesc.Desc, destDesc.Desc, algo, ref sizeInBytes);
            Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnGetConvolutionForwardWorkspaceSize", res));
            if (res != cudnnStatus.Success)
            {
                throw new CudaDNNException(res);
            }
            return(sizeInBytes);
        }
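Examples #2 and #3 are normally used together: let cuDNN pick an algorithm for the given descriptors, then query how much workspace that algorithm needs and allocate it. A sketch of that pattern, where ctx stands for an instance of the wrapper class these example methods belong to and srcDesc, filterDesc, convDesc and destDesc are already-configured descriptors (all placeholder names); the preference enum member name is an assumption:

 // Ask the heuristic for the fastest algorithm, ignoring any workspace limit.
 cudnnConvolutionFwdAlgo algo = new cudnnConvolutionFwdAlgo();
 ctx.GetConvolutionForwardAlgorithm(srcDesc, filterDesc, convDesc, destDesc,
                                    cudnnConvolutionFwdPreference.PreferFastest,
                                    0, ref algo);

 // Size the workspace for the chosen algorithm and allocate it on the device.
 SizeT workspaceSize = ctx.GetConvolutionForwardWorkspaceSize(srcDesc, filterDesc, convDesc, destDesc, algo);
 // Note: some algorithms need no workspace at all; guard against a zero-byte allocation if needed.
 CudaDeviceVariable<byte> workspace = new CudaDeviceVariable<byte>(workspaceSize);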
Example #4
 public void ConvolutionBackwardData(double alpha,
                                     FilterDescriptor filterDesc,
                                     CudaDeviceVariable<double> filterData,
                                     TensorDescriptor diffDesc,
                                     CudaDeviceVariable<double> diffData,
                                     ConvolutionDescriptor convDesc,
                                     double beta,
                                     TensorDescriptor gradDesc,
                                     CudaDeviceVariable<double> gradData
                                     )
 {
     res = CudaDNNNativeMethods.cudnnConvolutionBackwardData(_handle, ref alpha, filterDesc.Desc, filterData.DevicePointer, diffDesc.Desc, diffData.DevicePointer, convDesc.Desc, ref beta, gradDesc.Desc, gradData.DevicePointer);
     Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnConvolutionBackwardData", res));
     if (res != cudnnStatus.Success)
     {
         throw new CudaDNNException(res);
     }
 }
Example #5
        /* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */

        /* Function to perform the forward multiconvolution */
        public void ConvolutionForward(double alpha,
                                       TensorDescriptor srcDesc,
                                       CudaDeviceVariable<double> srcData,
                                       FilterDescriptor filterDesc,
                                       CudaDeviceVariable<double> filterData,
                                       ConvolutionDescriptor convDesc,
                                       cudnnConvolutionFwdAlgo algo,
                                       CudaDeviceVariable<byte> workSpace,
                                       SizeT workSpaceSizeInBytes,
                                       double beta,
                                       TensorDescriptor destDesc,
                                       CudaDeviceVariable<double> destData
                                       )
        {
            res = CudaDNNNativeMethods.cudnnConvolutionForward(_handle, ref alpha, srcDesc.Desc, srcData.DevicePointer, filterDesc.Desc, filterData.DevicePointer, convDesc.Desc, algo, workSpace.DevicePointer, workSpaceSizeInBytes, ref beta, destDesc.Desc, destData.DevicePointer);
            Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnConvolutionForward", res));
            if (res != cudnnStatus.Success)
            {
                throw new CudaDNNException(res);
            }
        }
        public void Im2Col(double alpha,
							TensorDescriptor srcDesc,
							CudaDeviceVariable<double> srcData,
							FilterDescriptor filterDesc,
							ConvolutionDescriptor convDesc,
							CudaDeviceVariable<byte> colBuffer
							)
        {
            res = CudaDNNNativeMethods.cudnnIm2Col(_handle, ref alpha, srcDesc.Desc, srcData.DevicePointer, filterDesc.Desc, convDesc.Desc, colBuffer.DevicePointer);
            Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnIm2Col", res));
            if (res != cudnnStatus.Success) throw new CudaDNNException(res);
        }
        public SizeT GetConvolutionForwardWorkspaceSize(TensorDescriptor srcDesc,
														FilterDescriptor filterDesc,
														ConvolutionDescriptor convDesc,
														TensorDescriptor destDesc,
														cudnnConvolutionFwdAlgo algo
													)
        {
            SizeT sizeInBytes = 0;
            res = CudaDNNNativeMethods.cudnnGetConvolutionForwardWorkspaceSize(_handle, srcDesc.Desc, filterDesc.Desc, convDesc.Desc, destDesc.Desc, algo, ref sizeInBytes);
            Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnGetConvolutionForwardWorkspaceSize", res));
            if (res != cudnnStatus.Success) throw new CudaDNNException(res);
            return sizeInBytes;
        }
        public void GetConvolutionForwardAlgorithm(TensorDescriptor srcDesc,
													FilterDescriptor filterDesc,
													ConvolutionDescriptor convDesc,
													TensorDescriptor destDesc,
													cudnnConvolutionFwdPreference preference,
													SizeT memoryLimitInbytes,
													ref cudnnConvolutionFwdAlgo algo
													)
        {
            res = CudaDNNNativeMethods.cudnnGetConvolutionForwardAlgorithm(_handle, srcDesc.Desc, filterDesc.Desc, convDesc.Desc, destDesc.Desc, preference, memoryLimitInbytes, ref algo);
            Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnGetConvolutionForwardAlgorithm", res));
            if (res != cudnnStatus.Success) throw new CudaDNNException(res);
        }
        /* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */
        /* Function to perform the forward multiconvolution */
        public void ConvolutionForward(double alpha,
										TensorDescriptor srcDesc,
										CudaDeviceVariable<double> srcData,
										FilterDescriptor filterDesc,
										CudaDeviceVariable<double> filterData,
										ConvolutionDescriptor convDesc,
										cudnnConvolutionFwdAlgo algo,
										CudaDeviceVariable<byte> workSpace,
										SizeT workSpaceSizeInBytes,
										double beta,
										TensorDescriptor destDesc,
										CudaDeviceVariable<double> destData
									)
        {
            res = CudaDNNNativeMethods.cudnnConvolutionForward(_handle, ref alpha, srcDesc.Desc, srcData.DevicePointer, filterDesc.Desc, filterData.DevicePointer, convDesc.Desc, algo, workSpace.DevicePointer, workSpaceSizeInBytes, ref beta, destDesc.Desc, destData.DevicePointer);
            Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnConvolutionForward", res));
            if (res != cudnnStatus.Success) throw new CudaDNNException(res);
        }
        public void ConvolutionBackwardFilter(double alpha,
												TensorDescriptor srcDesc,
												CudaDeviceVariable<double> srcData,
												TensorDescriptor diffDesc,
												CudaDeviceVariable<double> diffData,
												ConvolutionDescriptor convDesc,
												double beta,
												FilterDescriptor gradDesc,
												CudaDeviceVariable<double> gradData
											)
        {
            res = CudaDNNNativeMethods.cudnnConvolutionBackwardFilter(_handle, ref alpha, srcDesc.Desc, srcData.DevicePointer, diffDesc.Desc, diffData.DevicePointer, convDesc.Desc, ref beta, gradDesc.Desc, gradData.DevicePointer);
            Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnConvolutionBackwardFilter", res));
            if (res != cudnnStatus.Success) throw new CudaDNNException(res);
        }
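The convolution entry points above all follow the "output = alpha * Op(inputs) + beta * output" convention, so a plain forward pass writes alpha times the convolution result into the destination and scales whatever was there before by beta. A usage sketch continuing the algorithm and workspace selection shown earlier; ctx, the descriptors and the device buffers srcData, filterData and destData are placeholder names:

 // destData = 1.0 * conv(srcData, filterData) + 0.0 * destData
 double alpha = 1.0;
 double beta = 0.0;
 ctx.ConvolutionForward(alpha, srcDesc, srcData,
                        filterDesc, filterData,
                        convDesc, algo,
                        workspace, workspace.SizeInBytes,
                        beta, destDesc, destData);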
Example #11
		/// <summary>
		/// This function computes the convolution gradient with respect to the output tensor using
		/// the specified algo, returning results in gradDesc. Scaling factors alpha and beta can
		/// be used to scale the input tensor and the output tensor respectively.
		/// </summary>
		/// <param name="alpha">Pointer to scaling factors (in host memory) used to blend the computation
		/// result with prior value in the output layer as follows: dstValue =
		/// alpha[0]*result + beta[0]*priorDstValue. Please refer to the cuDNN
		/// documentation for additional details.</param>
		/// <param name="filterDesc">Handle to a previously initialized filter descriptor.</param>
		/// <param name="filterData">Data pointer to GPU memory associated with the filter descriptor filterDesc.</param>
		/// <param name="diffDesc">Handle to the previously initialized input differential tensor descriptor.</param>
		/// <param name="diffData">Data pointer to GPU memory associated with the input differential tensor descriptor diffDesc.</param>
		/// <param name="convDesc">Previously initialized convolution descriptor.</param>
		/// <param name="algo">Enumerant that specifies which backward data convolution algorithm shoud be used to compute the results</param>
		/// <param name="workSpace">Data pointer to GPU memory to a workspace needed to able to execute
		/// the specified algorithm. If no workspace is needed for a particular
		/// algorithm, that pointer can be nil</param>
		/// <param name="beta">Pointer to scaling factors (in host memory) used to blend the computation
		/// result with prior value in the output layer as follows: dstValue =
		/// alpha[0]*result + beta[0]*priorDstValue. Please refer to the cuDNN
		/// documentation for additional details.</param>
		/// <param name="gradDesc">Handle to the previously initialized output tensor descriptor.</param>
		/// <param name="gradData">Data pointer to GPU memory associated with the output tensor descriptor
		/// gradDesc that carries the result.</param>
		public void ConvolutionBackwardData(double alpha,
											FilterDescriptor filterDesc,
											CudaDeviceVariable<double> filterData,
											TensorDescriptor diffDesc,
											CudaDeviceVariable<double> diffData,
											ConvolutionDescriptor convDesc,
											cudnnConvolutionBwdDataAlgo algo,
											CudaDeviceVariable<byte> workSpace,
											double beta,
											TensorDescriptor gradDesc,
											CudaDeviceVariable<double> gradData
										)
		{
			res = CudaDNNNativeMethods.cudnnConvolutionBackwardData(_handle, ref alpha, filterDesc.Desc, filterData.DevicePointer, diffDesc.Desc, diffData.DevicePointer, convDesc.Desc, algo, workSpace.DevicePointer, workSpace.SizeInBytes, ref beta, gradDesc.Desc, gradData.DevicePointer);
			Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnConvolutionBackwardData", res));
			if (res != cudnnStatus.Success) throw new CudaDNNException(res);
		}
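A call sketch for the overload above; the backward-data algorithm and its workspace are obtained with the helpers shown in the next examples, and filterData, diffData and gradData are placeholder device buffers holding the filter, the output gradient and the resulting data gradient:

 // gradData = 1.0 * dgrad(filterData, diffData) + 0.0 * gradData
 double alpha = 1.0;
 double beta = 0.0;
 ctx.ConvolutionBackwardData(alpha, filterDesc, filterData,
                             diffDesc, diffData,
                             convDesc, bwdAlgo, bwdWorkspace,
                             beta, gradDesc, gradData);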
Example #12
		/// <summary>
		/// This function returns the amount of GPU memory workspace the user needs
		/// to allocate to be able to call cudnnConvolutionBackwardData_v3 with the
		/// specified algorithm. The workspace allocated will then be passed to the routine
		/// cudnnConvolutionBackwardData_v3. The specified algorithm can be the result of the
		/// call to cudnnGetConvolutionBackwardDataAlgorithm or can be chosen arbitrarily
		/// by the user. Note that not every algorithm is available for every configuration of the
		/// input tensor and/or every configuration of the convolution descriptor.
		/// </summary>
		/// <param name="filterDesc">Handle to a previously initialized filter descriptor.</param>
		/// <param name="diffDesc">Handle to the previously initialized input differential tensor descriptor.</param>
		/// <param name="convDesc">Previously initialized convolution descriptor.</param>
		/// <param name="gradDesc">Handle to the previously initialized output tensor descriptor.</param>
		/// <param name="algo">Enumerant that specifies the chosen convolution algorithm</param>
		/// <returns>Amount of GPU memory needed as workspace to be able to execute a backward data convolution with the specified algo</returns>
		public SizeT GetConvolutionBackwardDataWorkspaceSize(FilterDescriptor filterDesc,
															TensorDescriptor diffDesc,
															ConvolutionDescriptor convDesc,
															TensorDescriptor gradDesc,
															cudnnConvolutionBwdDataAlgo algo
														)
		{
			SizeT sizeInBytes = new SizeT();
			res = CudaDNNNativeMethods.cudnnGetConvolutionBackwardDataWorkspaceSize(_handle, filterDesc.Desc, diffDesc.Desc, convDesc.Desc, gradDesc.Desc, algo, ref sizeInBytes);
			Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnGetConvolutionBackwardDataWorkspaceSize", res));
			if (res != cudnnStatus.Success) throw new CudaDNNException(res);
			return sizeInBytes;
		}
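The returned size is what gets allocated and handed to the backward-data overload shown above. A short sketch with the same placeholder names:

 SizeT bwdWorkspaceSize = ctx.GetConvolutionBackwardDataWorkspaceSize(filterDesc, diffDesc, convDesc, gradDesc, bwdAlgo);
 // Note: some algorithms need no workspace at all; guard against a zero-byte allocation if needed.
 CudaDeviceVariable<byte> bwdWorkspace = new CudaDeviceVariable<byte>(bwdWorkspaceSize);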
Example #13
		/// <summary>
		/// This function serves as a heuristic for obtaining the best suited algorithm for
		/// cudnnConvolutionBackwardData_v3 for the given layer specifications. Based
		/// on the input preference, this function will either return the fastest algorithm or the
		/// fastest algorithm within a given memory limit. For an exhaustive search for the fastest
		/// algorithm, please use cudnnFindConvolutionBackwardDataAlgorithm.
		/// </summary>
		/// <param name="filterDesc">Handle to a previously initialized filter descriptor.</param>
		/// <param name="diffDesc">Handle to the previously initialized input differential tensor descriptor.</param>
		/// <param name="convDesc">Previously initialized convolution descriptor.</param>
		/// <param name="gradDesc">Handle to the previously initialized output tensor descriptor.</param>
		/// <param name="preference">Enumerant to express the preference criteria in terms of memory
		/// requirement and speed.</param>
		/// <param name="memoryLimitInbytes">It is to specify the maximum amount of GPU memory the user is willing to
		/// use as a workspace. This is currently a placeholder and is not used.</param>
		/// <returns>Enumerant that specifies which convolution algorithm should be used to
		/// compute the results according to the specified preference</returns>
		public cudnnConvolutionBwdDataAlgo GetConvolutionBackwardDataAlgorithm(FilterDescriptor filterDesc,
														TensorDescriptor diffDesc,
														ConvolutionDescriptor convDesc,
														TensorDescriptor gradDesc,
														cudnnConvolutionBwdDataPreference preference,
														SizeT memoryLimitInbytes
														)
		{
			cudnnConvolutionBwdDataAlgo algo = new cudnnConvolutionBwdDataAlgo();
			res = CudaDNNNativeMethods.cudnnGetConvolutionBackwardDataAlgorithm(_handle, filterDesc.Desc, diffDesc.Desc, convDesc.Desc, gradDesc.Desc, preference, memoryLimitInbytes, ref algo);
			Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnGetConvolutionBackwardDataAlgorithm", res));
			if (res != cudnnStatus.Success) throw new CudaDNNException(res);
			return algo;
		}
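A usage sketch for the heuristic above, with the same placeholder names; the preference enum member name is an assumption. The memory limit argument only matters for workspace-limited preferences and, as the comment above notes, is currently a placeholder:

 // Let the heuristic pick the fastest backward-data algorithm.
 cudnnConvolutionBwdDataAlgo bwdAlgo = ctx.GetConvolutionBackwardDataAlgorithm(
     filterDesc, diffDesc, convDesc, gradDesc,
     cudnnConvolutionBwdDataPreference.PreferFastest, 0);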
Example #14
		/// <summary>
		/// This function attempts all cuDNN algorithms for
		/// cudnnConvolutionBackwardData_v3 and outputs performance metrics to a user-
		/// allocated array of cudnnConvolutionBwdDataAlgoPerf_t. These metrics are written
		/// in sorted fashion where the first element has the lowest compute time.
		/// </summary>
		/// <param name="filterDesc">Handle to a previously initialized filter descriptor.</param>
		/// <param name="diffDesc">Handle to the previously initialized input differential tensor descriptor.</param>
		/// <param name="convDesc">Previously initialized convolution descriptor.</param>
		/// <param name="gradDesc">Handle to the previously initialized output tensor descriptor.</param>
		/// <param name="requestedAlgoCount">The maximum number of elements to be stored in perfResults.</param>
		/// <returns>An array of performance metrics, sorted ascending by compute time.</returns>
		public cudnnConvolutionBwdDataAlgoPerf[] FindConvolutionBackwardDataAlgorithm(FilterDescriptor filterDesc,
															TensorDescriptor diffDesc,
															ConvolutionDescriptor convDesc,
															TensorDescriptor gradDesc,
															int requestedAlgoCount
														)
		{
			cudnnConvolutionBwdDataAlgoPerf[] temp = new cudnnConvolutionBwdDataAlgoPerf[requestedAlgoCount];
			int returnedAlgoCount = 0;
			res = CudaDNNNativeMethods.cudnnFindConvolutionBackwardDataAlgorithm(_handle, filterDesc.Desc, diffDesc.Desc, convDesc.Desc, gradDesc.Desc, requestedAlgoCount, ref returnedAlgoCount, temp);
			Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnFindConvolutionBackwardDataAlgorithm", res));
			if (res != cudnnStatus.Success) throw new CudaDNNException(res);
			if (returnedAlgoCount <= 0) return null;

			cudnnConvolutionBwdDataAlgoPerf[] perfResults = new cudnnConvolutionBwdDataAlgoPerf[returnedAlgoCount];
			Array.Copy(temp, perfResults, returnedAlgoCount);
			return perfResults;
		}
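When the heuristic is not good enough, the exhaustive search above measures every applicable algorithm and returns the candidates sorted ascending by compute time, so callers usually take the first successful entry and size the workspace from it. A sketch, assuming the perf struct mirrors the native cudnnConvolutionBwdDataAlgoPerf_t and exposes algo, status and memory fields (field names are an assumption):

 cudnnConvolutionBwdDataAlgoPerf[] results = ctx.FindConvolutionBackwardDataAlgorithm(
     filterDesc, diffDesc, convDesc, gradDesc, 8);

 // Entries come back sorted ascending by compute time; the first successful
 // one is the fastest, and its memory field reports the workspace it needs.
 foreach (cudnnConvolutionBwdDataAlgoPerf perf in results)
 {
     if (perf.status == cudnnStatus.Success)
     {
         cudnnConvolutionBwdDataAlgo fastest = perf.algo;
         SizeT neededWorkspace = perf.memory;
         // ... allocate neededWorkspace bytes and run ConvolutionBackwardData with fastest ...
         break;
     }
 }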