/// <summary>
/// Creates a new tensor descriptor by calling cudnnCreateTensorDescriptor.
/// </summary>
public TensorDescriptor()
{
    _desc = new cudnnTensorDescriptor();
    res = CudaDNNNativeMethods.cudnnCreateTensorDescriptor(ref _desc);
    Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnCreateTensorDescriptor", res));
    if (res != cudnnStatus.Success)
    {
        throw new CudaDNNException(res);
    }
}
public static extern cudnnStatus cudnnActivationForward(
    cudnnHandle handle,
    cudnnActivationMode mode,
    ref float alpha,
    cudnnTensorDescriptor srcDesc,
    CUdeviceptr srcData,
    ref float beta,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData);
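// --- Example (illustrative sketch, not part of the original binding) ---
// Shows a plain forward activation call through the extern above with the usual
// dest = alpha * activation(src) + beta * dest blending. The handle, activation mode,
// tensor descriptors and device pointers are hypothetical and assumed to have been
// created and filled elsewhere.
private static void ExampleActivationForward(
    cudnnHandle handle,
    cudnnActivationMode mode,
    cudnnTensorDescriptor srcDesc, CUdeviceptr srcData,
    cudnnTensorDescriptor destDesc, CUdeviceptr destData)
{
    float alpha = 1.0f; // take the activation result as-is
    float beta = 0.0f;  // overwrite whatever is currently in destData
    cudnnStatus status = CudaDNNNativeMethods.cudnnActivationForward(
        handle, mode, ref alpha, srcDesc, srcData, ref beta, destDesc, destData);
    if (status != cudnnStatus.Success) throw new CudaDNNException(status);
}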
public static extern cudnnStatus cudnnActivationBackward(
    cudnnHandle handle,
    cudnnActivationDescriptor activationDesc,
    ref float alpha,
    cudnnTensorDescriptor srcDesc,
    CUdeviceptr srcData,
    cudnnTensorDescriptor srcDiffDesc,
    CUdeviceptr srcDiffData,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData,
    ref float beta,
    cudnnTensorDescriptor destDiffDesc,
    CUdeviceptr destDiffData);
public static extern cudnnStatus cudnnActivationBackward(
    cudnnHandle handle,
    cudnnActivationMode mode,
    ref double alpha,
    cudnnTensorDescriptor srcDesc,
    CUdeviceptr srcData,
    cudnnTensorDescriptor srcDiffDesc,
    CUdeviceptr srcDiffData,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData,
    ref double beta,
    cudnnTensorDescriptor destDiffDesc,
    CUdeviceptr destDiffData);
/// <summary>
/// This function initializes the previously created generic tensor descriptor
/// wrapped by this instance, laying out the dimensions according to the given format.
/// </summary>
/// <param name="format">Tensor layout format (e.g. NCHW or NHWC).</param>
/// <param name="dataType">Data type.</param>
/// <param name="nbDims">Dimension of the tensor.</param>
/// <param name="dimA">Array of dimension nbDims that contains the size of the tensor for every dimension.</param>
public void SetTensorNdDescriptorEx(
    cudnnTensorFormat format,
    cudnnDataType dataType,
    int nbDims,
    int[] dimA)
{
    res = CudaDNNNativeMethods.cudnnSetTensorNdDescriptorEx(_desc, format, dataType, nbDims, dimA);
    Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnSetTensorNdDescriptorEx", res));
    if (res != cudnnStatus.Success)
    {
        throw new CudaDNNException(res);
    }
}
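// --- Example (illustrative sketch, not part of the original binding) ---
// Creates a tensor descriptor and describes a 4D NCHW batch (N=32, C=3, H=224, W=224)
// through the Nd "Ex" setter above. The cudnnTensorFormat.NCHW and cudnnDataType.Float
// enum member names are assumptions; substitute the names used by your binding.
private static TensorDescriptor ExampleCreateNchwDescriptor()
{
    TensorDescriptor desc = new TensorDescriptor();
    int[] dims = new int[] { 32, 3, 224, 224 }; // N, C, H, W
    desc.SetTensorNdDescriptorEx(cudnnTensorFormat.NCHW, cudnnDataType.Float, 4, dims);
    return desc;
}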
/// <summary>
/// This function performs the forward LRN layer computation.
/// </summary>
/// <param name="lrnMode">LRN layer mode of operation. Currently only
/// CUDNN_LRN_CROSS_CHANNEL_DIM1 is implemented. Normalization is
/// performed along the tensor's dimA[1].</param>
/// <param name="alpha">Scaling factor (in host memory) used to blend the layer output
/// value with the prior value in the destination tensor as follows:
/// dstValue = alpha*resultValue + beta*priorDstValue. See the cuDNN documentation
/// for additional details.</param>
/// <param name="xDesc">Tensor descriptor for the input tensor.</param>
/// <param name="x">Input tensor data pointer in device memory.</param>
/// <param name="beta">Scaling factor (in host memory) used to blend the layer output
/// value with the prior value in the destination tensor as follows:
/// dstValue = alpha*resultValue + beta*priorDstValue. See the cuDNN documentation
/// for additional details.</param>
/// <param name="yDesc">Tensor descriptor for the output tensor.</param>
/// <param name="y">Output tensor data pointer in device memory.</param>
public void cudnnLRNCrossChannelForward(
    cudnnLRNMode lrnMode,
    float alpha,
    cudnnTensorDescriptor xDesc,
    CUdeviceptr x,
    float beta,
    cudnnTensorDescriptor yDesc,
    CUdeviceptr y)
{
    res = CudaDNNNativeMethods.cudnnLRNCrossChannelForward(_handle, _desc, lrnMode, ref alpha, xDesc, x, ref beta, yDesc, y);
    Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnLRNCrossChannelForward", res));
    if (res != cudnnStatus.Success)
    {
        throw new CudaDNNException(res);
    }
}
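// --- Example (illustrative sketch, not part of the original binding) ---
// Calls the float overload above with alpha=1, beta=0 so the LRN result simply
// overwrites the output tensor. It assumes the wrapper class hosting this method
// is the LRN descriptor wrapper (named LRNDescriptor here for illustration) and
// that the descriptors and device pointers were set up elsewhere.
private static void ExampleLrnForward(
    LRNDescriptor lrn,
    cudnnLRNMode lrnMode, // only the cross-channel dim1 mode is implemented
    cudnnTensorDescriptor xDesc, CUdeviceptr x,
    cudnnTensorDescriptor yDesc, CUdeviceptr y)
{
    lrn.cudnnLRNCrossChannelForward(lrnMode, 1.0f, xDesc, x, 0.0f, yDesc, y);
}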
public void cudnnLRNCrossChannelForward(
    cudnnLRNMode lrnMode,
    double alpha,
    cudnnTensorDescriptor srcDesc,
    CUdeviceptr srcData,
    double beta,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData)
{
    res = CudaDNNNativeMethods.cudnnLRNCrossChannelForward(_handle, _desc, lrnMode, ref alpha, srcDesc, srcData, ref beta, destDesc, destData);
    Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnLRNCrossChannelForward", res));
    if (res != cudnnStatus.Success)
    {
        throw new CudaDNNException(res);
    }
}
/// <summary>
/// This function performs the forward DivisiveNormalization layer computation.
/// </summary>
/// <param name="mode">DivisiveNormalization layer mode of operation. Currently only
/// CUDNN_DIVNORM_PRECOMPUTED_MEANS is implemented. Normalization is performed
/// using the means input tensor that is expected to be precomputed by the user.</param>
/// <param name="alpha">Scaling factor (in host memory) used to blend the layer output
/// value with the prior value in the destination tensor as follows:
/// dstValue = alpha*resultValue + beta*priorDstValue. See the cuDNN documentation
/// for additional details.</param>
/// <param name="xDesc">Tensor descriptor for the input tensor. Note that this descriptor
/// is shared between x, means, temp and temp2.</param>
/// <param name="x">Input tensor data pointer in device memory.</param>
/// <param name="means">Input means tensor data pointer in device memory. Note that this tensor
/// can be NULL (in that case its values are assumed to be zero during the computation).
/// This tensor also does not have to contain means; these can be any values. A frequently
/// used variation is the result of a convolution with a normalized positive kernel
/// (such as Gaussian).</param>
/// <param name="temp">Temporary tensor in device memory, used for computing intermediate
/// values during the forward pass. It does not have to be preserved as input from the
/// forward to the backward pass and uses xDesc as its descriptor.</param>
/// <param name="temp2">Temporary tensor in device memory, used for computing intermediate
/// values during the forward pass. It does not have to be preserved as input from the
/// forward to the backward pass and uses xDesc as its descriptor.</param>
/// <param name="beta">Scaling factor (in host memory) used to blend the layer output
/// value with the prior value in the destination tensor as follows:
/// dstValue = alpha*resultValue + beta*priorDstValue. See the cuDNN documentation
/// for additional details.</param>
/// <param name="yDesc">Tensor descriptor for the output tensor.</param>
/// <param name="y">Pointer in device memory to a tensor for the result of the forward
/// DivisiveNormalization pass.</param>
public void cudnnDivisiveNormalizationForward(
    cudnnDivNormMode mode,
    float alpha,
    cudnnTensorDescriptor xDesc, // same desc for means, temp, temp2
    CUdeviceptr x,
    CUdeviceptr means,           // if NULL, means are assumed to be zero
    CUdeviceptr temp,
    CUdeviceptr temp2,
    float beta,
    cudnnTensorDescriptor yDesc,
    CUdeviceptr y)
{
    res = CudaDNNNativeMethods.cudnnDivisiveNormalizationForward(_handle, _desc, mode, ref alpha, xDesc, x, means, temp, temp2, ref beta, yDesc, y);
    Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationForward", res));
    if (res != cudnnStatus.Success)
    {
        throw new CudaDNNException(res);
    }
}
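// --- Example (illustrative sketch, not part of the original binding) ---
// Calls the float overload above. Passing a zero CUdeviceptr for the means makes
// the layer treat the precomputed means as zero; the two temp buffers must be real
// allocations with the same descriptor as x. The wrapper class name (LRNDescriptor),
// the mode argument and all buffers are assumptions set up elsewhere.
private static void ExampleDivNormForward(
    LRNDescriptor norm,
    cudnnDivNormMode mode,
    cudnnTensorDescriptor xDesc, CUdeviceptr x,
    CUdeviceptr temp, CUdeviceptr temp2,
    cudnnTensorDescriptor yDesc, CUdeviceptr y)
{
    norm.cudnnDivisiveNormalizationForward(
        mode,
        1.0f, xDesc, x,
        new CUdeviceptr(), // no means tensor -> treated as zeros
        temp, temp2,
        0.0f, yDesc, y);
}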
public void cudnnDivisiveNormalizationBackward(
    cudnnDivNormMode mode,
    float alpha,
    cudnnTensorDescriptor srcDesc,      // same desc for diff, means, temp, temp2
    CUdeviceptr srcData,
    CUdeviceptr srcMeansData,           // if NULL, means are assumed to be zero
    CUdeviceptr srcDiffData,
    CUdeviceptr tempData,
    CUdeviceptr tempData2,
    float betaData,
    cudnnTensorDescriptor destDataDesc, // same desc for dest, means, meansDiff
    CUdeviceptr destDataDiff,           // output data differential
    CUdeviceptr destMeansDiff)          // output means differential, can be NULL
{
    res = CudaDNNNativeMethods.cudnnDivisiveNormalizationBackward(_handle, _desc, mode, ref alpha, srcDesc, srcData, srcMeansData, srcDiffData, tempData, tempData2, ref betaData, destDataDesc, destDataDiff, destMeansDiff);
    Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationBackward", res));
    if (res != cudnnStatus.Success)
    {
        throw new CudaDNNException(res);
    }
}
public void cudnnDivisiveNormalizationForward(
    cudnnDivNormMode mode,
    double alpha,
    cudnnTensorDescriptor srcDesc, // same desc for means, temp, temp2
    CUdeviceptr srcData,
    CUdeviceptr srcMeansData,      // if NULL, means are assumed to be zero
    CUdeviceptr tempData,
    CUdeviceptr tempData2,
    double beta,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData)
{
    res = CudaDNNNativeMethods.cudnnDivisiveNormalizationForward(_handle, _desc, mode, ref alpha, srcDesc, srcData, srcMeansData, tempData, tempData2, ref beta, destDesc, destData);
    Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationForward", res));
    if (res != cudnnStatus.Success)
    {
        throw new CudaDNNException(res);
    }
}
/// <summary>
/// This function performs the backward DivisiveNormalization layer computation.
/// </summary>
/// <param name="mode">DivisiveNormalization layer mode of operation. Currently only
/// CUDNN_DIVNORM_PRECOMPUTED_MEANS is implemented. Normalization is performed
/// using the means input tensor that is expected to be precomputed by the user.</param>
/// <param name="alpha">Scaling factor (in host memory) used to blend the layer output
/// value with the prior value in the destination tensor as follows:
/// dstValue = alpha*resultValue + beta*priorDstValue. See the cuDNN documentation
/// for additional details.</param>
/// <param name="xDesc">Tensor descriptor for the bottom layer's data and means.
/// (The bottom layer is the earlier layer in the computation graph during inference.)</param>
/// <param name="x">Pointer in device memory to the bottom layer's data.</param>
/// <param name="means">Pointer in device memory to the bottom layer's means. The means tensor
/// is expected to be precomputed by the user. It can also contain any valid values (not
/// required to be actual means, and can for instance be the result of a convolution with
/// a Gaussian kernel).</param>
/// <param name="dy">Tensor pointer in device memory for the top layer's cumulative loss
/// differential data (error backpropagation). (The top layer is the later layer in the
/// computation graph during inference.)</param>
/// <param name="temp">Temporary tensor in device memory, used for computing intermediate
/// values during the backward pass. It does not have to be preserved from the forward
/// to the backward pass and uses xDesc as its descriptor.</param>
/// <param name="temp2">Temporary tensor in device memory, used for computing intermediate
/// values during the backward pass. It does not have to be preserved from the forward
/// to the backward pass and uses xDesc as its descriptor.</param>
/// <param name="beta">Scaling factor (in host memory) used to blend the layer output
/// value with the prior value in the destination tensor as follows:
/// dstValue = alpha*resultValue + beta*priorDstValue. See the cuDNN documentation
/// for additional details.</param>
/// <param name="dXdMeansDesc">Tensor descriptor shared by dx and dMeans.</param>
/// <param name="dx">Tensor pointer (in device memory) for the bottom layer's resulting
/// data differential.</param>
/// <param name="dMeans">Tensor pointer (in device memory) for the bottom layer's resulting
/// means differential; shares the dXdMeansDesc descriptor with dx and can be NULL.</param>
public void cudnnDivisiveNormalizationBackward(
    cudnnDivNormMode mode,
    double alpha,
    cudnnTensorDescriptor xDesc, // same desc for x, means, dy, temp, temp2
    CUdeviceptr x,
    CUdeviceptr means,           // if NULL, means are assumed to be zero
    CUdeviceptr dy,
    CUdeviceptr temp,
    CUdeviceptr temp2,
    double beta,
    cudnnTensorDescriptor dXdMeansDesc, // same desc for dx and dMeans
    CUdeviceptr dx,              // output data differential
    CUdeviceptr dMeans)          // output means differential, can be NULL
{
    res = CudaDNNNativeMethods.cudnnDivisiveNormalizationBackward(_handle, _desc, mode, ref alpha, xDesc, x, means, dy, temp, temp2, ref beta, dXdMeansDesc, dx, dMeans);
    Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationBackward", res));
    if (res != cudnnStatus.Success)
    {
        throw new CudaDNNException(res);
    }
}
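// --- Example (illustrative sketch, not part of the original binding) ---
// Backward pass matching the forward sketch above: dy carries the incoming gradient,
// dx receives the data gradient, and zero CUdeviceptr values are passed when the means
// are treated as zero and no means differential is wanted. The LRNDescriptor class name
// and all buffers are assumptions set up elsewhere.
private static void ExampleDivNormBackward(
    LRNDescriptor norm,
    cudnnDivNormMode mode,
    cudnnTensorDescriptor xDesc, CUdeviceptr x, CUdeviceptr dy,
    CUdeviceptr temp, CUdeviceptr temp2,
    cudnnTensorDescriptor dxDesc, CUdeviceptr dx)
{
    norm.cudnnDivisiveNormalizationBackward(
        mode,
        1.0, xDesc, x,
        new CUdeviceptr(), // means assumed zero, as in the forward sketch
        dy, temp, temp2,
        0.0, dxDesc, dx,
        new CUdeviceptr()); // no means differential requested
}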
public static extern cudnnStatus cudnnConvolutionForward(
    cudnnHandle handle,
    ref double alpha,
    cudnnTensorDescriptor srcDesc,
    CUdeviceptr srcData,
    cudnnFilterDescriptor filterDesc,
    CUdeviceptr filterData,
    cudnnConvolutionDescriptor convDesc,
    cudnnConvolutionFwdAlgo algo,
    CUdeviceptr workSpace,
    SizeT workSpaceSizeInBytes,
    ref double beta,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData);
public static extern cudnnStatus cudnnGetConvolutionForwardWorkspaceSize(
    cudnnHandle handle,
    cudnnTensorDescriptor srcDesc,
    cudnnFilterDescriptor filterDesc,
    cudnnConvolutionDescriptor convDesc,
    cudnnTensorDescriptor destDesc,
    cudnnConvolutionFwdAlgo algo,
    ref SizeT sizeInBytes);
public static extern cudnnStatus cudnnGetConvolutionForwardAlgorithm(
    cudnnHandle handle,
    cudnnTensorDescriptor srcDesc,
    cudnnFilterDescriptor filterDesc,
    cudnnConvolutionDescriptor convDesc,
    cudnnTensorDescriptor destDesc,
    cudnnConvolutionFwdPreference preference,
    SizeT memoryLimitInbytes,
    ref cudnnConvolutionFwdAlgo algo);
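// --- Example (illustrative sketch, not part of the original binding) ---
// Typical forward-convolution flow with the entry points above: pick an algorithm,
// ask how much workspace it needs, then run the convolution. The handle, descriptors,
// device pointers and the workspace allocation are hypothetical and assumed to exist;
// the cudnnConvolutionFwdPreference member name is an assumption.
private static void ExampleConvolutionForward(
    cudnnHandle handle,
    cudnnTensorDescriptor srcDesc, CUdeviceptr srcData,
    cudnnFilterDescriptor filterDesc, CUdeviceptr filterData,
    cudnnConvolutionDescriptor convDesc,
    cudnnTensorDescriptor destDesc, CUdeviceptr destData,
    CUdeviceptr workspace, SizeT workspaceSize)
{
    // 1) Let cuDNN choose the fastest algorithm that fits the given workspace limit.
    cudnnConvolutionFwdAlgo algo = new cudnnConvolutionFwdAlgo();
    cudnnStatus status = CudaDNNNativeMethods.cudnnGetConvolutionForwardAlgorithm(
        handle, srcDesc, filterDesc, convDesc, destDesc,
        cudnnConvolutionFwdPreference.SpecifyWorkspaceLimit, // assumed enum member name
        workspaceSize, ref algo);
    if (status != cudnnStatus.Success) throw new CudaDNNException(status);

    // 2) Query the workspace the chosen algorithm actually needs.
    SizeT needed = new SizeT();
    status = CudaDNNNativeMethods.cudnnGetConvolutionForwardWorkspaceSize(
        handle, srcDesc, filterDesc, convDesc, destDesc, algo, ref needed);
    if (status != cudnnStatus.Success) throw new CudaDNNException(status);
    // (In real code, reallocate the workspace here if `needed` exceeds workspaceSize.)

    // 3) Run the convolution: dest = 1.0 * conv(src, filter) + 0.0 * dest.
    double alpha = 1.0, beta = 0.0;
    status = CudaDNNNativeMethods.cudnnConvolutionForward(
        handle, ref alpha, srcDesc, srcData, filterDesc, filterData,
        convDesc, algo, workspace, workspaceSize,
        ref beta, destDesc, destData);
    if (status != cudnnStatus.Success) throw new CudaDNNException(status);
}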
public static extern cudnnStatus cudnnFindConvolutionForwardAlgorithm(
    cudnnHandle handle,
    cudnnTensorDescriptor srcDesc,
    cudnnFilterDescriptor filterDesc,
    cudnnConvolutionDescriptor convDesc,
    cudnnTensorDescriptor destDesc,
    int requestedAlgoCount,
    ref int returnedAlgoCount,
    cudnnConvolutionFwdAlgoPerf[] perfResults);
public static extern cudnnStatus cudnnGetConvolutionNdForwardOutputDim(
    cudnnConvolutionDescriptor convDesc,
    cudnnTensorDescriptor inputTensorDesc,
    cudnnFilterDescriptor filterDesc,
    int nbDims,
    int[] tensorOutputDimA);
public static extern cudnnStatus cudnnLRNCrossChannelBackward(
    cudnnHandle handle,
    cudnnLRNDescriptor normDesc,
    cudnnLRNMode lrnMode,
    ref double alpha,
    cudnnTensorDescriptor srcDesc,
    CUdeviceptr srcData,
    cudnnTensorDescriptor srcDiffDesc,
    CUdeviceptr srcDiffData,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData,
    ref double beta,
    cudnnTensorDescriptor destDiffDesc,
    CUdeviceptr destDiffData);
public static extern cudnnStatus cudnnPoolingBackward(
    cudnnHandle handle,
    cudnnPoolingDescriptor poolingDesc,
    ref double alpha,
    cudnnTensorDescriptor srcDesc,
    CUdeviceptr srcData,
    cudnnTensorDescriptor srcDiffDesc,
    CUdeviceptr srcDiffData,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData,
    ref double beta,
    cudnnTensorDescriptor destDiffDesc,
    CUdeviceptr destDiffData);
public static extern cudnnStatus cudnnDestroyTensorDescriptor( cudnnTensorDescriptor tensorDesc );
public static extern cudnnStatus cudnnTransformTensor(
    cudnnHandle handle,
    ref double alpha,
    cudnnTensorDescriptor srcDesc,
    CUdeviceptr srcData,
    ref double beta,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData);
public static extern cudnnStatus cudnnDivisiveNormalizationBackward(
    cudnnHandle handle,
    cudnnLRNDescriptor normDesc,
    cudnnDivNormMode mode,
    ref double alpha,
    cudnnTensorDescriptor srcDesc,      // same desc for diff, means, temp, temp2
    CUdeviceptr srcData,
    CUdeviceptr srcMeansData,           // if NULL, means are assumed to be zero
    CUdeviceptr srcDiffData,
    CUdeviceptr tempData,
    CUdeviceptr tempData2,
    ref double beta,
    cudnnTensorDescriptor destDataDesc, // same desc for dest, means, meansDiff
    CUdeviceptr destDataDiff,           // output data differential
    CUdeviceptr destMeansDiff);         // output means differential, can be NULL
public static extern cudnnStatus cudnnGetTensorNdDescriptor(
    cudnnTensorDescriptor tensorDesc,
    int nbDimsRequested,
    ref cudnnDataType dataType,
    ref int nbDims,
    int[] dimA,
    int[] strideA);
public static extern cudnnStatus cudnnDivisiveNormalizationForward(
    cudnnHandle handle,
    cudnnLRNDescriptor normDesc,
    cudnnDivNormMode mode,
    ref double alpha,
    cudnnTensorDescriptor srcDesc, // same desc for means, temp, temp2
    CUdeviceptr srcData,
    CUdeviceptr srcMeansData,      // if NULL, means are assumed to be zero
    CUdeviceptr tempData,
    CUdeviceptr tempData2,
    ref double beta,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData);
public static extern cudnnStatus cudnnConvolutionBackwardBias(
    cudnnHandle handle,
    ref double alpha,
    cudnnTensorDescriptor srcDesc,
    CUdeviceptr srcData,
    ref double beta,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData);
public static extern cudnnStatus cudnnLRNCrossChannelForward(
    cudnnHandle handle,
    cudnnLRNDescriptor normDesc,
    cudnnLRNMode lrnMode,
    ref float alpha,
    cudnnTensorDescriptor srcDesc,
    CUdeviceptr srcData,
    ref float beta,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData);
public static extern cudnnStatus cudnnPoolingForward(
    cudnnHandle handle,
    cudnnPoolingDescriptor poolingDesc,
    ref float alpha,
    cudnnTensorDescriptor srcDesc,
    CUdeviceptr srcData,
    ref float beta,
    cudnnTensorDescriptor destDesc,
    CUdeviceptr destData);
public static extern cudnnStatus cudnnGetTensor4dDescriptor(
    cudnnTensorDescriptor tensorDesc,
    ref cudnnDataType dataType, // image data type
    ref int n,                  // number of inputs (batch size)
    ref int c,                  // number of input feature maps
    ref int h,                  // height of input section
    ref int w,                  // width of input section
    ref int nStride,
    ref int cStride,
    ref int hStride,
    ref int wStride);
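// --- Example (illustrative sketch, not part of the original binding) ---
// Reads back the geometry of an already-initialized 4D tensor descriptor and logs it.
// The descriptor argument is assumed to have been set up elsewhere.
private static void ExamplePrintTensor4d(cudnnTensorDescriptor desc)
{
    cudnnDataType dataType = new cudnnDataType();
    int n = 0, c = 0, h = 0, w = 0;
    int nStride = 0, cStride = 0, hStride = 0, wStride = 0;
    cudnnStatus status = CudaDNNNativeMethods.cudnnGetTensor4dDescriptor(
        desc, ref dataType, ref n, ref c, ref h, ref w,
        ref nStride, ref cStride, ref hStride, ref wStride);
    if (status != cudnnStatus.Success) throw new CudaDNNException(status);
    Debug.WriteLine(String.Format("tensor: {0}x{1}x{2}x{3}, strides {4}/{5}/{6}/{7}",
        n, c, h, w, nStride, cStride, hStride, wStride));
}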
public static extern cudnnStatus cudnnSetTensorNdDescriptor(
    cudnnTensorDescriptor tensorDesc,
    cudnnDataType dataType,
    int nbDims,
    int[] dimA,
    int[] strideA);
public static extern cudnnStatus cudnnSetTensor(
    cudnnHandle handle,
    cudnnTensorDescriptor srcDestDesc,
    CUdeviceptr srcDestData,
    CUdeviceptr value);
public static extern cudnnStatus cudnnScaleTensor(
    cudnnHandle handle,
    cudnnTensorDescriptor srcDestDesc,
    CUdeviceptr srcDestData,
    ref double alpha);
public static extern cudnnStatus cudnnGetPoolingNdForwardOutputDim(
    cudnnPoolingDescriptor poolingDesc,
    cudnnTensorDescriptor inputTensorDesc,
    int nbDims,
    int[] outputTensorDimA);
public static extern cudnnStatus cudnnGetConvolution2dForwardOutputDim(
    cudnnConvolutionDescriptor convDesc,
    cudnnTensorDescriptor inputTensorDesc,
    cudnnFilterDescriptor filterDesc,
    ref int n,
    ref int c,
    ref int h,
    ref int w);
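// --- Example (illustrative sketch, not part of the original binding) ---
// Asks cuDNN for the output shape a 2D convolution will produce; this is the shape
// the destination tensor descriptor must describe before cudnnConvolutionForward is
// called. All descriptors are assumed to be configured elsewhere.
private static void ExampleQueryConvOutputDim(
    cudnnConvolutionDescriptor convDesc,
    cudnnTensorDescriptor inputDesc,
    cudnnFilterDescriptor filterDesc,
    out int n, out int c, out int h, out int w)
{
    n = 0; c = 0; h = 0; w = 0;
    cudnnStatus status = CudaDNNNativeMethods.cudnnGetConvolution2dForwardOutputDim(
        convDesc, inputDesc, filterDesc, ref n, ref c, ref h, ref w);
    if (status != cudnnStatus.Success) throw new CudaDNNException(status);
}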
public static extern cudnnStatus cudnnAddTensor(
    cudnnHandle handle,
    ref double alpha,
    cudnnTensorDescriptor biasDesc,
    CUdeviceptr biasData,
    ref double beta,
    cudnnTensorDescriptor srcDestDesc,
    CUdeviceptr srcDestData);
public static extern cudnnStatus cudnnGetPooling2dForwardOutputDim(
    cudnnPoolingDescriptor poolingDesc,
    cudnnTensorDescriptor inputTensorDesc,
    ref int outN,
    ref int outC,
    ref int outH,
    ref int outW);
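// --- Example (illustrative sketch, not part of the original binding) ---
// Queries the pooled output shape, then runs the pooling forward pass with alpha=1,
// beta=0. The pooling descriptor, tensor descriptors and device pointers are
// hypothetical; configuring destDesc to the queried outN x outC x outH x outW shape
// is left to the caller because the corresponding setter is not part of this excerpt.
private static void ExamplePoolingForward(
    cudnnHandle handle, cudnnPoolingDescriptor poolingDesc,
    cudnnTensorDescriptor srcDesc, CUdeviceptr srcData,
    cudnnTensorDescriptor destDesc, CUdeviceptr destData)
{
    int outN = 0, outC = 0, outH = 0, outW = 0;
    cudnnStatus status = CudaDNNNativeMethods.cudnnGetPooling2dForwardOutputDim(
        poolingDesc, srcDesc, ref outN, ref outC, ref outH, ref outW);
    if (status != cudnnStatus.Success) throw new CudaDNNException(status);
    // destDesc is expected to describe an outN x outC x outH x outW tensor.

    float alpha = 1.0f, beta = 0.0f;
    status = CudaDNNNativeMethods.cudnnPoolingForward(
        handle, poolingDesc, ref alpha, srcDesc, srcData, ref beta, destDesc, destData);
    if (status != cudnnStatus.Success) throw new CudaDNNException(status);
}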