Code Example #1
        public void cudnnDivisiveNormalizationBackward(
            cudnnDivNormMode mode,
            float alpha,
            cudnnTensorDescriptor srcDesc,      // same desc for diff, means, temp, temp2
            CUdeviceptr srcData,
            CUdeviceptr srcMeansData,           // if NULL, means are assumed to be zero
            CUdeviceptr srcDiffData,
            CUdeviceptr tempData,
            CUdeviceptr tempData2,
            float betaData,
            cudnnTensorDescriptor destDataDesc, // same desc for dest, means, meansDiff
            CUdeviceptr destDataDiff,           // output data differential
            CUdeviceptr destMeansDiff           // output means differential, can be NULL
            )
        {
            res = CudaDNNNativeMethods.cudnnDivisiveNormalizationBackward(_handle, _desc, mode, ref alpha, srcDesc, srcData, srcMeansData, srcDiffData, tempData, tempData2, ref betaData, destDataDesc, destDataDiff, destMeansDiff);
            Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationBackward", res));
            if (res != cudnnStatus.Success) throw new CudaDNNException(res);
        }
Code Example #2
File: LRNDescriptor.cs Project: furusdara/cuda
 public void cudnnDivisiveNormalizationForward(
     cudnnDivNormMode mode,
     double alpha,
     cudnnTensorDescriptor srcDesc,                                                          // same desc for means, temp, temp2
     CUdeviceptr srcData,
     CUdeviceptr srcMeansData,                                                               // if NULL, means are assumed to be zero
     CUdeviceptr tempData,
     CUdeviceptr tempData2,
     double beta,
     cudnnTensorDescriptor destDesc,
     CUdeviceptr destData)
 {
     res = CudaDNNNativeMethods.cudnnDivisiveNormalizationForward(_handle, _desc, mode, ref alpha, srcDesc, srcData, srcMeansData, tempData, tempData2, ref beta, destDesc, destData);
     Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationForward", res));
     if (res != cudnnStatus.Success)
     {
         throw new CudaDNNException(res);
     }
 }
Code Example #3
 /// <summary>
 /// This function performs the forward DivisiveNormalization layer computation.
 /// </summary>
 /// <param name="mode">DivisiveNormalization layer mode of operation. Currently only
 /// CUDNN_DIVNORM_PRECOMPUTED_MEANS is implemented. Normalization
 /// is performed using the means input tensor that is expected to be
 /// precomputed by the user.</param>
 /// <param name="alpha">Pointer to scaling factors (in host memory) used to blend the layer output
 /// value with prior value in the destination tensor as follows: dstValue =
 /// alpha[0]*resultValue + beta[0]*priorDstValue. Please refer to this section
 /// for additional details.</param>
 /// <param name="xDesc">Tensor descriptor objects for the input and output tensors. Note that
 /// srcDesc is shared between srcData, srcMeansData, tempData, tempData2
 /// tensors.</param>
 /// <param name="x">Input tensor data pointer in device memory.</param>
 /// <param name="means">Input means tensor data pointer in device memory. Note that this tensor
 /// can be NULL (in that case it's values are assumed to be zero during the
 /// computation). This tensor also doesn't have to contain means, these can
 /// be any values, a frequently used variation is a result of convolution with a
 /// normalized positive kernel (such as Gaussian).</param>
 /// <param name="temp">Temporary tensors in device memory. These are used for computing
 /// intermediate values during the forward pass. These tensors do not have
 /// to be preserved as inputs from forward to the backward pass. Both use
 /// srcDesc as a descriptor.</param>
 /// <param name="temp2">Temporary tensors in device memory. These are used for computing
 /// intermediate values during the forward pass. These tensors do not have
 /// to be preserved as inputs from forward to the backward pass. Both use
 /// srcDesc as a descriptor.</param>
 /// <param name="beta">Pointer to scaling factors (in host memory) used to blend the layer output
 /// value with prior value in the destination tensor as follows: dstValue =
 /// alpha[0]*resultValue + beta[0]*priorDstValue. Please refer to this section
 /// for additional details.</param>
 /// <param name="yDesc">Tensor descriptor objects for the input and output tensors. Note that
 /// srcDesc is shared between srcData, srcMeansData, tempData, tempData2
 /// tensors.</param>
 /// <param name="y">Pointer in device memory to a tensor for the result of the forward DivisiveNormalization pass.</param>
 public void cudnnDivisiveNormalizationForward(
     cudnnDivNormMode mode,
     float alpha,
     cudnnTensorDescriptor xDesc,                 // same desc for means, temp, temp2
     CUdeviceptr x,
     CUdeviceptr means,                           // if NULL, means are assumed to be zero
     CUdeviceptr temp,
     CUdeviceptr temp2,
     float beta,
     cudnnTensorDescriptor yDesc,
     CUdeviceptr y)
 {
     res = CudaDNNNativeMethods.cudnnDivisiveNormalizationForward(_handle, _desc, mode, ref alpha, xDesc, x, means, temp, temp2, ref beta, yDesc, y);
     // Logging is disabled in this version of the wrapper:
     // Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationForward", res));
     if (res != cudnnStatus.Success)
     {
         throw new CudaDNNException(res);
     }
 }
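
To make the alpha/beta blending described in the comments above concrete, here is a minimal usage sketch of the forward wrapper. It is illustrative only: the lrn descriptor object, the tensor descriptors and the device buffers (dX, dTemp, dTemp2, dY) are hypothetical and assumed to be created and sized elsewhere, and the enum member name PrecomputedMeans is an assumption about the managedCuda binding.

 // Minimal sketch, not a drop-in example: with alpha = 1 and beta = 0 the
 // result overwrites y; beta > 0 would blend with y's prior contents.
 lrn.cudnnDivisiveNormalizationForward(
     cudnnDivNormMode.PrecomputedMeans,  // only implemented mode per the docs above
     1.0f,                               // alpha
     xDesc,                              // shared by x, means, temp, temp2
     dX,                                 // input data (device pointer)
     new CUdeviceptr(0),                 // means == NULL: assumed to be zero
     dTemp,                              // workspace, same descriptor as x
     dTemp2,                             // second workspace
     0.0f,                               // beta
     yDesc,
     dY);                                // throws CudaDNNException on failure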
Code Example #4
File: LRNDescriptor.cs Project: furusdara/cuda
 public void cudnnDivisiveNormalizationBackward(
     cudnnDivNormMode mode,
     float alpha,
     cudnnTensorDescriptor srcDesc,                                                          // same desc for diff, means, temp, temp2
     CUdeviceptr srcData,
     CUdeviceptr srcMeansData,                                                               // if NULL, means are assumed to be zero
     CUdeviceptr srcDiffData,
     CUdeviceptr tempData,
     CUdeviceptr tempData2,
     float betaData,
     cudnnTensorDescriptor destDataDesc,                                                     // same desc for dest, means, meansDiff
     CUdeviceptr destDataDiff,                                                               // output data differential
     CUdeviceptr destMeansDiff                                                               // output means differential, can be NULL
     )
 {
     res = CudaDNNNativeMethods.cudnnDivisiveNormalizationBackward(_handle, _desc, mode, ref alpha, srcDesc, srcData, srcMeansData, srcDiffData, tempData, tempData2, ref betaData, destDataDesc, destDataDiff, destMeansDiff);
     Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationBackward", res));
     if (res != cudnnStatus.Success)
     {
         throw new CudaDNNException(res);
     }
 }
Code Example #5
 /// <summary>
 /// This function performs the backward DivisiveNormalization layer computation.
 /// </summary>
 /// <param name="mode">DivisiveNormalization layer mode of operation. Currently only
 /// CUDNN_DIVNORM_PRECOMPUTED_MEANS is implemented. Normalization
 /// is performed using the means input tensor that is expected to be
 /// precomputed by the user.</param>
 /// <param name="alpha">Pointer to scaling factors (in host memory) used to blend the layer output
 /// value with prior value in the destination tensor as follows: dstValue =
 /// alpha[0]*resultValue + beta[0]*priorDstValue. Please refer to this section
 /// for additional details.</param>
 /// <param name="xDesc">Tensor descriptor and pointers in device memory for the bottom layer's
 /// data and means. (Bottom layer is the earlier layer in the computation
 /// graph during inference). Note: the means tensor is expected to be
 /// precomputed by the user. It can also contain any valid values (not required
 /// to be actual means, and can be for instance a result of a convolution with
 /// a Gaussian kernel).</param>
 /// <param name="x">Tensor descriptor and pointers in device memory for the bottom layer's
 /// data and means. (Bottom layer is the earlier layer in the computation
 /// graph during inference). Note: the means tensor is expected to be
 /// precomputed by the user. It can also contain any valid values (not required
 /// to be actual means, and can be for instance a result of a convolution with
 /// a Gaussian kernel).</param>
 /// <param name="means">Tensor descriptor and pointers in device memory for the bottom layer's
 /// data and means. (Bottom layer is the earlier layer in the computation
 /// graph during inference). Note: the means tensor is expected to be
 /// precomputed by the user. It can also contain any valid values (not required
 /// to be actual means, and can be for instance a result of a convolution with
 /// a Gaussian kernel).</param>
 /// <param name="dy">Tensor pointer in device memory for the top layer's cumulative loss
 /// differential data (error backpropagation). (Top layer is the later layer in
 /// the computation graph during inference).</param>
 /// <param name="temp">Temporary tensors in device memory. These are used for computing
 /// intermediate values during the backward pass. These tensors do not have
 /// to be preserved from forward to backward pass. Both use srcDesc as a
 /// descriptor.</param>
 /// <param name="temp2">Temporary tensors in device memory. These are used for computing
 /// intermediate values during the backward pass. These tensors do not have
 /// to be preserved from forward to backward pass. Both use srcDesc as a
 /// descriptor.</param>
 /// <param name="beta">Pointer to scaling factors (in host memory) used to blend the layer output
 /// value with prior value in the destination tensor as follows: dstValue =
 /// alpha[0]*resultValue + beta[0]*priorDstValue. Please refer to this section
 /// for additional details.</param>
 /// <param name="dXdMeansDesc">Tensor descriptor for destDataDiff and destMeansDiff.</param>
 /// <param name="dx">Tensor pointers (in device memory) for the bottom layer's resulting
 /// differentials (data and means). Both share the same descriptor.</param>
 /// <param name="dMeans">Tensor pointers (in device memory) for the bottom layer's resulting
 /// differentials (data and means). Both share the same descriptor.</param>
 public void cudnnDivisiveNormalizationBackward(
     cudnnDivNormMode mode,
     double alpha,
     cudnnTensorDescriptor xDesc,                                                     // same desc for diff, means, temp, temp2
     CUdeviceptr x,
     CUdeviceptr means,                                                               // if NULL, means are assumed to be zero
     CUdeviceptr dy,
     CUdeviceptr temp,
     CUdeviceptr temp2,
     double beta,
     cudnnTensorDescriptor dXdMeansDesc,                                           // same desc for dest, means, meansDiff
     CUdeviceptr dx,                                                               // output data differential
     CUdeviceptr dMeans                                                            // output means differential, can be NULL
     )
 {
     res = CudaDNNNativeMethods.cudnnDivisiveNormalizationBackward(_handle, _desc, mode, ref alpha, xDesc, x, means, dy, temp, temp2, ref beta, dXdMeansDesc, dx, dMeans);
     // Logging is disabled in this version of the wrapper:
     // Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationBackward", res));
     if (res != cudnnStatus.Success)
     {
         throw new CudaDNNException(res);
     }
 }
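
Correspondingly, a minimal sketch of the backward wrapper, again with hypothetical setup (descriptors and device buffers allocated elsewhere, enum member name assumed). It illustrates the one behavioral knob the comments describe: beta = 1 accumulates the new differentials into the existing output contents instead of overwriting them.

 // Minimal sketch, hypothetical setup: propagates the incoming gradient dDy
 // back to dDx (and optionally dDMeans).
 lrn.cudnnDivisiveNormalizationBackward(
     cudnnDivNormMode.PrecomputedMeans,
     1.0,                                // alpha
     xDesc,                              // shared by x, means, dy, temp, temp2
     dX,                                 // forward-pass input data
     dMeansBuf,                          // precomputed means (may be NULL)
     dDy,                                // incoming gradient from the top layer
     dTemp,
     dTemp2,
     1.0,                                // beta = 1: accumulate into prior dDx
     dXdMeansDesc,                       // shared by dDx and dDMeans
     dDx,                                // output data differential
     dDMeans);                           // output means differential, may be NULL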
Code Example #6
        public void cudnnDivisiveNormalizationForward(
            cudnnDivNormMode mode,
            double alpha,
            cudnnTensorDescriptor srcDesc,      // same desc for means, temp, temp2
            CUdeviceptr srcData,
            CUdeviceptr srcMeansData,           // if NULL, means are assumed to be zero
            CUdeviceptr tempData,
            CUdeviceptr tempData2,
            double beta,
            cudnnTensorDescriptor destDesc,
            CUdeviceptr destData)
        {
            res = CudaDNNNativeMethods.cudnnDivisiveNormalizationForward(_handle, _desc, mode, ref alpha, srcDesc, srcData, srcMeansData, tempData, tempData2, ref beta, destDesc, destData);
            Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationForward", res));
            if (res != cudnnStatus.Success) throw new CudaDNNException(res);
        }
Code Example #7
        public static extern cudnnStatus cudnnDivisiveNormalizationBackward(
            cudnnHandle handle,
            cudnnLRNDescriptor normDesc,
            cudnnDivNormMode mode,
            ref double alpha,
            cudnnTensorDescriptor srcDesc,      // same desc for diff, means, temp, temp2
            CUdeviceptr srcData,
            CUdeviceptr srcMeansData,           // if NULL, means are assumed to be zero
            CUdeviceptr srcDiffData,
            CUdeviceptr tempData,
            CUdeviceptr tempData2,
            ref double beta,
            cudnnTensorDescriptor destDataDesc, // same desc for dest, means, meansDiff
            CUdeviceptr destDataDiff,           // output data differential
            CUdeviceptr destMeansDiff           // output means differential, can be NULL
            );
Code Example #8
        public static extern cudnnStatus cudnnDivisiveNormalizationForward(
            cudnnHandle handle,
            cudnnLRNDescriptor normDesc,
            cudnnDivNormMode mode,
            ref double alpha,
            cudnnTensorDescriptor srcDesc,      // same desc for means, temp, temp2
            CUdeviceptr srcData,
            CUdeviceptr srcMeansData,           // if NULL, means are assumed to be zero
            CUdeviceptr tempData,
            CUdeviceptr tempData2,
            ref double beta,
            cudnnTensorDescriptor destDesc,
            CUdeviceptr destData
            );
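
The two extern declarations above will not compile on their own: they are P/Invoke stubs that, in the full managedCuda source, sit inside the CudaDNNNativeMethods class under a [DllImport] attribute naming the native cuDNN library. The following is a minimal sketch of that missing plumbing; the library name "cudnn64_7" and the constant name are assumptions that vary with the installed cuDNN version and platform.

 using System.Runtime.InteropServices;

 public static class CudaDNNNativeMethods
 {
     // Assumed native library name; the real constant depends on the cuDNN
     // version being wrapped (e.g. "cudnn64_7" for cuDNN 7 on Windows).
     internal const string CUDNN_API_DLL_NAME = "cudnn64_7";

     [DllImport(CUDNN_API_DLL_NAME)]
     public static extern cudnnStatus cudnnDivisiveNormalizationForward(
         cudnnHandle handle,
         cudnnLRNDescriptor normDesc,
         cudnnDivNormMode mode,
         ref double alpha,
         cudnnTensorDescriptor srcDesc,
         CUdeviceptr srcData,
         CUdeviceptr srcMeansData,
         CUdeviceptr tempData,
         CUdeviceptr tempData2,
         ref double beta,
         cudnnTensorDescriptor destDesc,
         CUdeviceptr destData);
 }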