cudnnDivisiveNormalizationForward() private method

private cudnnDivisiveNormalizationForward ( cudnnHandle handle, cudnnLRNDescriptor normDesc, cudnnDivNormMode mode, ref double alpha, cudnnTensorDescriptor srcDesc, ManagedCuda.BasicTypes.CUdeviceptr srcData, ManagedCuda.BasicTypes.CUdeviceptr srcMeansData, ManagedCuda.BasicTypes.CUdeviceptr tempData, ManagedCuda.BasicTypes.CUdeviceptr tempData2, ref double beta, cudnnTensorDescriptor destDesc, ManagedCuda.BasicTypes.CUdeviceptr destData ) : cudnnStatus
handle cudnnHandle
normDesc cudnnLRNDescriptor
mode cudnnDivNormMode
alpha double
srcDesc cudnnTensorDescriptor
srcData ManagedCuda.BasicTypes.CUdeviceptr
srcMeansData ManagedCuda.BasicTypes.CUdeviceptr
tempData ManagedCuda.BasicTypes.CUdeviceptr
tempData2 ManagedCuda.BasicTypes.CUdeviceptr
beta double
destDesc cudnnTensorDescriptor
destData ManagedCuda.BasicTypes.CUdeviceptr
return cudnnStatus
Example 1
0
 /// <summary>
 /// Performs the forward DivisiveNormalization layer computation
 /// (overload taking double-precision blending factors).
 /// Delegates to the native cuDNN call and throws on any non-Success status.
 /// </summary>
 /// <param name="mode">DivisiveNormalization layer mode of operation.</param>
 /// <param name="alpha">Scaling factor (host value, passed by ref to the native call) used to blend
 /// the layer output with the prior destination value:
 /// dstValue = alpha*resultValue + beta*priorDstValue.</param>
 /// <param name="srcDesc">Tensor descriptor shared by srcData, srcMeansData, tempData and tempData2.</param>
 /// <param name="srcData">Input tensor data pointer in device memory.</param>
 /// <param name="srcMeansData">Input means tensor pointer in device memory; may be NULL, in which
 /// case the means are assumed to be zero during the computation.</param>
 /// <param name="tempData">Temporary tensor in device memory for intermediate forward-pass values.</param>
 /// <param name="tempData2">Second temporary tensor in device memory for intermediate forward-pass values.</param>
 /// <param name="beta">Scaling factor (host value, passed by ref) applied to the prior
 /// destination value; see alpha for the blend formula.</param>
 /// <param name="destDesc">Tensor descriptor for the output tensor.</param>
 /// <param name="destData">Pointer in device memory receiving the forward DivisiveNormalization result.</param>
 /// <exception cref="CudaDNNException">Thrown when the native call returns anything other than
 /// cudnnStatus.Success.</exception>
 public void cudnnDivisiveNormalizationForward(
     cudnnDivNormMode mode,
     double alpha,
     cudnnTensorDescriptor srcDesc,                                                          // same desc for means, temp, temp2
     CUdeviceptr srcData,
     CUdeviceptr srcMeansData,                                                               // if NULL, means are assumed to be zero
     CUdeviceptr tempData,
     CUdeviceptr tempData2,
     double beta,
     cudnnTensorDescriptor destDesc,
     CUdeviceptr destData)
 {
     // Forward to the native cuDNN entry point; _handle/_desc are the wrapper's
     // cuDNN handle and LRN descriptor fields (declared elsewhere in this class).
     res = CudaDNNNativeMethods.cudnnDivisiveNormalizationForward(_handle, _desc, mode, ref alpha, srcDesc, srcData, srcMeansData, tempData, tempData2, ref beta, destDesc, destData);
     Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationForward", res));
     if (res != cudnnStatus.Success)
     {
         throw new CudaDNNException(res);
     }
 }
Example 2
0
 /// <summary>
 /// Performs the forward DivisiveNormalization layer computation
 /// (overload taking single-precision blending factors).
 /// Delegates to the native cuDNN call and throws on any non-Success status.
 /// </summary>
 /// <param name="mode">DivisiveNormalization layer mode of operation. Currently only
 /// CUDNN_DIVNORM_PRECOMPUTED_MEANS is implemented. Normalization
 /// is performed using the means input tensor that is expected to be
 /// precomputed by the user.</param>
 /// <param name="alpha">Scaling factor (host value, passed by ref to the native call) used to blend
 /// the layer output with the prior destination value:
 /// dstValue = alpha*resultValue + beta*priorDstValue.</param>
 /// <param name="xDesc">Tensor descriptor for the input tensor. Note that xDesc is
 /// shared between the x, means, temp and temp2 tensors.</param>
 /// <param name="x">Input tensor data pointer in device memory.</param>
 /// <param name="means">Input means tensor data pointer in device memory. Note that this tensor
 /// can be NULL (in that case its values are assumed to be zero during the
 /// computation). This tensor also doesn't have to contain means; these can
 /// be any values, a frequently used variation is a result of convolution with a
 /// normalized positive kernel (such as Gaussian).</param>
 /// <param name="temp">Temporary tensor in device memory, used for computing intermediate
 /// values during the forward pass. It does not have to be preserved from the
 /// forward to the backward pass and uses xDesc as its descriptor.</param>
 /// <param name="temp2">Second temporary tensor in device memory, used for computing intermediate
 /// values during the forward pass. It does not have to be preserved from the
 /// forward to the backward pass and uses xDesc as its descriptor.</param>
 /// <param name="beta">Scaling factor (host value, passed by ref) applied to the prior
 /// destination value; see alpha for the blend formula.</param>
 /// <param name="yDesc">Tensor descriptor for the output tensor.</param>
 /// <param name="y">Pointer in device memory to a tensor for the result of the forward DivisiveNormalization pass.</param>
 /// <exception cref="CudaDNNException">Thrown when the native call returns anything other than
 /// cudnnStatus.Success.</exception>
 public void cudnnDivisiveNormalizationForward(
     cudnnDivNormMode mode,
     float alpha,
     cudnnTensorDescriptor xDesc,                 // same desc for means, temp, temp2
     CUdeviceptr x,
     CUdeviceptr means,                           // if NULL, means are assumed to be zero
     CUdeviceptr temp,
     CUdeviceptr temp2,
     float beta,
     cudnnTensorDescriptor yDesc,
     CUdeviceptr y)
 {
     res = CudaDNNNativeMethods.cudnnDivisiveNormalizationForward(_handle, _desc, mode, ref alpha, xDesc, x, means, temp, temp2, ref beta, yDesc, y);
     // Restored status logging: the previous body called Debug.Write("") with the real
     // log statement commented out, silently dropping the trace and leaving this
     // overload inconsistent with the double-precision overload.
     Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnDivisiveNormalizationForward", res));
     if (res != cudnnStatus.Success)
     {
         throw new CudaDNNException(res);
     }
 }