 /// <summary>
 /// Creates a new SpatialTransformerDescriptor by calling cudnnCreateSpatialTransformerDescriptor.
 /// </summary>
 /// <param name="context">Handle to a previously created cuDNN context.</param>
 public SpatialTransformerDescriptor(CudaDNNContext context)
 {
     _handle = context.Handle;
     _desc = new cudnnSpatialTransformerDescriptor();
     res = CudaDNNNativeMethods.cudnnCreateSpatialTransformerDescriptor(ref _desc);
     Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnCreateSpatialTransformerDescriptor", res));
     if (res != cudnnStatus.Success) throw new CudaDNNException(res);
 }
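A minimal usage sketch, assuming only the types shown in these examples and the parameterless CudaDNNContext constructor; the variable names are illustrative. The LRNDescriptor in the next example follows the same construction pattern.

 CudaDNNContext ctx = new CudaDNNContext();                                // wraps the cudnnHandle_t
 SpatialTransformerDescriptor stDesc = new SpatialTransformerDescriptor(ctx);
 LRNDescriptor lrnDesc = new LRNDescriptor(ctx);                           // same pattern as Example #2 below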
Example #2
 /// <summary>
 /// Creates a new LRNDescriptor by calling cudnnCreateLRNDescriptor.
 /// </summary>
 /// <param name="context">Handle to a previously created cuDNN context.</param>
 public LRNDescriptor(CudaDNNContext context)
 {
     _handle = context.Handle;
     _desc   = new cudnnLRNDescriptor();
     res     = CudaDNNNativeMethods.cudnnCreateLRNDescriptor(ref _desc);
     Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnCreateLRNDescriptor", res));
     if (res != cudnnStatus.Success)
     {
         throw new CudaDNNException(res);
     }
 }
Example #3
        /// <summary>
        /// Returns the amount of host memory needed to store the metadata of this algorithm descriptor (cudnnGetAlgorithmSpaceSize).
        /// </summary>
        /// <param name="ctx">Handle to a previously created cuDNN context.</param>
        /// <returns>Size in bytes of the host buffer used to save and restore the algorithm state.</returns>
        public SizeT GetAlgorithmSpaceSize(CudaDNNContext ctx)
        {
            SizeT size = new SizeT();

            res = CudaDNNNativeMethods.cudnnGetAlgorithmSpaceSize(ctx.Handle, _desc, ref size);
            Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnGetAlgorithmSpaceSize", res));
            if (res != cudnnStatus.Success)
            {
                throw new CudaDNNException(res);
            }
            return size;
        }
Example #4
        /// <summary>
        /// Restores the algorithm descriptor from a previously saved host buffer (cudnnRestoreAlgorithm).
        /// </summary>
        /// <param name="ctx">Handle to a previously created cuDNN context.</param>
        /// <param name="algoSpace">Host buffer holding the previously saved algorithm metadata.</param>
        public void RestoreAlgorithm(CudaDNNContext ctx, byte[] algoSpace)
        {
            GCHandle handle = GCHandle.Alloc(algoSpace, GCHandleType.Pinned);

            try
            {
                IntPtr ptr = handle.AddrOfPinnedObject();
                res = CudaDNNNativeMethods.cudnnRestoreAlgorithm(ctx.Handle, ptr, algoSpace.Length, _desc);
                Debug.WriteLine(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnRestoreAlgorithm", res));
            }
            finally
            {
                handle.Free();
            }
            if (res != cudnnStatus.Success)
            {
                throw new CudaDNNException(res);
            }
        }
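A hedged round-trip sketch tying Examples #3 and #4 together: query the buffer size, save the algorithm state into a host byte array, and restore it later. The variables algoDesc and ctx are assumed to exist, and SaveAlgorithm is a hypothetical counterpart wrapping cuDNN's cudnnSaveAlgorithm; only GetAlgorithmSpaceSize and RestoreAlgorithm are shown in these examples.

        // Illustrative only; names other than the two methods above are assumptions.
        SizeT spaceSize = algoDesc.GetAlgorithmSpaceSize(ctx);
        byte[] algoSpace = new byte[(long)spaceSize];
        algoDesc.SaveAlgorithm(ctx, algoSpace);     // hypothetical save counterpart (cudnnSaveAlgorithm)
        // ... later, e.g. after reloading algoSpace from disk ...
        algoDesc.RestoreAlgorithm(ctx, algoSpace);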
Example #5
        /// <summary>
        /// Returns the workspace size needed for the CTC loss computation.
        /// </summary>
        /// <param name="handle">Handle to a previously created cuDNN context.</param>
        /// <param name="probsDesc">Handle to the previously initialized probabilities tensor descriptor.</param>
        /// <param name="gradientsDesc">Handle to a previously initialized gradients tensor descriptor.</param>
        /// <param name="labels">Pointer to a previously initialized labels list.</param>
        /// <param name="labelLengths">Pointer to a previously initialized lengths list, to walk the above labels list.</param>
        /// <param name="inputLengths">Pointer to a previously initialized list of the lengths of the timing steps in each batch.</param>
        /// <param name="algo">Enumerant that specifies the chosen CTC loss algorithm</param>
        /// <returns>Amount of GPU memory needed as workspace to be able to execute the CTC
        /// loss computation with the specified algo.</returns>
        public SizeT CTCLoss(CudaDNNContext handle,
                             TensorDescriptor probsDesc,     /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the mini batch size, A is the alphabet size)  */
                             int[] labels,                   /* labels, in CPU memory */
                             int[] labelLengths,             /* the length of each label, in CPU memory */
                             int[] inputLengths,             /* the lengths of timing steps in each batch, in CPU memory */
                             TensorDescriptor gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */
                             cudnnCTCLossAlgo algo           /* algorithm selected, supported now 0 and 1 */
                             )
        {
            SizeT size = new SizeT();

            res = CudaDNNNativeMethods.cudnnGetCTCLossWorkspaceSize(handle.Handle, probsDesc.Desc, gradientsDesc.Desc, labels, labelLengths, inputLengths,
                                                                    algo, _desc, ref size);
            Debug.Write("");//Line(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnGetCTCLossWorkspaceSize", res));
            if (res != cudnnStatus.Success)
            {
                throw new CudaDNNException(res);
            }
            return size;
        }
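A hedged sketch of how the returned SizeT is typically consumed: allocate a byte workspace of that size on the GPU and pass it to the actual loss call (Example #6). Everything except the method shown above (ctcLossDesc, ctx, the descriptors, label arrays, and algo) is assumed to be set up already.

        SizeT workspaceSize = ctcLossDesc.CTCLoss(ctx, probsDesc, labels, labelLengths,
                                                  inputLengths, gradientsDesc, algo);
        CudaDeviceVariable<byte> workspace = new CudaDeviceVariable<byte>(workspaceSize);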
Example #6
 /// <summary>
 /// This function returns the CTC costs and gradients, given the probabilities and labels.
 /// </summary>
 /// <param name="handle">Handle to a previously created cuDNN context.</param>
 /// <param name="probsDesc">Handle to the previously initialized probabilities tensor descriptor.</param>
 /// <param name="probs">Pointer to a previously initialized probabilities tensor.</param>
 /// <param name="labels">Pointer to a previously initialized labels list.</param>
 /// <param name="labelLengths">Pointer to a previously initialized lengths list, to walk the above labels list.</param>
 /// <param name="inputLengths">Pointer to a previously initialized list of the lengths of the timing steps in each batch.</param>
 /// <param name="costs">Pointer to the computed costs of CTC.</param>
 /// <param name="gradientsDesc">Handle to a previously initialized gradients tensor descriptor.</param>
 /// <param name="gradients">Pointer to the computed gradients of CTC.</param>
 /// <param name="algo">Enumerant that specifies the chosen CTC loss algorithm.</param>
 /// <param name="workspace">Pointer to GPU memory of a workspace needed to able to execute the specified algorithm.</param>
 public void CTCLoss(CudaDNNContext handle,
                     TensorDescriptor probsDesc,            /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the mini batch size, A is the alphabet size)  */
                     CudaDeviceVariable <double> probs,     /* probabilities after softmax, in GPU memory */
                     int[] labels,                          /* labels, in CPU memory */
                     int[] labelLengths,                    /* the length of each label, in CPU memory */
                     int[] inputLengths,                    /* the lengths of timing steps in each batch, in CPU memory */
                     CudaDeviceVariable <double> costs,     /* the returned costs of CTC, in GPU memory */
                     TensorDescriptor gradientsDesc,        /* Tensor descriptor for gradients, the dimensions are T,N,A */
                     CudaDeviceVariable <double> gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */
                     cudnnCTCLossAlgo algo,                 /* algorithm selected, supported now 0 and 1 */
                     CudaDeviceVariable <byte> workspace    /* pointer to the workspace, in GPU memory */
                     )
 {
     res = CudaDNNNativeMethods.cudnnCTCLoss(handle.Handle, probsDesc.Desc, probs.DevicePointer, labels, labelLengths, inputLengths, costs.DevicePointer,
                                             gradientsDesc.Desc, gradients.DevicePointer, algo, _desc, workspace.DevicePointer, workspace.SizeInBytes);
     Debug.Write("");//Line(String.Format("{0:G}, {1}: {2}", DateTime.Now, "cudnnCTCLoss", res));
     if (res != cudnnStatus.Success)
     {
         throw new CudaDNNException(res);
     }
 }
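Putting Example #6 together: a hedged end-to-end sketch. The tensor shapes follow the T,N,A convention documented above; all variable names and sizes are illustrative, and ctcLossDesc, ctx, probsDesc, gradientsDesc, labels, labelLengths, inputLengths and algo are assumed to be prepared as in Example #5.

     // Illustrative values: T timing steps, N sequences per batch, A alphabet size.
     int T = 100, N = 16, A = 28;
     CudaDeviceVariable<double> probs     = new CudaDeviceVariable<double>(T * N * A); // softmax output, already on the GPU
     CudaDeviceVariable<double> gradients = new CudaDeviceVariable<double>(T * N * A); // filled by the call below
     CudaDeviceVariable<double> costs     = new CudaDeviceVariable<double>(N);         // one cost per sequence

     // Query and allocate the workspace as in Example #5, then run the loss itself.
     SizeT wsSize = ctcLossDesc.CTCLoss(ctx, probsDesc, labels, labelLengths, inputLengths, gradientsDesc, algo);
     CudaDeviceVariable<byte> workspace = new CudaDeviceVariable<byte>(wsSize);
     ctcLossDesc.CTCLoss(ctx, probsDesc, probs, labels, labelLengths, inputLengths,
                         costs, gradientsDesc, gradients, algo, workspace);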