/// <summary>
/// Computes the convolution gradient with respect to the filter weights (cuDNN backward-filter pass).
/// </summary>
/// <param name="algo">Backward-filter algorithm selection.</param>
/// <param name="cd">2D convolution parameters (padding, stride, ...).</param>
/// <param name="workspace">Device scratch space required by the chosen algorithm.</param>
/// <param name="x">Forward-pass input tensor.</param>
/// <param name="dy">Gradient with respect to the convolution output.</param>
/// <param name="dw">Output: gradient with respect to the filter weights (overwritten).</param>
public static void ConvolutionBackwardFilter(DNNConvolutionBwdFilterAlgo algo, Cpu.ConvolutionDesc2d cd, CudaStorage workspace, NDArray x, NDArray dy, NDArray dw)
{
    using (var dnn = CudaHelpers.TSContextForTensor(x).DNNForTensor(x))
    {
        // NOTE(review): unlike the tensor/filter descriptors below, the convolution
        // descriptor is not disposed here — confirm whether GetConvDescriptor's
        // return type is IDisposable.
        var convDesc = GetConvDescriptor(cd, x.ElementType);

        using (var workspaceVar = new CudaDeviceVariable<byte>(workspace.DevicePtrAtElement(0), false, workspace.ByteLength))
        using (var inputPtr = GetDeviceVar(x))
        using (var gradOutputPtr = GetDeviceVar(dy))
        using (var gradWeightPtr = GetDeviceVar(dw))
        using (var inputDesc = GetDescriptor(x))
        using (var gradOutputDesc = GetDescriptor(dy))
        using (var gradWeightDesc = GetFilterDescriptor(dw))
        {
            // alpha = 1, beta = 0: dw is fully overwritten with the computed gradient.
            dnn.Value.ConvolutionBackwardFilter(1, inputDesc, inputPtr, gradOutputDesc, gradOutputPtr, convDesc, (cudnnConvolutionBwdFilterAlgo)algo, workspaceVar, 0, gradWeightDesc, gradWeightPtr);
        }
    }
}
/// <summary>
/// Computes the gradient of an N-d pooling operation: dx = pool_backward(x, y, dy).
/// </summary>
/// <param name="desc">Pooling parameters: mode, window dimensions, padding and strides.</param>
/// <param name="x">Forward-pass input tensor.</param>
/// <param name="y">Forward-pass output tensor.</param>
/// <param name="dx">Output: gradient with respect to the pooling input (overwritten).</param>
/// <param name="dy">Gradient with respect to the pooling output.</param>
public static void PoolingBackward(DNNPoolingDesc desc, NDArray x, NDArray y, NDArray dx, NDArray dy)
{
    using (var dnn = CudaHelpers.TSContextForTensor(x).DNNForTensor(x))
    {
        // Fix: dispose the pooling descriptor deterministically (it wraps a native
        // cuDNN handle) instead of leaving cleanup to the finalizer — consistent
        // with every other descriptor in this file.
        using (var poolingDesc = new PoolingDescriptor())
        {
            poolingDesc.SetPoolingNdDescriptor((cudnnPoolingMode)desc.Mode, cudnnNanPropagation.PropagateNan, desc.WindowDims.Length, desc.WindowDims, desc.Padding, desc.Strides);

            using (var xPtr = GetDeviceVar(x))
            using (var yPtr = GetDeviceVar(y))
            using (var dxPtr = GetDeviceVar(dx))
            using (var dyPtr = GetDeviceVar(dy))
            using (var xDesc = GetDescriptor(x))
            using (var yDesc = GetDescriptor(y))
            using (var dxDesc = GetDescriptor(dx))
            using (var dyDesc = GetDescriptor(dy))
            {
                // Note: ManagedCUDA argument names may be slightly misleading (src refers to 'y' here, and dest to 'x').
                // alpha = 1, beta = 0: dx is fully overwritten.
                dnn.Value.PoolingBackward(poolingDesc, 1, yDesc, yPtr, dyDesc, dyPtr, xDesc, xPtr, 0, dxDesc, dxPtr);
            }
        }
    }
}
/// <summary>
/// Runs a cuDNN convolution forward pass: y = conv(x, w).
/// </summary>
/// <param name="algo">Forward convolution algorithm selection.</param>
/// <param name="cd">2D convolution parameters (padding, stride, ...).</param>
/// <param name="workspace">Device scratch space required by the chosen algorithm.</param>
/// <param name="x">Input tensor.</param>
/// <param name="w">Filter (weight) tensor.</param>
/// <param name="y">Output tensor; overwritten with the convolution result.</param>
public static void ConvForward(DNNConvolutionFwdAlgo algo, Cpu.ConvolutionDesc2d cd, CudaStorage workspace, NDArray x, NDArray w, NDArray y)
{
    using (var dnn = CudaHelpers.TSContextForTensor(x).DNNForTensor(x))
    {
        // NOTE(review): the convolution descriptor is not disposed here, unlike the
        // tensor/filter descriptors below — confirm whether GetConvDescriptor's
        // return type is IDisposable.
        var convDesc = GetConvDescriptor(cd, x.ElementType);

        using (var workspaceVar = new CudaDeviceVariable<byte>(workspace.DevicePtrAtElement(0), false, workspace.ByteLength))
        using (var inputPtr = GetDeviceVar(x))
        using (var weightPtr = GetDeviceVar(w))
        using (var outputPtr = GetDeviceVar(y))
        using (var inputDesc = GetDescriptor(x))
        using (var weightDesc = GetFilterDescriptor(w))
        using (var outputDesc = GetDescriptor(y))
        {
            // alpha = 1, beta = 0: y is fully overwritten.
            dnn.Value.ConvolutionForward(1, inputDesc, inputPtr, weightDesc, weightPtr, convDesc, (cudnnConvolutionFwdAlgo)algo, workspaceVar, 0, outputDesc, outputPtr);
        }
    }
}
/// <summary>
/// Computes the gradient of an element-wise activation function via cuDNN.
/// </summary>
/// <param name="x">Forward-pass input tensor.</param>
/// <param name="y">Forward-pass output tensor.</param>
/// <param name="dx">Gradient tensor (see review note below on which of dx/dy is written).</param>
/// <param name="dy">Gradient tensor (see review note below on which of dx/dy is written).</param>
/// <param name="activationType">Type of the activation (relu, sigmoid, tanh, ...).</param>
/// <param name="clippedReluCeiling">Ceiling value used by the clipped-relu mode.</param>
public static void ActivationBackward(NDArray x, NDArray y, NDArray dx, NDArray dy, DNNActivation activationType, double clippedReluCeiling)
{
    using (var dnn = CudaHelpers.TSContextForTensor(x).DNNForTensor(x))
    {
        // Fix: dispose the activation descriptor deterministically (it wraps a native
        // cuDNN handle) instead of leaving cleanup to the finalizer — consistent
        // with every other descriptor in this file.
        using (var activationDesc = new ActivationDescriptor())
        {
            activationDesc.SetActivationDescriptor((cudnnActivationMode)activationType, cudnnNanPropagation.PropagateNan, clippedReluCeiling);

            using (var xPtr = GetDeviceVar(x))
            using (var yPtr = GetDeviceVar(y))
            using (var dxPtr = GetDeviceVar(dx))
            using (var dyPtr = GetDeviceVar(dy))
            using (var xDesc = GetDescriptor(x))
            using (var yDesc = GetDescriptor(y))
            using (var dxDesc = GetDescriptor(dx))
            using (var dyDesc = GetDescriptor(dy))
            {
                // NOTE(review): this argument order (x, dx, y, ..., dy) differs from
                // PoolingBackward in this file, which passes (y, dy, x, ..., dx) and
                // writes the result into dx. Here the beta/output slot receives dy.
                // Confirm against the ManagedCUDA ActivationBackward parameter order
                // that the intended gradient tensor is the one being written.
                // Call is intentionally left byte-identical to the original.
                dnn.Value.ActivationBackward(activationDesc, 1, xDesc, xPtr, dxDesc, dxPtr, yDesc, yPtr, 0, dyDesc, dyPtr);
            }
        }
    }
}
/// <summary>
/// Computes writeTarget = beta * src + alpha * (m1 x m2) for 2D CUDA tensors.
/// When <paramref name="result"/> is null, a destination is obtained from the result builder.
/// </summary>
/// <param name="result">Optional destination tensor; must be a CUDA tensor when supplied.</param>
/// <param name="beta">Scale applied to src.</param>
/// <param name="src">Matrix added (scaled by beta) to the product; also defines shape and type.</param>
/// <param name="alpha">Scale applied to the m1 x m2 product.</param>
/// <param name="m1">Left matrix operand.</param>
/// <param name="m2">Right matrix operand.</param>
/// <returns>The tensor holding the result.</returns>
public NDArray Addmm(NDArray result, float beta, NDArray src, float alpha, NDArray m1, NDArray m2)
{
    var context = CudaHelpers.TSContextForTensor(src);

    // All participating tensors must share a single element type.
    var elementType = src.ElementType;
    var resultTypeMismatch = result != null && result.ElementType != elementType;
    if (m1.ElementType != elementType || m2.ElementType != elementType || resultTypeMismatch)
    {
        throw new InvalidOperationException("All tensors must have the same element type");
    }

    // Operands (and the result, when given) must live in CUDA storage.
    if (result != null && !(result.Storage is CudaStorage))
    {
        throw new ArgumentException("result must be a CUDA tensor", "result");
    }

    if (!(m1.Storage is CudaStorage))
    {
        throw new ArgumentException("m1 must be a CUDA tensor", "m1");
    }

    if (!(m2.Storage is CudaStorage))
    {
        throw new ArgumentException("m2 must be a CUDA tensor", "m2");
    }

    // Only rank-2 tensors participate in the matrix product.
    if (src.DimensionCount != 2)
    {
        throw new ArgumentException("src must be a matrix", "src");
    }

    if (m1.DimensionCount != 2)
    {
        throw new ArgumentException("m1 must be a matrix", "m1");
    }

    if (m2.DimensionCount != 2)
    {
        throw new ArgumentException("m2 must be a matrix", "m2");
    }

    // src must match the (m1 rows) x (m2 cols) product shape, and inner dimensions must agree.
    var shapesCompatible = src.Shape[0] == m1.Shape[0]
                        && src.Shape[1] == m2.Shape[1]
                        && m1.Shape[1] == m2.Shape[0];
    if (!shapesCompatible)
    {
        throw new InvalidOperationException("Size mismatch");
    }

    var writeTarget = TensorResultBuilder.GetWriteTarget(result, src, true, src.Shape);

    // Seed the destination with src so GEMM's beta term accumulates the addend.
    if (writeTarget != src)
    {
        Ops.Copy(writeTarget, src);
    }

    CudaMatrixMulMM.Gemm(context, alpha, m1, m2, beta, writeTarget);
    return writeTarget;
}
/// <summary>
/// Computes the gradient of a convolution with respect to its bias term by
/// reducing dy over the non-channel dimensions.
/// </summary>
/// <param name="cd">2D convolution parameters (not referenced by the cuDNN call itself).</param>
/// <param name="dy">Gradient with respect to the convolution output.</param>
/// <param name="db">Output: gradient with respect to the bias (overwritten).</param>
public static void ConvolutionBackwardBias(Cpu.ConvolutionDesc2d cd, NDArray dy, NDArray db)
{
    using (var dnn = CudaHelpers.TSContextForTensor(dy).DNNForTensor(dy))
    using (var gradOutputPtr = GetDeviceVar(dy))
    using (var gradBiasPtr = GetDeviceVar(db))
    using (var gradOutputDesc = GetDescriptor(dy))
    using (var gradBiasDesc = GetDescriptor(db))
    {
        // alpha = 1, beta = 0: db is fully overwritten.
        dnn.Value.ConvolutionBackwardBias(1, gradOutputDesc, gradOutputPtr, 0, gradBiasDesc, gradBiasPtr);
    }
}
/// <summary>
/// Accumulates src into result in place via cuDNN AddTensor
/// (alpha = 1, beta = 1, i.e. result = src + result).
/// </summary>
/// <param name="src">Tensor to add.</param>
/// <param name="result">Tensor accumulated into.</param>
public static void AddTensor(NDArray src, NDArray result)
{
    using (var dnn = CudaHelpers.TSContextForTensor(src).DNNForTensor(src))
    using (var sourcePtr = GetDeviceVar(src))
    using (var destPtr = GetDeviceVar(result))
    using (var sourceDesc = GetDescriptor(src))
    using (var destDesc = GetDescriptor(result))
    {
        dnn.Value.AddTensor(1, sourceDesc, sourcePtr, 1, destDesc, destPtr);
    }
}
/// <summary>
/// Runs a cuDNN softmax forward pass: y = softmax(x).
/// </summary>
/// <param name="algorithm">Softmax algorithm variant (e.g. accurate/fast/log).</param>
/// <param name="mode">Dimension mode over which the softmax is computed.</param>
/// <param name="x">Input tensor.</param>
/// <param name="y">Output tensor; overwritten with the softmax result.</param>
public static void SoftmaxForward(DNNSoftmaxAlgorithm algorithm, DNNSoftmaxMode mode, NDArray x, NDArray y)
{
    using (var dnn = CudaHelpers.TSContextForTensor(x).DNNForTensor(x))
    using (var inputPtr = GetDeviceVar(x))
    using (var outputPtr = GetDeviceVar(y))
    using (var inputDesc = GetDescriptor(x))
    using (var outputDesc = GetDescriptor(y))
    {
        // alpha = 1, beta = 0: y is fully overwritten.
        dnn.Value.SoftmaxForward((cudnnSoftmaxAlgorithm)algorithm, (cudnnSoftmaxMode)mode, 1, inputDesc, inputPtr, 0, outputDesc, outputPtr);
    }
}
/// <summary>
/// Computes the gradient of a softmax: dx = softmax_backward(y, dy).
/// </summary>
/// <param name="algorithm">Softmax algorithm variant (must match the forward pass).</param>
/// <param name="mode">Dimension mode over which the softmax was computed.</param>
/// <param name="y">Forward-pass softmax output.</param>
/// <param name="dx">Output: gradient with respect to the softmax input (overwritten).</param>
/// <param name="dy">Gradient with respect to the softmax output.</param>
public static void SoftmaxBackward(DNNSoftmaxAlgorithm algorithm, DNNSoftmaxMode mode, NDArray y, NDArray dx, NDArray dy)
{
    using (var dnn = CudaHelpers.TSContextForTensor(y).DNNForTensor(y))
    using (var outputPtr = GetDeviceVar(y))
    using (var gradInputPtr = GetDeviceVar(dx))
    using (var gradOutputPtr = GetDeviceVar(dy))
    using (var outputDesc = GetDescriptor(y))
    using (var gradInputDesc = GetDescriptor(dx))
    using (var gradOutputDesc = GetDescriptor(dy))
    {
        // alpha = 1, beta = 0: dx is fully overwritten.
        dnn.Value.SoftmaxBackward((cudnnSoftmaxAlgorithm)algorithm, (cudnnSoftmaxMode)mode, 1, outputDesc, outputPtr, gradOutputDesc, gradOutputPtr, 0, gradInputDesc, gradInputPtr);
    }
}
/// <summary>
/// Runs a cuDNN N-d pooling forward pass: y = pool(x).
/// </summary>
/// <param name="desc">Pooling parameters: mode, window dimensions, padding and strides.</param>
/// <param name="x">Input tensor.</param>
/// <param name="y">Output tensor; overwritten with the pooled result.</param>
public static void PoolingForward(DNNPoolingDesc desc, NDArray x, NDArray y)
{
    using (var dnn = CudaHelpers.TSContextForTensor(x).DNNForTensor(x))
    {
        // Fix: dispose the pooling descriptor deterministically (it wraps a native
        // cuDNN handle) instead of leaving cleanup to the finalizer — consistent
        // with every other descriptor in this file.
        using (var poolingDesc = new PoolingDescriptor())
        {
            poolingDesc.SetPoolingNdDescriptor((cudnnPoolingMode)desc.Mode, cudnnNanPropagation.PropagateNan, desc.WindowDims.Length, desc.WindowDims, desc.Padding, desc.Strides);

            using (var xPtr = GetDeviceVar(x))
            using (var yPtr = GetDeviceVar(y))
            using (var xDesc = GetDescriptor(x))
            using (var yDesc = GetDescriptor(y))
            {
                // alpha = 1, beta = 0: y is fully overwritten.
                dnn.Value.PoolingForward(poolingDesc, 1, xDesc, xPtr, 0, yDesc, yPtr);
            }
        }
    }
}
/// <summary>
/// Dispatches tensor multiplication by operand rank: vector-vector dot product,
/// matrix-vector product, or matrix-matrix product.
/// </summary>
/// <param name="result">Optional destination tensor (passed through to the kernels).</param>
/// <param name="lhs">Left operand.</param>
/// <param name="rhs">Right operand.</param>
/// <returns>The tensor holding the product.</returns>
/// <exception cref="NotSupportedException">Thrown when no kernel exists for the operand ranks.</exception>
public NDArray Dot(NDArray result, NDArray lhs, NDArray rhs)
{
    var context = CudaHelpers.TSContextForTensor(lhs);
    if (lhs.DimensionCount == 1 && rhs.DimensionCount == 1)
    {
        return (CudaMatrixMulDot.Dot(context, result, lhs, rhs));
    }
    else if (lhs.DimensionCount == 2 && (rhs.DimensionCount == 1 || rhs.PossibleVector))
    {
        // Flatten rhs so vector-like shapes are accepted, then restore a column shape.
        return (CudaMatrixMulMV.Mul_M_V(context, result, lhs, rhs.Ravel()).Reshape(lhs.Shape[0], 1));
    }
    else if (lhs.DimensionCount == 2 && rhs.DimensionCount == 2)
    {
        return (CudaMatrixMulMM.Mul_M_M(context, result, lhs, rhs));
    }
    else
    {
        // Fix: the format string previously received no arguments for {0}/{1}, so
        // building the message threw a FormatException instead of the intended
        // NotSupportedException. Supply the operand ranks.
        throw new NotSupportedException(string.Format("Multiplication of {0}D with {1}D tensor is not supported", lhs.DimensionCount, rhs.DimensionCount));
    }
}