/// <summary>
/// Applies a 2D adaptive average pooling over an input signal composed of several input planes.
/// </summary>
/// <param name="x">The input tensor</param>
/// <param name="kernelSize">The target output size (H, W); the pooling window adapts to the input size</param>
/// <returns></returns>
static public TorchTensor AdaptiveAvgPool2D(TorchTensor x, long[] kernelSize)
 {
     using (var d = Modules.AdaptiveAvgPool2D(kernelSize)) {
         return d.forward(x);
     }
 }
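
Note that kernelSize here is the desired output size, not a window size; a quick sketch (Float32Tensor.randn is an assumed tensor factory in this TorchSharp build, adjust to your version's creation API):

 var x = Float32Tensor.randn(new long[] { 1, 3, 32, 32 });   // (N, C, H, W); assumed factory
 var y = AdaptiveAvgPool2D(x, new long[] { 8, 8 });          // -> (1, 3, 8, 8) for any input H, W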
 /// <summary>
 /// Reverses the PixelShuffle operation by rearranging elements in a tensor of shape (*, C, H * r, W * r) to a tensor of shape (*, C * r^2, H, W), where r is a downscale factor.
 /// This is useful for implementing efficient sub-pixel convolution with a stride of 1/r.
 /// </summary>
 /// <param name="x">Input tensor</param>
 /// <param name="downscaleFactor">Factor to increase spatial resolution by</param>
 /// <returns></returns>
 static public TorchTensor PixelUnshuffle(TorchTensor x, long downscaleFactor)
 {
     using (var d = Modules.PixelUnshuffle(downscaleFactor)) {
         return d.forward(x);
     }
 }
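
The shape transformation, concretely (a sketch; Float32Tensor.randn is an assumed tensor factory in this TorchSharp build):

 var input = Float32Tensor.randn(new long[] { 1, 2, 8, 8 });   // (N, C, H*r, W*r) with r = 2; assumed factory
 var output = PixelUnshuffle(input, 2);                        // rearranged to (1, 8, 4, 4) = (N, C*r^2, H, W)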
Example #3
 /// <summary>
 /// A simple lookup table that stores embeddings of a fixed dictionary and size.
 /// This module is often used to store word embeddings and retrieve them using indices. The input to the module is a list of indices, and the output is the corresponding word embeddings.
 /// </summary>
 /// <param name="x">An input tensor of arbitrary shape.</param>
 /// <param name="num_embeddings">Size of the dictionary of embeddings, the vocabulary size.</param>
 /// <param name="embedding_dims">The size of each embedding vector</param>
 /// <param name="padding_idx">If given, pads the output with the embedding vector at padding_idx (initialized to zeros) whenever it encounters the index.</param>
 /// <param name="max_norm">If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm.</param>
 /// <param name="norm_type">The p of the p-norm to compute for the max_norm option. Default 2.</param>
 /// <param name="scale_grad_by_freq">If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default: false.</param>
 /// <param name="sparse">If true, gradient w.r.t. weight matrix will be a sparse tensor. Default: false</param>
 /// <returns></returns>
 /// <remarks>Keep in mind that only a limited number of optimizers support sparse gradients: currently it’s optim.SGD (CUDA and CPU), optim.SparseAdam (CUDA and CPU) and optim.Adagrad (CPU)</remarks>
 static public TorchTensor Embedding(TorchTensor x, long num_embeddings, long embedding_dims, long? padding_idx = null, double? max_norm = null, double norm_type = 2.0, bool scale_grad_by_freq = false, bool sparse = false)
 {
     using (var d = Modules.Embedding(num_embeddings, embedding_dims, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)) {
         return d.forward(x);
     }
 }
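
A minimal lookup sketch (Int64Tensor.from is an assumed index-tensor factory in this TorchSharp build; adjust to your version):

 var indices = Int64Tensor.from(new long[] { 1, 2, 4, 5 });   // four indices into a 10-entry vocabulary; assumed factory
 var vectors = Embedding(indices, 10, 3);                     // returns a (4, 3) tensor, one 3-dim embedding per index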
Example #4
 /// <summary>
 /// Applies a 2D average pooling over an input signal composed of several input planes.
 /// </summary>
 /// <param name="x">The input tensor</param>
 /// <param name="kernelSize">The size of the sliding window</param>
 /// <param name="strides">The stride of the sliding window. Default value is kernelSize.</param>
 static public TorchTensor AvgPool2D(TorchTensor x, long[] kernelSize, long[] strides = null)
 {
     using (var d = Modules.AvgPool2D(kernelSize, strides)) {
         return d.forward(x);
     }
 }
Example #5
        /// <summary>
        /// Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.
        /// </summary>
        /// <param name="x"></param>
        /// <param name="features">C from an expected input of size (N,C,H,W)</param>
        /// <param name="eps">A value added to the denominator for numerical stability. Default: 1e-5</param>
        /// <param name="momentum">The value used for the running_mean and running_var computation. Can be set to None for cumulative moving average (i.e. simple average). Default: 0.1</param>
        /// <param name="affine">A boolean value that when set to True, this module has learnable affine parameters. Default: true</param>
        /// <param name="track_running_stats">A boolean value that when set to True, this module tracks the running mean and variance, and when set to False,
        /// this module does not track such statistics, and initializes statistics buffers running_mean and running_var as None.
        /// When these buffers are None, this module always uses batch statistics, in both training and eval modes. Default: true</param>
        /// <returns></returns>

        static public TorchTensor BatchNorm2d(TorchTensor x, long features, double eps = 1e-05, double momentum = 0.1, bool affine = true, bool track_running_stats = true)
        {
            using (var d = Modules.BatchNorm2d(features, eps, momentum, affine, track_running_stats)) {
                return d.forward(x);
            }
        }
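
The features argument must equal the channel dimension C of the (N, C, H, W) input; for example (a sketch, with Float32Tensor.randn assumed as the tensor factory):

 var x = Float32Tensor.randn(new long[] { 4, 3, 8, 8 });   // N = 4, C = 3, H = W = 8; assumed factory
 var y = BatchNorm2d(x, 3);                                // normalizes each of the 3 channels over N, H, W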
Example #6
 /// <summary>
 /// Applies a linear transformation to the incoming data: y = xA^T + b.
 /// </summary>
 /// <param name="inputSize">Size of each input sample</param>
 /// <param name="outputSize">Size of each output sample</param>
 /// <param name="hasBias">If false, the layer will not learn an additive bias. Default: true</param>
 static public TorchTensor Linear(TorchTensor x, long inputSize, long outputSize, bool hasBias = true)
 {
     using (var d = Modules.Linear(inputSize, outputSize, hasBias)) {
         return d.forward(x);
     }
 }
Example #7
 /// <summary>
 /// Randomly masks out entire channels. Instead of setting activations to zero as in regular dropout, the activations are set to the negative saturation value of the SELU activation function.
 /// </summary>
 /// <param name="probability">Probability of a channel being masked. Default: 0.5</param>
 static public TorchTensor FeatureAlphaDropout(TorchTensor x, double probability = 0.5)
 {
     using (var f = Modules.FeatureAlphaDropout(probability)) {
         return f.forward(x);
     }
 }
Example #8
 /// <summary>
 /// Pads the input tensor boundaries with a constant value.
 /// </summary>
 /// <param name="x">Input tensor</param>
 /// <param name="padding">The size of the padding: (padding_left , padding_right)</param>
 /// <param name="value"></param>
 /// <returns></returns>
 static public TorchTensor ConstantPad1d(TorchTensor x, long padding, double value)
 {
     using (var d = Modules.ConstantPad1d(padding, value)) {
         return d.forward(x);
     }
 }
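
A padding sketch (Float32Tensor.randn assumed as the tensor factory); the scalar padding is applied to both ends of the last dimension:

 var x = Float32Tensor.randn(new long[] { 1, 2, 4 });   // (N, C, W); assumed factory
 var y = ConstantPad1d(x, 2, 0.0);                      // pads 2 zeros on each side: (1, 2, 8)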
Example #9
 /// <summary>
 /// Rectified Linear Unit
 /// </summary>
 /// <param name="x">The input tensor</param>
 /// <param name="inPlace">Do the operation in-place. Default: False</param>
 /// <returns></returns>
 static public TorchTensor ReLU(TorchTensor x, bool inPlace = false)
 {
     using (var m = Modules.ReLU(inPlace)) {
         return m.forward(x);
     }
 }
Example #10
 /// <summary>
 /// Sigmoid activation
 /// </summary>
 /// <param name="x">The input tensor</param>
 /// <returns></returns>
 static public TorchTensor Sigmoid(TorchTensor x)
 {
     using (var m = Modules.Sigmoid()) {
         return m.forward(x);
     }
 }
Example #11
 /// <summary>
 /// Continuously Differentiable Exponential Linear Unit
 /// </summary>
 /// <param name="x">The input tensor</param>
 /// <param name="alpha">The α value for the CELU formulation. Default: 1.0</param>
 /// <param name="inPlace">Do the operation in-place. Default: False</param>
 /// <returns></returns>
 static public TorchTensor CELU(TorchTensor x, double alpha, bool inPlace = false)
 {
     using (var m = Modules.CELU(alpha, inPlace)) {
         return(m.forward(x));
     }
 }
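
CELU computes max(0, x) + min(0, alpha * (exp(x / alpha) - 1)); a usage sketch (Float32Tensor.randn assumed as the tensor factory):

 var x = Float32Tensor.randn(new long[] { 8 });   // assumed factory
 var y = CELU(x, 1.0);                            // with alpha = 1.0, CELU coincides with ELU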
Example #12
        /// <summary>
        /// Applies a 3D max pooling over an input signal composed of several input planes.
        /// </summary>
        /// <param name="x">The input signal tensor</param>
        /// <param name="kernelSize">The size of the sliding window, must be > 0.</param>
        /// <param name="strides">The stride of the sliding window, must be > 0. Default value is kernel_size.</param>
        /// <returns></returns>

        static public TorchTensor MaxPool3d(TorchTensor x, long[] kernelSize, long[] strides = null)
        {
            using (var d = Modules.MaxPool3d(kernelSize, strides)) {
                return d.forward(x);
            }
        }
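
A shape sketch (Float32Tensor.randn assumed as the tensor factory); with a 2x2x2 window and the default stride equal to the kernel size, every spatial dimension halves:

 var x = Float32Tensor.randn(new long[] { 1, 3, 8, 8, 8 });   // (N, C, D, H, W); assumed factory
 var y = MaxPool3d(x, new long[] { 2, 2, 2 });                // -> (1, 3, 4, 4, 4)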
Example #13
 /// <summary>
 /// A placeholder identity operator that returns its input unchanged.
 /// </summary>
 /// <param name="x">The input tensor</param>
 /// <returns></returns>
 static public TorchTensor Identity(TorchTensor x)
 {
     using (var d = Modules.Identity()) {
         return d.forward(x);
     }
 }
Example #14
 /// <summary>
 /// Computes the pairwise distance between vectors in input1 and input2 using the p-norm, with eps added for numerical stability; keep_dim keeps the reduced vector dimension.
 /// </summary>
 static public TorchTensor PairwiseDistance(TorchTensor input1, TorchTensor input2, double p = 2.0, double eps = 1e-6, bool keep_dim = false)
 {
     using (var f = Modules.PairwiseDistance(p, eps, keep_dim)) {
         return f.forward(input1, input2);
     }
 }
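
A distance sketch (Float32Tensor.randn assumed as the tensor factory); inputs of shape (N, D) yield one distance per row:

 var a = Float32Tensor.randn(new long[] { 4, 10 });   // assumed factory
 var b = Float32Tensor.randn(new long[] { 4, 10 });
 var d = PairwiseDistance(a, b);                      // Euclidean (p = 2) distances, shape (4)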
Example #15
 /// <summary>
 /// Applies a 3D convolution over an input signal composed of several input planes.
 /// </summary>
 /// <param name="x">The input tensor of shape (N, C_in, D, H, W)</param>
 static public TorchTensor Conv3d(TorchTensor x, long inputChannel, long outputChannel, long kernelSize, long stride = 1, long padding = 0, long dilation = 1, PaddingModes paddingMode = PaddingModes.Zeros, long groups = 1, bool bias = true)
 {
     using (var d = Modules.Conv3d(inputChannel, outputChannel, kernelSize, stride, padding, dilation, paddingMode, groups, bias)) {
         return d.forward(x);
     }
 }
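
A shape sketch (Float32Tensor.randn assumed as the tensor factory); with kernel size 3, stride 1, and no padding, each spatial dimension shrinks by 2:

 var x = Float32Tensor.randn(new long[] { 1, 3, 8, 8, 8 });   // (N, C_in, D, H, W); assumed factory
 var y = Conv3d(x, 3, 16, 3);                                 // -> (1, 16, 6, 6, 6)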
Example #16
 /// <summary>
 /// Pads the input tensor using the reflection of the input boundary.
 /// </summary>
 /// <param name="x">Input tensor</param>
 /// <param name="padding">The size of the padding: (padding_left , padding_right)</param>
 /// <returns></returns>
 static public TorchTensor ReflectionPad1d(TorchTensor x, long padding)
 {
     using (var d = Modules.ReflectionPad1d(padding)) {
         return d.forward(x);
     }
 }
Example #17
 /// <summary>
 /// Applies the log(Softmax(x)) function to an n-dimensional input tensor.
 /// </summary>
 /// <param name="x">The input tensor</param>
 /// <param name="dimension">The dimension along which log-softmax will be computed</param>
 /// <returns></returns>
 static public TorchTensor LogSoftMax(TorchTensor x, long dimension)
 {
     using (var l = Modules.LogSoftMax(dimension)) {
         return l.forward(x);
     }
 }
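
A usage sketch (Float32Tensor.randn assumed as the tensor factory); exponentiating each row of the result recovers probabilities that sum to 1:

 var logits = Float32Tensor.randn(new long[] { 4, 10 });   // a batch of 4 score vectors; assumed factory
 var logProbs = LogSoftMax(logits, 1);                     // log-probabilities along dimension 1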