/// <summary>
/// Verifies that LongTensor.Ones produces a tensor of the requested shape filled with 1s.
/// </summary>
public void CreateLongTensorOnes()
{
    var shape = new long[] { 2, 2 };
    TorchTensor t = LongTensor.Ones(shape);
    Assert.Equal(shape, t.Shape);
    // Read items with the tensor's element type (Int64); the original read Int32
    // from a 64-bit element tensor.
    Assert.Equal(1L, t[0, 0].DataItem<long>());
    Assert.Equal(1L, t[1, 1].DataItem<long>());
}
/// <summary>
/// Forward pass through the LayerNorm module.
/// </summary>
/// <param name="tensor">Input tensor</param>
/// <returns>The output tensor.</returns>
public TorchTensor forward(TorchTensor tensor)
{
    IntPtr ptr = THSNN_LayerNorm_forward(handle.DangerousGetHandle(), tensor.Handle);
    if (ptr == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(ptr);
}
/// <summary>
/// Forward pass through the RReLU module.
/// </summary>
/// <param name="tensor">Input tensor</param>
/// <returns>The output tensor.</returns>
public TorchTensor forward(TorchTensor tensor)
{
    IntPtr ptr = THSNN_RReLU_forward(handle, tensor.Handle);
    if (ptr == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(ptr);
}
/// <summary>
/// Forward pass through the PixelUnshuffle module.
/// </summary>
/// <param name="tensor">Input tensor</param>
/// <returns>The output tensor.</returns>
public override TorchTensor forward(TorchTensor tensor)
{
    IntPtr ptr = THSNN_PixelUnshuffle_forward(handle, tensor.Handle);
    if (ptr == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(ptr);
}
/// <summary>
/// Computes the pairwise distance between the two input tensors.
/// </summary>
/// <param name="input1">First input tensor</param>
/// <param name="input2">Second input tensor</param>
/// <returns>The distance tensor.</returns>
public TorchTensor forward(TorchTensor input1, TorchTensor input2)
{
    IntPtr ptr = THSNN_PairwiseDistance_forward(handle, input1.Handle, input2.Handle);
    if (ptr == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(ptr);
}
/// <summary>
/// Forward pass through the AdaptiveAvgPool2d module.
/// </summary>
/// <param name="tensor">Input tensor</param>
/// <returns>The pooled output tensor.</returns>
public TorchTensor Forward(TorchTensor tensor)
{
    IntPtr ptr = THSNN_AdaptiveAvgPool2d_forward(handle.DangerousGetHandle(), tensor.Handle);
    if (ptr == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(ptr);
}
/// <summary>
/// Forward pass through the GRU layer.
/// </summary>
/// <param name="input">Input tensor</param>
/// <param name="h0">Optional initial hidden state; the native default is used when null.</param>
/// <returns>A tuple of (output, final hidden state).</returns>
public (TorchTensor, TorchTensor) forward(TorchTensor input, TorchTensor? h0 = null)
{
    var initialState = h0?.Handle ?? IntPtr.Zero;
    IntPtr output = THSNN_GRU_forward(handle, input.Handle, initialState, out IntPtr hN);
    if (output == IntPtr.Zero || hN == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return (new TorchTensor(output), new TorchTensor(hN));
}
/// <summary>
/// Looks up the embeddings for the given index tensor.
/// </summary>
/// <param name="input">Input tensor of indices</param>
/// <returns>The embedding tensor.</returns>
public override TorchTensor forward(TorchTensor input)
{
    IntPtr ptr = THSNN_Embedding_forward(handle, input.Handle);
    if (ptr == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(ptr);
}
/// <summary>
/// Verifies that HalfTensor.Ones produces a 2x2 tensor filled with 1s.
/// </summary>
public void CreateHalfTensorOnes()
{
    var shape = new long[] { 2, 2 };
    TorchTensor ones = HalfTensor.Ones(shape);
    Assert.Equal(shape, ones.Shape);
    // Half-precision values are surfaced as float by ReadHalf; check first and last element.
    Assert.Equal(1.0f, ones.ReadHalf(0));
    Assert.Equal(1.0f, ones.ReadHalf(3));
}
/// <summary>
/// Verifies that BoolTensor.Ones produces a tensor of the requested shape filled with true.
/// </summary>
public void CreateBoolTensorOnes()
{
    var shape = new long[] { 2, 2 };
    TorchTensor t = BoolTensor.Ones(shape);
    Assert.Equal(shape, t.Shape);
    // Assert.True is the idiomatic form; the original boxed the expected value
    // with (object)true to force an Assert.Equal overload.
    Assert.True(t[0, 0].DataItem<bool>());
    Assert.True(t[1, 1].DataItem<bool>());
}
/// <summary>
/// Concatenating a [1,9] zeros tensor and a [1,9] ones tensor along dim 0 yields shape [2,9].
/// </summary>
public void TestCat()
{
    var parts = new TorchTensor[] {
        FloatTensor.Zeros(new long[] { 1, 9 }),
        FloatTensor.Ones(new long[] { 1, 9 })
    };
    var stacked = parts.Cat(0);
    Assert.Equal(new long[] { 2, 9 }, stacked.Shape);
}
/// <summary>
/// Samples the input using the given flow-field grid.
/// </summary>
/// <param name="input">Input tensor</param>
/// <param name="grid">Flow-field grid tensor</param>
/// <param name="mode">Interpolation mode</param>
/// <param name="paddingMode">Padding mode for out-of-grid locations</param>
/// <param name="alignCorners">Tri-state flag; null leaves the choice to the native side.</param>
/// <returns>The sampled output tensor.</returns>
static public TorchTensor GridSample(TorchTensor input, TorchTensor grid, GridSampleMode mode = GridSampleMode.Bilinear, GridSamplePaddingMode paddingMode = GridSamplePaddingMode.Zeros, bool? alignCorners = null)
{
    // Encode the nullable bool for native interop: 0 = unset, 1 = true, 2 = false.
    byte ac;
    if (!alignCorners.HasValue) {
        ac = 0;
    } else {
        ac = alignCorners.Value ? (byte)1 : (byte)2;
    }
    IntPtr ptr = THSNN_grid_sample(input.Handle, grid.Handle, (byte)mode, (byte)paddingMode, ac);
    if (ptr == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(ptr);
}
/// <summary>
/// Indexing with six indices reads and writes individual elements of a 6-D tensor.
/// </summary>
public void GetSetItem6()
{
    var shape = new long[] { 2, 3, 4, 5, 6, 7 };
    TorchTensor tensor = FloatTensor.Ones(shape);
    Assert.Equal(shape, tensor.Shape);

    // Freshly-created ones tensor: first and last corners read back 1.
    Assert.Equal(1.0f, tensor[0, 0, 0, 0, 0, 0].DataItem<float>());
    Assert.Equal(1.0f, tensor[1, 2, 3, 4, 5, 6].DataItem<float>());

    // Overwrite the last corner and read it back.
    tensor[1, 2, 3, 4, 5, 6] = FloatTensor.From(2.0f);
    Assert.Equal(2.0f, tensor[1, 2, 3, 4, 5, 6].DataItem<float>());
}
/// <summary>
/// Verifies PoissonNLL loss against a hand-computed per-element loss of
/// exp(input) - target * input, under all three reductions:
/// None must match element-wise, Sum must match the sum, Mean must match the mean.
/// </summary>
public void TestPoissonNLLLoss() { using (TorchTensor input = FloatTensor.From(new float[] { 0.5f, 1.5f, 2.5f })) using (TorchTensor target = FloatTensor.From(new float[] { 1f, 2f, 3f })) { var componentWiseLoss = ((TorchTensor)input.Exp()) - target * input; Assert.True(componentWiseLoss.Equal(NN.LossFunction.PoissonNLL(reduction: NN.Reduction.None)(input, target))); Assert.True(componentWiseLoss.Sum().Equal(NN.LossFunction.PoissonNLL(reduction: NN.Reduction.Sum)(input, target))); Assert.True(componentWiseLoss.Mean().Equal(NN.LossFunction.PoissonNLL(reduction: NN.Reduction.Mean)(input, target))); } }
/// <summary>
/// Computes the numerical rank of the input matrix.
/// </summary>
/// <param name="input">The input tensor.</param>
/// <param name="tol">Optional tolerance; when null, NegativeInfinity is passed with hasValue=false so the native side chooses its default.</param>
/// <param name="hermitian">Indicates whether the input should be treated as Hermitian.</param>
/// <returns>A tensor holding the computed rank.</returns>
public static TorchTensor matrix_rank(TorchTensor input, double? tol = null, bool hermitian = false)
{
    // The original wrapped this in an 'unsafe' block, but no pointer code is used here.
    var res = THSLinalg_matrix_rank(input.Handle, tol ?? double.NegativeInfinity, tol.HasValue, hermitian);
    if (res == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(res);
}
/// <summary>
/// Creates an EmbeddingBag module from a pre-trained weight tensor.
/// An EmbeddingBag stores embeddings of a fixed dictionary and size and aggregates them per bag;
/// the input is a list of indices, the output the aggregated embeddings.
/// </summary>
/// <param name="embeddings">FloatTensor of weights in two dimensions; dim 0 is passed as num_embeddings, dim 1 as embedding_dim.</param>
/// <param name="freeze">If true (the default), the tensor does not get updated during learning.</param>
/// <param name="max_norm">If given, each embedding vector with norm larger than max_norm is renormalized to norm max_norm.</param>
/// <param name="norm_type">The p of the p-norm to compute for the max_norm option. Default 2.</param>
/// <param name="scale_grad_by_freq">If true, scales gradients by the inverse frequency of the words in the mini-batch. Default: false.</param>
/// <param name="mode">Aggregation mode (see EmbeddingBagMode); passed to the native side as a long.</param>
/// <param name="sparse">If true, the gradient w.r.t. the weight matrix will be a sparse tensor. Default: false.</param>
/// <param name="include_last_offset">Forwarded to the native implementation.</param>
/// <returns>A new EmbeddingBag wrapping the native handle.</returns>
/// <remarks>Only a limited number of optimizers support sparse gradients: currently optim.SGD (CUDA and CPU), optim.SparseAdam (CUDA and CPU) and optim.Adagrad (CPU).</remarks>
public static EmbeddingBag from_pretrained(TorchTensor embeddings, bool freeze = true, double? max_norm = null, double norm_type = 2.0, bool scale_grad_by_freq = false, EmbeddingBagMode mode = EmbeddingBagMode.Mean, bool sparse = false, bool include_last_offset = false)
{
    // The native call receives (value, hasValue) pairs for the optional max_norm.
    var res = THSNN_EmbeddingBag_from_pretrained(embeddings.Handle, freeze,
        max_norm ?? 0.0, max_norm.HasValue,
        norm_type, scale_grad_by_freq, (long)mode, sparse, include_last_offset,
        out var boxedHandle);
    if (res == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new EmbeddingBag(res, boxedHandle);
}
/// <summary>
/// Runs the shared stem module, then dispatches to one of two branches based on _isTrue:
/// the single fbT1 module, or the fbF1 -> fbF2 pipeline.
/// </summary>
/// <param name="input">Input tensor</param>
/// <returns>The branch's output tensor.</returns>
public override TorchTensor Forward(TorchTensor input)
{
    using (var stem = fb.Forward(input)) {
        return _isTrue
            ? fbT1.Forward(stem)
            : fbF2.Forward(fbF1.Forward(stem));
    }
}
/// <summary>
/// Concatenating CUDA tensors keeps the result on the CUDA device; the test is a
/// no-op when CUDA is not available.
/// </summary>
public void TestCatCuda()
{
    if (!Torch.IsCudaAvailable()) {
        return;
    }
    var zeros = FloatTensor.Zeros(new long[] { 1, 9 }).Cuda();
    var ones = FloatTensor.Ones(new long[] { 1, 9 }).Cuda();
    var stacked = new TorchTensor[] { zeros, ones }.Cat(0);
    Assert.Equal(new long[] { 2, 9 }, stacked.Shape);
    Assert.Equal(DeviceType.CUDA, stacked.DeviceType);
}
/// <summary>
/// Builds a table of sinusoidal positional encodings and registers the module's components.
/// </summary>
/// <param name="dmodel">Embedding (channel) dimension.</param>
/// <param name="dropout">Dropout probability for the module's Dropout component.</param>
/// <param name="maxLen">Number of positions pre-computed in the table. Default 5000.</param>
public PositionalEncoding(long dmodel, double dropout, int maxLen = 5000) : base("PositionalEncoding")
{
    this.dropout = Dropout(dropout);
    // pe[p, i] will hold the encoding for position p, channel i.
    var pe = Float32Tensor.zeros(new long[] { maxLen, dmodel });
    // Column vector of positions 0..maxLen-1, shape (maxLen, 1).
    var position = Float32Tensor.arange(0, maxLen, 1).unsqueeze(1);
    // divTerm = exp(-i * ln(10000) / dmodel) for even channel indices i = 0, 2, 4, ...
    var divTerm = (Float32Tensor.arange(0, dmodel, 2) * (-Math.Log(10000.0) / dmodel)).exp();
    // Even channels get sin, odd channels get cos of the same argument.
    pe[TorchTensorIndex.Ellipsis, TorchTensorIndex.Slice(0, null, 2)] = (position * divTerm).sin();
    pe[TorchTensorIndex.Ellipsis, TorchTensorIndex.Slice(1, null, 2)] = (position * divTerm).cos();
    // unsqueeze(0) then transpose(0, 1) yields shape (maxLen, 1, dmodel) —
    // presumably so it can be added across the batch dimension in forward; confirm there.
    this.pe = pe.unsqueeze(0).transpose(0, 1);
    RegisterComponents();
}
/// <summary>
/// Creates an Embedding module from a pre-trained weight tensor.
/// An Embedding is a simple lookup table of fixed dictionary and size, often used for word
/// embeddings: the input is a list of indices, the output the corresponding embedding vectors.
/// </summary>
/// <param name="embeddings">FloatTensor of weights in two dimensions; dim 0 is passed as num_embeddings, dim 1 as embedding_dim.</param>
/// <param name="freeze">If true (the default), the tensor does not get updated during learning.</param>
/// <param name="padding_idx">If given, pads the output with the embedding vector at padding_idx (initialized to zeros) whenever it encounters that index.</param>
/// <param name="max_norm">If given, each embedding vector with norm larger than max_norm is renormalized to norm max_norm.</param>
/// <param name="norm_type">The p of the p-norm to compute for the max_norm option. Default 2.</param>
/// <param name="scale_grad_by_freq">If true, scales gradients by the inverse frequency of the words in the mini-batch. Default: false.</param>
/// <param name="sparse">If true, the gradient w.r.t. the weight matrix will be a sparse tensor. Default: false.</param>
/// <returns>A new Embedding wrapping the native handle.</returns>
/// <remarks>Only a limited number of optimizers support sparse gradients: currently optim.SGD (CUDA and CPU), optim.SparseAdam (CUDA and CPU) and optim.Adagrad (CPU).</remarks>
public static Embedding from_pretrained(TorchTensor embeddings, bool freeze = true, long? padding_idx = null, double? max_norm = null, double norm_type = 2.0, bool scale_grad_by_freq = false, bool sparse = false)
{
    // The native call receives (value, hasValue) pairs for the optional padding_idx and max_norm.
    var res = THSNN_Embedding_from_pretrained(embeddings.Handle, freeze,
        padding_idx ?? -1, padding_idx.HasValue,
        max_norm ?? 0.0, max_norm.HasValue,
        norm_type, scale_grad_by_freq, sparse,
        out var boxedHandle);
    if (res == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new Embedding(res, boxedHandle);
}
/// <summary>
/// Squared Euclidean distances computed against stacked centroids (via broadcasting) must
/// agree with distances computed against each centroid individually.
/// The original test ended with Assert.True(true) and verified nothing.
/// </summary>
public void TestSquareEuclideanDistance()
{
    var input = new double[] { 0.1, 0.1, 0.1, 0.1, 0.2, 0.1, 0.2, 0.1, 0.1 }
        .ToTorchTensor(new long[] { 9 })
        .ToType(ScalarType.Float);
    var zeros = FloatTensor.Zeros(new long[] { 1, 9 });
    var ones = FloatTensor.Ones(new long[] { 1, 9 });
    var centroids = new TorchTensor[] { zeros, ones }.Cat(0);

    var distanceFromZero = input.Reshape(new long[] { -1, 1, 9 }).Sub(zeros).Pow(2.ToScalar()).Sum(new long[] { 2 });
    var distanceFromOne = input.Reshape(new long[] { -1, 1, 9 }).Sub(ones).Pow(2.ToScalar()).Sum(new long[] { 2 });
    var distanceFromCentroids = input.Reshape(new long[] { -1, 1, 9 }).Sub(centroids).Pow(2.ToScalar()).Sum(new long[] { 2 });

    // One input row, one centroid each -> [1, 1]; two stacked centroids -> [1, 2].
    Assert.Equal(new long[] { 1, 1 }, distanceFromZero.Shape);
    Assert.Equal(new long[] { 1, 1 }, distanceFromOne.Shape);
    Assert.Equal(new long[] { 1, 2 }, distanceFromCentroids.Shape);

    // The batched (broadcast) computation must match the per-centroid ones.
    Assert.Equal(distanceFromZero[0, 0].DataItem<float>(), distanceFromCentroids[0, 0].DataItem<float>());
    Assert.Equal(distanceFromOne[0, 0].DataItem<float>(), distanceFromCentroids[0, 1].DataItem<float>());
}
/// <summary>
/// Forward pass through the TransformerEncoder stack.
/// </summary>
/// <param name="src">Source sequence tensor</param>
/// <param name="src_mask">Optional attention mask; IntPtr.Zero is passed when null.</param>
/// <param name="src_key_padding_mask">Optional key-padding mask; IntPtr.Zero is passed when null.</param>
/// <returns>The encoded output tensor.</returns>
public TorchTensor forward(TorchTensor src, TorchTensor? src_mask = null, TorchTensor? src_key_padding_mask = null)
{
    var maskHandle = src_mask?.Handle ?? IntPtr.Zero;
    var paddingHandle = src_key_padding_mask?.Handle ?? IntPtr.Zero;
    IntPtr ptr = THSNN_TransformerEncoder_forward(handle, src.Handle, maskHandle, paddingHandle);
    if (ptr == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(ptr);
}
/// <summary>
/// Trains a small Conv2d + Linear network for 10 Adam steps on random CUDA data and
/// asserts that the summed-MSE loss decreased from its initial value.
/// When CUDA is unavailable, asserts that moving a tensor to CUDA throws InvalidOperationException.
/// </summary>
public void TestTrainingConv2dCUDA() { if (Torch.IsCudaAvailable()) { var device = Device.CUDA; using (Module conv1 = Conv2d(3, 4, 3, stride: 2), lin1 = Linear(4 * 13 * 13, 32), lin2 = Linear(32, 10)) using (var seq = Sequential( ("conv1", conv1), ("r1", ReLU(inPlace: true)), ("drop1", Dropout(0.1)), ("flat1", Flatten()), ("lin1", lin1), ("r2", ReLU(inPlace: true)), ("lin2", lin2))) { seq.to(device); var optimizer = NN.Optimizer.Adam(seq.parameters()); var loss = mse_loss(NN.Reduction.Sum); using (TorchTensor x = Float32Tensor.randn(new long[] { 64, 3, 28, 28 }, device: device), y = Float32Tensor.randn(new long[] { 64, 10 }, device: device)) { float initialLoss = loss(seq.forward(x), y).ToSingle(); float finalLoss = float.MaxValue; /* standard loop: forward, loss, zero_grad, backward, step */ for (int i = 0; i < 10; i++) { var eval = seq.forward(x); var output = loss(eval, y); var lossVal = output.ToSingle(); finalLoss = lossVal; optimizer.zero_grad(); output.backward(); optimizer.step(); } Assert.True(finalLoss < initialLoss); } } } else { Assert.Throws <InvalidOperationException>(() => Float32Tensor.randn(new long[] { 64, 3, 28, 28 }).cuda()); } }
/// <summary>
/// Expand broadcasts a [2] ones tensor to shape [3, 2]; every element reads back 1.
/// </summary>
public void ExpandTest()
{
    TorchTensor ones = FloatTensor.Ones(new long[] { 2 });
    TorchTensor onesExpanded = ones.Expand(new long[] { 3, 2 });

    // xUnit takes the expected value first; the original had the arguments reversed.
    Assert.Equal(new long[] { 3, 2 }, onesExpanded.Shape);
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 2; j++) {
            // Compare as float; the original compared a double literal against a float value.
            Assert.Equal(1.0f, onesExpanded[i, j].DataItem<float>());
        }
    }
}
/// <summary>
/// Forward pass through the LocalResponseNorm module.
/// </summary>
/// <param name="tensor">Input tensor; must have at least 3 dimensions.</param>
/// <returns>The normalized output tensor.</returns>
/// <exception cref="ArgumentException">Thrown when the input has fewer than 3 dimensions.</exception>
public override TorchTensor forward(TorchTensor tensor)
{
    if (tensor.Dimensions < 3) {
        throw new ArgumentException($"Invalid number of dimensions for LocalResponseNorm argument: {tensor.Dimensions}");
    }
    IntPtr ptr = THSNN_LocalResponseNorm_forward(handle.DangerousGetHandle(), tensor.Handle);
    if (ptr == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(ptr);
}
/// <summary>
/// One-hot encodes an Int64 index tensor.
/// </summary>
/// <param name="x">Input tensor; element type must be Int64.</param>
/// <param name="num_classes">Number of classes, or -1 to let the native side infer it.</param>
/// <returns>The one-hot encoded tensor.</returns>
/// <exception cref="ArgumentException">Thrown when the input element type is not Int64.</exception>
static public TorchTensor OneHot(TorchTensor x, long num_classes = -1)
{
    if (x.Type != ScalarType.Int64) {
        throw new ArgumentException("OneHot input tensor must have elements of type Int64");
    }
    IntPtr ptr = THSNN_one_hot(x.Handle, num_classes);
    if (ptr == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(ptr);
}
/// <summary>
/// Forward pass through the BatchNorm3d module.
/// </summary>
/// <param name="tensor">Input tensor; must be exactly 5-dimensional (N, C, D, H, W).</param>
/// <returns>The normalized output tensor.</returns>
/// <exception cref="ArgumentException">Thrown when the input is not 5-dimensional.</exception>
public TorchTensor forward(TorchTensor tensor)
{
    if (tensor.Dimensions != 5) {
        // Name the concrete module (cf. the LocalResponseNorm check) so the caller
        // knows which layer rejected the input; the old message said just "BatchNorm".
        throw new ArgumentException($"Invalid number of dimensions for BatchNorm3d argument: {tensor.Dimensions}");
    }
    var res = THSNN_BatchNorm3d_forward(handle.DangerousGetHandle(), tensor.Handle);
    if (res == IntPtr.Zero) {
        Torch.CheckForErrors();
    }
    return new TorchTensor(res);
}
/// <summary>
/// Computes a norm of the input, optionally over the given dimensions.
/// </summary>
/// <param name="input">The input tensor.</param>
/// <param name="dims">Dimensions to reduce over; null passes a null pointer and zero length to the native side.</param>
/// <param name="keepdim">Whether reduced dimensions are retained with size 1.</param>
/// <returns>The norm tensor.</returns>
public static TorchTensor norm(TorchTensor input, long[]? dims = null, bool keepdim = false)
{
    unsafe {
        fixed (long* dimsPtr = dims) {
            var dimCount = dims?.Length ?? 0;
            var res = THSLinalg_norm_opt(input.Handle, (IntPtr)dimsPtr, dimCount, keepdim);
            if (res == IntPtr.Zero) {
                Torch.CheckForErrors();
            }
            return new TorchTensor(res);
        }
    }
}
/// <summary>
/// Pads the input tensor.
/// </summary>
/// <param name="input">The input tensor.</param>
/// <param name="pad">Per-edge padding sizes.</param>
/// <param name="mode">Padding mode. Default: constant.</param>
/// <param name="value">Fill value for constant padding. Default: 0.</param>
/// <returns>The padded tensor.</returns>
static public TorchTensor Pad(TorchTensor input, long[] pad, PaddingModes mode = PaddingModes.Constant, double value = 0)
{
    unsafe {
        fixed (long* padPtr = pad) {
            var res = THSNN_pad(input.Handle, (IntPtr)padPtr, pad.Length, (byte)mode, value);
            if (res == IntPtr.Zero) {
                Torch.CheckForErrors();
            }
            return new TorchTensor(res);
        }
    }
}
/// <summary>
/// Solves the tensor equation input . x = other over the given dimensions.
/// </summary>
/// <param name="input">The coefficient tensor.</param>
/// <param name="other">The right-hand-side tensor.</param>
/// <param name="dims">Dimensions of input forwarded to the native solver.</param>
/// <returns>The solution tensor.</returns>
public static TorchTensor tensorsolve(TorchTensor input, TorchTensor other, long[] dims)
{
    unsafe {
        fixed (long* dimsPtr = dims) {
            var res = THSLinalg_tensorsolve(input.Handle, other.Handle, (IntPtr)dimsPtr, dims.Length);
            if (res == IntPtr.Zero) {
                Torch.CheckForErrors();
            }
            return new TorchTensor(res);
        }
    }
}