コード例 #1
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        public void TestAutoGradMode()
        {
            // A gradient-tracking tensor shared by both scopes below.
            var x = Float32Tensor.RandomN(new long[] { 2, 3 }, requiresGrad: true);

            // With autograd disabled, Backward() must fail with a native error.
            using (var disabled = new AutoGradMode(false))
            {
                Assert.False(AutoGradMode.IsAutogradEnabled());
                var total = x.Sum();
                Assert.Throws <ExternalException>(() => total.Backward());
            }

            // With autograd re-enabled, d(sum(x))/dx is a tensor of all ones.
            using (var enabled = new AutoGradMode(true))
            {
                Assert.True(AutoGradMode.IsAutogradEnabled());
                var total = x.Sum();
                total.Backward();

                var grad = x.Grad();
                Assert.False(grad.Handle == IntPtr.Zero);

                var values = grad.Data <float>();
                for (var i = 0; i < 2 * 3; i++)
                {
                    Assert.Equal(1.0, values[i]);
                }
            }
        }
コード例 #2
0
                // Factory for the Gaussian negative-log-likelihood loss
                // (mirrors torch.nn.functional.gaussian_nll_loss):
                //   loss_i = 0.5 * (log(var_i) + (input_i - target_i)^2 / var_i)
                // 'full' adds the constant term 0.5 * D * log(2*pi);
                // 'eps' clamps the variance from below for numerical stability.
                public static GaussianNLLLoss gaussian_nll_loss(bool full = false, float eps = 1e-8f, Reduction reduction = Reduction.Mean)
                {
                    return((Tensor input, Tensor target, Tensor variance) => {
                        // Flatten everything to (batch, features).
                        input = input.view(input.shape[0], -1);
                        target = target.view(target.shape[0], -1);

                        // FIX: the original condition was inverted — it threw when the
                        // shapes DID match (and 'long[] ==' is reference equality, so
                        // it effectively never fired). Compare the flattened
                        // dimensions element-wise instead.
                        if (target.shape[0] != input.shape[0] || target.shape[1] != input.shape[1])
                        {
                            throw new ArgumentException("input and target must have the same shape");
                        }

                        variance = variance.view(target.shape[0], -1);
                        // The variance either matches the feature width or is a single
                        // broadcastable column.
                        if (variance.shape[1] != input.shape[1] && variance.shape[1] != 1)
                        {
                            throw new ArgumentException("variance has the wrong shape");
                        }

                        if ((variance < 0).any().DataItem <bool>())
                        {
                            throw new ArgumentException("variance has negative entry/entries");
                        }

                        // Clamp the variance at eps so log() and the division stay finite.
                        variance = variance.clone().max(Float32Tensor.from(eps));

                        var loss = 0.5 * (variance.log() + (input - target).square() / variance).view(input.shape[0], -1).sum(dimensions: new long[] { 1 });

                        if (full)
                        {
                            // Constant term of the full Gaussian NLL.
                            loss = loss + 0.5 * input.shape[1] * MathF.Log(2 * MathF.PI);
                        }

                        return (reduction == Reduction.Mean) ? loss.mean() : (reduction == Reduction.Sum) ? loss.sum() : loss;
                    });
                }
コード例 #3
0
        public void TestTrainingAdamWDefaults()
        {
            // Small MLP: 1000 -> 100 -> ReLU -> Dropout -> 10.
            var lin1 = Linear(1000, 100);
            var lin2 = Linear(100, 10);
            var seq  = Sequential(("lin1", lin1), ("relu1", ReLU()), ("drop1", Dropout(0.1)), ("lin2", lin2));

            var x = Float32Tensor.randn(new long[] { 64, 1000 });
            var y = Float32Tensor.randn(new long[] { 64, 10 });

            var optimizer = torch.optim.AdamW(seq.parameters());
            var loss      = mse_loss(Reduction.Sum);

            float initialLoss = loss(seq.forward(x), y).ToSingle();
            float finalLoss   = float.MaxValue;

            // Ten optimization steps; only the final loss is compared below.
            for (var step = 0; step < 10; step++)
            {
                var output = loss(seq.forward(x), y);
                finalLoss = output.ToSingle();

                optimizer.zero_grad();
                output.backward();
                optimizer.step();
            }

            // Training with AdamW defaults should reduce the loss overall.
            Assert.True(finalLoss < initialLoss);
        }
コード例 #4
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        // End-to-end autograd check with fixed (hard-coded) input, scaler, and
        // linear-layer weights: verifies that after one backward pass the
        // gradients of the scaler, weight, and bias all have rank 2.
        public void TestGrad2()
        {
            var y = Float32Tensor.RandomN(new long[] { 32, 1 });
            // Fixed 32x11 input batch (values baked in for reproducibility).
            var input = new double[] { -2.75, 0.77, -0.61, 0.14, 1.39, 0.38, -0.53, -0.5, -2.13, -0.39, 0.46, -0.61, -0.37, -0.12, 0.55, -1, 0.84, -0.02, 1.3, -0.24, -0.5, -2.12, -0.85, -0.91, 1.81, 0.02, -0.78, -1.41, -1.09, -0.65, 0.9, -0.37, -0.22, 0.28, 1.05, -0.24, 0.3, -0.99, 0.19, 0.32, -0.95, -1.19, -0.63, 0.75, 0.16, 0.15, 0.3, -0.69, 0.2, -0.4, -0.67, 0.18, -1.43, -0.61, -0.78, -0.11, -1.07, -1.71, -0.45, -0.6, 0.05, -1.59, 1.24, 0.62, 0.01, 1.35, -0.9, -1.25, 1.62, -1.45, 0.92, 1.51, -0.19, -1.33, -0.01, -0.13, 0.1, -1.34, 1.23, 0.57, -0.24, 0.5, 0.71, -0.15, -1.37, -1.03, 1.8, 1.4, -0.63, 0.8, -0.97, -0.64, 0.51, 0.52, 0.95, 0.86, 0.43, 0.73, -1.38, -0.56, 0.44, 1.2, -1.45, -0.07, 1.88, 1.57, 0.38, -2.2, -0.56, -1.52, -0.17, 1.38, -1.02, -1.61, -0.13, -0.44, -0.37, 0.23, 1.75, 0.83, -0.02, -1.91, -0.23, -0.47, -1.41, -1.01, -0.91, -0.56, -1.72, 1.47, 0.31, 0.24, 0.48, 2.06, 0.07, -0.96, 1.03, -0.4, -0.64, -0.85, 0.42, -0.33, 0.85, -0.11, -1.24, -0.71, -1.04, -0.37, -0.37, 0.84, -0.9, -1.63, -2.91, -0.71, 0.09, 1.64, -1.1, -1.05, 0.51, 0.57, 0.19, 0.36, 1.36, 1.45, 0.35, -1.66, -0.65, 0.47, 1.95, -0.32, 0.19, -2.06, 0.5, 1.03, 0.94, -0.65, -2.94, 0.41, 1.13, 0.95, -0.02, 1.12, 0.19, 0.66, -0.77, -0.39, 0.59, -1.58, -0.67, 0.88, 0.26, -0.63, 0.49, 1.38, 1.48, -0.55, 0.4, 0.65, 0.19, 0.25, 0.03, -0.31, 0.75, 2.16, -1.36, 0.05, 0.22, 0.65, 1.28, 0.42, 1.35, -0.08, 1.1, 0.25, 0.44, 1.06, -1.78, 0.47, 1.38, 0.43, -1.56, 0.14, -0.22, 1.48, 0.04, 0.33, 0.1, 0.2, -0.99, 1.04, 0.61, -0.4, 0.96, 0.4, 0.5, 0.1, 0.02, 0.01, 0.22, 1.45, -0.77, 0.69, 0.95, 0.96, -0.09, -0.26, 0.22, -1.61, 1.86, -0.06, -0.34, -0.35, 0.55, -1.08, 1.29, 0.92, 0.16, 0.55, -0.01, 0.2, -0.61, -0.28, -2.17, -0.46, 1.63, 1.61, 0.64, 0.32, -0.75, 0.33, 0.3, -1.15, 0.42, -0.06, -1.14, 1.62, -0.9, -0.39, 0.4, 1.52, -0.43, 1.22, -0.32, -0.02, 1, -0.92, 0.11, 0.8, -0.99, -0.26, -2.85, -1.13, 0.49, -0.63, -0.54, -0.86, -0.97, -0.9, 0.23, 1.26, -1.78, -0.84, -0.48, 0.35, -1.13, -2.23, 0.1, 0.95, 
1.27, 0.08, -2.21, 0.67, -0.2, 0.6, -1.14, 0.65, -0.73, -0.01, 0.9, -1.33, -1.16, 0.29, 1.16, 1.19, 0.84, 0.66, -1.55, -0.58, 1.85, -1.16, -0.95, 0.98, -0.1, -1.47, 0.78, -0.75, -1.32, 0.61, -0.5, -1, -0.42, 0.96, -1.39, 0.08, -1.82, 0.51, -0.71, -0.02, 2.32, -0.71, 0.08, -1.07 }.ToTorchTensor(new long[] { 32, 11 }).ToType(ScalarType.Float32);
            var inputs = new TorchTensor[] { input };
            // Per-feature scaling factors (1x11), tracked for gradients.
            var scaler = new double[] { 0.2544529, 0.3184713, 0.2597403, 0.3246753, 0.3144654, 0.3322259, 0.3436426, 0.3215434, 0.308642, 0.3154574, 0.3448276 }.ToTorchTensor(new long[] { 1, 11 }).ToType(ScalarType.Float32).RequiresGrad(true);
            var linear = Linear(11, 1, true);

            // Overwrite the layer's parameters with fixed values so the test is
            // deterministic apart from y.
            linear.Bias   = new double[] { 373.8864 }.ToTorchTensor(new long[] { 1, 1 }).ToType(ScalarType.Float32).RequiresGrad(true);
            linear.Weight = new double[] { 300.2818, -0.5905267, 286.2787, 0.1970505, 0.9004903, 0.1373157, 55.85495, 11.43741, 1.525748, 0.4299785, 239.9356 }.ToTorchTensor(new long[] { 1, 11 }).ToType(ScalarType.Float32).RequiresGrad(true);

            // Cat of a single tensor is a no-op shape-wise; then scale and predict.
            var afterCat    = inputs.Cat(1);
            var afterScaler = afterCat * scaler;
            var prediction  = linear.Forward(afterScaler);

            var loss   = MSE();
            var output = loss(prediction, y);

            linear.ZeroGrad();

            output.Backward();

            var scalerGrad = scaler.Grad();
            var weightGrad = linear.Weight.Grad();
            var biasGrad   = linear.Bias.Grad();

            // All three gradients should be 2-D, mirroring their parameters.
            Assert.True(scalerGrad.Shape.Length == 2);
            Assert.True(weightGrad.Shape.Length == 2);
            Assert.True(biasGrad.Shape.Length == 2);
        }
コード例 #5
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        public void TestTrainingAdam()
        {
            var lin1 = Linear(1000, 100);
            var lin2 = Linear(100, 10);
            var seq  = Sequential(("lin1", lin1), ("relu1", Relu()), ("lin2", lin2));

            var x = Float32Tensor.RandomN(new long[] { 64, 1000 });
            var y = Float32Tensor.RandomN(new long[] { 64, 10 });

            double learning_rate = 0.00004f;
            float  prevLoss      = float.MaxValue;
            var    optimizer     = NN.Optimizer.Adam(seq.GetParameters(), learning_rate);
            var    loss          = MSE(NN.Reduction.Sum);

            // Adam with this small learning rate is expected to drive the MSE
            // loss strictly down on every one of the ten steps.
            for (var step = 0; step < 10; step++)
            {
                var output  = loss(seq.Forward(x), y);
                var current = output.ToSingle();

                Assert.True(current < prevLoss);
                prevLoss = current;

                optimizer.ZeroGrad();
                output.Backward();
                optimizer.Step();
            }
        }
コード例 #6
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        // Verifies that the functional MaxPool2D entry point returns a TorchTensor.
        public void MaxPool2DObjectInitialized()
        {
            TorchTensor ones = Float32Tensor.Ones(new long[] { 2, 2, 2 });
            var         obj  = Functions.MaxPool2D(ones, new long[] { 2, 2 }, new long[] { 2, 2 });

            // Idiomatic xUnit type assertion instead of comparing typeof()/GetType().
            Assert.IsType <TorchTensor>(obj);
        }
コード例 #7
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        public void TestGrad()
        {
            // Build a small MLP and run a single backward pass through it.
            var lin1 = Linear(1000, 100);
            var lin2 = Linear(100, 10);
            var seq  = Sequential(
                ("lin1", lin1),
                ("relu1", Relu()),
                ("lin2", lin2));

            var x = Float32Tensor.RandomN(new long[] { 64, 1000 }, requiresGrad: true);
            var y = Float32Tensor.RandomN(new long[] { 64, 10 }, requiresGrad: true);

            var loss   = MSE(NN.Reduction.Sum);
            var output = loss(seq.Forward(x), y);

            seq.ZeroGrad();
            output.Backward();

            // Touch every parameter's gradient; the test passes when none of
            // these accesses throw.
            foreach (var parameter in seq.GetParameters())
            {
                var grad = parameter.Grad();
            }
        }
コード例 #8
0
ファイル: test_npz_write.cs プロジェクト: maklem/libnpy
        // Writes two tensors into an NPZ archive and byte-compares the result
        // against a pre-built reference asset.
        static void TestWrite(bool compressed, ref int result)
        {
            string filename = compressed ? "test_compressed.npz" : "test.npz";

            byte[] expected = File.ReadAllBytes(Test.AssetPath(filename));

            UInt8Tensor     color  = Test.Tensor <UInt8Tensor, byte, UInt8Buffer>(new Shape(new uint[] { 5, 5, 3 }));
            Float32Tensor   depth  = Test.Tensor <Float32Tensor, float, Float32Buffer>(new Shape(new uint[] { 5, 5 }));
            string          path   = Path.GetRandomFileName();

            try
            {
                // DEFLATED vs STORED selects the zip compression method.
                NPZOutputStream stream = new NPZOutputStream(path, compressed ? CompressionMethod.DEFLATED : CompressionMethod.STORED);

                stream.Write("color.npy", color);
                stream.Write("depth.npy", depth);
                stream.Close();

                byte[] actual = File.ReadAllBytes(path);

                string tag = "c#_npz_write";

                if (compressed)
                {
                    tag += "_compressed";
                }
                Test.AssertEqual <byte, byte[]>(expected, actual, ref result, tag);
            }
            finally
            {
                // FIX: always remove the temp file — the original leaked it
                // whenever the comparison (or any earlier call) threw.
                File.Delete(path);
            }
        }
コード例 #9
0
ファイル: TestTraining.cs プロジェクト: stjordanis/TorchSharp
        public void TestTrainingSGDDefaults()
        {
            var lin1 = Linear(1000, 100);
            var lin2 = Linear(100, 10);
            var seq  = Sequential(("lin1", lin1), ("relu1", ReLU()), ("lin2", lin2));

            var x = Float32Tensor.randn(new long[] { 64, 1000 });
            var y = Float32Tensor.randn(new long[] { 64, 10 });

            double learning_rate = 0.00004f;
            var    optimizer     = NN.Optimizer.SGD(seq.parameters(), learning_rate);
            var    loss          = mse_loss(NN.Reduction.Sum);

            float initialLoss = loss(seq.forward(x), y).ToSingle();
            float finalLoss   = float.MaxValue;

            // Ten SGD steps; only the overall improvement is asserted.
            for (var step = 0; step < 10; step++)
            {
                var output = loss(seq.forward(x), y);
                finalLoss = output.ToSingle();

                optimizer.zero_grad();
                output.backward();
                optimizer.step();
            }

            Assert.True(finalLoss < initialLoss);
        }
コード例 #10
0
        // Regression test (issue #315): running LBFGS with a closure must not
        // corrupt or prematurely free native tensor handles.
        public void ValidateIssue315_3()
        {
            var lin1 = Linear(1000, 100);
            var lin2 = Linear(100, 10);
            var seq  = Sequential(("lin1", lin1), ("relu1", ReLU()), ("lin2", lin2));

            using var x = Float32Tensor.randn(new long[] { 64, 1000 });
            using var y = Float32Tensor.randn(new long[] { 64, 10 });

            double learning_rate = 0.00004f;
            var    optimizer     = torch.optim.LBFGS(seq.parameters(), learning_rate);
            var    loss          = nn.functional.mse_loss(Reduction.Sum);

            // LBFGS may re-evaluate the model multiple times per step, so step()
            // takes a closure that recomputes the loss and its gradients.
            Func <Tensor> closure = () => {
                using var eval = seq.forward(x);
                var output = loss(eval, y);

                var l = output.ToSingle();

                optimizer.zero_grad();

                output.backward();
                return(output);
            };

            optimizer.step(closure);

            // Force finalization so any native-handle lifetime bug surfaces
            // inside this test rather than later.
            GC.Collect();
            GC.WaitForPendingFinalizers();
        }
コード例 #11
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
 public void TestErrorHandling()
 {
     // Mismatched input/target lengths (2 vs 3) must surface as an
     // ExternalException from the native loss computation.
     using (TorchTensor input = Float32Tensor.From(new float[] { 0.5f, 1.5f }))
         using (TorchTensor target = Float32Tensor.From(new float[] { 1f, 2f, 3f }))
         {
             var poissonLoss = PoissonNLL();
             Assert.Throws <ExternalException>(() => poissonLoss(input, target));
         }
 }
コード例 #12
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
 // Smoke test: PoissonNLL with logInput and full both enabled must not throw.
 public void TestPoissonNLLLoss2()
 {
     using (TorchTensor input = Float32Tensor.Random(new long[] { 5, 2 }))
         using (TorchTensor target = Float32Tensor.Random(new long[] { 5, 2 }))
         {
             // Discard instead of an unused local; only the absence of an
             // exception is being tested here.
             _ = PoissonNLL(true, true)(input, target);
         }
 }
コード例 #13
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        // Average-pooling an all-ones tensor with a 2x2 kernel yields all ones
        // of the pooled shape.
        public void AvgPool2DTensor()
        {
            TorchTensor ones = Float32Tensor.Ones(new long[] { 4, 2, 2, 2 });
            var         obj  = ones.AvgPool2D(new long[] { 2, 2 });

            // Idiomatic xUnit type assertion instead of comparing typeof()/GetType().
            Assert.IsType <TorchTensor>(obj);
            Assert.Equal(Float32Tensor.Ones(new long[] { 4, 2, 1, 1 }), obj);
        }
コード例 #14
0
            // Builds the standard causal (look-ahead) attention mask for a
            // sequence of 'size' tokens: positions a query may attend to become
            // 0.0, future positions become -inf (softmax then zeroes them out).
            public TorchTensor GenerateSquareSubsequentMask(long size)
            {
                // triu().transpose(0,1) of an all-true matrix gives a
                // lower-triangular boolean mask: true where attention is allowed.
                var mask = (Float32Tensor.ones(new long[] { size, size }) == 1).triu().transpose(0, 1);

                // Convert to float, map blocked -> -inf and allowed -> 0, then
                // move to this module's device ('device' is a field defined
                // elsewhere in the enclosing class).
                return(mask.to_type(ScalarType.Float32)
                       .masked_fill(mask == 0, float.NegativeInfinity)
                       .masked_fill(mask == 1, 0.0f).to(device));
            }
コード例 #15
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        // Checks that a bias tensor assigned to a Linear layer can be read back
        // with the same element count.
        public void TestSetGetBiasInLinear()
        {
            // NOTE(review): the layer has 100 output features but the bias here
            // has 1000 elements. The assertions only compare element counts, so
            // the test passes either way — confirm whether 1000 is intentional.
            var lin  = Linear(1000, 100, true);
            var bias = Float32Tensor.Ones(new long[] { 1000 });

            lin.Bias = bias;
            Assert.True(!(lin.Bias is null));

            Assert.Equal(lin.Bias?.NumberOfElements, bias.NumberOfElements);
        }
コード例 #16
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
 public void TestPoissonNLLLoss()
 {
     using (TorchTensor input = Float32Tensor.From(new float[] { 0.5f, 1.5f, 2.5f }))
         using (TorchTensor target = Float32Tensor.From(new float[] { 1f, 2f, 3f }))
         {
             // Hand-computed Poisson NLL with log input: exp(x_i) - y_i * x_i.
             var expected = ((TorchTensor)input.Exp()) - target * input;

             // Each reduction mode must agree with the matching fold of the
             // element-wise loss.
             Assert.True(expected.Equal(PoissonNLL(reduction: NN.Reduction.None)(input, target)));
             Assert.True(expected.Sum().Equal(PoissonNLL(reduction: NN.Reduction.Sum)(input, target)));
             Assert.True(expected.Mean().Equal(PoissonNLL(reduction: NN.Reduction.Mean)(input, target)));
         }
 }
コード例 #17
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        public void TestSetGrad()
        {
            var tensor = Float32Tensor.Random(new long[] { 10, 10 });

            // Fresh tensors do not track gradients...
            Assert.False(tensor.IsGradRequired);

            // ...until requested, and the flag can be toggled back off again.
            tensor.RequiresGrad(true);
            Assert.True(tensor.IsGradRequired);

            tensor.RequiresGrad(false);
            Assert.False(tensor.IsGradRequired);
        }
コード例 #18
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        // Verifies that a model whose forward path is chosen by a constructor
        // flag only produces gradients for the parameters actually used in the
        // executed path (2 of 4 for modT, 3 of 4 for modF).
        public void TestGradConditional()
        {
            // CondModel is declared elsewhere in this file; the boolean selects
            // which branch its Forward() takes.
            var modT = new CondModel("modT", true);
            var modF = new CondModel("modF", false);

            // Both variants register the same four parameters regardless of path.
            var psT = modT.GetParameters();

            Assert.Equal(4, psT.Length);

            var psF = modF.GetParameters();

            Assert.Equal(4, psF.Length);

            var x = Float32Tensor.RandomN(new long[] { 64, 1000 }, requiresGrad: true);
            var y = Float32Tensor.RandomN(new long[] { 64, 10 }, requiresGrad: true);

            modT.Train();

            var eval   = modT.Forward(x);
            var loss   = MSE(NN.Reduction.Sum);
            var output = loss(eval, y);

            modT.ZeroGrad();

            output.Backward();
            var gradCounts = 0;

            // Count parameters that received a gradient (non-null native handle).
            foreach (var parm in modT.GetParameters())
            {
                var grad = parm.Grad();
                gradCounts += grad.Handle == IntPtr.Zero ? 0 : 1;
            }

            // The 'true' path only touches two of the four parameters.
            Assert.Equal(2, gradCounts);

            //{ "grad can be implicitly created only for scalar outputs (_make_grads at ..\\..\\torch\\csrc\\autograd\\autograd.cpp:47)\n(no backtrace available)"}
            modF.Train();

            eval   = modF.Forward(x);
            output = loss(eval, y);

            modF.ZeroGrad();

            output.Backward();
            gradCounts = 0;

            foreach (var parm in modF.GetParameters())
            {
                var grad = parm.Grad();
                gradCounts += grad.Handle == IntPtr.Zero ? 0 : 1;
            }

            // The 'false' path touches three of the four parameters.
            Assert.Equal(3, gradCounts);
        }
コード例 #19
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        // Checks that a bias tensor assigned to a Linear layer is read back
        // element-for-element unchanged.
        public void TestLinearEditBias()
        {
            var lin  = Linear(1000, 100, true);
            var bias = Float32Tensor.RandomN(new long[] { 100 });

            lin.Bias = bias;

            // Hoist the Data<float>() accessors out of the loop: each call
            // marshals the native buffer, which the original redid 200 times.
            var actual   = lin.Bias.Data <float>();
            var expected = bias.Data <float>();

            for (int i = 0; i < 100; i++)
            {
                Assert.Equal(actual[i], expected[i]);
            }
        }
コード例 #20
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        public void EvalSequence()
        {
            // Forward pass through a 1000 -> 100 -> 10 MLP; the test passes if
            // no exception is thrown.
            var lin1 = Linear(1000, 100);
            var lin2 = Linear(100, 10);
            var seq  = Sequential(
                ("lin1", lin1),
                ("relu1", Relu()),
                ("lin2", lin2));

            var input  = Float32Tensor.RandomN(new long[] { 64, 1000 }, requiresGrad: true);
            var result = seq.Forward(input);
        }
コード例 #21
0
            // Precomputes sinusoidal positional encodings (as in "Attention Is
            // All You Need"): even feature indices get sin, odd indices get cos,
            // at wavelengths decaying geometrically with the feature index.
            public PositionalEncoding(long dmodel, double dropout, int maxLen = 5000) : base("PositionalEncoding")
            {
                this.dropout = Dropout(dropout);
                var pe       = Float32Tensor.zeros(new long[] { maxLen, dmodel });
                // Column vector of positions 0..maxLen-1.
                var position = Float32Tensor.arange(0, maxLen, 1).unsqueeze(1);
                // exp(-log(10000) * 2i / dmodel) for each even feature index 2i.
                var divTerm  = (Float32Tensor.arange(0, dmodel, 2) * (-Math.Log(10000.0) / dmodel)).exp();

                // Interleave: even slots <- sin, odd slots <- cos.
                pe[TorchTensorIndex.Ellipsis, TorchTensorIndex.Slice(0, null, 2)] = (position * divTerm).sin();
                pe[TorchTensorIndex.Ellipsis, TorchTensorIndex.Slice(1, null, 2)] = (position * divTerm).cos();
                // Add and swap dims so the table broadcasts over the batch axis.
                this.pe = pe.unsqueeze(0).transpose(0, 1);

                RegisterComponents();
            }
コード例 #22
0
        public void ValidateIssue145()
        {
            // Regression test (issue #145): reading data directly from a CUDA
            // tensor used to hard-crash; it must throw InvalidOperationException.
            if (Torch.IsCudaAvailable())
            {
                var scalar = Float32Tensor.from(3.14f, Device.CUDA);
                Assert.Throws <InvalidOperationException>(() => scalar.DataItem <float>());

                var tensor = Float32Tensor.zeros(new long[] { 10, 10 }, Device.CUDA);
                Assert.Throws <InvalidOperationException>(() => tensor.Data <float>());
                Assert.Throws <InvalidOperationException>(() => tensor.Bytes());
            }
        }
コード例 #23
0
        public Tensor forward(Tensor input)
        {
            // Draw one uniform sample; apply the wrapped transform with
            // probability p, otherwise pass the input through untouched.
            using (var chance = Float32Tensor.rand(1))
            {
                if (chance.DataItem <float>() < p)
                {
                    return(transform.forward(input));
                }

                return(input);
            }
        }
コード例 #24
0
ファイル: TestTraining.cs プロジェクト: stjordanis/TorchSharp
        // Trains a small conv net on the GPU for ten Adam steps and checks the
        // loss improved; on machines without CUDA it instead checks that moving
        // a tensor to the GPU throws.
        public void TestTrainingConv2dCUDA()
        {
            if (Torch.IsCudaAvailable())
            {
                var device = Device.CUDA;

                // Conv(3->4, k=3, stride 2) on 28x28 input yields 4x13x13,
                // which is flattened into the first linear layer.
                using (Module conv1 = Conv2d(3, 4, 3, stride: 2),
                       lin1 = Linear(4 * 13 * 13, 32),
                       lin2 = Linear(32, 10))

                    using (var seq = Sequential(
                               ("conv1", conv1),
                               ("r1", ReLU(inPlace: true)),
                               ("drop1", Dropout(0.1)),
                               ("flat1", Flatten()),
                               ("lin1", lin1),
                               ("r2", ReLU(inPlace: true)),
                               ("lin2", lin2))) {
                        // Move the whole model to the GPU before creating data.
                        seq.to(device);

                        var optimizer = NN.Optimizer.Adam(seq.parameters());
                        var loss      = mse_loss(NN.Reduction.Sum);

                        using (TorchTensor x = Float32Tensor.randn(new long[] { 64, 3, 28, 28 }, device: device),
                               y = Float32Tensor.randn(new long[] { 64, 10 }, device: device)) {
                            float initialLoss = loss(seq.forward(x), y).ToSingle();
                            float finalLoss   = float.MaxValue;

                            // Ten optimization steps; only the overall
                            // improvement is asserted below.
                            for (int i = 0; i < 10; i++)
                            {
                                var eval    = seq.forward(x);
                                var output  = loss(eval, y);
                                var lossVal = output.ToSingle();

                                finalLoss = lossVal;

                                optimizer.zero_grad();

                                output.backward();

                                optimizer.step();
                            }
                            Assert.True(finalLoss < initialLoss);
                        }
                    }
            }
            else
            {
                // Without CUDA, .cuda() must fail loudly rather than silently
                // fall back to the CPU.
                Assert.Throws <InvalidOperationException>(() => Float32Tensor.randn(new long[] { 64, 3, 28, 28 }).cuda());
            }
        }
コード例 #25
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        public void AvgPool3DBackwardTensorExplicitDivisor()
        {
            // Backward of 3D average pooling with divisorOverride: scaling the
            // gradient back up by the divisor (6) must reconstruct the uniform
            // all-ones input.
            var ones       = Float32Tensor.Ones(new long[] { 4, 2, 2, 2, 2 });
            var kernelSize = new long[] { 2, 2, 2 };
            var avg        = Float32Tensor.Ones(new long[] { 4, 2, 1, 1, 1 });
            var res        = avg.AvgPool3DBackward(ones, kernelSize, divisorOverride: 6) * 6.0;

            var expected = ones[0, 0, 0, 0, 0].ToSingle();
            var actual   = res[0, 0, 0, 0, 0].ToSingle();

            // Spot-check one element with a tolerance, then the whole tensor.
            Assert.True(Math.Abs(expected - actual) < 0.00001);
            Assert.True(res.AllClose(ones));
        }
コード例 #26
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        // Backward of 2D average pooling: scaling the gradient by the kernel
        // area (4) must reconstruct the uniform all-ones input.
        public void AvgPool2DBackwardTensor()
        {
            var ones       = Float32Tensor.Ones(new long[] { 4, 2, 2, 2 });
            var kernelSize = new long[] { 2, 2 };
            var avg        = Float32Tensor.Ones(new long[] { 4, 2, 1, 1 });
            var res        = avg.AvgPool2DBackward(ones, kernelSize) * 4.0;

            var ones0000 = ones[0, 0, 0, 0].ToSingle();
            var res0000  = res[0, 0, 0, 0].ToSingle();

            // FIX: exact '==' on floating point is brittle; compare with a
            // tolerance, matching the AvgPool3D sibling test above.
            Assert.True(Math.Abs(ones0000 - res0000) < 0.00001);
            // This gets back to the original uniform input
            Assert.True(res.AllClose(ones));
        }
コード例 #27
0
ファイル: TestTorchSharp.cs プロジェクト: losttech/TorchSharp
        public void ExplicitDisposal()
        {
            // Allocate many 256MB tensors; disposing each one eagerly keeps the
            // process from depending on (unreliable) finalization for memory.
            const int iterations = 25;

            for (var i = 0; i < iterations; i++)
            {
                Console.WriteLine("ExplicitDisposal: Loop iteration {0}", i);

                using (var x = Float32Tensor.empty(new long[] { 64000, 1000 }, device: torch.CPU)) { }
            }

            Console.WriteLine("Hello World!");
        }
コード例 #28
0
        private void ThreadFunc()
        {
            // Repeatedly push a value through a tiny ReLU + DoubleIt pipeline
            // from this thread, disposing each result as it goes.
            using var net = nn.Sequential(
                      ("relu", nn.ReLU()),
                      ("double", new DoubleIt())
                      );

            using var @in = Float32Tensor.from(3);

            for (var iteration = 0; iteration < 1000; iteration++)
            {
                using var @out = net.forward(@in);
            }
        }
コード例 #29
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        public void TestCustomModule()
        {
            // A custom module constructed with one registered parameter "test".
            var module = new TestModule("test", Float32Tensor.RandomN(new long[] { 2, 2 }), true);

            var name = module.GetName();
            Assert.NotNull(name);
            Assert.Equal("test", name);

            Assert.True(module.HasParameter("test"));

            // Exactly one parameter should be reported.
            var ps = module.GetParameters();
            var n  = ps.Length;
            Assert.Equal(1, n);
        }
コード例 #30
0
ファイル: NN.cs プロジェクト: AkillesAILimited/TorchSharp
        // Checks that a weight tensor assigned to a Linear layer is the tensor
        // surfaced first by GetParameters().
        public void TestLinearEditWeightsAndBiasGetParameters()
        {
            // NOTE(review): the bias has 100 elements but the layer has 1000
            // outputs; only the weight shapes are asserted below, so the test
            // passes regardless — confirm whether 100 is intentional.
            var lin     = Linear(1000, 1000, true);
            var bias    = Float32Tensor.RandomN(new long[] { 100 });
            var weights = Float32Tensor.RandomN(new long[] { 1000, 1000 });

            lin.Bias   = bias;
            lin.Weight = weights;

            var parameters = lin.GetParameters().ToArray();

            // Convention: parameters[0] is the weight tensor.
            Assert.Equal(lin.Weight.Shape.Length, parameters[0].Shape.Length);
            Assert.Equal(lin.Weight.Shape[0], parameters[0].Shape[0]);
            Assert.Equal(lin.Weight.Shape[1], parameters[0].Shape[1]);
        }