public void TestMmCuda()
{
    if (Torch.IsCudaAvailable()) {
        TestMmGen(DeviceType.CUDA);
    }
}
public void CopyCpuToCuda()
{
    TorchTensor cpu = FloatTensor.Ones(new long[] { 2, 2 });
    Assert.Equal("cpu", cpu.DeviceString);

    if (Torch.IsCudaAvailable()) {
        var cuda = cpu.Cuda();
        Assert.Equal("cuda:0", cuda.DeviceString);

        // Copy back to CPU to inspect the elements
        var cpu2 = cuda.Cpu();
        Assert.Equal("cpu", cpu2.DeviceString);

        var data = cpu2.Data<float>();
        for (int i = 0; i < 4; i++) {
            Assert.Equal(1, data[i]);
        }
    } else {
        Assert.Throws<InvalidOperationException>(() => cpu.Cuda());
    }
}
public void TestCatCuda()
{
    if (Torch.IsCudaAvailable()) {
        var zeros = FloatTensor.Zeros(new long[] { 1, 9 }).Cuda();
        var ones = FloatTensor.Ones(new long[] { 1, 9 }).Cuda();
        var centroids = new TorchTensor[] { zeros, ones }.Cat(0);

        var shape = centroids.Shape;
        Assert.Equal(new long[] { 2, 9 }, shape);
        Assert.Equal(DeviceType.CUDA, centroids.DeviceType);
    }
}
public void ValidateIssue145()
{
    // TorchTensor.DataItem gives a hard crash on GPU tensor
    if (Torch.IsCudaAvailable()) {
        var scalar = Float32Tensor.from(3.14f, Device.CUDA);
        Assert.Throws<InvalidOperationException>(() => scalar.DataItem<float>());

        var tensor = Float32Tensor.zeros(new long[] { 10, 10 }, Device.CUDA);
        Assert.Throws<InvalidOperationException>(() => tensor.Data<float>());
        Assert.Throws<InvalidOperationException>(() => tensor.Bytes());
    }
}
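// A minimal sketch (not part of the original suite) of the safe pattern implied by
// ValidateIssue145: move a CUDA tensor to host memory before reading it, rather than
// calling DataItem<T>()/Data<T>() on the GPU-resident tensor. The helper name
// ReadGpuScalar is hypothetical, and the lowercase cpu() call is assumed to match the
// cuda()/to(device) style used in TestTrainingConv2dCUDA below.
private static float ReadGpuScalar(TorchTensor gpuScalar)
{
    // Copy to the CPU first; the element can then be read without crashing.
    return gpuScalar.cpu().DataItem<float>();
}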
public void TestTrainingConv2dCUDA()
{
    if (Torch.IsCudaAvailable()) {
        var device = Device.CUDA;

        using (Module conv1 = Conv2d(3, 4, 3, stride: 2),
               lin1 = Linear(4 * 13 * 13, 32),
               lin2 = Linear(32, 10))
        using (var seq = Sequential(
            ("conv1", conv1),
            ("r1", ReLU(inPlace: true)),
            ("drop1", Dropout(0.1)),
            ("flat1", Flatten()),
            ("lin1", lin1),
            ("r2", ReLU(inPlace: true)),
            ("lin2", lin2))) {

            seq.to(device);

            var optimizer = NN.Optimizer.Adam(seq.parameters());
            var loss = mse_loss(NN.Reduction.Sum);

            using (TorchTensor x = Float32Tensor.randn(new long[] { 64, 3, 28, 28 }, device: device),
                   y = Float32Tensor.randn(new long[] { 64, 10 }, device: device)) {

                float initialLoss = loss(seq.forward(x), y).ToSingle();
                float finalLoss = float.MaxValue;

                for (int i = 0; i < 10; i++) {
                    var eval = seq.forward(x);
                    var output = loss(eval, y);
                    var lossVal = output.ToSingle();

                    finalLoss = lossVal;

                    optimizer.zero_grad();
                    output.backward();
                    optimizer.step();
                }

                Assert.True(finalLoss < initialLoss);
            }
        }
    } else {
        Assert.Throws<InvalidOperationException>(() => Float32Tensor.randn(new long[] { 64, 3, 28, 28 }).cuda());
    }
}
public void CopyCudaToCpu()
{
    if (Torch.IsCudaAvailable()) {
        var cuda = FloatTensor.Ones(new long[] { 2, 2 }, DeviceType.CUDA);
        Assert.Equal("cuda:0", cuda.DeviceString);

        var cpu = cuda.Cpu();
        Assert.Equal("cpu", cpu.DeviceString);

        var data = cpu.Data<float>();
        for (int i = 0; i < 4; i++) {
            Assert.Equal(1, data[i]);
        }
    } else {
        Assert.Throws<InvalidOperationException>(() => { FloatTensor.Ones(new long[] { 2, 2 }, DeviceType.CUDA); });
    }
}
public void TestGeneratorState()
{
    // This test fails intermittently with CUDA. Just skip it.
    if (Torch.IsCudaAvailable()) {
        return;
    }

    // After restoring a saved RNG state, the next number should be the
    // same as right after the snapshot.
    lock (_lock) {
        using (var gen = Torch.ManualSeed(4711)) {
            // Take a snapshot
            var state = gen.State;
            Assert.NotNull(state);

            // Generate a number
            var val1 = Float32Tensor.randn(new long[] { 1 });
            var value1 = val1[0].ToSingle();

            // Generate a different number
            var val2 = Float32Tensor.randn(new long[] { 1 });
            var value2 = val2[0].ToSingle();
            Assert.NotEqual(value1, value2);

            // Restore the state
            gen.State = state;

            // Generate the first number again.
            var val3 = Float32Tensor.randn(new long[] { 1 });
            var value3 = val3[0].ToSingle();
            Assert.Equal(value1, value3);
        }
    }
}
public void TestDeviceCount()
{
    //var shape = new long[] { 2, 2 };

    var isCudaAvailable = Torch.IsCudaAvailable();
    var isCudnnAvailable = Torch.IsCudnnAvailable();
    var deviceCount = Torch.CudaDeviceCount();

    if (isCudaAvailable) {
        Assert.True(deviceCount > 0);
        Assert.True(isCudnnAvailable);
    } else {
        Assert.Equal(0, deviceCount);
        Assert.False(isCudnnAvailable);
    }

    //TorchTensor t = Float32Tensor.ones(shape);
    //Assert.Equal(shape, t.Shape);
    //Assert.Equal(1.0f, t[0, 0].ToSingle());
    //Assert.Equal(1.0f, t[1, 1].ToSingle());
}
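// A minimal sketch of the device-fallback pattern the CUDA tests above all repeat:
// run on the GPU when Torch.IsCudaAvailable() reports one, otherwise stay on the CPU.
// The helper name PickDevice is hypothetical, and Device.CPU is assumed to exist
// alongside the Device.CUDA value used in ValidateIssue145 and TestTrainingConv2dCUDA.
private static Device PickDevice()
{
    return Torch.IsCudaAvailable() ? Device.CUDA : Device.CPU;
}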