Example #1
 internal static void CheckForCUDA(string device)
 {
     if (!Torch.IsCudaAvailable() && device.ToLower().Contains("cuda"))
     {
         throw new InvalidOperationException("CUDA is not available on the current machine.");
     }
 }
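
The guard above is meant to run before anything is allocated on the device the caller asked for. A minimal usage sketch, calling the method from the same class and reusing the FloatTensor.Ones overload that takes a device string (the overload used in Examples #7 and #8); the requestedDevice value is only illustrative:

 var requestedDevice = "cuda";                                       // e.g. taken from a command-line argument
 CheckForCUDA(requestedDevice);                                      // throws InvalidOperationException when CUDA is missing
 var ones = FloatTensor.Ones(new long[] { 2, 2 }, requestedDevice);  // safe to allocate once the check has passed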
Example #2
        static void Main(string[] args)
        {
            Torch.SetSeed(1);

            var cwd = Environment.CurrentDirectory;

            var device = Torch.IsCudaAvailable() ? Device.CUDA : Device.CPU;

            Console.WriteLine($"Running on {device.Type.ToString()}");

            using (var reader = TorchText.Data.AG_NEWSReader.AG_NEWS("train", device, _dataLocation)) {
                var dataloader = reader.Enumerate();

                var tokenizer = TorchText.Data.Utils.get_tokenizer("basic_english");

                var counter = new TorchText.Vocab.Counter<string>();
                foreach (var (label, text) in dataloader)
                {
                    counter.update(tokenizer(text));
                }

                var vocab = new TorchText.Vocab.Vocab(counter);

                var model = new TextClassificationModel(vocab.Count, emsize, 4).to(device);

                var loss      = cross_entropy_loss();
                var lr        = 5.0;
                var optimizer = NN.Optimizer.SGD(model.parameters(), lr);
                var scheduler = NN.Optimizer.StepLR(optimizer, 1, 0.2, last_epoch: 5);

                foreach (var epoch in Enumerable.Range(1, epochs))
                {
                    var sw = new Stopwatch();
                    sw.Start();

                    train(epoch, reader.GetBatches(tokenizer, vocab, batch_size), model, loss, optimizer);

                    sw.Stop();

                    Console.WriteLine($"\nEnd of epoch: {epoch} | lr: {scheduler.LearningRate:0.0000} | time: {sw.Elapsed.TotalSeconds:0.0}s\n");
                    scheduler.step();
                }

                using (var test_reader = TorchText.Data.AG_NEWSReader.AG_NEWS("test", device, _dataLocation)) {
                    var sw = new Stopwatch();
                    sw.Start();

                    var accuracy = evaluate(test_reader.GetBatches(tokenizer, vocab, eval_batch_size), model, loss);

                    sw.Stop();

                    Console.WriteLine($"\nEnd of training: test accuracy: {accuracy:0.00} | eval time: {sw.Elapsed.TotalSeconds:0.0}s\n");
                }
            }
        }
Example #3
        public TorchTensor Cuda()
        {
            if (!Torch.IsCudaAvailable())
            {
                throw new InvalidOperationException("CUDA is not available on the current machine.");
            }

            return new TorchTensor(THSTensor_cuda(handle));
        }
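
The method above is what the tests in Examples #7 and #8 exercise. A minimal round-trip sketch, assuming CUDA is available (on a CPU-only machine the Cuda() call throws the InvalidOperationException shown above):

        var cpu  = FloatTensor.Ones(new long[] { 2, 2 });  // starts on the CPU, cpu.Device == "cpu"
        var gpu  = cpu.Cuda();                             // copies the tensor to the GPU, gpu.Device == "cuda"
        var back = gpu.Cpu();                              // copy back so the elements can be read
        var data = back.Data<float>();                     // all four elements are 1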
Example #4
        static void Main(string[] args)
        {
            Torch.SetSeed(1);

            var cwd = Environment.CurrentDirectory;

            //var device = Device.CPU; //Torch.IsCudaAvailable() ? Device.CUDA : Device.CPU;
            var device = Torch.IsCudaAvailable() ? Device.CUDA : Device.CPU;

            Console.WriteLine($"Running on {device.Type.ToString()}");

            if (device.Type == DeviceType.CUDA)
            {
                _trainBatchSize *= 4;
                _testBatchSize  *= 4;
                _epochs         *= 16;
            }

            var sourceDir = _dataLocation;
            var targetDir = Path.Combine(_dataLocation, "test_data");

            if (!Directory.Exists(targetDir))
            {
                Directory.CreateDirectory(targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "train-images-idx3-ubyte.gz"), targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "train-labels-idx1-ubyte.gz"), targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "t10k-images-idx3-ubyte.gz"), targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "t10k-labels-idx1-ubyte.gz"), targetDir);
            }

            using (var train = new MNISTReader(targetDir, "train", _trainBatchSize, device: device, shuffle: true))
                using (var test = new MNISTReader(targetDir, "t10k", _testBatchSize, device: device))
                    //using (var model = new Model("model", device))
                    using (var model = GetModel(device))
                        using (var optimizer = NN.Optimizer.SGD(model.parameters(), 0.01, 0.5))
                        {
                            Stopwatch sw = new Stopwatch();
                            sw.Start();

                            for (var epoch = 1; epoch <= _epochs; epoch++)
                            {
                                Train(model, optimizer, nll_loss(), train, epoch, _trainBatchSize, train.Size);
                                Test(model, nll_loss(reduction: NN.Reduction.Sum), test, test.Size);

                                Console.WriteLine($"Pre-GC memory:  {GC.GetTotalMemory(false)}");
                                GC.Collect();
                                Console.WriteLine($"Post-GC memory: {GC.GetTotalMemory(false)}");
                            }

                            sw.Stop();
                            Console.WriteLine($"Elapsed time: {sw.Elapsed.TotalSeconds} s.");
                            Console.ReadLine();
                        }
        }
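
Examples #4, #5, and #9 all repeat the same block that enlarges the batch sizes (and the epoch count) when a GPU is available. A sketch of how that block could be factored into a shared helper; ScaleForCuda is a hypothetical name and the ref parameters stand in for the static fields used above:

            // Hypothetical helper mirroring the inline CUDA scaling blocks above.
            static void ScaleForCuda(Device device, ref int trainBatchSize, ref int testBatchSize, int factor)
            {
                if (device.Type == DeviceType.CUDA)
                {
                    trainBatchSize *= factor;
                    testBatchSize  *= factor;
                }
            }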
Example #5
        static void Main(string[] args)
        {
            Torch.SetSeed(1);

            var device = Torch.IsCudaAvailable() ? Device.CUDA : Device.CPU;

            if (device.Type == DeviceType.CUDA)
            {
                _trainBatchSize *= 8;
                _testBatchSize  *= 8;
                _epochs         *= 16;
            }

            Console.WriteLine();
            Console.WriteLine($"\tRunning AlexNet with {_dataset} on {device.Type.ToString()} for {_epochs} epochs");
            Console.WriteLine();

            var sourceDir = _dataLocation;
            var targetDir = Path.Combine(_dataLocation, "test_data");

            if (!Directory.Exists(targetDir))
            {
                Directory.CreateDirectory(targetDir);
                Utils.Decompress.ExtractTGZ(Path.Combine(sourceDir, "cifar-10-binary.tar.gz"), targetDir);
            }

            using (var train = new CIFARReader(targetDir, false, _trainBatchSize, shuffle: true, device: device))
                using (var test = new CIFARReader(targetDir, true, _testBatchSize, device: device))
                    using (var model = new Model("model", _numClasses, device))
                        using (var optimizer = NN.Optimizer.Adam(model.parameters(), 0.001)) {
                            Stopwatch sw = new Stopwatch();
                            sw.Start();

                            for (var epoch = 1; epoch <= _epochs; epoch++)
                            {
                                Train(model, optimizer, nll_loss(), train, epoch, _trainBatchSize, train.Size);
                                Test(model, nll_loss(), test, test.Size);
                                GC.Collect();

                                if (sw.Elapsed.TotalSeconds > 3600)
                                {
                                    break;
                                }
                            }

                            sw.Stop();
                            Console.WriteLine($"Elapsed time {sw.Elapsed.TotalSeconds} s.");
                            Console.ReadLine();
                        }
        }
Example #6
        static void Main(string[] args)
        {
            var dataset     = args.Length > 0 ? args[0] : "mnist";
            var datasetPath = Path.Join(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "..", "Downloads", dataset);

            Torch.SetSeed(1);

            var cwd = Environment.CurrentDirectory;

            var device = Torch.IsCudaAvailable() ? Device.CUDA : Device.CPU;

            Console.WriteLine($"Running MNIST on {device.Type.ToString()}");
            Console.WriteLine($"Dataset: {dataset}");

            var sourceDir = datasetPath;
            var targetDir = Path.Combine(datasetPath, "test_data");

            if (!Directory.Exists(targetDir))
            {
                Directory.CreateDirectory(targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "train-images-idx3-ubyte.gz"), targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "train-labels-idx1-ubyte.gz"), targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "t10k-images-idx3-ubyte.gz"), targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "t10k-labels-idx1-ubyte.gz"), targetDir);
            }

            if (device.Type == DeviceType.CUDA)
            {
                _trainBatchSize *= 4;
                _testBatchSize  *= 4;
            }

            var model = new Model("model", device);

            var normImage = TorchVision.Transforms.Normalize(new double[] { 0.1307 }, new double[] { 0.3081 }, device: device);

            using (MNISTReader train = new MNISTReader(targetDir, "train", _trainBatchSize, device: device, shuffle: true, transform: normImage),
                   test = new MNISTReader(targetDir, "t10k", _testBatchSize, device: device, transform: normImage)) {
                TrainingLoop(dataset, device, model, train, test);
            }
        }
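
The constants 0.1307 and 0.3081 passed to Normalize are the commonly quoted mean and standard deviation of the MNIST training images, so the transform brings the inputs to roughly zero mean and unit variance before they reach the model.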
Example #7
        public void CopyCudaToCpu()
        {
            if (Torch.IsCudaAvailable())
            {
                var cuda = FloatTensor.Ones(new long[] { 2, 2 }, "cuda");
                Assert.Equal("cuda", cuda.Device);

                var cpu = cuda.Cpu();
                Assert.Equal("cpu", cpu.Device);

                var data = cpu.Data<float>();
                for (int i = 0; i < 4; i++)
                {
                    Assert.Equal(1, data[i]);
                }
            }
            else
            {
                Assert.Throws<InvalidOperationException>(() => { FloatTensor.Ones(new long[] { 2, 2 }, "cuda"); });
            }
        }
Example #8
        public void CopyCpuToCuda()
        {
            TorchTensor cpu = FloatTensor.Ones(new long[] { 2, 2 });

            Assert.Equal("cpu", cpu.Device);

            if (Torch.IsCudaAvailable())
            {
                var cuda = cpu.Cuda();
                Assert.Equal("cuda", cuda.Device);

                // Copy back to CPU to inspect the elements
                cpu = cuda.Cpu();
                var data = cpu.Data<float>();
                for (int i = 0; i < 4; i++)
                {
                    Assert.Equal(1, data[i]);
                }
            }
            else
            {
                Assert.Throws<InvalidOperationException>(() => cpu.Cuda());
            }
        }
Example #9
        static void Main(string[] args)
        {
            var cwd = Environment.CurrentDirectory;

            var dataset     = args.Length > 0 ? args[0] : "mnist";
            var datasetPath = Path.Join(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "..", "Downloads", dataset);

            Torch.SetSeed(1);

            //var device = Device.CPU;
            var device = Torch.IsCudaAvailable() ? Device.CUDA : Device.CPU;

            Console.WriteLine($"\n  Running AdversarialExampleGeneration on {device.Type.ToString()}\n");
            Console.WriteLine($"Dataset: {dataset}");

            if (device.Type == DeviceType.CUDA)
            {
                _trainBatchSize *= 4;
                _testBatchSize  *= 4;
                _epochs         *= 4;
            }

            var sourceDir = _dataLocation;
            var targetDir = Path.Combine(_dataLocation, "test_data");

            if (!Directory.Exists(targetDir))
            {
                Directory.CreateDirectory(targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "train-images-idx3-ubyte.gz"), targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "train-labels-idx1-ubyte.gz"), targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "t10k-images-idx3-ubyte.gz"), targetDir);
                Utils.Decompress.DecompressGZipFile(Path.Combine(sourceDir, "t10k-labels-idx1-ubyte.gz"), targetDir);
            }

            MNIST.Model model = null;

            var normImage = TorchVision.Transforms.Normalize(new double[] { 0.1307 }, new double[] { 0.3081 }, device: device);

            using (var test = new MNISTReader(targetDir, "t10k", _testBatchSize, device: device, transform: normImage)) {
                var modelFile = dataset + ".model.bin";

                if (!File.Exists(modelFile))
                {
                    // We need the model to be trained first, because we want to start with a trained model.
                    Console.WriteLine($"\n  Running MNIST on {device.Type.ToString()} in order to pre-train the model.");

                    model = new MNIST.Model("model", device);

                    using (MNISTReader train = new MNISTReader(targetDir, "train", _trainBatchSize, device: device, shuffle: true, transform: normImage)) {
                        MNIST.TrainingLoop(dataset, device, model, train, test);
                    }

                    Console.WriteLine("Moving on to the Adversarial model.\n");
                }
                else
                {
                    model = new MNIST.Model("model", Device.CPU);
                    model.load(modelFile);
                }

                model.to(device);
                model.Eval();

                var epsilons = new double[] { 0, 0.05, 0.1, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50 };

                foreach (var ε in epsilons)
                {
                    var attacked = Test(model, nll_loss(), ε, test, test.Size);
                    Console.WriteLine($"Epsilon: {ε:F2}, accuracy: {attacked:P2}");
                }
            }
        }
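
The epsilon sweep at the end measures how quickly test accuracy drops as the perturbation budget grows; ε = 0 corresponds to the unattacked baseline, so the first line printed should match the accuracy of the pre-trained model.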
Example #10
        static void Main(string[] args)
        {
            Torch.SetSeed(1);

            var cwd = Environment.CurrentDirectory;

            var device = Torch.IsCudaAvailable() ? Device.CUDA : Device.CPU;

            Console.WriteLine($"Running SequenceToSequence on {device.Type.ToString()}");

            var vocab_iter = TorchText.Datasets.WikiText2("train", _dataLocation);
            var tokenizer  = TorchText.Data.Utils.get_tokenizer("basic_english");

            var counter = new TorchText.Vocab.Counter<string>();

            foreach (var item in vocab_iter)
            {
                counter.update(tokenizer(item));
            }

            var vocab = new TorchText.Vocab.Vocab(counter);

            var (train_iter, valid_iter, test_iter) = TorchText.Datasets.WikiText2(_dataLocation);

            var train_data = Batchify(ProcessInput(train_iter, tokenizer, vocab), batch_size).to(device);
            var valid_data = Batchify(ProcessInput(valid_iter, tokenizer, vocab), eval_batch_size).to(device);
            var test_data  = Batchify(ProcessInput(test_iter, tokenizer, vocab), eval_batch_size).to(device);

            var bptt = 32;

            var (data, targets) = GetBatch(train_data, 0, bptt);

            var ntokens = vocab.Count;

            var model     = new TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout).to(device);
            var loss      = cross_entropy_loss();
            var lr        = 2.50;
            var optimizer = NN.Optimizer.SGD(model.parameters(), lr);
            var scheduler = NN.Optimizer.StepLR(optimizer, 1, 0.95, last_epoch: 15);

            var totalTime = new Stopwatch();

            totalTime.Start();

            foreach (var epoch in Enumerable.Range(1, epochs))
            {
                var sw = new Stopwatch();
                sw.Start();

                train(epoch, train_data, model, loss, bptt, ntokens, optimizer);

                var val_loss = evaluate(valid_data, model, loss, lr, bptt, ntokens, optimizer);
                sw.Stop();

                Console.WriteLine($"\nEnd of epoch: {epoch} | lr: {scheduler.LearningRate:0.00} | time: {sw.Elapsed.TotalSeconds:0.0}s | loss: {val_loss:0.00}\n");
                scheduler.step();
            }

            var tst_loss = evaluate(test_data, model, loss, lr, bptt, ntokens, optimizer);

            totalTime.Stop();

            Console.WriteLine($"\nEnd of training | time: {totalTime.Elapsed.TotalSeconds:0.0}s | loss: {tst_loss:0.00}\n");
        }