// Entry point for the C# training examples: trains the simple feed-forward
// classifier on CPU and, when a GPU is available, on GPU 0.
// Todo: move to a separate unit test.
static void Main(string[] args)
{
    Console.WriteLine("Test CNTKLibraryCSTrainingExamples");
#if CPUONLY
    Console.WriteLine("======== Train model on CPU using CPUOnly build ========");
#else
    Console.WriteLine("======== Train model on CPU using GPU build ========");
#endif
    if (ShouldRunOnCpu())
    {
        var device = DeviceDescriptor.CPUDevice;
        SimpleFeedForwardClassifierTest.TrainSimpleFeedForwardClassifier(device);
    }
    if (ShouldRunOnGpu())
    {
        Console.WriteLine(" ====== Train model on GPU =====");
        var device = DeviceDescriptor.GPUDevice(0);
        SimpleFeedForwardClassifierTest.TrainSimpleFeedForwardClassifier(device);
    }
    Console.WriteLine("======== Train completes. ========");
}
/// <summary>
/// Runs a forward pass of the model on a flat input array and returns the
/// per-sample outputs.
/// Backward-compatible generalization: the evaluation device is now an
/// optional parameter; when omitted it defaults to GPU 0, the previously
/// hard-coded device, so existing callers behave identically.
/// </summary>
/// <param name="data">Flat input values; length must be a multiple of one sample's size.</param>
/// <param name="device">Device to evaluate on; null selects GPU 0.</param>
/// <returns>One list of output floats per input sample.</returns>
public IList<IList<float>> Inference(float[] data, DeviceDescriptor device = null)
{
    device = device ?? DeviceDescriptor.GPUDevice(0);

    // Number of samples packed into the flat array (input_size is the per-sample shape).
    n_samples = data.Length / (input_size[0] * input_size[1]);

    // Build a CNTK Value batch from the raw input data.
    Value inputdata = mapBatch(data, n_samples);

    // Bind the batch to the model's input variable.
    var inputDataMap = new Dictionary<Variable, Value>() { { feature, inputdata } };

    // A null Value asks CNTK to allocate the output during Evaluate.
    var outputDataMap = new Dictionary<Variable, Value>() { { model.Output, null } };

    // Forward pass.
    model.Evaluate(inputDataMap, outputDataMap, device);

    // Convert the native output Value into managed float lists.
    IList<IList<float>> output = get_output(outputDataMap, input_size, n_samples);

    // Release native memory held by the input batch.
    inputdata.Dispose();
    inputDataMap.Clear();
    outputDataMap.Clear();

    return output;
}
// Entry point: trains the LSTM sequence classifier example on GPU 0.
// NOTE(review): GPUDevice(0) throws when no GPU is present — confirm a CPU
// fallback is not needed for this example.
static void Main(string[] args)
{
    var device = DeviceDescriptor.GPUDevice(0);
    Console.WriteLine($"======== running LSTMSequenceClassifier.Train using {device.Type} ========");
    LSTMSequenceClassifier.Train(device);
}
// Entry point: runs the managed evaluation examples on CPU, then repeats them
// on GPU 0 when a GPU is available.
static void Main(string[] args)
{
#if CPUONLY
    Console.WriteLine("======== Evaluate model on CPU using CPUOnly build ========");
#else
    Console.WriteLine("======== Evaluate model on CPU using GPU build ========");
#endif
    CNTKLibraryManagedExamples.EvaluationSingleImage(DeviceDescriptor.CPUDevice);
    CNTKLibraryManagedExamples.EvaluationBatchOfImages(DeviceDescriptor.CPUDevice);
    CNTKLibraryManagedExamples.EvaluateMultipleImagesInParallel(DeviceDescriptor.CPUDevice);
    CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingOneHot(DeviceDescriptor.CPUDevice);
    CNTKLibraryManagedExamples.EvaluationBatchOfSequencesUsingOneHot(DeviceDescriptor.CPUDevice);
    if (IsGPUAvailable())
    {
        Console.WriteLine(" ====== Evaluate model on GPU =====");
        CNTKLibraryManagedExamples.EvaluationSingleImage(DeviceDescriptor.GPUDevice(0));
        CNTKLibraryManagedExamples.EvaluationBatchOfImages(DeviceDescriptor.GPUDevice(0));
        CNTKLibraryManagedExamples.EvaluateMultipleImagesInParallel(DeviceDescriptor.GPUDevice(0));
        CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingOneHot(DeviceDescriptor.GPUDevice(0));
        CNTKLibraryManagedExamples.EvaluationBatchOfSequencesUsingOneHot(DeviceDescriptor.GPUDevice(0));
    }
    Console.WriteLine("======== Evaluation completes. ========");
}
// Entry point: trains and evaluates the MNIST classifier with the
// convolutional network variant (second argument = true) on GPU 0.
static void Main(string[] args)
{
    var device = DeviceDescriptor.GPUDevice(0);
    Console.WriteLine($"======== MNISTClassifier in cnn with {device.Type} ========");
    MNISTClassifier.TrainAndEvaluate(device, true);
}
/// <summary>
/// This is a scratch project to quickly test out functions. The commented-out
/// lines are alternative experiments (upsampling, Tiny-YOLOv2 inference,
/// simple networks, operation tests) kept for convenient re-enabling; the
/// only active step retrains Tiny-YOLOv2 on GPU 0.
/// </summary>
/// <param name="args">Unused command-line arguments.</param>
static void Main(string[] args)
{
    var device = DeviceDescriptor.GPUDevice(0);
    // Candidate test images (enable one as needed):
    //var testImage = new Bitmap(Image.FromFile(Path.Join(RootPath, @"Test images\cicles.png")));
    //var testImage = new Bitmap(Image.FromFile(Path.Join(RootPath, @"Test images\jump.png")));
    //var testImage = new Bitmap(Image.FromFile(Path.Join(RootPath, @"Test images\1.jpg")));
    //var testImage = new Bitmap(Image.FromFile(Path.Join(RootPath, @"Test images\3.jpg")));
    //var testImage = new Bitmap(Image.FromFile(Path.Join(RootPath, @"Test images\5.jpg")));
    //var testImage = new Bitmap(Image.FromFile(Path.Join(RootPath, @"Test images\6.png")));
    //var testImage = new Bitmap(Image.FromFile(Path.Join(RootPath, @"Test images\7.jpg")));
    //var testImage = new Bitmap(Image.FromFile(Path.Join(RootPath, @"Test images\dog-cycle-car.png")));

    //// Upsample
    //var result = Upsample.Test(testImage, device);
    //result.Save(Path.Join(RootPath, @"Output\upsample output.bmp"));

    // ONNX models
    //var result = OnnxModelModels.TestTinyYoloV2(testImage, device);
    //result.Save(Path.Join(RootPath, @"Output\Tiny Yolov2 output.bmp"));
    OnnxModelModels.RetrainTinyYoloV2(device);

    // Testing Simple networks
    //SimpleNetworks.LogisticRegression(device);

    // Test CNTK functions
    //OpertationTester.TestElementTimes();
}
// Entry point: runs the managed evaluation examples first on CPU, then
// unconditionally on GPU 0 (this build assumes a GPU is present).
static void Main(string[] args)
{
    Console.WriteLine("======== Evaluate model using C# GPU Build ========");
    Console.WriteLine(" ====== Run evaluation on CPU =====");
    // Evaluate a single image.
    CNTKLibraryManagedExamples.EvaluationSingleImage(DeviceDescriptor.CPUDevice);
    // Evaluate a batch of images.
    CNTKLibraryManagedExamples.EvaluationBatchOfImages(DeviceDescriptor.CPUDevice);
    // Evaluate multiple sample requests in parallel.
    CNTKLibraryManagedExamples.EvaluateMultipleImagesInParallel(DeviceDescriptor.CPUDevice);
    // Evaluate a single sequence with one-hot vector input.
    CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingOneHot(DeviceDescriptor.CPUDevice);
    // Evaluate a batch of variable-length sequences with one-hot vector input.
    CNTKLibraryManagedExamples.EvaluationBatchOfSequencesUsingOneHot(DeviceDescriptor.CPUDevice);

    // Use GPU for evaluation.
    Console.WriteLine(" ====== Run evaluation on GPU =====");
    CNTKLibraryManagedExamples.EvaluationSingleImage(DeviceDescriptor.GPUDevice(0));
    CNTKLibraryManagedExamples.EvaluationBatchOfImages(DeviceDescriptor.GPUDevice(0));
    CNTKLibraryManagedExamples.EvaluateMultipleImagesInParallel(DeviceDescriptor.GPUDevice(0));
    CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingOneHot(DeviceDescriptor.GPUDevice(0));
    CNTKLibraryManagedExamples.EvaluationBatchOfSequencesUsingOneHot(DeviceDescriptor.GPUDevice(0));
    Console.WriteLine("======== Evaluation completes. ========");
}
// Entry point: runs the full set of training examples (logistic regression,
// MNIST MLP/CNN, CIFAR ResNet, transfer learning, LSTM) on GPU 0.
static void Main(string[] args)
{
    var device = DeviceDescriptor.GPUDevice(0);

    Console.WriteLine($"======== running LogisticRegression.TrainAndEvaluate using {device.Type} ========");
    LogisticRegression.TrainAndEvaluate(device);

    Console.WriteLine($"======== running MNISTClassifier.TrainAndEvaluate with multilayer perceptron (MLP) classifier using {device.Type} ========");
    MNISTClassifier.TrainAndEvaluate(device, false, true);

    Console.WriteLine($"======== running MNISTClassifier.TrainAndEvaluate with convolutional neural network using {device.Type} ========");
    MNISTClassifier.TrainAndEvaluate(device, true, true);

    Console.WriteLine($"======== running CifarResNet.TrainAndEvaluate using {device.Type} ========");
    CifarResNetClassifier.TrainAndEvaluate(device, true);

    Console.WriteLine($"======== running TransferLearning.TrainAndEvaluateWithFlowerData using {device.Type} ========");
    TransferLearning.TrainAndEvaluateWithFlowerData(device, true);

    Console.WriteLine($"======== running TransferLearning.TrainAndEvaluateWithAnimalData using {device.Type} ========");
    TransferLearning.TrainAndEvaluateWithAnimalData(device, true);

    Console.WriteLine($"======== running LSTMSequenceClassifier.Train using {device.Type} ========");
    LSTMSequenceClassifier.Train(device);
}
// Entry point: runs the managed evaluation examples on CPU and, when
// available, on GPU. Loading a model from a memory buffer is exercised on
// CPU only, since the code path is device-independent.
static void Main(string[] args)
{
#if CPUONLY
    Console.WriteLine("======== Evaluate model on CPU using CPUOnly build ========");
#else
    Console.WriteLine("======== Evaluate model on CPU using GPU build ========");
#endif
    if (ShouldRunOnCpu())
    {
        CNTKLibraryManagedExamples.EvaluationSingleImage(DeviceDescriptor.CPUDevice);
        CNTKLibraryManagedExamples.EvaluationBatchOfImages(DeviceDescriptor.CPUDevice);
        CNTKLibraryManagedExamples.EvaluateMultipleImagesInParallel(DeviceDescriptor.CPUDevice);
        CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingOneHot(DeviceDescriptor.CPUDevice);
        CNTKLibraryManagedExamples.EvaluationBatchOfSequencesUsingOneHot(DeviceDescriptor.CPUDevice);
        // It is sufficient to test loading model from memory buffer only on CPU.
        CNTKLibraryManagedExamples.LoadModelFromMemory(DeviceDescriptor.CPUDevice);
    }
    if (ShouldRunOnGpu())
    {
        Console.WriteLine(" ====== Evaluate model on GPU =====");
        CNTKLibraryManagedExamples.EvaluationSingleImage(DeviceDescriptor.GPUDevice(0));
        CNTKLibraryManagedExamples.EvaluationBatchOfImages(DeviceDescriptor.GPUDevice(0));
        CNTKLibraryManagedExamples.EvaluateMultipleImagesInParallel(DeviceDescriptor.GPUDevice(0));
        CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingOneHot(DeviceDescriptor.GPUDevice(0));
        CNTKLibraryManagedExamples.EvaluationBatchOfSequencesUsingOneHot(DeviceDescriptor.GPUDevice(0));
    }
    Console.WriteLine("======== Evaluation completes. ========");
}
// Entry point: trains and evaluates the CIFAR-10 ResNet example on GPU 0.
// The data folder is a relative path — assumes the CIFAR-10 dataset sits
// five directories above the working directory; TODO confirm.
static void Main(string[] args)
{
    var device = DeviceDescriptor.GPUDevice(0);
    Console.WriteLine($"======== running CifarResNet.TrainAndEvaluate using {device.Type} ========");
    CifarResNetClassifier.CifarDataFolder = "../../../../../CIFAR-10";
    CifarResNetClassifier.TrainAndEvaluate(device, true);
}
// Creates a Reversi AI bound to a game instance, a player side, and a model
// location, evaluating on GPU 0.
// NOTE(review): GPUDevice(0) throws when no GPU is present — confirm a CPU
// fallback is not needed here.
public ReversiAI(Reversi reversi, ReversiPlayer player, string modelUri)
{
    _Reversi = reversi;
    _Player = player;
    _ModelUri = modelUri;
    _Device = DeviceDescriptor.GPUDevice(0);
}
// Entry point: trains and evaluates the MNIST convolutional network
// (second argument = true selects the CNN variant) on GPU 0.
public static void Main()
{
    var device = DeviceDescriptor.GPUDevice(0);
    Console.WriteLine(
        $"======== running MNISTClassifier.TrainAndEvaluate with convolutional neural network using {device.Type} ========");
    TrainAndEvaluate(device, true);
}
// Loads weights into a Parameter from a flat float array.
// Backward-compatible generalization: the target device is now an optional
// parameter; when omitted it defaults to GPU 0, the previously hard-coded
// device, so existing callers behave identically.
//
// weight: parameter to overwrite in place (also returned for chaining).
// array:  flat weight values; must match the element count implied by view.
// view:   dimensions of the weight tensor.
// device: device on which to create the NDArrayView; null selects GPU 0.
private static Parameter weight_fromFloat(Parameter weight, float[] array, int[] view, DeviceDescriptor device = null)
{
    // Wrap the raw values in an NDArrayView with the requested dimensions.
    NDArrayView nDArray = new NDArrayView(view, array, device ?? DeviceDescriptor.GPUDevice(0));
    weight.SetValue(nDArray);
    return weight;
}
// Entry point: runs the single-image and batch-of-images evaluation
// examples on GPU 0 only.
static void Main(string[] args)
{
    Console.WriteLine("======== Evaluate model using C# GPU Build ========");
    CNTKLibraryManagedExamples.EvaluationSingleImage(DeviceDescriptor.GPUDevice(0));
    CNTKLibraryManagedExamples.EvaluationBatchOfImages(DeviceDescriptor.GPUDevice(0));
    Console.WriteLine("======== Evaluation completes. ========");
}
// Entry point: runs the complete managed evaluation suite — synchronous,
// parallel, and async variants, one-hot and sparse sequence inputs,
// model-from-memory loading — on CPU first and then on GPU 0.
// Note: async examples are awaited synchronously with .Wait(), which is
// acceptable in a console Main.
static void Main(string[] args)
{
    Console.WriteLine("======== Evaluate model using C# GPU Build ========");
    Console.WriteLine(" ====== Run evaluation on CPU =====");
    // Evaluate a single image.
    CNTKLibraryManagedExamples.EvaluationSingleImage(DeviceDescriptor.CPUDevice);
    // Evaluate a batch of images.
    CNTKLibraryManagedExamples.EvaluationBatchOfImages(DeviceDescriptor.CPUDevice);
    // Evaluate multiple sample requests in parallel.
    CNTKLibraryManagedExamples.EvaluateMultipleImagesInParallelAsync(DeviceDescriptor.CPUDevice).Wait();
    // Evaluate an image asynchronously.
    Task evalTask = CNTKLibraryManagedExamples.EvaluationSingleImageAsync(DeviceDescriptor.CPUDevice);
    evalTask.Wait();
    // Evaluate a single sequence using one-hot vector input.
    CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingOneHot(DeviceDescriptor.CPUDevice);
    // Evaluate a batch of variable length sequences with one-hot vector input.
    CNTKLibraryManagedExamples.EvaluationBatchOfSequencesUsingOneHot(DeviceDescriptor.CPUDevice);
    // Evaluate a sequence using sparse input.
    CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingSparse(DeviceDescriptor.CPUDevice);
    // Load model from memory buffer.
    CNTKLibraryManagedExamples.LoadModelFromMemory(DeviceDescriptor.CPUDevice);

    // Use GPU for evaluation.
    Console.WriteLine(" ====== Run evaluation on GPU =====");
    CNTKLibraryManagedExamples.EvaluationSingleImage(DeviceDescriptor.GPUDevice(0));
    CNTKLibraryManagedExamples.EvaluationBatchOfImages(DeviceDescriptor.GPUDevice(0));
    // Evaluate an image asynchronously.
    evalTask = CNTKLibraryManagedExamples.EvaluationSingleImageAsync(DeviceDescriptor.GPUDevice(0));
    evalTask.Wait();
    CNTKLibraryManagedExamples.EvaluateMultipleImagesInParallelAsync(DeviceDescriptor.GPUDevice(0)).Wait();
    CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingOneHot(DeviceDescriptor.GPUDevice(0));
    CNTKLibraryManagedExamples.EvaluationBatchOfSequencesUsingOneHot(DeviceDescriptor.GPUDevice(0));
    CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingSparse(DeviceDescriptor.GPUDevice(0));
    CNTKLibraryManagedExamples.LoadModelFromMemory(DeviceDescriptor.GPUDevice(0));
    // Evaluate intermediate layer.
    CNTKLibraryManagedExamples.EvaluateIntermediateLayer(DeviceDescriptor.GPUDevice(0));
    // Evaluate combined outputs.
    CNTKLibraryManagedExamples.EvaluateCombinedOutputs(DeviceDescriptor.GPUDevice(0));
    Console.WriteLine("======== Evaluation completes. ========");
}
// Unity initialization hook: builds the online and target Q-networks on
// GPU 0 and wires them into a DQL trainer that uses the Adam learner.
// NOTE(review): the network hyper-parameters (6, 3, 2, 64, 0.4f) are
// duplicated magic numbers — presumably state size, action count, layers,
// hidden units, and a dropout/epsilon value; confirm against QNetworkSimple.
void Start()
{
    QNetworkSimple network = new QNetworkSimple(6, 3, 2, 64, DeviceDescriptor.GPUDevice(0), 0.4f);
    model = new DQLModel(network);
    QNetworkSimple networkTarget = new QNetworkSimple(6, 3, 2, 64, DeviceDescriptor.GPUDevice(0), 0.4f);
    modelTarget = new DQLModel(networkTarget);
    //trainer = new TrainerDQLSimple(model, null, LearnerDefs.SGDLearner(startLearningRate),1, experienceBufferSize, 2048);
    trainer = new TrainerDQLSimple(model, modelTarget, LearnerDefs.AdamLearner(startLearningRate), 1, experienceBufferSize, experienceBufferSize);
    //Save();//test
}
// Dispatches an EMNIST training session for the dataset selected by the
// user's menu choice (letters, digits, or uppercase letters), runs a
// convolutional network for 200 epochs on GPU 0, and dumps snapshots into a
// timestamped output directory.
private static void RunEmnistTraining(string choice)
{
    // Resolve the menu choice to a dataset definition; unknown choices abort.
    ITrainingDatasetDefinition datasetDefinition = null;
    switch (choice)
    {
        case LETTERS_CHOICE:
            datasetDefinition = new EMNISTLetterDataset();
            break;
        case DIGITS_CHOICE:
            datasetDefinition = new EMNISTDigitDataset();
            break;
        case UPPERCASE_LETTERS_CHOICE:
            datasetDefinition = new EMNISTUppercaseLetterDataset();
            break;
        default:
            SharedConsoleCommands.InvalidCommand(choice);
            return;
    }
    TrainingSessionStart(choice);
    var msgPrinter = new ConsolePrinter();
    // Timestamped directory keeps artifacts from different runs separate.
    var outputDir = $"./{DateTime.Now.ToString("yyyyMMddHHmmss", CultureInfo.InvariantCulture)}/";
    var device = DeviceDescriptor.GPUDevice(0);
    var trainingConfiguration = new TrainingSessionConfiguration
    {
        Epochs = 200,
        DumpModelSnapshotPerEpoch = true,
        ProgressEvaluationSeverity = EvaluationSeverity.PerEpoch,
        MinibatchConfig = new MinibatchConfiguration
        {
            MinibatchSize = 64,
            // NOTE(review): computed as 60000/32 but MinibatchSize is 64 —
            // confirm whether this divisor should be 64.
            HowManyMinibatchesPerSnapshot = (60000 / 32),
            HowManyMinibatchesPerProgressPrint = 500,
            DumpModelSnapshotPerMinibatch = false,
            AsyncMinibatchSnapshot = false
        },
        PersistenceConfig = TrainingModelPersistenceConfiguration.CreateWithAllLocationsSetTo(outputDir)
    };
    msgPrinter.PrintMessage("\n" + trainingConfiguration + "\n");
    using (var runner = new ConvolutionalNeuralNetworkRunner(device, trainingConfiguration, msgPrinter))
    {
        runner.RunUsing(datasetDefinition);
    }
    EmnistTrainingDone(choice);
}
// Picks the best available compute device, preferring GPU 0 and falling
// back to the CPU.
// Bug fix: DeviceDescriptor.GPUDevice(0) never returns null — it throws
// when no usable GPU exists — so the previous null check was dead code and
// the CPU fallback was unreachable. A failed GPU lookup now falls back to
// CPU via try/catch, matching the pattern used elsewhere in this codebase.
private void SelectBestDevice()
{
    try
    {
        device = DeviceDescriptor.GPUDevice(0);
    }
    catch (Exception)
    {
        // No usable GPU: fall back to the CPU device.
        device = DeviceDescriptor.CPUDevice;
    }
}
// Unity initialization hook: builds a continuous-action PPO network on
// GPU 0, wraps it in a PPO model/trainer with the Adam learner, and sets up
// running averages for loss and episode score tracking.
// NOTE(review): constructor arguments (2, 1, 4, 64, 0.01f) are presumably
// state size, action size, layers, hidden units, and a scale — confirm
// against PPONetworkContinuousSimple.
void Start()
{
    var network = new PPONetworkContinuousSimple(2, 1, 4, 64, DeviceDescriptor.GPUDevice(0), 0.01f);
    model = new PPOModel(network);
    trainer = new TrainerPPOSimple(model, LearnerDefs.AdamLearner(learningRate), 1, 10000, 500);
    //test
    //trainer.RewardDiscountFactor = 0.5f;
    loss = new AutoAverage(iterationForEachTrain);
    episodePointAve = new AutoAverage(episodeToRunForEachTrain);
}
// Entry point: runs the managed evaluation examples interleaved with
// object-reference memory tests, on CPU and (when available) GPU. Memory
// validation runs before and after groups of evaluations to detect leaked
// or prematurely collected native references.
static void Main(string[] args)
{
#if CPUONLY
    Console.WriteLine("======== Evaluate model on CPU using CPUOnly build ========");
#else
    Console.WriteLine("======== Evaluate model on CPU using GPU build ========");
#endif
    if (ShouldRunOnCpu())
    {
        var device = DeviceDescriptor.CPUDevice;
        CNTKLibraryManagedExamples.EvaluationSingleImage(device);
        // Run memory tests.
        MemoryTests.ValidateObjectReferences(device);
        CNTKLibraryManagedExamples.EvaluationBatchOfImages(device);
        CNTKLibraryManagedExamples.EvaluateMultipleImagesInParallel(device);
        // Run memory tests again.
        MemoryTests.ValidateObjectReferences(device);
        CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingOneHot(device);
        CNTKLibraryManagedExamples.EvaluationBatchOfSequencesUsingOneHot(device);
        CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingSparse(device);
        // It is sufficient to test loading model from memory buffer only on CPU.
        CNTKLibraryManagedExamples.LoadModelFromMemory(device);
        MemoryTests.WriteOutputs();
    }
    if (ShouldRunOnGpu())
    {
        Console.WriteLine(" ====== Evaluate model on GPU =====");
        var device = DeviceDescriptor.GPUDevice(0);
        // Run memory tests.
        MemoryTests.ValidateObjectReferences(device);
        CNTKLibraryManagedExamples.EvaluationSingleImage(device);
        CNTKLibraryManagedExamples.EvaluationBatchOfImages(device);
        CNTKLibraryManagedExamples.EvaluateMultipleImagesInParallel(device);
        // Run memory tests.
        MemoryTests.ValidateObjectReferences(device);
        CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingOneHot(device);
        CNTKLibraryManagedExamples.EvaluationBatchOfSequencesUsingOneHot(device);
        CNTKLibraryManagedExamples.EvaluationSingleSequenceUsingSparse(device);
        // Run memory tests again.
        MemoryTests.WriteOutputs();
    }
    Console.WriteLine("======== Evaluation completes. ========");
}
// Unity initialization hook: builds a convolutional Q-network sized to the
// maze grid on GPU 0 and wires it into a DQL trainer (no separate target
// network — the second trainer argument is null). Commented-out lines are
// earlier dense-network and target-network experiments kept for reference.
void Start()
{
    //QNetworkSimple network = new QNetworkSimple(environment.mazeDimension.x* environment.mazeDimension.y, 4, 3, 64, DeviceDescriptor.CPUDevice, 0.4f);
    var network = new QNetworkConvSimple(
        environment.mazeDimension.x, environment.mazeDimension.y, 1, 4,
        new int[] { 3, 3 }, new int[] { 64, 128 }, new int[] { 1, 1 },
        new bool[] { true, true }, 1, 128, false,
        DeviceDescriptor.GPUDevice(0), 1f);
    model = new DQLModel(network);
    //QNetworkSimple networkTarget = new QNetworkSimple(environment.mazeDimension.x * environment.mazeDimension.y, 4, 3, 64, DeviceDescriptor.CPUDevice, 0.4f);
    //modelTarget = new DQLModel(networkTarget);
    //trainer = new TrainerDQLSimple(model, modelTarget, LearnerDefs.SGDLearner(startLearningRate), 1, experienceBufferSize, 500);
    trainer = new TrainerDQLSimple(model, null, LearnerDefs.SGDLearner(startLearningRate), 1, experienceBufferSize, 500);
    //Save();//test
}
// Initializes the ResNet background worker: selects GPU 0 as the compute
// device and attaches the BackgroundWorker event handlers.
// NOTE(review): if GPU initialization throws, only a message box is shown
// and `device` stays unset — confirm whether a CPU fallback is intended.
public static void CNTK_ResNetSetup()
{
    try
    {
        device = DeviceDescriptor.GPUDevice(0);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.ToString());
    }
    MLRoutine.DoWork += new DoWorkEventHandler(MLRoutine_doWork);
    MLRoutine.ProgressChanged += new ProgressChangedEventHandler(MLRoutine_ProgressChanged);
    MLRoutine.RunWorkerCompleted += new RunWorkerCompletedEventHandler(MLRoutine_WorkerCompleted);
    MLRoutine.WorkerReportsProgress = true;
}
// Lazily resolves and caches the compute device: tries GPU 0 first and
// falls back to CPU when GPU initialization throws (GPUDevice throws when
// no usable GPU exists). Subsequent calls return the cached _device.
// NOTE(review): the lazy initialization is not thread-safe; concurrent
// first calls may race — confirm single-threaded usage.
public static DeviceDescriptor Device()
{
    if (_device == null)
    {
        try
        {
            _device = DeviceDescriptor.GPUDevice(0);
        }
        catch (Exception)
        {
            _device = DeviceDescriptor.CPUDevice;
        }
    }
    return (_device);
}
// Entry point: trains/evaluates CIFAR ResNet on GPU 0, then checks that the
// pre-trained ResNet18 model file exists before continuing.
// NOTE(review): the model path is an absolute Windows path specific to one
// machine ("D:/Libraries/...") — consider making it configurable.
static void Main(string[] args)
{
    var device = DeviceDescriptor.GPUDevice(0);
    Console.WriteLine($"======== running CifarResNet.TrainAndEvaluate using {device.Type} ========");
    CifarResNetClassifier.CifarDataFolder = "../../../../Data/CIFAR-10";
    CifarResNetClassifier.TrainAndEvaluate(device, true);
    TestCommon.TestDataDirPrefix = "../../../../Data/";
    string modelFileSourceDir = "D:/Libraries/cntk-release/PretrainedModels/ResNet18_ImageNet_CNTK.model";
    if (!File.Exists(modelFileSourceDir))
    {
        Console.WriteLine("Model file doesn't exist. Please run download_model.py in CNTK/CNTK/PretrainedModels");
        Console.ReadKey();
        return;
    }
}
/// <summary>
/// Returns the DeviceDescriptor corresponding to a ProcessDevice enumeration value.
/// </summary>
/// <param name="pdevice">Requested processing device.</param>
/// <returns>The matching device descriptor; CNTK's default device for Default or unknown values.</returns>
public static DeviceDescriptor GetDevice(ProcessDevice pdevice)
{
    if (pdevice == ProcessDevice.CPU)
    {
        return DeviceDescriptor.CPUDevice;
    }
    if (pdevice == ProcessDevice.GPU)
    {
        return DeviceDescriptor.GPUDevice(0);
    }
    // ProcessDevice.Default and any unrecognized value resolve to the default device.
    return DeviceDescriptor.UseDefaultDevice();
}
// Unity initialization hook: selects GPU 0 or CPU based on the useGPU flag,
// loads the saved CNTK model from the Assets folder, and runs evaluation.
void Start()
{
    if (useGPU)
    {
        device = DeviceDescriptor.GPUDevice(0);
    }
    else
    {
        device = DeviceDescriptor.CPUDevice;
    }

    // Load the trained model from the project's Assets directory.
    string modelPath = System.IO.Path.Combine(Environment.CurrentDirectory, @"Assets\CNTK\Models\mymodel.model");
    model = Function.Load(modelPath, device);
    Evaluate();
}
// PowerShell cmdlet completion: resolves the requested device (the CPU
// switch wins, then the default-device switch, otherwise the supplied GPU
// id) and writes the result of TrySetDefaultDevice to the pipeline.
protected override void EndProcessing()
{
    DeviceDescriptor device;
    if (CPUDevice)
    {
        device = DeviceDescriptor.CPUDevice;
    }
    else if (DefaultDevice)
    {
        device = DeviceDescriptor.UseDefaultDevice();
    }
    else
    {
        device = DeviceDescriptor.GPUDevice(GPUDeviceId);
    }
    WriteObject(DeviceDescriptor.TrySetDefaultDevice(device));
}
// Sanity test for CNTKLib.ElementTimes: multiplies a 2x2 parameter by a
// 2x2x2 parameter elementwise (the smaller operand broadcasts over the
// extra axis) and prints nothing — results are inspected via outputArray.
public static void TestElementTimes()
{
    var device = DeviceDescriptor.GPUDevice(0);

    // todo put in different values
    CNTKDictionary testInitializer = new CNTKDictionary();

    Parameter leftOperand = new Parameter(new int[] { 2, 2 }, 1f, device, "left");
    NDArrayView initValues = new NDArrayView(new int[] { 2, 2 }, new float[] { 0f, 1f, 2f, 3f }, device);
    leftOperand.SetValue(initValues);
    // leftOperand looks like:
    // 0 1
    // 2 3

    Parameter rightOperand = new Parameter(new int[] { 2, 2, 2 }, 1f, device, "right");
    initValues = new NDArrayView(new int[] { 2, 2, 2 }, new float[] { 0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f }, device);
    rightOperand.SetValue(initValues);
    // rightOperand looks like:
    // 0 1 | 4 5
    // 2 3 | 6 7

    Function model = CNTKLib.ElementTimes(leftOperand, rightOperand);
    var inputVariable = model.Inputs.First();
    // Both operands are Parameters, so no input values need to be bound.
    var inputMap = new Dictionary<Variable, Value>();
    var outputVariable = model.Output;
    // null requests that CNTK allocate the output Value during Evaluate.
    var outputDataMap = new Dictionary<Variable, Value>() { { outputVariable, null } };
    model.Evaluate(inputMap, outputDataMap, device);
    var output = outputDataMap[outputVariable];
    var outputArray = output.GetDenseData<float>(outputVariable).First();
    // output looks like:
    // 0 1 | 0  5
    // 4 9 | 12 21
    // conclusion of this test: CNTKLib.ElementTimes works as expected :-)
}
// Initializes the training/test pipeline: selects the device from the
// useGPU flag, resolves data and model paths under the Assets folder,
// declares the minibatch stream layout, and loads both datasets.
private void Init()
{
    if (useGPU)
    {
        device = DeviceDescriptor.GPUDevice(0);
    }
    else
    {
        device = DeviceDescriptor.CPUDevice;
    }

    trainPath = System.IO.Path.Combine(Environment.CurrentDirectory, @"Assets\CNTK\Data\train.txt");
    testPath = System.IO.Path.Combine(Environment.CurrentDirectory, @"Assets\CNTK\Data\test.txt");
    modelPath = System.IO.Path.Combine(Environment.CurrentDirectory, @"Assets\CNTK\Models\mymodel.model");

    // Stream layout: feature vector plus one-hot label, matching the text files.
    streamConfigurations = new StreamConfiguration[]
    {
        new StreamConfiguration("features", inputDim),
        new StreamConfiguration("labels", numOutputClasses)
    };

    trainingData = ReadData(trainPath, true);
    testData = ReadData(testPath, false);
}
// Entry point: runs the full set of training examples (MNIST logistic and
// convolutional classifiers, CIFAR ResNet, transfer learning, LSTM) on GPU 0.
// Fix: corrected the "runing" typo to "running" in all six progress messages.
static void Main(string[] args)
{
    var device = DeviceDescriptor.GPUDevice(0);

    Console.WriteLine($"======== running MNISTClassifierTest.TrainAndEvaluate using {device.Type} with logistic classifier ========");
    MNISTClassifier.TrainAndEvaluate(device, false, true);

    Console.WriteLine($"======== running MNISTClassifierTest.TrainAndEvaluate using {device.Type} with convolution classifier ========");
    MNISTClassifier.TrainAndEvaluate(device, true, true);

    Console.WriteLine($"======== running CifarResNet.TrainAndEvaluate using {device.Type} ========");
    CifarResNetClassifier.TrainAndEvaluate(device, true);

    Console.WriteLine($"======== running TransferLearning.TrainAndEvaluateWithFlowerData using {device.Type} ========");
    TransferLearning.TrainAndEvaluateWithFlowerData(device, true);

    Console.WriteLine($"======== running TransferLearning.TrainAndEvaluateWithAnimalData using {device.Type} ========");
    TransferLearning.TrainAndEvaluateWithAnimalData(device, true);

    Console.WriteLine($"======== running LSTMSequenceClassifier.Train using {device.Type} ========");
    LSTMSequenceClassifier.Train(device, true);
}