public static BackpropAlgorithm CreateKaggleCatOrDogFiltersDemo1_Pretrained(string fpath)
{
  Console.WriteLine("init CreateKaggleCatOrDogFiltersDemo1_Pretrained");

  ConvNet net;
  using (var stream = System.IO.File.Open(fpath, System.IO.FileMode.Open, System.IO.FileAccess.Read))
  {
    net = ConvNet.Deserialize(stream);
    net.IsTraining = true;
  }

  var lrate = 0.001D;
  var alg = new BackpropAlgorithm(net)
  {
    LossFunction = Loss.CrossEntropySoftMax,
    EpochCount = 500,
    LearningRate = lrate,
    BatchSize = 8,
    UseBatchParallelization = true,
    MaxBatchThreadCount = 8,
    Optimizer = Optimizer.Adadelta,
    Regularizator = Regularizator.L2(0.001D),
    LearningRateScheduler = LearningRateScheduler.DropBased(lrate, 5, 0.5D)
  };
  alg.Build();

  return alg;
}
public static BackpropAlgorithm CreateMainColorsDemo1()
{
  Console.WriteLine("init CreateMainColorsDemo1");

  var activation = Activation.ReLU;
  var net = new ConvNet(3, 48) { IsTraining = true };
  net.AddLayer(new FlattenLayer(outputDim: 128, activation: activation));
  net.AddLayer(new FlattenLayer(outputDim: 128, activation: activation));
  net.AddLayer(new DenseLayer(outputDim: 12, activation: activation));

  net._Build();
  net.RandomizeParameters(seed: 0);

  var lrate = 1.1D;
  var alg = new BackpropAlgorithm(net)
  {
    EpochCount = 500,
    LearningRate = lrate,
    BatchSize = 8,
    UseBatchParallelization = true,
    MaxBatchThreadCount = 8,
    LossFunction = Loss.Euclidean,
    Optimizer = Optimizer.Adadelta,
    Regularizator = Regularizator.L2(0.0001D),
    LearningRateScheduler = LearningRateScheduler.DropBased(lrate, 5, 0.5D)
  };
  alg.Build();

  return alg;
}
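// Every demo in this file schedules the learning rate with
// LearningRateScheduler.DropBased(lrate, 5, 0.5D). A minimal sketch of the
// usual drop-based rule, assuming DropBased(init, epochStep, dropRate) means
// "multiply the rate by dropRate every epochStep epochs" — the library's exact
// semantics are not shown here, so this helper is hypothetical:
private static double DropBasedRate(double init, int epochStep, double dropRate, int epoch)
{
  // integer division acts as floor(epoch / epochStep) for non-negative epochs
  return init * Math.Pow(dropRate, epoch / epochStep);
}
// Under this reading, CreateMainColorsDemo1's init = 1.1, epochStep = 5,
// dropRate = 0.5 gives: epochs 0-4 -> 1.1, epochs 5-9 -> 0.55,
// epochs 10-14 -> 0.275, and so on.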
/// <summary>
/// Error: 19.1
/// </summary>
public static BackpropAlgorithm CreateKaggleCatOrDogDemo_Pretrained()
{
  Console.WriteLine("init CreateKaggleCatOrDogDemo_Pretrained");

  ConvNet net;
  var assembly = Assembly.GetExecutingAssembly();
  using (var stream = assembly.GetManifestResourceStream("ML.DeepTests.Pretrained.cn_e16_p37.65.mld"))
  {
    net = ConvNet.Deserialize(stream);
    net.IsTraining = true;
  }

  var lrate = 0.01D;
  var alg = new BackpropAlgorithm(net)
  {
    LossFunction = Loss.CrossEntropySoftMax,
    EpochCount = 500,
    LearningRate = lrate,
    BatchSize = 4,
    UseBatchParallelization = true,
    MaxBatchThreadCount = 8,
    Optimizer = Optimizer.Adadelta,
    Regularizator = Regularizator.L2(0.001D),
    LearningRateScheduler = LearningRateScheduler.DropBased(lrate, 5, 0.5D)
  };
  alg.Build();

  return alg;
}
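// The method above loads the pretrained net from an embedded resource.
// GetManifestResourceStream returns null when the resource name is misspelled
// or the file is not marked as an embedded resource, which would make
// ConvNet.Deserialize fail. A small diagnostic sketch using the standard .NET
// reflection API (the helper itself is hypothetical, not part of this suite):
private static void DumpEmbeddedResources()
{
  var assembly = Assembly.GetExecutingAssembly();
  foreach (var name in assembly.GetManifestResourceNames())
    Console.WriteLine(name); // compare against "ML.DeepTests.Pretrained.cn_e16_p37.65.mld"
}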
public void Gradient_DifferentLayers_1Iter_CrossEntropy_Regularization()
{
  // arrange
  var activation = Activation.ReLU;
  var net = new ConvNet(1, 5) { IsTraining = true };
  net.AddLayer(new ConvLayer(outputDepth: 2, windowSize: 3, padding: 1));
  net.AddLayer(new MaxPoolingLayer(windowSize: 3, stride: 2, activation: Activation.Exp));
  net.AddLayer(new ActivationLayer(activation: Activation.Tanh));
  net.AddLayer(new FlattenLayer(outputDim: 10, activation: activation));
  net.AddLayer(new DropoutLayer(rate: 0.5D));
  net.AddLayer(new DenseLayer(outputDim: 3, activation: Activation.Exp));
  net._Build();
  net.RandomizeParameters(seed: 0);

  var sample = new ClassifiedSample<double[][,]>();
  for (int i = 0; i < 3; i++)
  {
    var point = RandomPoint(1, 5, 5);
    sample[point] = new Class(i.ToString(), i);
  }

  var regularizator = Regularizator.Composite(Regularizator.L1(0.1D), Regularizator.L2(0.3D));
  var alg = new BackpropAlgorithm(net)
  {
    LearningRate = 0.1D,
    LossFunction = Loss.CrossEntropySoftMax,
    Regularizator = regularizator
  };
  alg.Build();

  // act
  var data = sample.First();
  var expected = new double[3] { 1.0D, 0.0D, 0.0D };
  alg.RunIteration(data.Key, expected);
  regularizator.Apply(alg.Gradient, alg.Net.Weights);
  ((DropoutLayer)alg.Net[4]).ApplyCustomMask = true; // fix the dropout mask so the gradient check is deterministic

  // assert
  AssertNetGradient(alg, data.Key, expected);
}
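// AssertNetGradient above presumably compares the analytic gradient produced
// by backprop against a numerical estimate. A minimal, self-contained sketch
// of the usual central-difference check; the loss delegate, flat weight array,
// and tolerances are assumptions for illustration, not this suite's actual API:
private static void AssertGradientNumerically(Func<double[], double> loss,
                                              double[] weights,
                                              double[] analyticGrad,
                                              double eps = 1e-6,
                                              double tol = 1e-4)
{
  for (int i = 0; i < weights.Length; i++)
  {
    var w0 = weights[i];
    weights[i] = w0 + eps;
    var lossPlus = loss(weights);
    weights[i] = w0 - eps;
    var lossMinus = loss(weights);
    weights[i] = w0; // restore the original weight

    // central difference: dL/dw_i ~ (L(w+eps) - L(w-eps)) / (2*eps)
    var numericGrad = (lossPlus - lossMinus) / (2 * eps);
    if (Math.Abs(numericGrad - analyticGrad[i]) > tol)
      throw new Exception($"Gradient mismatch at weight {i}: numeric={numericGrad}, analytic={analyticGrad[i]}");
  }
}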
/// <summary>
/// Error 21.65
/// </summary>
public static BackpropAlgorithm CreateCIFAR10Trunc2ClassesDemo2_SEALED()
{
  Console.WriteLine("init CreateCIFAR10Trunc2ClassesDemo2_SEALED");

  var activation = Activation.ReLU;
  var net = new ConvNet(3, 32) { IsTraining = true };
  net.AddLayer(new ConvLayer(outputDepth: 16, windowSize: 3, padding: 1, activation: activation));
  net.AddLayer(new ConvLayer(outputDepth: 16, windowSize: 3, padding: 1, activation: activation));
  net.AddLayer(new MaxPoolingLayer(windowSize: 3, stride: 2));
  net.AddLayer(new DropoutLayer(0.25));
  net.AddLayer(new ConvLayer(outputDepth: 32, windowSize: 3, padding: 1, activation: activation));
  net.AddLayer(new ConvLayer(outputDepth: 32, windowSize: 3, padding: 1, activation: activation));
  net.AddLayer(new MaxPoolingLayer(windowSize: 3, stride: 2));
  net.AddLayer(new DropoutLayer(0.25));
  net.AddLayer(new FlattenLayer(outputDim: 256, activation: activation));
  net.AddLayer(new DropoutLayer(0.5));
  net.AddLayer(new DenseLayer(outputDim: 2, activation: Activation.Exp));

  net._Build();
  net.RandomizeParameters(seed: 0);

  var lrate = 0.01D;
  var alg = new BackpropAlgorithm(net)
  {
    LossFunction = Loss.CrossEntropySoftMax,
    EpochCount = 500,
    LearningRate = lrate,
    BatchSize = 4,
    UseBatchParallelization = true,
    MaxBatchThreadCount = 8,
    Optimizer = Optimizer.Adadelta,
    Regularizator = Regularizator.L2(0.001D),
    LearningRateScheduler = LearningRateScheduler.DropBased(lrate, 5, 0.5D)
  };
  alg.Build();

  return alg;
}
/// <summary>
/// Error = 0.92
/// </summary>
public static BackpropAlgorithm CreateMNISTSimpleDemo_SEALED()
{
  Console.WriteLine("init CreateMNISTSimpleDemo_SEALED");

  var activation = Activation.LeakyReLU();
  var net = new ConvNet(1, 28) { IsTraining = true };
  net.AddLayer(new ConvLayer(outputDepth: 12, windowSize: 5, padding: 2));
  net.AddLayer(new ConvLayer(outputDepth: 12, windowSize: 5, padding: 2));
  net.AddLayer(new MaxPoolingLayer(windowSize: 2, stride: 2, activation: activation));
  net.AddLayer(new ConvLayer(outputDepth: 24, windowSize: 5, padding: 2));
  net.AddLayer(new MaxPoolingLayer(windowSize: 2, stride: 2, activation: activation));
  net.AddLayer(new FlattenLayer(outputDim: 32, activation: activation));
  net.AddLayer(new DropoutLayer(rate: 0.5D));
  net.AddLayer(new DenseLayer(outputDim: 10, activation: activation));

  net._Build();
  net.RandomizeParameters(seed: 0);

  var lrate = 0.001D;
  var alg = new BackpropAlgorithm(net)
  {
    EpochCount = 500,
    LearningRate = lrate,
    BatchSize = 4,
    UseBatchParallelization = true,
    MaxBatchThreadCount = 4,
    LossFunction = Loss.Euclidean,
    Optimizer = Optimizer.RMSProp,
    Regularizator = Regularizator.L2(0.0001D),
    LearningRateScheduler = LearningRateScheduler.DropBased(lrate, 5, 0.5D)
  };
  alg.Build();

  return alg;
}
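// Unlike the Adadelta demos above, CreateMNISTSimpleDemo_SEALED trains with
// Optimizer.RMSProp. A minimal sketch of the standard RMSProp update rule,
// assuming the textbook formulation — the library's internal hyperparameters
// (decay gamma, epsilon) are not shown here, so this helper is hypothetical:
private static void RMSPropStep(double[] weights, double[] grad, double[] meanSquare,
                                double rate, double gamma = 0.9, double eps = 1e-8)
{
  for (int i = 0; i < weights.Length; i++)
  {
    // exponential running average of squared gradients
    meanSquare[i] = gamma * meanSquare[i] + (1 - gamma) * grad[i] * grad[i];
    // scale each step by the RMS of recent gradients for that weight
    weights[i] -= rate * grad[i] / (Math.Sqrt(meanSquare[i]) + eps);
  }
}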