Example #1
0
        /// <summary>
        /// The main program entry point.
        /// </summary>
        /// <param name="args"></param>
        static void Main(string[] args)
        {
            // unpack archive
            if (!File.Exists("x_channels_first_8_5.bin"))
            {
                Console.WriteLine("Unpacking archive...");
                ZipFile.ExtractToDirectory("frog_pictures.zip", ".");
            }

            // load training and test data
            Console.WriteLine("Loading data files...");
            var trainingData = DataUtil.LoadBinary <float>("x_channels_first_8_5.bin", 5000, channels * imageWidth * imageHeight);

            // create the generator input variable
            var generatorVar = CNTK.Variable.InputVariable(new int[] { latentDimensions }, CNTK.DataType.Float, name: "generator_input");

            // create the generator
            Console.WriteLine("Creating generator...");
            var generator = generatorVar
                            .Dense(128 * 16 * 16, v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                            .Reshape(new int[] { 16, 16, 128 })
                            .Convolution2D(256, new int[] { 5, 5 }, padding: true, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                            .ConvolutionTranspose(
                filterShape:     new int[] { 4, 4 },
                numberOfFilters: 256,
                strides:         new int[] { 2, 2 },
                outputShape:     new int[] { 32, 32 },
                padding:         true,
                activation:      v => CNTK.CNTKLib.LeakyReLU(v, 0.1)
                )
                            .Convolution2D(256, new int[] { 5, 5 }, padding: true, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                            .Convolution2D(256, new int[] { 5, 5 }, padding: true, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                            .Convolution2D(channels, new int[] { 7, 7 }, padding: true, activation: CNTK.CNTKLib.Tanh)
                            .ToNetwork();

            Console.WriteLine(generator.ToSummary());

            // create the discriminator input variable
            var discriminatorVar = CNTK.Variable.InputVariable(new int[] { imageWidth, imageHeight, channels }, CNTK.DataType.Float, name: "discriminator_input");

            // create the discriminator
            Console.WriteLine("Creating discriminator...");
            var discriminator = discriminatorVar
                                .Convolution2D(128, new int[] { 3, 3 }, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                .Convolution2D(128, new int[] { 4, 4 }, strides: new int[] { 2, 2 }, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                .Convolution2D(128, new int[] { 4, 4 }, strides: new int[] { 2, 2 }, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                .Convolution2D(128, new int[] { 4, 4 }, strides: new int[] { 2, 2 }, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                .Dropout(0.4)
                                .Dense(1, CNTK.CNTKLib.Sigmoid)
                                .ToNetwork();

            Console.WriteLine(discriminator.ToSummary());

            // create the Gan
            Console.WriteLine("Creating Gan...");
            var gan = Gan.CreateGan(generator, discriminator);

            // create the label variable
            var labelVar = CNTK.Variable.InputVariable(shape: new CNTK.NDShape(0), dataType: CNTK.DataType.Float, name: "label_var");

            // set up the loss functions
            var discriminatorLoss = CNTK.CNTKLib.BinaryCrossEntropy(discriminator, labelVar);
            var ganLoss           = CNTK.CNTKLib.BinaryCrossEntropy(gan, labelVar);

            // set up the learners
            var discriminatorLearner = discriminator.GetAdaDeltaLearner(1);
            var ganLearner           = gan.GetAdaDeltaLearner(1);

            // set up the trainers
            var discriminatorTrainer = discriminator.GetTrainer(discriminatorLearner, discriminatorLoss, discriminatorLoss);
            var ganTrainer           = gan.GetTrainer(ganLearner, ganLoss, ganLoss);

            // make sure we have an images folder to write to
            var outputFolder = "images";

            if (!Directory.Exists(outputFolder))
            {
                Directory.CreateDirectory(outputFolder);
            }

            // train the gan during multiple epochs
            Console.WriteLine("Training Gan...");
            var numEpochs = 100000;
            var batchSize = 12;
            var start     = 0;

            for (var epoch = 0; epoch < numEpochs; epoch++)
            {
                // run the generator and create a set of fake frog images
                var generatedImages = Gan.GenerateImages(generator, batchSize, latentDimensions);

                // get a training batch: a mix of fake and real images labelled correctly
                start = Math.Min(start, trainingData.Length - batchSize);
                var batch = Gan.GetTrainingBatch(discriminatorVar, generatedImages, trainingData, batchSize, start);
                start += batchSize;
                if (start >= trainingData.Length)
                {
                    start = 0;
                }

                // train the discriminator
                var discriminatorResult = discriminatorTrainer.TrainBatch(
                    new[] {
        static void Main(string[] args)
        {
            foreach (var item in CNTK.DeviceDescriptor.AllDevices())
            {
                Console.WriteLine($"{item.Id}: {item.Type.ToString()}");
            }
            /// Data loading
            if (!File.Exists("./x_channels_first_8_5.bin"))
            {
                Console.WriteLine("Unpacking archive...");
                ZipFile.ExtractToDirectory("../../../../Resources/frog_pictures.zip", "./");
            }
            Console.WriteLine("Loading data files..");
            float[][] trainingData = DataUtil.LoadBinary <float>("./x_channels_first_8_5.bin", 5000, channels * imageHeight * imageWidth);

            ///  Generator Input Variable
            Variable generatorVar = CNTK.Variable.InputVariable(new int[] { latentDimensions }, DataType.Float, "generator_input");

            // Generator Architecture
            Function generator = generatorVar.Dense(128 * 16 * 16, v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                 .Reshape(new int[] { 16, 16, 128 })
                                 .Convolution2D(256, new int[] { 5, 5 }, padding: true, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                 .ConvolutionTranspose(
                filterShape: new int[] { 4, 4 },
                numberOfFilters: 128,
                strides: new int[] { 2, 2 },
                outputShape: new int[] { 32, 32 },
                padding: true,
                activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1)
                )
                                 .Convolution2D(256, new int[] { 3, 3 }, padding: true, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                 .Convolution2D(256, new int[] { 5, 5 }, padding: true, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                 .Convolution2D(256, new int[] { 7, 7 }, padding: true, activation: v => CNTK.CNTKLib.Tanh(v))
                                 .Convolution2D(channels, new int[] { 7, 7 }, padding: true, activation: CNTK.CNTKLib.Tanh)
                                 .ToNetwork();
            //                             .Dense(128 * 16 * 16, var => CNTK.CNTKLib.LeakyReLU(var, 0.1))
            //                             .Reshape(new int[] { 16, 16, 128 })
            //                             .Convolution2D(256, new int[] { 5, 5 }, padding: true, activation: var => CNTK.CNTKLib.LeakyReLU(var, 0.1))
            //                             .ConvolutionTranspose(
            //                                filterShape: new int[] { 4, 4 },
            //                                numberOfFilters: 256,
            //                                strides: new int[] { 2, 2 },
            //                                outputShape: new int[] { 32, 32 },
            //                                padding: true,
            //                                activation: var => CNTK.CNTKLib.LeakyReLU(var, 0.1))
            //                             .Convolution2D(256, new int[] { 5, 5 }, padding: true, activation: var => CNTK.CNTKLib.LeakyReLU(var, 0.1))
            //                             .Convolution2D(256, new int[] { 5, 5 }, padding: true, activation: var => CNTK.CNTKLib.LeakyReLU(var, 0.1))
            //                             .Convolution2D(channels, new int[] { 7, 7 }, padding: true, activation: CNTK.CNTKLib.Tanh)
            //                             .ToNetwork();

            // Discriminator Input Variable
            Variable discriminatorVar = CNTK.Variable.InputVariable(new int[] { imageWidth, imageHeight, channels }, DataType.Float, name: "discriminator_input");

            // Discriminator Architecture
            Function discriminator = discriminatorVar.Convolution2D(32, new int[] { 7, 7 }, padding: true, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                     .Convolution2D(32, new int[] { 5, 5 }, padding: true, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                     .Pooling(PoolingType.Max, new int[] { 5, 5 }, new int[] { 1, 1 })
                                     .Convolution2D(64, new int[] { 5, 5 }, padding: true, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                     .Convolution2D(64, new int[] { 5, 5 }, padding: true, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                     .Pooling(PoolingType.Max, new int[] { 5, 5 }, new int[] { 1, 1 })
                                     .Convolution2D(128, new int[] { 4, 4 }, padding: true, strides: new int[] { 2, 2 }, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                     .Convolution2D(256, new int[] { 4, 4 }, padding: true, strides: new int[] { 2, 2 }, activation: v => CNTK.CNTKLib.LeakyReLU(v, 0.1))
                                     .Pooling(PoolingType.Max, new int[] { 4, 4 }, new int[] { 1, 1 })
                                     .Convolution2D(512, new int[] { 3, 3 }, strides: new int[] { 2, 2 }, activation: v => CNTK.CNTKLib.Tanh(v))
                                     .Dropout(0.2)
                                     .Dense(1, CNTK.CNTKLib.Sigmoid)
                                     .ToNetwork();

            // +++ Create GAN +++
            Function gan = Gan.CreateGan(generator, discriminator);

            // Output of GAN
            Variable labelVar = CNTK.Variable.InputVariable(new NDShape(0), DataType.Float, "label_var");

            // Loss Function
            Function discriminatorLoss = CNTK.CNTKLib.BinaryCrossEntropy(discriminator, labelVar);
            Function ganLoss           = CNTK.CNTKLib.BinaryCrossEntropy(gan, labelVar);


            // Set up the algorithms for training the discriminator and the GAN (learners)
            Learner discriminatorLearner = discriminator.GetAdaDeltaLearner(1);
            Learner ganLearner           = gan.GetAdaDeltaLearner(0.25);

            // Set up the trainers for calculating the discriminator and GAN loss during each training epoch
            Trainer discriminatorTrainer = discriminator.GetTrainer(discriminatorLearner, discriminatorLoss, discriminatorLoss);
            Trainer ganTrainer           = gan.GetTrainer(ganLearner, ganLoss, ganLoss);


            /// START Training!
            string outputFolder = "./images";

            if (!Directory.Exists(outputFolder))
            {
                Directory.CreateDirectory(outputFolder);
            }

            Console.WriteLine("Training GAN...");

            int numEpoches = 100_000;
            int batchSize  = 15;
            int start      = 0;

            for (int epoch = 0; epoch < numEpoches; epoch++)
            {
                // create a set of fake images..
                var generatedImages = Gan.GenerateImages(generator, batchSize, latentDimensions);
                start = Math.Min(start, trainingData.Length - batchSize);


                ///////////////////////COPIED
                var batch = Gan.GetTrainingBatch(discriminatorVar, generatedImages, trainingData, batchSize, start);
                start += batchSize;
                if (start >= trainingData.Length)
                {
                    start = 0;
                }

                // train the discriminator
                //var discriminatorResult = discriminatorTrainer.TrainMinibatch(
                //    new Dictionary<Variable, Value> { { discriminatorVar, batch.featureBatch }, { labelVar, batch.labelBatch } }, false, CNTK.DeviceDescriptor.GPUDevice(0)
                //    );
                var discriminatorResult = discriminatorTrainer.TrainBatch(
                    new[] {