/// <summary>
/// Helper function to create the generator network.
/// Builds the generator input from noise and/or condition variables, wires a dense
/// sequential network, and stores the resulting output/target variables in fields.
/// </summary>
/// <param name="inputNoiseSize">Dimension of the noise input; 0 disables the noise input.</param>
/// <param name="inputConditionSize">Dimension of the condition input; 0 disables the condition input (non-conditional GAN).</param>
/// <param name="outputSize">Dimension of the generator output.</param>
/// <param name="generatorLayerSize">Number of units in each hidden dense layer.</param>
/// <param name="generatorLayerCount">Number of hidden dense layers.</param>
/// <param name="device">CNTK device on which to create the network.</param>
protected void CreateGenerator(int inputNoiseSize, int inputConditionSize, int outputSize, int generatorLayerSize, int generatorLayerCount, DeviceDescriptor device)
{
    // Create the generator input: noise only, condition only, or both spliced together.
    Variable concatenatedInput;
    if (inputNoiseSize > 0 && inputConditionSize > 0)
    {
        // Conditional GAN: concatenate noise and condition along axis 0.
        InputNoiseGenerator = CNTKLib.InputVariable(new int[] { inputNoiseSize }, DataType.Float);
        InputConditionGenerator = CNTKLib.InputVariable(new int[] { inputConditionSize }, DataType.Float);
        var vsgenerator = new VariableVector();
        vsgenerator.Add(InputNoiseGenerator);
        vsgenerator.Add(InputConditionGenerator);
        concatenatedInput = CNTKLib.Splice(vsgenerator, new Axis(0));
    }
    else if (inputNoiseSize > 0)
    {
        // Noise-only generator (no condition input).
        InputNoiseGenerator = CNTKLib.InputVariable(new int[] { inputNoiseSize }, DataType.Float);
        InputConditionGenerator = null;
        concatenatedInput = InputNoiseGenerator;
    }
    else
    {
        // Condition-only generator (no noise input).
        InputNoiseGenerator = null;
        InputConditionGenerator = CNTKLib.InputVariable(new int[] { inputConditionSize }, DataType.Float);
        concatenatedInput = InputConditionGenerator;
    }

    // Wrap the input variable and build the dense sequential generator model.
    var inputG = new InputLayerCNTKVar(concatenatedInput);
    var outputLayerG = new OutputLayerDense(outputSize, null, OutputLayerDense.LossFunction.Square);
    GeneratorSequentialModel = new SequentialNetworkDense(inputG, LayerDefineHelper.DenseLayers(generatorLayerCount, generatorLayerSize, true, NormalizationMethod.None), outputLayerG, device);

    // Expose the generator output and the target input used for training.
    GeneratorOutput = GeneratorSequentialModel.OutputLayer.GetOutputVariable();
    InputTargetGenerator = GeneratorSequentialModel.OutputLayer.GetTargetInputVariable();
}
/// <summary>
/// Helper function to create the discriminators.
/// Builds the "real" discriminator over real data (optionally spliced with a condition
/// input), then clones it with shared parameters over the generator's fake output, and
/// finally combines both outputs into a single merged function.
/// </summary>
/// <param name="fakeDataFromGenerator">Generator output variable fed to the fake discriminator clone.</param>
/// <param name="inputConditionSize">Dimension of the condition input; 0 means a non-conditional GAN.</param>
/// <param name="outputSize">Dimension of the data being discriminated (the generator's output size).</param>
/// <param name="discriminatorLayerSize">Number of units in each hidden dense layer.</param>
/// <param name="discriminatorLayerCount">Number of hidden dense layers.</param>
/// <param name="device">CNTK device on which to create the network.</param>
protected void CreateDiscriminators(Variable fakeDataFromGenerator, int inputConditionSize, int outputSize, int discriminatorLayerSize, int discriminatorLayerCount, DeviceDescriptor device)
{
    // Create the discriminator input based on whether this is a conditional GAN.
    Variable concatenatedInput = null;
    if (inputConditionSize > 0)
    {
        // Conditional GAN: the real discriminator sees real data spliced with its
        // condition; a separate condition variable is created for the fake clone.
        InputDataDiscriminatorReal = CNTKLib.InputVariable(new int[] { outputSize }, DataType.Float);
        InputConditionDiscriminatorReal = CNTKLib.InputVariable(new int[] { inputConditionSize }, DataType.Float);
        InputConditionDiscriminatorFake = CNTKLib.InputVariable(new int[] { inputConditionSize }, DataType.Float);
        var vsDiscriminator = new VariableVector();
        vsDiscriminator.Add(InputDataDiscriminatorReal);
        vsDiscriminator.Add(InputConditionDiscriminatorReal);
        concatenatedInput = CNTKLib.Splice(vsDiscriminator, new Axis(0));
    }
    else
    {
        // Non-conditional GAN: the discriminator sees only the data.
        InputDataDiscriminatorReal = CNTKLib.InputVariable(new int[] { outputSize }, DataType.Float);
        InputConditionDiscriminatorReal = null;
        InputConditionDiscriminatorFake = null;
        concatenatedInput = InputDataDiscriminatorReal;
    }

    var inputD = new InputLayerCNTKVar(concatenatedInput);
    // Single sigmoid output: probability that the input is real.
    var outputLayerD = new OutputLayerDense(1, new SigmoidDef(), OutputLayerDense.LossFunction.Square);

    // Create the discriminator sequential model.
    DiscriminatorSequentialModel = new SequentialNetworkDense(inputD, LayerDefineHelper.DenseLayers(discriminatorLayerCount, discriminatorLayerSize, true, NormalizationMethod.None), outputLayerD, device);

    // Real discriminator output.
    DiscriminatorRealOutput = DiscriminatorSequentialModel.OutputLayer.GetOutputVariable();

    // Clone the discriminator with shared parameters, rebinding its inputs to the
    // generator's fake data (and, for conditional GANs, the fake condition input).
    if (inputConditionSize > 0)
    {
        DiscriminatorFakeOutput = ((Function)DiscriminatorRealOutput).Clone(ParameterCloningMethod.Share, new Dictionary<Variable, Variable>()
        {
            { InputDataDiscriminatorReal, fakeDataFromGenerator },
            { InputConditionDiscriminatorReal, InputConditionDiscriminatorFake }
        });
    }
    else
    {
        DiscriminatorFakeOutput = ((Function)DiscriminatorRealOutput).Clone(ParameterCloningMethod.Share, new Dictionary<Variable, Variable>()
        {
            { InputDataDiscriminatorReal, fakeDataFromGenerator }
        });
    }

    // Combine real and fake outputs so both can be evaluated in one pass.
    DiscriminatorMerged = Function.Combine(new List<Variable>() { DiscriminatorRealOutput, DiscriminatorFakeOutput });
}
/// <summary>
/// Constructs a trainer for a dense sequential network: creates a learner from the
/// network's parameters, builds the CNTK trainer, and allocates a data buffer sized
/// to the flattened input and target shapes.
/// </summary>
/// <param name="net">The network to train.</param>
/// <param name="trainingLearner">Definition used to create the learner over the network's parameters.</param>
/// <param name="device">CNTK device used for training.</param>
/// <param name="maxDataBufferCount">Maximum number of samples retained in the data buffer.</param>
public TrainerSimpleNN(SequentialNetworkDense net, LearnerDefs.LearnerDef trainingLearner, DeviceDescriptor device, int maxDataBufferCount = 50000)
{
    Device = device;
    networkRef = net;

    // Create a single learner over all trainable parameters of the network.
    var trainableParameters = net.CNTKFunction.Parameters();
    Learner createdLearner = trainingLearner.Create(trainableParameters);
    learners = new List<Learner>();
    learners.Add(createdLearner);

    trainer = Trainer.CreateTrainer(networkRef.OutputLayer.GetOutputVariable(),
        networkRef.OutputLayer.GetTrainingLossVariable(),
        networkRef.OutputLayer.GetTrainingLossVariable(), //use training loss for eval error for now
        learners);

    // Flatten the input shape into a total element count for the buffer layout.
    int inputSize = 1;
    var inputShape = networkRef.InputLayer.InputVariable.Shape.Dimensions;
    foreach (var dimension in inputShape)
    {
        inputSize *= dimension;
    }

    // Flatten the target shape the same way.
    int targetSize = 1;
    var targetShape = networkRef.OutputLayer.GetTargetInputVariable().Shape.Dimensions;
    foreach (var dimension in targetShape)
    {
        targetSize *= dimension;
    }

    // Allocate the sample buffer with one float channel per input/target element.
    dataBuffer = new DataBuffer(maxDataBufferCount,
        new DataBuffer.DataInfo("Input", DataBuffer.DataType.Float, inputSize),
        new DataBuffer.DataInfo("Target", DataBuffer.DataType.Float, targetSize)
    );
}