Example #1
        public override void Train()
        {
            var graph = Config.IsImportingGraph ? ImportGraph() : BuildGraph();

            var config = new ConfigProto {
                AllowSoftPlacement = true
            };

            using (var sess = tf.Session(graph, config: config))
            {
                //tf.train.export_meta_graph("tfnet.meta");
                //var json = JsonConvert.SerializeObject(graph._nodes_by_name.Select(x => x.Value).ToArray(), Formatting.Indented);
                //File.WriteAllText($"YOLOv3/nodes-{(IsImportingGraph ? "right" : "wrong")}.txt", json);

                sess.run(tf.global_variables_initializer());
                print($"=> Restoring weights from: {cfg.TRAIN.INITIAL_WEIGHT} ... ");
                loader.restore(sess, cfg.TRAIN.INITIAL_WEIGHT);
                first_stage_epochs = 0;

                foreach (var epoch in range(1, 1 + first_stage_epochs + second_stage_epochs))
                {
                    if (epoch <= first_stage_epochs)
                    {
                        train_op = train_op_with_frozen_variables;
                    }
                    else
                    {
                        train_op = train_op_with_all_variables;
                    }

                    foreach (var train_data in trainset.Items())
                    {
                        var results = sess.run(new object[] { train_op, loss, global_step },
                                               (input_data, train_data[0]),
                                               (label_sbbox, train_data[1]),
                                               // ... snippet truncated here in the source; the remaining
                                               // feed items and the rest of the training loop are not shown.
Example #2
        public Estimator(Action model_fn, RunConfig config)
        {
            _config         = config;
            _model_dir      = _config.model_dir;
            _session_config = _config.session_config;
            _model_fn       = model_fn;
        }

        public string config_proto_serialized()
        {
            var config = new ConfigProto
            {
                AllowSoftPlacement = true,
            };

            return config.ToByteString().ToStringUtf8();
        }
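The serialized string is just the UTF-8 view of the message's protobuf bytes. As a minimal round-trip sketch (assuming ConfigProto is a standard Google.Protobuf-generated message, which is how TensorFlow.NET ships it; the variable names here are made up):

        using Google.Protobuf;   // ByteString
        using Tensorflow;        // ConfigProto

        // Serialize a ConfigProto, then parse it back from the UTF-8 string.
        // Caveat: treating raw protobuf bytes as UTF-8 only round-trips while the
        // payload happens to be valid UTF-8, as it is for this small message.
        var original   = new ConfigProto { AllowSoftPlacement = true };
        var serialized = original.ToByteString().ToStringUtf8();
        var restored   = ConfigProto.Parser.ParseFrom(ByteString.CopyFromUtf8(serialized));
        Console.WriteLine(restored.AllowSoftPlacement);   // True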
Example #4
        public CSession(Graph graph, Status s, bool user_XLA = false)
        {
            // NOTE: user_XLA (likely a typo for use_XLA) is unused in this snippet.
            lock (Locks.ProcessWide)
            {
                var config = new ConfigProto {
                    InterOpParallelismThreads = 4
                };
                session_ = new Session(graph, config, s);
            }
        }
Example #5
        private void InferenceEmotion(int shotsTaken, NDArray img_buffer_)
        {
            var         imgArr    = ReadTensorFromDetected(img_buffer_, img_size: 60);
            ConfigProto config    = new ConfigProto();
            GPUOptions  gpuConfig = new GPUOptions();

            gpuConfig.AllowGrowth = true;
            gpuConfig.PerProcessGpuMemoryFraction = 0.3;
            config.GpuOptions = gpuConfig;

            using (var sess = tf.Session(emotionGraph, config))
            {
                Tensor   tensorClasses = emotionGraph.OperationByName("Identity");
                Tensor   imgTensor     = emotionGraph.OperationByName("x");
                Tensor[] outTensorArr  = new Tensor[] { tensorClasses };

                var results = sess.run(outTensorArr, new FeedItem(imgTensor, imgArr));

                var emotions = results[0].ToArray<float>();
                //var records = new List<object>
                //{
                //    new { Frame = shotsTaken, Results = results[0] },
                //};
                //csv.WriteRecord(new { Frame = shotsTaken, Results = results[0] });
                //csv.Flush();
                var record = new CSVRecord();
                record.Neutral   = (int)(Math.Round(emotions[0], 2) * 100);
                record.Happy     = (int)(Math.Round(emotions[1], 2) * 100);
                record.Sad       = (int)(Math.Round(emotions[2], 2) * 100);
                record.Angry     = (int)(Math.Round(emotions[3], 2) * 100);
                record.Surprised = (int)(Math.Round(emotions[4], 2) * 100);
                record.Date      = DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss");

                using (var stream = File.Open("output.csv", FileMode.Append))
                using (var writer = new StreamWriter(stream))
                using (var csv = new CsvWriter(writer, CultureInfo.InvariantCulture))
                {
                    // Don't write the header again.
                    csv.Configuration.HasHeaderRecord = false;
                    csv.WriteRecord(record);
                    csv.NextRecord();
                }

                Console.WriteLine($"Results: {results[0]}");
                //PreProcessEmotion(img_buffer, results[0]);
            }
        }
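The CSVRecord type is not included in the snippet; a minimal shape inferred purely from the properties assigned above (a hypothetical reconstruction, not the original class):

        // Hypothetical reconstruction of the record type used above.
        public class CSVRecord
        {
            public int    Neutral   { get; set; }
            public int    Happy     { get; set; }
            public int    Sad       { get; set; }
            public int    Angry     { get; set; }
            public int    Surprised { get; set; }
            public string Date      { get; set; }
        }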
Example #6
        public bool Run()
        {
            PrepareData();

            var graph = IsImportingGraph ? ImportGraph() : BuildGraph();

            var config = new ConfigProto {
                AllowSoftPlacement = true
            };

            using (var sess = tf.Session(graph, config: config))
            {
                Train(sess);
            }

            return true;
        }
Example #7
        private NDArray ReadTensorFromDetected(NDArray img_buffer_, int img_size = 60)
        {
            var         graph     = tf.Graph().as_default();
            ConfigProto config    = new ConfigProto();
            GPUOptions  gpuConfig = new GPUOptions();

            gpuConfig.AllowGrowth = true;
            gpuConfig.PerProcessGpuMemoryFraction = 0.3;
            config.GpuOptions = gpuConfig;

            var t3 = tf.constant(img_buffer_, dtype: TF_DataType.TF_UINT8);
            //var inp = tf.reshape(t3, (height, width, 3));
            var casted        = tf.cast(t3, tf.float32);
            var dims_expander = tf.expand_dims(casted, 0);
            var resize        = tf.constant(new int[] { img_size, img_size });
            var bilinear      = tf.image.resize_bilinear(dims_expander, resize);

            using (var sess = tf.Session(graph, config))
                return sess.run(bilinear);
        }
Example #8
        private NDArray InferenceDetector(NDArray img_buffer_)
        {
            var         imgArr    = ReadTensorFromImageFile(img_buffer_);
            ConfigProto config    = new ConfigProto();
            GPUOptions  gpuConfig = new GPUOptions();

            gpuConfig.AllowGrowth = true;
            gpuConfig.PerProcessGpuMemoryFraction = 0.3;
            config.GpuOptions = gpuConfig;

            using (var sess = tf.Session(detectorGraph, config))
            {
                Tensor   tensorClasses = detectorGraph.OperationByName("Identity");
                Tensor   imgTensor     = detectorGraph.OperationByName("x");
                Tensor[] outTensorArr  = new Tensor[] { tensorClasses };

                var results = sess.run(outTensorArr, new FeedItem(imgTensor, imgArr));

                //Console.WriteLine($"Results: {results[0].ToString()}");
                return PreProcessEmotion(img_buffer_, results[0]);
            }
        }
Example #9
        //Errors with CNTK: https://github.com/Microsoft/CNTK/issues/2614
        public void Train(PredictorTrainingContext ctx)
        {
            InitialSetup();

            tf.compat.v1.disable_eager_execution();
            var p = ctx.Predictor;

            var nn = (NeuralNetworkSettingsEntity)p.AlgorithmSettings;

            Tensor inputPlaceholder  = tf.placeholder(tf.float32, new[] { -1, ctx.InputCodifications.Count }, "inputPlaceholder");
            Tensor outputPlaceholder = tf.placeholder(tf.float32, new[] { -1, ctx.OutputCodifications.Count }, "outputPlaceholder");

            Tensor currentTensor = inputPlaceholder;

            nn.HiddenLayers.ForEach((layer, i) =>
            {
                currentTensor = NetworkBuilder.DenseLayer(currentTensor, layer.Size, layer.Activation, layer.Initializer, p.Settings.Seed ?? 0, "hidden" + i);
            });
            Tensor output           = NetworkBuilder.DenseLayer(currentTensor, ctx.OutputCodifications.Count, nn.OutputActivation, nn.OutputInitializer, p.Settings.Seed ?? 0, "output");
            Tensor calculatedOutput = tf.identity(output, "calculatedOutput");

            Tensor loss     = NetworkBuilder.GetEvalFunction(nn.LossFunction, outputPlaceholder, calculatedOutput);
            Tensor accuracy = NetworkBuilder.GetEvalFunction(nn.EvalErrorFunction, outputPlaceholder, calculatedOutput);

            // prepare for training
            Optimizer optimizer = NetworkBuilder.GetOptimizer(nn);

            Operation trainOperation = optimizer.minimize(loss);

            Random rand = p.Settings.Seed == null ?
                          new Random() :
                          new Random(p.Settings.Seed.Value);

            var (training, validation) = ctx.SplitTrainValidation(rand);

            var minibatchSize  = nn.MinibatchSize;
            var numMinibatches = nn.NumMinibatches;

            Stopwatch            sw         = Stopwatch.StartNew();
            List<FinalCandidate> candidates = new List<FinalCandidate>();

            var config = new ConfigProto
            {
                IntraOpParallelismThreads = 1,
                InterOpParallelismThreads = 1,
                LogDevicePlacement        = true
            };

            ctx.ReportProgress("Deleting Files");
            var dir = PredictorDirectory(ctx.Predictor);

            if (Directory.Exists(dir))
            {
                Directory.Delete(dir, true);
            }

            Directory.CreateDirectory(dir);

            ctx.ReportProgress("Starting training...");

            var saver = tf.train.Saver();

            using (var sess = tf.Session(config))
            {
                sess.run(tf.global_variables_initializer());

                for (int i = 0; i < numMinibatches; i++)
                {
                    using (HeavyProfiler.Log("MiniBatch", () => i.ToString()))
                    {
                        var trainMinibatch = 0.To(minibatchSize).Select(_ => rand.NextElement(training)).ToList();

                        var inputValue  = CreateNDArray(ctx, trainMinibatch, ctx.InputCodifications.Count, ctx.InputCodificationsByColumn);
                        var outputValue = CreateNDArray(ctx, trainMinibatch, ctx.OutputCodifications.Count, ctx.OutputCodificationsByColumn);

                        using (HeavyProfiler.Log("TrainMinibatch", () => i.ToString()))
                        {
                            sess.run(trainOperation,
                                     (inputPlaceholder, inputValue),
                                     (outputPlaceholder, outputValue));
                        }

                        if (ctx.StopTraining)
                        {
                            p = ctx.Predictor = ctx.Predictor.ToLite().RetrieveAndRemember();
                        }

                        var isLast = numMinibatches - nn.BestResultFromLast <= i;
                        if (isLast || (i % nn.SaveProgressEvery) == 0 || ctx.StopTraining)
                        {
                            float loss_val;
                            float accuracy_val;

                            using (HeavyProfiler.Log("EvalTraining", () => i.ToString()))
                            {
                                (loss_val, accuracy_val) = sess.run((loss, accuracy),
                                                                    (inputPlaceholder, inputValue),
                                                                    (outputPlaceholder, outputValue));
                            }

                            var ep = new EpochProgress
                            {
                                Ellapsed           = sw.ElapsedMilliseconds,
                                Epoch              = i,
                                TrainingExamples   = i * minibatchSize,
                                LossTraining       = loss_val,
                                AccuracyTraining   = accuracy_val,
                                LossValidation     = null,
                                AccuracyValidation = null,
                            };

                            ctx.ReportProgress($"Training Minibatches Loss:{loss_val} / Accuracy:{accuracy_val}", (i + 1) / (decimal)numMinibatches);

                            ctx.Progresses.Enqueue(ep);

                            if (isLast || (i % nn.SaveValidationProgressEvery) == 0 || ctx.StopTraining)
                            {
                                using (HeavyProfiler.LogNoStackTrace("EvalValidation"))
                                {
                                    var validateMinibatch = 0.To(minibatchSize).Select(_ => rand.NextElement(validation)).ToList();

                                    var inputValValue  = CreateNDArray(ctx, validateMinibatch, ctx.InputCodifications.Count, ctx.InputCodificationsByColumn);
                                    var outputValValue = CreateNDArray(ctx, validateMinibatch, ctx.OutputCodifications.Count, ctx.OutputCodificationsByColumn);

                                    (loss_val, accuracy_val) = sess.run((loss, accuracy),
                                                                        (inputPlaceholder, inputValValue),
                                                                        (outputPlaceholder, outputValValue));

                                    ep.LossValidation     = loss_val;
                                    ep.AccuracyValidation = accuracy_val;
                                }
                            }

                            var progress = ep.SaveEntity(ctx.Predictor);

                            if (isLast || ctx.StopTraining)
                            {
                                Directory.CreateDirectory(TrainingModelDirectory(ctx.Predictor, i));
                                var save = saver.save(sess, Path.Combine(TrainingModelDirectory(ctx.Predictor, i), ModelFileName));

                                using (HeavyProfiler.LogNoStackTrace("FinalCandidate"))
                                {
                                    candidates.Add(new FinalCandidate
                                    {
                                        ModelIndex     = i,
                                        ResultTraining = new PredictorMetricsEmbedded {
                                            Accuracy = progress.AccuracyTraining, Loss = progress.LossTraining
                                        },
                                        ResultValidation = new PredictorMetricsEmbedded {
                                            Accuracy = progress.AccuracyValidation, Loss = progress.LossValidation
                                        },
                                    });
                                }
                            }
                        }

                        if (ctx.StopTraining)
                        {
                            break;
                        }
                    }
                }
            }

            var best = candidates.WithMin(a => a.ResultValidation.Loss!.Value);

            p.ResultTraining   = best.ResultTraining;
            p.ResultValidation = best.ResultValidation;

            var files = Directory.GetFiles(TrainingModelDirectory(ctx.Predictor, best.ModelIndex));

            p.Files.AddRange(files.Select(f => new Entities.Files.FilePathEmbedded(PredictorFileType.PredictorFile, f)));

            using (OperationLogic.AllowSave<PredictorEntity>())
                p.Save();
        }
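FinalCandidate is likewise not shown; judging only from the object initializer above, it is presumably a small DTO along these lines (hypothetical):

        // Hypothetical shape of FinalCandidate, inferred from its usage above.
        public class FinalCandidate
        {
            public int ModelIndex { get; set; }
            public PredictorMetricsEmbedded ResultTraining   { get; set; }
            public PredictorMetricsEmbedded ResultValidation { get; set; }
        }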
Example #10
        /// <summary>
        /// Default method to build a fully-connected graph of the specified structure
        /// </summary>
        /// <param name="layerNodes">The number of nodes to have at each layer. Index [0] is the number of inputs, and [length - 1] is the number of outputs.</param>
        /// <returns></returns>
        public virtual Graph BuildFullyConnectedGraphInt(int[] layerNodes, float learningRate = 0.01f)
        {
            // NOTE: eager execution is enabled here even though the method then builds
            // a graph with placeholders and runs it in a session (graph-mode APIs).
            tf.enable_eager_execution();
            var g = new Graph().as_default();

            tf_with(tf.name_scope("Input"), delegate
            {
                Input = tf.placeholder(tf.float32, shape: new TensorShape(-1, layerNodes[0]));
                YTrue = tf.placeholder(tf.int32, shape: new TensorShape(-1, layerNodes[layerNodes.Length - 1]));
            });

            tf_with(tf.variable_scope("FullyConnected"), delegate
            {
                Tensor x = Input;
                Tensor y = null;

                for (int i = 1; i < (layerNodes.Length - 1); i++)
                {
                    var w      = tf.get_variable("w" + i, shape: new TensorShape(layerNodes[i - 1], layerNodes[i]), initializer: tf.random_normal_initializer(stddev: 0.1f));
                    var b      = tf.get_variable("b" + i, shape: new TensorShape(layerNodes[i]), initializer: tf.constant_initializer(0.1));
                    Prediction = tf.matmul(x, w) + b;
                    y          = tf.nn.relu(Prediction);
                    x          = y;
                }

                var w2     = tf.get_variable("w_out", shape: new TensorShape(layerNodes[layerNodes.Length - 2], layerNodes[layerNodes.Length - 1]), initializer: tf.random_normal_initializer(stddev: 0.1f));
                var b2     = tf.get_variable("b_out", shape: new TensorShape(layerNodes[layerNodes.Length - 1]), initializer: tf.constant_initializer(0.1));
                Prediction = tf.matmul(y, w2) + b2;
            });

            tf_with(tf.variable_scope("Loss"), delegate
            {
                var losses = tf.nn.sigmoid_cross_entropy_with_logits(tf.cast(YTrue, tf.float32), Prediction);
                LossFunc   = tf.reduce_mean(losses);
            });

            tf_with(tf.variable_scope("Accuracy"), delegate
            {
                var y_pred = tf.cast(Prediction > 0, tf.int32);
                Accuracy   = tf.reduce_mean(tf.cast(tf.equal(y_pred, YTrue), tf.float32));
            });

            // We add the training operation, ...
            var adam = tf.train.AdamOptimizer(learningRate);   // Set the learning rate here

            TrainOp = adam.minimize(LossFunc, name: "train_op");

            // Create the new session
            var config = new ConfigProto
            {
                InterOpParallelismThreads = 1,
                IntraOpParallelismThreads = 1,
                GpuOptions         = new GPUOptions(),
                LogDevicePlacement = true
            };

            Sess = tf.Session(config);

            return g;
        }
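A minimal usage sketch for the builder above, assuming the containing class exposes the Sess, Input, YTrue, TrainOp, and LossFunc members the method assigns (the net instance and feed data here are made up, and exact NumSharp helper signatures vary between versions):

        // Sketch: build the graph, initialize variables, run one training step.
        var g = net.BuildFullyConnectedGraphInt(new[] { 4, 8, 2 });
        net.Sess.run(tf.global_variables_initializer());

        var x = np.ones(32, 4).astype(np.float32);   // dummy inputs
        var y = np.zeros(32, 2).astype(np.int32);    // dummy labels

        net.Sess.run(net.TrainOp, (net.Input, x), (net.YTrue, y));
        var lossValue = net.Sess.run(net.LossFunc, (net.Input, x), (net.YTrue, y));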
Example #11
        /// <summary>
        /// Default method to build a fully-connected graph of the specified structure
        /// </summary>
        /// <param name="layerNodes">The number of nodes to have at each layer. Index [0] is the number of inputs, and [length - 1] is the number of outputs.</param>
        /// <returns></returns>
        public virtual Graph BuildFullyConnectedGraphFloat(int[] layerNodes, float learningRate = 0.01f)
        {
            // NOTE: as in the previous example, eager execution is enabled even though
            // the method builds and runs a graph-mode model with placeholders.
            tf.enable_eager_execution();
            var g = tf.get_default_graph();

            tf_with(tf.name_scope("Input"), delegate
            {
                Input = tf.placeholder(tf.float32, shape: new TensorShape(-1, layerNodes[0]));
                YTrue = tf.placeholder(tf.float32, shape: new TensorShape(-1, layerNodes[layerNodes.Length - 1]));
            });

            tf_with(tf.variable_scope("FullyConnected"), delegate
            {
                Tensor x = Input;
                Tensor y = null;

                for (int i = 1; i < (layerNodes.Length - 1); i++)
                {
                    var w      = tf.get_variable("w" + i, shape: new TensorShape(layerNodes[i - 1], layerNodes[i]), initializer: tf.random_normal_initializer(stddev: 0.1f));
                    var b      = tf.get_variable("b" + i, shape: new TensorShape(layerNodes[i]), initializer: tf.constant_initializer(0.1));
                    Prediction = tf.matmul(x, w) + b;
                    y          = tf.nn.relu(Prediction);
                    x          = y;
                }

                var w2     = tf.get_variable("w_out", shape: new TensorShape(layerNodes[layerNodes.Length - 2], layerNodes[layerNodes.Length - 1]), initializer: tf.random_normal_initializer(stddev: 0.1f));
                var b2     = tf.get_variable("b_out", shape: new TensorShape(layerNodes[layerNodes.Length - 1]), initializer: tf.constant_initializer(0.1));
                Prediction = tf.matmul(y, w2) + b2;
            });

            tf_with(tf.variable_scope("Loss"), delegate
            {
                // The locals below only feed the commented-out alternative loss
                // formulations; the active loss is the mean absolute error below.
                var lossVal = (Prediction - YTrue);
                var scaler  = (YTrue - Prediction);
                var correct = tf.cast(tf.logical_and((YTrue < 10), (Prediction < 10)), TF_DataType.TF_FLOAT);
                //LossFunc = tf.reduce_mean((lossVal * 0.01 * correct) + (lossVal * tf.maximum((10 - Prediction), 1) * (1 - correct)));
                var mean = tf.reduce_mean(lossVal);
                var std  = tf.sqrt(tf.reduce_mean(tf.square(Prediction - mean)));

                //LossFunc = (tf.reduce_mean(scaler));
                //LossFunc = tf.reduce_mean(Prediction + tf.maximum(YTrue - Prediction, 0));
                //LossFunc = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(YTrue, Prediction));
                LossFunc = tf.reduce_mean(tf.abs(Prediction - YTrue));
            });

            tf_with(tf.variable_scope("Accuracy"), delegate
            {
                Accuracy = tf.reduce_mean(tf.abs(tf.sub(Prediction, YTrue)));
            });

            // We add the training operation, ...
            var adam = tf.train.AdamOptimizer(learningRate);   // Set the learning rate here

            TrainOp = adam.minimize(LossFunc, name: "train_op");

            // Create the new session
            var config = new ConfigProto
            {
                InterOpParallelismThreads = 1,
                IntraOpParallelismThreads = 1,
                LogDevicePlacement        = true
            };

            Sess = tf.Session(g, config);

            return g;
        }
Example #12
        public Estimator(RunConfig config)
        {
            _config         = config;
            _model_dir      = _config.model_dir;
            _session_config = _config.session_config;
        }
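Taken together with Example #2, the two constructors differ only in whether a model function is supplied. A hypothetical call site (the RunConfig construction and model function are stand-ins):

        // Hypothetical usage of the constructors from Examples #2 and #12.
        Action myModelFn = () => { /* build the model graph here */ };
        var runConfig    = new RunConfig();   // assumed default-constructible
        var withModelFn    = new Estimator(myModelFn, runConfig);
        var withoutModelFn = new Estimator(runConfig);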