Example 1
        private static TFSession LoadTFSession(IHostEnvironment env, string exportDirSavedModel)
        {
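            // Loads a SavedModel exported under the "serve" tag into a new TFSession.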
            Contracts.Check(env != null, nameof(env));
            env.CheckValue(exportDirSavedModel, nameof(exportDirSavedModel));
            var sessionOptions = new TFSessionOptions();
            var tags           = new string[] { "serve" };
            var graph          = new TFGraph();
            var metaGraphDef   = new TFBuffer();

            return(TFSession.FromSavedModel(sessionOptions, null, exportDirSavedModel, tags, graph, metaGraphDef));
        }
Example 2
        public TensorProcessor(byte[] model, string device = "/CPU:0")
        {
            _graph = new TFGraph();
            var options = new TFImportGraphDefOptionsExt();

            //options.SetDefaultDevice(device);
            _graph.Import(model, options);

            TFSessionOptions TFOptions = new TFSessionOptions();

            unsafe
            {
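                // These bytes appear to be a serialized tensorflow.ConfigProto enabling allow_soft_placement.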
                byte[] GPUConfig = { 0x38, 0x1 };

                fixed(void *ptr = &GPUConfig[0])
                {
                    TFOptions.SetConfig(new IntPtr(ptr), GPUConfig.Length);
                }
            }
            _session = new TFSession(_graph, TFOptions);

            Console.WriteLine($"=> Session for {device} created with: {String.Join(',', _session.ListDevices().Select(x => x.Name).ToList())}");
        }
Example 3
        public override IObservable <Pose> Process(IObservable <IplImage> source)
        {
            return(Observable.Defer(() =>
            {
                TFSessionOptions options = new TFSessionOptions();
                unsafe
                {
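                    // These bytes appear to be a serialized tensorflow.ConfigProto with gpu_options { allow_growth: true }.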
                    byte[] GPUConfig = new byte[] { 0x32, 0x02, 0x20, 0x01 };
                    fixed(void *ptr = &GPUConfig[0])
                    {
                        options.SetConfig(new IntPtr(ptr), GPUConfig.Length);
                    }
                }

                var graph = new TFGraph();
                var session = new TFSession(graph, options, null);
                var bytes = File.ReadAllBytes(ModelFileName);
                graph.Import(bytes);

                IplImage temp = null;
                TFTensor tensor = null;
                TFSession.Runner runner = null;
                var config = ConfigHelper.PoseConfig(PoseConfigFileName);
                return source.Select(input =>
                {
                    var poseScale = 1.0;
                    const int TensorChannels = 3;
                    var frameSize = input.Size;
                    var scaleFactor = ScaleFactor;
                    if (scaleFactor.HasValue)
                    {
                        poseScale = scaleFactor.Value;
                        frameSize.Width = (int)(frameSize.Width * poseScale);
                        frameSize.Height = (int)(frameSize.Height * poseScale);
                        poseScale = 1.0 / poseScale;
                    }

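                    // Allocate the input tensor and runner lazily, and rebuild them whenever the frame size changes.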
                    if (tensor == null || tensor.GetTensorDimension(1) != frameSize.Height || tensor.GetTensorDimension(2) != frameSize.Width)
                    {
                        tensor = new TFTensor(
                            TFDataType.Float,
                            new long[] { 1, frameSize.Height, frameSize.Width, TensorChannels },
                            frameSize.Width * frameSize.Height * TensorChannels * sizeof(float));
                        runner = session.GetRunner();
                        runner.AddInput(graph["Placeholder"][0], tensor);
                        runner.Fetch(graph["concat_1"][0]);
                    }

                    var frame = input;
                    if (frameSize != input.Size)
                    {
                        if (temp == null || temp.Size != frameSize)
                        {
                            temp = new IplImage(frameSize, input.Depth, input.Channels);
                        }

                        CV.Resize(input, temp);
                        frame = temp;
                    }

                    using (var image = new IplImage(frameSize, IplDepth.F32, TensorChannels, tensor.Data))
                    {
                        CV.Convert(frame, image);
                    }

                    // Run the model
                    var output = runner.Run();

                    // Fetch the results from output:
                    var poseTensor = output[0];
                    var pose = new Mat((int)poseTensor.Shape[0], (int)poseTensor.Shape[1], Depth.F32, 1, poseTensor.Data);
                    var result = new Pose(input);
                    var threshold = MinConfidence;
                    for (int i = 0; i < pose.Rows; i++)
                    {
                        BodyPart bodyPart;
                        bodyPart.Name = config[i];
                        bodyPart.Confidence = (float)pose.GetReal(i, 2);
                        if (bodyPart.Confidence < threshold)
                        {
                            bodyPart.Position = new Point2f(float.NaN, float.NaN);
                        }
                        else
                        {
                            bodyPart.Position.X = (float)(pose.GetReal(i, 1) * poseScale);
                            bodyPart.Position.Y = (float)(pose.GetReal(i, 0) * poseScale);
                        }
                        result.Add(bodyPart);
                    }
                    return result;
                });
            }));
        }
Example 4
        public FlexNet3D(string modelDir, int3 boxDimensions, int gpuID = 0, int nThreads = 1, bool forTraining = true, int batchSize = 128, int bottleneckWidth = 2, int layerWidth = 64, int nlayers = 4)
        {
            BoxDimensions   = boxDimensions;
            ForTraining     = forTraining;
            BatchSize       = batchSize;
            BottleneckWidth = bottleneckWidth;
            NWeights0       = layerWidth;
            NLayers         = nlayers;
            ModelDir        = modelDir;
            MaxThreads      = nThreads;

            TFSessionOptions SessionOptions = TFHelper.CreateOptions();
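            // A throwaway session seems to be created first so TensorFlow initializes with these options before the SavedModel is loaded.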
            TFSession        Dummy          = new TFSession(new TFGraph(), SessionOptions);

            Session = TFHelper.FromSavedModel(SessionOptions, null, ModelDir, new[] { forTraining ? "train" : "serve" }, new TFGraph(), $"/device:GPU:{gpuID}");
            Graph   = Session.Graph;

            NodeInputSource       = Graph["volume_source"][0];
            NodeInputTarget       = Graph["volume_target"][0];
            NodeInputWeightSource = Graph["volume_weight_source"][0];
            NodeInputWeightTarget = Graph["volume_weight_target"][0];
            NodeDropoutRate       = Graph["training_dropout_rate"][0];
            if (forTraining)
            {
                NodeLearningRate      = Graph["training_learning_rate"][0];
                NodeOrthogonalityRate = Graph["training_orthogonality"][0];
                NodeOpTrain           = Graph["train_momentum"][0];
                NodeOutputLoss        = Graph["l2_loss"][0];
                NodeOutputLossKL      = Graph["kl_loss"][0];
                NodeBottleneck        = Graph["bottleneck"][0];
            }

            NodeCode = Graph["volume_code"][0];

            NodeOutputPredicted = Graph["volume_predict"][0];

            NodeWeights0 = Graph["encoder_0/weights_0"][0];
            NodeWeights1 = Graph[$"decoder_{nlayers - 1}/weights_{nlayers - 1}"][0];
            if (forTraining)
            {
                NodeWeights0Assign = Graph["encoder_0/assign_layer0"][0];
                NodeWeights0Input  = Graph["encoder_0/assign_layer0_values"][0];

                NodeWeights1Assign = Graph[$"decoder_{nlayers - 1}/assign_layer0"][0];
                NodeWeights1Input  = Graph[$"decoder_{nlayers - 1}/assign_layer0_values"][0];
            }

            TensorSource = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, (BoxDimensions.X / 2 + 1), BoxDimensions.Y, BoxDimensions.Z, 2),
                                                                           new float[BatchSize * BoxDimensions.ElementsFFT() * 2],
                                                                           0,
                                                                           BatchSize * (int)BoxDimensions.ElementsFFT() * 2),
                                                  nThreads);

            TensorTarget = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, (BoxDimensions.X / 2 + 1), BoxDimensions.Y, BoxDimensions.Z, 2),
                                                                           new float[BatchSize * BoxDimensions.ElementsFFT() * 2],
                                                                           0,
                                                                           BatchSize * (int)BoxDimensions.ElementsFFT() * 2),
                                                  nThreads);

            TensorWeightSource = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, (BoxDimensions.X / 2 + 1), BoxDimensions.Y, BoxDimensions.Z, 1),
                                                                                 new float[BatchSize * BoxDimensions.ElementsFFT()],
                                                                                 0,
                                                                                 BatchSize * (int)BoxDimensions.ElementsFFT()),
                                                        nThreads);

            TensorWeightTarget = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, (BoxDimensions.X / 2 + 1), BoxDimensions.Y, BoxDimensions.Z, 1),
                                                                                 new float[BatchSize * BoxDimensions.ElementsFFT()],
                                                                                 0,
                                                                                 BatchSize * (int)BoxDimensions.ElementsFFT()),
                                                        nThreads);

            TensorCode = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BottleneckWidth),
                                                                         new float[BatchSize * BottleneckWidth],
                                                                         0,
                                                                         BatchSize * BottleneckWidth),
                                                nThreads);

            TensorLearningRate = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(1),
                                                                                 new float[1],
                                                                                 0,
                                                                                 1),
                                                        nThreads);

            TensorDropoutRate = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(1),
                                                                                new float[1],
                                                                                0,
                                                                                1),
                                                       nThreads);

            TensorOrthogonalityRate = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(1),
                                                                                      new float[1],
                                                                                      0,
                                                                                      1),
                                                             nThreads);

            ResultPredicted  = Helper.ArrayOfFunction(i => new float[BatchSize * BoxDimensions.ElementsFFT() * 2], nThreads);
            ResultBottleneck = Helper.ArrayOfFunction(i => new float[BatchSize * BottleneckWidth], nThreads);
            ResultLoss       = Helper.ArrayOfFunction(i => new float[1], nThreads);
            ResultLossKL     = Helper.ArrayOfFunction(i => new float[1], nThreads);

            RetrievedWeights = new float[boxDimensions.ElementsFFT() * 2 * NWeights0];

            //if (!ForTraining)
            RunnerPrediction = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                      AddInput(NodeCode, TensorCode[i]).
                                                      AddInput(NodeDropoutRate, TensorDropoutRate[i]).
                                                      Fetch(NodeOutputPredicted),
                                                      nThreads);
            //else
            RunnerTraining = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                    AddInput(NodeInputSource, TensorSource[i]).
                                                    AddInput(NodeInputTarget, TensorTarget[i]).
                                                    AddInput(NodeInputWeightSource, TensorWeightSource[i]).
                                                    AddInput(NodeInputWeightTarget, TensorWeightTarget[i]).
                                                    AddInput(NodeDropoutRate, TensorDropoutRate[i]).
                                                    AddInput(NodeLearningRate, TensorLearningRate[i]).
                                                    AddInput(NodeOrthogonalityRate, TensorOrthogonalityRate[i]).
                                                    Fetch(NodeOutputPredicted, NodeOutputLoss, NodeOutputLossKL, NodeBottleneck, NodeOpTrain),
                                                    nThreads);

            RunnerEncode = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                  AddInput(NodeInputSource, TensorSource[i]).
                                                  AddInput(NodeInputWeightSource, TensorWeightSource[i]).
                                                  AddInput(NodeDropoutRate, TensorDropoutRate[i]).
                                                  Fetch(NodeBottleneck),
                                                  nThreads);

            RunnerRetrieveWeights0 = Session.GetRunner().Fetch(NodeWeights0);
            RunnerRetrieveWeights1 = Session.GetRunner().Fetch(NodeWeights1);

            if (ForTraining)
            {
                TensorWeights0 = TFTensor.FromBuffer(new TFShape(NWeights0, BoxDimensions.ElementsFFT() * 2),
                                                     new float[BoxDimensions.ElementsFFT() * 2 * NWeights0],
                                                     0,
                                                     (int)BoxDimensions.ElementsFFT() * 2 * NWeights0);

                RunnerAssignWeights0 = Session.GetRunner().AddInput(NodeWeights0Input, TensorWeights0).
                                       Fetch(NodeWeights0Assign);
                RunnerAssignWeights1 = Session.GetRunner().AddInput(NodeWeights1Input, TensorWeights0).
                                       Fetch(NodeWeights1Assign);
            }

            // Run prediction or training for one batch to claim all the memory needed
            float[] InitDecoded;
            float[] InitBottleneck;
            float[] InitLoss, InitLossKL;
            if (!ForTraining)
            {
                RandomNormal RandN = new RandomNormal(123);
                Predict(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BottleneckWidth * BatchSize),
                        0,
                        out InitDecoded);
            }
            else
            {
                RandomNormal RandN = new RandomNormal();

                Encode(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.ElementsFFT() * 2),
                       Helper.ArrayOfFunction(i => 1f, BatchSize * (int)BoxDimensions.ElementsFFT()),
                       0,
                       out InitBottleneck);

                Train(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.ElementsFFT() * 2),
                      Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.ElementsFFT() * 2),
                      Helper.ArrayOfFunction(i => 1f, BatchSize * (int)BoxDimensions.ElementsFFT()),
                      Helper.ArrayOfFunction(i => 1f, BatchSize * (int)BoxDimensions.ElementsFFT()),
                      0.5f,
                      1e-10f,
                      1e-5f,
                      0,
                      out InitDecoded,
                      out InitBottleneck,
                      out InitLoss,
                      out InitLossKL);
            }
        }
Example 5
        public static void Main(string [] args)
        {
            TFSessionOptions options = new TFSessionOptions();

            unsafe
            {
                //byte[] PUConfig = new byte[] { 0x32, 0x05, 0x20, 0x01, 0x2a, 0x01, 0x30, 0x38, 0x01 }; //gpu
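                // Decodes as device_count { "gpu": 0 }, i.e. CPU-only execution, assuming these are serialized ConfigProto bytes.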
                byte[] PUConfig = new byte[] { 0x0a, 0x07, 0x0a, 0x03, 0x67, 0x70, 0x75, 0x10, 0x00 }; //cpu
                fixed(void *ptr = &PUConfig[0])
                {
                    options.SetConfig(new IntPtr(ptr), PUConfig.Length);
                }
            }
            TFSession session;
            var       graph = new TFGraph();

            using (TFSession sess = new TFSession(graph, options))
                using (var metaGraphUnused = new TFBuffer())
                {
                    session = sess.FromSavedModel(options, null, "tzb", new[] { "serve" }, graph, metaGraphUnused);
                    IEnumerable <TensorFlow.DeviceAttributes> iem = session.ListDevices();
                    foreach (object obj in iem)
                    {
                        Console.WriteLine(((DeviceAttributes)obj).Name);
                    }
                    var labels = File.ReadAllLines("tzb/label.txt");
                    // Print node names

                    /*IEnumerable<TensorFlow.TFOperation> iem = graph.GetEnumerator();
                     * foreach (object obj in iem)
                     * {
                     *  Console.WriteLine(((TFOperation)obj).Name);
                     * }*/
                    //while(true)
                    float[] eimg = new float[224 * 224];
                    for (int i = 0; i < 224 * 224; i++)
                    {
                        eimg[i] = 0;
                    }
                    // NOTE: 'tfs' is not defined in this snippet; a 1x224x224x1 input shape is assumed here to match the 224*224 buffer.
                    var tfs = new TFShape(1, 224, 224, 1);
                    TFTensor ten = TFTensor.FromBuffer(tfs, eimg, 0, 224 * 224 * 1);
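                    // Warm-up runs on a blank image, presumably so the per-file timings below exclude graph initialization.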
                    for (int j = 0; j < 3; j++)
                    {
                        var runner = session.GetRunner();
                        runner.AddInput(graph["images"][0], ten).Fetch(graph["classes"].Name);
                        var output = runner.Run();
                    }
                    ten.Dispose();
                    string[] files = Directory.GetFiles("tzb/images/defect", "*.*");
                    //while(true)
                    foreach (string file in files)
                    {
                        DateTime bft = DateTime.Now;
                        //var tensor = Image2Tensor(file);
                        //break;
                        var tensor = ImageUtil.CreateTensorFromImageFile(file);
                        //TFTensor tensor = TFTensor.FromBuffer(tfs, eimg, 0, 224 * 224);
                        var runner = session.GetRunner();
                        runner.AddInput(graph["images"][0], tensor).Fetch(graph["classes"].Name);
                        var      output = runner.Run();
                        DateTime aft    = DateTime.Now;
                        TimeSpan ts     = aft.Subtract(bft);
                        System.Threading.Thread.Sleep(50);
                        var result = output[0];
                        int class_ = ((int[])result.GetValue(jagged: true))[0];
                        Console.WriteLine(file + " best_match: " + class_ + " " + labels[class_] + " time: " + ts.TotalMilliseconds);
                    }
                }
        }
Example 6
        public NoiseNet3D(string modelDir, int3 boxDimensions, int nThreads = 1, int batchSize = 8, bool forTraining = true, int deviceID = 0)
        {
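            // TFHelper.DeviceSync is assumed to serialize per-device setup so only one model is constructed on a GPU at a time.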
            lock (TFHelper.DeviceSync[deviceID])
            {
                DeviceID      = deviceID;
                BoxDimensions = boxDimensions;
                ForTraining   = forTraining;
                ModelDir      = modelDir;
                MaxThreads    = nThreads;
                BatchSize     = batchSize;

                TFSessionOptions SessionOptions = TFHelper.CreateOptions();
                TFSession        Dummy          = new TFSession(new TFGraph(), SessionOptions);

                Session = TFHelper.FromSavedModel(SessionOptions, null, ModelDir, new[] { forTraining ? "train" : "serve" }, new TFGraph(), $"/device:GPU:{deviceID}");
                Graph   = Session.Graph;

                NodeInputSource = Graph["volume_source"][0];
                if (forTraining)
                {
                    NodeInputTarget  = Graph["volume_target"][0];
                    NodeLearningRate = Graph["training_learning_rate"][0];
                    NodeOpTrain      = Graph["train_momentum"][0];
                    NodeOutputLoss   = Graph["l2_loss"][0];
                }

                NodeOutputPredicted = Graph["volume_predict"][0];

                TensorSource = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensions.X, BoxDimensions.Y, boxDimensions.Z, 1),
                                                                               new float[BatchSize * BoxDimensions.Elements()],
                                                                               0,
                                                                               BatchSize * (int)BoxDimensions.Elements()),
                                                      nThreads);

                if (ForTraining)
                {
                    TensorTarget = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensions.X, BoxDimensions.Y, boxDimensions.Z, 1),
                                                                                   new float[BatchSize * BoxDimensions.Elements()],
                                                                                   0,
                                                                                   BatchSize * (int)BoxDimensions.Elements()),
                                                          nThreads);

                    TensorLearningRate = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(1),
                                                                                         new float[1],
                                                                                         0,
                                                                                         1),
                                                                nThreads);
                }

                ResultPredicted = Helper.ArrayOfFunction(i => new float[BatchSize * BoxDimensions.Elements()], nThreads);
                ResultLoss      = Helper.ArrayOfFunction(i => new float[1], nThreads);

                //if (!ForTraining)
                RunnerPrediction = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                          AddInput(NodeInputSource, TensorSource[i]).
                                                          Fetch(NodeOutputPredicted),
                                                          nThreads);
                if (ForTraining)
                {
                    RunnerTraining = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                            AddInput(NodeInputSource, TensorSource[i]).
                                                            AddInput(NodeInputTarget, TensorTarget[i]).
                                                            AddInput(NodeLearningRate, TensorLearningRate[i]).
                                                            Fetch(NodeOutputPredicted, NodeOutputLoss, NodeOpTrain),
                                                            nThreads);
                }
            }

            // Run prediction or training for one batch to claim all the memory needed
            float[] InitDecoded;
            float[] InitLoss;
            //if (!ForTraining)
            {
                Predict(new float[BoxDimensions.Elements() * BatchSize],
                        0,
                        out InitDecoded);
            }
            if (ForTraining)
            {
                RandomNormal RandN = new RandomNormal();
                Train(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.Elements()),
                      Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.Elements()),
                      1e-10f,
                      0,
                      out InitDecoded,
                      out InitLoss);
            }
        }
Example 7
        public CubeNet(string modelDir, int deviceID = 0, int nThreads = 1, int batchSize = 1, int nClasses = 2, bool forTraining = false)
        {
            lock (TFHelper.DeviceSync[deviceID])
            {
                DeviceID    = deviceID;
                ForTraining = forTraining;
                ModelDir    = modelDir;
                MaxThreads  = nThreads;
                BatchSize   = batchSize;
                NClasses    = nClasses;

                TFSessionOptions SessionOptions = TFHelper.CreateOptions();
                TFSession        Dummy          = new TFSession(new TFGraph(), SessionOptions);

                Session = TFHelper.FromSavedModel(SessionOptions, null, ModelDir, new[] { forTraining ? "train" : "serve" }, new TFGraph(), $"/device:GPU:{deviceID}");
                Graph   = Session.Graph;

                if (forTraining)
                {
                    NodeInputMicTile = Graph["images"][0];
                    NodeInputLabels  = Graph["image_classes"][0];
                    NodeInputWeights = Graph["image_weights"][0];
                    NodeLearningRate = Graph["training_learning_rate"][0];
                    NodeOpTrain      = Graph["train_momentum"][0];

                    NodeOutputLoss = Graph["cross_entropy"][0];
                }
                else
                {
                    NodeInputMicTilePredict = Graph["images_predict"][0];
                }

                NodeOutputArgMax  = Graph["argmax_tensor"][0];
                NodeOutputSoftMax = Graph["softmax_tensor"][0];

                if (forTraining)
                {
                    TensorMicTile = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensionsTrain.X, BoxDimensionsTrain.Y, BoxDimensionsTrain.Z, 1),
                                                                                    new float[BatchSize * BoxDimensionsTrain.Elements()],
                                                                                    0,
                                                                                    BatchSize * (int)BoxDimensionsTrain.Elements()),
                                                           nThreads);

                    TensorTrainingLabels = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensionsTrain.X, BoxDimensionsTrain.Y, BoxDimensionsTrain.Z, NClasses),
                                                                                           new float[BatchSize * BoxDimensionsTrain.Elements() * NClasses],
                                                                                           0,
                                                                                           BatchSize * (int)BoxDimensionsTrain.Elements() * NClasses),
                                                                  nThreads);

                    TensorTrainingWeights = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensionsTrain.X, BoxDimensionsTrain.Y, BoxDimensionsTrain.Z, 1),
                                                                                            new float[BatchSize * BoxDimensionsTrain.Elements()],
                                                                                            0,
                                                                                            BatchSize * (int)BoxDimensionsTrain.Elements()),
                                                                   nThreads);

                    TensorLearningRate = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(1),
                                                                                         new float[1],
                                                                                         0,
                                                                                         1),
                                                                nThreads);
                }
                else
                {
                    TensorMicTilePredict = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensionsPredict.X, BoxDimensionsPredict.Y, BoxDimensionsPredict.Z, 1),
                                                                                           new float[BatchSize * BoxDimensionsPredict.Elements()],
                                                                                           0,
                                                                                           BatchSize * (int)BoxDimensionsPredict.Elements()),
                                                                  nThreads);
                }

                if (forTraining)
                {
                    ResultArgMax  = Helper.ArrayOfFunction(i => new long[BatchSize * (int)BoxDimensionsTrain.Elements()], nThreads);
                    ResultSoftMax = Helper.ArrayOfFunction(i => new float[BatchSize * (int)BoxDimensionsTrain.Elements() * NClasses], nThreads);
                    ResultLoss    = Helper.ArrayOfFunction(i => new float[BatchSize], nThreads);
                }
                else
                {
                    ResultArgMax  = Helper.ArrayOfFunction(i => new long[BatchSize * (int)BoxDimensionsPredict.Elements()], nThreads);
                    ResultSoftMax = Helper.ArrayOfFunction(i => new float[BatchSize * (int)BoxDimensionsPredict.Elements() * NClasses], nThreads);
                }

                if (!ForTraining)
                {
                    RunnerPrediction = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                              AddInput(NodeInputMicTilePredict, TensorMicTilePredict[i]).
                                                              Fetch(NodeOutputArgMax, NodeOutputSoftMax),
                                                              nThreads);
                }
                if (ForTraining)
                {
                    RunnerTraining = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                            AddInput(NodeInputMicTile, TensorMicTile[i]).
                                                            AddInput(NodeInputLabels, TensorTrainingLabels[i]).
                                                            AddInput(NodeInputWeights, TensorTrainingWeights[i]).
                                                            AddInput(NodeLearningRate, TensorLearningRate[i]).
                                                            Fetch(NodeOpTrain, NodeOutputArgMax, NodeOutputSoftMax, NodeOutputLoss),
                                                            nThreads);
                }
            }

            // Run prediction or training for one batch to claim all the memory needed
            long[]  InitArgMax;
            float[] InitProb;
            if (!ForTraining)
            {
                Predict(new float[BoxDimensionsPredict.Elements() * BatchSize],
                        0,
                        out InitArgMax,
                        out InitProb);
            }
            if (ForTraining)
            {
                RandomNormal RandN = new RandomNormal();
                Train(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensionsTrain.Elements()),
                      Helper.ArrayOfConstant(0.0f, BatchSize * (int)BoxDimensionsTrain.Elements() * NClasses),
                      Helper.ArrayOfConstant(0.0f, BatchSize * (int)BoxDimensionsTrain.Elements()),
                      1e-6f,
                      0,
                      out InitArgMax,
                      out InitProb);
            }
        }
Example 8
        public override IObservable <Pose> Process(IObservable <IplImage> source)
        {
            return(Observable.Defer(() =>
            {
                TFSessionOptions options = new TFSessionOptions();
                unsafe
                {
                    byte[] GPUConfig = new byte[] { 0x32, 0x02, 0x20, 0x01 };
                    fixed(void *ptr = &GPUConfig[0])
                    {
                        options.SetConfig(new IntPtr(ptr), GPUConfig.Length);
                    }
                }

                var graph = new TFGraph();
                var session = new TFSession(graph, options, null);
                var bytes = File.ReadAllBytes(ModelFileName);
                graph.Import(bytes);

                TFTensor tensor = null;
                var config = ConfigHelper.PoseConfig(PoseConfigFileName);
                return source.Select(input =>
                {
                    if (tensor == null || tensor.GetTensorDimension(1) != input.Height || tensor.GetTensorDimension(2) != input.Width)
                    {
                        tensor = new TFTensor(
                            TFDataType.Float,
                            new long[] { 1, input.Height, input.Width, 3 },
                            input.WidthStep * input.Height * 4);
                    }

                    using (var image = new IplImage(input.Size, IplDepth.F32, 3, tensor.Data))
                    {
                        CV.Convert(input, image);
                    }

                    var runner = session.GetRunner();
                    runner.AddInput(graph["Placeholder"][0], tensor);
                    runner.Fetch(graph["concat_1"][0]);

                    // Run the model
                    var output = runner.Run();

                    // Fetch the results from output:
                    var poseTensor = output[0];
                    var pose = new Mat((int)poseTensor.Shape[0], (int)poseTensor.Shape[1], Depth.F32, 1, poseTensor.Data);
                    var result = new Pose(input);
                    var threshold = MinConfidence;
                    for (int i = 0; i < pose.Rows; i++)
                    {
                        BodyPart bodyPart;
                        bodyPart.Name = config[i];
                        bodyPart.Confidence = (float)pose.GetReal(i, 2);
                        if (bodyPart.Confidence < threshold)
                        {
                            bodyPart.Position = new Point2f(float.NaN, float.NaN);
                        }
                        else
                        {
                            bodyPart.Position.X = (float)pose.GetReal(i, 1);
                            bodyPart.Position.Y = (float)pose.GetReal(i, 0);
                        }
                        result.Add(bodyPart);
                    }
                    return result;
                });
            }));
        }
Example 9
        public BoxNet(string modelDir, int gpuID = 0, int nThreads = 1, int batchSize = 128, bool forTraining = false)
        {
            ForTraining = forTraining;
            BatchSize   = batchSize;
            ModelDir    = modelDir;
            MaxThreads  = nThreads;

            TFSessionOptions SessionOptions = TFHelper.CreateOptions();
            TFSession        Dummy          = new TFSession(new TFGraph(), SessionOptions);

            Session = TFHelper.FromSavedModel(SessionOptions, null, ModelDir, new[] { forTraining ? "train" : "serve" }, new TFGraph(), $"/device:GPU:{gpuID}");
            Graph   = Session.Graph;

            NodeInputMicTile = Graph["mic_tiles"][0];
            if (forTraining)
            {
                NodeInputLabels  = Graph["training_labels"][0];
                NodeLearningRate = Graph["training_learning_rate"][0];
                NodeOpTrain      = Graph["train_momentum"][0];
            }

            NodeOutputArgMax  = Graph["ArgMax"][0];
            NodeOutputSoftMax = Graph["softmax_tensor"][0];

            TensorMicTile = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, 1, BoxDimensions.Y, BoxDimensions.X),
                                                                            new float[BatchSize * BoxDimensions.Elements()],
                                                                            0,
                                                                            BatchSize * (int)BoxDimensions.Elements()),
                                                   nThreads);

            TensorTrainingLabels = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, 2),
                                                                                   new float[BatchSize * 2],
                                                                                   0,
                                                                                   BatchSize * 2),
                                                          nThreads);

            TensorLearningRate = Helper.ArrayOfFunction(i => new TFTensor(0.0f),
                                                        nThreads);

            ResultArgMax  = Helper.ArrayOfFunction(i => new long[BatchSize], nThreads);
            ResultSoftMax = Helper.ArrayOfFunction(i => new float[BatchSize * 2], nThreads);

            if (!ForTraining)
            {
                RunnerPrediction = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                          AddInput(NodeInputMicTile, TensorMicTile[i]).
                                                          Fetch(NodeOutputArgMax, NodeOutputSoftMax),
                                                          nThreads);
            }
            else
            {
                RunnerTraining = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                        AddInput(NodeInputMicTile, TensorMicTile[i]).
                                                        AddInput(NodeInputLabels, TensorTrainingLabels[i]).
                                                        AddInput(NodeLearningRate, TensorLearningRate[i]).
                                                        Fetch(NodeOutputArgMax, NodeOutputSoftMax, NodeOpTrain),
                                                        nThreads);
            }

            // Run prediction or training for one batch to claim all the memory needed
            long[]  InitArgMax;
            float[] InitProb;
            if (!ForTraining)
            {
                Predict(new float[BoxDimensions.Elements() * BatchSize],
                        0,
                        out InitArgMax,
                        out InitProb);
            }
            else
            {
                RandomNormal RandN = new RandomNormal();
                Train(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.Elements()),
                      Helper.Combine(Helper.ArrayOfFunction(i => new[] { 1.0f, 0.0f }, BatchSize)),
                      1e-6f,
                      0,
                      out InitArgMax,
                      out InitProb);
            }
        }