Example #1
        public FlexNet3D(string modelDir, int3 boxDimensions, int gpuID = 0, int nThreads = 1, bool forTraining = true, int batchSize = 128, int bottleneckWidth = 2, int layerWidth = 64, int nlayers = 4)
        {
            BoxDimensions   = boxDimensions;
            ForTraining     = forTraining;
            BatchSize       = batchSize;
            BottleneckWidth = bottleneckWidth;
            NWeights0       = layerWidth;
            NLayers         = nlayers;
            ModelDir        = modelDir;
            MaxThreads      = nThreads;

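            // Create the session options and load the SavedModel ("train" or "serve" graph) onto the requested GPU.
            // The dummy session is never used afterwards; it appears to exist only to initialize TensorFlow with these options.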
            TFSessionOptions SessionOptions = TFHelper.CreateOptions();
            TFSession        Dummy          = new TFSession(new TFGraph(), SessionOptions);

            Session = TFHelper.FromSavedModel(SessionOptions, null, ModelDir, new[] { forTraining ? "train" : "serve" }, new TFGraph(), $"/device:GPU:{gpuID}");
            Graph   = Session.Graph;

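            // Resolve graph nodes by name; nodes needed only for training are resolved only when the "train" graph was loaded.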
            NodeInputSource       = Graph["volume_source"][0];
            NodeInputTarget       = Graph["volume_target"][0];
            NodeInputWeightSource = Graph["volume_weight_source"][0];
            NodeInputWeightTarget = Graph["volume_weight_target"][0];
            NodeDropoutRate       = Graph["training_dropout_rate"][0];
            if (forTraining)
            {
                NodeLearningRate      = Graph["training_learning_rate"][0];
                NodeOrthogonalityRate = Graph["training_orthogonality"][0];
                NodeOpTrain           = Graph["train_momentum"][0];
                NodeOutputLoss        = Graph["l2_loss"][0];
                NodeOutputLossKL      = Graph["kl_loss"][0];
                NodeBottleneck        = Graph["bottleneck"][0];
            }

            NodeCode = Graph["volume_code"][0];

            NodeOutputPredicted = Graph["volume_predict"][0];

            NodeWeights0 = Graph["encoder_0/weights_0"][0];
            NodeWeights1 = Graph[$"decoder_{nlayers - 1}/weights_{nlayers - 1}"][0];
            if (forTraining)
            {
                NodeWeights0Assign = Graph["encoder_0/assign_layer0"][0];
                NodeWeights0Input  = Graph["encoder_0/assign_layer0_values"][0];

                NodeWeights1Assign = Graph[$"decoder_{nlayers - 1}/assign_layer0"][0];
                NodeWeights1Input  = Graph[$"decoder_{nlayers - 1}/assign_layer0_values"][0];
            }

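            // Pre-allocate one set of input tensors per worker thread so batches can be prepared and run concurrently.
            // Fourier-space volumes are laid out as BatchSize x (X / 2 + 1) x Y x Z with 2 channels, presumably real and imaginary parts.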
            TensorSource = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, (BoxDimensions.X / 2 + 1), BoxDimensions.Y, BoxDimensions.Z, 2),
                                                                           new float[BatchSize * BoxDimensions.ElementsFFT() * 2],
                                                                           0,
                                                                           BatchSize * (int)BoxDimensions.ElementsFFT() * 2),
                                                  nThreads);

            TensorTarget = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, (BoxDimensions.X / 2 + 1), BoxDimensions.Y, BoxDimensions.Z, 2),
                                                                           new float[BatchSize * BoxDimensions.ElementsFFT() * 2],
                                                                           0,
                                                                           BatchSize * (int)BoxDimensions.ElementsFFT() * 2),
                                                  nThreads);

            TensorWeightSource = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, (BoxDimensions.X / 2 + 1), BoxDimensions.Y, BoxDimensions.Z, 1),
                                                                                 new float[BatchSize * BoxDimensions.ElementsFFT()],
                                                                                 0,
                                                                                 BatchSize * (int)BoxDimensions.ElementsFFT()),
                                                        nThreads);

            TensorWeightTarget = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, (BoxDimensions.X / 2 + 1), BoxDimensions.Y, BoxDimensions.Z, 1),
                                                                                 new float[BatchSize * BoxDimensions.ElementsFFT()],
                                                                                 0,
                                                                                 BatchSize * (int)BoxDimensions.ElementsFFT()),
                                                        nThreads);

            TensorCode = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BottleneckWidth),
                                                                         new float[BatchSize * BottleneckWidth],
                                                                         0,
                                                                         BatchSize * BottleneckWidth),
                                                nThreads);

            TensorLearningRate = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(1),
                                                                                 new float[1],
                                                                                 0,
                                                                                 1),
                                                        nThreads);

            TensorDropoutRate = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(1),
                                                                                new float[1],
                                                                                0,
                                                                                1),
                                                       nThreads);

            TensorOrthogonalityRate = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(1),
                                                                                      new float[1],
                                                                                      0,
                                                                                      1),
                                                             nThreads);

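            // Per-thread buffers that receive the fetched results of each run.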
            ResultPredicted  = Helper.ArrayOfFunction(i => new float[BatchSize * BoxDimensions.ElementsFFT() * 2], nThreads);
            ResultBottleneck = Helper.ArrayOfFunction(i => new float[BatchSize * BottleneckWidth], nThreads);
            ResultLoss       = Helper.ArrayOfFunction(i => new float[1], nThreads);
            ResultLossKL     = Helper.ArrayOfFunction(i => new float[1], nThreads);

            RetrievedWeights = new float[BoxDimensions.ElementsFFT() * 2 * NWeights0];

            // The prediction runner only uses nodes that are resolved in both modes.
            RunnerPrediction = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                      AddInput(NodeCode, TensorCode[i]).
                                                      AddInput(NodeDropoutRate, TensorDropoutRate[i]).
                                                      Fetch(NodeOutputPredicted),
                                                      nThreads);

            // The training and encoding runners reference nodes that are only resolved when forTraining is true.
            if (forTraining)
            {
                RunnerTraining = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                        AddInput(NodeInputSource, TensorSource[i]).
                                                        AddInput(NodeInputTarget, TensorTarget[i]).
                                                        AddInput(NodeInputWeightSource, TensorWeightSource[i]).
                                                        AddInput(NodeInputWeightTarget, TensorWeightTarget[i]).
                                                        AddInput(NodeDropoutRate, TensorDropoutRate[i]).
                                                        AddInput(NodeLearningRate, TensorLearningRate[i]).
                                                        AddInput(NodeOrthogonalityRate, TensorOrthogonalityRate[i]).
                                                        Fetch(NodeOutputPredicted, NodeOutputLoss, NodeOutputLossKL, NodeBottleneck, NodeOpTrain),
                                                        nThreads);

                RunnerEncode = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                      AddInput(NodeInputSource, TensorSource[i]).
                                                      AddInput(NodeInputWeightSource, TensorWeightSource[i]).
                                                      AddInput(NodeDropoutRate, TensorDropoutRate[i]).
                                                      Fetch(NodeBottleneck),
                                                      nThreads);
            }

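            // Runners for reading back the first encoder / last decoder weight matrices, and (in training mode) for assigning new values to them.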
            RunnerRetrieveWeights0 = Session.GetRunner().Fetch(NodeWeights0);
            RunnerRetrieveWeights1 = Session.GetRunner().Fetch(NodeWeights1);

            if (ForTraining)
            {
                TensorWeights0 = TFTensor.FromBuffer(new TFShape(NWeights0, BoxDimensions.ElementsFFT() * 2),
                                                     new float[BoxDimensions.ElementsFFT() * 2 * NWeights0],
                                                     0,
                                                     (int)BoxDimensions.ElementsFFT() * 2 * NWeights0);

                RunnerAssignWeights0 = Session.GetRunner().AddInput(NodeWeights0Input, TensorWeights0).
                                       Fetch(NodeWeights0Assign);
                RunnerAssignWeights1 = Session.GetRunner().AddInput(NodeWeights1Input, TensorWeights0).
                                       Fetch(NodeWeights1Assign);
            }

            // Run prediction or training for one batch to claim all the memory needed
            float[] InitDecoded;
            float[] InitBottleneck;
            float[] InitLoss, InitLossKL;
            if (!ForTraining)
            {
                RandomNormal RandN = new RandomNormal(123);
                Predict(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BottleneckWidth * BatchSize),
                        0,
                        out InitDecoded);
            }
            else
            {
                RandomNormal RandN = new RandomNormal();

                Encode(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.ElementsFFT() * 2),
                       Helper.ArrayOfFunction(i => 1f, BatchSize * (int)BoxDimensions.ElementsFFT()),
                       0,
                       out InitBottleneck);

                Train(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.ElementsFFT() * 2),
                      Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.ElementsFFT() * 2),
                      Helper.ArrayOfFunction(i => 1f, BatchSize * (int)BoxDimensions.ElementsFFT()),
                      Helper.ArrayOfFunction(i => 1f, BatchSize * (int)BoxDimensions.ElementsFFT()),
                      0.5f,
                      1e-10f,
                      1e-5f,
                      0,
                      out InitDecoded,
                      out InitBottleneck,
                      out InitLoss,
                      out InitLossKL);
            }
        }
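The constructor above loads the SavedModel and immediately warms it up with one dummy batch. A minimal, hypothetical call site might look like the sketch below; the model path, box size and hyper-parameter values are placeholders, and the parameter meanings in the comments are inferred from the warm-up call at the end of the constructor, so treat them as assumptions rather than documented behavior.

        // Hypothetical call site; "flexnet_model" and the 32^3 box are placeholders.
        int3 Box = new int3(32, 32, 32);
        FlexNet3D Net = new FlexNet3D("flexnet_model", Box, gpuID: 0, nThreads: 1, forTraining: true, batchSize: 128, bottleneckWidth: 2);

        // Buffers must match the tensor layouts allocated in the constructor:
        // BatchSize * ElementsFFT() * 2 floats per half-spectrum volume batch, BatchSize * ElementsFFT() floats per weight batch.
        float[] Source  = new float[128 * Box.ElementsFFT() * 2];
        float[] Target  = new float[128 * Box.ElementsFFT() * 2];
        float[] WSource = Helper.ArrayOfConstant(1f, 128 * (int)Box.ElementsFFT());
        float[] WTarget = Helper.ArrayOfConstant(1f, 128 * (int)Box.ElementsFFT());

        Net.Train(Source, Target, WSource, WTarget,
                  0.5f,   // presumably the dropout rate
                  1e-4f,  // presumably the learning rate
                  1e-5f,  // presumably the orthogonality weight
                  0,      // thread ID
                  out float[] Decoded, out float[] Bottleneck, out float[] Loss, out float[] LossKL);
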
Example #2
        public CubeNet(string modelDir, int deviceID = 0, int nThreads = 1, int batchSize = 1, int nClasses = 2, bool forTraining = false)
        {
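            // TFHelper.DeviceSync appears to serialize per-GPU initialization so concurrent instances do not set up the same device at once.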
            lock (TFHelper.DeviceSync[deviceID])
            {
                DeviceID    = deviceID;
                ForTraining = forTraining;
                ModelDir    = modelDir;
                MaxThreads  = nThreads;
                BatchSize   = batchSize;
                NClasses    = nClasses;

                TFSessionOptions SessionOptions = TFHelper.CreateOptions();
                TFSession        Dummy          = new TFSession(new TFGraph(), SessionOptions);

                Session = TFHelper.FromSavedModel(SessionOptions, null, ModelDir, new[] { forTraining ? "train" : "serve" }, new TFGraph(), $"/device:GPU:{deviceID}");
                Graph   = Session.Graph;

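                // The train and serve graphs expose different input nodes, so they are resolved separately per mode.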
                if (forTraining)
                {
                    NodeInputMicTile = Graph["images"][0];
                    NodeInputLabels  = Graph["image_classes"][0];
                    NodeInputWeights = Graph["image_weights"][0];
                    NodeLearningRate = Graph["training_learning_rate"][0];
                    NodeOpTrain      = Graph["train_momentum"][0];

                    NodeOutputLoss = Graph["cross_entropy"][0];
                }
                else
                {
                    NodeInputMicTilePredict = Graph["images_predict"][0];
                }

                NodeOutputArgMax  = Graph["argmax_tensor"][0];
                NodeOutputSoftMax = Graph["softmax_tensor"][0];

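                // Per-thread input tensors; training and prediction use different box sizes (BoxDimensionsTrain vs. BoxDimensionsPredict).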
                if (forTraining)
                {
                    TensorMicTile = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensionsTrain.X, BoxDimensionsTrain.Y, BoxDimensionsTrain.Z, 1),
                                                                                    new float[BatchSize * BoxDimensionsTrain.Elements()],
                                                                                    0,
                                                                                    BatchSize * (int)BoxDimensionsTrain.Elements()),
                                                           nThreads);

                    TensorTrainingLabels = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensionsTrain.X, BoxDimensionsTrain.Y, BoxDimensionsTrain.Z, NClasses),
                                                                                           new float[BatchSize * BoxDimensionsTrain.Elements() * NClasses],
                                                                                           0,
                                                                                           BatchSize * (int)BoxDimensionsTrain.Elements() * NClasses),
                                                                  nThreads);

                    TensorTrainingWeights = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensionsTrain.X, BoxDimensionsTrain.Y, BoxDimensionsTrain.Z, 1),
                                                                                            new float[BatchSize * BoxDimensionsTrain.Elements()],
                                                                                            0,
                                                                                            BatchSize * (int)BoxDimensionsTrain.Elements()),
                                                                   nThreads);

                    TensorLearningRate = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(1),
                                                                                         new float[1],
                                                                                         0,
                                                                                         1),
                                                                nThreads);
                }
                else
                {
                    TensorMicTilePredict = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensionsPredict.X, BoxDimensionsPredict.Y, BoxDimensionsPredict.Z, 1),
                                                                                           new float[BatchSize * BoxDimensionsPredict.Elements()],
                                                                                           0,
                                                                                           BatchSize * (int)BoxDimensionsPredict.Elements()),
                                                                  nThreads);
                }

                if (forTraining)
                {
                    ResultArgMax  = Helper.ArrayOfFunction(i => new long[BatchSize * (int)BoxDimensionsTrain.Elements()], nThreads);
                    ResultSoftMax = Helper.ArrayOfFunction(i => new float[BatchSize * (int)BoxDimensionsTrain.Elements() * NClasses], nThreads);
                    ResultLoss    = Helper.ArrayOfFunction(i => new float[BatchSize], nThreads);
                }
                else
                {
                    ResultArgMax  = Helper.ArrayOfFunction(i => new long[BatchSize * (int)BoxDimensionsPredict.Elements()], nThreads);
                    ResultSoftMax = Helper.ArrayOfFunction(i => new float[BatchSize * (int)BoxDimensionsPredict.Elements() * NClasses], nThreads);
                }

                if (!ForTraining)
                {
                    RunnerPrediction = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                              AddInput(NodeInputMicTilePredict, TensorMicTilePredict[i]).
                                                              Fetch(NodeOutputArgMax, NodeOutputSoftMax),
                                                              nThreads);
                }
                if (ForTraining)
                {
                    RunnerTraining = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                            AddInput(NodeInputMicTile, TensorMicTile[i]).
                                                            AddInput(NodeInputLabels, TensorTrainingLabels[i]).
                                                            AddInput(NodeInputWeights, TensorTrainingWeights[i]).
                                                            AddInput(NodeLearningRate, TensorLearningRate[i]).
                                                            Fetch(NodeOpTrain, NodeOutputArgMax, NodeOutputSoftMax, NodeOutputLoss),
                                                            nThreads);
                }
            }

            // Run prediction or training for one batch to claim all the memory needed
            long[]  InitArgMax;
            float[] InitProb;
            if (!ForTraining)
            {
                Predict(new float[BoxDimensionsPredict.Elements() * BatchSize],
                        0,
                        out InitArgMax,
                        out InitProb);
            }
            if (ForTraining)
            {
                RandomNormal RandN = new RandomNormal();
                Train(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensionsTrain.Elements()),
                      Helper.ArrayOfConstant(0.0f, BatchSize * (int)BoxDimensionsTrain.Elements() * NClasses),
                      Helper.ArrayOfConstant(0.0f, BatchSize * (int)BoxDimensionsTrain.Elements()),
                      1e-6f,
                      0,
                      out InitArgMax,
                      out InitProb);
            }
        }
Example #3
        public NoiseNet3D(string modelDir, int3 boxDimensions, int nThreads = 1, int batchSize = 8, bool forTraining = true, int deviceID = 0)
        {
            lock (TFHelper.DeviceSync[deviceID])
            {
                DeviceID      = deviceID;
                BoxDimensions = boxDimensions;
                ForTraining   = forTraining;
                ModelDir      = modelDir;
                MaxThreads    = nThreads;
                BatchSize     = batchSize;

                TFSessionOptions SessionOptions = TFHelper.CreateOptions();
                TFSession        Dummy          = new TFSession(new TFGraph(), SessionOptions);

                Session = TFHelper.FromSavedModel(SessionOptions, null, ModelDir, new[] { forTraining ? "train" : "serve" }, new TFGraph(), $"/device:GPU:{deviceID}");
                Graph   = Session.Graph;

                NodeInputSource = Graph["volume_source"][0];
                if (forTraining)
                {
                    NodeInputTarget  = Graph["volume_target"][0];
                    NodeLearningRate = Graph["training_learning_rate"][0];
                    NodeOpTrain      = Graph["train_momentum"][0];
                    NodeOutputLoss   = Graph["l2_loss"][0];
                }

                NodeOutputPredicted = Graph["volume_predict"][0];

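                // Per-thread input tensors holding real-space volumes of BatchSize x X x Y x Z voxels with a single channel.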
                TensorSource = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensions.X, BoxDimensions.Y, BoxDimensions.Z, 1),
                                                                               new float[BatchSize * BoxDimensions.Elements()],
                                                                               0,
                                                                               BatchSize * (int)BoxDimensions.Elements()),
                                                      nThreads);

                if (ForTraining)
                {
                    TensorTarget = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, BoxDimensions.X, BoxDimensions.Y, BoxDimensions.Z, 1),
                                                                                   new float[BatchSize * BoxDimensions.Elements()],
                                                                                   0,
                                                                                   BatchSize * (int)BoxDimensions.Elements()),
                                                          nThreads);

                    TensorLearningRate = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(1),
                                                                                         new float[1],
                                                                                         0,
                                                                                         1),
                                                                nThreads);
                }

                ResultPredicted = Helper.ArrayOfFunction(i => new float[BatchSize * BoxDimensions.Elements()], nThreads);
                ResultLoss      = Helper.ArrayOfFunction(i => new float[1], nThreads);

                // The prediction runner only uses nodes that are resolved in both modes, so it is created regardless of ForTraining.
                RunnerPrediction = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                          AddInput(NodeInputSource, TensorSource[i]).
                                                          Fetch(NodeOutputPredicted),
                                                          nThreads);
                if (ForTraining)
                {
                    RunnerTraining = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                            AddInput(NodeInputSource, TensorSource[i]).
                                                            AddInput(NodeInputTarget, TensorTarget[i]).
                                                            AddInput(NodeLearningRate, TensorLearningRate[i]).
                                                            Fetch(NodeOutputPredicted, NodeOutputLoss, NodeOpTrain),
                                                            nThreads);
                }
            }

            // Run prediction or training for one batch to claim all the memory needed
            float[] InitDecoded;
            float[] InitLoss;
            // RunnerPrediction is always created, so the prediction path is exercised in both modes.
            Predict(new float[BoxDimensions.Elements() * BatchSize],
                    0,
                    out InitDecoded);
            if (ForTraining)
            {
                RandomNormal RandN = new RandomNormal();
                Train(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.Elements()),
                      Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.Elements()),
                      1e-10f,
                      0,
                      out InitDecoded,
                      out InitLoss);
            }
        }
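As with the other networks, a hypothetical call site for the denoiser above could look like the following sketch; the model path, box size and learning rate are placeholders, and the Train/Predict signatures are taken from the warm-up calls at the end of the constructor.

        // Hypothetical call site; "noisenet_model" and the 64^3 box are placeholders.
        int3 Box = new int3(64, 64, 64);
        NoiseNet3D Net = new NoiseNet3D("noisenet_model", Box, nThreads: 1, batchSize: 8, forTraining: true, deviceID: 0);

        // Real-space volumes, BatchSize * Box.Elements() floats per batch.
        float[] SourceVols = new float[8 * Box.Elements()];
        float[] TargetVols = new float[8 * Box.Elements()];

        Net.Train(SourceVols, TargetVols,
                  1e-4f,  // presumably the learning rate
                  0,      // thread ID
                  out float[] Denoised, out float[] Loss);

        Net.Predict(SourceVols, 0, out float[] DenoisedOnly);
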
Example #4
        public BoxNet(string modelDir, int gpuID = 0, int nThreads = 1, int batchSize = 128, bool forTraining = false)
        {
            ForTraining = forTraining;
            BatchSize   = batchSize;
            ModelDir    = modelDir;
            MaxThreads  = nThreads;

            TFSessionOptions SessionOptions = TFHelper.CreateOptions();
            TFSession        Dummy          = new TFSession(new TFGraph(), SessionOptions);

            Session = TFHelper.FromSavedModel(SessionOptions, null, ModelDir, new[] { forTraining ? "train" : "serve" }, new TFGraph(), $"/device:GPU:{gpuID}");
            Graph   = Session.Graph;

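            // Resolve graph nodes by name; the label and optimizer nodes are only needed (and resolved) for training.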
            NodeInputMicTile = Graph["mic_tiles"][0];
            if (forTraining)
            {
                NodeInputLabels  = Graph["training_labels"][0];
                NodeLearningRate = Graph["training_learning_rate"][0];
                NodeOpTrain      = Graph["train_momentum"][0];
            }

            NodeOutputArgMax  = Graph["ArgMax"][0];
            NodeOutputSoftMax = Graph["softmax_tensor"][0];

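            // Per-thread input tensors; tiles are laid out as BatchSize x 1 x Y x X, presumably NCHW with a single channel.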
            TensorMicTile = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, 1, BoxDimensions.Y, BoxDimensions.X),
                                                                            new float[BatchSize * BoxDimensions.Elements()],
                                                                            0,
                                                                            BatchSize * (int)BoxDimensions.Elements()),
                                                   nThreads);

            TensorTrainingLabels = Helper.ArrayOfFunction(i => TFTensor.FromBuffer(new TFShape(BatchSize, 2),
                                                                                   new float[BatchSize * 2],
                                                                                   0,
                                                                                   BatchSize * 2),
                                                          nThreads);

            TensorLearningRate = Helper.ArrayOfFunction(i => new TFTensor(0.0f),
                                                        nThreads);

            ResultArgMax  = Helper.ArrayOfFunction(i => new long[BatchSize], nThreads);
            ResultSoftMax = Helper.ArrayOfFunction(i => new float[BatchSize * 2], nThreads);

            if (!ForTraining)
            {
                RunnerPrediction = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                          AddInput(NodeInputMicTile, TensorMicTile[i]).
                                                          Fetch(NodeOutputArgMax, NodeOutputSoftMax),
                                                          nThreads);
            }
            else
            {
                RunnerTraining = Helper.ArrayOfFunction(i => Session.GetRunner().
                                                        AddInput(NodeInputMicTile, TensorMicTile[i]).
                                                        AddInput(NodeInputLabels, TensorTrainingLabels[i]).
                                                        AddInput(NodeLearningRate, TensorLearningRate[i]).
                                                        Fetch(NodeOutputArgMax, NodeOutputSoftMax, NodeOpTrain),
                                                        nThreads);
            }

            // Run prediction or training for one batch to claim all the memory needed
            long[]  InitArgMax;
            float[] InitProb;
            if (!ForTraining)
            {
                Predict(new float[BoxDimensions.Elements() * BatchSize],
                        0,
                        out InitArgMax,
                        out InitProb);
            }
            else
            {
                RandomNormal RandN = new RandomNormal();
                Train(Helper.ArrayOfFunction(i => RandN.NextSingle(0, 1), BatchSize * (int)BoxDimensions.Elements()),
                      Helper.Combine(Helper.ArrayOfFunction(i => new[] { 1.0f, 0.0f }, BatchSize)), // one one-hot label per batch element
                      1e-6f,
                      0,
                      out InitArgMax,
                      out InitProb);
            }
        }