Example #1
        /// <summary>
        /// Accumulate the diffs from one BlobCollection into another.
        /// </summary>
        /// <param name="cuda">Specifies the CudaDnn instance used to add the blobs into this collection.</param>
        /// <param name="src">Specifies the source BlobCollection to add into this one.</param>
        /// <param name="bAccumulateDiff">Specifies to accumulate diffs when <i>true</i>, and the data otherwise.</param>
        public void Accumulate(CudaDnn <T> cuda, BlobCollection <T> src, bool bAccumulateDiff)
        {
            for (int i = 0; i < src.Count; i++)
            {
                Blob <T> bSrc      = src[i];
                Blob <T> bDst      = m_rgBlobs[i];
                int      nSrcCount = bSrc.count();
                int      nDstCount = bDst.count();

                if (nSrcCount != nDstCount)
                {
                    throw new Exception("The src and dst blobs at index #" + i.ToString() + " have different sizes!");
                }

                if (bAccumulateDiff)
                {
                    if (bSrc.DiffExists && bDst.DiffExists)
                    {
                        cuda.add(nSrcCount, bSrc.gpu_diff, bDst.gpu_diff, bDst.mutable_gpu_diff);
                    }
                }
                else
                {
                    cuda.add(nSrcCount, bSrc.gpu_data, bDst.gpu_data, bDst.mutable_gpu_data);
                }
            }
        }
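
A minimal usage sketch for Accumulate follows. It assumes a CudaDnn<float> connection and Log created as in the Main example further below, and that BlobCollection<float> exposes an Add method for filling the collections; the blob shapes used are illustrative only.

        // Sketch: accumulate the data of colSrc into colDst (shapes must match).
        BlobCollection<float> colDst = new BlobCollection<float>();
        BlobCollection<float> colSrc = new BlobCollection<float>();

        colDst.Add(new Blob<float>(cuda, log, 1, 3, 4, 4, false));
        colSrc.Add(new Blob<float>(cuda, log, 1, 3, 4, 4, false));

        colDst[0].SetData(1.0);
        colSrc[0].SetData(2.0);

        // Add the source data into the destination data (bAccumulateDiff = false).
        colDst.Accumulate(cuda, colSrc, false);

        float[] rgData = colDst[0].mutable_cpu_data;    // each element now equals 3.
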
Example #2
        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="cuda">Cuda engine.</param>
        /// <param name="log">General log.</param>
        /// <param name="p">provides MultiBoxLossParameter multiboxloss_param
        /// with MultiBoxLossLayer.
        /// </param>
        public MultiBoxLossLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type = LayerParameter.LayerType.MULTIBOX_LOSS;

            m_blobLocPred      = new Blob <T>(cuda, log);
            m_blobLocPred.Name = "loc_pred";
            m_blobLocGt        = new Blob <T>(cuda, log);
            m_blobLocGt.Name   = "loc_gt";
            m_blobLocLoss      = new Blob <T>(cuda, log);
            m_blobLocLoss.Name = "loc_loss";

            m_blobConfPred      = new Blob <T>(cuda, log);
            m_blobConfPred.Name = "conf_pred";
            m_blobConfGt        = new Blob <T>(cuda, log);
            m_blobConfGt.Name   = "conf_gt";
            m_blobConfLoss      = new Blob <T>(cuda, log);
            m_blobConfLoss.Name = "conf_loss";

            m_bboxUtil = new BBoxUtility <T>(cuda, log);

            m_hostConf = new HostBuffer <T>(cuda);
            m_hostLoc  = new HostBuffer <T>(cuda);
            m_hostGt   = new HostBuffer <T>(cuda);
            m_hostPrio = new HostBuffer <T>(cuda);
        }
Example #3
        /// <summary>
        /// Transforms a list of Datum and places the transformed data into a Blob.
        /// </summary>
        /// <param name="rgDatum">Specifies a List of Datum to be transformed.</param>
        /// <param name="blobTransformed">Specifies the Blob where all transformed data is placed.</param>
        /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
        /// <param name="log">Specifies a Log for all output.</param>
        public void Transform(List <Datum> rgDatum, Blob <T> blobTransformed, CudaDnn <T> cuda, Log log)
        {
            int nDatumNum = rgDatum.Count;
            int nNum      = blobTransformed.num;
            int nChannels = blobTransformed.channels;
            int nHeight   = blobTransformed.height;
            int nWidth    = blobTransformed.width;

            m_log.CHECK_GT(nDatumNum, 0, "There are no datum to add.");
            m_log.CHECK_LE(nDatumNum, nNum, "The size of the rgDatum must be no greater than the transformed blob num.");

            Blob <T> blobUni = new Blob <T>(cuda, log, 1, nChannels, nHeight, nWidth, false);

            for (int i = 0; i < nDatumNum; i++)
            {
                int nOffset = blobTransformed.offset(i);

                if (rgDatum[i] != null)
                {
                    Transform(rgDatum[i], blobUni);
                }
                else
                {
                    blobUni.SetData(0);
                }

                cuda.copy(blobUni.count(), blobUni.gpu_data, blobTransformed.mutable_gpu_data, 0, nOffset);
            }

            blobUni.Dispose();
        }
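
A short usage sketch follows. The transformer instance name, the datum shape (3 x 28 x 28), and the already-loaded rgData list are assumptions made for illustration.

        // Sketch: transform a list of Datum into one blob, one datum per 'num' slot.
        // rgData (List<Datum>) and transformer are assumed to exist and to match the 3x28x28 shape.
        Blob<float> blobTransformed = new Blob<float>(cuda, log, rgData.Count, 3, 28, 28, false);
        transformer.Transform(rgData, blobTransformed, cuda, log);
        // blobTransformed.gpu_data now holds the transformed data for every item in rgData.
        blobTransformed.Dispose();
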
Example #4
 /// <summary>
 /// The BiasLayer constructor.
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">Specifies the LayerParameter of type BIAS, with bias_param.
 /// </param>
 public BiasLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p)
 {
     m_type = LayerParameter.LayerType.BIAS;
     m_blobBiasMultiplier      = new Blob <T>(cuda, log);
     m_blobBiasMultiplier.Name = "bias_biasmult";
 }
Example #5
        static void Main(string[] args)
        {
            // Create the output log used.
            Log log = new Log("Test");

            log.OnWriteLine += Log_OnWriteLine;

            // Create the CudaDnn connection used.  NOTE: only one CudaDnn connection is needed
            // per thread, for each instance creates and manages its own low-level kernel state,
            // which includes all memory allocated, etc.  Each memory handle should only be used
            // with the CudaDnn instance that allocated it.
            CudaDnn <float> cuda = new CudaDnn <float>(0, DEVINIT.CUBLAS | DEVINIT.CURAND);

            log.WriteLine("CudaDnn created.");

            // Run super simple sample.
            runSuperSimpleSample(cuda, log);

            // Run Blob sample #1
            runSimpleBlobExample1(cuda, log);

            // Run Blob sample #2
            runSimpleBlobExample2(cuda, log);

            // Run Blob sample #3
            runSimpleBlobExample3(cuda, log);

            // Release all GPU memory and other state data used.
            cuda.Dispose();
        }
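
The bodies of the sample routines called above are not shown in this listing. The sketch below is only an illustration of the kind of work such a routine might do, built from the Blob and CudaDnn calls that appear in the other examples; it is not the actual sample code.

        static void runIllustrativeBlobSample(CudaDnn<float> cuda, Log log)
        {
            // Create two single-element blobs on the GPU (no diff memory needed).
            Blob<float> a = new Blob<float>(cuda, log, 1, 1, 1, 1, false);
            Blob<float> b = new Blob<float>(cuda, log, 1, 1, 1, 1, false);

            a.SetData(2.0);
            b.SetData(3.0);

            // c = a + b, computed on the GPU, then transferred back to CPU memory.
            Blob<float> c = a.Clone();
            cuda.add(a.count(), a.gpu_data, b.gpu_data, c.mutable_gpu_data);
            float[] rgResult = c.mutable_cpu_data;

            log.WriteLine("2 + 3 = " + rgResult[0].ToString());

            // Free the GPU memory held by each blob.
            a.Dispose();
            b.Dispose();
            c.Dispose();
        }
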
Example #6
        /// <summary>
        /// The GPUParams constructor.
        /// </summary>
        /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
        /// <param name="log">Specifies the Log for output.</param>
        /// <param name="root_solver">Specifies the root Solver.</param>
        /// <param name="nDeviceID">Specifies the device ID to use for this instance.</param>
        public GPUParams(CudaDnn <T> cuda, Log log, Solver <T> root_solver, int nDeviceID)
            : base(root_solver)
        {
            m_cuda = cuda;
            m_log  = log;

            m_nDeviceID = m_cuda.GetDeviceID();

            if (nDeviceID != m_nDeviceID)
            {
                m_cuda.SetDeviceID(nDeviceID);
            }

            // Allocate device buffers
            m_hData = m_cuda.AllocMemory(m_lCount);

            // Copy blob values
            BlobCollection <T> net = root_solver.net.learnable_parameters;

            apply_buffers(net, m_hData, m_lCount, Op.copy);

            m_hDiff = m_cuda.AllocMemory(m_lCount);
            m_cuda.set((int)m_lCount, m_hDiff, 0);

            m_hStream = m_cuda.CreateStream();

            if (m_nDeviceID != nDeviceID)
            {
                m_cuda.SetDeviceID(m_nDeviceID);
            }
        }
Example #7
 /// <summary>
 /// The ReductionLayer constructor.
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">Specifies the LayerParameter of type REDUCTION with parameter reduction_param,
 /// with options:
 ///   - operation. The operation (SUM, ASUM, SUMSQ or MEAN) to run.
 ///
 ///   - axis (\b optional, default = 0). The first axis to reduce to scalar.
 ///
 ///   - coeff (\b optional, default = 1).  The coefficient used to scale the output.
 /// </param>
 public ReductionLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p)
 {
     m_type = LayerParameter.LayerType.REDUCTION;
     m_blobSumMultiplier      = new Blob <T>(cuda, log);
     m_blobSumMultiplier.Name = m_param.name + " summult";
 }
Example #8
        /// <summary>
        /// The LSTMSimpleLayer constructor.
        /// </summary>
        /// <remarks>
        /// @see [A Clockwork RNN](https://arxiv.org/abs/1402.3511) by Koutnik, et al., 2014
        /// </remarks>
        /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
        /// <param name="log">Specifies the Log for output.</param>
        /// <param name="p">Specifies the LayerParameter of type LSTM with parameter lstm_simple_param,
        /// with options:
        ///   - num_output.  The dimension of the output -- must be explicitly set to non-zero.
        ///
        ///   - clipping_threshold (\b optional, default = 0).  The gradient clipping threshold (0 = no clipping).
        ///
        ///   - weight_filler (\b optional, default = "gaussian"). The weight filler used to initialize the weights.
        ///
        ///   - bias_filler (\b optional, default = "constant, 1.0"). The bias filler used to initialize the bias values.
        ///
        ///   - batch_size (\b optional, default = 1).  The batch size.
        ///
        ///   - enable_clockwork_forget_bias (\b optional, default = false).  Whether or not to set the forget gate bias to 5.0 as recommended by [1] Koutnik, J., et al.
        /// </param>
        public LSTMSimpleLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type = LayerParameter.LayerType.LSTM_SIMPLE;

            m_blobBiasMultiplier      = new Blob <T>(m_cuda, m_log);
            m_blobBiasMultiplier.Name = m_param.name + " biasmult";
            m_blobTop             = new Blob <T>(m_cuda, m_log);
            m_blobTop.Name        = m_param.name + " top";
            m_blobCell            = new Blob <T>(m_cuda, m_log);
            m_blobCell.Name       = m_param.name + " cell";
            m_blobPreGate         = new Blob <T>(m_cuda, m_log);
            m_blobPreGate.Name    = m_param.name + " pregate";
            m_blobGate            = new Blob <T>(m_cuda, m_log);
            m_blobGate.Name       = m_param.name + " gate";
            m_blob_C_0            = new Blob <T>(m_cuda, m_log);
            m_blob_C_0.Name       = m_param.name + " c_0";
            m_blob_H_0            = new Blob <T>(m_cuda, m_log);
            m_blob_H_0.Name       = m_param.name + " h_0";
            m_blob_C_T            = new Blob <T>(m_cuda, m_log);
            m_blob_C_T.Name       = m_param.name + " c_t";
            m_blob_H_T            = new Blob <T>(m_cuda, m_log);
            m_blob_H_T.Name       = m_param.name + " h_t";
            m_blob_H_to_Gate      = new Blob <T>(m_cuda, m_log);
            m_blob_H_to_Gate.Name = m_param.name + " h_to_gate";
            m_blob_H_to_H         = new Blob <T>(m_cuda, m_log);
            m_blob_H_to_H.Name    = m_param.name + " h_to_h";
        }
Example #9
        /// <summary>
        /// The BaseDataLayer constructor.
        /// </summary>
        /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
        /// <param name="log">Specifies the Log for output.</param>
        /// <param name="p">Specifies the LayerParameter</param>
        /// <param name="db">Specifies the external database to use.</param>
        public BaseDataLayer(CudaDnn <T> cuda, Log log, LayerParameter p, IXImageDatabase db)
            : base(cuda, log, p)
        {
            if (db != null)
            {
                m_imgdb = db;

                if (p.type == LayerParameter.LayerType.DATA ||
                    p.type == LayerParameter.LayerType.TRIPLET_DATA)
                {
                    m_src = m_imgdb.GetSourceByName(p.data_param.source);
                }

                if (p.transform_param.use_imagedb_mean)
                {
                    if (db != null)
                    {
                        m_imgMean = db.GetImageMean(m_src.ID);
                    }
                    else
                    {
                        m_log.WriteLine("WARNING: The image database is NULL, and therefore no mean image can not be acquired.");
                    }
                }
            }
        }
Example #10
        private List <int> getGpus(int nMax)
        {
            CudaDnn <float> cuda      = new CudaDnn <float>(0);
            List <int>      rgGpu     = new List <int>();
            int             nDevCount = cuda.GetDeviceCount();

            for (int i = 0; i < nDevCount; i++)
            {
                string strDevInfo = cuda.GetDeviceInfo(i, true);
                string strP2PInfo = cuda.GetDeviceP2PInfo(i);

                if (strP2PInfo.Contains("P2P Capable = YES"))
                {
                    rgGpu.Add(i);
                }

                if (rgGpu.Count == nMax)
                {
                    break;
                }
            }

            cuda.Dispose();

            return(rgGpu);
        }
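
A small usage sketch for getGpus, assuming it is called from the same class:

        // Sketch: collect up to two P2P-capable GPUs and report them.
        List<int> rgGpu = getGpus(2);
        Console.WriteLine("Found {0} P2P capable GPU(s): {1}", rgGpu.Count, string.Join(", ", rgGpu));
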
Example #11
 /// <summary>
 /// Constructor.
 /// </summary>
 /// <param name="cuda">Cuda engine.</param>
 /// <param name="log">General log.</param>
 /// <param name="p">provides LossParameter loss_param, with options:
 ///  - ignore_label (optional)
 ///    Specify a label value that should be ignored when computing the loss.
 ///  - normalize (optional, default true)
 ///    If true, the loss is normalized by the number of (nonignored) labels
 ///    present; otherwise the loss is simply summed over spatial locations.
 /// </param>
 public SoftmaxLossLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p)
 {
     m_type          = LayerParameter.LayerType.SOFTMAXWITH_LOSS;
     m_blobProb      = new Blob <T>(cuda, log);
     m_blobProb.Name = m_param.name + " prob";
 }
Example #12
 /// <summary>
 /// The SigmoidCrossEntropyLayer constructor.
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">Specifies the LayerParameter of type SIGMOIDCROSSENTROPY_LOSS.
 /// </param>
 public SigmoidCrossEntropyLossLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p)
 {
     m_type = LayerParameter.LayerType.SIGMOIDCROSSENTROPY_LOSS;
     m_blobSigmoidOutput = new Blob <T>(cuda, log);
     m_sigmoidLayer      = new SigmoidLayer <T>(cuda, log, p);
 }
Example #13
        public static float[] myBlobAdditionTest(CudaDnn <float> cuda, Log log, float fInput1, float fInput2)
        {
            // Create the blobs and load their input data.
            Blob <float> scalar1 = CuSca(cuda, log, fInput1);

            Console.WriteLine("Scalar 1 gpu_data = {0}", scalar1.gpu_data);
            Blob <float> scalar2 = CuSca(cuda, log, fInput2);

            Console.WriteLine("Scalar 2 gpu_data = {0}", scalar2.gpu_data);

            // Do the add.
            Blob <float> blobResult = scalar2.Clone();

            cuda.add(scalar1.count(), scalar1.gpu_data, scalar2.gpu_data, blobResult.mutable_gpu_data);

            // Transfer the data back to CPU memory.
            float[] rgRes = blobResult.mutable_cpu_data;

            // Free up any resources used (including any GPU memory used).
            scalar1.Dispose();
            scalar2.Dispose();
            blobResult.Dispose();

            // Return the result.
            return(rgRes);
        }
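
A usage sketch for myBlobAdditionTest, assuming the CudaDnn<float> connection and Log are created the same way as in the Main example above:

        // Sketch: add 2.5 + 4.0 on the GPU and print the result.
        CudaDnn<float> cuda = new CudaDnn<float>(0, DEVINIT.CUBLAS | DEVINIT.CURAND);
        Log log = new Log("BlobAdditionTest");

        float[] rgResult = myBlobAdditionTest(cuda, log, 2.5f, 4.0f);
        Console.WriteLine("2.5 + 4.0 = {0}", rgResult[0]);

        cuda.Dispose();
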
Example #14
 /// <summary>
 /// The LSTMUnitLayer constructor.
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">Specifies the LayerParameter of type LSTM_UNIT.</param>
 public LSTMUnitLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p)
 {
     m_type           = LayerParameter.LayerType.LSTM_UNIT;
     m_blobXActs      = new Blob <T>(cuda, log);
     m_blobXActs.Name = "lstmu_xacts";
 }
Example #15
        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
        /// <param name="log">Specifies the Log for output.</param>
        /// <param name="p">provides BatchNormParam batch_norm_param.</param>
        public BatchNormLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
            : base(cuda, log, p)
        {
            m_type                          = LayerParameter.LayerType.BATCHNORM;
            m_blobMean                      = new common.Blob <T>(cuda, log);
            m_blobMean.Name                 = "bn_mean";
            m_blobVariance                  = new common.Blob <T>(cuda, log);
            m_blobVariance.Name             = "bn_variance";
            m_blobTemp                      = new common.Blob <T>(cuda, log);
            m_blobTemp.Name                 = "bn_temp";
            m_blobXNorm                     = new common.Blob <T>(cuda, log);
            m_blobXNorm.Name                = "bn_xnorm";
            m_blobBatchSumMultiplier        = new common.Blob <T>(cuda, log);
            m_blobBatchSumMultiplier.Name   = "bn_summult";
            m_blobNumByChans                = new common.Blob <T>(cuda, log);
            m_blobNumByChans.Name           = "bn_numbychan";
            m_blobSpaitalSumMultiplier      = new common.Blob <T>(cuda, log);
            m_blobSpaitalSumMultiplier.Name = "bn_spatialsummult";

            if (p.batch_norm_param.useCudnn())
            {
                m_blobMean.Name     = "save mean";
                m_blobVariance.Name = "save var";

                m_blobPrivateTop         = new Blob <T>(cuda, log);
                m_blobPrivateTop.Name    = "private top";
                m_blobPrivateBottom      = new Blob <T>(cuda, log);
                m_blobPrivateBottom.Name = "private bottom";
                m_blobScaleOnes          = new Blob <T>(cuda, log);
                m_blobScaleOnes.Name     = "scale ones";
                m_blobBiasZeros          = new Blob <T>(cuda, log);
                m_blobBiasZeros.Name     = "bias zeros";
            }
        }
Example #16
 /// <summary>
 /// The EmbedLayer constructor
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">provides EmbedLayer embed_param,
 /// with EmbedLayer options:
 /// - num_output. The number of outputs for the Layer.
 ///
 /// - input_dim. The input given as an integer to be interpreted as one-hot
 /// vector indices with dimension num_input.
 ///
 /// - bias_term (\b optional, default = true).  Whether or not to use bias.
 ///
 /// - weight_filler. The weight filler to use.
 ///
 /// - bias_filler.  The bias filler to use.
 /// </param>
 public EmbedLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p)
 {
     m_type = LayerParameter.LayerType.EMBED;
     m_blobBiasMultiplier      = new common.Blob <T>(cuda, log);
     m_blobBiasMultiplier.Name = m_param.name + " biasmult";
 }
Example #17
 /// <summary>
 /// The EltwiseLayer constructor.
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">
 /// Provides EltwiseParameter eltwise_param with options:
 ///  - operation. The eltwise operation (e.g. product, summation, maximum).
 ///
 ///  - coeff.  A Blob-wise coefficient for summation.
 ///
 ///  - stable_prod_grad.  Optionally use an asymptotically slower but more stable method for computing the gradient for product operations.
 /// </param>
 public EltwiseLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p)
 {
     m_type            = LayerParameter.LayerType.ELTWISE;
     m_blobMaxIdx      = new Blob <T>(cuda, log);
     m_blobMaxIdx.Name = m_param.name + " maxidx";
 }
Example #18
 /// <summary>
 /// The DropoutLayer constructor.
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">
 /// Provides DropoutParameter dropout_param with options:
 ///  - dropout_ratio. The dropout ratio.
 ///
 ///  - seed.  Optionally, specifies a seed for the random number generator used.
 /// </param>
 public DropoutLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p)
 {
     m_type          = LayerParameter.LayerType.DROPOUT;
     m_blobRand      = new Blob <T>(cuda, log);
     m_blobRand.Name = m_param.name + " rand";
 }
Example #19
 /// <summary>
 /// The RmsPropSolver constructor.
 /// </summary>
 /// <param name="cuda">Specifies the instance of CudaDnn to use.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">Specifies teh SolverParameter.</param>
 /// <param name="evtCancel">Specifies a CancelEvent used to cancel the current operation (e.g. training, testing) for which the Solver is performing.</param>
 /// <param name="evtForceSnapshot">Specifies an automatic reset event that causes the Solver to perform a Snapshot when set.</param>
 /// <param name="evtForceTest">Specifies an automatic reset event that causes teh Solver to run a testing cycle when set.</param>
 /// <param name="imgDb">Specifies the CaffeImageDatabase.</param>
 /// <param name="persist">Specifies the peristence used for loading and saving weights.</param>
 /// <param name="nSolverCount">Specifies the number of Solvers participating in a multi-GPU session.</param>
 /// <param name="nSolverRank">Specifies the rank of this Solver in a multi-GPU session.</param>
 public RmsPropSolver(CudaDnn <T> cuda, Log log, SolverParameter p, CancelEvent evtCancel, AutoResetEvent evtForceSnapshot, AutoResetEvent evtForceTest, IXImageDatabase imgDb, IXPersist <T> persist, int nSolverCount = 1, int nSolverRank = 0)
     : base(cuda, log, p, evtCancel, evtForceSnapshot, evtForceTest, imgDb, persist, nSolverCount, nSolverRank)
 {
     m_log.CHECK_EQ(0, m_param.momentum, "Momentum cannot be used with RmsProp.");
     m_log.CHECK_GE(m_param.rms_decay, 0, "rms_decay should lie between 0 and 1.");
     m_log.CHECK_LT(m_param.rms_decay, 1, "rms_decay should lie between 0 and 1.");
 }
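
The CHECK calls above constrain the solver settings. A minimal sketch of values that pass them is shown below; it assumes SolverParameter has a parameterless constructor and writable momentum and rms_decay properties, as the m_param checks imply.

 // Sketch: SolverParameter values accepted by the RmsPropSolver constructor checks.
 SolverParameter p = new SolverParameter();
 p.momentum  = 0;      // RmsProp cannot be combined with momentum.
 p.rms_decay = 0.98;   // Must lie in the range [0, 1).
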
Example #20
 /// <summary>
 /// The EuclideanLossLayer constructor
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">Specifies the LayerParameter of type EUCLIDEAN_LOSS.</param>
 public EuclideanLossLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p)
 {
     m_type          = LayerParameter.LayerType.EUCLIDEAN_LOSS;
     m_blobDiff      = new Blob <T>(cuda, log);
     m_blobDiff.Name = m_param.name + " diff";
 }
Example #21
        /// <summary>
        /// The constructor.
        /// </summary>
        /// <param name="cuda">Specifies the instance of CudaDnn to use.</param>
        /// <param name="log">Specifies the output log.</param>
        /// <param name="evtCancel">Specifies the cancel event used to abort processing.</param>
        /// <param name="strModelType">Specifies the model type: 'vgg19', 'vgg16'</param>
        /// <param name="strModel">Specifies the network model to use.</param>
        /// <param name="rgWeights">Optionally, specifies the weights to use (or <i>null</i> to ignore).</param>
        /// <param name="bCaffeModel">Specifies whether or not the weights are in the caffe (<i>true</i>) or mycaffe (<i>false</i>) format.</param>
        /// <param name="solverType">Optionally, specifies the solver type to use (default = LBFGS).</param>
        /// <param name="dfLearningRate">Optionally, specifies the solver learning rate (default = 1.0).</param>
        public NeuralStyleTransfer(CudaDnn <T> cuda, Log log, CancelEvent evtCancel, string strModelType, string strModel, byte[] rgWeights, bool bCaffeModel, SolverParameter.SolverType solverType = SolverParameter.SolverType.LBFGS, double dfLearningRate = 1.0)
        {
            m_cuda           = cuda;
            m_log            = log;
            m_evtCancel      = evtCancel;
            m_rgWeights      = rgWeights;
            m_solverType     = solverType;
            m_dfLearningRate = dfLearningRate;

            if (m_evtCancel != null)
            {
                m_evtCancel.Reset();
            }

            RawProto proto = RawProto.Parse(strModel);

            m_param = NetParameter.FromProto(proto);

            add_input_layer(m_param);
            m_rgstrUsedLayers = load_layers(strModelType);
            prune(m_param, m_rgstrUsedLayers);
            add_gram_layers(m_param);

            m_transformationParam             = new TransformationParameter();
            m_transformationParam.color_order = (bCaffeModel) ? TransformationParameter.COLOR_ORDER.BGR : TransformationParameter.COLOR_ORDER.RGB;
            m_transformationParam.scale       = 1.0;
            m_transformationParam.mean_value  = m_rgMeanValues;

            m_persist = new PersistCaffe <T>(m_log, false);
        }
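
A constructor usage sketch follows; strModel (the network prototxt text), rgWeights (the weight bytes), and the CancelEvent are placeholders the caller is assumed to supply or load elsewhere.

            // Sketch: create a NeuralStyleTransfer instance from a vgg19 model definition,
            // using weights stored in the caffe format (bCaffeModel = true).
            CancelEvent evtCancel = new CancelEvent();
            NeuralStyleTransfer<float> style = new NeuralStyleTransfer<float>(cuda, log, evtCancel, "vgg19", strModel, rgWeights, true);
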
Example #22
        /// <summary>
        /// The DataLayer constructor.
        /// </summary>
        /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
        /// <param name="log">Specifies the Log for output.</param>
        /// <param name="p">Specifies the LayerParameter data_param</param>
        /// <param name="db">Specifies the external database to use.</param>
        /// <param name="evtCancel">Specifies the CancelEvent used to cancel any pre-fetching operations.</param>
        public DataLayer(CudaDnn <T> cuda, Log log, LayerParameter p, IXImageDatabaseBase db, CancelEvent evtCancel)
            : base(cuda, log, p, db, evtCancel)
        {
            m_type = LayerParameter.LayerType.DATA;

            if (p.data_param.synchronize_target)
            {
                m_rgBatchLabels = new LabelCollection();
            }

            Tuple <IMGDB_LABEL_SELECTION_METHOD, IMGDB_IMAGE_SELECTION_METHOD> kvSel = db.GetSelectionMethod();
            IMGDB_IMAGE_SELECTION_METHOD imgSel = kvSel.Item2;

            if (m_param.data_param.enable_pair_selection.HasValue)
            {
                if (m_param.data_param.enable_pair_selection.Value)
                {
                    imgSel |= IMGDB_IMAGE_SELECTION_METHOD.PAIR;
                }
                else
                {
                    imgSel &= (~IMGDB_IMAGE_SELECTION_METHOD.PAIR);
                }
            }

            if (m_param.data_param.enable_random_selection.HasValue)
            {
                if (m_param.data_param.enable_random_selection.Value)
                {
                    imgSel |= IMGDB_IMAGE_SELECTION_METHOD.RANDOM;
                }
                else
                {
                    imgSel &= (~IMGDB_IMAGE_SELECTION_METHOD.RANDOM);
                }
            }

            db.SetSelectionMethod(null, imgSel);

            m_db = new data.DB <T>(db);
            m_db.Open(p.data_param.source);

            if (p.data_param.display_timing)
            {
                m_swTimerBatch       = new Stopwatch();
                m_swTimerTransaction = new Stopwatch();
            }

            if (m_param.transform_param.mask_param != null && m_param.transform_param.mask_param.Active)
            {
                m_blobMask  = new Blob <T>(cuda, log, false);
                m_blobMask1 = new Blob <T>(cuda, log, false);
            }

            if (m_param.data_param.enable_debug_output)
            {
                m_blobDebug1 = new Blob <T>(cuda, log, false);
            }
        }
Example #23
 /// <summary>
 /// Starts running the internal thread function which then calls the DoWork event.
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda placed in the ActionStartArgs passed along to DoWork.</param>
 /// <param name="log">Specifies the Log for output, placed in the ActionStartArgs passed along to DoWork.</param>
 /// <param name="nDeviceID">Optionally, specifies the DeviceID placed in the ActionStartArgs passed along to DoWork.</param>
 /// <param name="arg">Optionally, specifies an argument defined by the caller.</param>
 public void StartInternalThread(CudaDnn <T> cuda, Log log, int nDeviceID = 0, object arg = null)
 {
     if (m_task == null)
     {
         Action <object> action = new Action <object>(InternalThreadEntry);
         m_task = Task.Factory.StartNew(action, new ActionStateArgs <T>(cuda, log, m_evtCancel, nDeviceID, arg), TaskCreationOptions.LongRunning);
     }
 }
Example #24
 /// <summary>
 /// Create the layers when using the <i>float</i> base type.
 /// </summary>
 /// <param name="cuda">Specifies the connection to the low-level CUDA interfaces.</param>
 /// <param name="log">Specifies the output log.</param>
 /// <param name="p">Specifies the layer parameter.</param>
 /// <param name="evtCancel">Specifies the cancellation event.</param>
 /// <param name="imgDb">Specifies an interface to the image database, who's use is optional.</param>
 /// <returns>If supported, the layer is returned, otherwise <i>null</i> is returned.</returns>
 public Layer <float> CreateSingle(CudaDnn <float> cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXImageDatabaseBase imgDb)
 {
     switch (p.type)
     {
     default:
         return(null);
     }
 }
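
The factory above returns null for every layer type. The sketch below shows one way a case could be added to create a layer defined in these examples (the TripletLossLayer of Example #27); this is an illustration of how such a factory might be extended, not the library's actual implementation.

  public Layer <float> CreateSingle(CudaDnn <float> cuda, Log log, LayerParameter p, CancelEvent evtCancel, IXImageDatabaseBase imgDb)
  {
      switch (p.type)
      {
      // Illustrative case: construct the TripletLossLayer shown in Example #27.
      case LayerParameter.LayerType.TRIPLET_LOSS:
          return(new TripletLossLayer <float>(cuda, log, p));

      default:
          return(null);
      }
  }
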
Example #25
 /// <summary>
 /// The LBFGSSolver constructor.
 /// </summary>
 /// <param name="cuda">Specifies the instance of CudaDnn to use.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">Specifies teh SolverParameter.</param>
 /// <param name="evtCancel">Specifies a CancelEvent used to cancel the current operation (e.g. training, testing) for which the Solver is performing.</param>
 /// <param name="evtForceSnapshot">Specifies an automatic reset event that causes the Solver to perform a Snapshot when set.</param>
 /// <param name="evtForceTest">Specifies an automatic reset event that causes teh Solver to run a testing cycle when set.</param>
 /// <param name="imgDb">Specifies the CaffeImageDatabase.</param>
 /// <param name="persist">Specifies the peristence used for loading and saving weights.</param>
 /// <param name="nSolverCount">Specifies the number of Solvers participating in a multi-GPU session.</param>
 /// <param name="nSolverRank">Specifies the rank of this Solver in a multi-GPU session.</param>
 public LBFGSSolver(CudaDnn <T> cuda, Log log, SolverParameter p, CancelEvent evtCancel, AutoResetEvent evtForceSnapshot, AutoResetEvent evtForceTest, IXImageDatabase imgDb, IXPersist <T> persist, int nSolverCount = 1, int nSolverRank = 0)
     : base(cuda, log, p, evtCancel, evtForceSnapshot, evtForceTest, imgDb, persist, nSolverCount, nSolverRank)
 {
     m_tZero     = (T)Convert.ChangeType(0, typeof(T));
     m_tOne      = (T)Convert.ChangeType(1, typeof(T));
     m_tMinusOne = (T)Convert.ChangeType(-1, typeof(T));
     PreSolve();
 }
Example #26
 /// <summary>
 /// The LBFGSSolver constructor.
 /// </summary>
 /// <param name="cuda">Specifies the instance of CudaDnn to use.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">Specifies teh SolverParameter.</param>
 /// <param name="evtCancel">Specifies a CancelEvent used to cancel the current operation (e.g. training, testing) for which the Solver is performing.</param>
 /// <param name="evtForceSnapshot">Specifies an automatic reset event that causes the Solver to perform a Snapshot when set.</param>
 /// <param name="evtForceTest">Specifies an automatic reset event that causes teh Solver to run a testing cycle when set.</param>
 /// <param name="imgDb">Specifies the MyCaffeImageDatabase.</param>
 /// <param name="persist">Specifies the peristence used for loading and saving weights.</param>
 /// <param name="nSolverCount">Specifies the number of Solvers participating in a multi-GPU session.</param>
 /// <param name="nSolverRank">Specifies the rank of this Solver in a multi-GPU session.</param>
 /// <param name="shareNet">Optionally, specifies the net to share when creating the training network (default = null, meaning no share net is used).</param>
 /// <param name="getws">Optionally, specifies the handler for getting the workspace.</param>
 /// <param name="setws">Optionally, specifies the handler for setting the workspace.</param>
 public LBFGSSolver(CudaDnn <T> cuda, Log log, SolverParameter p, CancelEvent evtCancel, AutoResetEvent evtForceSnapshot, AutoResetEvent evtForceTest, IXImageDatabaseBase imgDb, IXPersist <T> persist, int nSolverCount = 1, int nSolverRank = 0, Net <T> shareNet = null, onGetWorkspace getws = null, onSetWorkspace setws = null)
     : base(cuda, log, p, evtCancel, evtForceSnapshot, evtForceTest, imgDb, persist, nSolverCount, nSolverRank, shareNet, getws, setws)
 {
     m_tZero     = (T)Convert.ChangeType(0, typeof(T));
     m_tOne      = (T)Convert.ChangeType(1, typeof(T));
     m_tMinusOne = (T)Convert.ChangeType(-1, typeof(T));
     PreSolve();
 }
Example #27
 /// <summary>
 /// The TripletLossLayer constructor.
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">Specifies the LayerParameter of type TRIPLET_LOSS with parameter triplet_loss_param.
 /// </param>
 public TripletLossLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p)
 {
     m_type = LayerParameter.LayerType.TRIPLET_LOSS;
     m_blobDiffSameClass      = new Blob <T>(cuda, log, false);
     m_blobDiffSameClass.Name = "diff_pos";
     m_blobDiffDiffClass      = new Blob <T>(cuda, log, false);
     m_blobDiffDiffClass.Name = "diff_neg";
 }
Example #28
 protected virtual void dispose()
 {
     if (m_bResetOnCleanUp)
     {
         CudaDnn <float> cuda = new CudaDnn <float>(0, DEVINIT.NONE);
         cuda.ResetDevice();
         cuda.Dispose();
     }
 }
Example #29
 /// <summary>
 /// The BaseDataLayer constructor.
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">Specifies the LayerParameter of type MEMORYDATA with memorydata_param options:
 ///   - batch_size. The batch size of the data.
 ///
 ///   - channels. The number of channels in the data.
 ///
 ///   - height. The height of the data.
 ///
 ///   - width. The width of the data.
 /// </param>
 public MemoryDataLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p, null)
 {
     m_type           = LayerParameter.LayerType.MEMORYDATA;
     m_blobData       = new Blob <T>(cuda, log);
     m_blobData.Name  = m_param.name + " data";
     m_blobLabel      = new Blob <T>(cuda, log);
     m_blobLabel.Name = m_param.name + " label";
 }
Example #30
 /// <summary>
 /// The PoolingLayer constructor.
 /// </summary>
 /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
 /// <param name="log">Specifies the Log for output.</param>
 /// <param name="p">
 /// Provides PoolingParameter pooling_param with PoolingLayer options:
 ///  - num_output. The number of filters.
 ///
 ///  - kernel_size / kernel_h / kernel_w.  The pooling dimensions, given by
 ///  kernel_size for square pooling or kernel_h and kernel_w for rectangular
 ///  pooling.
 ///
 ///  - stride / stride_h / stride_w. (\b optional, default 1).  The pool
 ///  stride, given by stride for equal dimensions or by stride_h and stride_w
 ///  for different strides.  By default the pool is dense with stride 1.
 ///
 ///  - pad / pad_h / pad_w. (\b optional, default 0). The zero-padding for
 ///  pooling, given by pad for equal dimensions or pad_h and pad_w for
 ///  different padding.  Input padding is computed implicitly instead of
 ///  actual padding.
 ///
 ///  - global_pooling (\b optional, default = false). Whether to use global
 ///  pooling or not.
 ///
 ///  - engine: convolution has Engine.CAFFE (matrix multiplication) and Engine.CUDNN (library
 ///  kernels + stream parallelism) engines.
 /// </param>
 public PoolingLayer(CudaDnn <T> cuda, Log log, LayerParameter p)
     : base(cuda, log, p)
 {
     m_type             = LayerParameter.LayerType.POOLING;
     m_blobRandIdx      = new Blob <T>(cuda, log);
     m_blobRandIdx.Name = m_param.name + " randidx";
     m_blobMaxIdx       = new Blob <T>(cuda, log);
     m_blobMaxIdx.Name  = m_param.name + " maxidx";
 }
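
A construction sketch for the layer follows. It assumes a LayerParameter can be created directly from a LayerType value and a name, and that pooling_param carries the options listed above; the exact parameter field types are not shown in these examples, so the configuration is left as a comment.

  // Sketch: build a POOLING LayerParameter and construct the layer with it.
  // The pooling_param options (kernel_size, stride, pad, global_pooling, engine)
  // documented above would be configured here before constructing the layer.
  LayerParameter p = new LayerParameter(LayerParameter.LayerType.POOLING, "pool1");
  PoolingLayer<float> pool = new PoolingLayer<float>(cuda, log, p);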