Example #1
    /// <summary>
    /// Reset the internal bottom and top collections to a single bottom/top pair.
    /// </summary>
    private void setupBtmTop(Blob<T> btm, Blob<T> top)
    {
        m_rgBtm.Clear();
        m_rgBtm.Add(btm);
        m_rgTop.Clear();
        m_rgTop.Add(top);
    }
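A typical caller pairs this helper with an internal layer's Forward call, routing one blob through the layer via the two collections. A minimal usage sketch, assuming a hypothetical internal layer field m_internalLayer (not part of the original snippet):

        // Hypothetical usage: run 'blobIn' through an internal layer into 'blobOut'.
        setupBtmTop(blobIn, blobOut);
        m_internalLayer.Forward(m_rgBtm, m_rgTop);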
Example #2
        /// <summary>
        /// Reset the internal bottom and top collections to a single bottom/top pair.
        /// </summary>
        private void addInternal(Blob<T> bottom, Blob<T> top)
        {
            m_colInternalBottom.Clear();
            m_colInternalBottom.Add(bottom);

            m_colInternalTop.Clear();
            m_colInternalTop.Add(top);
        }
Example #3
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            base.LayerSetUp(colBottom, colTop);

            // Internal softmax layer.
            LayerParameter softmax_param = new LayerParameter(LayerParameter.LayerType.SOFTMAX);

            softmax_param.softmax_param.axis = m_param.infogain_loss_param.axis;
            softmax_param.loss_weight.Clear();
            softmax_param.loss_weight.Add(1);
            m_softmaxLayer = new SoftmaxLayer<T>(m_cuda, m_log, softmax_param);
            m_colSoftmaxBottomVec.Clear();
            m_colSoftmaxBottomVec.Add(colBottom[0]);
            m_colSoftmaxTopVec.Clear();
            m_colSoftmaxTopVec.Add(m_blobProb);
            m_softmaxLayer.Setup(m_colSoftmaxBottomVec, m_colSoftmaxTopVec);

            // ignore label.
            m_nIgnoreLabel = m_param.loss_param.ignore_label;

            // normalization
            m_log.CHECK(!m_param.loss_param.normalize, "normalize is deprecated; use 'normalization' instead.");
            m_normalization = m_param.loss_param.normalization;

            // matrix H
            if (colBottom.Count < 3)
            {
                m_log.CHECK(m_param.infogain_loss_param.source != null, "Infogain matrix source must be specified.");
                PersistCaffe<T> persist   = new PersistCaffe<T>(m_log, true);
                BlobProto       blobProto = persist.LoadBlobProto(m_param.infogain_loss_param.source, 1);
                m_blobInfoGain.FromProto(blobProto);
            }
        }
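For reference, the setup above feeds a forward pass that computes the infogain loss E = -(1/N) * sum_n sum_k H(l_n, k) * log(p_nk), where p is the internal softmax output, l_n the label of sample n, and H the infogain matrix (an identity H reduces this to multinomial logistic loss). A minimal CPU sketch of that sum, as an illustration only (not MyCaffe's GPU implementation):

        // Illustration: infogain loss over N samples and K classes.
        // prob:   N x K softmax probabilities.
        // labels: N ground-truth class indices.
        // H:      K x K infogain matrix.
        static double InfogainLoss(double[,] prob, int[] labels, double[,] H)
        {
            int nNum = prob.GetLength(0);
            int nClasses = prob.GetLength(1);
            double dfLoss = 0;

            for (int n = 0; n < nNum; n++)
            {
                for (int k = 0; k < nClasses; k++)
                {
                    // Clamp the probability to avoid log(0).
                    double dfProb = Math.Max(prob[n, k], 1e-20);
                    dfLoss -= H[labels[n], k] * Math.Log(dfProb);
                }
            }

            return dfLoss / nNum;
        }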
Example #4
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            base.LayerSetUp(colBottom, colTop);

            m_colSigmoidBottom.Clear();
            m_colSigmoidBottom.Add(m_blobSigmoidInput);
            m_colSigmoidTop.Clear();
            m_colSigmoidTop.Add(m_blobSigmoidOutput);
            m_sigmoidLayer.LayerSetUp(m_colSigmoidBottom, m_colSigmoidTop);
        }
Example #5
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection<T> colBottom, BlobCollection<T> colTop)
        {
            base.LayerSetUp(colBottom, colTop);

            m_colSigmoidBottomVec.Clear();
            m_colSigmoidBottomVec.Add(colBottom[0]);
            m_colSigmoidTopVec.Clear();
            m_colSigmoidTopVec.Add(m_blobSigmoidOutput);
            m_sigmoidLayer.Setup(m_colSigmoidBottomVec, m_colSigmoidTopVec);

            m_nIgnoreLabel = m_param.loss_param.ignore_label;
        }
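Both setups wire an internal sigmoid layer whose output feeds a cross-entropy term. For reference, Caffe-style implementations evaluate that term in the numerically stable form loss = max(x, 0) - x * t + log(1 + exp(-|x|)), computed from the raw (pre-sigmoid) input x and target t rather than from the sigmoid output. A minimal sketch, as an illustration only:

        // Illustration: numerically stable sigmoid cross-entropy on raw inputs.
        // x: raw (pre-sigmoid) predictions; t: targets in [0, 1].
        static double SigmoidCrossEntropyLoss(double[] x, double[] t)
        {
            double dfLoss = 0;

            for (int i = 0; i < x.Length; i++)
            {
                dfLoss += Math.Max(x[i], 0) - x[i] * t[i] + Math.Log(1 + Math.Exp(-Math.Abs(x[i])));
            }

            return dfLoss / x.Length;
        }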
Example #6
        /// <summary>
        /// Runs the pre-solve which prepares the Solver to start solving.
        /// </summary>
        public void PreSolve()
        {
            BlobCollection<T> colNetParams = m_net.learnable_parameters;

            m_colHistory.Clear(true);
//            m_colUpdate.Clear(true);
            m_colTemp.Clear(true);

            for (int i = 0; i < colNetParams.Count; i++)
            {
                List<int> rgShape = colNetParams[i].shape();

                m_colHistory.Add(new Blob<T>(m_cuda, m_log, rgShape, false));   // diff never used
//                m_colUpdate.Add(new Blob<T>(m_cuda, m_log, rgShape, false));
                m_colTemp.Add(new Blob<T>(m_cuda, m_log, rgShape, false));      // diff never used
            }
        }
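The history blobs allocated here hold one persistent buffer per learnable parameter (with diffs disabled, as the comments note, to save GPU memory). Assuming this PreSolve belongs to an SGD-style solver, each buffer carries the momentum term of the classic update history = momentum * history + lr * grad; weight -= history. A minimal CPU sketch of that rule, as an illustration only (not the solver's GPU path):

        // Illustration: SGD-with-momentum update using a persistent history buffer.
        static void SgdUpdate(double[] rgWeight, double[] rgGrad, double[] rgHistory, double dfLr, double dfMomentum)
        {
            for (int i = 0; i < rgWeight.Length; i++)
            {
                rgHistory[i] = dfMomentum * rgHistory[i] + dfLr * rgGrad[i];
                rgWeight[i] -= rgHistory[i];
            }
        }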
Example #7
        /// <summary>
        /// Runs the pre-solve which prepares the Solver to start solving.
        /// </summary>
        public void PreSolve()
        {
            BlobCollection<T> net_params = m_net.learnable_parameters;

            m_nN = 0;

            for (int i = 0; i < net_params.Count; i++)
            {
                if (m_net.params_lr[i] != 0)
                {
                    m_nN += net_params[i].count();
                }
            }

            // Nothing to do, all learnable parameters have lr_mult = 0
            if (m_nN == 0)
            {
                return;
            }

            List<int> rgShape = new List<int>()
            {
                m_nN
            };

            m_colBlobHistoryS.Clear();
            m_colBlobHistoryY.Clear();
            m_rgRhoHistory.Clear();
            m_nStart = 0;
            m_nEnd   = -1;

            m_blobGradients          = new Blob<T>(m_cuda, m_log, rgShape, false);
            m_blobGradients.Name     = "gradients";
            m_blobGradientsPrev      = new Blob<T>(m_cuda, m_log, rgShape, false);
            m_blobGradientsPrev.Name = "gradients prev";
            m_blobDirection          = new Blob<T>(m_cuda, m_log, rgShape, false);
            m_blobDirection.Name     = "direction";

            for (int i = 0; i < m_param.lbgfs_corrections; i++)
            {
                m_colBlobHistoryS.Add(new Blob<T>(m_cuda, m_log, rgShape, false));
                m_colBlobHistoryY.Add(new Blob<T>(m_cuda, m_log, rgShape, false));
                m_rgRhoHistory.Add(0);
            }
        }
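The S (parameter-step), Y (gradient-change) and rho histories allocated above are what the standard L-BFGS two-loop recursion consumes to turn the current gradient into a search direction, with rho_k = 1 / (y_k . s_k). A minimal CPU sketch of that recursion, as an illustration only (not the solver's GPU implementation):

        // Illustration: L-BFGS two-loop recursion over the stored (s, y, rho) corrections.
        // g is the current gradient; the result approximates H*g, so the descent
        // direction is its negation.
        static double[] TwoLoopRecursion(double[] g, List<double[]> rgS, List<double[]> rgY, List<double> rgRho)
        {
            int nM = rgS.Count;
            double[] rgQ = (double[])g.Clone();
            double[] rgAlpha = new double[nM];

            // First loop: newest to oldest correction.
            for (int i = nM - 1; i >= 0; i--)
            {
                rgAlpha[i] = rgRho[i] * Dot(rgS[i], rgQ);
                Axpy(-rgAlpha[i], rgY[i], rgQ);         // q -= alpha_i * y_i
            }

            // Scale by gamma = (s . y) / (y . y) of the newest pair as the
            // initial inverse-Hessian guess.
            if (nM > 0)
            {
                double dfGamma = Dot(rgS[nM - 1], rgY[nM - 1]) / Dot(rgY[nM - 1], rgY[nM - 1]);
                for (int i = 0; i < rgQ.Length; i++)
                {
                    rgQ[i] *= dfGamma;
                }
            }

            // Second loop: oldest to newest correction.
            for (int i = 0; i < nM; i++)
            {
                double dfBeta = rgRho[i] * Dot(rgY[i], rgQ);
                Axpy(rgAlpha[i] - dfBeta, rgS[i], rgQ); // q += (alpha_i - beta) * s_i
            }

            return rgQ;
        }

        static double Dot(double[] a, double[] b)
        {
            double dfSum = 0;
            for (int i = 0; i < a.Length; i++)
            {
                dfSum += a[i] * b[i];
            }
            return dfSum;
        }

        static void Axpy(double dfAlpha, double[] x, double[] y)
        {
            for (int i = 0; i < y.Length; i++)
            {
                y[i] += dfAlpha * x[i];
            }
        }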