Example #1
        private IEnumerable <HBaseCell> ProcessDictionaryMap(DictionaryMap map, ObjectAccessor accessor, string key,
                                                             IList <string> descriptors)
        {
            if (!(accessor[map.Name] is IDictionary target)) // Not a dictionary property; nothing to emit.
            {
                return new List <HBaseCell>();
            }
            }

            var output = new ConcurrentBag <HBaseCell>();

            Parallel.ForEach(target.Keys.Cast <object>(), dk =>
            {
                var fullName = $"{map.ColumnFamily}:{dk}";

                if (descriptors.Count == 0 || descriptors.Contains(fullName))
                {
                    output.Add(new HBaseCell
                    {
                        FullColumnName = fullName,
                        ValueString    = target[dk]?.ToString(),
                        RowKey         = key
                    });
                }
            });
            return output;
        }
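A note on the pattern above: IDictionary.Keys is a non-generic ICollection, so Cast<object>() (from System.Linq) is what makes it safe to hand to Parallel.ForEach. Below is a minimal, self-contained sketch of the same filter-and-collect idea using only BCL types; the FilterCells name and tuple result are illustrative, not part of the original API.

        // Requires System.Collections, System.Collections.Concurrent,
        // System.Collections.Generic, System.Linq and System.Threading.Tasks.
        static IEnumerable<(string Column, string Value)> FilterCells(
            IDictionary target, string columnFamily, IList<string> descriptors)
        {
            var output = new ConcurrentBag<(string, string)>();

            // Cast<object>() adapts the non-generic key collection for Parallel.ForEach.
            Parallel.ForEach(target.Keys.Cast<object>(), dk =>
            {
                var fullName = $"{columnFamily}:{dk}";

                // An empty descriptor list means "take every column".
                if (descriptors.Count == 0 || descriptors.Contains(fullName))
                {
                    output.Add((fullName, target[dk]?.ToString()));
                }
            });

            return output;
        }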
Example #2
 public EntityMap(Scene scene)
 {
     Name               = string.Format("{0}.{1}", scene.Name, GetType().Name);
     this.scene         = scene;
     entities           = new Dictionary <long, Entity>();
     components         = new Dictionary <long, Component>();
     componentsByEntity = new DictionaryMap <long, long, Component>();
 }
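The three-type-parameter DictionaryMap<long, long, Component> above indexes components by two keys, presumably entity id and component id. As a rough sketch of what such a two-key map might look like, assuming it is essentially a nested dictionary (the real library type may differ):

        // Hypothetical sketch of a two-key DictionaryMap built on a nested
        // Dictionary; requires System.Collections.Generic.
        public class DictionaryMap<TKey1, TKey2, TValue>
        {
            private readonly Dictionary<TKey1, Dictionary<TKey2, TValue>> map =
                new Dictionary<TKey1, Dictionary<TKey2, TValue>>();

            public void Add(TKey1 key1, TKey2 key2, TValue value)
            {
                if (!map.TryGetValue(key1, out var inner))
                {
                    inner = new Dictionary<TKey2, TValue>();
                    map[key1] = inner;
                }

                inner[key2] = value;
            }

            public bool TryGetValue(TKey1 key1, TKey2 key2, out TValue value)
            {
                value = default;
                return map.TryGetValue(key1, out var inner) &&
                       inner.TryGetValue(key2, out value);
            }
        }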
Example #3
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            if (colBottom.Count == 1 && m_colBlobs.Count > 0)
            {
                m_log.WriteLine("Skipping parameter initialization.");
            }
            else if (colBottom.Count == 1)
            {
                // bias is a learned parameter; initialize it.
                BiasParameter p        = m_param.bias_param;
                int           nAxis    = colBottom[0].CanonicalAxisIndex(p.axis);
                int           nNumAxes = p.num_axes;

                m_log.CHECK_GE(nNumAxes, -1, "num_axes must be non-negative, or -1 to extend to end of bottom[0].");

                if (nNumAxes >= 0)
                {
                    m_log.CHECK_GE(colBottom[0].num_axes, nAxis + nNumAxes, "bias blob's shape extends past bottom[0]'s shape when applied starting with bottom[0] axis = " + nAxis.ToString());
                }

                m_colBlobs = new BlobCollection <T>();

                List <int> rgBiasShape = new List <int>();
                int        nStart      = nAxis;
                int        nEnd        = (nNumAxes == -1) ? colBottom[0].shape().Count : nStart + nNumAxes;

                for (int i = nStart; i < nEnd; i++)
                {
                    rgBiasShape.Add(colBottom[0].shape(i));
                }

                Blob <T> blobBias = new Blob <T>(m_cuda, m_log);
                blobBias.Name = m_param.name + " bias";
                blobBias.type = BLOB_TYPE.WEIGHT;

                if (!shareParameter(blobBias, rgBiasShape))
                {
                    blobBias.Reshape(rgBiasShape);
                    FillerParameter fp = p.filler;
                    if (fp == null)
                    {
                        fp = new FillerParameter("constant", 0.0);
                    }

                    Filler <T> filler = Filler <T> .Create(m_cuda, m_log, fp);

                    filler.Fill(blobBias);
                }
                m_colBlobs.Add(blobBias);
            }

            m_rgbParamPropagateDown = new DictionaryMap <bool>(m_colBlobs.Count, true);
        }
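The loop above slices the bias shape out of bottom[0]'s shape, taking the axes [axis, axis + num_axes), or everything through the last axis when num_axes is -1. A small worked sketch of that slice (the BiasShape helper is illustrative):

        // Requires System.Collections.Generic.
        static List<int> BiasShape(List<int> bottomShape, int axis, int numAxes)
        {
            int start = axis;
            int end = (numAxes == -1) ? bottomShape.Count : start + numAxes;

            var shape = new List<int>();
            for (int i = start; i < end; i++)
            {
                shape.Add(bottomShape[i]);
            }
            return shape;
        }

        // For bottom shape (N, C, H, W) = (2, 3, 4, 5):
        //   BiasShape(shape, axis: 1, numAxes: 1)  -> { 3 }        (one bias per channel)
        //   BiasShape(shape, axis: 1, numAxes: -1) -> { 3, 4, 5 }  (one bias per C*H*W element)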
Example #4
        public ITermMappingProvider Visit(DictionaryMap.ValueMap valueMap)
        {
            if (valueMap.TermUri != null)
            {
                return new ValueMappingProvider(valueMap.TermUri);
            }

            if (valueMap.NamespacePrefix != null && valueMap.TermName != null)
            {
                return new ValueMappingProvider(valueMap.NamespacePrefix, valueMap.TermName);
            }

            return new ValueMappingProvider();
        }
Example #5
        public ITermMappingProvider Visit(DictionaryMap.KeyMap keyMap)
        {
            if (keyMap.TermUri != null)
            {
                return new KeyMappingProvider(keyMap.TermUri);
            }

            if (keyMap.NamespacePrefix != null && keyMap.TermName != null)
            {
                return new KeyMappingProvider(keyMap.NamespacePrefix, keyMap.TermName);
            }

            return new KeyMappingProvider();
        }
Example #6
        /// <summary>
        /// The Layer constructor.
        /// </summary>
        /// <remarks>
        /// Setup code for derived classes should go into an override of the LayerSetup function, where the
        /// dimensions of the Blobs are provided to the Layer.
        /// </remarks>
        /// <param name="cuda">Specifies the CudaDnn connection to Cuda.</param>
        /// <param name="log">Specifies the Log for output.</param>
        /// <param name="p">Specifies the LayerParameter that contains the settings of the Layer.</param>
        public Layer(CudaDnn <T> cuda, Log log, LayerParameter p)
        {
            m_cuda  = cuda;
            m_log   = log;
            m_param = p.Clone(true);
            m_phase = p.phase;
            m_rgbParamPropagateDown = new DictionaryMap <bool>(false);
            m_rgLoss   = new DictionaryMap <double>(0.0);
            m_colBlobs = new BlobCollection <T>();

            for (int i = 0; i < p.blobs.Count; i++)
            {
                m_colBlobs.Add(new Blob <T>(cuda, log, p.blobs[i]));
            }

            m_tOne  = (T)Convert.ChangeType(1, typeof(T));
            m_tZero = (T)Convert.ChangeType(0, typeof(T));
        }
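The last two lines materialize the generic constants 1 and 0 via Convert.ChangeType, which works for T = float or T = double without specializing the class per type. A minimal sketch of the pattern:

        // Requires System.
        static T One<T>()  { return (T)Convert.ChangeType(1, typeof(T)); }
        static T Zero<T>() { return (T)Convert.ChangeType(0, typeof(T)); }

        // One<float>()  -> 1.0f
        // One<double>() -> 1.0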
Example #7
        private void ProcessDictionaryMap(DictionaryMap map, ObjectAccessor accessor,
                                          Dictionary <byte[], TCell> input, ConcurrentDictionary <string, long> ts)
        {
            var targetedCells = input.Where(x => x.Key.StartsWith(map.FullColumnFamily.GetBytes()));
            var genericParams = map.Type.GetGenericArguments();

            if (genericParams.Length != 2)
            {
                throw new Exception($"Expected 2 type parameters, got {genericParams.Length}");
            }

            var type = typeof(Dictionary <,>).MakeGenericType(genericParams);
            var obj  = (IDictionary)Activator.CreateInstance(type);

            foreach (var column in targetedCells) //TODO: Parallelize this (tip: expose a ConcurrentDictionary behind the IDictionary interface).
            {
                var key   = TypeDescriptor.GetConverter(genericParams.First());
                var value = TypeDescriptor.GetConverter(genericParams.Last());

                obj.Add(key.ConvertFromString(column.Key.GetString().RemoveCf(map.ColumnFamily)), value.ConvertFromString(column.Value.Value.GetString()));

                ts.TryAdd(column.Key.GetString(), column.Value.Timestamp);
            }
        }
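The method above builds a closed Dictionary<,> from runtime type arguments and fills it from strings via TypeDescriptor converters. A self-contained sketch of that reflection pattern (the BuildDictionary helper is illustrative; it also hoists the converters out of the loop, which the original could do as well):

        // Requires System, System.Collections, System.Collections.Generic
        // and System.ComponentModel.
        static IDictionary BuildDictionary(Type keyType, Type valueType,
                                           IEnumerable<KeyValuePair<string, string>> raw)
        {
            var type = typeof(Dictionary<,>).MakeGenericType(keyType, valueType);
            var obj = (IDictionary)Activator.CreateInstance(type);

            var keyConverter = TypeDescriptor.GetConverter(keyType);
            var valueConverter = TypeDescriptor.GetConverter(valueType);

            foreach (var pair in raw)
            {
                obj.Add(keyConverter.ConvertFromString(pair.Key),
                        valueConverter.ConvertFromString(pair.Value));
            }

            return obj;
        }

        // BuildDictionary(typeof(int), typeof(double),
        //     new[] { new KeyValuePair<string, string>("1", "2.5") })
        // yields a Dictionary<int, double> { [1] = 2.5 }.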
Example #8
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            int nNumOutput = (int)m_param.inner_product_param.num_output;

            m_bBiasTerm    = m_param.inner_product_param.bias_term;
            m_bTranspose   = m_param.inner_product_param.transpose;
            m_bEnableNoise = m_param.inner_product_param.enable_noise;
            m_dfSigmaInit  = m_param.inner_product_param.sigma_init;
            m_nN           = nNumOutput;

            List <int> rgShape     = colBottom[0].shape();
            int        nShapeCount = rgShape.Count;

            for (int i = nShapeCount; i <= m_param.inner_product_param.axis; i++)
            {
                rgShape.Add(1);
            }

            if (nShapeCount != rgShape.Count)
            {
                colBottom[0].Reshape(rgShape);
            }

            int nAxis = colBottom[0].CanonicalAxisIndex(m_param.inner_product_param.axis);

            // Dimensions starting from 'axis' are 'flattened' into a single
            // length K_ vector. For example, if bottom[0]'s shape is (N, C, H, W),
            // and axis == 1, N inner products with dimension CHW are performed.
            m_nK = colBottom[0].count(nAxis);

            // Check if we need to set up the weights.
            if (m_colBlobs.Count > 0)
            {
                m_log.WriteLine("Skipping parameter initialization.");
            }
            else
            {
                // Initialize the weight.
                List <int> rgWeightShape = Utility.Create <int>(2, 0);

                if (m_bTranspose)
                {
                    rgWeightShape[0] = m_nK;
                    rgWeightShape[1] = m_nN;
                }
                else
                {
                    rgWeightShape[0] = m_nN;
                    rgWeightShape[1] = m_nK;
                }

                double   dfNoiseRange = 1.0 / Math.Sqrt(rgWeightShape[1]);
                Blob <T> blobWeight   = new Blob <T>(m_cuda, m_log);
                blobWeight.Name = m_param.name + " weights";
                blobWeight.type = BLOB_TYPE.IP_WEIGHT;

                if (!shareParameter(blobWeight, rgWeightShape))
                {
                    blobWeight.Reshape(rgWeightShape);
                    Filler <T> weight_filler = Filler <T> .Create(m_cuda, m_log, m_param.inner_product_param.weight_filler);

                    weight_filler.Fill(blobWeight);

                    if (m_bEnableNoise)
                    {
                        blobWeight.scale_data(dfNoiseRange);
                    }
                }
                m_colBlobs.Add(blobWeight);

                // If necessary, initialize and fill the bias term.
                if (m_bBiasTerm)
                {
                    List <int> rgBiasShape = Utility.Create <int>(1, 0);
                    rgBiasShape[0] = m_nN;

                    Blob <T> blobBias = new Blob <T>(m_cuda, m_log);
                    blobBias.Name = m_param.name + " bias";
                    blobBias.type = BLOB_TYPE.IP_WEIGHT;

                    if (!shareParameter(blobBias, rgBiasShape))
                    {
                        blobBias.Reshape(rgBiasShape);
                        Filler <T> bias_filler = Filler <T> .Create(m_cuda, m_log, m_param.inner_product_param.bias_filler);

                        bias_filler.Fill(blobBias);

                        if (m_bEnableNoise)
                        {
                            blobBias.scale_data(dfNoiseRange);
                        }
                    }
                    m_colBlobs.Add(blobBias);
                }

                // Add Noise sigma weight and bias
                if (m_bEnableNoise)
                {
                    FillerParameter fp = new FillerParameter("uniform");
                    fp.min          = -1;
                    fp.max          = 1;
                    m_fillerEpsilon = Filler <T> .Create(m_cuda, m_log, fp);

                    Blob <T> blobSigmaWeight = new Blob <T>(m_cuda, m_log);
                    blobSigmaWeight.Name = m_param.name + " sigma_wt";
                    blobSigmaWeight.type = BLOB_TYPE.WEIGHT;
                    blobSigmaWeight.ReshapeLike(m_colBlobs[0]);
                    blobSigmaWeight.SetData(m_dfSigmaInit / Math.Sqrt(blobSigmaWeight.shape(1)));
                    m_colBlobs.Add(blobSigmaWeight);
                    m_blobEpsilonWeight.ReshapeLike(blobSigmaWeight);

                    if (m_bBiasTerm)
                    {
                        Blob <T> blobSigmaBias = new Blob <T>(m_cuda, m_log);
                        blobSigmaBias.Name = m_param.name + " sigma_bias";
                        blobSigmaBias.type = BLOB_TYPE.WEIGHT;
                        blobSigmaBias.ReshapeLike(m_colBlobs[1]);
                        blobSigmaBias.SetData(m_dfSigmaInit / Math.Sqrt(blobSigmaBias.shape(0)));
                        m_colBlobs.Add(blobSigmaBias);
                        m_blobEpsilonBias.ReshapeLike(blobSigmaBias);
                    }

                    ResetNoise();
                }
            }

            m_rgbParamPropagateDown = new DictionaryMap <bool>(m_colBlobs.Count, true);
        }
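The count(nAxis) call noted in the comment above flattens all dimensions from 'axis' onward into the inner-product dimension K. A tiny sketch of that computation:

        // Requires System.Collections.Generic.
        static int Count(List<int> shape, int axis)
        {
            int count = 1;
            for (int i = axis; i < shape.Count; i++)
            {
                count *= shape[i];
            }
            return count;
        }

        // Count(new List<int> { 2, 3, 4, 5 }, 1) -> 60  (K = 3*4*5)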
Example #9
        /// <summary>
        /// Computes the multibox loss error gradient w.r.t the predictions.
        /// </summary>
        /// <remarks>
        /// Gradients cannot be computed with respect to the prior inputs (bottom[2])
        /// or the label inputs (bottom[3]), so this method requires propagate_down[2]
        /// and propagate_down[3] to be false, failing if either is true.
        /// </remarks>
        /// <param name="colTop">top output blob vector, providing the error gradient with
        /// respect to the outputs.
        /// </param>
        /// <param name="rgbPropagateDown">see Layer::Backward.  propagate_down[1] must be false as
        /// we can't compute gradients with respect to the labels.</param>
        /// <param name="colBottom">bottom input blob vector
        /// </param>
        protected override void backward(BlobCollection <T> colTop, List <bool> rgbPropagateDown, BlobCollection <T> colBottom)
        {
            if (rgbPropagateDown[2])
            {
                m_log.FAIL(m_type.ToString() + " Layer cannot backpropagate to prior inputs.");
            }

            if (rgbPropagateDown[3])
            {
                m_log.FAIL(m_type.ToString() + " Layer cannot backpropagate to label inputs.");
            }

            // Back propagate on location prediction.
            if (rgbPropagateDown[0])
            {
                colBottom[0].SetDiff(0);

                if (m_nNumMatches >= 1)
                {
                    int         nLocBottomDiffOffset = 0;
                    float[]     rgfLocBottomDiff     = Utility.ConvertVecF <T>(colBottom[0].mutable_cpu_diff);
                    List <bool> rgLocPropagateDown   = new List <bool>();

                    // Only back propagate on prediction, not ground truth.
                    rgLocPropagateDown.Add(true);
                    rgLocPropagateDown.Add(false);
                    m_locLossLayer.Backward(m_colLocTop, rgLocPropagateDown, m_colLocBottom);

                    // Scale gradient.
                    double dfNormalizer = GetNormalizer(m_param.loss_param.normalization.Value, m_nNum, m_nNumPriors, m_nNumMatches);
                    double dfLossWeight = Utility.ConvertVal <T>(colTop[0].GetDiff(0)) / dfNormalizer;
                    m_blobLocPred.scale_diff(dfLossWeight);

                    // Copy gradient back to bottom[0].
                    float[] rgfLocPredDiff = Utility.ConvertVecF <T>(m_blobLocPred.mutable_cpu_diff);
                    int     nCount         = 0;

                    for (int i = 0; i < m_nNum; i++)
                    {
                        DictionaryMap <List <int> > rgMap = m_rgAllMatchIndices[i];

                        foreach (KeyValuePair <int, List <int> > kv in rgMap.Map)
                        {
                            int        nLabel       = (m_bShareLocation) ? 0 : kv.Key;
                            List <int> rgMatchIndex = kv.Value;

                            for (int j = 0; j < rgMatchIndex.Count; j++)
                            {
                                if (rgMatchIndex[j] <= -1)
                                {
                                    continue;
                                }

                                // Copy the diff to the right place.
                                int nStartIdx = m_nLocClasses * 4 * j + nLabel * 4;
                                Array.Copy(rgfLocPredDiff, nCount * 4, rgfLocBottomDiff, nLocBottomDiffOffset + nStartIdx, 4);
                                nCount++;
                            }

                            nLocBottomDiffOffset += colBottom[0].offset(1);
                        }
                    }

                    colBottom[0].mutable_cpu_diff = Utility.ConvertVec <T>(rgfLocBottomDiff);
                }
            }

            // Back propagate on confidence prediction
            if (rgbPropagateDown[1])
            {
                colBottom[1].SetDiff(0);

                if (m_nNumConf >= 1)
                {
                    int         nConfBottomDiffOffset = 0;
                    float[]     rgfConfBottomDiff     = Utility.ConvertVecF <T>(colBottom[1].mutable_cpu_diff);
                    List <bool> rgConfPropagateDown   = new List <bool>();

                    // Only back propagate on prediction, not ground truth.
                    rgConfPropagateDown.Add(true);
                    rgConfPropagateDown.Add(false);
                    m_confLossLayer.Backward(m_colConfTop, rgConfPropagateDown, m_colConfBottom);

                    // Scale gradient.
                    double dfNormalizer = GetNormalizer(m_param.loss_param.normalization.Value, m_nNum, m_nNumPriors, m_nNumMatches);
                    double dfLossWeight = Utility.ConvertVal <T>(colTop[0].GetDiff(0)) / dfNormalizer;
                    m_blobConfPred.scale_diff(dfLossWeight);

                    // Copy gradient back to bottom[1].
                    float[] rgfConfPredDiff = Utility.ConvertVecF <T>(m_blobConfPred.mutable_cpu_diff);
                    if (m_bDoNegMining)
                    {
                        int nCount = 0;

                        for (int i = 0; i < m_nNum; i++)
                        {
                            Dictionary <int, List <int> > rgMap = m_rgAllMatchIndices[i].Map;

                            foreach (KeyValuePair <int, List <int> > kv in rgMap)
                            {
                                int        nLabel       = kv.Key;
                                List <int> rgMatchIndex = kv.Value;

                                for (int j = 0; j < m_nNumPriors; j++)
                                {
                                    if (rgMatchIndex[j] <= -1)
                                    {
                                        continue;
                                    }

                                    // Copy the diff to the right place.
                                    Array.Copy(rgfConfPredDiff, nCount * m_nNumClasses, rgfConfBottomDiff, nConfBottomDiffOffset + j * m_nNumClasses, m_nNumClasses);
                                    nCount++;
                                }

                                nConfBottomDiffOffset += colBottom[1].offset(1);
                            }
                        }
                    }
                    else
                    {
                        // The diff is already computed and stored.
                        m_cuda.copy(colBottom[1].count(), m_blobConfPred.gpu_diff, colBottom[1].mutable_gpu_diff);
                    }

                    colBottom[1].mutable_cpu_diff = Utility.ConvertVec <T>(rgfConfBottomDiff);
                }
            }

            // After backward, remove match statistics.
            m_rgAllMatchIndices.Clear();
            m_rgrgAllNegIndices.Clear();
        }
Example #10
        /// <summary>
        /// Forward computation.
        /// </summary>
        /// <param name="colBottom">input blob vector.
        /// </param>
        /// <param name="colTop">output blob vector.
        /// </param>
        protected override void forward(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            float[] rgfLocData   = Utility.ConvertVecF <T>(colBottom[0].mutable_cpu_data);
            float[] rgfConfData  = Utility.ConvertVecF <T>(colBottom[1].mutable_cpu_data);
            float[] rgfPriorData = Utility.ConvertVecF <T>(colBottom[2].mutable_cpu_data);
            float[] rgfGtData    = Utility.ConvertVecF <T>(colBottom[3].mutable_cpu_data);

            // Retrieve all ground truth.
            DictionaryMap <List <NormalizedBBox> > rgAllGtBboxes = m_bboxUtil.GetGroundTruth(rgfGtData, m_nNumGt, m_nBackgroundLabelId, m_bUseDifficultGt);

            // Retrieve all prior bboxes. It is the same within a batch since we assume all
            // images in a batch are of the same dimension.
            List <List <float> >  rgrgPriorVariances;
            List <NormalizedBBox> rgPriorBboxes = m_bboxUtil.GetPrior(rgfPriorData, m_nNumPriors, out rgrgPriorVariances);

            // Retrieve all predictions.
            List <LabelBBox> rgAllLocPreds = m_bboxUtil.GetLocPredictions(rgfLocData, m_nNum, m_nNumPriors, m_nLocClasses, m_bShareLocation);

            // Find matches between source bboxes and ground truth bboxes.
            List <DictionaryMap <List <float> > > rgAllMatchOverlaps;

            m_bboxUtil.FindMatches(rgAllLocPreds, rgAllGtBboxes, rgPriorBboxes, rgrgPriorVariances, m_param.multiboxloss_param, out rgAllMatchOverlaps, out m_rgAllMatchIndices);

            // Sample hard negative (and positive) examples based on mining type.
            int nNumNegs;

            m_nNumMatches = m_bboxUtil.MineHardExamples(colBottom[0], rgAllLocPreds, rgAllGtBboxes, rgPriorBboxes, rgrgPriorVariances, rgAllMatchOverlaps, m_param.multiboxloss_param, m_rgAllMatchIndices, m_rgrgAllNegIndices, out nNumNegs);

            if (m_nNumMatches >= 1)
            {
                // Form data to pass on to loc_loss_layer.
                List <int> rgLocShape = new List <int>()
                {
                    1, m_nNumMatches * 4
                };
                m_blobLocPred.Reshape(rgLocShape);
                m_blobLocGt.Reshape(rgLocShape);

                m_bboxUtil.EncodeLocPrediction(rgAllLocPreds, rgAllGtBboxes, m_rgAllMatchIndices, rgPriorBboxes, rgrgPriorVariances, m_param.multiboxloss_param, m_blobLocPred, m_blobLocGt);

                m_locLossLayer.Reshape(m_colLocBottom, m_colLocTop);
                m_locLossLayer.Forward(m_colLocBottom, m_colLocTop);
            }
            else
            {
                m_blobLocLoss.SetData(0, 0);
            }

            // Form data to pass on to conf_loss_layer
            if (m_bDoNegMining)
            {
                m_nNumConf = m_nNumMatches + nNumNegs;
            }
            else
            {
                m_nNumConf = m_nNum * m_nNumPriors;
            }

            if (m_nNumConf >= 1)
            {
                // Reshape the confidence data.
                List <int> rgConfShape = new List <int>();

                if (m_confLossType == MultiBoxLossParameter.ConfLossType.SOFTMAX)
                {
                    rgConfShape.Add(m_nNumConf);
                    m_blobConfGt.Reshape(rgConfShape);
                    rgConfShape.Add(m_nNumClasses);
                    m_blobConfPred.Reshape(rgConfShape);
                }
                else if (m_confLossType == MultiBoxLossParameter.ConfLossType.LOGISTIC)
                {
                    rgConfShape.Add(1);
                    rgConfShape.Add(m_nNumConf);
                    rgConfShape.Add(m_nNumClasses);
                    m_blobConfGt.Reshape(rgConfShape);
                    m_blobConfPred.Reshape(rgConfShape);
                }
                else
                {
                    m_log.FAIL("Unknown confidence loss type.");
                }

                if (!m_bDoNegMining)
                {
                    // Consider all scores.
                    // Share data and diff with bottom[1].
                    m_log.CHECK_EQ(m_blobConfPred.count(), colBottom[1].count(), "The conf pred and bottom[1] should have the same count.");
                    m_blobConfPred.ShareData(colBottom[1]);
                }

                m_blobConfGt.SetData(m_nBackgroundLabelId);

                m_bboxUtil.EncodeConfPrediction(rgfConfData, m_nNum, m_nNumPriors, m_param.multiboxloss_param, m_rgAllMatchIndices, m_rgrgAllNegIndices, rgAllGtBboxes, m_blobConfPred, m_blobConfGt);
            }
            else
            {
                m_blobConfLoss.SetData(0, 0);
            }

            colTop[0].SetData(0, 0);

            if (m_param.propagate_down[0])
            {
                double dfNormalizer = GetNormalizer(m_param.loss_param.normalization.Value, m_nNum, m_nNumPriors, m_nNumMatches);
                double dfLocLoss    = Utility.ConvertVal <T>(m_blobLocLoss.GetData(0));
                double dfLoss       = Utility.ConvertVal <T>(colTop[0].GetData(0));

                dfLoss += m_fLocWeight * dfLocLoss / dfNormalizer;
                colTop[0].SetData(dfLoss, 0);
            }

            if (m_param.propagate_down[1])
            {
                double dfNormalizer = GetNormalizer(m_param.loss_param.normalization.Value, m_nNum, m_nNumPriors, m_nNumMatches);
                double dfConfLoss   = Utility.ConvertVal <T>(m_blobConfLoss.GetData(0));
                double dfLoss       = Utility.ConvertVal <T>(colTop[0].GetData(0));

                dfLoss += dfConfLoss / dfNormalizer;
                colTop[0].SetData(dfLoss, 0);
            }
        }
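Both loss terms above are divided by GetNormalizer(...). Below is a hedged sketch of what that normalizer typically computes, following the Caffe convention for loss normalization modes; the enum and member names here are assumptions and MyCaffe's actual implementation may differ in detail.

        // Requires System. A sketch, not the library implementation.
        static double GetNormalizer(LossParameter.NormalizationMode mode,
                                    int nNum, int nNumPriors, int nNumMatches)
        {
            double dfNormalizer;
            switch (mode)
            {
                case LossParameter.NormalizationMode.FULL:       // every prior box counts
                    dfNormalizer = nNum * nNumPriors;
                    break;
                case LossParameter.NormalizationMode.VALID:      // only matched boxes count
                    dfNormalizer = (nNumMatches == -1) ? nNum * nNumPriors : nNumMatches;
                    break;
                case LossParameter.NormalizationMode.BATCH_SIZE: // per image
                    dfNormalizer = nNum;
                    break;
                default:                                         // NONE: no normalization
                    dfNormalizer = 1.0;
                    break;
            }
            // Avoid dividing by zero when nothing matched.
            return Math.Max(1.0, dfNormalizer);
        }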
Example #11
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            ScaleParameter p = m_param.scale_param;

            if (colBottom.Count == 1 && blobs.Count > 0)
            {
                m_log.WriteLine("Skipping parameter initialization.");
            }
            else if (colBottom.Count == 1)
            {
                // scale is a learned parameter; initialize it.
                m_nAxis = colBottom[0].CanonicalAxisIndex(p.axis);
                int nNumAxes = p.num_axes;
                m_log.CHECK_GE(nNumAxes, -1, "num_axes must be non-negative, or -1 to extend to the end of bottom[0].");

                if (nNumAxes >= 0)
                {
                    m_log.CHECK_GE(colBottom[0].num_axes, m_nAxis + nNumAxes, "scale blob's shape extends past bottom[0]'s shape when applied starting with bottom[0] axis = " + m_nAxis.ToString());
                }

                m_colBlobs = new BlobCollection <T>();

                List <int> rgShape = new List <int>();
                int        nStart  = m_nAxis;
                int        nEnd    = (nNumAxes == -1) ? colBottom[0].shape().Count : nStart + nNumAxes;

                for (int i = nStart; i < nEnd; i++)
                {
                    rgShape.Add(colBottom[0].shape(i));
                }

                Blob <T> blobScale = new Blob <T>(m_cuda, m_log, rgShape);
                blobScale.Name = "scale";
                FillerParameter fp = p.filler;

                // Default to unit (1) filler for identity operation.
                if (fp == null)
                {
                    fp = new FillerParameter("constant", 1.0);
                }

                Filler <T> filler = Filler <T> .Create(m_cuda, m_log, fp);

                filler.Fill(blobScale);

                m_colBlobs.Add(blobScale);
            }

            if (p.bias_term)
            {
                LayerParameter pb = new LayerParameter(LayerParameter.LayerType.BIAS);
                pb.bias_param.axis     = p.axis;
                pb.bias_param.num_axes = (colBottom.Count > 1) ? colBottom[1].num_axes : p.num_axes;
                pb.bias_param.filler   = p.bias_filler;

                m_colBiasBottomVec = new BlobCollection <T>();
                m_colBiasBottomVec.Add(colBottom[0]);

                m_biasLayer = new BiasLayer <T>(m_cuda, m_log, pb);
                m_biasLayer.Setup(m_colBiasBottomVec, colTop);
                m_nBiasParamId = m_colBlobs.Count;
                m_colBlobs.Add(m_biasLayer.blobs[0]);
                m_rgbBiasPropagateDown = Utility.Create <bool>(1, false);
            }

            m_rgbParamPropagateDown = new DictionaryMap <bool>(m_colBlobs.Count, true);
        }
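For reference, the computation a scale layer (with the optional internal bias layer wired above) performs: each scale element spans axes [axis, axis + num_axes) of the bottom and is broadcast over the leading (outer) and trailing (inner) dimensions. A minimal CPU sketch; the names are illustrative:

        static float[] ScaleForward(float[] bottom, float[] scale, float[] bias,
                                    int nOuter, int nScaleDim, int nInner)
        {
            var top = new float[bottom.Length];

            for (int o = 0; o < nOuter; o++)
            {
                for (int s = 0; s < nScaleDim; s++)
                {
                    for (int i = 0; i < nInner; i++)
                    {
                        int idx = (o * nScaleDim + s) * nInner + i;
                        top[idx] = bottom[idx] * scale[s] + (bias != null ? bias[s] : 0.0f);
                    }
                }
            }

            return top;
        }

        // For bottom (N, C, H, W) with axis = 1 and num_axes = 1:
        //   nOuter = N, nScaleDim = C, nInner = H * W.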
Example #12
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            Blob <T> blobX    = colBottom[0];
            Blob <T> blobCy   = colBottom[1];
            Blob <T> blobClip = colBottom[2];

            m_log.CHECK_EQ(blobX.shape(1), 1, "Currently, only batch size = 1 is supported.");

            m_rgbParamPropagateDown = new DictionaryMap <bool>(m_colBlobs.Count, true);

            List <int> rgDimX = new List <int>()
            {
                1, 0
            };

            while (rgDimX.Count < colBottom[0].num_axes)
            {
                rgDimX.Add(rgDimX.Count);
            }

            LayerParameter transposeXparam = new LayerParameter(LayerParameter.LayerType.TRANSPOSE);

            transposeXparam.transpose_param.dim = new List <int>(rgDimX);

            m_transposeX = new TransposeLayer <T>(m_cuda, m_log, transposeXparam);

            addInternal(blobX, m_blobX);
            m_transposeX.Setup(m_colInternalBottom, m_colInternalTop);
            m_blobX1.ReshapeLike(m_blobX);

            addInternal(m_blobX, m_blobUh);
            m_ipUa.Setup(m_colInternalBottom, m_colInternalTop);

            addInternal(blobClip, m_blobClip);
            m_transposeClip.Setup(m_colInternalBottom, m_colInternalTop);
            // Make sure the first item is set to 1.
            m_blobClip.SetData(1, 0);

            m_blobState.ReshapeLike(blobCy);

            addInternal(m_blobState, m_blobWc);
            m_ipWa.Setup(m_colInternalBottom, m_colInternalTop);

            m_blobFullWc.ReshapeLike(m_blobUh);

            addInternal(new List <Blob <T> >()
            {
                m_blobUh, m_blobFullWc
            }, m_blobAddOutput);
            m_add1.Setup(m_colInternalBottom, m_colInternalTop);

            addInternal(m_blobAddOutput, m_blobGG);
            m_tanh.Setup(m_colInternalBottom, m_colInternalTop);

            addInternal(m_blobGG, m_blobAA);
            m_ipV.Setup(m_colInternalBottom, m_colInternalTop);

            List <int> rgFocusShape = Utility.Clone <int>(blobX.shape());

            rgFocusShape[0] = blobX.shape(1);
            rgFocusShape[1] = blobX.shape(0);
            m_blobFocusedInput.Reshape(rgFocusShape);

            List <int> rgContextShape = Utility.Clone <int>(blobX.shape());

            rgContextShape[0] = rgContextShape[1];
            rgContextShape[1] = 1;
            m_blobContext.Reshape(rgContextShape);

            List <int> rgTopShape = Utility.Clone <int>(m_blobContext.shape());

            rgTopShape[0] = m_blobContext.shape(1);
            rgTopShape[1] = m_blobContext.shape(0);
            colTop[0].Reshape(rgTopShape);

            blobs.Clear();

            foreach (Blob <T> blob in m_ipUa.blobs)
            {
                blobs.Add(blob);
            }

            foreach (Blob <T> blob in m_ipWa.blobs)
            {
                blobs.Add(blob);
            }

            // V
            blobs.Add(m_ipV.blobs[0]);
        }
Example #13
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            m_nN = (int)m_param.embed_param.num_output;
            m_log.CHECK_GT(m_nN, 0, "EmbedLayer num_output must be positive.");

            m_nK = (int)m_param.embed_param.input_dim;
            m_log.CHECK_GT(m_nK, 0, "EmbedLayer input_dim must be positive.");

            m_bBiasTerm = m_param.embed_param.bias_term;

            if (m_colBlobs.Count > 0)
            {
                m_log.WriteLine("Skipping parameter initialization.");
            }
            else
            {
                m_colBlobs.Clear();

                // Initialize the weights --
            // transposed from InnerProductLayer for spatial locality.
                List <int> rgWeightShape = new List <int>()
                {
                    m_nK, m_nN
                };
                Blob <T> blobWeight = new Blob <T>(m_cuda, m_log);
                blobWeight.Name = m_param.name + " weights";
                blobWeight.type = BLOB_TYPE.WEIGHT;

                if (!shareParameter(blobWeight, rgWeightShape))
                {
                    blobWeight.Reshape(rgWeightShape);

                    // fill the weights
                    Filler <T> weight_filler = Filler <T> .Create(m_cuda, m_log, m_param.embed_param.weight_filler);

                    weight_filler.Fill(blobWeight);
                }
                m_colBlobs.Add(blobWeight);


                // If necessary, initialize and fill the bias term
                if (m_bBiasTerm)
                {
                    List <int> rgBiasShape = new List <int>()
                    {
                        m_nN
                    };
                    Blob <T> blobBias = new Blob <T>(m_cuda, m_log);
                    blobBias.Name = m_param.name + " bias";
                    blobBias.type = BLOB_TYPE.WEIGHT;

                    if (!shareParameter(blobBias, rgBiasShape))
                    {
                        blobBias.Reshape(rgBiasShape);
                        Filler <T> bias_filler = Filler <T> .Create(m_cuda, m_log, m_param.embed_param.bias_filler);

                        bias_filler.Fill(blobBias);
                    }
                    m_colBlobs.Add(blobBias);
                }
            }

            m_rgbParamPropagateDown = new DictionaryMap <bool>(m_colBlobs.Count, true);
        }
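The K x N weight layout chosen above ("transposed from InnerProductLayer for spatial locality") makes the embed forward pass a contiguous row gather: each input index selects one row of N floats. A sketch of that lookup, with bias handling omitted; EmbedForward is illustrative:

        // Requires System.
        static float[] EmbedForward(int[] rgIndices, float[] rgWeights, int nK, int nN)
        {
            var rgOutput = new float[rgIndices.Length * nN];

            for (int i = 0; i < rgIndices.Length; i++)
            {
                int nIdx = rgIndices[i];
                if (nIdx < 0 || nIdx >= nK)
                    throw new ArgumentOutOfRangeException(nameof(rgIndices));

                // Row nIdx of the K x N weight matrix is the embedding vector.
                Array.Copy(rgWeights, nIdx * nN, rgOutput, i * nN, nN);
            }

            return rgOutput;
        }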
Example #14
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            LSTMAttentionParameter p = m_param.lstm_attention_param;

            if (m_param.lstm_attention_param.enable_attention)
            {
                m_log.CHECK_GE(colBottom.Count, 4, "When using attention, four bottoms are required: x, xClip, encoding, encodingClip.");
                m_log.CHECK_LE(colBottom.Count, 5, "When using attention, no more than five bottoms are allowed: x, xClip, encoding, encodingClip, vocabcount (optional).");

                if (colBottom.Count == 5)
                {
                    if (p.num_output_ip != 0)
                    {
                        p.num_output_ip = (uint)convertF(colBottom[4].GetData(0));
                    }
                }
            }
            else
            {
                m_log.CHECK_GE(colBottom.Count, 1, "When not using attention, at least one bottom is required: x.");
                m_log.CHECK_LE(colBottom.Count, 2, "When not using attention, no more than two bottoms are allowed: x, clip.");
            }

            m_dfClippingThreshold = p.clipping_threshold;
            m_nN = colBottom[0].channels;
            m_nH = (int)p.num_output;      // number of hidden units.
            m_nI = colBottom[0].count(2);  // input dimension.

            // Check if we need to set up the weights.
            if (m_colBlobs.Count > 0)
            {
                m_log.WriteLine("Skipping parameter initialization.");
            }
            else
            {
                m_colBlobs = new BlobCollection <T>();

                Filler <T> weight_filler = Filler <T> .Create(m_cuda, m_log, p.weight_filler);

                Filler <T> bias_filler = Filler <T> .Create(m_cuda, m_log, p.bias_filler);

                // input-to-hidden weights
                // Initialize the weight.
                List <int> rgShape1 = new List <int>()
                {
                    4 * m_nH, m_nI
                };
                Blob <T> blobWeights_I_H = new Blob <T>(m_cuda, m_log);
                blobWeights_I_H.Name = m_param.name + " weights I to H";
                blobWeights_I_H.type = BLOB_TYPE.WEIGHT;

                if (!shareParameter(blobWeights_I_H, rgShape1))
                {
                    blobWeights_I_H.Reshape(rgShape1);
                    weight_filler.Fill(blobWeights_I_H);
                }
                m_nWeightItoHidx = m_colBlobs.Count;
                m_colBlobs.Add(blobWeights_I_H);

                // hidden-to-hidden weights
                // Initialize the weight.
                List <int> rgShape2 = new List <int>()
                {
                    4 * m_nH, m_nH
                };
                Blob <T> blobWeights_H_H = new Blob <T>(m_cuda, m_log);
                blobWeights_H_H.Name = m_param.name + " weights H to H";
                blobWeights_H_H.type = BLOB_TYPE.WEIGHT;

                if (!shareParameter(blobWeights_H_H, rgShape2))
                {
                    blobWeights_H_H.Reshape(rgShape2);
                    weight_filler.Fill(blobWeights_H_H);
                }
                m_nWeightHtoHidx = m_colBlobs.Count;
                m_colBlobs.Add(blobWeights_H_H);

                // If necessary, initialize and fill the bias term.
                List <int> rgShape3 = new List <int>()
                {
                    4 * m_nH
                };
                Blob <T> blobBias = new Blob <T>(m_cuda, m_log);
                blobBias.Name = m_param.name + " bias weights";
                blobBias.type = BLOB_TYPE.WEIGHT;

                if (!shareParameter(blobBias, rgShape3))
                {
                    blobBias.Reshape(rgShape3);
                    bias_filler.Fill(blobBias);
                }
                m_nWeightBiasidx = m_colBlobs.Count;
                m_colBlobs.Add(blobBias);

                // Initialize the bias for the forget gate to 5.0 as described in the
                // Clockwork RNN paper:
                // [1] Koutnik, J., Greff, K., Gomez, F., Schmidhuber, J., 'A Clockwork RNN', 2014
                if (p.enable_clockwork_forgetgate_bias)
                {
                    double[] rgBias = convertD(blobBias.mutable_cpu_data);

                    for (int i = m_nH; i < 2 * m_nH; i++)
                    {
                        rgBias[i] = 5.0;
                    }

                    blobBias.mutable_cpu_data = convert(rgBias);
                }

                if (m_param.lstm_attention_param.num_output_ip > 0)
                {
                    Blob <T> blobWeightWhd = new Blob <T>(m_cuda, m_log);
                    blobWeightWhd.Name = m_param.name + " weights Whd";
                    blobWeightWhd.type = BLOB_TYPE.WEIGHT;

                    List <int> rgShapeWhd = new List <int>()
                    {
                        m_nH, (int)m_param.lstm_attention_param.num_output_ip
                    };
                    if (!shareParameter(blobWeightWhd, rgShapeWhd))
                    {
                        blobWeightWhd.Reshape(rgShapeWhd);
                        weight_filler.Fill(blobWeightWhd);
                    }
                    m_nWeightWhdidx = m_colBlobs.Count;
                    m_colBlobs.Add(blobWeightWhd);

                    Blob <T> blobWeightWhdb = new Blob <T>(m_cuda, m_log);
                    blobWeightWhdb.Name = m_param.name + " weights Whdb";
                    blobWeightWhdb.type = BLOB_TYPE.WEIGHT;

                    List <int> rgShapeWhdb = new List <int>()
                    {
                        1, (int)m_param.lstm_attention_param.num_output_ip
                    };
                    if (!shareParameter(blobWeightWhdb, rgShapeWhdb))
                    {
                        blobWeightWhdb.Reshape(rgShapeWhdb);
                        bias_filler.Fill(blobWeightWhdb);
                    }
                    m_nWeightWhdbidx = m_colBlobs.Count;
                    m_colBlobs.Add(blobWeightWhdb);
                }

                if (m_param.lstm_attention_param.enable_attention)
                {
                    // context-to-hidden weights
                    // Initialize the weight.
                    Blob <T> blobWeights_C_H = new Blob <T>(m_cuda, m_log);
                    blobWeights_C_H.Name = m_param.name + " weights C to H";
                    blobWeights_C_H.type = BLOB_TYPE.WEIGHT;

                    if (!shareParameter(blobWeights_C_H, rgShape1))
                    {
                        blobWeights_C_H.Reshape(rgShape1); // same shape as I to H
                        weight_filler.Fill(blobWeights_C_H);
                    }
                    m_nWeightCtoHidx = m_colBlobs.Count;
                    m_colBlobs.Add(blobWeights_C_H);
                }
            }

            m_rgbParamPropagateDown = new DictionaryMap <bool>(m_colBlobs.Count, true);

            List <int> rgCellShape = new List <int>()
            {
                m_nN, m_nH
            };

            m_blob_C_0.Reshape(rgCellShape);
            m_blob_H_0.Reshape(rgCellShape);
            m_blob_C_T.Reshape(rgCellShape);
            m_blob_H_T.Reshape(rgCellShape);
            m_blob_H_to_H.Reshape(rgCellShape);

            List <int> rgGateShape = new List <int>()
            {
                m_nN, 4, m_nH
            };

            m_blob_H_to_Gate.Reshape(rgGateShape);

            // Attention settings
            if (m_param.lstm_attention_param.enable_attention)
            {
                m_blob_C_to_Gate      = new Blob <T>(m_cuda, m_log, false);
                m_blob_C_to_Gate.Name = m_param.name + "c_to_gate";
                m_blob_C_to_Gate.Reshape(rgGateShape);

                m_blobContext      = new Blob <T>(m_cuda, m_log);
                m_blobContext.Name = "context_out";

                m_blobContextFull      = new Blob <T>(m_cuda, m_log);
                m_blobContextFull.Name = "context_full";

                m_blobPrevCt      = new Blob <T>(m_cuda, m_log);
                m_blobPrevCt.Name = "prev_ct";

                LayerParameter attentionParam = new LayerParameter(LayerParameter.LayerType.ATTENTION);
                attentionParam.attention_param.axis          = 2;
                attentionParam.attention_param.dim           = m_param.lstm_attention_param.num_output;
                attentionParam.attention_param.weight_filler = m_param.lstm_attention_param.weight_filler;
                attentionParam.attention_param.bias_filler   = m_param.lstm_attention_param.bias_filler;

                if (m_param is LayerParameterEx <T> )
                {
                    LayerParameterEx <T> pEx = m_param as LayerParameterEx <T>;
                    attentionParam = new LayerParameterEx <T>(attentionParam, pEx.SharedBlobs, pEx.SharedLayerBlobs, pEx.SharedLayer);
                }

                m_attention = new AttentionLayer <T>(m_cuda, m_log, attentionParam);

                Blob <T> blobEncoding     = colBottom[2];
                Blob <T> blobEncodingClip = colBottom[3];
                addInternal(new List <Blob <T> >()
                {
                    blobEncoding, m_blob_C_T, blobEncodingClip
                }, m_blobContext);
                m_attention.Setup(m_colInternalBottom, m_colInternalTop);

                foreach (Blob <T> b in m_attention.blobs)
                {
                    m_colBlobs.Add(b);
                }
            }
        }
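The 4 * m_nH shapes above concatenate the four LSTM gate pre-activations, each of size H; the clockwork forget-gate init writing to [H, 2H) implies the forget gate occupies the second slot. A hedged sketch of that layout (the i/f/o/g ordering is an assumption beyond what the snippet shows):

        // Requires System.
        static void SliceGates(float[] rgPreAct, int nH,
                               out ArraySegment<float> i,   // input gate
                               out ArraySegment<float> f,   // forget gate (biased to 5.0 above)
                               out ArraySegment<float> o,   // output gate
                               out ArraySegment<float> g)   // candidate cell
        {
            i = new ArraySegment<float>(rgPreAct, 0 * nH, nH);
            f = new ArraySegment<float>(rgPreAct, 1 * nH, nH);
            o = new ArraySegment<float>(rgPreAct, 2 * nH, nH);
            g = new ArraySegment<float>(rgPreAct, 3 * nH, nH);
        }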
Example #15
 public IPropertyMappingProvider Visit(DictionaryMap dictionaryMap, ITermMappingProvider key, ITermMappingProvider value)
 {
     var propertyMapping = CreatePropertyMapping(dictionaryMap);
     return new DictionaryMappingProvider(propertyMapping, key, value);
 }
Example #16
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            m_log.CHECK_GE(colBottom[0].num_axes, 2, "Number of axes of bottom must be >= 2");
            PReLUParameter p         = m_param.prelu_param;
            int            nChannels = colBottom[0].channels;

            m_bChannelShared = p.channel_shared;

            if (m_colBlobs.Count > 0)
            {
                m_log.WriteLine("Skipping parameter initialization.");
            }
            else
            {
                m_colBlobs = new BlobCollection <T>();

                List <int> rgSlopeShape = new List <int>();
                if (!m_bChannelShared)
                {
                    rgSlopeShape.Add(nChannels);
                }

                Blob <T> blobSlope = new Blob <T>(m_cuda, m_log);
                blobSlope.Name = m_param.name + " slope";
                blobSlope.type = BLOB_TYPE.INTERNAL;

                if (!shareParameter(blobSlope, rgSlopeShape))
                {
                    blobSlope.Reshape(rgSlopeShape);
                    FillerParameter fp = p.filler;

                    if (fp == null)
                    {
                        fp = new FillerParameter("constant", 0.25);
                    }

                    Filler <T> filler = Filler <T> .Create(m_cuda, m_log, fp);

                    filler.Fill(blobSlope);
                }
                m_colBlobs.Add(blobSlope);
            }

            if (m_bChannelShared)
            {
                m_log.CHECK_EQ(m_colBlobs[0].count(), 1, "Negative slope size is inconsistent with prototxt config.");
            }
            else
            {
                m_log.CHECK_EQ(m_colBlobs[0].count(), nChannels, "Negative slope size is inconsistent with prototxt config.");
            }

            // Propagate gradients to the parameters (as directed by backward pass)
            m_rgbParamPropagateDown = new DictionaryMap <bool>(m_colBlobs.Count, true);

            List <int> rgShape = new List <int>()
            {
                colBottom[0].count(1)
            };

            m_blobMultiplier.Reshape(rgShape);
            m_blobBackwardBuff.Reshape(rgShape);
            m_blobMultiplier.SetData(1.0);
        }
Example #17
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            if (!reshapeNeeded(colBottom, colTop))
            {
                return;
            }

            // Configure the kernel size, padding, stride and inputs.
            ConvolutionParameter p = m_param.convolution_param;

            m_bForceNDim2col = p.force_nd_im2col;
            m_nChannelAxis   = colBottom[0].CanonicalAxisIndex(p.axis);

            int nFirstSpatialAxis = m_nChannelAxis + 1;
            int nNumAxes          = colBottom[0].num_axes;

            m_nNumSpatialAxes = nNumAxes - nFirstSpatialAxis;

            m_log.CHECK_GE(m_nNumSpatialAxes, 0, "The number of spatial axes must be zero or greater.");

            List <int> rgBottomDimBlobShape = new List <int>()
            {
                m_nNumSpatialAxes + 1
            };
            List <int> rgSpatialDimBlobShape = new List <int>()
            {
                Math.Max(m_nNumSpatialAxes, 1)
            };

            // Setup filter kernel dimensions (blobKernelShape)
            m_blobKernelShape.Reshape(rgSpatialDimBlobShape);
            T[] rgKernelShape = m_blobKernelShape.mutable_cpu_data;

            if (p.kernel_h.HasValue || p.kernel_w.HasValue)
            {
                m_log.CHECK_EQ(m_nNumSpatialAxes, 2, "kernel_h & kernel_w can only be used in 2D convolution.");
                m_log.CHECK_EQ(0, p.kernel_size.Count, "Either kernel_size or kernel_h/w should be specified; not both.");
                rgKernelShape[0] = (T)Convert.ChangeType(p.kernel_h.Value, typeof(T));
                rgKernelShape[1] = (T)Convert.ChangeType(p.kernel_w.Value, typeof(T));
            }
            else
            {
                int nNumKernelDims = p.kernel_size.Count;
                m_log.CHECK(nNumKernelDims == 1 || nNumKernelDims == m_nNumSpatialAxes, "Kernel size must be specified once, or once per spatial dimension (kernel_size specified " + nNumKernelDims.ToString() + " times; " + m_nNumSpatialAxes.ToString() + " spatial dims);");

                for (int i = 0; i < m_nNumSpatialAxes; i++)
                {
                    int nIdx = (nNumKernelDims == 1) ? 0 : i;
                    rgKernelShape[i] = (T)Convert.ChangeType(p.kernel_size[nIdx], typeof(T));
                }
            }

            for (int i = 0; i < m_nNumSpatialAxes; i++)
            {
                m_log.CHECK_GT((int)Convert.ChangeType(rgKernelShape[i], typeof(int)), 0, "Filter dimension must be non-zero.");
            }

            m_blobKernelShape.mutable_cpu_data = rgKernelShape;


            // Setup stride dimensions (blobStride)
            m_blobStride.Reshape(rgSpatialDimBlobShape);
            T[] rgStrideData = m_blobStride.mutable_cpu_data;

            if (p.stride_h.HasValue || p.stride_w.HasValue)
            {
                m_log.CHECK_EQ(m_nNumSpatialAxes, 2, "stride_h & stride_w can only be used in 2D convolution.");
                m_log.CHECK_EQ(0, p.stride.Count, "Either stride or stride_h/w should be specified; not both.");
                rgStrideData[0] = (T)Convert.ChangeType(p.stride_h.Value, typeof(T));
                rgStrideData[1] = (T)Convert.ChangeType(p.stride_w.Value, typeof(T));
            }
            else
            {
                int nNumStrideDims = p.stride.Count;
                m_log.CHECK(nNumStrideDims == 0 || nNumStrideDims == 1 || nNumStrideDims == m_nNumSpatialAxes, "Stride size must be specified once, or once per spatial dimension (stride specified " + nNumStrideDims.ToString() + " times; " + m_nNumSpatialAxes.ToString() + " spatial dims);");
                int nDefaultStride = 1;

                for (int i = 0; i < m_nNumSpatialAxes; i++)
                {
                    if (nNumStrideDims == 0)
                    {
                        rgStrideData[i] = (T)Convert.ChangeType(nDefaultStride, typeof(T));
                    }
                    else
                    {
                        int nIdx = (nNumStrideDims == 1) ? 0 : i;
                        rgStrideData[i] = (T)Convert.ChangeType(p.stride[nIdx], typeof(T));
                    }
                    m_log.CHECK_GT((int)Convert.ChangeType(rgStrideData[i], typeof(int)), 0, "Stride dimension must be non-zero.");
                }
            }

            m_blobStride.mutable_cpu_data = rgStrideData;


            // Setup pad dimensions (blobPad)
            m_blobPad.Reshape(rgSpatialDimBlobShape);
            T[] rgPadData = m_blobPad.mutable_cpu_data;

            if (p.pad_h.HasValue || p.pad_w.HasValue)
            {
                m_log.CHECK_EQ(m_nNumSpatialAxes, 2, "pad_h & pad_w can only be used in 2D convolution.");
                m_log.CHECK_EQ(0, p.pad.Count, "Either pad or pad_h/w should be specified; not both.");
                rgPadData[0] = (T)Convert.ChangeType(p.pad_h.Value, typeof(T));
                rgPadData[1] = (T)Convert.ChangeType(p.pad_w.Value, typeof(T));
            }
            else
            {
                int nNumPadDims = p.pad.Count;
                m_log.CHECK(nNumPadDims == 0 || nNumPadDims == 1 || nNumPadDims == m_nNumSpatialAxes, "Pad size must be specified once, or once per spatial dimension (pad specified " + nNumPadDims.ToString() + " times; " + m_nNumSpatialAxes.ToString() + " spatial dims);");
                int nDefaultPad = 0;

                for (int i = 0; i < m_nNumSpatialAxes; i++)
                {
                    if (nNumPadDims == 0)
                    {
                        rgPadData[i] = (T)Convert.ChangeType(nDefaultPad, typeof(T));
                    }
                    else
                    {
                        int nIdx = (nNumPadDims == 1) ? 0 : i;
                        rgPadData[i] = (T)Convert.ChangeType(p.pad[nIdx], typeof(T));
                    }
                }
            }

            m_blobPad.mutable_cpu_data = rgPadData;


            // Setup dilation dimensions (blobDilation)
            m_blobDilation.Reshape(rgSpatialDimBlobShape);
            T[] rgDilationData   = m_blobDilation.mutable_cpu_data;
            int nNumDilationDims = p.dilation.Count;

            m_log.CHECK(nNumDilationDims == 0 || nNumDilationDims == 1 || nNumDilationDims == m_nNumSpatialAxes, "Dilation size must be specified once, or once per spatial dimension (dilation specified " + nNumDilationDims.ToString() + " times; " + m_nNumSpatialAxes.ToString() + " spatial dims);");
            int nDefaultDilation = 1;

            for (int i = 0; i < m_nNumSpatialAxes; i++)
            {
                if (nNumDilationDims == 0)
                {
                    rgDilationData[i] = (T)Convert.ChangeType(nDefaultDilation, typeof(T));
                }
                else
                {
                    int nIdx = (nNumDilationDims == 1) ? 0 : i;
                    rgDilationData[i] = (T)Convert.ChangeType(p.dilation[nIdx], typeof(T));
                }
            }

            m_blobDilation.mutable_cpu_data = rgDilationData;


            // Special case: im2col is the identity for 1x1 convolution with stride 1
            // and no padding, so flag for skipping the buffer and transformation.
            m_bIs1x1 = true;

            for (int i = 0; i < m_nNumSpatialAxes; i++)
            {
                if (!(val_at(rgKernelShape, i) == 1 &&
                      val_at(rgStrideData, i) == 1 &&
                      val_at(rgPadData, i) == 0))
                {
                    m_bIs1x1 = false;
                    break;
                }
            }

            // Configure output channels and groups.
            m_nChannels  = colBottom[0].shape(m_nChannelAxis);
            m_nNumOutput = (int)p.num_output;
            m_log.CHECK_GT(m_nNumOutput, 0, "Output count must be greater than zero.");

            m_nGroup = (int)p.group;
            m_log.CHECK_EQ(m_nChannels % m_nGroup, 0, "The channels must span evenly across the groups.");
            m_log.CHECK_EQ(m_nNumOutput % m_nGroup, 0, "The number of outputs must be a multiple of the group count.");

            if (reverse_dimensions())
            {
                m_nConvOutChannels = m_nChannels;
                m_nConvInChannels  = m_nNumOutput;
            }
            else
            {
                m_nConvOutChannels = m_nNumOutput;
                m_nConvInChannels  = m_nChannels;
            }

            // Handle the parameters: weights and biases
            // - blobs[0] holds the filter weights.
            // - blobs[1] holds the biases (optional)

            List <int> rgWeightShape = new List <int>();

            rgWeightShape.Add(m_nConvOutChannels);
            rgWeightShape.Add(m_nConvInChannels / m_nGroup);

            for (int i = 0; i < m_nNumSpatialAxes; i++)
            {
                rgWeightShape.Add(val_at(rgKernelShape, i));
            }

            m_bBiasTerm = p.bias_term;

            List <int> rgBiasShape = new List <int>()
            {
                m_nNumOutput
            };

            // Setup the convert to half flags used by the Layer just before calling forward and backward.
            if (p.useCudnn(m_nNumSpatialAxes))
            {
                m_bUseHalfSize = m_param.use_halfsize;
            }

            if (m_colBlobs.Count > 0)
            {
                m_log.CHECK_EQ(1 + ((m_bBiasTerm) ? 1 : 0), m_colBlobs.Count, "Incorrect number of weight blobs.");

                if (!Utility.Compare <int>(rgWeightShape, m_colBlobs[0].shape()))
                {
                    Blob <T> b = new Blob <T>(m_cuda, m_log, rgWeightShape);
                    m_log.FAIL("Incorrect weight shape: expected shape " + b.shape_string + "; instead, shape was " + m_colBlobs[0].shape_string);
                }

                if (m_bBiasTerm && !Utility.Compare <int>(rgBiasShape, m_colBlobs[1].shape()))
                {
                    Blob <T> b = new Blob <T>(m_cuda, m_log, rgBiasShape);
                    m_log.FAIL("Incorrect bias shape: expected shape " + b.shape_string + "; instead, shape was " + m_colBlobs[1].shape_string);
                }

                m_log.WriteLine("Skipping parameter initialization.");
            }
            else
            {
                m_colBlobs.Clear();

                // Initialize and fill the weights:
                // output channels x input channels per-group x kernel height x kernel width.
                Blob <T> blobWts = new Blob <T>(m_cuda, m_log, true, m_bUseHalfSize);
                blobWts.Name = colTop[0].Name + " weights";
                blobWts.type = BLOB_TYPE.WEIGHT;

                if (m_bUseHalfSize || !shareParameter(blobWts, rgWeightShape))
                {
                    blobWts.Reshape(rgWeightShape, m_bUseHalfSize);
                    Filler <T> wtFiller = Filler <T> .Create(m_cuda, m_log, p.weight_filler);

                    Blob <T> blobWts1 = blobWts;

                    if (m_bUseHalfSize)
                    {
                        blobWts1 = new Blob <T>(m_cuda, m_log, false, false);
                        blobWts1.ReshapeLike(blobWts);
                    }

                    wtFiller.Fill(blobWts1);

                    if (m_bUseHalfSize)
                    {
                        blobWts.CopyFrom(blobWts1);
                        blobWts1.Dispose();
                    }
                }

                m_colBlobs.Add(blobWts);

                // If necessary, initialize and fill the biases:
                if (m_bBiasTerm)
                {
                    Blob <T> blobBias = new Blob <T>(m_cuda, m_log, true, m_bUseHalfSize);
                    blobBias.Name = colTop[0].Name + " bias";
                    blobBias.type = BLOB_TYPE.WEIGHT;

                    if (m_bUseHalfSize || !shareParameter(blobBias, rgBiasShape))
                    {
                        blobBias.Reshape(rgBiasShape, m_bUseHalfSize);
                        Filler <T> biasFiller = Filler <T> .Create(m_cuda, m_log, p.bias_filler);

                        Blob <T> blobBias1 = blobBias;

                        if (m_bUseHalfSize)
                        {
                            blobBias1 = new Blob <T>(m_cuda, m_log, false, false);
                            blobBias1.ReshapeLike(blobBias);
                        }

                        biasFiller.Fill(blobBias1);

                        if (m_bUseHalfSize)
                        {
                            blobBias.CopyFrom(blobBias1);
                            blobBias1.Dispose();
                        }
                    }

                    m_colBlobs.Add(blobBias);
                }
            }

            m_nKernelDim    = m_colBlobs[0].count(1);
            m_nWeightOffset = m_nConvOutChannels * m_nKernelDim / m_nGroup;
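            // (Illustration:) for { 64, 32, 3, 3 } weights with group = 2, m_nKernelDim
            // is count(1) = 32 * 3 * 3 = 288 and m_nWeightOffset = 64 * 288 / 2 = 9216,
            // i.e. each group owns half of the filters.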

            // Propagate gradients to the parameters (as directed by backward pass).
            m_rgbParamPropagateDown = new DictionaryMap <bool>(m_colBlobs.Count, true);
        }
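The kernel, pad, stride and dilation blobs configured above feed the standard Caffe output-shape rule. A minimal standalone sketch of that rule (our names, not MyCaffe API):

using System;

public static class ConvShapeSketch
{
    // Computes one spatial output dimension for a (non-reversed) convolution.
    public static int OutputDim(int nInput, int nKernel, int nPad, int nStride, int nDilation)
    {
        // A dilated kernel covers dilation * (kernel - 1) + 1 input elements.
        int nKernelExtent = nDilation * (nKernel - 1) + 1;
        return (nInput + 2 * nPad - nKernelExtent) / nStride + 1;
    }

    public static void Main()
    {
        // 224 input, 3x3 kernel, pad 1, stride 1, dilation 1 -> 224 (size-preserving).
        Console.WriteLine(OutputDim(224, 3, 1, 1, 1));   // 224
        // The same kernel with dilation 2 behaves like a 5x5 kernel -> 222.
        Console.WriteLine(OutputDim(224, 3, 1, 1, 2));   // 222
    }
}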
Example #18
        private void layerSetUpCaffe(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            // Get (recurrent) input/output names.
            List <string> rgOutputNames = new List <string>();

            OutputBlobNames(rgOutputNames);

            List <string> rgRecurInputNames = new List <string>();

            RecurrentInputBlobNames(rgRecurInputNames);

            List <string> rgRecurOutputNames = new List <string>();

            RecurrentOutputBlobNames(rgRecurOutputNames);

            int nNumRecurBlobs = rgRecurInputNames.Count;

            m_log.CHECK_EQ(nNumRecurBlobs, rgRecurOutputNames.Count, "The number of recurrent input names must equal the number of recurrent output names.");

            // If provided, bottom[2] is a static input to the recurrent net.
            int nNumHiddenExposed = (m_bExposeHidden) ? nNumRecurBlobs : 0;

            m_bStaticInput = (colBottom.Count > 2 + nNumHiddenExposed);

            if (m_bStaticInput)
            {
                m_log.CHECK_GE(colBottom[2].num_axes, 1, "When a static input is present, bottom[2].num_axes must be >= 1.");
                m_log.CHECK_EQ(m_nN, colBottom[2].shape(0), "When a static input is present, bottom[2].shape(0) must equal N, which is " + m_nN.ToString());
            }

            // Create a NetParameter; setup the inputs that aren't unique to particular
            // recurrent architectures.
            NetParameter net_param = new NetParameter();

            LayerParameter input_layer = new LayerParameter(LayerParameter.LayerType.INPUT);

            input_layer.top.Add("x");
            BlobShape input_shape1 = new BlobShape();

            for (int i = 0; i < colBottom[0].num_axes; i++)
            {
                input_shape1.dim.Add(colBottom[0].shape(i));
            }
            input_layer.input_param.shape.Add(input_shape1);

            input_layer.top.Add("cont");
            BlobShape input_shape2 = new BlobShape();

            for (int i = 0; i < colBottom[1].num_axes; i++)
            {
                input_shape2.dim.Add(colBottom[1].shape(i));
            }
            input_layer.input_param.shape.Add(input_shape2);

            if (m_bStaticInput)
            {
                input_layer.top.Add("x_static");
                BlobShape input_shape3 = new BlobShape();
                for (int i = 0; i < colBottom[2].num_axes; i++)
                {
                    input_shape3.dim.Add(colBottom[2].shape(i));
                }
                input_layer.input_param.shape.Add(input_shape3);
            }

            net_param.layer.Add(input_layer);

            // Call the child's FillUnrolledNet implementation to specify the unrolled
            // recurrent architecture.
            FillUnrolledNet(net_param);

            // Prepend this layer's name to the names of each layer in the unrolled net.
            string strLayerName = m_param.name;

            if (strLayerName.Length > 0)
            {
                for (int i = 0; i < net_param.layer.Count; i++)
                {
                    LayerParameter layer = net_param.layer[i];
                    layer.name = strLayerName + "_" + layer.name;
                }
            }

            // Add 'pseudo-losses' to all outputs to force backpropagation.
            // (Setting force_backward is too aggressive as we may not need to backprop to
            // all inputs, e.g., the sequence continuation indicators.)
            List <string> rgPseudoLosses = new List <string>();

            for (int i = 0; i < rgOutputNames.Count; i++)
            {
                rgPseudoLosses.Add(rgOutputNames[i] + "_pseudoloss");
                LayerParameter layer = new LayerParameter(LayerParameter.LayerType.REDUCTION, rgPseudoLosses[i]);
                layer.bottom.Add(rgOutputNames[i]);
                layer.top.Add(rgPseudoLosses[i]);
                layer.loss_weight.Add(1.0);
                net_param.layer.Add(layer);
            }

            // Create the unrolled net.
            Net <T> sharedNet = null;

            if (m_param is LayerParameterEx <T> )
            {
                RecurrentLayer <T> sharedLayer = ((LayerParameterEx <T>)m_param).SharedLayer as RecurrentLayer <T>;
                if (sharedLayer != null)
                {
                    sharedNet = sharedLayer.m_unrolledNet;
                }
            }

            m_unrolledNet = new Net <T>(m_cuda, m_log, net_param, m_evtCancel, null, m_phase, null, sharedNet);
            m_unrolledNet.set_debug_info(m_param.recurrent_param.debug_info);

            // Setup pointers to the inputs.
            m_blobXInputBlob    = m_unrolledNet.blob_by_name("x");
            m_blobContInputBlob = m_unrolledNet.blob_by_name("cont");

            if (m_bStaticInput)
            {
                m_blobXStaticInputBlob = m_unrolledNet.blob_by_name("x_static");
            }

            // Setup pointers to paired recurrent inputs/outputs.
            m_colRecurInputBlobs  = new common.BlobCollection <T>();
            m_colRecurOutputBlobs = new common.BlobCollection <T>();

            for (int i = 0; i < nNumRecurBlobs; i++)
            {
                m_colRecurInputBlobs.Add(m_unrolledNet.blob_by_name(rgRecurInputNames[i]));
                m_colRecurOutputBlobs.Add(m_unrolledNet.blob_by_name(rgRecurOutputNames[i]));
            }

            // Setup pointers to outputs.
            m_log.CHECK_EQ(colTop.Count() - nNumHiddenExposed, rgOutputNames.Count, "OutputBlobNames must provide output blob name for each top.");
            m_colOutputBlobs = new common.BlobCollection <T>();
            for (int i = 0; i < rgOutputNames.Count; i++)
            {
                m_colOutputBlobs.Add(m_unrolledNet.blob_by_name(rgOutputNames[i]));
            }

            // We should have 2 inputs (x and cont), plus a number of recurrent inputs,
            // plus maybe a static input.
            int nStaticInput = (m_bStaticInput) ? 1 : 0;

            m_log.CHECK_EQ(2 + nNumRecurBlobs + nStaticInput, m_unrolledNet.input_blobs.Count, "The unrolled net input count should equal 2 + number of recurrent blobs (" + nNumRecurBlobs.ToString() + ") + static inputs (" + nStaticInput.ToString() + ")");

            // This layer's parameters are any parameters in the layers of the unrolled
            // net.  We only want one copy of each parameter, so check that the parameter
            // is 'owned' by the layer, rather than shared with another.
            blobs.Clear();
            for (int i = 0; i < m_unrolledNet.parameters.Count; i++)
            {
                if (m_unrolledNet.param_owners[i] == -1)
                {
                    m_log.WriteLine("Adding parameter " + i.ToString() + ": " + m_unrolledNet.param_display_names[i]);
                    blobs.Add(m_unrolledNet.parameters[i]);
                }
            }

            // Check that param_propagate_down is set for all of the parameters in the
            // unrolled net; set param_propagate_down to true in this layer.
            for (int i = 0; i < m_unrolledNet.layers.Count; i++)
            {
                for (int j = 0; j < m_unrolledNet.layers[i].blobs.Count; j++)
                {
                    m_log.CHECK(m_unrolledNet.layers[i].param_propagate_down(j), "param_propagate_down not set for layer " + i.ToString() + ", param " + j.ToString());
                }
            }
            m_rgbParamPropagateDown = new DictionaryMap <bool>(blobs.Count, true);

            // Set the diffs of recurrent outputs to 0 -- we can't backpropagate across
            // batches.
            for (int i = 0; i < m_colRecurOutputBlobs.Count; i++)
            {
                m_colRecurOutputBlobs[i].SetDiff(0);
            }

            // Check that the last output_names.count layers are the pseudo-losses;
            // set last_layer_index so that we don't actually run these layers.
            List <string> rgLayerNames = m_unrolledNet.layer_names;

            m_nLastLayerIndex = rgLayerNames.Count - 1 - rgPseudoLosses.Count;
            for (int i = m_nLastLayerIndex + 1, j = 0; i < rgLayerNames.Count; i++, j++)
            {
                m_log.CHECK(rgLayerNames[i] == rgPseudoLosses[j], "The last layer at idx " + i.ToString() + " should be the pseudo layer named " + rgPseudoLosses[j]);
            }
        }
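The tail-of-net check at the end of layerSetUpCaffe can be summarized in isolation: the unrolled net must end with one "<output>_pseudoloss" REDUCTION layer per output, and forward passes stop at m_nLastLayerIndex so the pseudo-losses never actually run. A minimal sketch of the same invariant (hypothetical names, not MyCaffe API):

using System;
using System.Collections.Generic;
using System.Linq;

public static class PseudoLossSketch
{
    // Returns the index of the last "real" layer, verifying that exactly the
    // trailing layers are the expected pseudo-loss layers.
    public static int LastLayerIndex(IList<string> rgLayerNames, IList<string> rgOutputNames)
    {
        List<string> rgExpected = rgOutputNames.Select(s => s + "_pseudoloss").ToList();
        int nLast = rgLayerNames.Count - 1 - rgExpected.Count;

        for (int i = nLast + 1, j = 0; i < rgLayerNames.Count; i++, j++)
        {
            if (rgLayerNames[i] != rgExpected[j])
                throw new Exception("Layer " + i + " should be " + rgExpected[j]);
        }

        return nLast; // forward passes run layers [0, nLast] and skip the rest.
    }

    public static void Main()
    {
        var names = new List<string> { "lstm_unit", "concat", "h_pseudoloss" };
        Console.WriteLine(LastLayerIndex(names, new List<string> { "h" })); // 1
    }
}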
Example #19
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            m_dfClippingThreshold = m_param.lstm_simple_param.clipping_threshold;
            m_nN = (int)m_param.lstm_simple_param.batch_size;              // batch size.
            m_nH = (int)m_param.lstm_simple_param.num_output;              // number of hidden units.
            m_nI = (int)(colBottom[0].count() / colBottom[0].num);         // input dimension.

            // Check if we need to set up the weights.
            if (m_colBlobs.Count > 0)
            {
                m_log.WriteLine("Skipping parameter initialization.");
            }
            else
            {
                m_colBlobs = new BlobCollection <T>();

                Filler <T> weight_filler = Filler <T> .Create(m_cuda, m_log, m_param.lstm_simple_param.weight_filler);

                Filler <T> bias_filler = Filler <T> .Create(m_cuda, m_log, m_param.lstm_simple_param.bias_filler);

                // input-to-hidden weights
                // Initialize the weight.
                List <int> rgShape1 = new List <int>()
                {
                    4 * m_nH, m_nI
                };
                Blob <T> blobWeights_I_H = new Blob <T>(m_cuda, m_log);
                blobWeights_I_H.Name = m_param.name + " weights I to H";
                blobWeights_I_H.type = Blob <T> .BLOB_TYPE.WEIGHT;

                if (!shareParameter(blobWeights_I_H, rgShape1))
                {
                    blobWeights_I_H.Reshape(rgShape1);
                    weight_filler.Fill(blobWeights_I_H);
                }
                m_colBlobs.Add(blobWeights_I_H);

                // hidden-to-hidden weights
                // Initialize the weight.
                List <int> rgShape2 = new List <int>()
                {
                    4 * m_nH, m_nH
                };
                Blob <T> blobWeights_H_H = new Blob <T>(m_cuda, m_log);
                blobWeights_H_H.Name = m_param.name + " weights H to H";
                blobWeights_H_H.type = Blob <T> .BLOB_TYPE.WEIGHT;

                if (!shareParameter(blobWeights_H_H, rgShape2))
                {
                    blobWeights_H_H.Reshape(rgShape2);
                    weight_filler.Fill(blobWeights_H_H);
                }
                m_colBlobs.Add(blobWeights_H_H);

                // If necessary, initialize and fill the bias term.
                List <int> rgShape3 = new List <int>()
                {
                    4 * m_nH
                };
                Blob <T> blobBias = new Blob <T>(m_cuda, m_log);
                blobBias.Name = m_param.name + " bias weights";
                blobBias.type = Blob <T> .BLOB_TYPE.WEIGHT;

                if (!shareParameter(blobBias, rgShape3))
                {
                    blobBias.Reshape(rgShape3);
                    bias_filler.Fill(blobBias);
                }
                m_colBlobs.Add(blobBias);

                // Initialize the bias for the forget gate to 5.0 as described in the
                // Clockwork RNN paper:
                // [1] Koutnik, J., Greff, K., Gomez, F., Schmidhuber, J., 'A Clockwork RNN', 2014.
                if (m_param.lstm_simple_param.enable_clockwork_forgetgate_bias)
                {
                    double[] rgBias = convertD(blobBias.mutable_cpu_data);

                    for (int i = m_nH; i < 2 * m_nH; i++)
                    {
                        rgBias[i] = 5.0;
                    }

                    blobBias.mutable_cpu_data = convert(rgBias);
                }
            }

            m_rgbParamPropagateDown = new DictionaryMap <bool>(m_colBlobs.Count, true);

            List <int> rgCellShape = new List <int>()
            {
                m_nN, m_nH
            };

            m_blob_C_0.Reshape(rgCellShape);
            m_blob_H_0.Reshape(rgCellShape);
            m_blob_C_T.Reshape(rgCellShape);
            m_blob_H_T.Reshape(rgCellShape);
            m_blob_H_to_H.Reshape(rgCellShape);

            List <int> rgGateShape = new List <int>()
            {
                m_nN, 4, m_nH
            };

            m_blob_H_to_Gate.Reshape(rgGateShape);
        }
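The forget-gate loop above writes 5.0 into bias indices [m_nH, 2*m_nH), which implies the 4*m_nH bias vector packs four H-sized gate blocks with the forget gate second. A minimal sketch under that assumption (the full gate order beyond the forget block is not confirmed by this snippet):

using System;

public static class LstmBiasSketch
{
    // Builds a 4*H bias vector laid out as four contiguous H-sized gate blocks
    // and, optionally, biases the forget gate (the second block) open.
    public static double[] InitBias(int nH, bool bClockworkForgetBias)
    {
        double[] rgBias = new double[4 * nH]; // assumed order: [i | f | o | g]

        if (bClockworkForgetBias)
        {
            for (int i = nH; i < 2 * nH; i++)
                rgBias[i] = 5.0; // keep the forget gate open early in training.
        }

        return rgBias;
    }

    public static void Main()
    {
        double[] rg = InitBias(3, true);
        Console.WriteLine(string.Join(",", rg)); // 0,0,0,5,5,5,0,0,0,0,0,0
    }
}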
Example #20
        /// <summary>
        /// Setup the layer.
        /// </summary>
        /// <param name="colBottom">Specifies the collection of bottom (input) Blobs.</param>
        /// <param name="colTop">Specifies the collection of top (output) Blobs.</param>
        public override void LayerSetUp(BlobCollection <T> colBottom, BlobCollection <T> colTop)
        {
            int nNumOutput = (int)m_param.inner_product_param.num_output;

            m_bBiasTerm  = m_param.inner_product_param.bias_term;
            m_bTranspose = m_param.inner_product_param.transpose;
            m_nN         = nNumOutput;
            int nAxis = colBottom[0].CanonicalAxisIndex(m_param.inner_product_param.axis);

            // Dimensions starting from 'axis' are 'flattened' into a single
            // vector of length K (m_nK). For example, if bottom[0]'s shape is
            // (N, C, H, W) and axis == 1, N inner products of dimension CHW are performed.
            m_nK = colBottom[0].count(nAxis);

            // Check if we need to set up the weights.
            if (m_colBlobs.Count > 0)
            {
                m_log.WriteLine("Skipping parameter initialization.");
            }
            else
            {
                // Initialize the weight.
                List <int> rgWeightShape = Utility.Create <int>(2, 0);

                if (m_bTranspose)
                {
                    rgWeightShape[0] = m_nK;
                    rgWeightShape[1] = m_nN;
                }
                else
                {
                    rgWeightShape[0] = m_nN;
                    rgWeightShape[1] = m_nK;
                }

                Blob <T> blobWeight = new Blob <T>(m_cuda, m_log);
                blobWeight.Name = m_param.name + " weights";
                blobWeight.type = Blob <T> .BLOB_TYPE.IP_WEIGHT;

                if (!shareParameter(blobWeight, rgWeightShape))
                {
                    blobWeight.Reshape(rgWeightShape);
                    Filler <T> weight_filler = Filler <T> .Create(m_cuda, m_log, m_param.inner_product_param.weight_filler);

                    weight_filler.Fill(blobWeight);
                }
                m_colBlobs.Add(blobWeight);

                // If necessary, initialize and fill the bias term.
                if (m_bBiasTerm)
                {
                    List <int> rgBiasShape = Utility.Create <int>(1, 0);
                    rgBiasShape[0] = m_nN;

                    Blob <T> blobBias = new Blob <T>(m_cuda, m_log);
                    blobBias.Name = m_param.name + " bias";
                    blobBias.type = Blob <T> .BLOB_TYPE.IP_WEIGHT;

                    if (!shareParameter(blobBias, rgBiasShape))
                    {
                        blobBias.Reshape(rgBiasShape);
                        Filler <T> bias_filler = Filler <T> .Create(m_cuda, m_log, m_param.inner_product_param.bias_filler);

                        bias_filler.Fill(blobBias);
                    }
                    m_colBlobs.Add(blobBias);
                }
            }

            m_rgbParamPropagateDown = new DictionaryMap <bool>(m_colBlobs.Count, true);
        }
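A minimal standalone sketch (our names, not MyCaffe API) of the shape bookkeeping above: dimensions from 'axis' onward flatten into K, the leading dimensions form the batch M, and the weights are N x K, or K x N when transpose is set (mirroring m_bTranspose):

using System;
using System.Linq;

public static class InnerProductShapeSketch
{
    // Derives the batch size M, flattened input dimension K, and weight shape
    // for an inner-product layer given the bottom shape and parameters.
    public static (int M, int K, int[] WeightShape) Shapes(int[] rgBottomShape, int nAxis, int nN, bool bTranspose)
    {
        int nM = rgBottomShape.Take(nAxis).Aggregate(1, (a, b) => a * b);
        int nK = rgBottomShape.Skip(nAxis).Aggregate(1, (a, b) => a * b);
        int[] rgWeight = bTranspose ? new[] { nK, nN } : new[] { nN, nK };
        return (nM, nK, rgWeight);
    }

    public static void Main()
    {
        // Bottom (N=32, C=3, H=28, W=28), axis = 1, num_output = 100:
        var r = Shapes(new[] { 32, 3, 28, 28 }, 1, 100, false);
        Console.WriteLine($"M={r.M}, K={r.K}, W=[{string.Join(",", r.WeightShape)}]");
        // M=32, K=2352, W=[100,2352]
    }
}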