Example #1
        /// <summary>
        /// Map decoding
        /// </summary>
        /// <param name="t">The data to decode</param>
        /// <returns>The decoded result</returns>
        public override List<string> Decode(TensorOld t)
        {
            var result = new List<string>(t.ElementCount);

            for (int i = 0; i < t.shape[0]; i++)
            {
                var index = Map2Index((int)t.GetRawValues()[i]);
                result.Add(Categories[index]);
            }

            return result;
        }
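To make the behavior concrete (assuming Map coding assigns each category the index it occupies in Categories, so that Map2Index is effectively an identity lookup; that mapping is not shown in this snippet): with Categories = ["red", "green", "blue"], a tensor holding the raw values [2, 0] would decode to ["blue", "red"].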
Example #2
 public void SetBias(TensorOld bias)
 {
     if (Bias == null)
     {
         foreach (var item in mirrorList)
         {
             item.Bias = bias;
         }
         //Bias was unset, so the mirrors now share the new tensor directly;
         //without this return, CheckShape below would run against a null Bias.
         return;
     }
     TensorOld.CheckShape(Bias, bias);
     Array.Copy(bias.GetRawValues(), 0, Bias.GetRawValues(), 0, Bias.ElementCount);
 }
Example #3
        /// <summary>
        /// Asserts that two tensors have the same shape and element-wise equal values within allowError.
        /// </summary>
        public static void ApproximatelyEqual(TensorOld a, TensorOld b, double allowError = 0.00001)
        {
            Assert.True(TensorOld.CheckShapeBool(a, b));

            var dataA = a.GetRawValues();
            var dataB = b.GetRawValues();

            for (int i = 0; i < a.ElementCount; i++)
            {
                Assert.True(Math.Abs(dataA[i] - dataB[i]) < allowError);
            }
        }
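A hedged usage sketch of this helper in an xUnit test: the TensorOld(rows, columns) constructor and the GetRawValues() accessor are taken from the other snippets in this section, while the test name and values are purely illustrative.

        [Fact]
        public void ApproximatelyEqual_AcceptsSmallElementwiseError()
        {
            var expected = new TensorOld(1, 3);
            Array.Copy(new[] { 1.0, 2.0, 3.0 }, expected.GetRawValues(), 3);

            var actual = new TensorOld(1, 3);
            Array.Copy(new[] { 1.0000001, 2.0, 2.9999999 }, actual.GetRawValues(), 3);

            //Passes: shapes match and every element-wise difference is below the default 0.00001.
            ApproximatelyEqual(expected, actual);
        }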
Example #4
 /// <summary>
 /// Manually set the weights
 /// </summary>
 /// <param name="weights">The weight values to copy in</param>
 public void SetWeights(TensorOld weights)
 {
     if (Weights == null)
     {
         foreach (var item in mirrorList)
         {
             item.Weights = weights;
         }
         //Weights was unset, so the mirrors now share the new tensor directly;
         //without this return, CheckShape below would run against a null Weights.
         return;
     }
     TensorOld.CheckShape(weights, Weights);
     Array.Copy(weights.GetRawValues(), 0, Weights.GetRawValues(), 0, Weights.ElementCount);
 }
示例#5
0
        //Copy each input row into the interior of PaddingInput, leaving the padded border untouched.
        private void SetPaddingInput(TensorOld input)
        {
            var inputData   = input.GetRawValues();
            var paddingData = PaddingInput.GetRawValues();

            for (int sample = 0; sample < samples; sample++)
            {
                for (int channel = 0; channel < channels; channel++)
                {
                    for (int i = 0; i < input.shape[2]; i++)
                    {
                        var inputStart   = input.GetRawOffset(sample, channel, i, 0);
                        var paddingStart = PaddingInput.GetRawOffset(sample, channel, i + RowPadding, ColumnPadding);
                        Array.Copy(inputData, inputStart, paddingData, paddingStart, input.shape[3]);
                    }
                }
            }
        }
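The method above moves one contiguous input row per Array.Copy, offset by (RowPadding, ColumnPadding) in the destination, and never touches the border cells. A minimal self-contained sketch of the same row-copy technique on a plain row-major array (the PadMatrix name and shapes are illustrative, not part of the library):

        //Pads a rows x cols matrix (row-major in src) with `pad` zeros on every side.
        static double[] PadMatrix(double[] src, int rows, int cols, int pad)
        {
            var paddedCols = cols + 2 * pad;
            var dst = new double[(rows + 2 * pad) * paddedCols]; //zero-initialized by the runtime

            for (int i = 0; i < rows; i++)
            {
                var srcStart = i * cols;                          //start of row i in the source
                var dstStart = (i + pad) * paddedCols + pad;      //row i + pad, column pad in the destination
                Array.Copy(src, srcStart, dst, dstStart, cols);
            }

            return dst;
        }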
Example #6
        /// <summary>
        /// Dummy encoding
        /// </summary>
        /// <param name="data">The data to encode</param>
        /// <returns>The encoded result</returns>
        public override TensorOld Encode(IEnumerable<string> data)
        {
            var list   = data.ToList();
            var result = new TensorOld(list.Count, Length);

            for (int i = 0; i < list.Count; i++)
            {
                var index = Categories.IndexOf(list[i]);
                if (index == -1)
                {
                    throw new Exception($"{list[i]} is not in categories list!");
                }

                var code = Index2Dummy(index);
                Array.Copy(code, 0, result.GetRawValues(), i * Length, Length);
            }

            return result;
        }
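Concretely (assuming Index2Dummy yields a one-hot row of length Length; its body is not shown here): with Categories = ["red", "green", "blue"], Encode(new[] { "green", "red" }) returns a 2x3 tensor whose rows are [0, 1, 0] and [1, 0, 0], while any label missing from Categories throws.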
Example #7
        //Compute the gradients of Weights and Bias
        private void ComputeGradient(TensorOld error)
        {
            var inputData = ForwardInput.GetRawValues();
            var errorData = error.GetRawValues();
            var features  = WeightsGradient.shape[0];

            Parallel.For(0, features, i =>
            {
                Parallel.For(0, UnitCount, j =>
                {
                    var weightSum = 0d;
                    var biasSum   = 0d;
                    for (int k = 0; k < sampleStartIndex.Length; k++)
                    {
                        weightSum += inputData[sampleStartIndex[k] + i] * errorData[errorStartIndex[k] + j];
                        biasSum   += errorData[errorStartIndex[k] + j];
                    }
                    WeightsGradient[i, j]          = weightSum;
                    //biasSum depends only on j, so the same value is rewritten once per feature i;
                    //redundant work, but every writer stores an identical value, so it is race-free.
                    BiasGradient.GetRawValues()[j] = biasSum;
                });
            });
        }
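In matrix form, the two parallel loops compute the standard fully-connected gradients over the batch. Writing X for ForwardInput (samples x features) and \Delta for the incoming error (samples x units):

    \frac{\partial L}{\partial W} = X^{\top} \Delta, \qquad
    \frac{\partial L}{\partial b_j} = \sum_{k} \Delta_{kj}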
        //This version allocates no extra temporary objects; the drawback is that Derivative is no longer stored.
        //private void ErrorBP(Tensor output, Tensor error, Tensor result, int sampleIndex)
        //{
        //    for (int i = 0; i < categoryNumber; i++)
        //    {
        //        var der = 0d;
        //        for (int j = 0; j < categoryNumber; j++)
        //        {
        //            if (i == j)
        //                der += output[sampleIndex, i] * (1 - output[sampleIndex, j]) * error[sampleIndex, j];
        //            else
        //                der += -output[sampleIndex, i] * output[sampleIndex, j] * error[sampleIndex, j];
        //        }
        //        result[sampleIndex, i] = der;
        //    }
        //}

        private void ErrorBP(TensorOld error)
        {
            var derData   = Derivative.GetRawValues();
            var errorData = error.GetRawValues();
            var outData   = BackwardOutput.GetRawValues();

            Parallel.For(0, sampleNumber, sampleIndex =>
            {
                var errorStart = error.GetRawOffset(sampleIndex, 0);
                //The two nested loops here are not a strict matrix product; the derivative should be error * jacobian.
                //Because the jacobian matrix is symmetric, we instead take the inner product of each jacobian row
                //with error, which is easier to write as plain loops.
                Parallel.For(0, categoryNumber, i =>
                {
                    var derStart = Derivative.GetRawOffset(sampleIndex, i, 0);
                    var sum      = 0d;
                    for (int j = 0; j < categoryNumber; j++)
                    {
                        sum += derData[derStart + j] * errorData[errorStart + j];
                    }
                    outData[errorStart + i] = sum;
                });
            });
        }
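For reference, the rows of the stored Derivative tensor traversed here have the softmax Jacobian form, which the commented-out variant above expands inline:

    \frac{\partial y_i}{\partial z_j} = y_i(\delta_{ij} - y_j), \qquad
    \delta^{in}_i = \sum_j y_i(\delta_{ij} - y_j)\,\delta^{out}_j

Because this Jacobian is symmetric, dotting its row i with the error vector equals the strict error-times-Jacobian product, which is exactly what the inner loop computes.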