Exemplo n.º 1
0
        static void Main(string[] args)
        {
            // Override the default activation pair (default is LogisticSigmoid)
            // with the hyperbolic tangent and its derivative.
            Perceptron.Default_Activation = ActivationFunction.HyperbolicTangent;
            Perceptron.Default_DiffActivation = ActivationFunction.Diff_HyperbolicTangent;

            // Build a network with 2 inputs, initial weights randomized in [-1, 1],
            // one hidden layer of 4 nodes and an output layer of 1 node.
            PerceptronNetwork network = new PerceptronNetwork(2, -1, 1, new int[] { 4, 1 });

            // XOR training set.
            LearnData[] data = new LearnData[] {
                new LearnData() { Input = new double[] { 0, 0 }, Output = new double[] { 0 } },
                new LearnData() { Input = new double[] { 1, 1 }, Output = new double[] { 0 } },
                new LearnData() { Input = new double[] { 1, 0 }, Output = new double[] { 1 } },
                new LearnData() { Input = new double[] { 0, 1 }, Output = new double[] { 1 } },
            };
            // Rescale inputs/outputs to [-1, 1] to match the tanh activation range.
            LearnData.Standardize(data, -1, 1, -1, 1);

            // Train: learning rate 0.8, momentum 0.6, target error 0.05.
            network.Train(data, 0.8, 0.6, 0.05);

            // Show the trained network's response for each sample.
            foreach (var item in data) {
                Console.WriteLine($"測試({string.Join(",", item.Input)}) => {string.Join(",", network.Compute(item.Input))}");
            }

            // Persist the trained network as JSON (the Load method can deserialize it).
            // FIX: `using` guarantees the stream is flushed and closed even if
            // Write throws; the original only called Close() on the success path.
            using (StreamWriter writer = new StreamWriter("output.json"))
            {
                writer.Write(network.ToJObject());
            }

            Console.ReadKey();
        }
Exemplo n.º 2
0
        static void Main(string[] args)
        {
            // Override the default activation pair (default is LogisticSigmoid)
            // with the hyperbolic tangent and its derivative.
            Perceptron.Default_Activation     = ActivationFunction.HyperbolicTangent;
            Perceptron.Default_DiffActivation = ActivationFunction.Diff_HyperbolicTangent;

            // Build a network with 2 inputs, initial weights randomized in [-1, 1],
            // one hidden layer of 4 nodes and an output layer of 1 node.
            PerceptronNetwork network = new PerceptronNetwork(2, -1, 1, new int[] { 4, 1 });

            // XOR training set.
            LearnData[] data = new LearnData[] {
                new LearnData()
                {
                    Input = new double[] { 0, 0 }, Output = new double[] { 0 }
                },
                new LearnData()
                {
                    Input = new double[] { 1, 1 }, Output = new double[] { 0 }
                },
                new LearnData()
                {
                    Input = new double[] { 1, 0 }, Output = new double[] { 1 }
                },
                new LearnData()
                {
                    Input = new double[] { 0, 1 }, Output = new double[] { 1 }
                },
            };
            // Rescale inputs/outputs to [-1, 1] to match the tanh activation range.
            LearnData.Standardize(data, -1, 1, -1, 1);

            // Train: learning rate 0.8, momentum 0.6, target error 0.05.
            network.Train(data, 0.8, 0.6, 0.05);

            // Show the trained network's response for each sample.
            foreach (var item in data)
            {
                Console.WriteLine($"測試({string.Join(",", item.Input)}) => {string.Join(",", network.Compute(item.Input))}");
            }

            // Persist the trained network as JSON (the Load method can deserialize it).
            // FIX: `using` guarantees the stream is flushed and closed even if
            // Write throws; the original only called Close() on the success path.
            using (StreamWriter writer = new StreamWriter("output.json"))
            {
                writer.Write(network.ToJObject());
            }

            Console.ReadKey();
        }
Exemplo n.º 3
0
        /// <summary>
        /// Normalizes a data set in place: each input/output column is min-max
        /// scaled over the whole set, then mapped into the requested target range
        /// via ConvertValue.
        /// </summary>
        /// <param name="Data">Data set to normalize; Input/Output arrays are replaced in place.</param>
        /// <param name="InputMin">Lower bound of the normalized input range.</param>
        /// <param name="InputMax">Upper bound of the normalized input range.</param>
        /// <param name="OutputMin">Lower bound of the normalized output range.</param>
        /// <param name="OutputMax">Upper bound of the normalized output range.</param>
        public static void Standardize(LearnData[] Data,int InputMin = 0,int InputMax = 1,int OutputMin = 0,int OutputMax = 1) {
            int inputLength = Data.First().Input.Length;
            int outputLength = Data.First().Output.Length;

            // Per-column extrema, computed once up front.
            var DataInputMax = Enumerable.Range(0, inputLength).Select(i => Data.Max(x => x.Input[i])).ToArray();
            var DataInputMin = Enumerable.Range(0, inputLength).Select(i => Data.Min(x => x.Input[i])).ToArray();
            var DataOutputMax = Enumerable.Range(0, outputLength).Select(i => Data.Max(x => x.Output[i])).ToArray();
            var DataOutputMin = Enumerable.Range(0, outputLength).Select(i => Data.Min(x => x.Output[i])).ToArray();

            for(int i = 0; i < Data.Length; i++) {
                Data[i].Input = Data[i].Input.Select((x, i2) => {
                    double range = DataInputMax[i2] - DataInputMin[i2];
                    // FIX: a constant column has zero range; map it to the middle
                    // of the target interval instead of producing NaN from 0/0.
                    double fraction = range == 0 ? 0.5 : (x - DataInputMin[i2]) / range;
                    return ConvertValue(fraction, InputMin, InputMax);
                }).ToArray();
                Data[i].Output = Data[i].Output.Select((x, i2) => {
                    double range = DataOutputMax[i2] - DataOutputMin[i2];
                    double fraction = range == 0 ? 0.5 : (x - DataOutputMin[i2]) / range;
                    return ConvertValue(fraction, OutputMin, OutputMax);
                }).ToArray();
            }
        }
Exemplo n.º 4
0
 /// <summary>
 /// Convenience overload: runs the network on the input vector of a
 /// learning sample.
 /// </summary>
 /// <param name="Input">Learning sample whose Input vector is evaluated.</param>
 /// <returns>The network's computed output vector.</returns>
 public double[] Compute(LearnData Input) => Compute(Input.Input);
Exemplo n.º 5
0
        /// <summary>
        /// Back-propagation step for a single training sample: computes the
        /// per-node error deltas and accumulates weight/threshold corrections
        /// into <paramref name="LearnNodes"/>. The corrections are applied
        /// later by the caller (see Train).
        /// </summary>
        /// <param name="LearnNodes">Accumulator for the pending weight/threshold corrections.</param>
        /// <param name="Data">Training sample (input vector and expected output).</param>
        /// <param name="Rate">Learning rate.</param>
        /// <returns>Sum of absolute output errors for this sample.</returns>
        private double BackPropagate(LearnPerceptron[][] LearnNodes, LearnData Data, double Rate) {
            // Forward pass, keeping every node's intermediate result.
            LearnPerceptron[][] ComputeResult = LearnCompute(Data.Input);

            double Error = 0;

            #region 感知器節點運算結果誤差
            for (int layer = Length - 1; layer > -1; layer--) {
                for (int instance = 0; instance < this[layer].Length; instance++) {
                    if (layer == Length - 1) {//output layer: delta from the target difference
                        LearnNodes[layer][instance].ResultDelta =
                            (Data.Output[instance] - ComputeResult[layer][instance].Result) *
                            this[layer][instance].DiffActivation(ComputeResult[layer][instance].Result);
                        Error += Math.Abs(Data.Output[instance] - ComputeResult[layer][instance].Result);
                    } else {//hidden layer: delta from the weighted deltas of the next layer
                        double sumDelta = 0;
                        for (int nextInstance = 0; nextInstance < Layers[layer + 1].Length; nextInstance++) {
                            sumDelta += //next layer node's delta, weighted by the connecting weight
                                LearnNodes[layer + 1][nextInstance].ResultDelta *
                                this[layer + 1][nextInstance][instance];
                        }
                        LearnNodes[layer][instance].ResultDelta = sumDelta *
                            this[layer][instance].DiffActivation(ComputeResult[layer][instance].Result);
                    }
                }
            }
            #endregion

            #region 計算感知器節點的權重與閥值修正值
            for (int layer = Length - 1; layer > -1; layer--) {
                for (int instance = 0; instance < this[layer].Length; instance++) {
                    // Threshold correction.
                    LearnNodes[layer][instance].ThresholdDelta += -Rate * LearnNodes[layer][instance].ResultDelta;
                    // FIX: the original nested a full loop over every upstream value
                    // inside the per-weight loop, so each WeightsDelta entry was
                    // accumulated once per weight — i.e. the gradient was scaled by
                    // the node's fan-in count. Each weight now pairs with exactly
                    // its own upstream value.
                    for (int weight = 0; weight < this[layer][instance].Length; weight++) {
                        double upstream = layer == 0
                            ? Data.Input[weight]                        // input layer: raw sample input
                            : ComputeResult[layer - 1][weight].Result;  // deeper layers: previous layer's output
                        LearnNodes[layer][instance].WeightsDelta[weight] += Rate * LearnNodes[layer][instance].ResultDelta * upstream;
                    }
                }
            }
            #endregion

            return Error;
        }
Exemplo n.º 6
0
        /// <summary>
        /// Trains the network with back-propagation over the whole data set,
        /// iterating until the summed absolute error reaches the target or the
        /// iteration limit is hit.
        /// </summary>
        /// <param name="Data">Training data set.</param>
        /// <param name="Rate">Learning rate.</param>
        /// <param name="Momentum">Momentum factor applied to the previous corrections.</param>
        /// <param name="Difference">Target error; training stops once the summed error is at or below this value.</param>
        /// <param name="Iterations">Iteration limit; -1 means unlimited.</param>
        public void Train(LearnData[] Data, double Rate, double Momentum = 0, double Difference = 0, int Iterations = -1) {
            double Min = double.MaxValue;
            for (int i = 0; Iterations == -1 || i < Iterations; i++) {
                // Scratch accumulators for this iteration's corrections.
                // NOTE(review): these are re-fetched every iteration; if
                // GetLearnPerceptrons() returns fresh zeroed objects, the Last_*
                // values saved below are discarded each pass and the momentum
                // term never takes effect — confirm it returns persistent instances.
                LearnPerceptron[][] LearnNodes = GetLearnPerceptrons();

                double Error = 0;
                foreach (LearnData Item in Data) {
                    // Accumulates this sample's corrections into LearnNodes and
                    // returns the sample's absolute output error.
                    Error += BackPropagate(LearnNodes,Item, Rate);
                }
                
                #region 修正
                for (int layer = Length - 1; layer > -1; layer--) {
                    for (int instance = 0; instance < this[layer].Length; instance++) {
                        // Apply threshold correction plus momentum from the previous iteration.
                        this[layer][instance].Threshold += LearnNodes[layer][instance].ThresholdDelta + LearnNodes[layer][instance].Last_ThresholdDelta * Momentum;// / Math.Sqrt(Data.Length);
                        for (int weight = 0; weight < this[layer][instance].Length; weight++) {
                            // Apply weight correction plus momentum.
                            this[layer][instance][weight] += LearnNodes[layer][instance].WeightsDelta[weight] + LearnNodes[layer][instance].Last_WeightsDelta[weight] * Momentum;// / Math.Sqrt(Data.Length);
                        }
                    }
                }
                #endregion

                #region 備份修正值
                for(int layer = Length - 1;layer > -1;layer--) {
                    for(int instance = 0; instance < this[layer].Length; instance++) {
                        LearnNodes[layer][instance].Last_ThresholdDelta = LearnNodes[layer][instance].ThresholdDelta;
                        // NOTE(review): this copies the array reference, not the
                        // contents — Last_WeightsDelta aliases WeightsDelta, so any
                        // later accumulation into WeightsDelta also changes the
                        // backed-up values. Confirm this aliasing is intended.
                        LearnNodes[layer][instance].Last_WeightsDelta = LearnNodes[layer][instance].WeightsDelta;
                    }
                }
                #endregion
                Min = Math.Min(Min, Error);
                // Progress log: iteration index, current summed error, best error so far.
                Console.WriteLine($"迭代:{i}\t{Error}\t{Min}");
                if (Error <= Difference) {
                    break;
                }
            }
        }
Exemplo n.º 7
0
        /// <summary>
        /// Back-propagation step for a single training sample: computes the
        /// per-node error deltas and accumulates weight/threshold corrections
        /// into <paramref name="LearnNodes"/>. The corrections are applied
        /// later by the caller (see Train).
        /// </summary>
        /// <param name="LearnNodes">Accumulator for the pending weight/threshold corrections.</param>
        /// <param name="Data">Training sample (input vector and expected output).</param>
        /// <param name="Rate">Learning rate.</param>
        /// <returns>Sum of absolute output errors for this sample.</returns>
        private double BackPropagate(LearnPerceptron[][] LearnNodes, LearnData Data, double Rate)
        {
            // Forward pass, keeping every node's intermediate result.
            LearnPerceptron[][] ComputeResult = LearnCompute(Data.Input);

            double Error = 0;

            #region 感知器節點運算結果誤差
            for (int layer = Length - 1; layer > -1; layer--)
            {
                for (int instance = 0; instance < this[layer].Length; instance++)
                {
                    if (layer == Length - 1)  // output layer: delta from the target difference
                    {
                        LearnNodes[layer][instance].ResultDelta =
                            (Data.Output[instance] - ComputeResult[layer][instance].Result) *
                            this[layer][instance].DiffActivation(ComputeResult[layer][instance].Result);
                        Error += Math.Abs(Data.Output[instance] - ComputeResult[layer][instance].Result);
                    }
                    else    // hidden layer: delta from the weighted deltas of the next layer
                    {
                        double sumDelta = 0;
                        for (int nextInstance = 0; nextInstance < Layers[layer + 1].Length; nextInstance++)
                        {
                            sumDelta += // next layer node's delta, weighted by the connecting weight
                                        LearnNodes[layer + 1][nextInstance].ResultDelta *
                                        this[layer + 1][nextInstance][instance];
                        }
                        LearnNodes[layer][instance].ResultDelta = sumDelta *
                                                                  this[layer][instance].DiffActivation(ComputeResult[layer][instance].Result);
                    }
                }
            }
            #endregion

            #region 計算感知器節點的權重與閥值修正值
            for (int layer = Length - 1; layer > -1; layer--)
            {
                for (int instance = 0; instance < this[layer].Length; instance++)
                {
                    // Threshold correction.
                    LearnNodes[layer][instance].ThresholdDelta += -Rate * LearnNodes[layer][instance].ResultDelta;
                    // FIX: the original nested a full loop over every upstream value
                    // inside the per-weight loop, so each WeightsDelta entry was
                    // accumulated once per weight — i.e. the gradient was scaled by
                    // the node's fan-in count. Each weight now pairs with exactly
                    // its own upstream value.
                    for (int weight = 0; weight < this[layer][instance].Length; weight++)
                    {
                        double upstream = layer == 0
                            ? Data.Input[weight]                        // input layer: raw sample input
                            : ComputeResult[layer - 1][weight].Result;  // deeper layers: previous layer's output
                        LearnNodes[layer][instance].WeightsDelta[weight] += Rate * LearnNodes[layer][instance].ResultDelta * upstream;
                    }
                }
            }
            #endregion

            return Error;
        }
Exemplo n.º 8
0
 /// <summary>
 /// Convenience overload: evaluates the network against a learning sample
 /// by forwarding its input vector to the main Compute overload.
 /// </summary>
 /// <param name="Input">Learning sample to evaluate.</param>
 /// <returns>The network's computed output vector.</returns>
 public double[] Compute(LearnData Input)
 {
     double[] inputVector = Input.Input;
     return Compute(inputVector);
 }