public override double Forward(Matrix Actual, Matrix Expected, MatrixData data, int layerCount)
        {
            double error = 0.0;

            if (Actual.rows != Expected.rows || Actual.cols != Expected.cols)
            {
                throw new MatrixException("Actual does not have the same size as Expected");
            }

            // Accumulate the regularization penalty over every layer's weight matrix.
            double regularizationValue = 0.0;

            for (int i = 0; i < layerCount; i++)
            {
                regularizationValue += RegularizationFunction.CalculateNorm(data.Data["W" + i.ToString()]);
            }

            // Kullback-Leibler divergence between the expected and actual distributions.
            for (int i = 0; i < Actual.rows; i++)
            {
                for (int j = 0; j < Actual.cols; j++)
                {
                    error += Expected[i, j] * Math.Log(Expected[i, j] / Actual[i, j]);
                }
            }

            // Add the regularization penalty and track the running batch cost.
            error += regularizationValue;

            BatchCost += error;
            return error;
        }
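As a reference point, here is a minimal usage sketch for this cost function. The class name KLDivergence, the L2Regularization instance, and the Matrix/MatrixData construction are assumptions for illustration; only the Forward signature comes from the snippet above.

// Hypothetical usage sketch: KLDivergence, L2Regularization and the
// Matrix/MatrixData construction below are assumed names, not part of the snippet above.
var loss = new KLDivergence { RegularizationFunction = new L2Regularization() };

var expected = new Matrix(1, 2);
expected[0, 0] = 0.7; expected[0, 1] = 0.3;

var actual = new Matrix(1, 2);
actual[0, 0] = 0.6; actual[0, 1] = 0.4;

// layerCount = 0 skips the weight-norm loop, so no "W0", "W1", ... entries are needed.
double cost = loss.Forward(actual, expected, new MatrixData(), layerCount: 0);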
Example #2
 /// <summary>Constructs a link in the neural network initialized with random weight.</summary>
 /// <param name="source">source The source node.</param>
 /// <param name="dest">@param dest The destination node.</param>
 /// <param name="regularization">regularization The regularization function that computes the penalty for this weight.If null, there will be no regularization.</param>
 /// <param name="initZero"></param>
 public Link(Node source, Node dest, RegularizationFunction regularization, bool initZero = false)
 {
     this.id             = source.Id + "-" + dest.Id;
     this.source         = source;
     this.dest           = dest;
     this.regularization = regularization;
     // When initZero is set, start from a zero weight instead of the random default.
     if (initZero)
     {
         this.weight = 0.0;
     }
 }
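A hedged construction sketch follows; sourceNode, destNode, and L2Regularization are assumed to exist in the surrounding code and are not defined by the constructor above.

// Assumed objects: sourceNode and destNode are existing Node instances,
// L2Regularization is a hypothetical RegularizationFunction implementation.
Link weighted = new Link(sourceNode, destNode, new L2Regularization());               // keeps its random weight
Link zeroed   = new Link(sourceNode, destNode, regularization: null, initZero: true); // starts at 0, no penalty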
Example #3
File: Output.cs Project: n1arash/Vortex
        public override Matrix Forward(Matrix inputs)
        {
            Params["W"].InMap(MutationFunction.Mutate);

            // Calculate Regularization Value On W and B
            RegularizationValue = (float)RegularizationFunction.CalculateNorm(Params["W"]);

            // Feed-forward: Z = W * X + B, A = activation(Z)
            Params["X"] = inputs;
            Params["Z"] = Params["W"] * Params["X"] + Params["B"];
            Params["A"] = ActivationFunction.Forward(Params["Z"]);
            return Params["A"];
        }
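For orientation, a small sketch of calling this layer; outputLayer and features are placeholders for an already-configured layer instance and an input column vector, not objects defined above.

// Hypothetical call site: 'outputLayer' is a configured instance of the layer type above,
// and 'features' is a Matrix whose row count matches the column count of Params["W"].
Matrix activations = outputLayer.Forward(features);   // A = activation(W * X + B)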
Example #4
        public override Matrix Backward(Matrix Actual, Matrix Expected, MatrixData data, int layerCount)
        {
            double error = 0.0;

            if (Actual.rows != Expected.rows || Actual.cols != Expected.cols)
            {
                throw new MatrixException("Actual matrix does not have the same size as the Expected matrix");
            }

            // Accumulate the regularization penalty over every layer's weight matrix.
            // (Computed here for parity with Forward; it is not applied to the gradient below.)
            double regularizationValue = 0.0;

            for (int i = 0; i < layerCount; i++)
            {
                regularizationValue += RegularizationFunction.CalculateNorm(data.Data["W" + i.ToString()]);
            }

            // Sum of squared differences between the actual and expected outputs.
            for (int i = 0; i < Actual.rows; i++)
            {
                for (int j = 0; j < Actual.cols; j++)
                {
                    error += Math.Pow((Actual[i, j] - Expected[i, j]), 2);
                }
            }

            // Scale for the exponential cost: Tao * exp(sum of squared errors / Tao).
            error /= Tao;

            error = Math.Exp(error);

            error *= Tao;

            // Gradient: the per-element difference scaled by the exponential cost term.
            Matrix gradMatrix = Actual.Duplicate();

            for (int i = 0; i < Actual.rows; i++)
            {
                for (int j = 0; j < Actual.cols; j++)
                {
                    gradMatrix[i, j] = (Actual[i, j] - Expected[i, j]) * error;
                }
            }

            return gradMatrix;
        }
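A hedged sketch of how a Backward call like the one above might be used inside a training step; the loss object and its Forward counterpart are placeholders for whichever cost class this method belongs to, and how the returned gradient is propagated is left to the surrounding library.

// Hypothetical training-step fragment; 'loss' stands for whichever cost class defines
// the Backward above, and its Forward counterpart is assumed to share the same signature.
double cost = loss.Forward(actual, expected, data, layerCount);        // scalar cost for monitoring
Matrix outputGrad = loss.Backward(actual, expected, data, layerCount);
// outputGrad has the same shape as 'actual' and is what the layers would backpropagate.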
Example #5
            /* Copyright 2016 Google Inc. All Rights Reserved.
             * Licensed under the Apache License, Version 2.0 (the "License");
             * you may not use this file except in compliance with the License.
             * You may obtain a copy of the License at
             *  http://www.apache.org/licenses/LICENSE-2.0
             * Unless required by applicable law or agreed to in writing, software
             * distributed under the License is distributed on an "AS IS" BASIS,
             * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
             * See the License for the specific language governing permissions and
             * limitations under the License.
             *
             * Credits
             * This was created by Daniel Smilkov and Shan Carter. This is a continuation
             * of many people’s previous work — most notably Andrej Karpathy’s convnet.js
             * demo and Chris Olah’s articles about neural networks. Many thanks also to
             * D. Sculley for help with the original idea and to Fernanda Viégas and
             * Martin Wattenberg and the rest of the Big Picture and Google Brain teams
             * for feedback and guidance.
             * ==============================================================================*/

            /// <summary>Builds a neural network.</summary>
            /// <param name="networkShape">The shape of the network. E.g. [1, 2, 3, 1] means the network will have one input node, 2 nodes in first hidden layer, 3 nodes in second hidden layer and 1 output node.</param>
            /// <param name="activation">The activation function of every hidden node.</param>
            /// <param name="outputActivation">The activation function for the output nodes.</param>
            /// <param name="regularization">The regularization function that computes a penalty for a given weight (parameter) in the network. If null, there will be no regularization.</param>
            /// <param name="inputIds">List of ids for the input nodes.</param>
            public List <List <Node> > BuildNetwork(List <int> networkShape, ActivationFunction activation, ActivationFunction outputActivation, RegularizationFunction regularization, List <string> inputIds, bool initZero = false)
            {
                int numLayers = networkShape.Count;
                int id        = 1;

                /* List of layers, with each layer being a list of nodes. */
                List <List <Node> > network = new List <List <Node> >();

                for (int layerIdx = 0; layerIdx < numLayers; layerIdx++)
                {
                    bool        isOutputLayer = layerIdx == numLayers - 1;
                    bool        isInputLayer  = layerIdx == 0;
                    List <Node> currentLayer  = new List <Node>();
                    network.Add(currentLayer);
                    int numNodes = networkShape[layerIdx];

                    for (int i = 0; i < numNodes; i++)
                    {
                        string nodeId = id.ToString();
                        if (isInputLayer)
                        {
                            nodeId = inputIds[i];
                        }
                        else
                        {
                            id++;
                        }
                        ActivationFunction act;
                        if (isOutputLayer)
                        {
                            act = outputActivation;
                        }
                        else
                        {
                            act = activation;
                        }
                        Node node = new Node(nodeId, act, initZero);
                        currentLayer.Add(node);
                        if (layerIdx >= 1)
                        {
                            // Add links from nodes in the previous layer to this node.
                            for (int j = 0; j < network[layerIdx - 1].Count; j++)
                            {
                                Node prevNode = network[layerIdx - 1][j];
                                Link link     = new Link(prevNode, node, regularization, initZero);
                                prevNode.Outputs.Add(link);
                                node.InputLinks.Add(link);
                            }
                        }
                    }
                }

                return network;
            }
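A usage sketch based on the documentation above; builder stands for whatever object hosts BuildNetwork, and TanhActivation, SigmoidActivation, and L2Regularization are assumed class names rather than types shown in this example.

// Build a 2-input network with one hidden layer of 4 nodes and a single output node.
// 'builder', TanhActivation, SigmoidActivation and L2Regularization are assumed names.
var shape    = new List<int> { 2, 4, 1 };
var inputIds = new List<string> { "x1", "x2" };

List<List<Node>> net = builder.BuildNetwork(shape, new TanhActivation(),
                                            new SigmoidActivation(), new L2Regularization(),
                                            inputIds);

// net[0] holds the two input nodes (ids "x1", "x2"), net[1] the hidden layer, net[2] the output node.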
Example #6
    public static List <List <Node> > buildNetwork(List <int> networkShape, ActivationFunction activation,
                                                   ActivationFunction outputActivation, RegularizationFunction regularization,
                                                   List <string> inputIds, bool initZero)
    {
        var numLayers = networkShape.Count;
        var id        = 1;

        List <List <Node> > network = new List <List <Node> >();

        for (var layerIdx = 0; layerIdx < numLayers; layerIdx++)
        {
            var         isOutputLayer = layerIdx == numLayers - 1;
            var         isInputLayer  = layerIdx == 0;
            List <Node> currentLayer  = new List <Node>();

            network.Add(currentLayer);

            int numNodes = networkShape[layerIdx];
            for (int i = 0; i < numNodes; i++)
            {
                var nodeId = id.ToString();
                if (isInputLayer)
                {
                    nodeId = inputIds[i];
                }
                else
                {
                    id++;
                }
                var node = new Node(nodeId, isOutputLayer ? outputActivation : activation, initZero);
                currentLayer.Add(node);

                if (layerIdx >= 1)
                {
                    for (int j = 0; j < network[layerIdx - 1].Count; j++)
                    {
                        var prevNode = network[layerIdx - 1][j];
                        var link     = new Link(prevNode, node, regularization, initZero);
                        prevNode.outputs.Add(link);
                        node.inputLinks.Add(link);
                    }
                }
            }
        }

        return network;
    }