Example #1
        private void Internal_DumpInline(StreamWriter i_Writer)
        {
            if (i_Writer == null)
            {
                return;
            }

            // Activation function.

            i_Writer.Write(m_ActivationFunction.ToString());

            // Weights.

            for (int index = 0; index < inputCount; ++index)
            {
                float weight = GetWeight(index);
                i_Writer.Write(",");
                i_Writer.Write(ANN.FloatToString(weight));
            }

            // Bias.

            i_Writer.Write(",");
            i_Writer.Write(ANN.FloatToString(bias));
        }
Example #2
        private void Internal_Dump(StreamWriter i_Writer)
        {
            if (i_Writer == null)
            {
                return;
            }

            // Activation function.

            i_Writer.WriteLine("Activation function: " + m_ActivationFunction.ToString());

            // Weights.

            i_Writer.WriteLine("Weights");

            for (int index = 0; index < inputCount; ++index)
            {
                float weight = GetWeight(index);
                i_Writer.WriteLine("\t" + (index) + ": " + ANN.FloatToString(weight));
            }

            // Bias.

            i_Writer.WriteLine("Bias: " + ANN.FloatToString(bias));
        }
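Note: Examples #1 and #2 call ANN.FloatToString, which is not shown in this listing. The sketch below is an assumption about what it does: a dump that joins values with commas (as in Internal_DumpInline) typically needs a culture-invariant, round-trippable formatter so the decimal separator is always '.'.

using System.Globalization;

// Hypothetical sketch (standalone here; in the real project this would live
// on the ANN class that the examples call into).
public static class ANNFloatFormat
{
    public static string FloatToString(float i_Value)
    {
        // "R" round-trips the exact float value; InvariantCulture pins the
        // decimal separator to '.' so the comma-separated dump stays parseable.
        return i_Value.ToString("R", CultureInfo.InvariantCulture);
    }
}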
Example #3
        protected override void Setup()
        {
            base.Setup();

            ANNDescriptor annDescriptor = (m_ANNDescriptorAsset != null) ? m_ANNDescriptorAsset.ANNDescriptor : m_ANNDescriptor;

            m_ANN = new ANN.ANN(annDescriptor);
        }
Example #4
        public void Uninit()
        {
            if (!m_bInitialized)
            {
                return;
            }

            m_bUsingFixedTrainingSet = false;
            m_TrainingSet            = null;
            m_ANN                    = null;

            m_bInitialized = false;
        }
Example #5
        public void Visualize(ANN i_ANN)
        {
            Clear();

            if (i_ANN == null)
            {
                return;
            }

            for (int layerIndex = 0; layerIndex < i_ANN.layerCount; ++layerIndex)
            {
                Layer ANNLayer = i_ANN.GetLayer(layerIndex);
                Internal_AddLayer(ANNLayer);
            }
        }
Example #6
        public void PropagateForward()
        {
            double tempSum;

            for (int i = 0; i < neuronsCount; i++)
            {
                tempSum = 0;
                for (int j = 0; j < neurons[i].inputsNumber; j++)
                {
                    tempSum += neurons[i].inputs[j].value * neurons[i].inputs[j].inNeuron.value;
                }
                neurons[i].value       = ANN.tanh(tempSum);
                neurons[i].text.text   = System.Math.Round(neurons[i].value, 3).ToString();
                neurons[i].lastTempSum = tempSum;
            }
        }
Example #7
        public void GradientDescent()
        {
            for (int i = 0; i < neuronsCount; i++)
            {
                for (int j = 0; j < neurons[i].inputsNumber; j++)
                {
                    // Weight update: step (learning rate) * error signal * input activation * tanh'(pre-activation sum).
                    neurons[i].inputs[j].value =
                        neurons[i].inputs[j].value +
                        ANN.step *
                        neurons[i].mistake *
                        neurons[i].inputs[j].inNeuron.value *
                        ANN.dtanh(neurons[i].lastTempSum);
                    neurons[i].inputs[j].text.text = System.Math.Round(neurons[i].inputs[j].value, 3).ToString();
                }
            }
        }
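Examples #6 and #7 depend on ANN.tanh, ANN.dtanh, and ANN.step, none of which appear in this listing. A minimal sketch under the standard definitions follows; dtanh is the derivative of tanh, which GradientDescent evaluates at the cached pre-activation sum lastTempSum, and the step value here is an arbitrary placeholder.

using System;

// Hypothetical sketch of the static helpers used by PropagateForward and GradientDescent.
public static class ANNMath
{
    // Learning-rate placeholder; the real ANN.step may differ.
    public const double step = 0.1;

    public static double tanh(double x)
    {
        return Math.Tanh(x);
    }

    // d/dx tanh(x) = 1 - tanh(x)^2, evaluated at the pre-activation sum.
    public static double dtanh(double x)
    {
        double t = Math.Tanh(x);
        return 1.0 - t * t;
    }
}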
Example #8
        public void Init()
        {
            if (m_bInitialized)
            {
                return;
            }

            m_ANN                    = new ANN((m_ANNDescriptorObject != null) ? m_ANNDescriptorObject.ANNDescriptor : null);
            m_TrainingSet            = (m_FixedTrainingSet != null) ? m_FixedTrainingSet.trainingSet : new TrainingSet();
            m_bUsingFixedTrainingSet = (m_FixedTrainingSet != null);

            if (m_bUsingFixedTrainingSet)
            {
                Train(m_ANN, m_TrainingSet);
            }

            m_bInitialized = true;
        }
Example #9
        private void Train()
        {
            int    hiddenNodes = 100;
            int    outputNodes = 10;
            double learnRate   = 0.03;
            int    bias        = 1;

            network = new ANN(28 * 28, hiddenNodes, hiddenNodes, outputNodes, learnRate, bias);

            int count = 0;

            for (int epoch = 0; epoch < 1; epoch++)
            {
                using (FileStream file = new FileStream("mnist_train.csv", FileMode.Open, FileAccess.Read))
                using (StreamReader reader = new StreamReader(file))
                {
                    string record = null;
                    while ((record = reader.ReadLine()) != null)
                    {
                        string[] all_values = record.Split(',');

                        // Scale each pixel from [0, 255] into [0.01, 1.0].
                        double[] inputs = new double[all_values.Length - 1];
                        for (int i = 1; i < all_values.Length; i++)
                        {
                            inputs[i - 1] = (double.Parse(all_values[i]) / 255 * 0.99) + 0.01;
                        }

                        // Targets: 0.99 for the labelled digit, 0.01 everywhere else.
                        double[] targets = new double[outputNodes];
                        for (int i = 0; i < outputNodes; i++)
                        {
                            targets[i] = 0.01;
                        }
                        targets[int.Parse(all_values[0])] = 0.99;

                        double error = network.Train(inputs, targets);
                        count++;

                        if ((count % 1000) == 0)
                        {
                            Console.WriteLine("[{0}] Sum of Error : {1}", count, error);
                        }
                    }
                }
            }
        }
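The scaling line above maps a raw MNIST pixel in [0, 255] into [0.01, 1.0]; the 0.01 offset keeps every input strictly positive so a black pixel still contributes to the weighted sums. A quick endpoint check (Scale is a hypothetical name for the inline expression):

using System;

static class ScaleCheck
{
    // Same formula as the training loop above.
    static double Scale(double pixel) => (pixel / 255 * 0.99) + 0.01;

    static void Main()
    {
        Console.WriteLine(Scale(0));   // 0.01 (darkest pixel, still nonzero)
        Console.WriteLine(Scale(255)); // 1.00 (brightest pixel)
    }
}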
Example #10
        // INTERNALS

        private void Train(ANN i_ANN, TrainingSet i_TrainingSet)
        {
            if (i_ANN == null || i_TrainingSet == null)
            {
                return;
            }

            if (i_ANN.ANNInputCount != i_TrainingSet.entryInputSize)
            {
                return;
            }

            for (int epoch = 0; epoch < m_EpochToTrain; ++epoch)
            {
                for (int trainingSetEntryIndex = 0; trainingSetEntryIndex < i_TrainingSet.entryCount; ++trainingSetEntryIndex)
                {
                    TrainingSetEntry trainingSetEntry = i_TrainingSet.GetEntry(trainingSetEntryIndex);

                    if (trainingSetEntry == null)
                    {
                        continue;
                    }

                    float[] inputs = new float[trainingSetEntry.inputCount];
                    for (int inputIndex = 0; inputIndex < inputs.Length; ++inputIndex)
                    {
                        inputs[inputIndex] = trainingSetEntry.GetInput(inputIndex);
                    }

                    float[] desiredOutputs = new float[trainingSetEntry.outputCount];
                    for (int outputIndex = 0; outputIndex < desiredOutputs.Length; ++outputIndex)
                    {
                        desiredOutputs[outputIndex] = trainingSetEntry.GetOutput(outputIndex);
                    }

                    float[] outputs;
                    bool    ANNRunSuccess = i_ANN.TryRun(inputs, out outputs);

                    if (ANNRunSuccess)
                    {
                        ANNFunctionLibrary.AdjustWeights(i_ANN, desiredOutputs);
                    }
                }
            }
        }
Example #11
        static void testANN()
        {
            RandomUtils random     = new RandomUtils();
            List <int>  layerSizes = new List <int>();

            layerSizes.Add(3);
            layerSizes.Add(4);
            layerSizes.Add(2);
            ANN    ann           = new ANN(layerSizes);
            double eta           = 0.01;
            int    miniBatchSize = 10;
            int    nrOfEpochs    = 2;

            //testFeedForward();
            //testSigmoid();
            //testActivation();
            //testComputeSingletonGradient();
            //testComputeMiniBatchGradient();
            //testAdjustLearningParameters();
            testTrainANN();

            void testActivation()
            {
                Matrix Z = newMatrix(3, 4, random);
                Matrix A = ann.activation(Z);

                writeMatrix(Z, "Z");
                writeMatrix(A, "A");
                Console.ReadLine();
            }

            void testSigmoid()
            {
                double sigmoidRes = ann.sigmoid(-0.5);

                Console.WriteLine("sigmoidRes = " + sigmoidRes.ToString());
                Console.ReadLine();
            }

            void testFeedForward()
            {
                Matrix X = newMatrix(3, 1, random);
                Tuple <List <Matrix>, List <Matrix> > zaLists = ann.feedForward(X);
                List <Matrix> zList = zaLists.Item1;
                List <Matrix> aList = zaLists.Item2;

                Console.WriteLine("zList.Count = " + zList.Count());
                Console.WriteLine("aList.Count = " + aList.Count());

                for (int i = 0; i < zList.Count; i++)
                {
                    if (i > 0)
                    {
                        writeMatrix(zList.ElementAt(i), "zList(" + i.ToString() + ")");
                    }
                    writeMatrix(aList.ElementAt(i), "aList(" + i.ToString() + ")");
                }

                Console.ReadLine();
            }

            void testComputeSingletonGradient()
            {
                Matrix X = newMatrix(3, 1, random);
                Matrix Y = newMatrix(2, 1, random);

                ann.computeSingletonGradient(X, Y);
            }

            void testComputeMiniBatchGradient()
            {
                List <Matrix> X = new List <Matrix>();
                List <Matrix> Y = new List <Matrix>();

                X.Add(newMatrix(3, 1, random));
                X.Add(newMatrix(3, 1, random));
                X.Add(newMatrix(3, 1, random));
                X.Add(newMatrix(3, 1, random));
                X.Add(newMatrix(3, 1, random));

                Y.Add(newMatrix(2, 1, random));
                Y.Add(newMatrix(2, 1, random));
                Y.Add(newMatrix(2, 1, random));
                Y.Add(newMatrix(2, 1, random));
                Y.Add(newMatrix(2, 1, random));

                Tuple <List <Matrix>, List <Matrix> > wbGradientList =
                    ann.computeMiniBatchGradient(X, Y);
            }

            void testAdjustLearningParameters()
            {
                List <Matrix> X = new List <Matrix>();
                List <Matrix> Y = new List <Matrix>();

                X.Add(newMatrix(3, 1, random));
                X.Add(newMatrix(3, 1, random));
                X.Add(newMatrix(3, 1, random));
                X.Add(newMatrix(3, 1, random));
                X.Add(newMatrix(3, 1, random));

                Y.Add(newMatrix(2, 1, random));
                Y.Add(newMatrix(2, 1, random));
                Y.Add(newMatrix(2, 1, random));
                Y.Add(newMatrix(2, 1, random));
                Y.Add(newMatrix(2, 1, random));

                Tuple <List <Matrix>, List <Matrix> > wbGradientList =
                    ann.computeMiniBatchGradient(X, Y);

                ann.adjustLearningParameters(eta, wbGradientList.Item1, wbGradientList.Item2);
            }

            void testTrainANN()
            {
                List <Matrix> trainX = new List <Matrix>();
                List <Matrix> trainY = new List <Matrix>();

                int inputSize  = layerSizes.First <int>();
                int outputSize = layerSizes.Last <int>();

                int nrOfTrainExamples = 500;

                for (int i = 0; i < nrOfTrainExamples; i++)
                {
                    trainX.Add(newMatrix(inputSize, 1, random));
                    trainY.Add(newMatrix(outputSize, 1, random));
                }

                ann.assignTrainingData(trainX, trainY);
                ann.runAllEpochs(miniBatchSize, eta, nrOfEpochs);
            }
        }
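testANN and its nested tests lean on two helpers that are not shown, newMatrix and writeMatrix, plus a RandomUtils type. Below is a sketch of what they might look like, with a deliberately minimal hypothetical Matrix and plain System.Random standing in for RandomUtils; the project's real types almost certainly differ.

using System;

// Minimal hypothetical Matrix, just enough for the sketch.
public class Matrix
{
    public readonly int Rows;
    public readonly int Cols;
    private readonly double[,] m_Data;

    public Matrix(int rows, int cols)
    {
        Rows   = rows;
        Cols   = cols;
        m_Data = new double[rows, cols];
    }

    public double this[int row, int col]
    {
        get { return m_Data[row, col]; }
        set { m_Data[row, col] = value; }
    }
}

public static class MatrixHelpers
{
    // Fills a rows x cols matrix with uniform random values in [0, 1).
    public static Matrix newMatrix(int rows, int cols, Random random)
    {
        Matrix result = new Matrix(rows, cols);
        for (int r = 0; r < rows; r++)
        {
            for (int c = 0; c < cols; c++)
            {
                result[r, c] = random.NextDouble();
            }
        }
        return result;
    }

    // Prints a labeled matrix, one row per line.
    public static void writeMatrix(Matrix m, string label)
    {
        Console.WriteLine(label + ":");
        for (int r = 0; r < m.Rows; r++)
        {
            for (int c = 0; c < m.Cols; c++)
            {
                Console.Write("{0,10:F4}", m[r, c]);
            }
            Console.WriteLine();
        }
    }
}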
Example #12
        static void Main(string[] args)
        {
            gp = new Bitmap(1000 + 100 + 400, 1000 + 100);
            Graphics g = Graphics.FromImage(gp);

            g.SmoothingMode     = SmoothingMode.AntiAlias;
            g.InterpolationMode = InterpolationMode.HighQualityBicubic;
            g.PixelOffsetMode   = PixelOffsetMode.HighQuality;
            g.Clear(Color.White);

            Pen p = new Pen(Color.Black, 5);

            p.StartCap = LineCap.Round;
            p.EndCap   = LineCap.Round;

            g.DrawLine(p, 49, 1051, 1051, 1051);
            g.DrawLine(p, 49, 1051, 49, 49);

            p          = new Pen(Color.Black, 2);
            p.StartCap = LineCap.Round;
            p.EndCap   = LineCap.Round;

            g.DrawLine(p, 49, 49, 1051, 49);
            g.DrawLine(p, 1051, 49, 1051, 1051);

            Font font = new Font("나눔고딕", 25);

            g.DrawString("0", font, new SolidBrush(Color.Black), 20, 1050);
            g.DrawString("0.5", new Font("나눔고딕", 20), new SolidBrush(Color.Black), 525, 1055);
            g.DrawString("0.5", new Font("나눔고딕", 20), new SolidBrush(Color.Black), 0, 535);
            g.DrawString("1", font, new SolidBrush(Color.Black), 20, 50);
            g.DrawString("1", font, new SolidBrush(Color.Black), 1050, 1050);

            //g.DrawString("정확도 : 00.00 %", font, new SolidBrush(Color.Black), 1060, 50);

            ann = new ANN(2, 0, 1, new int[] { 0 });
            string learningData = System.IO.File.ReadAllText("Learning_data.txt");

            while (true)
            {
                double acc = ann.MachineLearning(learningData, false, handler);
                Console.Write("\r" + acc.ToString() + "%");

                // Visualization
                //ann.DrawThings(true, handler);

                Bitmap gp_ = new Bitmap(gp);
                g = Graphics.FromImage(gp_);
                g.SmoothingMode     = SmoothingMode.AntiAlias;
                g.InterpolationMode = InterpolationMode.HighQualityBicubic;
                g.PixelOffsetMode   = PixelOffsetMode.HighQuality;

                for (double y = 0; y <= 1; y += 0.001)
                {
                    ann.Per[0, 1].v = y;
                    for (double x = 0; x <= 1; x += 0.001)
                    {
                        ann.Per[0, 0].v = x;

                        double[] re = ann.solve();
                        if (0.5 <= re[0] && re[0] < 0.51) // midpoint: marks the decision boundary
                        {
                            gp_.SetPixel(50 + Convert.ToInt32(x * 1000), 1049 - Convert.ToInt32(y * 1000), Color.Red);
                        }
                        else
                        {
                            gp_.SetPixel(50 + Convert.ToInt32(x * 1000), 1049 - Convert.ToInt32(y * 1000), Color.FromArgb(255, Convert.ToInt32(re[0] * 255), Convert.ToInt32(re[0] * 255), Convert.ToInt32(re[0] * 255)));
                        }
                    }
                }
                g.DrawString("정확도 : " + String.Format("{0:00.00} %", acc.ToString()), new Font("나눔고딕", 30), new SolidBrush(Color.Black), 1060, 50);

                g.DrawLine(p, 550, 49, 550, 1051);
                g.DrawLine(p, 49, 550, 1051, 550);

                frames++;
                System.IO.File.WriteAllText(@"datas\info_" + frames + ".txt", acc.ToString());
                gp_.Save(@"datas\gp_" + frames + ".png", System.Drawing.Imaging.ImageFormat.Png);

                if (acc > 99)
                {
                    break;
                }
            }
            ann.MachineLearning(learningData, true, handler);
            ann.visual.Save("tmp.png", System.Drawing.Imaging.ImageFormat.Png);

            Console.WriteLine("\n계산할 parameter 입력");
            while (true)
            {
                for (int j = 0; j < ann.count_P[0]; j++)
                {
                    ann.Per[0, j].v = double.Parse(Console.ReadLine());
                }
                double[] re = ann.solve();
                for (int j = 0; j < re.Count(); j++)
                {
                    Console.Write(re[j] + ",");
                }
                Console.WriteLine();
            }
        }
Example #13
        public static void AdjustWeights(ANN i_ANN, float[] i_DesiredOutputs)
        {
            if (i_ANN == null || i_DesiredOutputs == null || i_DesiredOutputs.Length != i_ANN.ANNOutputCount)
            {
                return;
            }

            // Init error gradients map (layer x neuron).

            List <List <float> > layerErrorGradients = new List <List <float> >();

            CommonFunctionLibrary.InitListNewElements <List <float> >(layerErrorGradients, i_ANN.layerCount);

            // Iterate all layers, starting from the output one: this is a backward propagation.

            for (int layerIndex = i_ANN.layerCount - 1; layerIndex >= 0; --layerIndex)
            {
                // Get layer and its error gradients entry.

                Layer layer       = i_ANN.GetLayer(layerIndex);
                bool  isLastLayer = (layerIndex == i_ANN.layerCount - 1);
                Layer nextLayer   = (isLastLayer) ? null : i_ANN.GetLayer(layerIndex + 1);

                List <float> neuronErrorGradients          = layerErrorGradients[layerIndex];
                List <float> nextLayerNeuronErrorGradients = (isLastLayer) ? null : layerErrorGradients[layerIndex + 1];

                // Iterate neurons.

                for (int neuronIndex = 0; neuronIndex < layer.neuronCount; ++neuronIndex)
                {
                    Neuron          neuron = layer.GetNeuron(neuronIndex);
                    NeuronExecution lastNeuronExecution = neuron.lastNeuronExecution;

                    // Compute current error gradient.

                    float errorGradient = 0f;

                    if (isLastLayer)
                    {
                        // If this is the last layer, just use (desired output - actual output).

                        float error = i_DesiredOutputs[neuronIndex] - lastNeuronExecution.output;
                        errorGradient = lastNeuronExecution.output * (1f - lastNeuronExecution.output) * error; // This is the delta rule (https://en.wikipedia.org/wiki/Delta_rule).
                    }
                    else
                    {
                        // If this is not the final layer, use a weighted error gradient based on the next layer's error gradients.

                        errorGradient = lastNeuronExecution.output * (1f - lastNeuronExecution.output);

                        float nextLayerErrorGradientSum = 0f;
                        for (int nextLayerNeuronIndex = 0; nextLayerNeuronIndex < nextLayer.neuronCount; ++nextLayerNeuronIndex)
                        {
                            Neuron nextLayerNeuron        = nextLayer.GetNeuron(nextLayerNeuronIndex);
                            float  nextLayerErrorGradient = nextLayerNeuronErrorGradients[nextLayerNeuronIndex];
                            nextLayerErrorGradientSum += nextLayerErrorGradient * nextLayerNeuron.GetWeight(neuronIndex);
                        }

                        errorGradient *= nextLayerErrorGradientSum;
                    }

                    neuronErrorGradients.Add(errorGradient);

                    // Iterate over weights and adjust them.

                    for (int weightIndex = 0; weightIndex < neuron.inputCount; ++weightIndex)
                    {
                        float weight = neuron.GetWeight(weightIndex);

                        if (isLastLayer)
                        {
                            // If this is the last layer, update the weight as on a simple perceptron.

                            float error = i_DesiredOutputs[neuronIndex] - lastNeuronExecution.output;
                            weight = weight + i_ANN.alpha * lastNeuronExecution.GetInput(weightIndex) * error;
                        }
                        else
                        {
                            // If this is not the final layer, use error gradient as error.

                            weight = weight + i_ANN.alpha * lastNeuronExecution.GetInput(weightIndex) * errorGradient;
                        }

                        neuron.SetWeight(weightIndex, weight);
                    }

                    // Adjust bias as usual (keeping the learning rate).

                    neuron.bias = neuron.bias + i_ANN.alpha * errorGradient;
                }
            }
        }
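The delta-rule comment in the last-layer branch compresses a lot. As a standalone illustration, the snippet below computes the same gradient for a single sigmoid neuron and applies it to the weights and bias; all names here are illustrative. (Note that AdjustWeights above uses the raw error, not the gradient, for last-layer weight updates, and the gradient for the bias and hidden layers.)

using System;

static class DeltaRuleDemo
{
    static double Sigmoid(double x) => 1.0 / (1.0 + Math.Exp(-x));

    static void Main()
    {
        double[] inputs  = { 0.5, -0.2 };
        double[] weights = { 0.1, 0.4 };
        double   bias    = 0.0;
        double   alpha   = 0.5; // learning rate, like i_ANN.alpha above
        double   desired = 1.0;

        // Forward pass: weighted sum, then sigmoid.
        double sum = bias;
        for (int i = 0; i < inputs.Length; i++)
        {
            sum += inputs[i] * weights[i];
        }
        double output = Sigmoid(sum);

        // Delta rule: gradient = output * (1 - output) * (desired - output).
        double errorGradient = output * (1.0 - output) * (desired - output);

        // Apply the update to each weight and the bias.
        for (int i = 0; i < inputs.Length; i++)
        {
            weights[i] += alpha * inputs[i] * errorGradient;
        }
        bias += alpha * errorGradient;

        Console.WriteLine("output = {0:F4}, gradient = {1:F4}", output, errorGradient);
    }
}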