Code Example #1
        public void collect_weights(
            float[] input,
            float t)
        {        // collect batch weight updates for a single input and scalar target; the updates are applied later by applyBatchWeights()
                 // first pass: forward information propagation
            d_layers[0].process(input);
            for (int l = 1; l < d_layer_count; l++)
            {
                d_layers[l].process(d_layers[l - 1].getOutput());
            }

            // second pass: backward information propagation
            float [] target = { t };
            d_layers[d_layer_count - 1].back_propagate(ToolsMathCollectionFloat.subtract(target, d_layers[d_layer_count - 1].getOutput()));
            for (int l = d_layer_count - 2; l >= 0; l--)
            {
                d_layers[l].back_propagate(d_layers[l + 1].getInputError());
            }

            // third pass: learning.
            // uses the information stored during previous passes
            for (int l = 0; l < d_layer_count; l++)
            {
                d_layers[l].train_batch(d_learning_rate, d_eligibility);
            }
        }
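
A usage note, not part of the original source: collect_weights only accumulates the weight deltas for one example; nothing is applied until each layer's applyBatchWeights() is called, as Code Example #2 does. The sketch below is a hypothetical driver inside the same class; the method name collect_batch and the scalar_targets parameter are assumptions.

        // Hypothetical driver (not in the original source): accumulate deltas for a
        // batch of scalar-target examples via collect_weights, then flush them with
        // applyBatchWeights(), mirroring the structure of train_batch in Code Example #2.
        public void collect_batch(
            float[][] inputs,
            float[] scalar_targets)
        {
            for (int example = 0; example < inputs.Length; example++)
            {
                collect_weights(inputs[example], scalar_targets[example]);
            }
            for (int l = 0; l < d_layer_count; l++)
            {
                d_layers[l].applyBatchWeights();
            }
        }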
Code Example #2
        public void train_batch(
            float[][] inputs,
            float[][] targets)
        {        // train on a data set; batch learning
                 // the outer loop goes through all inputs and makes the layers collect batch weights.
            for (int example = 0; example < inputs.Length; example++)
            {
                // first pass: forward information propagation
                d_layers[0].process(inputs[example]);
                for (int layer_index = 1; layer_index < d_layer_count; layer_index++)
                {
                    d_layers[layer_index].process(d_layers[layer_index - 1].getOutput());
                }

                // second pass: backward information propagation
                d_layers[d_layer_count - 1].back_propagate(ToolsMathCollectionFloat.subtract(targets[example], d_layers[d_layer_count - 1]
                                                                                             .getOutput()));
                for (int layer_index = d_layer_count - 2; layer_index >= 0; layer_index--)
                {
                    d_layers[layer_index].back_propagate(d_layers[layer_index + 1].getInputError());
                }

                // third pass: learning.
                // uses the information stored during previous passes
                for (int layer_index = 0; layer_index < d_layer_count; layer_index++)
                {
                    d_layers[layer_index].train_batch(d_learning_rate, d_eligibility);
                }
            }

            // all input has been parsed; apply the weights gained from this batch.
            for (int layer_index = 0; layer_index < d_layer_count; layer_index++)
            {
                d_layers[layer_index].applyBatchWeights();
            }
        }
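
A hedged usage sketch for train_batch: the network variable, its type, and the XOR-style data below are illustrative assumptions; only the train_batch(float[][], float[][]) signature is taken from the code above.

        // "network" is assumed to be an instance of the class declaring train_batch;
        // the class name and constructor are not shown in the original source.
        float[][] inputs  = { new float[] { 0f, 0f }, new float[] { 0f, 1f },
                              new float[] { 1f, 0f }, new float[] { 1f, 1f } };
        float[][] targets = { new float[] { 0f }, new float[] { 1f },
                              new float[] { 1f }, new float[] { 0f } };

        for (int epoch = 0; epoch < 1000; epoch++)
        {
            network.train_batch(inputs, targets);   // one accumulated weight update per pass over the data
        }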
Code Example #3
        public void train(
            float[] input,
            float[] target)
        {        // train on a single input and target example
                 // first pass: forward information propagation
            d_layers[0].process(input);
            for (int layer_index = 1; layer_index < d_layer_count; layer_index++)
            {
                d_layers[layer_index].process(d_layers[layer_index - 1].getOutput());
            }

            // second pass: backward information propagation
            d_layers[d_layer_count - 1].back_propagate(ToolsMathCollectionFloat.subtract(target, d_layers[d_layer_count - 1].getOutput()));
            for (int layer_index = d_layer_count - 2; layer_index >= 0; layer_index--)
            {
                d_layers[layer_index].back_propagate(d_layers[layer_index + 1].getInputError());
            }

            // third pass: learning.
            // uses the information stored during previous passes
            for (int layer_index = 0; layer_index < d_layer_count; layer_index++)
            {
                d_layers[layer_index].learn(d_learning_rate, d_eligibility);
            }
        }
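
A hedged usage sketch for train, which performs online (per-example) learning: weights change after every call via learn(), unlike train_batch, which accumulates updates and applies them once per batch. The network variable and the data arrays are illustrative assumptions.

        // Online training loop (illustrative): each call to train updates the
        // weights immediately, rather than accumulating batch weights.
        for (int epoch = 0; epoch < 1000; epoch++)
        {
            for (int example = 0; example < inputs.Length; example++)
            {
                network.train(inputs[example], targets[example]);
            }
        }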
Code Example #4
 public TransformRescale(
     float [] lower_bounds,
     float [] upper_bounds)
 {
     this.lower_bounds = ToolsCollection.Copy(lower_bounds);
     this.upper_bounds = ToolsCollection.Copy(upper_bounds);
     window_sizes      = ToolsMathCollectionFloat.subtract(upper_bounds, lower_bounds);
 }
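
The constructor copies both bound arrays and precomputes window_sizes as the element-wise difference upper_bounds - lower_bounds. Below is a minimal construction sketch with illustrative bound values; how the transform later uses window_sizes is not shown above, so that part is an assumption.

 // Illustrative bounds only (not from the original source).
 float[] lower = { 0f, -1f, 10f };
 float[] upper = { 1f,  1f, 20f };
 TransformRescale rescale = new TransformRescale(lower, upper);
 // window_sizes now holds the per-dimension ranges { 1f, 2f, 10f },
 // presumably used when rescaling values between the two bound ranges.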
Code Example #5
 public FunctionColorToFloat32Gray(
     float [] factors)
 {
     if (factors.Length != 4)
     {
         throw new Exception("Factors must be of length 4");
     }
     else
     {
         argb_weigths = ToolsCollection.Copy(factors);
         sum          = ToolsMathCollectionFloat.sum(argb_weigths);
     }
 }
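
The constructor requires exactly four weights, one per ARGB channel, and also stores their sum (presumably for normalizing the weighted average; the conversion method itself is not shown above). A minimal construction sketch follows; the weight values are the standard BT.601 luma coefficients, used here purely as an illustration.

 // Illustrative ARGB weights (not from the original source):
 // alpha is ignored, R/G/B use the BT.601 luma coefficients.
 float[] argb_factors = { 0f, 0.299f, 0.587f, 0.114f };
 FunctionColorToFloat32Gray to_gray = new FunctionColorToFloat32Gray(argb_factors);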