Code example #1
        /// <summary>
        /// Construct a gradient worker.
        /// </summary>
        ///
        /// <param name="network">The network to train.</param>
        /// <param name="owner">The owner that is doing the training.</param>
        /// <param name="training">The training data.</param>
        /// <param name="low">The low index to use in the training data.</param>
        /// <param name="high">The high index to use in the training data.</param>
        public GradientWorkerCPU(FlatNetwork network,
                                 TrainFlatNetworkProp owner,
                                 IEngineIndexableSet training, int low, int high)
        {
            this.errorCalculation = new ErrorCalculation();
            this.network          = network;
            this.training         = training;
            this.low   = low;
            this.high  = high;
            this.owner = owner;

            this.stopwatch = new Stopwatch();

            this.layerDelta = new double[network.LayerOutput.Length];
            this.gradients  = new double[network.Weights.Length];
            this.actual     = new double[network.OutputCount];

            this.weights         = network.Weights;
            this.layerIndex      = network.LayerIndex;
            this.layerCounts     = network.LayerCounts;
            this.weightIndex     = network.WeightIndex;
            this.layerOutput     = network.LayerOutput;
            this.layerFeedCounts = network.LayerFeedCounts;

            this.pair = BasicEngineData.CreatePair(network.InputCount,
                                                   network.OutputCount);
        }
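A minimal usage sketch, not part of the original listing: the owner typically partitions the indexable training set into contiguous [low, high] ranges, one per worker thread. Here `network`, `owner` and `training` are assumed to already exist, and `threadCount` is a hypothetical value chosen purely for illustration.

        // Sketch: split training.Count records into threadCount contiguous ranges
        // and build one GradientWorkerCPU per range (assumed variables noted above).
        int threadCount = 4; // hypothetical
        int perWorker = (int)(training.Count / threadCount);
        var workers = new GradientWorkerCPU[threadCount];

        for (int i = 0; i < threadCount; i++)
        {
            int low = i * perWorker;
            // the last worker absorbs any remainder so every record is covered
            int high = (i == threadCount - 1)
                ? (int)training.Count - 1
                : low + perWorker - 1;
            workers[i] = new GradientWorkerCPU(network, owner, training, low, high);
        }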
Code example #2
        /// <summary>
        /// Calculate the error for this neural network. The error is
        /// calculated using root mean square (RMS).
        /// </summary>
        ///
        /// <param name="data">The training set.</param>
        /// <returns>The RMS error for the training set.</returns>
        public double CalculateError(IEngineIndexableSet data)
        {
            ErrorCalculation errorCalculation = new ErrorCalculation();

            double[]    actual = new double[this.outputCount];
            IEngineData pair   = BasicEngineData.CreatePair(data.InputSize,
                                                            data.IdealSize);

            for (int i = 0; i < data.Count; i++)
            {
                data.GetRecord(i, pair);
                Compute(pair.InputArray, actual);
                errorCalculation.UpdateError(actual, pair.IdealArray);
            }
            return(errorCalculation.Calculate());
        }
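For reference, a self-contained illustration of the RMS accumulation that the loop above performs through ErrorCalculation. It mirrors only the general idea, not the library class's actual implementation; the class name RmsAccumulator is invented for this example and it relies only on System.Math.

        // Illustration only: accumulates squared differences and returns the
        // root mean square over all values seen so far.
        public class RmsAccumulator
        {
            private double sumSquared;
            private int count;

            public void Update(double[] actual, double[] ideal)
            {
                for (int i = 0; i < actual.Length; i++)
                {
                    double diff = ideal[i] - actual[i];
                    sumSquared += diff * diff;
                    count++;
                }
            }

            public double Calculate()
            {
                return count == 0 ? 0.0 : System.Math.Sqrt(sumSquared / count);
            }
        }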
Code example #3
        /// <summary>
        /// Construct a kernel to train the network.
        /// </summary>
        ///
        /// <param name="device">The OpenCL device to use.</param>
        /// <param name="flat">The network to train.</param>
        /// <param name="training">The training data.</param>
        /// <param name="tempDataSize">How much temp data.</param>
        public KernelNetworkTrain(EncogCLDevice device,
                                  FlatNetwork flat, IEngineIndexableSet training,
                                  int tempDataSize)
            : base(device, "Encog.Engine.Resources.KernelNetTrain.txt", "NetworkTrain")
        {
            this.training       = training;
            this.trainingLength = (int)this.training.Count;
            this.device         = device;
            this.flat           = flat;
            this.weightInArray  = new float[flat.Weights.Length];
            this.weightOutArray = new float[flat.Weights.Length];
            this.tempDataArray  = new float[tempDataSize];
            this.gradients      = new float[flat.Weights.Length];

            this.layerDeltaSize = 0;
            for (int i = 0; i < flat.LayerCounts.Length; i++)
            {
                this.layerDeltaSize += flat.LayerCounts[i];
            }

            int inputSize = flat.InputCount;
            int idealSize = flat.OutputCount;

            this.inputArray = new float[inputSize * this.trainingLength];
            this.idealArray = new float[idealSize * this.trainingLength];
            this.paramArray = new int[10];

            IEngineData pair = BasicEngineData.CreatePair(
                flat.InputCount, flat.OutputCount);

            int inputIndex = 0;
            int idealIndex = 0;

            for (int i = 0; i < this.trainingLength; i++)
            {
                training.GetRecord(i, pair);
                for (int col = 0; col < flat.InputCount; col++)
                {
                    this.inputArray[inputIndex++] = (float)pair.InputArray[col];
                }

                for (int col = 0; col < flat.OutputCount; col++)
                {
                    this.idealArray[idealIndex++] = (float)pair.IdealArray[col];
                }
            }
        }
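The loop above flattens the training records into two contiguous float arrays for transfer to the OpenCL device. A hypothetical helper, not in the original source, makes the resulting layout explicit: record i, column col of the input ends up at index i * inputSize + col of the flattened input array (and likewise for the ideal array with idealSize).

        // Hypothetical helper illustrating the flattened layout built above.
        private static float GetFlattenedInput(float[] inputArray, int inputSize,
                                               int record, int col)
        {
            return inputArray[record * inputSize + col];
        }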
Code example #4
        /// <summary>
        /// Construct a trainer that trains a flat network using multiple threads.
        /// </summary>
        ///
        /// <param name="network">The network to train.</param>
        /// <param name="training">The training data to use.</param>
        public TrainFlatNetworkProp(FlatNetwork network,
                                    IEngineDataSet training)
        {
            if (!(training is IEngineIndexableSet))
            {
                throw new EncogEngineError(
                          "Training data must be Indexable for this training type.");
            }

            this.training = training;
            this.network  = network;

            this.gradients    = new double[this.network.Weights.Length];
            this.lastGradient = new double[this.network.Weights.Length];

            this.indexable         = (IEngineIndexableSet)training;
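            // 0 lets the trainer choose the thread count automatically
            // (library convention; an assumption, not stated in this listing).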
            this.numThreads        = 0;
            this.reportedException = null;
        }
Code example #5
        /// <summary>
        /// Construct a trainer that trains a flat network using OpenCL.
        /// </summary>
        ///
        /// <param name="network">The network to train.</param>
        /// <param name="training">The training data to use.</param>
        /// <param name="profile">The OpenCL training profile.</param>
        public TrainFlatNetworkOpenCL(FlatNetwork network,
                                      IEngineDataSet training, OpenCLTrainingProfile profile)
        {
            (new ValidateForOpenCL()).Validate(network);

            if (!(training is IEngineIndexableSet))
            {
                throw new EncogEngineError(
                          "Training data must be Indexable for this training type.");
            }

            if (EncogEngine.Instance.CL == null)
            {
                throw new EncogEngineError(
                          "You must enable OpenCL before using this training type.");
            }

            this.profile  = profile;
            this.network  = network;
            this.training = (IEngineIndexableSet)training;
        }
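A minimal caller-side sketch, not from the original listing: the constructor requires that OpenCL has already been enabled, i.e. EncogEngine.Instance.CL is non-null. The InitCL() call below is an assumed initialization method, and `network`, `trainingData` and `profile` are assumed to exist.

        // Precondition sketch (assumptions noted in the text above).
        if (EncogEngine.Instance.CL == null)
        {
            EncogEngine.Instance.InitCL(); // assumed initialization call
        }
        var trainer = new TrainFlatNetworkOpenCL(network, trainingData, profile);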
Code example #6
        /// <summary>
        /// Calculate the OpenCL kernel workgroup parameters.
        /// </summary>
        ///
        /// <param name="kernel">The kernel to calculate for.</param>
        /// <param name="training">The training data to size the workgroups against.</param>
        public void CalculateKernelParams(EncogKernel kernel,
                IEngineIndexableSet training)
        {
            bool globalValuesAssigned = false;
            int workPerIteration;

            // there are two special cases

            // first, if the ratio is 1.0
            if (Math.Abs(this.segmentationRatio - 1.0d) < EncogEngine.DEFAULT_ZERO_TOLERANCE)
            {
                // if the segmentation ratio is 1, then we want NO SEGMENTATION:
                // we must find a workgroup size that divides the training count evenly
                int trialLocalSize = (int)Math.Min(kernel.MaxWorkGroupSize, training.Count);

                // add one so that the first pass of the loop can decrement it
                // back to the starting value with no effect.
                trialLocalSize++;

                // loop, looking for a local size that divides the training count evenly.
                do
                {
                    trialLocalSize--;
                    this.kernelLocalWorkgroup = (int)(trialLocalSize * this.localRatio);
                    this.kernelGlobalWorkgroup = (int)(this.kernelLocalWorkgroup * this.globalRatio);
                    this.kernelWorkPerCall = (int)((training.Count / this.kernelGlobalWorkgroup) * this.segmentationRatio);
                    workPerIteration = this.kernelGlobalWorkgroup
                            * this.kernelWorkPerCall;
                } while ((workPerIteration != training.Count)
                        && trialLocalSize > 1);

                if (trialLocalSize > 0)
                    globalValuesAssigned = true;
            }

            // if we either wanted to segment, or the attempt to find an even group
            // size above failed
            if (!globalValuesAssigned)
            {
                // otherwise divide into segments
                int maxLocalSize = (int)Math.Min(kernel.MaxWorkGroupSize, training.Count);
                this.kernelLocalWorkgroup = (int)(maxLocalSize * this.localRatio);
                this.kernelGlobalWorkgroup = (int)(this.kernelLocalWorkgroup * this.globalRatio);

                // second special case, if the segmentation ratio is zero, then just
                // do one item per OpenCL call
                if (this.segmentationRatio < EncogEngine.DEFAULT_ZERO_TOLERANCE)
                {
                    this.kernelWorkPerCall = 1;
                }
                else
                {
                    this.kernelWorkPerCall = (int)((training.Count / this.kernelGlobalWorkgroup) * this.segmentationRatio);
                    if (this.kernelWorkPerCall == 0)
                    {
                        this.kernelWorkPerCall = 1;
                    }
                }
            }

            workPerIteration = this.kernelGlobalWorkgroup * this.kernelWorkPerCall;

            this.kernelNumberOfCalls = (int)(training.Count / workPerIteration);
            this.kernelRemainder = (int)(training.Count % workPerIteration);

            this.kernelRemainderGlobal = this.kernelGlobalWorkgroup;

            // if there is no "final training set", because it lined up evenly,
            // still create one.
            // the final training set is where learning happens.
            if (this.kernelRemainder == 0)
            {
                this.kernelRemainder = this.kernelGlobalWorkgroup;
                this.kernelRemainderPer = this.kernelWorkPerCall;
                this.kernelNumberOfCalls--;
            }
            else
                this.kernelRemainderPer = this.kernelRemainder
                        / this.kernelGlobalWorkgroup;

            // if the remainder has fewer items than the global workgroup size,
            // give each work-item one item and shrink the global size to match.
            if (this.kernelRemainderPer == 0)
            {
                this.kernelRemainderPer = 1;
                this.kernelRemainderGlobal = this.kernelRemainder;
            }
        }
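As a worked example of the non-segmented branch (the numbers are illustrative, not from the original source): with training.Count = 10000, kernel.MaxWorkGroupSize = 256 and all three ratios equal to 1.0, the loop decrements trialLocalSize from 256 down to 250, the largest value not exceeding 256 that divides 10000 evenly. That yields kernelLocalWorkgroup = kernelGlobalWorkgroup = 250, kernelWorkPerCall = 40 and workPerIteration = 10000, so kernelNumberOfCalls = 1 with kernelRemainder = 0. The final branch then converts that single full pass into the remainder pass (kernelRemainder = 250, kernelRemainderPer = 40, kernelNumberOfCalls = 0), which is where the learning step runs.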