/// <summary>
/// Compute the output from this synapse.
/// </summary>
/// <param name="input">The input to this synapse.</param>
/// <returns>The output from this synapse.</returns>
public IMLData Compute(IMLData input)
{
    var result = new double[_outputCount];

    // clear results from the previous computation
    EngineArray.Fill(_preActivation, 0.0);
    EngineArray.Fill(_postActivation, 0.0);
    _postActivation[0] = 1.0;

    // copy the input, skipping the bias neuron at index 0
    for (int i = 0; i < _inputCount; i++)
    {
        _postActivation[i + 1] = input[i];
    }

    // iterate through the network ActivationCycles times
    for (int i = 0; i < ActivationCycles; ++i)
    {
        InternalCompute();
    }

    // copy the output
    for (int i = 0; i < _outputCount; i++)
    {
        result[i] = _postActivation[_outputIndex + i];
    }

    return new BasicMLData(result);
}
/// <summary>
/// Estimate the gamma values (per-state occupation probabilities) from the
/// xi values (joint transition probabilities).
/// </summary>
/// <param name="xi">The xi values, indexed by time, source state and destination state.</param>
/// <param name="fbc">The forward-backward calculator (not used by this estimate).</param>
/// <returns>The estimated gamma values.</returns>
protected double[][] EstimateGamma(double[][][] xi,
    ForwardBackwardCalculator fbc)
{
    double[][] gamma = EngineArray.AllocateDouble2D(xi.Length + 1, xi[0].Length);

    for (int t = 0; t < (xi.Length + 1); t++)
    {
        EngineArray.Fill(gamma[t], 0.0);
    }

    // gamma[t][i] is the sum of xi[t][i][j] over all destination states j
    for (int t = 0; t < xi.Length; t++)
    {
        for (int i = 0; i < xi[0].Length; i++)
        {
            for (int j = 0; j < xi[0].Length; j++)
            {
                gamma[t][i] += xi[t][i][j];
            }
        }
    }

    // the last time step has no outgoing transition; sum the incoming
    // transitions of the final xi slice instead
    for (int j = 0; j < xi[0].Length; j++)
    {
        for (int i = 0; i < xi[0].Length; i++)
        {
            gamma[xi.Length][j] += xi[xi.Length - 1][i][j];
        }
    }

    return gamma;
}
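// For reference, the loops above implement the standard relation between the
// joint transition probabilities xi and the state occupation probabilities
// gamma (standard Rabiner HMM notation, not defined in this file):
//
//   \gamma_t(i) = \sum_{j=1}^{N} \xi_t(i,j),   1 <= t <= T-1
//
// with the extra final row recovered from the incoming transitions:
//
//   \gamma_T(j) = \sum_{i=1}^{N} \xi_{T-1}(i,j)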
/// <summary>
/// Create an array of doubles to hold the parameters of the specified flat network.
/// </summary>
/// <param name="flat">The flat network to use as a model.</param>
/// <returns>The new array.</returns>
public double[] CreateParams(FlatNetwork flat)
{
    var result = new double[flat.ActivationFunctions.Length];
    EngineArray.Fill(result, 1);
    return result;
}
/// <summary>
/// Construct a Nelder-Mead trainer with a definable step.
/// </summary>
/// <param name="network">The network to train.</param>
/// <param name="training">The training data to use.</param>
/// <param name="stepValue">The step value. This value defines, to some degree,
/// the range of different weights that will be tried.</param>
public NelderMeadTraining(BasicNetwork network, IMLDataSet training,
    double stepValue)
    : base(TrainingImplementationType.OnePass)
{
    _network = network;
    Training = training;

    _start = NetworkCODEC.NetworkToArray(network);
    _trainedWeights = NetworkCODEC.NetworkToArray(network);

    int n = _start.Length;

    _p = new double[n * (n + 1)];
    _pstar = new double[n];
    _p2Star = new double[n];
    _pbar = new double[n];
    _y = new double[n + 1];

    _nn = n + 1;
    _del = 1.0;
    _rq = EncogFramework.DefaultDoubleEqual * n;

    _step = new double[NetworkCODEC.NetworkSize(network)];
    _jcount = _konvge = 500;
    EngineArray.Fill(_step, stepValue);
}
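// A minimal usage sketch for the trainer above. This helper method is
// hypothetical, not part of the library; it assumes the standard Encog types
// BasicNetwork, BasicLayer, ActivationSigmoid, BasicMLDataSet and IMLDataSet
// (namespaces vary by Encog version) plus the usual System usings. Because
// the constructor declares TrainingImplementationType.OnePass, a single
// Iteration() call is assumed to run the complete simplex search.
public static void TrainXorWithNelderMead()
{
    double[][] xorInput = { new[] { 0.0, 0.0 }, new[] { 0.0, 1.0 }, new[] { 1.0, 0.0 }, new[] { 1.0, 1.0 } };
    double[][] xorIdeal = { new[] { 0.0 }, new[] { 1.0 }, new[] { 1.0 }, new[] { 0.0 } };
    IMLDataSet trainingSet = new BasicMLDataSet(xorInput, xorIdeal);

    // build a 2-4-1 feedforward network with bias neurons
    var network = new BasicNetwork();
    network.AddLayer(new BasicLayer(null, true, 2));
    network.AddLayer(new BasicLayer(new ActivationSigmoid(), true, 4));
    network.AddLayer(new BasicLayer(new ActivationSigmoid(), false, 1));
    network.Structure.FinalizeStructure();
    network.Reset();

    // train and report the final error
    var train = new NelderMeadTraining(network, trainingSet, 100.0);
    train.Iteration();
    Console.WriteLine(@"Error after training: " + train.Error);
}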
/// <summary>
/// Compute the Jacobian rows, gradient and Hessian contributions for a
/// single output neuron, over every training element.
/// </summary>
/// <param name="outputNeuron">The output neuron to compute for.</param>
private void InternalCompute(int outputNeuron)
{
    int row = 0;
    var error = new ErrorCalculation();
    var derivative = new double[_weightCount];

    // Loop over every training element
    foreach (IMLDataPair pair in _training)
    {
        EngineArray.Fill(derivative, 0);
        IMLData networkOutput = _network.Compute(pair.Input);
        double e = pair.Ideal[outputNeuron] - networkOutput[outputNeuron];
        error.UpdateError(networkOutput[outputNeuron], pair.Ideal[outputNeuron]);

        int currentWeight = 0;

        // loop over the output weights
        int outputFeedCount = _network.GetLayerTotalNeuronCount(_network.LayerCount - 2);
        for (int i = 0; i < _network.OutputCount; i++)
        {
            for (int j = 0; j < outputFeedCount; j++)
            {
                double jc;

                if (i == outputNeuron)
                {
                    jc = ComputeDerivative(pair.Input, outputNeuron,
                        currentWeight, _dStep,
                        networkOutput[outputNeuron], row);
                }
                else
                {
                    // weights feeding other output neurons do not affect this one
                    jc = 0;
                }

                _gradients[currentWeight] += jc * e;
                derivative[currentWeight] = jc;
                currentWeight++;
            }
        }

        // Loop over every remaining weight in the neural network
        while (currentWeight < _network.Flat.Weights.Length)
        {
            double jc = ComputeDerivative(
                pair.Input, outputNeuron, currentWeight,
                _dStep, networkOutput[outputNeuron], row);
            derivative[currentWeight] = jc;
            _gradients[currentWeight] += jc * e;
            currentWeight++;
        }

        row++;
        UpdateHessian(derivative);
    }

    _sse += error.CalculateSSE();
}
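// For context (standard Levenberg-Marquardt notation, not from this file):
// each "derivative" array filled above is one row of the Jacobian J, taken by
// finite differences, and the method accumulates
//
//   g = J^T e          (the gradient, summed into _gradients)
//   H \approx J^T J    (the Gauss-Newton Hessian approximation, via UpdateHessian)
//
// so a later step can solve (H + \lambda I) \Delta w = g for the weight update.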
/// <inheritdoc/>
public void Fit(IMLDataSet co)
{
    var weights = new double[co.Count];
    EngineArray.Fill(weights, 1.0 / co.Count);
    Fit(co, weights);
}
/// <summary>
/// Compute the gradients for a single training pair and report them to the owner.
/// </summary>
/// <param name="index">The index of the training pair to process.</param>
public void Run(int index)
{
    IMLDataPair pair = _training[index];
    Process(pair);

    _owner.Report(_gradients, 0, null);
    EngineArray.Fill(_gradients, 0);
}
/// <summary>
/// Finalize the structure of this Bayesian network.
/// </summary>
public void FinalizeStructure()
{
    foreach (BayesianEvent e in _eventMap.Values)
    {
        e.FinalizeStructure();
    }

    if (Query != null)
    {
        Query.FinalizeStructure();
    }

    _inputPresent = new bool[_events.Count];
    EngineArray.Fill(_inputPresent, true);
    _classificationTarget = -1;
}
/// <inheritdoc/>
public void Run()
{
    _error = 0;
    EngineArray.Fill(_totDeriv, 0);
    EngineArray.Fill(_gradients, 0);

    // Loop over every training element
    for (int i = _low; i <= _high; i++)
    {
        _training.GetRecord(i, _pair);
        EngineArray.Fill(_derivative, 0);
        Process(_outputNeuron, _pair.InputArray, _pair.IdealArray);
    }
}
/// <inheritdoc/>
public void Run()
{
    _error = 0;
    EngineArray.Fill(_totDeriv, 0);
    EngineArray.Fill(_gradients, 0);

    // Loop over every training element
    for (int i = _low; i <= _high; i++)
    {
        var pair = _training[i];
        EngineArray.Fill(_derivative, 0);
        Process(_outputNeuron, pair);
    }
}
/// <inheritdoc />
public void Run()
{
    _error = 0;
    EngineArray.Fill(_hessian, 0);
    EngineArray.Fill(_totDeriv, 0);
    EngineArray.Fill(_gradients, 0);

    var derivative = new double[_weightCount];

    // Loop over every training element
    for (int i = _low; i <= _high; i++)
    {
        IMLDataPair pair = _training[i];
        EngineArray.Fill(derivative, 0);
        Process(_outputNeuron, derivative, pair);
    }
}
/// <summary>
/// Fit this distribution to the specified data, with weights.
/// </summary>
/// <param name="co">The data to fit to.</param>
/// <param name="weights">The weights.</param>
public void Fit(IMLDataSet co, double[] weights)
{
    if ((co.Count < 1) || (co.Count != weights.Length))
    {
        throw new EncogError("Invalid data size or weight size.");
    }

    for (int i = 0; i < _probabilities.Length; i++)
    {
        EngineArray.Fill(_probabilities[i], 0.0);

        // accumulate each observation's weight into the bin for its value
        int j = 0;
        foreach (IMLDataPair o in co)
        {
            _probabilities[i][(int)o.Input[i]] += weights[j++];
        }
    }
}
/// <summary>
/// Init the process.
/// </summary>
private void Init()
{
    // fix flat spot, if needed
    _flatSpot = new double[_flat.ActivationFunctions.Length];

    if (FixFlatSpot)
    {
        for (int i = 0; i < _flat.ActivationFunctions.Length; i++)
        {
            IActivationFunction af = _flat.ActivationFunctions[i];

            if (af is ActivationSigmoid)
            {
                _flatSpot[i] = 0.1;
            }
            else
            {
                _flatSpot[i] = 0.0;
            }
        }
    }
    else
    {
        EngineArray.Fill(_flatSpot, 0.0);
    }

    var determine = new DetermineWorkload(_numThreads, (int)_indexable.Count);

    _workers = new GradientWorker[determine.ThreadCount];

    int index = 0;

    // handle CPU
    foreach (IntRange r in determine.CalculateWorkers())
    {
        _workers[index++] = new GradientWorker(
            ((FlatNetwork)_network.Flat.Clone()),
            this, _indexable.OpenAdditional(), r.Low, r.High,
            _flatSpot, ErrorFunction);
    }

    InitOthers();
}
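// Background on the 0.1 constant above: the sigmoid derivative
// f'(x) = f(x)(1 - f(x)) approaches zero as a neuron saturates, which stalls
// backpropagation (the "flat spot"). Adding a small constant to the computed
// derivative, effectively using f'(x) + 0.1 for sigmoid layers, is Fahlman's
// classic flat-spot fix and keeps a usable gradient even for saturated
// neurons. (The constant 0.1 comes from the code above; the rest is standard
// neural-network background, not taken from this file.)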
/// <summary>
/// Perform the gradient calculation for the specified index range.
/// </summary>
public void Run()
{
    try
    {
        _errorCalculation.Reset();

        for (int i = _low; i <= _high; i++)
        {
            _training.GetRecord(i, _pair);
            Process(_pair.InputArray, _pair.IdealArray, _pair.Significance);
        }

        double error = _errorCalculation.Calculate();
        _owner.Report(_gradients, error, null);
        EngineArray.Fill(_gradients, 0);
    }
    catch (Exception ex)
    {
        _owner.Report(null, 0, ex);
    }
}
/// <summary>
/// Perform the gradient calculation for the specified index range.
/// </summary>
public void Run()
{
    try
    {
        _errorCalculation.Reset();

        for (int i = _low; i <= _high; i++)
        {
            var pair = _training[i];
            Process(pair);
        }

        double error = _errorCalculation.Calculate();
        _owner.Report(_gradients, error, null);
        EngineArray.Fill(_gradients, 0);
    }
    catch (Exception ex)
    {
        _owner.Report(null, 0, ex);
    }
}
/// <summary>
/// Construct the calculator.
/// </summary>
/// <param name="seq">The sequence.</param>
/// <param name="hmm">The HMM.</param>
/// <param name="doAlpha">Should alpha be calculated.</param>
/// <param name="doBeta">Should beta be calculated.</param>
public ForwardBackwardScaledCalculator(
    IMLDataSet seq, HiddenMarkovModel hmm,
    bool doAlpha, bool doBeta)
{
    if (seq.Count < 1)
    {
        throw new EncogError("Count cannot be less than one.");
    }

    _ctFactors = new double[seq.Count];
    EngineArray.Fill(_ctFactors, 0.0);

    ComputeAlpha(hmm, seq);

    if (doBeta)
    {
        ComputeBeta(hmm, seq);
    }

    ComputeProbability(seq, hmm, doAlpha, doBeta);
}
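// For reference (standard scaled forward-backward notation, not defined in
// this file): the factors stored in _ctFactors prevent the raw forward
// probabilities from underflowing on long sequences. With
//
//   c_t = \sum_{i=1}^{N} \alpha_t(i)   and   \hat{\alpha}_t(i) = \alpha_t(i) / c_t
//
// the scaled alphas stay in a numerically safe range, and the sequence
// likelihood is recovered in log space as
//
//   \ln P(O | \lambda) = \sum_{t=1}^{T} \ln c_t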
/// <summary>
/// Apply the update rule to the weights, record the error, and reset the
/// accumulated gradients for the next iteration.
/// </summary>
public void Update()
{
    if (IterationNumber == 0)
    {
        UpdateRule.Init(this);
    }

    PreIteration();

    UpdateRule.Update(_gradients, _flat.Weights);
    Error = _errorCalculation.Calculate();

    PostIteration();

    EngineArray.Fill(_gradients, 0);
    _errorCalculation.Reset();

    if (Training is BatchDataSet)
    {
        ((BatchDataSet)Training).Advance();
    }
}
/// <summary>
/// Perform the gradient calculation for the specified index range.
/// </summary>
public virtual void Run()
{
    try
    {
        this.stopwatch.Reset();
        this.stopwatch.Start();
        this.errorCalculation.Reset();

        for (int i = this.low; i <= this.high; i++)
        {
            this.training.GetRecord(i, this.pair);
            Process(this.pair.InputArray, this.pair.IdealArray);
        }

        double error = this.errorCalculation.Calculate();
        this.owner.Report(this.gradients, error, null);
        EngineArray.Fill(this.gradients, 0);

        this.stopwatch.Stop();
        this.elapsedTime = this.stopwatch.ElapsedTicks;
    }
    catch (Exception ex)
    {
        this.owner.Report(null, 0, ex);
    }
}
/// <summary>
/// Perform one Baum-Welch iteration over all training sequences,
/// re-estimating the transition probabilities, the initial state
/// probabilities (pi) and the per-state output distributions (pdfs).
/// </summary>
public void Iteration()
{
    HiddenMarkovModel nhmm = _method.Clone();

    var allGamma = new double[_training.SequenceCount][][];
    double[][] aijNum = EngineArray.AllocateDouble2D(_method.StateCount, _method.StateCount);
    var aijDen = new double[_method.StateCount];

    EngineArray.Fill(aijDen, 0.0);
    for (int i = 0; i < _method.StateCount; i++)
    {
        EngineArray.Fill(aijNum[i], 0.0);
    }

    // accumulate xi (transition) and gamma (occupation) statistics
    int g = 0;
    foreach (IMLDataSet obsSeq in _training.Sequences)
    {
        ForwardBackwardCalculator fbc = GenerateForwardBackwardCalculator(obsSeq, _method);

        double[][][] xi = EstimateXi(obsSeq, fbc, _method);
        double[][] gamma = allGamma[g++] = EstimateGamma(xi, fbc);

        for (int i = 0; i < _method.StateCount; i++)
        {
            for (int t = 0; t < (obsSeq.Count - 1); t++)
            {
                aijDen[i] += gamma[t][i];

                for (int j = 0; j < _method.StateCount; j++)
                {
                    aijNum[i][j] += xi[t][i][j];
                }
            }
        }
    }

    // re-estimate the transition matrix
    for (int i = 0; i < _method.StateCount; i++)
    {
        if (aijDen[i] == 0.0)
        {
            // state i was never visited; keep its old transition row
            for (int j = 0; j < _method.StateCount; j++)
            {
                nhmm.TransitionProbability[i][j] = _method.TransitionProbability[i][j];
            }
        }
        else
        {
            for (int j = 0; j < _method.StateCount; j++)
            {
                nhmm.TransitionProbability[i][j] = aijNum[i][j] / aijDen[i];
            }
        }
    }

    // compute pi
    for (int i = 0; i < _method.StateCount; i++)
    {
        nhmm.Pi[i] = 0.0;
    }

    for (int o = 0; o < _training.SequenceCount; o++)
    {
        for (int i = 0; i < _method.StateCount; i++)
        {
            nhmm.Pi[i] += (allGamma[o][0][i] / _training.SequenceCount);
        }
    }

    // compute pdfs
    for (int i = 0; i < _method.StateCount; i++)
    {
        var weights = new double[_training.Count];
        double sum = 0.0;
        int j = 0;

        int o = 0;
        foreach (IMLDataSet obsSeq in _training.Sequences)
        {
            for (int t = 0; t < obsSeq.Count; t++, j++)
            {
                sum += weights[j] = allGamma[o][t][i];
            }
            o++;
        }

        // normalize the weights so they sum to one
        for (j--; j >= 0; j--)
        {
            weights[j] /= sum;
        }

        IStateDistribution opdf = nhmm.StateDistributions[i];
        opdf.Fit(_training, weights);
    }

    _method = nhmm;
    Iterations++;
}
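// The updates above are the standard Baum-Welch re-estimation formulas
// (standard Rabiner notation, not defined in this file):
//
//   \hat{a}_{ij} = \frac{\sum_{t=1}^{T-1} \xi_t(i,j)}{\sum_{t=1}^{T-1} \gamma_t(i)}
//
//   \hat{\pi}_i = \frac{1}{K} \sum_{k=1}^{K} \gamma_1^{(k)}(i)   (averaged over the K training sequences)
//
// and each state's output distribution is refit to every observation,
// weighted by that state's normalized occupation probabilities \gamma_t(i).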
/// <summary>
/// Calculate one iteration over the specified range.
/// </summary>
/// <param name="start">The starting position to calculate for.</param>
/// <param name="size">The number of training items to calculate for.</param>
/// <param name="learn">True, if we should learn.</param>
/// <param name="iterations">The number of iterations to execute.</param>
public void Calculate(int start, int size, bool learn, int iterations)
{
    PrepareKernel();

    this.paramArray[KernelNetworkTrain.PARRAY_LEARN] = (learn) ? 1 : 0;
    this.paramArray[KernelNetworkTrain.PARRAY_START] = start;
    this.paramArray[KernelNetworkTrain.PARRAY_ITEMS_PER] = size;
    this.paramArray[KernelNetworkTrain.PARRAY_ITERATIONS] = iterations;

    EngineArray.ArrayCopy(this.flat.Weights, this.weightInArray);

    this.Kernel.SetMemoryArgument(0, this.paramBuffer);
    this.Kernel.SetMemoryArgument(1, this.errorBuffer);
    this.Kernel.SetMemoryArgument(2, this.layerIndexBuffer);
    this.Kernel.SetMemoryArgument(3, this.layerCountBuffer);
    this.Kernel.SetMemoryArgument(4, this.layerFeedCountBuffer);
    this.Kernel.SetMemoryArgument(5, this.weightIndexBuffer);
    this.Kernel.SetMemoryArgument(6, this.inputBuffer);
    this.Kernel.SetMemoryArgument(7, this.idealBuffer);
    this.Kernel.SetMemoryArgument(8, this.weightInArrayBuffer);
    this.Kernel.SetMemoryArgument(9, this.weightOutArrayBuffer);
    this.Kernel.SetMemoryArgument(10, this.gradientOutBuffer);
    this.Kernel.SetMemoryArgument(11, this.activationTypeBuffer);
    this.Kernel.SetMemoryArgument(12, this.tempDataInBuffer);
    this.Kernel.SetMemoryArgument(13, this.tempDataOutBuffer);
    this.Kernel.SetMemoryArgument(14, this.gradientInBuffer);

    try
    {
        EncogCLQueue queue = this.device.Queue;

        EngineArray.Fill(this.gradients, 0);

        if (learn)
        {
            this.paramArray[3] = 1;
        }
        else
        {
            this.paramArray[3] = 0;
        }

        this.paramArray[4] = start;

        // copy the host arrays to the device
        queue.Array2Buffer(this.weightInArray, this.weightInArrayBuffer);
        queue.Array2Buffer(this.tempDataArray, this.tempDataInBuffer);
        queue.Array2Buffer(this.gradients, this.gradientInBuffer);
        queue.Array2Buffer(this.paramArray, this.paramBuffer);

        // Execute the kernel
        queue.Execute(this);
        queue.WaitFinish();

        // Read the results back from the device
        queue.Buffer2Array(this.errorBuffer, this.errors);
        queue.Buffer2Array(this.weightOutArrayBuffer, this.weightOutArray);
        queue.Buffer2Array(this.tempDataOutBuffer, this.tempDataArray);
        queue.Buffer2Array(this.gradientOutBuffer, this.gradients);
    }
    catch (Cloo.ComputeException ex)
    {
        if (ex.Message.IndexOf("OutOfResources") != -1)
        {
            throw new OutOfOpenCLResources(ex);
        }
        else
        {
            throw new OpenCLError(ex);
        }
    }
    catch (Exception ex)
    {
        throw new OpenCLError(ex);
    }
}
/// <summary>
/// Clear to zero.
/// </summary>
public void Clear()
{
    EngineArray.Fill(_data, 0);
}
/// <summary>
/// Clear the current state and the connection weights.
/// </summary>
/// <param name="seed">The seed value (not used by this implementation).</param>
public void Reset(int seed)
{
    CurrentState.Clear();
    EngineArray.Fill(_weights, 0.0d);
}
/// <inheritdoc/>
public void Clear()
{
    EngineArray.Fill(_gradients, 0);
    _hessianMatrix.Clear();
}
/// <summary>
/// Clear any connection weights.
/// </summary>
public void Clear()
{
    EngineArray.Fill(_weights, 0);
}
/// <summary>
/// Compute the output from this synapse.
/// </summary>
/// <param name="input">The input to this synapse.</param>
/// <returns>The output from this synapse.</returns>
public virtual IMLData Compute(IMLData input)
{
    var result = new double[_outputCount];

    if (_neurons.Count == 0)
    {
        throw new NeuralNetworkError(
            "This network has not been evolved yet, it has no neurons in the NEAT synapse.");
    }

    int flushCount = 1;

    if (_snapshot)
    {
        flushCount = _networkDepth;
    }

    // iterate through the network flushCount times
    for (int i = 0; i < flushCount; ++i)
    {
        int outputIndex = 0;
        int index = 0;

        EngineArray.Fill(result, 0);

        // populate the input neurons
        while (_neurons[index].NeuronType == NEATNeuronType.Input)
        {
            _neurons[index].Output = input[index];
            index++;
        }

        // set the bias neuron
        _neurons[index++].Output = 1;

        while (index < _neurons.Count)
        {
            NEATNeuron currentNeuron = _neurons[index];

            // sum the weighted outputs of the inbound links
            double sum = 0;
            foreach (NEATLink link in currentNeuron.InboundLinks)
            {
                double weight = link.Weight;
                double neuronOutput = link.FromNeuron.Output;
                sum += weight * neuronOutput;
            }

            var d = new double[1];
            d[0] = sum / currentNeuron.ActivationResponse;
            _activationFunction.ActivationFunction(d, 0, d.Length);

            _neurons[index].Output = d[0];

            if (currentNeuron.NeuronType == NEATNeuronType.Output)
            {
                result[outputIndex++] = currentNeuron.Output;
            }
            index++;
        }
    }

    _outputActivationFunction.ActivationFunction(result, 0, result.Length);

    return new BasicMLData(result, false);
}