public SlimRnnNeuronWithWeight(SlimRnnNeuron source, SlimRnnNeuron target, float weight, uint weightIndex, bool isEligable = false) {
    this.source = source;
    this.target = target;
    this.weight = weight;
    this.weightIndex = weightIndex;
    this.isEligable = isEligable;
}
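// Hypothetical usage sketch (not part of the original source): builds a
// connection from "source" to "target" and registers it on the source
// neuron. It assumes outNeuronsWithWeights is a mutable
// IList<SlimRnnNeuronWithWeight> on SlimRnnNeuron, as its use in
// propagate() below suggests.
static SlimRnnNeuronWithWeight connectSketch(SlimRnnNeuron source, SlimRnnNeuron target, float weight, uint weightIndex) {
    // isEligable: true marks the weight as changeable, so the learning
    // algorithm gets a chance to adjust it on first use during propagation
    var connection = new SlimRnnNeuronWithWeight(source, target, weight, weightIndex, isEligable: true);
    source.outNeuronsWithWeights.Add(connection);
    return connection;
}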
// propagate the signal for the "next" activation
void propagate(
    IList<SlimRnnNeuron> old,
    IList<SlimRnnNeuron> @new,
    IList<SlimRnnNeuronWithWeight> trace,
    ref double time,
    out bool gotoHaltLoop
) {
    gotoHaltLoop = false;

    // TODO PARALLELIZE
    foreach (SlimRnnNeuron u_l in old) {
        int outNeuronWithWeightIndex = -1; // set to -1 because the incrementing is done at the head of the loop

        foreach (SlimRnnNeuronWithWeight w_lk in u_l.outNeuronsWithWeights) {
            outNeuronWithWeightIndex++;

            // see Procedure 2.1: Spread in the Schmidhuber paper

            // here we invoke the learning algorithm if the connection was not yet used
            if (
                learningAlgorithm != null &&
                !w_lk.wasUsed &&
                w_lk.isEligable // call the learning algorithm only if the weight has been marked as changeable by the learning algorithm
            ) {
                learningAlgorithm.opportunityToAdjustWeight(w_lk);
            }

            w_lk.wasUsed = true;

            // a zero weight carries no signal, so the connection is skipped
            if (w_lk.weight == 0.0f) {
                continue;
            }

            SlimRnnNeuron k = w_lk.target;

            // add connection to trace
            if (!w_lk.isMark) {
                w_lk.isMark = true;
                trace.Add(w_lk);
            }

            if (k.type == SlimRnnNeuron.EnumType.ADDITIVE) {
                k.next += w_lk.source.activation * w_lk.weight;
            }
            else {
                Debug.Assert(k.type == SlimRnnNeuron.EnumType.MULTIPLICATIVE);
                k.next *= w_lk.source.activation * w_lk.weight;
            }

            if (!k.used) {
                k.used = true;
                @new.Add(k);
            }

            time += w_lk.cost; // after Schmidhuber: long wires may cost more

            if (time > t_lim) {
                gotoHaltLoop = true;
                return;
            }
        }
    }

    if (learningAlgorithm != null && learningAlgorithm.checkForceTermination()) {
        gotoHaltLoop = true;
        return;
    }
}
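// Hypothetical driver sketch (not part of the original source): runs the
// spread phase by calling propagate() repeatedly, alternating the old/new
// frontier lists until no neuron fires, the time budget t_lim is exceeded,
// or the learning algorithm forces termination. Only propagate()'s
// signature is taken from the code above; the frontier bookkeeping and the
// commit rule for pending activations are assumptions.
void spreadSketch(IList<SlimRnnNeuron> inputNeurons) {
    IList<SlimRnnNeuron> old = new List<SlimRnnNeuron>(inputNeurons); // seeded with the activated input neurons
    IList<SlimRnnNeuron> @new = new List<SlimRnnNeuron>();
    IList<SlimRnnNeuronWithWeight> trace = new List<SlimRnnNeuronWithWeight>();

    double time = 0.0;
    bool gotoHaltLoop;

    while (old.Count > 0) {
        propagate(old, @new, trace, ref time, out gotoHaltLoop);
        if (gotoHaltLoop) {
            break; // time budget t_lim exhausted or termination forced
        }

        // commit pending activations; the exact commit rule (activation
        // function, resetting of "next" and "used") is an assumption here
        foreach (SlimRnnNeuron neuron in @new) {
            neuron.activation = neuron.next;
            neuron.used = false;
        }

        // swap frontiers: neurons activated in this step feed the next step
        IList<SlimRnnNeuron> tmp = old;
        old = @new;
        @new = tmp;
        @new.Clear();
    }

    // the trace records every connection touched in this run, so the marks
    // can be reset cheaply before the next run
    foreach (SlimRnnNeuronWithWeight usedConnection in trace) {
        usedConnection.isMark = false;
    }
}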