/// <summary>
/// Masked forward pass with top-k backward; collects neuron activeness into <c>_r</c>.
/// </summary>
/// <param name="f">Computation flow used to build the graph.</param>
/// <param name="x">Input tensor.</param>
/// <param name="k">Number of top entries kept in the backward pass.</param>
/// <returns>The biased product of <paramref name="x"/> and <c>Wxh</c>, masked by <c>_r.Mask</c>.</returns>
public override Tensor StepTopK(Flow f, Tensor x, int k)
{
    // MultiplyTopRecord: forward as usual, top-k backward, recording activeness into _r.
    Tensor product = f.MultiplyTopRecord(x, Wxh, k, _r);
    Tensor biased = f.AddBias(product, Bh);
    return f.Mask(biased, _r.Mask);
}
/// <summary>
/// Normal forward pass with top-k backward; collects neuron activeness into <c>_r</c>.
/// <paramref name="inds"/> restricts which neurons are computed — originally intended
/// for efficient dropout, currently unused (it could be used to observe how neuron
/// activeness changes under top-k backprop).
/// </summary>
/// <param name="f">Computation flow used to build the graph.</param>
/// <param name="x">Input tensor.</param>
/// <param name="inds">Indices of the neurons to compute.</param>
/// <param name="k">Number of top entries kept in the backward pass.</param>
/// <returns>The biased product restricted to <paramref name="inds"/>; note no mask is applied here.</returns>
public override Tensor StepTopK(Flow f, Tensor x, int[] inds, int k)
{
    Tensor product = f.MultiplyTopRecord(x, Wxh, k, _r, inds);
    return f.AddBias(product, Bh, inds);
}
/// <summary>
/// Normal forward and backward pass, with the output masked by <c>_r.Mask</c>.
/// </summary>
/// <param name="f">Computation flow used to build the graph.</param>
/// <param name="x">Input tensor.</param>
/// <returns>The masked affine transform of <paramref name="x"/>.</returns>
public override Tensor Step(Flow f, Tensor x)
{
    Tensor affine = f.AddBias(f.Multiply(x, Wxh), Bh);
    return f.Mask(affine, _r.Mask);
}
/// <summary>
/// Normal forward pass; the backward pass propagates only the top-k entries.
/// </summary>
/// <param name="f">Computation flow used to build the graph.</param>
/// <param name="x">Input tensor.</param>
/// <param name="k">Number of top entries kept in the backward pass.</param>
/// <returns>The biased product of <paramref name="x"/> and <c>Wxh</c>.</returns>
public virtual Tensor StepTopK(Flow f, Tensor x, int k)
{
    Tensor product = f.MultiplyTop(x, Wxh, k);
    return f.AddBias(product, Bh);
}
/// <summary>
/// Normal forward and backward pass restricted to the neurons listed in
/// <paramref name="inds"/> — originally intended for efficient dropout.
/// </summary>
/// <param name="f">Computation flow used to build the graph.</param>
/// <param name="x">Input tensor.</param>
/// <param name="inds">Indices of the neurons to compute.</param>
/// <returns>The biased product restricted to <paramref name="inds"/>.</returns>
public virtual Tensor Step(Flow f, Tensor x, int[] inds)
{
    Tensor restricted = f.Multiply(x, Wxh, inds);
    return f.AddBias(restricted, Bh, inds);
}
/// <summary>
/// Plain affine step: normal forward pass, normal backward pass.
/// </summary>
/// <param name="f">Computation flow used to build the graph.</param>
/// <param name="x">Input tensor.</param>
/// <returns>The biased product of <paramref name="x"/> and <c>Wxh</c>.</returns>
public virtual Tensor Step(Flow f, Tensor x) => f.AddBias(f.Multiply(x, Wxh), Bh);