예제 #1
0
 // Masked forward pass with top-k backward; records neuron activeness into _r.
 public override Tensor StepTopK(Flow f, Tensor x, int k)
 {
     Tensor projected = f.MultiplyTopRecord(x, Wxh, k, _r);
     Tensor preActivation = f.AddBias(projected, Bh);
     return f.Mask(preActivation, _r.Mask);
 }
예제 #2
0
 // Normal forward pass with top-k backward; records neuron activeness into _r.
 // inds restricts which neurons are computed (originally added for efficient dropout).
 // Currently unused, but useful for observing how neuron activeness shifts under top-k backprop.
 public override Tensor StepTopK(Flow f, Tensor x, int[] inds, int k)
 {
     Tensor projected = f.MultiplyTopRecord(x, Wxh, k, _r, inds);
     return f.AddBias(projected, Bh, inds);
 }
예제 #3
0
 // Standard forward and backward pass, with the recorder's mask applied to the output.
 public override Tensor Step(Flow f, Tensor x)
 {
     Tensor preActivation = f.AddBias(f.Multiply(x, Wxh), Bh);
     return f.Mask(preActivation, _r.Mask);
 }
예제 #4
0
 // Normal forward pass; backward pass propagates through only the top-k activations.
 public virtual Tensor StepTopK(Flow f, Tensor x, int k)
     => f.AddBias(f.MultiplyTop(x, Wxh, k), Bh);
예제 #5
0
 // Standard forward and backward pass.
 // inds restricts which neurons are computed (originally added for efficient dropout).
 public virtual Tensor Step(Flow f, Tensor x, int[] inds)
     => f.AddBias(f.Multiply(x, Wxh, inds), Bh, inds);
예제 #6
0
 // Standard forward and backward pass: affine projection x * Wxh + Bh.
 public virtual Tensor Step(Flow f, Tensor x)
     => f.AddBias(f.Multiply(x, Wxh), Bh);