public override void Fill(Tensor blob)
{
    Guard.That(() => blob.Count).IsPositive();

    using (var @cpuBlob = blob.OnCpu())
    {
        var data = @cpuBlob.Data;

        // Fill with uniform samples in [0, 1), then normalize each row so it sums to 1.
        var distribution = new ContinuousUniform(0, 1);
        data.MapInplace(x => distribution.Sample(), Zeros.Include);

        int dim = blob.Count / blob.Num;
        Guard.That(() => dim).IsPositive();

        for (int i = 0; i < blob.Num; i++)
        {
            double sum = 0.0d;
            for (int j = 0; j < dim; j++)
            {
                sum += data[i * dim + j];
            }

            for (int j = 0; j < dim; j++)
            {
                data[i * dim + j] /= sum;
            }
        }
    }
}
public override void Fill(Tensor blob)
{
    using (var @cpuBlob = blob.OnCpu())
    {
        var data = @cpuBlob.Data;

        var distribution = new ContinuousUniform(this.Parameters.Min, this.Parameters.Max);
        data.MapInplace(x => distribution.Sample(), Zeros.Include);
    }
}
public override void Fill(Tensor blob)
{
    using (var @cpuBlob = blob.OnCpu())
    {
        var data = @cpuBlob.Data;

        var value = this.Parameters.Value;
        data.MapInplace(x => value, Zeros.Include);
    }
}
public override void Fill(Tensor blob)
{
    Guard.That(() => blob.Count).IsPositive();

    using (var @cpuBlob = blob.OnCpu())
    {
        var data = @cpuBlob.Data;

        // Xavier initialization: uniform samples in [-scale, scale] with scale = sqrt(3 / fanIn).
        int fanIn = blob.Count / blob.Num;
        double scale = Math.Sqrt(3.0 / fanIn); // 3.0 forces floating-point division; 3 / fanIn would truncate to 0 for fanIn > 3.
        var distribution = new ContinuousUniform(-scale, scale);
        data.MapInplace(x => distribution.Sample(), Zeros.Include);
    }
}
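// Standalone sketch, not part of the filler classes above: the same Xavier-uniform sampling
// pattern on a plain MathNet.Numerics vector, to show the scale computation in isolation.
// The class name, fanIn, and count parameters are illustrative assumptions. It also shows why
// the scale must use floating-point division: with an int fanIn, 3 / fanIn truncates to 0 for
// any fanIn > 3 and every weight would end up initialized to 0.
using System;
using MathNet.Numerics.Distributions;
using MathNet.Numerics.LinearAlgebra;

static class XavierSketch
{
    public static Vector<double> Sample(int fanIn, int count)
    {
        double scale = Math.Sqrt(3.0 / fanIn);                   // floating-point division
        var distribution = new ContinuousUniform(-scale, scale); // U(-scale, scale)
        return Vector<double>.Build.Dense(count, _ => distribution.Sample());
    }
}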
public void CopyFrom(Tensor other, bool copyDiff = false, bool reshape = false) { if (other == null) { throw new ArgumentNullException("other"); } Contract.EndContractBlock(); // If reshaping needed we reshape the instance with new memory. if (reshape) { ReshapeAs(other); } switch (this.Location) { case TensorLocation.Cpu: using (var @thisCpu = this.OnCpu()) using (var @otherCpu = other.OnCpu()) { // We copy the data @otherCpu.Data.CopyTo(@thisCpu.Data); // If copying differential is needed, we copy it too. if (copyDiff) { @otherCpu.Diff.CopyTo(@thisCpu.Diff); } } break; case TensorLocation.Gpu: break; } throw new NotImplementedException(); }
internal override double ForwardCpu(CpuTensorScopeCollection bottom, CpuTensorScopeCollection top)
{
    using (var probabilityCpu = probability.OnCpu())
    {
        // The forward pass computes the softmax probability values.
        softmaxLayer.ForwardCpu(bottom, new CpuTensorScopeCollection { probabilityCpu });

        var probabilityData = probabilityCpu.Data;
        var labels = bottom[1].Data;

        int num = bottom[0].Num;
        int dim = bottom[0].Count / num;

        double loss = 0;
        for (int i = 0; i < num; i++)
        {
            loss -= Math.Log(Math.Max(probabilityData[i * dim + (int)labels[i]], double.Epsilon));
        }

        loss = loss / num;

        if (top.Count >= 1)
        {
            top[0].Data[0] = loss;
        }

        if (top.Count == 2)
        {
            top[1].Tensor.ShareData(probability);
        }

        return loss;
    }
}
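// Standalone sketch, assuming plain arrays rather than the Tensor types above, of the loss that
// ForwardCpu accumulates: for each example, take the predicted probability of its true label,
// clamp it away from zero, and average the negative logs over the batch. The class and method
// names are illustrative.
using System;

static class SoftmaxLossSketch
{
    public static double NegativeLogLikelihood(double[] probabilities, int[] labels, int num, int dim)
    {
        double loss = 0.0;
        for (int i = 0; i < num; i++)
        {
            double p = Math.Max(probabilities[i * dim + labels[i]], double.Epsilon);
            loss -= Math.Log(p);
        }
        return loss / num;
    }
}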
public override void Fill(Tensor blob)
{
    using (var @cpuBlob = blob.OnCpu())
    {
        var data = @cpuBlob.Data;

        var distribution = new Normal(this.Parameters.Mean, this.Parameters.Std);
        data.MapInplace(x => distribution.Sample(), Zeros.Include);

        if (this.Parameters.IsSparse)
        {
            Guard.That(() => blob.Num).Equals(1);
            Guard.That(() => blob.Channels).Equals(1);

            int numberOfInputs = blob.Height;
            double nonZeroProbability = 1.0d / numberOfInputs;

            var bernoulli = new Bernoulli(nonZeroProbability);
            var mask = Vector<double>.Build.SameAs(data, () => bernoulli.Sample());

            data.PointwiseMultiply(mask, result: data);
        }
    }
}
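// Standalone sketch, assuming a plain MathNet.Numerics vector rather than a Tensor, of the
// sparse Gaussian initialization above: draw Normal(mean, std) samples, then zero each entry
// independently by multiplying with a Bernoulli 0/1 mask. The class name and parameters are
// illustrative assumptions.
using MathNet.Numerics.Distributions;
using MathNet.Numerics.LinearAlgebra;

static class SparseGaussianSketch
{
    public static Vector<double> Sample(int count, double mean, double std, double nonZeroProbability)
    {
        var normal = new Normal(mean, std);
        var bernoulli = new Bernoulli(nonZeroProbability);

        var values = Vector<double>.Build.Dense(count, _ => normal.Sample());
        var mask = Vector<double>.Build.Dense(count, _ => bernoulli.Sample()); // each sample is 0 or 1

        values.PointwiseMultiply(mask, result: values);
        return values;
    }
}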
public void CopyFrom(Tensor other, bool copyDiff = false, bool reshape = false) { if (other == null) throw new ArgumentNullException("other"); Contract.EndContractBlock(); // If reshaping needed we reshape the instance with new memory. if (reshape) ReshapeAs(other); switch (this.Location) { case TensorLocation.Cpu: using (var @thisCpu = this.OnCpu()) using (var @otherCpu = other.OnCpu()) { // We copy the data @otherCpu.Data.CopyTo(@thisCpu.Data); // If copying differential is needed, we copy it too. if (copyDiff) @otherCpu.Diff.CopyTo(@thisCpu.Diff); } break; case TensorLocation.Gpu: break; } throw new NotImplementedException(); }