/// <summary>
/// Applies layer normalization to <paramref name="src"/> using scale <paramref name="gamma_"/>
/// and shift <paramref name="beta_"/>, writing into <paramref name="result"/> (or a fresh tensor if null).
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="src">Input tensor; rows/cols taken from Sizes[0]/Sizes[1] — assumes 2D, TODO confirm callers.</param>
/// <param name="gamma_">Per-column scale parameters.</param>
/// <param name="beta_">Per-column shift parameters.</param>
/// <param name="eps">Epsilon added for numerical stability.</param>
/// <returns>The tensor that received the normalized values.</returns>
public Tensor LayerNorm(Tensor result, Tensor src, Tensor gamma_, Tensor beta_, float eps)
{
    int rowCount = (int)src.Sizes[0];
    int colCount = (int)src.Sizes[1];

    Tensor writeTarget = TensorResultBuilder.GetWriteTarget(result, src, true, src.Sizes);
    TensorApplyCPU.LayerNorm(writeTarget, src, gamma_, beta_, eps, rowCount, colCount);

    return writeTarget;
}
/// <summary>
/// Fills positions of <paramref name="result"/> selected by <paramref name="indices"/> along
/// dimension <paramref name="dim"/> with a constant <paramref name="value"/>.
/// </summary>
/// <param name="result">Target tensor to scatter into; must not be null.</param>
/// <param name="value">Constant written at each indexed position.</param>
/// <param name="dim">Dimension along which indices address elements.</param>
/// <param name="indices">Index tensor; same rank as result, same sizes except along dim.</param>
/// <returns><paramref name="result"/>, after the in-place scatter.</returns>
/// <exception cref="ArgumentNullException">result is null.</exception>
/// <exception cref="ArgumentOutOfRangeException">dim is outside result's dimensions.</exception>
/// <exception cref="InvalidOperationException">indices shape is incompatible with result.</exception>
public Tensor ScatterFill(Tensor result, float value, int dim, Tensor indices)
{
    if (result == null)
    {
        throw new ArgumentNullException(nameof(result));
    }

    // BUGFIX: was "dim < 0 && dim >= result.DimensionCount", which is always false,
    // so an out-of-range dim was never rejected. "||" performs the intended check.
    if (dim < 0 || dim >= result.DimensionCount)
    {
        throw new ArgumentOutOfRangeException(nameof(dim));
    }

    if (indices.DimensionCount != result.DimensionCount)
    {
        throw new InvalidOperationException("result and indices must have same number of dimensions");
    }

    if (!TensorResultBuilder.ArrayEqualExcept(indices.Sizes, result.Sizes, dim))
    {
        throw new InvalidOperationException("result and indices must be the same size except in dimension dim");
    }

    Tensor writeTarget = result;
    TensorApplyCPU.ScatterFill(writeTarget, value, dim, indices);
    return writeTarget;
}
/// <summary>
/// Computes the fused element-wise operation x + y * z (z is a scalar), writing into
/// <paramref name="result"/> or a newly allocated tensor shaped like <paramref name="x"/>.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="x">Addend tensor; also defines the output shape.</param>
/// <param name="y">Tensor multiplied by the scalar.</param>
/// <param name="z">Scalar multiplier.</param>
/// <returns>The tensor holding the computed values.</returns>
public Tensor AddMulV(Tensor result, Tensor x, Tensor y, float z)
{
    Tensor target = TensorResultBuilder.GetWriteTarget(result, x, false, x.Sizes);
    TensorApplyCPU.AddMulV(target, x, y, z);
    return target;
}
/// <summary>
/// Gathers elements of <paramref name="src"/> along dimension <paramref name="dim"/> at the
/// positions given by <paramref name="indices"/>; the output has the shape of indices.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="src">Source tensor to gather from.</param>
/// <param name="dim">Dimension along which to gather.</param>
/// <param name="indices">Index tensor; same rank as src.</param>
/// <returns>The tensor holding the gathered values.</returns>
/// <exception cref="ArgumentOutOfRangeException">dim is outside result's dimensions.</exception>
/// <exception cref="InvalidOperationException">shapes/ranks are incompatible.</exception>
public Tensor Gather(Tensor result, Tensor src, int dim, Tensor indices)
{
    if (result != null && result.DimensionCount != src.DimensionCount)
    {
        throw new InvalidOperationException("result and src must have same number of dimensions");
    }

    // BUGFIX: was "dim < 0 && dim >= result.DimensionCount", which is always false,
    // so an out-of-range dim was never rejected. "||" performs the intended check.
    if (result != null && (dim < 0 || dim >= result.DimensionCount))
    {
        throw new ArgumentOutOfRangeException(nameof(dim));
    }

    if (indices.DimensionCount != src.DimensionCount)
    {
        throw new InvalidOperationException("src and indices must have same number of dimensions");
    }

    if (result != null && !result.IsSameSizeAs(indices))
    {
        throw new InvalidOperationException("result and indices must be the same size");
    }

    if (result != null && !TensorResultBuilder.ArrayEqualExcept(src.Sizes, result.Sizes, dim))
    {
        throw new InvalidOperationException("result and src must be the same size except in dimension dim");
    }

    Tensor writeTarget = TensorResultBuilder.GetWriteTarget(result, indices.Allocator, src.ElementType, false, indices.Sizes);
    TensorApplyCPU.Gather(writeTarget, src, dim, indices);
    return writeTarget;
}
/// <summary>
/// Computes the sigmoid backward pass from the forward output <paramref name="resW"/> and the
/// incoming gradient <paramref name="resG"/>, writing into <paramref name="result"/> or a new tensor.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="resW">Forward-pass sigmoid output; defines the output shape.</param>
/// <param name="resG">Upstream gradient.</param>
/// <returns>The tensor holding the gradient values.</returns>
public Tensor SigmoidD(Tensor result, Tensor resW, Tensor resG)
{
    Tensor target = TensorResultBuilder.GetWriteTarget(result, resW, false, resW.Sizes);
    TensorApplyCPU.SigmoidD(target, resW, resG);
    return target;
}
/// <summary>
/// Applies the element-wise sigmoid function to <paramref name="src"/>, writing into
/// <paramref name="result"/> or a newly allocated tensor of the same shape.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="src">Input tensor.</param>
/// <returns>The tensor holding the sigmoid values.</returns>
public Tensor Sigmoid(Tensor result, Tensor src)
{
    Tensor target = TensorResultBuilder.GetWriteTarget(result, src, false, src.Sizes);
    TensorApplyCPU.Sigmoid(target, src);
    return target;
}
/// <summary>
/// Computes the ReLU backward pass from the forward values <paramref name="w"/> and the
/// incoming gradient <paramref name="g"/>, writing into <paramref name="result"/> or a new tensor.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="w">Forward-pass values; defines the output shape.</param>
/// <param name="g">Upstream gradient.</param>
/// <returns>The tensor holding the gradient values.</returns>
public Tensor ReluD(Tensor result, Tensor w, Tensor g)
{
    Tensor writeTarget = TensorResultBuilder.GetWriteTarget(result, w, false, w.Sizes);
    // BUGFIX: previously passed "result" (which may be null) to the kernel instead of
    // "writeTarget", so a freshly allocated output was never written to (and a null
    // result would crash). Every sibling op in this file writes into writeTarget.
    TensorApplyCPU.ReluD(writeTarget, w, g);
    return writeTarget;
}
/// <summary>
/// Computes the element-wise reverse subtraction lhs - rhs[i] (scalar minus tensor),
/// writing into <paramref name="result"/> or a new tensor shaped like <paramref name="rhs"/>.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="lhs">Scalar minuend.</param>
/// <param name="rhs">Tensor subtrahend; defines the output shape.</param>
/// <returns>The tensor holding the differences.</returns>
public Tensor Sub(Tensor result, float lhs, Tensor rhs)
{
    Tensor target = TensorResultBuilder.GetWriteTarget(result, rhs, false, rhs.Sizes);
    TensorApplyCPU.RSub(target, lhs, rhs);
    return target;
}
/// <summary>
/// Computes the fused tanh backward step over three inputs (srcX, srcY, srcZ), writing into
/// <paramref name="result"/> or a new tensor shaped like <paramref name="srcX"/>.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="srcX">First input; defines the output shape.</param>
/// <param name="srcY">Second input.</param>
/// <param name="srcZ">Third input.</param>
/// <returns>The tensor holding the computed values.</returns>
public Tensor AddTanhD(Tensor result, Tensor srcX, Tensor srcY, Tensor srcZ)
{
    Tensor target = TensorResultBuilder.GetWriteTarget(result, srcX, false, srcX.Sizes);
    TensorApplyCPU.AddTanhD(target, srcX, srcY, srcZ);
    return target;
}
/// <summary>
/// Computes the fused element-wise operation srcX * srcY + srcZ * srcW, writing into
/// <paramref name="result"/> or a new tensor shaped like <paramref name="srcX"/>.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="srcX">First factor; defines the output shape.</param>
/// <param name="srcY">Second factor.</param>
/// <param name="srcZ">Third factor.</param>
/// <param name="srcW">Fourth factor.</param>
/// <returns>The tensor holding the computed values.</returns>
public Tensor MulMulAdd(Tensor result, Tensor srcX, Tensor srcY, Tensor srcZ, Tensor srcW)
{
    Tensor target = TensorResultBuilder.GetWriteTarget(result, srcX, false, srcX.Sizes);
    TensorApplyCPU.MulMulAdd(target, srcX, srcY, srcZ, srcW);
    return target;
}
/// <summary>
/// Raises each element of <paramref name="src"/> to the power <paramref name="value"/>,
/// writing into <paramref name="result"/> or a newly allocated tensor.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="src">Base tensor; defines the output shape.</param>
/// <param name="value">Scalar exponent.</param>
/// <returns>The tensor holding the powered values.</returns>
public Tensor Pow(Tensor result, Tensor src, float value)
{
    Tensor target = TensorResultBuilder.GetWriteTarget(result, src, false, src.Sizes);
    TensorApplyCPU.Pow(target, src, value);
    return target;
}
/// <summary>
/// Computes the index of the maximum element along <paramref name="dimension"/> of
/// <paramref name="src"/>; the result shape is produced dimension-wise by NativeWrapper.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="src">Input tensor.</param>
/// <param name="dimension">Dimension to reduce over.</param>
/// <returns>The tensor of argmax indices.</returns>
public Tensor Argmax(Tensor result, Tensor src, int dimension)
{
    Tensor target = NativeWrapper.CreateResultDimensionwise(result, src, dimension);
    TensorApplyCPU.Argmax(target, src, dimension);
    return target;
}
/// <summary>
/// Computes the element-wise product lhs * rhs, writing into <paramref name="result"/>
/// or a new tensor shaped like <paramref name="lhs"/>.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="lhs">Left factor; defines the output shape.</param>
/// <param name="rhs">Right factor.</param>
/// <returns>The tensor holding the products.</returns>
public Tensor Mul(Tensor result, Tensor lhs, Tensor rhs)
{
    Tensor target = TensorResultBuilder.GetWriteTarget(result, lhs, false, lhs.Sizes);
    TensorApplyCPU.Mul(target, lhs, rhs);
    return target;
}
/// <summary>
/// Copies every element of <paramref name="src"/> into <paramref name="result"/>.
/// Shapes may differ, but element counts must match.
/// </summary>
/// <param name="result">Destination tensor.</param>
/// <param name="src">Source tensor.</param>
/// <exception cref="InvalidOperationException">Element counts differ.</exception>
public void Copy(Tensor result, Tensor src)
{
    if (result.ElementCount() != src.ElementCount())
    {
        throw new InvalidOperationException("Tensors must have equal numbers of elements");
    }

    TensorApplyCPU.Copy(result, src);
}
/// <summary>
/// Computes the element-wise quotient lhs / rhs, writing into <paramref name="result"/>
/// or a new tensor shaped like <paramref name="lhs"/>.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="lhs">Dividend; defines the output shape.</param>
/// <param name="rhs">Divisor.</param>
/// <returns>The tensor holding the quotients.</returns>
public Tensor Div(Tensor result, Tensor lhs, Tensor rhs)
{
    Tensor target = TensorResultBuilder.GetWriteTarget(result, lhs, false, lhs.Sizes);
    TensorApplyCPU.Div(target, lhs, rhs);
    return target;
}
/// <summary>
/// Clamps each element of <paramref name="src"/> into the range [min, max], writing into
/// <paramref name="result"/> or a newly allocated tensor.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="src">Input tensor; defines the output shape.</param>
/// <param name="min">Lower bound.</param>
/// <param name="max">Upper bound.</param>
/// <returns>The tensor holding the clamped values.</returns>
public Tensor Clamp(Tensor result, Tensor src, float min, float max)
{
    Tensor target = TensorResultBuilder.GetWriteTarget(result, src, false, src.Sizes);
    TensorApplyCPU.Clamp(target, src, min, max);
    return target;
}
/// <summary>
/// Computes the layer-normalization backward pass, accumulating parameter gradients into
/// <paramref name="gradGamma_"/>/<paramref name="gradBeta_"/> and returning the input gradient.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="gradGamma_">Gradient accumulator for gamma.</param>
/// <param name="gradBeta_">Gradient accumulator for beta.</param>
/// <param name="adj_">Upstream gradient; rows/cols taken from Sizes[0]/Sizes[1] — assumes 2D, TODO confirm.</param>
/// <param name="y_">Forward-pass normalized output.</param>
/// <param name="x_">Forward-pass input.</param>
/// <param name="gamma_">Scale parameters.</param>
/// <param name="beta_">Shift parameters.</param>
/// <param name="eps">Epsilon used in the forward pass.</param>
/// <returns>The tensor holding the input gradient.</returns>
public Tensor LayerNormGrad(Tensor result, Tensor gradGamma_, Tensor gradBeta_, Tensor adj_, Tensor y_, Tensor x_, Tensor gamma_, Tensor beta_, float eps)
{
    try
    {
        int rowCount = (int)adj_.Sizes[0];
        int colCount = (int)adj_.Sizes[1];

        Tensor writeTarget = TensorResultBuilder.GetWriteTarget(result, adj_, false, adj_.Sizes);
        TensorApplyCPU.LayerNormGrad(writeTarget, gradGamma_, gradBeta_, adj_, y_, x_, gamma_, beta_, rowCount, colCount, eps);

        return writeTarget;
    }
    catch (Exception err)
    {
        // Log and rethrow (stack trace preserved by the bare "throw;").
        Logger.WriteLine(Logger.Level.err, ConsoleColor.Red, $"LayerNormGrad exception: '{err.Message}', CallStack:'{err.StackTrace}'");
        throw;
    }
}
/// <summary>
/// Fills <paramref name="result"/> in place with a source/target attention mask derived from the
/// original (un-padded) sequence lengths: kept positions get <paramref name="value"/>, masked
/// positions get <paramref name="maskedValue"/>.
/// </summary>
/// <param name="result">Mask tensor to fill; its last dimension is treated as the column count.</param>
/// <param name="srcOriginalLengths">Original source-side sequence lengths.</param>
/// <param name="tgtOriginalLengths">Original target-side sequence lengths.</param>
/// <param name="srcPaddedSeqLen">NOTE(review): never used in this method — only tgtPaddedSeqLen
/// is forwarded to the kernel; confirm against TensorApplyCPU.BuildSrcTgtMask whether this is intentional.</param>
/// <param name="tgtPaddedSeqLen">Padded target-side sequence length.</param>
/// <param name="value">Value written at unmasked positions.</param>
/// <param name="maskedValue">Value written at masked positions.</param>
/// <returns><paramref name="result"/>, after the in-place fill.</returns>
/// <exception cref="Exception">Storage size is not a whole number of rows.</exception>
public Tensor BuildSrcTgtMask(Tensor result, Tensor srcOriginalLengths, Tensor tgtOriginalLengths, int srcPaddedSeqLen, int tgtPaddedSeqLen, float value, float maskedValue)
{
    int rank = result.DimensionCount;
    long storageSize = TensorDimensionHelpers.GetStorageSize(result.Sizes, result.Strides);
    long cols = result.Sizes[rank - 1];

    if (storageSize % cols != 0)
    {
        throw new Exception($"Invalid tensor storage size = '{storageSize}', and cols = '{cols}'");
    }

    long rows = storageSize / cols;
    TensorApplyCPU.BuildSrcTgtMask(result, srcOriginalLengths, tgtOriginalLengths, (int)rows, (int)cols, tgtPaddedSeqLen, value, maskedValue);
    return result;
}
/// <summary>
/// Fills <paramref name="result"/> in place with a triangular mask: kept positions receive
/// <paramref name="value"/>, masked positions receive <paramref name="maskedValue"/>.
/// </summary>
/// <param name="result">Mask tensor to fill; its last dimension is treated as the column count.</param>
/// <param name="value">Value written at unmasked positions.</param>
/// <param name="maskedValue">Value written at masked positions.</param>
/// <returns><paramref name="result"/>, after the in-place fill.</returns>
/// <exception cref="Exception">Storage size is not a whole number of rows.</exception>
public Tensor BuildTriMask(Tensor result, float value, float maskedValue)
{
    int rank = result.DimensionCount;
    long storageSize = TensorDimensionHelpers.GetStorageSize(result.Sizes, result.Strides);
    long cols = result.Sizes[rank - 1];

    if (storageSize % cols != 0)
    {
        throw new Exception($"Invalid tensor storage size = '{storageSize}', and cols = '{cols}'");
    }

    long rows = storageSize / cols;
    TensorApplyCPU.BuildTriMask(result, (int)rows, (int)cols, value, maskedValue);
    return result;
}
/// <summary>
/// Applies softmax over the last dimension of <paramref name="src"/>, treating all leading
/// dimensions as rows, writing into <paramref name="result"/> or a new tensor.
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="src">Input tensor; last dimension is the softmax axis.</param>
/// <returns>The tensor holding the softmax values.</returns>
/// <exception cref="Exception">Storage size is not a whole number of rows.</exception>
public Tensor Softmax(Tensor result, Tensor src)
{
    int rank = src.DimensionCount;
    long storageSize = TensorDimensionHelpers.GetStorageSize(src.Sizes, src.Strides);
    long cols = src.Sizes[rank - 1];

    if (storageSize % cols != 0)
    {
        throw new Exception($"Invalid tensor storage size = '{storageSize}', and cols = '{cols}'");
    }

    long rows = storageSize / cols;

    Tensor target = TensorResultBuilder.GetWriteTarget(result, src, true, src.Sizes);
    TensorApplyCPU.Softmax(target, src, (int)rows, (int)cols);
    return target;
}
/// <summary>
/// Selects rows of <paramref name="src"/> at the positions listed in <paramref name="indice"/>,
/// producing a tensor of shape (indice.Sizes[0], src.Sizes[1]).
/// </summary>
/// <param name="result">Optional pre-allocated output; may be null.</param>
/// <param name="src">Source tensor; assumed 2D (rows, cols) — TODO confirm callers.</param>
/// <param name="indice">1D tensor of row indices to select.</param>
/// <returns>The tensor holding the selected rows.</returns>
/// <exception cref="Exception">Storage size is not a whole number of rows.</exception>
public Tensor IndexSelect(Tensor result, Tensor src, Tensor indice)
{
    // BUGFIX: rows/cols were previously computed from "result" BEFORE the null-tolerant
    // GetWriteTarget call, so passing result == null (allowed everywhere else in this file)
    // crashed with a NullReferenceException. Compute them from the actual write target,
    // which has the same sizes whenever result was non-null.
    Tensor writeTarget = TensorResultBuilder.GetWriteTarget(result, src, false, new long[] { indice.Sizes[0], src.Sizes[1] });

    int ndim = writeTarget.DimensionCount;
    long storageSize = TensorDimensionHelpers.GetStorageSize(writeTarget.Sizes, writeTarget.Strides);
    long cols = writeTarget.Sizes[ndim - 1];

    if (storageSize % cols != 0)
    {
        throw new Exception($"Invalid tensor storage size = '{storageSize}', and cols = '{cols}'");
    }

    long rows = storageSize / cols;

    TensorApplyCPU.IndexSelect(writeTarget, src, indice, (int)rows, (int)cols);
    return writeTarget;
}
/// <summary>
/// Computes the softmax backward pass from the upstream gradient <paramref name="adj_"/> and the
/// forward output <paramref name="val_"/>, writing into <paramref name="grad_"/> or a new tensor.
/// </summary>
/// <param name="grad_">Optional pre-allocated gradient output; may be null.</param>
/// <param name="adj_">Upstream gradient; last dimension is the softmax axis.</param>
/// <param name="val_">Forward-pass softmax output.</param>
/// <param name="addGrad">When true, accumulates into the existing gradient instead of overwriting.</param>
/// <returns>The tensor holding the gradient.</returns>
/// <exception cref="Exception">Storage size is not a whole number of rows.</exception>
public Tensor SoftmaxGrad(Tensor grad_, Tensor adj_, Tensor val_, bool addGrad = true)
{
    int rank = adj_.DimensionCount;
    long storageSize = TensorDimensionHelpers.GetStorageSize(adj_.Sizes, adj_.Strides);
    long cols = adj_.Sizes[rank - 1];

    if (storageSize % cols != 0)
    {
        throw new Exception($"Invalid tensor storage size = '{storageSize}', and cols = '{cols}'");
    }

    long rows = storageSize / cols;

    Tensor target = TensorResultBuilder.GetWriteTarget(grad_, adj_, true, adj_.Sizes);
    TensorApplyCPU.SoftmaxGrad(target, adj_, val_, (int)rows, (int)cols, addGrad);
    return target;
}
/// <summary>
/// Accumulates the gradient of an IndexSelect: scatters rows of <paramref name="adj"/> back into
/// <paramref name="grad"/> at the positions listed in <paramref name="indice"/>.
/// </summary>
/// <param name="grad">Gradient accumulator; must not be null (written in place).</param>
/// <param name="adj">Upstream gradient; last dimension is the column count.</param>
/// <param name="indice">1D tensor of row indices used in the forward pass.</param>
/// <returns><paramref name="grad"/>, after accumulation.</returns>
/// <exception cref="ArgumentNullException">grad is null.</exception>
/// <exception cref="Exception">Storage size is not a whole number of rows.</exception>
public Tensor IndexSelectGrad(Tensor grad, Tensor adj, Tensor indice)
{
    if (grad == null)
    {
        // BUGFIX: the single-string ArgumentNullException constructor interprets its argument
        // as the PARAMETER NAME, so the old message appeared as a bogus parameter name.
        // Pass nameof(grad) plus the message via the two-argument overload.
        throw new ArgumentNullException(nameof(grad), "Tensor grad should not be null.");
    }

    int ndim = adj.DimensionCount;
    long storageSize = TensorDimensionHelpers.GetStorageSize(adj.Sizes, adj.Strides);
    long cols = adj.Sizes[ndim - 1];

    if (storageSize % cols != 0)
    {
        throw new Exception($"Invalid tensor storage size = '{storageSize}', and cols = '{cols}'");
    }

    long rows = storageSize / cols;

    TensorApplyCPU.IndexSelectGrad(grad, adj, indice, (int)rows, (int)cols);
    return grad;
}
/// <summary>
/// Sets every element of <paramref name="result"/> to <paramref name="value"/> in place.
/// </summary>
/// <param name="result">Tensor to fill.</param>
/// <param name="value">Constant to write.</param>
public void Fill(Tensor result, float value)
{
    TensorApplyCPU.Fill(result, value);
}
/// <summary>
/// Accumulates <paramref name="rhs"/> into <paramref name="result"/> element-wise, in place
/// (result += rhs), and returns <paramref name="result"/>.
/// </summary>
/// <param name="result">Accumulator tensor; modified in place.</param>
/// <param name="rhs">Tensor added to the accumulator.</param>
/// <returns><paramref name="result"/>, after accumulation.</returns>
public Tensor AtomicAdd(Tensor result, Tensor rhs)
{
    TensorApplyCPU.Add(result, result, rhs);
    return result;
}
/// <summary>
/// Performs one Adam optimizer update on the weights <paramref name="tw"/> in place, using the
/// gradients <paramref name="tg"/> and the first/second moment accumulators.
/// </summary>
/// <param name="tw">Weight tensor; rows/cols taken from Sizes[0]/Sizes[1] — assumes 2D, TODO confirm.</param>
/// <param name="tg">Gradient tensor.</param>
/// <param name="tv">Second-moment (variance) accumulator.</param>
/// <param name="tm">First-moment (mean) accumulator.</param>
/// <param name="batchSize">Batch size used to scale the gradient.</param>
/// <param name="step_size">Learning rate.</param>
/// <param name="clipval">Gradient clipping threshold.</param>
/// <param name="regc">Regularization coefficient.</param>
/// <param name="decay_rate_v">Decay rate for the second moment (beta2).</param>
/// <param name="decay_rate_m">Decay rate for the first moment (beta1).</param>
/// <param name="iter">Current iteration number (for bias correction).</param>
/// <param name="eps">Epsilon for numerical stability.</param>
/// <returns><paramref name="tw"/>, after the in-place update.</returns>
public Tensor Adam(Tensor tw, Tensor tg, Tensor tv, Tensor tm, int batchSize, float step_size, float clipval, float regc, float decay_rate_v, float decay_rate_m, int iter, float eps)
{
    int rowCount = (int)tw.Sizes[0];
    int colCount = (int)tw.Sizes[1];

    TensorApplyCPU.Adam(tw, tg, tv, tm, rowCount, colCount, batchSize, step_size, clipval, regc, decay_rate_v, decay_rate_m, iter, eps);
    return tw;
}