public Tensor AddmmBatch(Tensor result, float beta, Tensor src, float alpha, Tensor m1, Tensor m2)
{
    // Batched GEMM: writeTarget = beta * src + alpha * (m1 x m2), applied per batch slice.
    // All inputs must be 3D CUDA tensors of the same element type; returns the write target
    // (either `result` or a fresh tensor shaped like `src`).
    TSCudaContext context = CudaHelpers.TSContextForTensor(src);

    if (src.ElementType != m1.ElementType || src.ElementType != m2.ElementType || (result != null && result.ElementType != src.ElementType))
    {
        throw new InvalidOperationException("All tensors must have the same element type");
    }

    if (result != null && !(result.Storage is CudaStorage))
    {
        throw new ArgumentException("result must be a CUDA tensor", nameof(result));
    }

    if (!(m1.Storage is CudaStorage))
    {
        throw new ArgumentException("m1 must be a CUDA tensor", nameof(m1));
    }

    if (!(m2.Storage is CudaStorage))
    {
        throw new ArgumentException("m2 must be a CUDA tensor", nameof(m2));
    }

    // Batched variant requires rank-3 tensors (batch, rows, cols) — the old
    // "must be a matrix" wording was misleading for a 3D requirement.
    if (src.DimensionCount != 3)
    {
        throw new ArgumentException("src must be a 3D (batched matrix) tensor", nameof(src));
    }

    if (m1.DimensionCount != 3)
    {
        throw new ArgumentException("m1 must be a 3D (batched matrix) tensor", nameof(m1));
    }

    if (m2.DimensionCount != 3)
    {
        throw new ArgumentException("m2 must be a 3D (batched matrix) tensor", nameof(m2));
    }

    // The batch dimension must agree across all operands before handing off to GemmBatch.
    if (src.Sizes[0] != m1.Sizes[0] || src.Sizes[0] != m2.Sizes[0])
    {
        throw new InvalidOperationException($"Batch size mismatch, srcBatch = {src.Sizes[0]}, m1Batch = {m1.Sizes[0]}, m2Batch = {m2.Sizes[0]}");
    }

    // Per-slice shape check: (b, n, k) x (b, k, m) -> (b, n, m).
    // The message now reports the dimensions that are actually compared
    // (the original printed Sizes[0]/Sizes[1], which do not match this condition).
    if (src.Sizes[1] != m1.Sizes[1] || src.Sizes[2] != m2.Sizes[2] || m1.Sizes[2] != m2.Sizes[1])
    {
        throw new InvalidOperationException($"Size mismatch, srcSize1 = {src.Sizes[1]}, m1Size1 = {m1.Sizes[1]}, srcSize2 = {src.Sizes[2]}, m2Size2 = {m2.Sizes[2]}, m1Size2 = '{m1.Sizes[2]}', m2Size1 = '{m2.Sizes[1]}'");
    }

    Tensor writeTarget = TensorResultBuilder.GetWriteTarget(result, src, true, src.Sizes);

    // GemmBatch accumulates into its output, so seed the target with src
    // when it is not already the same tensor.
    if (writeTarget != src)
    {
        Ops.Copy(writeTarget, src);
    }

    CudaMatrixMulMM.GemmBatch(context, alpha, m1, m2, beta, writeTarget);
    return writeTarget;
}
public NDArray Addmm(NDArray result, float beta, NDArray src, float alpha, NDArray m1, NDArray m2)
{
    // GEMM: writeTarget = beta * src + alpha * (m1 x m2).
    // All inputs must be 2D CUDA arrays of the same element type; returns the write target
    // (either `result` or a fresh array shaped like `src`).
    var context = CudaHelpers.TSContextForTensor(src);

    if (src.ElementType != m1.ElementType || src.ElementType != m2.ElementType || (result != null && result.ElementType != src.ElementType))
    {
        throw new InvalidOperationException("All tensors must have the same element type");
    }

    if (result != null && !(result.Storage is CudaStorage))
    {
        throw new ArgumentException("result must be a CUDA tensor", nameof(result));
    }

    if (!(m1.Storage is CudaStorage))
    {
        throw new ArgumentException("m1 must be a CUDA tensor", nameof(m1));
    }

    if (!(m2.Storage is CudaStorage))
    {
        throw new ArgumentException("m2 must be a CUDA tensor", nameof(m2));
    }

    if (src.DimensionCount != 2)
    {
        throw new ArgumentException("src must be a matrix", nameof(src));
    }

    if (m1.DimensionCount != 2)
    {
        throw new ArgumentException("m1 must be a matrix", nameof(m1));
    }

    if (m2.DimensionCount != 2)
    {
        throw new ArgumentException("m2 must be a matrix", nameof(m2));
    }

    // Shape check: (n, k) x (k, m) -> (n, m). Include the actual dimensions in the
    // message (the original bare "Size mismatch" gave no diagnostic detail, unlike
    // the Tensor overload of Addmm elsewhere in this file).
    if (src.Shape[0] != m1.Shape[0] || src.Shape[1] != m2.Shape[1] || m1.Shape[1] != m2.Shape[0])
    {
        throw new InvalidOperationException($"Size mismatch, srcShape0 = {src.Shape[0]}, m1Shape0 = {m1.Shape[0]}, srcShape1 = {src.Shape[1]}, m2Shape1 = {m2.Shape[1]}, m1Shape1 = {m1.Shape[1]}, m2Shape0 = {m2.Shape[0]}");
    }

    var writeTarget = TensorResultBuilder.GetWriteTarget(result, src, true, src.Shape);

    // Gemm accumulates into its output, so seed the target with src when needed.
    if (writeTarget != src)
    {
        Ops.Copy(writeTarget, src);
    }

    CudaMatrixMulMM.Gemm(context, alpha, m1, m2, beta, writeTarget);
    return writeTarget;
}
public NDArray Dot(NDArray result, NDArray lhs, NDArray rhs)
{
    // Dispatches to the appropriate CUDA multiply based on operand ranks:
    //   1D x 1D -> dot product, 2D x 1D/vector -> matrix-vector, 2D x 2D -> matrix-matrix.
    var context = CudaHelpers.TSContextForTensor(lhs);

    if (lhs.DimensionCount == 1 && rhs.DimensionCount == 1)
    {
        return CudaMatrixMulDot.Dot(context, result, lhs, rhs);
    }
    else if (lhs.DimensionCount == 2 && (rhs.DimensionCount == 1 || rhs.PossibleVector))
    {
        // Flatten rhs so a (k, 1)-shaped "possible vector" is accepted, then
        // restore a column-vector shape on the result.
        return CudaMatrixMulMV.Mul_M_V(context, result, lhs, rhs.Ravel()).Reshape(lhs.Shape[0], 1);
    }
    else if (lhs.DimensionCount == 2 && rhs.DimensionCount == 2)
    {
        return CudaMatrixMulMM.Mul_M_M(context, result, lhs, rhs);
    }
    else
    {
        // BUG FIX: the original called string.Format with placeholders but no
        // arguments, which throws FormatException instead of this message.
        throw new NotSupportedException($"Multiplication of {lhs.DimensionCount}D with {rhs.DimensionCount}D tensor is not supported");
    }
}
public Tensor Dot(Tensor result, Tensor lhs, Tensor rhs)
{
    // Dispatches to the appropriate CUDA multiply based on operand ranks:
    //   1D x 1D -> dot product, 2D x 1D -> matrix-vector, 2D x 2D -> matrix-matrix.
    var context = CudaHelpers.TSContextForTensor(lhs);

    if (lhs.DimensionCount == 1 && rhs.DimensionCount == 1)
    {
        return CudaMatrixMulDot.Dot(context, result, lhs, rhs);
    }
    else if (lhs.DimensionCount == 2 && rhs.DimensionCount == 1)
    {
        return CudaMatrixMulMV.Mul_M_V(context, result, lhs, rhs);
    }
    else if (lhs.DimensionCount == 2 && rhs.DimensionCount == 2)
    {
        return CudaMatrixMulMM.Mul_M_M(context, result, lhs, rhs);
    }
    else
    {
        // BUG FIX: the original called string.Format with placeholders but no
        // arguments, which throws FormatException instead of this message.
        throw new NotSupportedException($"Multiplication of {lhs.DimensionCount}D with {rhs.DimensionCount}D tensor is not supported");
    }
}
public static Tensor Addmm(Tensor result, float beta, Tensor src, float alpha, Tensor m1, Tensor m2)
{
    // GEMM: writeTarget = beta * src + alpha * (m1 x m2) for 2D CUDA tensors.
    // Exceptions are logged with their stack trace and then rethrown unchanged.
    try
    {
        var context = CudaHelpers.TSContextForTensor(src);

        // All operands (and the optional result) must share one element type.
        bool sameType = src.ElementType == m1.ElementType
                        && src.ElementType == m2.ElementType
                        && (result == null || result.ElementType == src.ElementType);
        if (!sameType)
        {
            throw new InvalidOperationException("All tensors must have the same element type");
        }

        // Every tensor involved has to live in CUDA storage.
        if (result != null && !(result.Storage is CudaStorage))
        {
            throw new ArgumentException("result must be a CUDA tensor", nameof(result));
        }
        if (!(m1.Storage is CudaStorage))
        {
            throw new ArgumentException("m1 must be a CUDA tensor", nameof(m1));
        }
        if (!(m2.Storage is CudaStorage))
        {
            throw new ArgumentException("m2 must be a CUDA tensor", nameof(m2));
        }

        // Only plain (rank-2) matrices are accepted here.
        if (src.DimensionCount != 2)
        {
            throw new ArgumentException("src must be a matrix", nameof(src));
        }
        if (m1.DimensionCount != 2)
        {
            throw new ArgumentException("m1 must be a matrix", nameof(m1));
        }
        if (m2.DimensionCount != 2)
        {
            throw new ArgumentException("m2 must be a matrix", nameof(m2));
        }

        // Shape agreement: (n, k) x (k, m) must land in an (n, m) destination.
        bool shapesAgree = src.Sizes[0] == m1.Sizes[0]
                           && src.Sizes[1] == m2.Sizes[1]
                           && m1.Sizes[1] == m2.Sizes[0];
        if (!shapesAgree)
        {
            throw new InvalidOperationException($"Size mismatch, srcSize0 = {src.Sizes[0]}, m1Size0 = {m1.Sizes[0]}, srcSize1 = {src.Sizes[1]}, m2Size1 = {m2.Sizes[1]}, m1Size1 = '{m1.Sizes[1]}', m2Size0 = '{m2.Sizes[0]}'");
        }

        var target = TensorResultBuilder.GetWriteTarget(result, src, false, src.Sizes);

        // Gemm accumulates into its destination, so pre-load it with src
        // unless the destination already is src.
        if (target != src)
        {
            Ops.Copy(target, src);
        }

        CudaMatrixMulMM.Gemm(context, alpha, m1, m2, beta, target);
        return target;
    }
    catch (Exception err)
    {
        Logger.WriteLine($"Exception in Addmm: '{err.Message}'");
        Logger.WriteLine($"Call stack: '{err.StackTrace}'");
        throw;
    }
}