public void CanBroadcast_2_to_3_1()
{
    // http://www.onlamp.com/pub/a/python/2000/09/27/numerically.html?page=2
    //
    // NumPy equivalent:
    //   z = np.array([1, 2])
    //   v = np.array([[3], [4], [5]])
    //   z + v
    //
    // When comparing the size of each axis, broadcasting can also occur
    // if either one of the compared axes has a size of one.
    var row = NN.Array(new[] { 1, 2 });
    AssertArray.AreEqual(row.Shape, new int[] { 2 });

    var column = NN.Array(new[,] { { 3 }, { 4 }, { 5 } });
    AssertArray.AreEqual(column.Shape, new int[] { 3, 1 });

    AssertArray.AreEqual(new[,] { { 4, 5 }, { 5, 6 }, { 6, 7 } }, row + column);
    // Here BOTH operands were virtually extended to shape (3, 2):
    // broadcasting happened on each side, which is only possible
    // when the axis size of one of the arrays is one.
}
/// <summary>
/// Checks the gradient of an expression without inputs against a finite-difference
/// estimate. Tries <paramref name="repeat"/> steps, alternating the sign of epsilon
/// and multiplying its magnitude by 10 every second iteration.
/// </summary>
/// <param name="expr">Scalar expression whose gradient is verified.</param>
/// <param name="W">Scalar the gradient is taken with respect to.</param>
/// <param name="epsilon">Initial finite-difference step size.</param>
/// <param name="relativeErr">Maximum allowed relative error.</param>
/// <param name="absErr">Maximum allowed absolute error.</param>
/// <param name="repeat">Number of epsilon values to try.</param>
/// <exception cref="Exception">Thrown when any comparison fails; the message lists every failure.</exception>
public static void PassesGradientCheck(Scalar<float> expr, Scalar<float> W, float epsilon = 0.001f, float relativeErr = 1e-3f, float absErr = 1e-4f, int repeat = 6)
{
    var checkGrad = T.RandomGradientCheck(EmptyArray<IVar>.Value, expr, W);
    var fault = 0;
    var errors = "";
    for (int _ = 0; _ < repeat; ++_)
    {
        // Alternate step sign; magnitude grows tenfold every two iterations (below).
        var eps = (_ % 2 == 0) ? epsilon : -epsilon;
        var checkRes = checkGrad(eps);
        var finite = checkRes.Item1;
        var backpropagated = checkRes.Item2;
        if (!AssertArray.CheckAreAlmostEqual(finite, backpropagated, relativeErr, absErr))
        {
            var abs = Math.Abs(finite - backpropagated);
            // Symmetric relative error: 2|a-b| / (|a| + |b|).
            var relative = 2 * abs / (Math.Abs(finite) + Math.Abs(backpropagated));
            errors += $"For epsilon {eps} expected: {finite}, actual {backpropagated}, diff {abs}, relative {relative}.\n";
            ++fault;
        }
        if (_ % 2 == 1)
        {
            epsilon *= 10;
        }
    }
    if (fault > 0)
    {
        // Interpolation calls ToString() implicitly; the explicit call was redundant.
        throw new Exception($"The computed gradient of {W} doesn't match finite difference (failed {fault} times over {repeat}).\n{errors}");
    }
}
/// <summary>
/// Checks the gradient of an expression with one input.
/// If a shape of the input is unknown, it will be replaced by 10.
/// </summary>
public static void PassesGradientCheck<X>(Tensor<X>.Var input, Scalar<float> expr, Tensor<float> W, float epsilon = 0.001f, float relativeErr = 1e-3f, float absErr = 1e-4f, int repeat = 50, Func<Array<X>> init = null)
{
    // Non-constant dims default to 10 so a concrete sample can be drawn.
    var sampleShape = input.Shape.Select(s => (s as Scalar<int>.Const)?.Value ?? 10).ToArray();
    var checkGrad = T.RandomGradientCheck(new[] { input }, expr, W);
    init = init ?? (() => NN.Random.Uniform(-1f, 1f, sampleShape).As<X>());

    var failures = 0;
    var report = "";
    for (int iter = 0; iter < repeat; ++iter)
    {
        var sample = init();
        var comparison = checkGrad(sample, epsilon);
        var finite = comparison.Item1;
        var backpropagated = comparison.Item2;
        if (AssertArray.CheckAreAlmostEqual(finite, backpropagated, relativeErr, absErr))
            continue;

        var abs = Math.Abs(finite - backpropagated);
        // Symmetric relative error: 2|a-b| / (|a| + |b|).
        var relative = 2 * abs / (Math.Abs(finite) + Math.Abs(backpropagated));
        report += $"Expected: {finite}, actual {backpropagated}, diff {abs}, relative {relative}.\n";
        ++failures;
    }

    if (failures > 0)
        throw new Exception($"The computed gradient of {W.Name} doesn't match finite difference (failed {failures} times over {repeat}).\n{report}");
}
public void InnerAsEinsteinSumMatchesInner()
{
    // The inner product expressed as an Einstein sum must match Dot.
    var x = NN.Random.Uniform(-1f, 1f, 10);
    var y = NN.Random.Uniform(-1f, 1f, 10);
    // Use approximate comparison like the other EinsteinSum tests:
    // both sides reduce over 'i' and may accumulate floats in different orders.
    AssertArray.AreAlmostEqual(x.Dot(y), NN.EinsteinSum(x, y, "i,i->"));
}
public void TensorDotAsEinsteinSumMatchesTensorDot()
{
    // Contract axis 1 of the left operand with axis 0 of the right one,
    // which is exactly "ij,jkl->ikl" in Einstein notation.
    var left = NN.Random.Uniform(-1f, 1f, 10, 20);
    var right = NN.Random.Uniform(-1f, 1f, 20, 13, 5);
    var viaTensorDot = NN.TensorDot(left, new[] { 1 }, right, new[] { 0 });
    var viaEinsteinSum = NN.EinsteinSum(left, right, "ij,jkl->ikl");
    AssertArray.AreAlmostEqual(viaTensorDot, viaEinsteinSum);
}
public void DotAsEinsteinSumMatchesDot()
{
    // Matrix multiplication is "ij,jk->ik" in Einstein notation.
    var left = NN.Random.Uniform(-1f, 1f, 10, 20);
    var right = NN.Random.Uniform(-1f, 1f, 20, 13);
    var viaEinsteinSum = NN.EinsteinSum(left, right, "ij,jk->ik");
    AssertArray.AreAlmostEqual(left.Dot(right), viaEinsteinSum);
}
public void CanBroadcast_3_to_2_3()
{
    // http://www.onlamp.com/pub/a/python/2000/09/27/numerically.html?page=2
    //
    // NumPy equivalent:
    //   a = np.array([[1, 2, 3], [4, 5, 6]])
    //   b = np.array([[7, 8, 9]])
    //   a + b
    var matrix = NN.Array(new[,] { { 1, 2, 3 }, { 4, 5, 6 } });
    var vector = NN.Array(new[] { 7, 8, 9 });
    matrix.AssertOfShape(2, 3);
    vector.AssertOfShape(3);

    var expectedSum = NN.Array(new[,] { { 8, 10, 12 }, { 11, 13, 15 } });
    var expectedProduct = NN.Array(new[,] { { 7, 16, 27 }, { 28, 40, 54 } });

    AssertArray.GenerateTests(matrix, vector, NN.Ones<int>, (m, v) => AssertArray.AreEqual(expectedSum, m + v));
    AssertArray.GenerateTests(matrix, vector, NN.Zeros<int>, (m, v) => AssertArray.AreEqual(expectedProduct, m * v));
}
public void TestLoop1()
{
    // Computing tanh(x(t).dot(W) + b) elementwise.
    // http://deeplearning.net/software/theano/tutorial/loop.html

    // Symbolic variables.
    var X = T.Matrix<float>("x");
    var W = T.Matrix<float>("W");
    var b_sym = T.Vector<float>("b_sym");

    // Scan applies the body once per element of the sequence X.
    var scanned = T.Scan(v => T.Tanh(T.Dot(v, W) + b_sym), sequence: X);
    var computeElementwise = T.Function(inputs: new[] { X, W, b_sym }, output: scanned);

    // Concrete test values.
    var x = NN.Eye<float>(2);
    var w = NN.Ones<float>(2, 2);
    var b = NN.Ones<float>(2);
    b.Item[1] = 2;

    var actual = computeElementwise(new[] { x, w, b });
    var expected = NN.Tanh(x.Dot(w) + b);
    AssertArray.AreAlmostEqual(expected[0], actual[0]);
}
public void OuterAsEinsteinSumMatchesOuter()
{
    // "i,j->ij" is the outer product: element-wise products only,
    // so exact equality is expected here.
    var u = NN.Random.Uniform(-1f, 1f, 10);
    var v = NN.Random.Uniform(-1f, 1f, 15);
    AssertArray.AreEqual(u.Outer(v), NN.EinsteinSum(u, v, "i,j->ij"));
}
public void TestCompatibilityHeapsortBatchBatchParallel()
{
    // The parallel and the sequential batched heapsort must agree.
    const int neighbors = 6;
    var queries = NN.Random.Normal(0f, 0.1f, 4, 30);
    int columns = queries.Shape[1];

    var sequentialDist = new float[columns][];
    var parallelDist = new float[columns][];
    var sequentialIdx = new int[columns][];
    var parallelIdx = new int[columns][];
    for (int i = 0; i < columns; i++)
    {
        sequentialDist[i] = new float[neighbors];
        parallelDist[i] = new float[neighbors];
        sequentialIdx[i] = new int[neighbors];
        parallelIdx[i] = new int[neighbors];
    }

    _w2v.NBestHeap(queries, sequentialDist, sequentialIdx);
    _w2v.NBestHeapParallel(queries, parallelDist, parallelIdx);

    for (int i = 0; i < columns; i++)
    {
        AssertArray.AreAlmostEqual(sequentialDist[i], parallelDist[i]);
        AssertArray.AreEqual(sequentialIdx[i], parallelIdx[i]);
    }
}
public void TestHeapsortSingle()
{
    var distances = new float[4];
    var indices = new int[4];

    // First query vector.
    _w2v.NBestHeap(_test_1, distances, indices);
    AssertArray.AreAlmostEqual(new[] { 1.34889853f, 1.17417169f, 1.15919185f, 0.866888f }, distances);
    AssertArray.AreEqual(new[] { 8, 2, 0, 5 }, indices);

    // Second query vector, reusing the same output buffers.
    _w2v.NBestHeap(_test_2, distances, indices);
    AssertArray.AreAlmostEqual(new[] { 4.63800335f, 4.19298649f, 3.162262f, 1.9904952f }, distances);
    AssertArray.AreEqual(new[] { 1, 7, 3, 4 }, indices);
}
public void TestRange()
{
    var a = NN.Range(4);
    // Range(4) yields the integers 0..3.
    AssertArray.AreEqual(new[] { 0, 1, 2, 3 }, a);
    // Indexing with `_` returns the same four elements (full slice).
    AssertArray.AreEqual(new[] { 0, 1, 2, 3 }, a[_]);
    // The tuple slice (0, 2) takes the half-open prefix [0, 2).
    AssertArray.AreEqual(new[] { 0, 1 }, a[(0, 2)]);
    // NOTE(review): this method appears truncated in this chunk — no closing
    // brace is visible; confirm the remainder of the test in the full file.
public void TestSparseMatrixMultiplication()
{
    // https://en.wikipedia.org/wiki/Sparse_matrix
    // The same 4x4 matrix, dense and in compressed sparse row form
    // (values / column indices / row pointers).
    var aDense = new float[]
    {
        0, 0, 0, 0,
        5, 8, 0, 0,
        0, 0, 3, 0,
        0, 6, 0, 0
    };
    var values = new float[] { 5, 8, 3, 6 };
    var columnIndices = new int[] { 0, 1, 2, 1 };
    var rowPointers = new int[] { 0, 0, 2, 3, 4 };

    // Dense 4x6 right-hand side.
    var bDense = new float[]
    {
        1, 2, 0, 0, 0, 0,
        0, 3, 0, 4, 0, 0,
        0, 0, 5, 6, 7, 0,
        0, 0, 0, 0, 0, 8,
    };

    // Multiply both representations by B and compare the 4x6 results.
    var denseResult = new float[4 * 6];
    var sparseResult = new float[4 * 6];
    Provider.sgemm(Order.RowMajor, Transpose.NoTrans, Transpose.NoTrans, 4, 6, 4, 1, aDense, 0, 4, bDense, 0, 6, 0, denseResult, 0, 6);
    Provider.scsrmm(Transpose.NoTrans, 4, 6, 4, 1, values, 0, columnIndices, 0, rowPointers, 0, bDense, 0, 6, 0, sparseResult, 0, 6);
    AssertArray.AreEqual(denseResult, sparseResult);
}
public void TestShuffleInplaceDim3()
{
    // One permutation per axis of a (3, 2, 4) array.
    var shuffledAxis0 = NN.Range(24).Reshape(3, 2, 4);
    var shuffledAxis1 = NN.Range(24).Reshape(3, 2, 4);
    var shuffledAxis2 = NN.Range(24).Reshape(3, 2, 4);

    shuffledAxis0.ShuffleInplace(perms: new[] { 0, 2, 1 });
    shuffledAxis1.ShuffleInplace(perms: new[] { 1, 0 }, axis: 1);
    shuffledAxis2.ShuffleInplace(perms: new[] { 3, 1, 0, 2 }, axis: 2);

    var expectedAxis0 = NN.Array(new[] { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15 }).Reshape(3, 2, 4);
    var expectedAxis1 = NN.Array(new[] { 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11, 20, 21, 22, 23, 16, 17, 18, 19 }).Reshape(3, 2, 4);
    var expectedAxis2 = NN.Array(new[] { 2, 1, 3, 0, 6, 5, 7, 4, 10, 9, 11, 8, 14, 13, 15, 12, 18, 17, 19, 16, 22, 21, 23, 20 }).Reshape(3, 2, 4);

    AssertArray.AreEqual(shuffledAxis0, expectedAxis0);
    AssertArray.AreEqual(shuffledAxis1, expectedAxis1);
    AssertArray.AreEqual(shuffledAxis2, expectedAxis2);
}
public void TestLtOnInt()
{
    // Element-wise "<" on int arrays: 1 where the comparison holds, 0 elsewhere.
    var lhs = NN.Array(1, 5, 3, 2, 0, -1);
    var rhs = NN.Array(0, 6, 1, 3, 1, 2);
    AssertArray.AreEqual(lhs < rhs, NN.Array(0, 1, 0, 1, 1, 1));
}
public void SumAsEinsteinSumMatchesSum()
{
    // "i,->" reduces away the only axis, i.e. a plain sum
    // (the second operand is a one-element array of ones).
    var data = NN.Random.Uniform(-1f, 1f, 10);
    var one = NN.Ones(1);
    AssertArray.AreEqual(data.Sum(axis: 0), NN.EinsteinSum(data, one, "i,->"));
}
public void ComplexReshapeWorksWithCopyFlag()
{
    // Flattening a transposed view cannot be done as a view;
    // with allowCopy the reshape is permitted to copy.
    var source = NN.Range(4 * 3).Reshape(4, 3);
    var flattened = source.T.Reshape(new int[] { -1 }, allowCopy: true);
    var expected = NN.Array(new int[] { 0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11 });
    AssertArray.AreEqual(expected, flattened);
}
public void TestOnehotDotM()
{
    // OneHot(X.Shape, 1, a) dotted with M should equal the dense
    // equivalent: a copy of X whose row 1 is replaced by a, dotted with M.
    var M = T.Matrix<float>("M");
    var X = T.Matrix<float>("X");
    var a = T.Vector<float>("a");
    var B = T.Dot(T.OneHot(X.Shape, 1, a), M);
    var evaluate = Op.Function(input: (M, X, a), output: B);

    // Concrete inputs.
    var mValue = NN.Array(new float[,] { { 0, 3, 7 }, { 5, 2, 0 } });
    var xValue = NN.Zeros(4, 2);
    var aValue = NN.Array<float>(1, -1);
    var actual = evaluate(mValue, xValue, aValue);

    // Dense reference computation.
    var dense = xValue.Copy();
    dense[1] = aValue;
    AssertArray.AreEqual(dense.Dot(mValue), actual);
}
public void TestSumInt4_2()
{
    // Adding a (3, 4) array to a (1, 2, 3, 4) array broadcasts
    // over the two leading axes.
    var big = NN.Range(24).Reshape(1, 2, 3, 4);
    var small = NN.Range(12).Reshape(3, 4);
    var sum = big + small;
    var expected = NN.Array(new int[,,,]
    {
        {
            { { 0, 2, 4, 6 }, { 8, 10, 12, 14 }, { 16, 18, 20, 22 } },
            { { 12, 14, 16, 18 }, { 20, 22, 24, 26 }, { 28, 30, 32, 34 } }
        }
    });
    AssertArray.AreEqual(expected.Values, sum.Values);
}
public void TestHeapsortBatch()
{
    const int neighbors = 3;
    int columns = _test_3.Shape[1];
    var distances = new float[columns][];
    var indices = new int[columns][];
    for (int i = 0; i < columns; i++)
    {
        distances[i] = new float[neighbors];
        indices[i] = new int[neighbors];
    }

    _w2v.NBestHeap(_test_3, distances, indices);

    // Flatten the per-column results before comparing against the expectations.
    var flatDistances = distances.SelectMany(d => d).ToArray();
    var flatIndices = indices.SelectMany(w => w).ToArray();
    AssertArray.AreAlmostEqual(new[] { 1.34889853f, 1.17417169f, 1.15919185f, 4.63800335f, 4.19298649f, 3.162262f }, flatDistances);
    AssertArray.AreEqual(new[] { 8, 2, 0, 1, 7, 3 }, flatIndices);
}
public void TestCompatibilityHeapsortBatchSingle()
{
    // The batched heapsort must match the single-column variant, column by column.
    const int neighbors = 3;
    var queries = NN.Random.Normal(0.1f, 0.1f, 4, 20);
    int columns = queries.Shape[1];

    var batchDist = new float[columns][];
    var batchIdx = new int[columns][];
    for (int i = 0; i < columns; i++)
    {
        batchDist[i] = new float[neighbors];
        batchIdx[i] = new int[neighbors];
    }
    _w2v.NBestHeap(queries, batchDist, batchIdx);

    // Single-column buffers are reused across iterations.
    var singleDist = new float[neighbors];
    var singleIdx = new int[neighbors];
    for (int i = 0; i < columns; i++)
    {
        _w2v.NBestHeap(queries[Slicer._, i], singleDist, singleIdx);
        AssertArray.AreEqual(singleIdx, batchIdx[i]);
        AssertArray.AreAlmostEqual(singleDist, batchDist[i]);
    }
}
public void ArgmaxWorksOnVec()
{
    var vec = NN.Array(0, 1, 5, 1, 0);
    // Plain Argmax returns the index of the maximum as a scalar.
    Assert.AreEqual(2, vec.Argmax());
    // With keepDims the result stays an array.
    AssertArray.AreEqual(NN.Array(2), vec.Argmax(axis: 0, keepDims: true));
}
public void TestLogSumExp()
{
    // LogSumExp must agree with the naive log(sum(exp(X), axis: -1)).
    var X = NN.Array(new float[,] { { 1, 3 }, { 2, 5 } });
    var naive = NN.Log(NN.Sum(NN.Exp(X), axis: -1));
    AssertArray.AreAlmostEqual(naive, NN.LogSumExp(X));
}
public void TestTensorVar()
{
    // A function that returns its own input acts as the identity.
    var input = Op.Matrix<float>("x");
    var identity = Op.Function(input: input, output: input);
    AssertArray.AreEqual(identity(NN.Const(3.0f, 2, 2)), NN.Const(3.0f, 2, 2));
    AssertArray.AreNotEqual(identity(NN.Const(3.0f, 2, 2)), NN.Const(4.0f, 2, 2));
}
public void TestTensorConst()
{
    // A constant tensor evaluates to its value, with no inputs required.
    var constant = Op.Const(2.0f, 2, 2);
    var evaluate = Op.Function(output: constant);
    AssertArray.AreEqual(evaluate(), NN.Const(2.0f, 2, 2));
    AssertArray.AreNotEqual(evaluate(), NN.Const(3.0f, 2, 2));
}
public void TestMin()
{
    // Start from Range(8) reshaped to 4x2, then override two entries
    // so both axes have non-trivial minima.
    var matrix = NN.Range(4 * 2).Reshape(4, 2);
    matrix.Item[2, 1] = -1;
    matrix.Item[0, 1] = 10;
    // The matrix is now: { {0, 10}, {2, 3}, {4, -1}, {6, 7} }.
    AssertArray.AreEqual(NN.Array(new[] { 0, -1 }), matrix.Min(axis: 0));
    AssertArray.AreEqual(NN.Array(new[] { 0, 2, -1, 6 }), matrix.Min(axis: 1));
}
public void TestRange()
{
    // The symbolic Range must agree with the eager NN.Range.
    var count = T.Scalar<int>("i");
    var range = T.Range(count);
    var evaluate = T.Function(count, range);
    AssertArray.AreEqual(NN.Range(10), evaluate(10));
}
public void TestMax()
{
    // Start from Range(8) reshaped to 4x2, then override two entries
    // so both axes have non-trivial maxima.
    var matrix = NN.Range(4 * 2).Reshape(4, 2);
    matrix.Item[2, 1] = -1;
    matrix.Item[0, 1] = 10;
    // The matrix is now: { {0, 10}, {2, 3}, {4, -1}, {6, 7} }.
    AssertArray.AreEqual(NN.Array(new[] { 6, 10 }), matrix.Max(axis: 0));
    AssertArray.AreEqual(NN.Array(new[] { 10, 3, 4, 7 }), matrix.Max(axis: 1));
}
public void TestDropAt()
{
    Dim d0 = 10, d1 = 11, d2 = 12, d3 = 13;
    var dims = new[] { d0, d1, d2, d3 };
    // Dropping a single axis...
    AssertArray.AreEqual(new[] { d1, d2, d3 }, dims.DropAt(0));
    AssertArray.AreEqual(new[] { d0, d2, d3 }, dims.DropAt(1));
    // ...and dropping two axes at once.
    AssertArray.AreEqual(new[] { d0, d3 }, dims.DropAt(1, 2));
}
public void CanReshape_6_to_2_3_WithForcedCopy()
{
    var flat = NN.Range(6);
    var expected = NN.Array(new[,] { { 0, 1, 2 }, { 3, 4, 5 } });
    // forceCopy: true requests a fresh allocation instead of a view.
    AssertArray.GenerateTests(flat, arr => AssertArray.AreEqual(expected, arr.Reshape(new[] { 2, 3 }, forceCopy: true)));
}