/// <summary>
/// Element-wise addition of two int32 NDArrays, vectorized with
/// <see cref="System.Numerics.Vector{T}"/> where the hardware supports it.
/// </summary>
/// <param name="x">Left operand; its int buffer drives the result length.</param>
/// <param name="y">Right operand; assumed to have at least as many ints as x.</param>
/// <returns>A new array holding x + y element-wise.</returns>
public override NDArray Add(NDArray x, NDArray y)
{
    // BUG FIX: the original returned base.Add(x, y) on its first statement,
    // making everything below unreachable dead code — and its rhs was read
    // from x.Data<int>() instead of y.Data<int>(), so even if reached it
    // would have computed x + x.
    int[] lhs = x.Data<int>();
    int[] rhs = y.Data<int>();
    var simdLength = Vector<int>.Count;
    var result = new int[lhs.Length];
    var i = 0;
    // Vectorized main loop: simdLength elements per iteration.
    for (i = 0; i <= lhs.Length - simdLength; i += simdLength)
    {
        var va = new Vector<int>(lhs, i);
        var vb = new Vector<int>(rhs, i);
        (va + vb).CopyTo(result, i);
    }
    // Scalar tail for the leftover elements.
    for (; i < lhs.Length; ++i)
    {
        result[i] = lhs[i] + rhs[i];
    }
    return result;
}
/// <summary>
/// Converts an NDArray into a TensorProto, copying dtype, shape and the data
/// buffer into the protobuf's typed repeated field.
/// </summary>
/// <param name="nd">Source array; Int32/Single/Double/String dtypes are supported.</param>
/// <param name="verify_shape">Unused here; kept for signature parity with TensorFlow's make_tensor_proto.</param>
/// <returns>A populated TensorProto.</returns>
/// <exception cref="NotImplementedException">Thrown for unsupported dtypes.</exception>
public static TensorProto make_tensor_proto(NDArray nd, bool verify_shape = false)
{
    var shape = nd.Storage.Shape;
    var numpy_dtype = dtypes.as_dtype(nd.dtype);
    var tensor_proto = new tensor_pb2.TensorProto
    {
        Dtype = numpy_dtype.as_datatype_enum(),
        TensorShape = shape.reshape(nd.shape).as_proto()
    };
    switch (nd.dtype.Name)
    {
        case "Int32":
            tensor_proto.IntVal.AddRange(nd.Data<int>());
            break;
        case "Single":
            tensor_proto.FloatVal.AddRange(nd.Data<float>());
            break;
        case "Double":
            tensor_proto.DoubleVal.AddRange(nd.Data<double>());
            break;
        case "String":
            // Strings go over the wire as UTF-8 byte strings.
            tensor_proto.StringVal.AddRange(nd.Data<string>().Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x)));
            break;
        default:
            // FIX: throw the specific NotImplementedException (still caught by
            // any existing catch(Exception)) instead of a bare Exception, and
            // name the offending dtype so failures are diagnosable.
            throw new NotImplementedException($"make_tensor_proto: {nd.dtype.Name}");
    }
    return tensor_proto;
}
// Runs three duplicate-finding implementations over the same int data read from
// the field `nd`; each call overwrites `repeated`, so only Method4's result is
// retained afterwards.
// NOTE(review): Method1/Method2/Method4 and the `repeated`/`nd` fields are
// defined elsewhere in this file — this looks like a benchmark/smoke harness
// that only checks each variant runs; verify that skipping "Method3" is intended.
public void FindDuplicatedNumber() { repeated = Method1(nd.Data <int>()); repeated = Method2(nd.Data <int>()); repeated = Method4(nd.Data <int>()); }
/// <summary>
/// 2-D matrix multiplication producing a result of shape
/// (x.shape[0], y.shape[1]), with rows computed in parallel.
/// Supports Int32, Single and Double dtypes.
/// </summary>
/// <param name="x">Left matrix, shape (m, k).</param>
/// <param name="y">Right matrix, shape (k, n).</param>
/// <returns>The product matrix of shape (m, n).</returns>
/// <exception cref="NotImplementedException">If either operand is not 2-D.</exception>
public virtual NDArray MatMul(NDArray x, NDArray y)
{
    if (x.ndim == 2 && y.ndim == 2)
    {
        var nd = new NDArray(x.dtype, new Shape(x.shape[0], y.shape[1]));
        // BUG FIX (all three cases): the inner reduction must run over the
        // shared dimension x.shape[1] (== y.shape[0]); the original looped to
        // nd.shape[0] (the row count of the RESULT), producing wrong values for
        // any non-square x.
        switch (nd.dtype.Name)
        {
            case "Int32":
                Parallel.ForEach(Enumerable.Range(0, nd.shape[0]), (row) =>
                {
                    for (int col = 0; col < nd.shape[1]; col++)
                    {
                        int sum = 0;
                        for (int s = 0; s < x.shape[1]; s++)
                            sum += x.Data<int>(row, s) * y.Data<int>(s, col);
                        nd[row, col] = sum;
                    }
                });
                break;
            case "Single":
                Parallel.ForEach(Enumerable.Range(0, nd.shape[0]), (row) =>
                {
                    for (int col = 0; col < nd.shape[1]; col++)
                    {
                        float sum = 0;
                        for (int s = 0; s < x.shape[1]; s++)
                            sum += x.Data<float>(row, s) * y.Data<float>(s, col);
                        nd[row, col] = sum;
                    }
                });
                break;
            case "Double":
                Parallel.ForEach(Enumerable.Range(0, nd.shape[0]), (row) =>
                {
                    for (int col = 0; col < nd.shape[1]; col++)
                    {
                        double sum = 0;
                        for (int s = 0; s < x.shape[1]; s++)
                            sum += x.Data<double>(row, s) * y.Data<double>(s, col);
                        nd[row, col] = sum;
                    }
                });
                break;
        }
        return nd;
    }
    throw new NotImplementedException("matmul");
}
// NOTE(review): the length computation counts "," occurrences, which gives
// (element count - 1) per sequence — confirm this off-by-one is intentional.
// The "Int32" branch reads sequences.Data<object>(i) despite the Int32 dtype —
// verify that is what the storage actually returns for that case.
// The final per-row switch contains only a default case that throws, so once
// the length/maxlen/value preamble completes, this method currently throws
// NotImplementedException for every input; the actual padding is unimplemented.
/// <summary> /// Pads sequences to the same length. /// https://keras.io/preprocessing/sequence/ /// https://faroit.github.io/keras-docs/1.2.0/preprocessing/sequence/ /// </summary> /// <param name="sequences">List of lists, where each element is a sequence.</param> /// <param name="maxlen">Int, maximum length of all sequences.</param> /// <param name="dtype">Type of the output sequences.</param> /// <param name="padding">String, 'pre' or 'post':</param> /// <param name="truncating">String, 'pre' or 'post'</param> /// <param name="value">Float or String, padding value.</param> /// <returns></returns> public NDArray pad_sequences(NDArray sequences, int?maxlen = null, string dtype = "int32", string padding = "pre", string truncating = "pre", object value = null) { int[] length = new int[sequences.size]; switch (sequences.dtype.Name) { case "Object": for (int i = 0; i < sequences.size; i++) { switch (sequences.Data <object>(i)) { case string data: length[i] = Regex.Matches(data, ",").Count; break; } } break; case "Int32": for (int i = 0; i < sequences.size; i++) { length[i] = Regex.Matches(sequences.Data <object>(i).ToString(), ",").Count; } break; default: throw new NotImplementedException($"pad_sequences: {sequences.dtype.Name}"); } if (maxlen == null) { maxlen = length.Max(); } if (value == null) { value = 0f; } var nd = new NDArray(np.int32, new Shape(sequences.size, maxlen.Value)); for (int i = 0; i < nd.shape[0]; i++) { switch (sequences[i]) { default: throw new NotImplementedException("pad_sequences"); } } return(nd); }
/// <summary>
/// Returns a boolean array where two arrays are element-wise equal within a
/// tolerance. The tolerance values are positive, typically very small numbers.
/// The relative difference (rtol * abs(b)) and the absolute difference atol
/// are added together to compare against the absolute difference between
/// a and b.
/// Warning: the default atol is not appropriate for comparing numbers that
/// are much smaller than one (see Notes).
///
/// See also <seealso cref="allclose"/>
///
/// Notes:
/// For finite values, isclose uses the following test:
/// <code>absolute(a - b) &lt;= (atol + rtol * absolute(b))</code>
/// Unlike the built-in math.isclose, the equation is not symmetric in a and b
/// — it assumes b is the reference value — so isclose(a, b) might differ from
/// isclose(b, a). Furthermore the default atol is non-zero and determines
/// which small values count as close to zero; it suits expected values of
/// order unity and should be chosen per use case. A zero atol yields False
/// whenever a or b is zero.
/// </summary>
/// <param name="a">Input array to compare with b.</param>
/// <param name="b">Input array to compare with a.</param>
/// <param name="rtol">The relative tolerance parameter (see Notes).</param>
/// <param name="atol">The absolute tolerance parameter (see Notes).</param>
/// <param name="equal_nan">Whether to compare NaN's as equal. If true, NaN's in a are considered equal to NaN's in b.</param>
/// <returns>
/// Boolean array of where a and b are equal within the given tolerance.
/// </returns>
public NDArray<bool> IsClose(NDArray a, NDArray b, double rtol = 1.0E-5, double atol = 1.0E-8, bool equal_nan = false)
{
    if (a.size > b.size)
    {
        throw new ArgumentException("Array a must not be larger in size than array b");
    }
    var result = new NDArray<bool>(a.shape);
    bool[] rdata = result.Array;
    // Integral `a` compared against floating-point `b`: promote both to double.
    // BUG FIXES vs. original: (1) the dtype test is now parenthesized — the
    // original mixed && and || without grouping, so "a is integral AND b is
    // floating" was not what it evaluated; (2) b_arr is read from `b` — the
    // original read a.Data<double>() twice, comparing a against itself;
    // (3) this branch now returns — the original fell through into the switch
    // below, which throws IncorrectTypeException for integer-backed arrays,
    // discarding the result it had just computed.
    if ((a.dtype == np.uint8 || a.dtype == np.int16 || a.dtype == np.int32 || a.dtype == np.int64) && (b.dtype == typeof(double) || b.dtype == typeof(float)))
    {
        double[] a_arr = a.Data<double>();
        double[] b_arr = b.Data<double>();
        for (int i = 0; i < a_arr.Length; i++)
        {
            rdata[i] = is_within_tol(a_arr[i], b_arr[i], rtol, atol, equal_nan);
        }
        return result;
    }
    var adata = a.Array;
    switch (adata)
    {
        case double[] a_arr:
        {
            var b_arr = b.Data<double>();
            for (int i = 0; i < a_arr.Length; i++)
            {
                rdata[i] = is_within_tol(a_arr[i], b_arr[i], rtol, atol, equal_nan);
            }
            break;
        }
        case float[] a_arr:
        {
            var b_arr = b.Data<float>();
            for (int i = 0; i < a_arr.Length; i++)
            {
                rdata[i] = is_within_tol(a_arr[i], b_arr[i], rtol, atol, equal_nan);
            }
            break;
        }
        case Complex[] arr:
        {
            throw new NotImplementedException("Comparing Complex arrays is not implemented yet.");
        }
        default:
        {
            throw new IncorrectTypeException();
        }
    }
    return result;
}
// Marshals the NDArray's buffer into newly allocated unmanaged memory and wraps
// it in a new TF tensor (TF_NewTensor); the registered deallocator frees
// dotHandle once TensorFlow releases the tensor and flags `deallocator_called`
// via the closure parameter.
// NOTE(review): in the "String" case only the first element is encoded, the
// temporary `buf` allocation is never freed or handed to TF (the
// TF_SetAttrString call is commented out), and `dotHandle` still holds the
// element-size allocation while `size` is overwritten with the byte length —
// this path looks unfinished; confirm before relying on it.
private IntPtr Allocate(NDArray nd) { var dotHandle = Marshal.AllocHGlobal(nd.dtypesize * nd.size); ulong size = (ulong)(nd.size * nd.dtypesize); switch (nd.dtype.Name) { case "Int16": Marshal.Copy(nd.Data <short>(), 0, dotHandle, nd.size); break; case "Int32": Marshal.Copy(nd.Data <int>(), 0, dotHandle, nd.size); break; case "Single": Marshal.Copy(nd.Data <float>(), 0, dotHandle, nd.size); break; case "Double": Marshal.Copy(nd.Data <double>(), 0, dotHandle, nd.size); break; case "String": var value = nd.Data <string>()[0]; var bytes = Encoding.UTF8.GetBytes(value); var buf = Marshal.AllocHGlobal(bytes.Length + 1); Marshal.Copy(bytes, 0, buf, bytes.Length); //c_api.TF_SetAttrString(op, "value", buf, (uint)bytes.Length); size = (ulong)bytes.Length; break; default: throw new NotImplementedException("Marshal.Copy failed."); } var dataType = ToTFDataType(nd.dtype); var tfHandle = c_api.TF_NewTensor(dataType, nd.shape.Select(x => (long)x).ToArray(), // shape nd.ndim, dotHandle, size, (IntPtr values, IntPtr len, ref bool closure) => { // Free the original buffer and set flag Marshal.FreeHGlobal(dotHandle); closure = true; }, ref deallocator_called); return(tfHandle); }
/// <summary>
/// Sums every element of <paramref name="x"/>; only Int32 and Single
/// backing data are supported.
/// </summary>
/// <param name="x">Array whose elements are accumulated.</param>
/// <returns>The total as an NDArray (via the implicit scalar conversion).</returns>
/// <exception cref="NotImplementedException">For any other dtype.</exception>
private NDArray Sum(NDArray x)
{
    var typeCode = Type.GetTypeCode(x.dtype);
    if (typeCode == TypeCode.Int32)
        return x.Data<int>().Sum();
    if (typeCode == TypeCode.Single)
        return x.Data<float>().Sum();
    throw new NotImplementedException($"DefaultEngine sum {x.dtype.Name}");
}
/// <summary>
/// Dot product dispatched on operand ranks: scalar*scalar, 1-D inner product,
/// 2-D × 1-D matrix-vector product, and 2-D × 2-D (delegated to np.matmul).
/// The scalar/vector paths only handle Int32 data.
/// </summary>
/// <param name="x">Left operand.</param>
/// <param name="y">Right operand.</param>
/// <returns>The product with numpy's dot semantics for the supported ranks.</returns>
/// <exception cref="NotImplementedException">For unsupported rank combinations.</exception>
public NDArray Dot(NDArray x, NDArray y)
{
    var dtype = x.dtype;
    if (x.ndim == 0 && y.ndim == 0)
    {
        switch (dtype.Name)
        {
            case "Int32":
                return y.Data<int>(0) * x.Data<int>(0);
        }
    }
    // BUG FIX: the original condition was (x.ndim == 1 && x.ndim == 1), which
    // never inspected y's rank — a 1-D x with a 2-D y would silently take the
    // inner-product path. Both operands must be 1-D here.
    else if (x.ndim == 1 && y.ndim == 1)
    {
        int sum = 0;
        switch (dtype.Name)
        {
            case "Int32":
                for (int i = 0; i < x.size; i++)
                {
                    sum += x.Data<int>(i) * y.Data<int>(i);
                }
                break;
        }
        return sum;
    }
    else if (x.ndim == 2 && y.ndim == 1)
    {
        var nd = new NDArray(dtype, new Shape(x.shape[0]));
        switch (dtype.Name)
        {
            case "Int32":
                for (int i = 0; i < x.shape[0]; i++)
                {
                    for (int j = 0; j < y.shape[0]; j++)
                    {
                        nd.Data<int>()[i] += x.Data<int>(i, j) * y.Data<int>(j);
                    }
                }
                break;
        }
        return nd;
    }
    else if (x.ndim == 2 && y.ndim == 2)
    {
        return np.matmul(x, y);
    }
    throw new NotImplementedException($"dot {x.ndim} * {y.ndim}");
}
// Disabled test (the [TestMethod] attribute is commented out): inverts a 3x3
// double matrix and multiplies it back, expecting approximately the identity.
// NOTE(review): the commented-out assertions check Math.Abs(value) < 1.000001
// on the diagonal, which would also pass for a value of 0 — they should compare
// Math.Abs(value - 1) < eps before this test is re-enabled.
//[TestMethod] public void Simple3x3() { var np1 = new NDArray(typeof(double), new Shape(3, 3)); var np1Arr = np1.Data <double>(); np1Arr[0] = 5; np1Arr[1] = 1; np1Arr[2] = 2; np1Arr[3] = 1; np1Arr[4] = 0; np1Arr[5] = 1; np1Arr[6] = 1; np1Arr[7] = 1; np1Arr[8] = 0; var np1Inv = np1.inv(); var OncesMatrix = np.dot(np1, np1Inv); //Assert.IsTrue(Math.Abs(OncesMatrix[0,0]) < 1.000001); //Assert.IsTrue(Math.Abs(OncesMatrix[1,1]) < 1.000001); //Assert.IsTrue(Math.Abs(OncesMatrix[2,2]) < 1.000001); //Assert.IsTrue(Math.Abs(OncesMatrix[0,1]) < 0.000001); //Assert.IsTrue(Math.Abs(OncesMatrix[0,2]) < 0.000001); //Assert.IsTrue(Math.Abs(OncesMatrix[1,0]) < 0.000001); //Assert.IsTrue(Math.Abs(OncesMatrix[1,2]) < 0.000001); //Assert.IsTrue(Math.Abs(OncesMatrix[2,0]) < 0.000001); //Assert.IsTrue(Math.Abs(OncesMatrix[2,1]) < 0.000001); }
// NOTE(review): this MatMul overload appears truncated in this view — only the
// "Int32" case of the dtype switch is present, and neither the switch nor the
// method is closed here. Unlike the other MatMul in this file, the inner
// product correctly reduces over the shared dimension x.shape[1] using
// GetIndexInShape on hoisted data buffers, optionally row-parallel when
// CPU_PARALLEL is defined. Left byte-identical; locate the rest of the method
// before editing.
public virtual NDArray MatMul(NDArray x, NDArray y) { if (x.ndim == 2 && y.ndim == 2) { var nd = new NDArray(x.dtype, new Shape(x.shape[0], y.shape[1])); switch (nd.dtype.Name) { case "Int32": { var datax = x.Data<int>(); var datay = y.Data<int>(); #if CPU_PARALLEL Parallel.For(0, nd.shape[0], (row) => #else for (int row = 0; row < nd.shape[0]; row++) #endif { for (int col = 0; col < nd.shape[1]; col++) { int sum = 0; for (int s = 0; s < x.shape[1]; s++) sum += datax[x.GetIndexInShape(row, s)] * datay[y.GetIndexInShape(s, col)]; nd[row, col] = sum; } } #if CPU_PARALLEL ); #endif } break;
/// <summary>
/// Gets the value of the byte at the coordinate position.
/// </summary>
/// <param name="coordinates">Coordinate of the byte to read.</param>
/// <returns>The value of the byte at the position identified by the coordinates list.</returns>
/// <remarks>
/// Transposes first to simulate column-major order, since NumSharp currently
/// does not seem to offer a way to configure the layout directly.
/// </remarks>
public byte At(List<int> coordinates)
{
    int[] indices = coordinates.ToArray();
    var flattened = np.transpose(this._nd).GetData(indices).flatten();
    return flattened.Data<byte>()[0];
}
/// <summary>
/// Casting a bracketed numeric string to NDArray parses it into a double vector.
/// </summary>
public void StringCast3()
{
    NDArray nd = (NDArray)"[3,1,1,2]";

    var expected = new double[] { 3, 1, 1, 2 };
    Assert.IsTrue(expected.SequenceEqual(nd.Data<double>()));
}
/// <summary>
/// Exporting a 1-D generic NDArray to a .NET array round-trips the data.
/// </summary>
public void ToDotNetArray1D()
{
    var source = new NDArray(typeof(double)).arange(9).MakeGeneric<double>();

    var exported = (double[])source.ToMuliDimArray<double>();

    Assert.IsTrue(exported.SequenceEqual(source.Data<double>()));
}
// Applies np.positive to a float vector containing a negative value and asserts
// every element is non-negative afterwards.
// NOTE(review): numpy's `positive` is the unary + operator (an identity on
// numeric values), under which -2 would remain negative and this assertion
// would fail — the test only makes sense if this library's np.positive has
// absolute-value semantics. Confirm the intended behavior of np.positive.
public void PositiveAllNegatives() { var nd = new NDArray(np.float32, 3); nd.SetData(new float[] { 1, -2, 3.3f }); nd = np.positive(nd); Assert.IsTrue(nd.Data <float>().All(v => v >= 0)); }
/// <summary>
/// Gets the underlaying tensor instance, creating it lazily on first access
/// from the variable's float data.
/// </summary>
/// <returns>The cached (or newly created) tensor.</returns>
public Tensor GetTensor()
{
    // Already materialized — hand back the cached instance.
    if (UnderlayingTensor != null)
        return UnderlayingTensor;

    UnderlayingTensor = K.CreateVariable(UnderlayingVariable.Data<float>(), Shape);
    return UnderlayingTensor;
}
/// <summary>
/// Benchmark setup: forces one-time static initialization, then builds the
/// 100k-element array, its (2, 1, 50000) shape, and the iterator under test.
/// </summary>
public void Setup()
{
    // Touch these members so their static-init cost stays out of the measurement.
    var pooled = ScalarMemoryPool.Instance;
    var byteSize = InfoOf<byte>.Size;

    shape = new Shape(2, 1, 50_000);
    ndarray = np.array(Enumerable.Range(0, 100_000).ToArray()).reshape(ref shape);
    iter = new NDIterator<int>((IMemoryBlock<int>)ndarray.Data<int>(), shape, null);
}
/// <summary>
/// eye(5, 2) places ones on the diagonal shifted two columns right;
/// the remaining 22 of the 25 cells stay zero.
/// </summary>
public void DoubleMatrix2DiagonalRight()
{
    var matrix = new NDArray(typeof(double)).eye(5, 2).MakeGeneric<double>();

    Assert.IsTrue(matrix[0, 2] == 1);
    Assert.IsTrue(matrix[1, 3] == 1);
    Assert.IsTrue(matrix[2, 4] == 1);
    // 25 cells minus 3 ones on the shifted diagonal.
    Assert.IsTrue(matrix.Data<double>().Count(x => x == 0) == 22);
}
/// <summary>
/// eye(5, -2) places ones on the diagonal shifted two rows down;
/// the remaining 22 of the 25 cells stay zero.
/// </summary>
public void DoubleMatrix2DiagonalLeft()
{
    var matrix = new NDArray(typeof(double)).eye(5, -2).MakeGeneric<double>();

    Assert.IsTrue(matrix[2, 0] == 1);
    Assert.IsTrue(matrix[3, 1] == 1);
    Assert.IsTrue(matrix[4, 2] == 1);
    // 25 cells minus 3 ones on the shifted diagonal.
    Assert.IsTrue(matrix.Data<double>().Count(x => x == 0) == 22);
}
/// <summary>
/// Returns the fraction of elements in <paramref name="x"/> whose magnitude is
/// at or below a small epsilon, i.e. the share of (approximately) zero entries.
/// </summary>
/// <param name="x">Array whose double data is inspected.</param>
/// <returns>1 - (nonzero count / total element count).</returns>
protected double _ZeroFraction(NDArray x)
{
    assert(x.shape);
    int total_elements = np.prod(x.shape);

    // Magnitudes at or below eps count as zero.
    const double eps = 1e-8;
    int nonzeroCount = 0;
    foreach (double element in x.Data<double>())
    {
        if (Math.Abs(element) > eps)
            nonzeroCount++;
    }
    return 1.0 - nonzeroCount / (double)total_elements;
}
/// <summary>
/// Fits a Gaussian Naive Bayes model: groups the training points by class,
/// zero-pads each class to the size of the largest one, then estimates a
/// per-class/per-feature mean and variance to build the Normal distribution
/// used at prediction time.
/// </summary>
/// <param name="X">Training points; exactly two features per sample are read (columns 0 and 1).</param>
/// <param name="y">Class labels, one per sample.</param>
public void fit(NDArray X, NDArray y)
{
    NDArray unique_y = y.unique<long>();

    Dictionary<long, List<List<float>>> dic = new Dictionary<long, List<List<float>>>();
    // Init uy in dic.
    // NOTE(review): labels are stored as long but enumerated via Data<int>() —
    // this relies on the library casting; verify for labels outside int range.
    foreach (int uy in unique_y.Data<int>())
    {
        dic.Add(uy, new List<List<float>>());
    }
    // Separate training points by class.
    // Shape: nb_classes * nb_samples * nb_features
    int maxCount = 0;
    for (int i = 0; i < y.size; i++)
    {
        long curClass = (long)y[i];
        List<List<float>> l = dic[curClass];
        List<float> pair = new List<float>();
        pair.Add((float)X[i, 0]);
        pair.Add((float)X[i, 1]);
        l.Add(pair);
        if (l.Count > maxCount)
        {
            maxCount = l.Count;
        }
        dic[curClass] = l;
    }
    // NOTE(review): kv.Key is used directly as the first index, so labels are
    // assumed to be 0..nb_classes-1 — confirm against the callers.
    float[,,] points = new float[dic.Count, maxCount, X.shape[1]];
    foreach (KeyValuePair<long, List<List<float>>> kv in dic)
    {
        int j = (int)kv.Key;
        // BUG FIX: iterate only over the samples this class actually has. The
        // original looped i up to maxCount and indexed kv.Value[i], throwing
        // ArgumentOutOfRangeException for every class smaller than the largest
        // one. Slots beyond kv.Value.Count remain zero (the intended padding).
        for (int i = 0; i < kv.Value.Count; i++)
        {
            for (int k = 0; k < X.shape[1]; k++)
            {
                points[j, i, k] = kv.Value[i][k];
            }
        }
    }
    NDArray points_by_class = np.array<float>(points);
    // Estimate mean and variance for each class / feature.
    // Shape: nb_classes * nb_features
    var cons = tf.constant(points_by_class);
    var tup = tf.nn.moments(cons, new int[] { 1 });
    var mean = tup.Item1;
    var variance = tup.Item2;
    // Create a univariate normal distribution with the known mean and variance.
    var dist = tf.distributions.Normal(mean, tf.sqrt(variance));
    this.dist = dist;
}
/// <summary>
/// Broadcasting a (1,3) row against a (2,1) column expands both to shape (2,3);
/// checks the first row of each broadcast result and the total sizes.
/// </summary>
public void BroadcastArrayTest()
{
    NDArray rowArray = np.array(new int[][] { new int[] { 1, 2, 3 } });
    NDArray colArray = np.array(new int[][] { new int[] { 4 }, new int[] { 5 } });

    NDArray[] broadcasted = np.broadcast_arrays(rowArray, colArray);
    NDArray first = broadcasted[0];
    NDArray second = broadcasted[1];

    // Row 0 of the broadcast row operand repeats the original values.
    Assert.IsTrue(first.Data<int>(0, 0) == 1.0);
    Assert.IsTrue(first.Data<int>(0, 1) == 2.0);
    Assert.IsTrue(first.Data<int>(0, 2) == 3.0);
    // Row 0 of the broadcast column operand repeats its first element.
    Assert.IsTrue(second.Data<int>(0, 0) == 4.0);
    Assert.IsTrue(second.Data<int>(0, 1) == 4.0);
    Assert.IsTrue(second.Data<int>(0, 2) == 4.0);
    // Both results hold 2 * 3 = 6 elements.
    Assert.IsTrue(first.size == 6);
    Assert.IsTrue(second.size == 6);
}
/// <summary>
/// Benchmark body: reads every (row, col) element through the generic
/// Data&lt;double&gt;(indices) accessor; the value itself is discarded.
/// </summary>
public void accessInGeneric()
{
    double sink = 0;
    for (int row = 0; row < shape.Dimensions[0]; row++)
    {
        for (int col = 0; col < shape.Dimensions[1]; col++)
        {
            sink = nd.Data<double>(row, col);
        }
    }
}
/// <summary>
/// Unary minus on an int32 vector flips the sign of every element.
/// </summary>
public void NegateArray2()
{
    // arrange: [-1, 0, 1]
    var source = new NDArray(np.int32, 3);
    source.ReplaceData(new int[] { -1, 0, 1 });

    // act
    var negated = -source;

    // assert
    negated.Data<int>().Should().BeEquivalentTo(new int[] { 1, 0, -1 });
}
/// <summary>
/// Unary minus on a float32 vector flips the sign of every element.
/// </summary>
public void NegateArray()
{
    // arrange: [1, -2, 3.3]
    var source = new NDArray(np.float32, 3);
    source.ReplaceData(new float[] { 1, -2, 3.3f });

    // act
    var negated = -source;

    // assert
    negated.Data<float>().Should().BeEquivalentTo(new float[] { -1, 2, -3.3f });
}
/// <summary>
/// Test whether all array elements evaluate to True.
/// </summary>
/// <param name="nd">Boolean array to inspect.</param>
/// <returns>true when every element is true (vacuously true for an empty array); otherwise false.</returns>
public bool All(NDArray nd)
{
    foreach (bool element in nd.Data<bool>())
    {
        if (!element)
            return false;
    }
    return true;
}
/// <summary>
/// Renders a scatter plot of the (x, y) point pairs to regression.png via the
/// MatplotlibCS bridge and blocks until the figure has been written.
/// </summary>
/// <param name="x">Abscissa values, one per point.</param>
/// <param name="y">Ordinate values, one per point.</param>
private void Plot(NDArray x, NDArray y)
{
    var matplotlibCs = new MatplotlibCS.MatplotlibCS("python", @"D:\Projects\MatplotlibCS\MatplotlibCS\Python\matplotlib_cs.py");
    var items = new List<PlotItem>();
    // Hoist both buffers out of the loop instead of re-fetching per point.
    var xs = x.Data<double>();
    var ys = y.Data<double>();
    for (int i = 0; i < x.size; i++)
    {
        // BUG FIX: the original passed x.Data<double>()[i] for BOTH coordinates,
        // plotting y = x for every point and ignoring the y argument entirely.
        items.Add(new Point2D($"P{i}", xs[i], ys[i])
        {
            MarkerFaceColor = Color.Black,
            MarkerSize = 3
        });
    }
    var figure = new Figure(1, 1)
    {
        FileName = "regression.png",
        OnlySaveImage = true,
        DPI = 150,
        Subplots =
        {
            new Axes(1, "X axis", "Y axis")
            {
                Title = "Regression Test",
                Grid = new Grid()
                {
                    XLim = new double[] { 0, 1.2 },
                    YLim = new double[] { -2, 12 }
                },
                PlotItems = items
            }
        }
    };
    // Blocks until the external python process has produced the image.
    var t = matplotlibCs.BuildFigure(figure);
    t.Wait();
}
/// <summary>
/// Constructing generic bool NDArrays: data+shape preserves the values, a
/// shape-only construction defaults every element to false, and both report
/// correct size and rank.
/// </summary>
public void Generic1DBool_NDArray()
{
    var expected = new[] { true, true, false, false };
    var np1 = new NDArray<bool>(new[] { true, true, false, false }, new Shape(4));
    var np2 = new NDArray<bool>(new Shape(2));
    var np3 = new NDArray<bool>();   // only exercises the parameterless ctor

    Assert.IsTrue(expected.SequenceEqual(np1.Data<bool>()));
    Assert.AreEqual(4, np1.size);
    Assert.AreEqual(1, np1.ndim);

    // Shape-only construction: elements default to false.
    Assert.IsTrue(new[] { false, false }.SequenceEqual(np2.Data<bool>()));
    Assert.AreEqual(2, np2.size);
    Assert.AreEqual(1, np2.ndim);
}
/// <summary>
/// Accumulates Y += A * X for a sparse matrix A in Compressed Sparse Column
/// (CSC) format, mirroring scipy's csc_matvec kernel.
/// </summary>
/// <param name="n_row">Number of rows in A (unused here; kept for the canonical signature).</param>
/// <param name="n_col">Number of columns in A.</param>
/// <param name="Ap">Column pointers: column j's nonzeros live in [Ap[j], Ap[j+1]).</param>
/// <param name="Ai">Row index of each stored nonzero.</param>
/// <param name="Ax">Value of each stored nonzero.</param>
/// <param name="Xx">Dense input vector, length n_col.</param>
/// <param name="Yx">Dense output vector, accumulated in place.</param>
public static void csc_matvec(int n_row, int n_col, int[] Ap, int[] Ai, double[] Ax, double[] Xx, NDArray Yx)
{
    // PERF FIX: fetch the backing buffer once instead of calling Yx.Data<double>()
    // on every single accumulation inside the inner loop. (Data<double>() must
    // return the live backing array here — otherwise the original's writes
    // would have been lost too.)
    double[] y = Yx.Data<double>();
    for (int j = 0; j < n_col; j++)
    {
        int col_start = Ap[j];
        int col_end = Ap[j + 1];
        // Column j scatters Ax * Xx[j] into the rows listed in Ai.
        for (int ii = col_start; ii < col_end; ii++)
        {
            int i = Ai[ii];
            y[i] += Ax[ii] * Xx[j];
        }
    }
}
/// <summary>
/// Smoke test: builds a noisy double vector, tweaks a few entries through the
/// indexer, and verifies ToString() completes without throwing.
/// </summary>
public void CheckVectorString()
{
    var vector = new NDArray(typeof(double)).arange(9).MakeGeneric<double>();
    var random = new Random();
    // Perturb every element with random noise in [0, 1).
    vector.SetData(vector.Data<double>().Select(x => x + random.NextDouble()).ToArray());
    vector[1] = 1;
    vector[2] -= 4;
    vector[3] -= 20;
    vector[8] += 23;
    // Only checks that rendering does not throw; the string content is unused.
    var rendered = vector.ToString();
}