/// <summary>
/// Fits the model: stores the per-column means of X, optionally centers X in place,
/// then computes the full SVD of X via LAPACK gesvd, keeping V^T in components_.
/// </summary>
/// <param name="X">Input matrix of shape (m, n). Mutated in place when <paramref name="center"/> is true.</param>
/// <param name="center">If true, subtracts the column means from X before the decomposition.</param>
/// <returns>The (possibly centered) input X.</returns>
public Array <float> Fit(Array <float> X, bool center = true)
{
    var m = X.Shape[0];
    var n = X.Shape[1];
    var k = Math.Min(m, n);
    // Means are kept on the instance so later transforms can re-center data the same way.
    this.means = NN.Mean(X, axis: 0);
    if (center) { X -= means; }
    // gesvd destroys its input matrix, so decompose a copy of the raw values.
    var copy = (float[])X.Values.Clone();
    var s = new float[k];                       // singular values
    var u = NN.Zeros <float>(m, m);             // left singular vectors
    components_ = NN.Zeros <float>(n, n);       // V^T (right singular vectors, rows)
    var superb = new float[k - 1];              // scratch required by LAPACK gesvd
    // 'A','A' requests all columns of U and all rows of V^T.
    // NOTE(review): leading dimensions are passed as (n, m, n) — presumably the BlasNet
    // binding expects row-major storage; confirm against the binding's signature.
    BlasNet.Lapack.gesvd('A', 'A', m, n, copy, n, s, u.Values, m, components_.Values, n, superb);
    // nComponents == 0 means "keep all k components".
    var components = nComponents == 0 ? k : Math.Min(k, nComponents);
    // Fix the sign ambiguity of the SVD so results are deterministic across runs.
    SVDFlip(u, components);
    return(X);
}
/// <summary>
/// 2-d max pooling over non-overlapping pool_h x pool_w windows. In addition to the pooled
/// values, records per output cell the (y, x) coordinates of the maximum ("switches") so the
/// operation can be reversed during backprop (see the unpooling routine).
/// </summary>
/// <param name="arr">Input 2-d array of shape (x_h, x_w).</param>
/// <param name="pool_h">Pooling window height.</param>
/// <param name="pool_w">Pooling window width.</param>
/// <param name="ignoreBorder">When false, partial windows at the bottom/right borders produce an extra output row/column.</param>
/// <returns>Tuple of (pooled values of shape (out_h, out_w), switches of shape (out_h, out_w, 2)).</returns>
public static Tuple <Array <Real>, Array <int> > NewDownSample_MaxPooling2d(Array <Real> arr, int pool_h, int pool_w, bool ignoreBorder = true)
{
    int x_h = arr.Shape[0];
    int x_w = arr.Shape[1];
    int out_h = x_h / pool_h;
    int out_w = x_w / pool_w;
    if (ignoreBorder == false)
    {
        // (a ^ b) >= 0 tests that a and b have the same sign — presumably a guard ported from
        // C-style floor division semantics; with positive sizes it always holds. TODO confirm.
        if (((x_h ^ pool_h) >= 0) && (x_h % pool_h != 0)) { out_h++; }
        if (((x_w ^ pool_w) >= 0) && (x_w % pool_w != 0)) { out_w++; }
    }
    int arr_y_max = 0;
    int arr_x_max = 0;
    var poolout = NN.Array(new Real[out_h, out_w]);
    var switches = NN.Array(new int[out_h, out_w, 2]);
    for (int y_out = 0; y_out < out_h; y_out++)
    {
        int y = y_out * pool_h;
        int y_min = y;
        // Min() clips the last window when it overhangs the border (ignoreBorder == false case).
        int y_max = Math.Min(y + pool_h, x_h);
        for (int x_out = 0; x_out < out_w; x_out++)
        {
            int x = x_out * pool_w;
            int x_min = x;
            int x_max = Math.Min(x + pool_w, x_w);
            var value = Real.NegativeInfinity;
            // Scan the window, tracking both the max value and its coordinates.
            for (int arr_y = y_min; arr_y < y_max; arr_y++)
            {
                for (int arr_x = x_min; arr_x < x_max; arr_x++)
                {
                    var new_value = arr.Item[arr_y, arr_x];
                    if (new_value > value)
                    {
                        value = new_value;
                        arr_y_max = arr_y;
                        arr_x_max = arr_x;
                    }
                }
            }
            // NOTE(review): if a window contains only -Infinity, the stale coordinates from the
            // previous window are recorded — degenerate input, but worth confirming.
            switches[y_out, x_out, 0] = arr_y_max;
            switches[y_out, x_out, 1] = arr_x_max;
            poolout[y_out, x_out] = value;
        }
    }
    return(Tuple.Create(poolout, switches));
}
/// <summary>
/// Numerically stable shortcut of Log(Sum(Exp(a), axis, keepDims)).
/// Uses the identity log(sum(exp(a))) = b + log(sum(exp(a - b))) with b = max(a, axis),
/// which avoids overflow in the exponentials.
/// </summary>
/// <param name="a">The input array.</param>
/// <param name="axis">Axis to reduce along; negative values count from the end.</param>
/// <param name="keepDims">If true, the reduced axis is kept with size 1.</param>
/// <param name="result">Optional output array; allocated when null.</param>
/// <returns>The log-sum-exp of <paramref name="a"/> along <paramref name="axis"/>.</returns>
public static Array <Real> LogSumExp(Array <Real> a, int axis = -1, bool keepDims = false, Array <Real> result = null)
{
    if (axis < 0) { axis += a.NDim; }
    // b holds the per-slice maxima, kept with dims so it broadcasts against a.
    var b = NN.Max(a, axis: axis, keepDims: true);
    var sum = NN.Exp(a - b).Sum(axis: axis, keepDims: true, result: result);
    // Add the maxima back: result = b + log(sum(exp(a - b))).
    result = Apply(sum, b, (x, b_) => b_ + (Real)Math.Log(x), result: result);
    // Reshape drops the reduced axis when keepDims is false.
    return(keepDims ? result : result.Reshape(GetAggregatorResultShape(a, axis, keepDims)));
}
/// <summary>
/// Numerically stable softmax along the given axis
/// (see http://aelag.com/translation-of-theano-softmax-function).
/// </summary>
/// <param name="a">The input array.</param>
/// <param name="axis">The axis to compute the Softmax along, like in the Max function. Default value mimics Theano behavior.</param>
/// <param name="result">Optional output array; allocated when null.</param>
/// <param name="buffer">Optional scratch array, reused for both the per-slice maxima and the normalizers.</param>
/// <returns>An array shaped like <paramref name="a"/> whose slices along <paramref name="axis"/> sum to 1.</returns>
public static Array <Real> Softmax(Array <Real> a, int axis = -1, Array <Real> result = null, Array <Real> buffer = null)
{
    // Subtract the per-slice maximum before exponentiating to avoid overflow.
    var slicesMax = NN.Max(a, axis: axis, keepDims: true, result: buffer);
    result = NN.Exp(a.Sub(slicesMax, result: result), result: result);
    // Recycle the maxima buffer to hold the normalization constants.
    var norm = NN.Sum(result, axis: axis, keepDims: true, result: slicesMax);
    result = result.Div(norm, result: result);
    return result;
}
/// <summary>
/// Fills the result array using the values from thiz, placed at the indexes from selected:
/// the inverse scatter of an ArgMax/ArgMin reduction along <paramref name="axis"/>.
/// </summary>
/// <typeparam name="T">The type of thiz's content.</typeparam>
/// <param name="thiz">The values to scatter; same shape as <paramref name="selected"/>.</param>
/// <param name="selected">The result of an ArgMax/ArgMin operation (indexes along the reduced axis).</param>
/// <param name="axis">The axis that was reduced.</param>
/// <param name="axisSize">The original size of the reduced axis.</param>
/// <param name="keepDims">True when the ArgMax was computed with keepDims (thiz already has a size-1 axis).</param>
/// <param name="result">Optional output of the restored shape; zero-allocated when null.</param>
/// <returns>The scattered result array.</returns>
public static Array <T> UnArgmax <T>(this Array <T> thiz, Array <int> selected, int axis, int axisSize, bool keepDims = false, Array <T> result = null)
{
    thiz.AssertOfShape(selected);
    // The output has one more dimension than thiz unless keepDims already kept it.
    var dim = thiz.NDim + (keepDims ? 0 : 1);
    var shape = new int[dim];
    if (keepDims)
    {
        System.Array.Copy(thiz.Shape, shape, dim);
    }
    else
    {
        // Re-insert the reduced axis: copy dims before it, then shift the rest right by one.
        System.Array.Copy(thiz.Shape, 0, shape, 0, axis);
        System.Array.Copy(thiz.Shape, axis, shape, axis + 1, thiz.NDim - axis);
    }
    shape[axis] = axisSize;
    result = result ?? NN.Zeros <T>(shape);
    result.AssertOfShape(shape);
    // Stride along the restored axis — used below to jump to the selected index.
    var resultInc = result.Stride[axis];
    // HACK
    // as result have one more shape than thiz and selected, we have to lie about the number of shapes:
    // slice the restored axis down to one element so result lines up elementwise with thiz/selected.
    var resultSlices = new Slice[dim];
    for (int i = 0; i < dim; ++i) { resultSlices[i] = Slicer._; }
    if (!keepDims) { resultSlices[axis] = 0; } else { resultSlices[axis] = Slicer.Upto(1); }
    var res = result[resultSlices];
    Array_.ElementwiseOp(thiz, selected, res,
        (n, x, offsetx, incx, s, offsetS, incS, r, offsetR, incR) =>
        {
            for (int i = 0; i < n; ++i)
            {
                // Offset into the restored axis by the selected index, then store the value.
                r[offsetR + resultInc * s[offsetS]] = x[offsetx];
                offsetR += incR;
                offsetS += incS;
                offsetx += incx;
            }
        });
    return(result);
}
// "fast" softmax for 1D and 2D arrays: avoids the generic reduction machinery by computing
// the row maxima with a hand-rolled strided loop, then normalizes exp(a - max) per row.
public static Array <Real> Softmax_(Array <Real> a, Array <Real> result = null)
{
    if (a.Shape.Length > 2)
    {
        throw new RankException(string.Format("Must be 1-d or 2-d tensor, got {0}-d with shape ({1}).", a.Shape.Length, string.Join(", ", a.Shape)));
    }
    if (result == null) { result = Zeros <Real>(a.Shape); }
    else { result.AssertOfShape(a); }
    if (a.Shape.Length == 1)
    {
        // 1-d case: a single global maximum suffices for numerical stability.
        var max = a.Max();
        result = NN.Exp(a - max, result: result);
    }
    else
    {
        // 2-d case: per-row maxima, stored as an (nX, 1) column so it broadcasts below.
        var maxes = NN.Zeros <Real>(a.Shape[0], 1);
        var vMax = maxes.Values;
        int off = a.Offset, offX;
        int incX = a.Stride[0], incY = a.Stride[1];
        int nX = a.Shape[0], nY = a.Shape[1];
        Real max = Real.NegativeInfinity;
        var v = a.Values;
        for (int i = 0; i < nX; ++i)
        {
            offX = off;                       // remember the start of this row
            max = Real.NegativeInfinity;
            for (int j = 0; j < nY; ++j)
            {
                max = Math.Max(v[off], max);
                off += incY;
            }
            off = offX + incX;                // jump to the start of the next row
            vMax[i] = max;
        }
        result = NN.Exp(a - maxes, result: result);
    }
    // Normalize along the last axis so each slice sums to 1.
    var sum = NN.Sum(result, axis: a.Shape.Length - 1, keepDims: true);
    result = result.Div(sum, result: result);
    return(result);
}
/// <summary>
/// Reverses a 2-d max pooling: scatters each delta value back to the position of the
/// maximum recorded in <paramref name="switches"/> during the forward pass; every other
/// cell of the output stays zero.
/// </summary>
/// <param name="delta">Gradient w.r.t. the pooled output, shape (out_h, out_w).</param>
/// <param name="switches">Per-cell argmax coordinates from the forward pooling, shape (out_h, out_w, 2).</param>
/// <param name="pool_h">Pooling window height used in the forward pass.</param>
/// <param name="pool_w">Pooling window width used in the forward pass.</param>
/// <param name="ignoreBorder">Unused; kept for signature symmetry with the pooling routine.</param>
/// <returns>The unpooled array of shape (out_h * pool_h, out_w * pool_w).</returns>
public static Array <Real> new_Unpooling(Array <Real> delta, Array <int> switches, int pool_h, int pool_w, bool ignoreBorder = true)
{
    // NOTE(review): when the forward pass ran with ignoreBorder == false, the recorded
    // coordinates may exceed this size — verify against the pooling routine's border handling.
    var unpooled = NN.Array(new Real[switches.Shape[0] * pool_h, switches.Shape[1] * pool_w]);
    for (int y = 0; y < delta.Shape[0]; y++)
    {
        for (int x = 0; x < delta.Shape[1]; x++)
        {
            // Route the gradient to the cell that produced the forward max.
            unpooled[(int)switches[y, x, 0], (int)switches[y, x, 1]] = (Real)delta[y, x];
        }
    }
    return(unpooled);
}
/// <summary>
/// Computes a generalized contraction of <paramref name="x"/> and <paramref name="y"/>
/// described in Einstein notation by <paramref name="einstein"/>.
/// </summary>
/// <param name="x">Left operand.</param>
/// <param name="y">Right operand.</param>
/// <param name="einstein">The index specification driving the contraction.</param>
/// <param name="result">Optional preallocated output; must match the implied result shape.</param>
/// <returns>The contracted array.</returns>
public static Array <float> EinsteinSum(Array <float> x, Array <float> y, Einstein[] einstein, Array <float> result = null)
{
    var shape = EinsteinShape(x.Shape, y.Shape, einstein);
    if (result != null)
    {
        result.AssertOfShape(shape);
    }
    else
    {
        result = NN.Zeros <float>(shape);
    }
    // The recursive worker accumulates into result starting from dimension 0.
    _EinsteinSum(0, einstein, x, x.Offset, y, y.Offset, result, result.Offset);
    return result;
}
/// <summary>Creates a new array of the given shape filled with samples from NextNormal(mean, std).</summary>
public Array <double> Normal(double mean, double std, params int[] shape)
{
    return NN.Fill(() => NextNormal(mean, std), shape);
}
/// <summary>Creates a new array of the given shape filled with samples from NextUniform&lt;T&gt;(min, max).</summary>
public Array <T> Uniform <T>(double min, double max, params int[] shape)
{
    return NN.Fill(() => NextUniform <T>(min, max, typeof(T)), shape);
}
/// <summary>Creates a new float array of the given shape filled with samples from NextUniformF(min, max).</summary>
public Array <float> Uniform(float min, float max, params int[] shape)
{
    return NN.Fill(() => NextUniformF(min, max), shape);
}
/// <summary>Creates a new double array of the given shape filled with samples from NextUniform(min, max).</summary>
public Array <double> Uniform(double min, double max, params int[] shape)
{
    return NN.Fill(() => NextUniform(min, max), shape);
}
/// <summary>Creates a new float array of the given shape filled with samples from NextNormalF(mean, std).</summary>
public Array <float> Normal(float mean, float std, params int[] shape)
{
    return NN.Fill(() => NextNormalF(mean, std), shape);
}
/// <summary>Adds the scalar <paramref name="a"/> to every element of <paramref name="b"/>.</summary>
public virtual Array <Type> Add(Type a, Array <Type> b)
{
    return NN.Apply(b, x => Add(a, x));
}
/// <summary>
/// Element-wise addition of two arrays.
/// Default implementation; should be overridden for float and double (e.g. with vectorized kernels).
/// </summary>
public virtual Array <Type> Add(Array <Type> a, Array <Type> b)
{
    return NN.Apply(a, b, (x, y) => Add(x, y));
}
/// <summary>Element-wise "greater than or equal" comparison of two arrays.</summary>
public virtual Array <Type> GtEq(Array <Type> a, Array <Type> b)
{
    return NN.Apply(a, b, (x, y) => GtEq(x, y));
}
/// <summary>Compares the scalar <paramref name="a"/> against every element of <paramref name="b"/> with "greater than or equal".</summary>
public virtual Array <Type> GtEq(Type a, Array <Type> b)
{
    return NN.Apply(b, x => GtEq(a, x));
}
/// <summary>Element-wise multiplication of two arrays.</summary>
public virtual Array <Type> Mul(Array <Type> a, Array <Type> b)
{
    return NN.Apply(a, b, (x, y) => Mul(x, y));
}
/// <summary>
/// Constructs an array by repeating <paramref name="a"/> the number of times given by
/// <paramref name="reps"/>, mirroring numpy.tile. If reps has length d, the result has
/// max(d, a.NDim) dimensions; an input with fewer dimensions than d is promoted by
/// prepending size-1 axes (so a (3,) array becomes (1, 3) for 2-D replication).
/// Examples (numpy): tile([0,1,2], 2) -> [0,1,2,0,1,2];
/// tile([0,1,2], (2,2)) -> [[0,1,2,0,1,2],[0,1,2,0,1,2]];
/// tile([[1,2],[3,4]], (2,1)) -> [[1,2],[3,4],[1,2],[3,4]].
/// See also: Repeat (repeats individual elements rather than the whole array).
/// </summary>
/// <param name="a">The input array.</param>
/// <param name="reps">The number of repetitions of <paramref name="a"/> along each axis.</param>
/// <returns>The tiled output array.</returns>
public static Array <T> Tile <T>(Array <T> a, params int[] reps)
{
    var tup = reps;
    var d = tup.Length;
    var c = a;
    var shape = (int[])c.Shape.Clone();
    var oldLength = shape.Length;
    if (oldLength < d)
    {
        // Promote c to d dimensions by prepending size-1 axes (numpy's ndmin behavior).
        var newShape = new int[d];
        for (int i = 0; i < d - oldLength; i++) { newShape[i] = 1; }
        System.Array.Copy(shape, 0, newShape, d - oldLength, oldLength);
        shape = newShape;
        c = c.Reshape(shape);
    }
    // n tracks the combined size of the dimensions not yet processed (Max guards empty arrays).
    var n = Math.Max(c.Size, 1);
    if (d < c.NDim)
    {
        // numpy would pre-pend 1's to reps here: tup = (1,)*(c.ndim - d) + tup. Not ported yet.
        throw new NotImplementedException();
    }
    for (int i = 0; i < tup.Length; i++)
    {
        var nrep = tup[i];
        if (nrep != 1)
        {
            // Flatten to (outer, n) and repeat whole rows, as numpy.tile does internally.
            c = NN.Repeat(c.Reshape(-1, n), nrep, axis: 0);
        }
        var dim_in = shape[i];
        var dim_out = dim_in * nrep;
        shape[i] = dim_out;
        // Shrink n to the size of the remaining (inner) dimensions for the next axis.
        n /= Math.Max(dim_in, 1);
    }
    return(c.Reshape(shape));
}
/// <summary>Multiplies every element of <paramref name="b"/> by the scalar <paramref name="a"/>.</summary>
public virtual Array <Type> Mul(Type a, Array <Type> b)
{
    return NN.Apply(b, x => Mul(a, x));
}
/// <summary>Fills <paramref name="result"/> with samples from NextNormalF(mean, std) and returns it.</summary>
public Array <float> Normal(float mean, float std, Array <float> result)
{
    return NN.Fill(() => NextNormalF(mean, std), result);
}
/// <summary>Element-wise subtraction of two arrays.</summary>
public virtual Array <Type> Sub(Array <Type> a, Array <Type> b)
{
    return NN.Apply(a, b, (x, y) => Sub(x, y));
}
/// <summary>Fills <paramref name="result"/> with samples from NextUniform(min, max) and returns it.</summary>
public Array <double> Uniform(double min, double max, Array <double> result)
{
    return NN.Fill(() => NextUniform(min, max), result);
}
/// <summary>Subtracts every element of <paramref name="b"/> from the scalar <paramref name="a"/>, element-wise via Sub(a, x).</summary>
public virtual Array <Type> Sub(Type a, Array <Type> b)
{
    return NN.Apply(b, x => Sub(a, x));
}
/// <summary>Fills <paramref name="result"/> with samples from NextUniformF(min, max) and returns it.</summary>
public Array <float> Uniform(float min, float max, Array <float> result)
{
    return NN.Fill(() => NextUniformF(min, max), result);
}
/// <summary>Element-wise division of two arrays.</summary>
public virtual Array <Type> Div(Array <Type> a, Array <Type> b)
{
    return NN.Apply(a, b, (x, y) => Div(x, y));
}
/// <summary>Fills <paramref name="result"/> with samples from NextUniform&lt;T&gt;(min, max) and returns it.</summary>
public Array <T> Uniform <T>(double min, double max, Array <T> result)
{
    return NN.Fill(() => NextUniform <T>(min, max, typeof(T)), result);
}
/// <summary>Divides the scalar <paramref name="a"/> by every element of <paramref name="b"/>, element-wise via Div(a, x).</summary>
public virtual Array <Type> Div(Type a, Array <Type> b)
{
    return NN.Apply(b, x => Div(a, x));
}
/// <summary>Fills <paramref name="result"/> with samples from NextNormal(mean, std) and returns it.</summary>
public Array <double> Normal(double mean, double std, Array <double> result)
{
    return NN.Fill(() => NextNormal(mean, std), result);
}
/// <summary>Compares every element of <paramref name="a"/> against the scalar <paramref name="b"/> with "greater than".</summary>
public virtual Array <Type> Gt(Array <Type> a, Type b)
{
    return NN.Apply(a, x => Gt(x, b));
}