Example #1
        /// <summary>
        /// Reverses a `Tensor` along a specified axis.
        /// </summary>
        /// <param name="x">The input tensor to be reversed.</param>
        /// <param name="axis">The set of dimensions to reverse. Must be in the
        ///  range [-rank(x), rank(x)). Defaults to all axes.</param>
        /// <returns>The reversed tensor.</returns>
        public static Tensor reverse(this Tensor x, int[] axis)
        {
            if (x.Rank == 0)
            {
                return(x.clone());
            }
            var axes = Util.parseAxisParam(axis, x.Shape);

            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () => { return(dy.reverse(axes)); });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.reverse(x, axes));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            var res = e.runKernel(f, inputs, grad);

            return(res.reshapeAs(x));
        }
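A minimal usage sketch, not part of the original source; it assumes only APIs visible elsewhere in these examples (the `Ops.ones` factory and the `reverse` extension above), and the all-ones values illustrate shapes rather than data:

        // Usage sketch: reverse a 2x3 tensor along its first axis.
        var t    = Ops.ones(new int[] { 2, 3 });
        var rows = t.reverse(new int[] { 0 });    // flip along axis 0
        var both = t.reverse(new int[] { 0, 1 }); // flip along both axes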
Example #2
        /// <summary>
        /// Returns the max of a and b (`a > b ? a : b`) element-wise.
        /// Supports broadcasting.
        ///
        /// We also expose `maximumStrict` which has the same signature as this op and
        /// asserts that `a` and `b` are the same shape (does not broadcast).
        /// </summary>
        /// <param name="a">The first tensor.</param>
        /// <param name="b">The second tensor.</param>
        /// <returns>The element-wise maximum of `a` and `b`.</returns>
        public static Tensor maximum(this Tensor a, Tensor b)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("a", () =>
                {
                    return(dy.mul(a.greaterEqual(b)));
                });
                g.gradient.Add("b", () =>
                {
                    return(dy.mul(a.less(b)));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.maximum(a, b));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("a", a);
            inputs.Add("b", b);
            return(e.runKernel(f, inputs, grad));
        }
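A hedged usage sketch (assumed, not from the source): the scalar operand broadcasts against the tensor, and the gradient above routes `dy` to `a` wherever `a >= b` and to `b` wherever `a < b`:

        // Usage sketch: broadcasting a scalar against a 2x2 tensor.
        var a = Ops.ones(new int[] { 2, 2 });  // all 1.0
        var m = a.maximum(Ops.scalar(0.5f));   // all 1.0, since 1 > 0.5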
Example #3
        /// <summary>
        /// Returns (a - b) * (a - b) element-wise.
        /// Supports broadcasting.
        ///
        /// We also expose `squaredDifferenceStrict` which has the same signature as
        /// this op and asserts that `a` and `b` are the same shape (does not
        /// broadcast).
        /// </summary>
        /// <param name="a">The first tensor.</param>
        /// <param name="b">The second tensor.</param>
        /// <returns>The element-wise squared difference of `a` and `b`.</returns>
        public static Tensor squaredDifference(this Tensor a, Tensor b)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                var two            = scalar(2);
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("a", () =>
                {
                    return(dy.mul(a.sub(b).mul(two)));
                });
                g.gradient.Add("b", () =>
                {
                    return(dy.mul(b.sub(a).mul(two)));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.squaredDifference(a, b));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("a", a);
            inputs.Add("b", b);
            return(e.runKernel(f, inputs, grad));
        }
Example #4
        /// <summary>
        /// Computes exponential of the input `Tensor` element-wise. `e ^ x`
        /// </summary>
        /// <param name="x">The input tensor.</param>
        /// <returns>The element-wise exponential of `x`.</returns>
        public static Tensor exp(this Tensor x)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                var y = s[0];

                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    return(dy.mulStrict(y));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(saved(bk.exp(x)));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs, grad));
        }
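The forward function wraps the backend result in `saved(...)`: since d/dx e^x = e^x, the gradient reuses the saved output `y = s[0]` and computes `dy * y` instead of recomputing the exponential. A brief sketch of the call (assumed usage):

        // Usage sketch: the forward output doubles as the gradient factor.
        var x = Ops.ones(new int[] { 3 });
        var y = x.exp();   // e^1 per element; y itself feeds the backward pass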
Example #5
        /// <summary>
        /// Computes the Gauss error function of the input `Tensor` element-wise:
        /// `erf(x)`
        /// </summary>
        /// <param name="x">The input tensor.</param>
        /// <returns>The element-wise error function of `x`.</returns>
        public static Tensor erf(this Tensor x)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    return(dy.mulStrict(Ops.scalar(2f / (float)Math.Sqrt(Math.PI))
                                        .mul(x.square().neg().exp())));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.erf(x));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs, grad));
        }
Example #6
        /// <summary>
        ///  Clips values element-wise. `max(min(x, clipValueMax), clipValueMin)`
        /// </summary>
        /// <param name="x">The input tensor.</param>
        /// <param name="clipValueMin">Lower-bound of range to be clipped to.</param>
        /// <param name="clipValueMax">Upper-bound of range to be clipped to.</param>
        /// <returns>The clipped tensor.</returns>
        public static Tensor clipByValue(this Tensor x, float clipValueMin, float clipValueMax)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                var y = s[0];

                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    return(dy.where (
                               x.greater(scalar(clipValueMin))
                               .logicalAnd(x.less(scalar(clipValueMax))),
                               zerosLike(dy)));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.clip(x, clipValueMin, clipValueMax));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs, grad));
        }
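A hedged usage sketch: the gradient above passes `dy` through only where `clipValueMin < x < clipValueMax` and zeroes it wherever the input was clipped:

        // Usage sketch: clip values to the range [0, 1].
        var x       = Ops.ones(new int[] { 4 }).mul(Ops.scalar(2f)); // all 2.0
        var clipped = x.clipByValue(0f, 1f);                         // all 1.0
        // Gradients at these clipped positions would be zero.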
Example #7
        /// <summary>
        ///  Computes exponential linear element-wise: `x > 0 ? x : (e ^ x) - 1`
        /// </summary>
        /// <param name="x">The input tensor.</param>
        /// <returns>The element-wise ELU of `x`.</returns>
        public static Tensor elu(this Tensor x)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    var y          = s[0];
                    ForwardFunc fg = (IBackend bk, Func <Tensor, Tensor> saved) =>
                    {
                        return(bk.eluDer(dy, y));
                    };
                    var inputsg = new Dictionary <string, Tensor>();
                    inputsg.Add("dy", dy);
                    inputsg.Add("y", y);
                    return(ENV.engine.runKernel(fg, inputsg));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> save) =>
            {
                return(save(bk.elu(x)));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs, grad));
        }
Example #8
        /// <summary>
        /// Computes scaled exponential linear element-wise.
        /// </summary>
        /// <param name="x">The input tensor.</param>
        /// <returns>The element-wise SELU of `x`.</returns>
        public static Tensor selu(this Tensor x)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    var mask               = x.greater(scalar(0));
                    var scaleAlpha         = scalar((float)Util.SELU_SCALEALPHA);
                    var scale              = scalar((float)Util.SELU_SCALE);
                    var greaterThanZeroDer = dy.mul(scale);
                    var lessEqualZeroDer   = dy.mul(scaleAlpha).mul(x.exp());

                    return(where (mask, greaterThanZeroDer, lessEqualZeroDer));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.selu(x));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs, grad));
        }
Example #9
        /// <summary>
        /// Computes the logical and of elements across dimensions of a `Tensor`.
        ///
        /// Reduces the input along the dimensions given in `axes`. Unless `keepDims`
        /// is true, the rank of the `Tensor` is reduced by 1 for each entry in `axes`.
        /// If `keepDims` is true, the reduced dimensions are retained with length 1.
        /// If `axes` has no entries, all dimensions are reduced, and a `Tensor` with
        /// a single element is returned.
        /// </summary>
        /// <param name="x">The input tensor. Must be of dtype bool.</param>
        /// <param name="axis">The dimension(s) to reduce. By default it reduces
        ///   all dimensions.</param>
        /// <param name="keepDims">If true, retains reduced dimensions with size 1.</param>
        /// <returns>The reduced tensor.</returns>
        public static Tensor all(this Tensor x, int[] axis = null, bool keepDims = false)
        {
            var origAxes     = Util.parseAxisParam(axis, x.Shape);
            var axes         = origAxes;
            var permutedAxes = Util.getAxesPermutation(axes, x.Rank);

            if (permutedAxes != null)
            {
                x    = x.transpose(permutedAxes);
                axes = Util.getInnerMostAxes(axes.Length, x.Rank);
            }
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.all(x, axes));
            };
            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            var res = ENV.engine.runKernel(f, inputs);

            if (keepDims)
            {
                var newShape = Util.expandShapeToKeepDim(res.Shape, origAxes);
                return(res.reshape(newShape));
            }
            return(res);
        }
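A hedged usage sketch; `Ops.ones` stands in here for the bool tensor the op expects:

        // Usage sketch: reduce over axis 1, with and without keepDims.
        var mask   = Ops.ones(new int[] { 2, 3 });              // stand-in for all-true
        var rowAll = mask.all(new int[] { 1 }, keepDims: true); // shape [2, 1]
        var every  = mask.all();                                // single-element tensor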
Example #10
        /// <summary>
        /// Computes hyperbolic tangent of the input `Tensor` element-wise: `tanh(x)`
        /// </summary>
        /// <param name="x">The input tensor.</param>
        /// <returns>The element-wise hyperbolic tangent of `x`.</returns>
        public static Tensor tanh(this Tensor x)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    var y = s[0];
                    return(scalar(1).sub(y.square()).mulStrict(dy));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(saved(bk.tanh(x)));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs, grad));
        }
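As with `exp`, the forward pass saves its output, so the gradient applies the identity d/dx tanh(x) = 1 - tanh(x)^2 to the saved `y` without recomputing `tanh`. A brief sketch (assumed usage):

        // Usage sketch: y is saved; backward computes (1 - y^2) * dy.
        var y = Ops.ones(new int[] { 2 }).tanh();   // tanh(1) per element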
Example #11
        /// <summary>
        /// Normalizes the activation of a local neighborhood across or within
        /// channels.
        /// </summary>
        /// <param name="x">The input tensor. The 4-D input tensor is treated as a 3-D array
        ///    of 1D vectors (along the last dimension), and each vector is
        ///    normalized independently.</param>
        /// <param name="depthRadius">The number of adjacent channels or spatial locations of the
        ///    1D normalization window. In Tensorflow this param is called
        ///    'depth_radius' because only 'acrossChannels' mode is supported.</param>
        /// <param name="bias">A constant bias term for the basis.</param>
        /// <param name="alpha">A scale factor, usually positive.</param>
        /// <param name="beta">An exponent.</param>
        /// <returns>The normalized tensor.</returns>
        public static Tensor localResponseNormalization(this Tensor x,
                                                        float depthRadius = 5, float bias = 1, float alpha = 1, float beta = 0.5f)
        {
            Tensor x4D          = null;
            var    reshapedTo4D = false;

            if (x.Rank == 3)
            {
                reshapedTo4D = true;
                x4D          = x.as4D(1, x.Shape[0], x.Shape[1], x.Shape[2]);
            }
            else
            {
                x4D = x as Tensor;
            }


            Engine e = ENV.engine;

            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x4D", () =>
                {
                    ForwardFunc fgrad = (IBackend bk, Func <Tensor, Tensor> saved) =>
                    {
                        var outputImage = s[0];
                        return(bk.LRNGrad(
                                   dy, x4D, outputImage, depthRadius, bias, alpha, beta));
                    };
                    return(e.runKernel(fgrad, new Dictionary <string, Tensor>()));
                });
                return(g);
            };

            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(saved(bk.localResponseNormalization4D(
                                 x4D, depthRadius, bias, alpha, beta)));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x4D", x4D);
            var res = e.runKernel(f, inputs, grad);

            if (reshapedTo4D)
            {
                return(res.as3D(res.Shape[1], res.Shape[2], res.Shape[3]));
            }
            else
            {
                return(res);
            }
        }
Example #12
        /// <summary>
        ///  Computes the sum of elements across dimensions of a `Tensor`.
        ///  Reduces the input along the dimensions given in `axes`. Unless `keepDims`
        ///  is true, the rank of the `Tensor` is reduced by 1 for each entry in `axes`.
        ///  If `keepDims` is true, the reduced dimensions are retained with length 1.
        ///  If axes has no entries, all dimensions are reduced, and a `Tensor` with a
        ///  single element is returned.
        /// </summary>
        /// <param name="x">The input tensor to compute the sum over. If the dtype is `bool`
        /// it will be converted to `int32` and the output dtype will be `int32`.</param>
        /// <param name="axis">The dimension(s) to reduce. By default it reduces all dimensions.</param>
        /// <param name="keepDims">If true, retains reduced dimensions with size 1.</param>
        /// <returns>The reduced tensor.</returns>
        public static Tensor sum(this Tensor x, int[] axis = null, bool keepDims = false)
        {
            var axes     = Util.parseAxisParam(axis, x.Shape);
            var customOp = customGrad(
                (Tensor[] opInputs) =>
            {
                var xi            = opInputs[0];
                var permutation   = Util.getAxesPermutation(axes, xi.Rank);
                var reductionAxes = axes;
                var permutedX     = xi;
                if (permutation != null)
                {
                    permutedX     = xi.transpose(permutation);
                    reductionAxes =
                        Util.getInnerMostAxes(reductionAxes.Length, xi.Rank);
                }
                ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
                {
                    return(bk.Sum(permutedX, reductionAxes));
                };

                var inputs = new Dictionary <string, Tensor>();
                inputs.Add("x", xi);
                var value = ENV.engine.runKernel(f, inputs);

                if (keepDims)
                {
                    var newShape = Util.expandShapeToKeepDim(value.Shape, axes);
                    value        = value.reshape(newShape);
                }
                CustomGradientResults res = new CustomGradientResults();
                res.value    = value;
                res.gradFunc = (Tensor dy) =>
                {
                    var expandedDyShape = new List <int>(xi.Shape).ToArray();

                    foreach (var axis2 in axes)
                    {
                        expandedDyShape[axis2] = 1;
                    }
                    var expandedDy = dy.reshape(expandedDyShape);
                    var derX       = expandedDy.mul(Ops.ones(xi.Shape));
                    return(new List <Tensor>()
                    {
                        derX
                    });
                };
                return(res);
            }
                );

            return(customOp(new Tensor[] { x }));
        }
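A hedged sketch of the shape logic inside the `customGrad` body: summing a `[2, 3]` tensor over axis 1 yields shape `[2]`; the backward pass reshapes `dy` to `[2, 1]` and broadcasts it back to `[2, 3]` by multiplying with ones:

        // Usage sketch: forward shapes behind the gradient bookkeeping above.
        var x       = Ops.ones(new int[] { 2, 3 });
        var rowSum  = x.sum(new int[] { 1 });                 // shape [2], each 3.0
        var keepDim = x.sum(new int[] { 1 }, keepDims: true); // shape [2, 1]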
Example #13
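        /// <summary>
        /// Returns the truth value of `!x` element-wise.
        /// </summary>
        /// <param name="x">The input tensor. Must be of dtype bool.</param>
        /// <returns>The element-wise logical NOT of `x`.</returns>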
        public static Tensor logicalNot(this Tensor x)
        {
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.logicalNot(x));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs));
        }
Example #14
        /// <summary>
        /// Returns the truth value of (a less than b) element-wise. Supports broadcasting.
        ///
        /// We also expose `lessStrict` which has the same signature as this op and
        /// asserts that `a` and `b` are the same shape (does not broadcast).
        /// </summary>
        /// <param name="a">The first input tensor.</param>
        /// <param name="b">The second input tensor.</param>
        /// <returns>A boolean tensor with the element-wise result of `a < b`.</returns>
        public static Tensor less(this Tensor a, Tensor b)
        {
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.less(a, b));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("a", a);
            inputs.Add("b", b);
            return(e.runKernel(f, inputs));
        }
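A brief usage sketch (assumed): the comparison broadcasts like the other binary ops in this file:

        // Usage sketch: element-wise a < b with a broadcast scalar.
        var a      = Ops.ones(new int[] { 3 });
        var isLess = a.less(Ops.scalar(2f));   // true everywhere, since 1 < 2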
Example #15
        /// <summary>
        /// Extracts a strided slice of a tensor.
        ///
        /// Roughly speaking, this op extracts a slice of size (end-begin)/stride from
        /// the given input tensor. Starting at the location specified by `begin`, the
        /// slice continues by adding stride to the index until all dimensions are not
        /// less than end. Note that a stride can be negative, which causes a reverse
        /// slice.
        /// </summary>
        /// <param name="x">The tensor to stride slice.</param>
        /// <param name="begin">The coordinates to start the slice from.</param>
        /// <param name="end">The coordinates to end the slice at.</param>
        /// <param name="strides">The size of the slice.</param>
        /// <param name="beginMask">If the ith bit of begin_mask is set, begin[i] is ignored
        ///  and the fullest possible range in that dimension is used instead.</param>
        /// <param name="endMask">If the ith bit of end_mask is set, end[i] is ignored
        ///  and the fullest possible range in that dimension is used instead.</param>
        /// <returns>The extracted slice.</returns>
        public static Tensor stridedSlice(this Tensor x, int[] begin, int[] end,
                                          int[] strides, int beginMask = 0, int endMask = 0)
        {
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.stridedSlice(
                           x, begin, end, strides, beginMask, endMask));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs));
        }
Example #16
        /// <summary>
        /// Computes the 2D max pooling of an image.
        /// </summary>
        /// <param name="x">The input tensor, of rank 4 or rank 3 of shape
        ///    `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed</param>
        /// <param name="filterSize">The filter size, a tuple `[filterHeight, filterWidth]`.</param>
        /// <param name="strides">The strides of the pooling: `[strideHeight, strideWidth]`.</param>
        /// <param name="pad"> The type of padding algorithm.
        /// - `same` and stride 1: output will be of same size as input,
        ///    regardless of filter size.
        /// - `valid`: output will be smaller than input if filter is larger
        ///    than 1x1.
        /// - For more info, see this guide:
        ///  [https://www.tensorflow.org/api_guides/python/nn#Convolution](
        ///       https://www.tensorflow.org/api_guides/python/nn#Convolution)</param>
        /// <param name="dimRoundingMode">The rounding mode used when computing output
        ///  dimensions if pad is a number. If none is provided, it will not round
        ///  and error if the output is of fractional size.</param>
        /// <param name="padvalue"></param>
        /// <returns>The pooled tensor.</returns>
        public static Tensor maxPool(this Tensor x, int[] filterSize, int[] strides, PadType pad,
                                     roundingMode dimRoundingMode = roundingMode.none, Nullable <int> padvalue = null)
        {
            Tensor x4D          = null;
            var    reshapedTo4D = false;

            if (x.Rank == 3)
            {
                reshapedTo4D = true;
                x4D          = x.as4D(1, x.Shape[0], x.Shape[1], x.Shape[2]);
            }
            else
            {
                x4D = x as Tensor;
            }

            var convInfo = Util.computePool2DInfo(
                x4D.Shape, filterSize, strides, pad, dimRoundingMode, ConvDataFormat.channelsLast, padvalue);


            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    var y4D = s[0];
                    return(maxPoolBackprop(dy, x4D, y4D, filterSize, strides, pad, dimRoundingMode, padvalue));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(saved(bk.maxPool(x4D, convInfo)));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x4D);
            var res = e.runKernel(f, inputs, grad);

            if (reshapedTo4D)
            {
                return(res.as3D(res.Shape[1], res.Shape[2], res.Shape[3]));
            }
            return(res);
        }
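A hedged usage sketch; it assumes `PadType` exposes a `valid` member matching the 'valid' padding described above. A rank-3 image is auto-batched to rank 4 and squeezed back afterwards, exactly as the reshape logic in the method shows:

        // Usage sketch: 2x2 max pooling with stride 2 on a single image.
        var img    = Ops.ones(new int[] { 4, 4, 1 });   // [height, width, channels]
        var pooled = img.maxPool(new int[] { 2, 2 },
                                 new int[] { 2, 2 },
                                 PadType.valid);        // result shape [2, 2, 1]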
Example #17
        /// <summary>
        /// Bilinear resize a batch of 3D images to a new shape.
        /// </summary>
        /// <param name="images">The images, of rank 4 or rank 3, of shape
        /// `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.</param>
        /// <param name="size">The new shape `[newHeight, newWidth]` to resize the
        /// images to. Each channel is resized individually.</param>
        /// <param name="alignCorners">Defaults to False. If true, rescale
        /// input by `(new_height - 1) / (height - 1)`, which exactly aligns the 4
        ///   corners of images and resized images. If false, rescale by
        ///   `new_height / height`. Treat similarly the width dimension.</param>
        /// <returns>The resized images.</returns>
        public static Tensor resizeBilinear(this Tensor images, int[] size, bool alignCorners = false)
        {
            Tensor batchImages  = null;
            var    reshapedTo4D = false;

            if (images.Rank == 3)
            {
                reshapedTo4D = true;
                batchImages  =
                    images.as4D(1, images.Shape[0], images.Shape[1], images.Shape[2]);
            }
            else
            {
                batchImages = images as Tensor;
            }
            Engine e = ENV.engine;
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    ForwardFunc fb = (IBackend bk, Func <Tensor, Tensor> saved) =>
                    {
                        return(bk.resizeBilinearBackprop(dy, batchImages, alignCorners));
                    };
                    return(e.runKernel(fb, new Dictionary <string, Tensor>()));
                });
                return(g);
            };
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.resizeBilinear(batchImages, size[0], size[1], alignCorners));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("batchImages", batchImages);
            var res = e.runKernel(f, inputs, grad);

            if (reshapedTo4D)
            {
                return(res.as3D(res.Shape[1], res.Shape[2], res.Shape[3]));
            }
            return(res);
        }
Example #18
        static Tensor depthwiseConv2dDerInput(int[] xShape, Tensor dy, Tensor filter,
                                              int[] strides, PadType pad, roundingMode dimRoundingMode, Nullable <int> padValue = null)
        {
            int[]  xShape4D     = xShape;
            Tensor dy4D         = null;
            var    reshapedTo4D = false;

            if (dy.Rank == 3)
            {
                reshapedTo4D = true;
                dy4D         = dy.as4D(1, dy.Shape[0], dy.Shape[1], dy.Shape[2]);
                xShape4D     = new int[] { 1, xShape[0], xShape[1], xShape[2] };
            }
            else
            {
                dy4D = dy as Tensor;
            }


            var inDepth  = xShape4D[3];
            var outDepth = dy4D.Shape[3];

            var dilations = 1;

            var convInfo = Util.computeConv2DInfo(
                xShape4D, filter.Shape, strides, new int[] { dilations, dilations },
                pad, dimRoundingMode,
                false, ConvDataFormat.channelsLast, padValue);
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.depthwiseConv2DDerInput(dy4D, filter, convInfo));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("dy4D", dy4D);
            var res = e.runKernel(f, inputs);

            if (reshapedTo4D)
            {
                return(res.as3D(res.Shape[1], res.Shape[2], res.Shape[3]));
            }
            return(res);
        }
Example #19
        /// <summary>
        /// Computes the arctangent of `a / b` element-wise: `atan2(a, b)`.
        /// Supports broadcasting.
        /// </summary>
        /// <param name="a">The first tensor.</param>
        /// <param name="b">The second tensor.</param>
        /// <returns>The element-wise arctangent of `a / b`.</returns>
        public static Tensor atan2(this Tensor a, Tensor b)
        {
            var outShape =
                Util.assertAndGetBroadcastShape(a.Shape, b.Shape);

            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("a", () =>
                {
                    var d          = add(square(a), square(b));
                    var res        = dy.mul(b.div(d));
                    var reduceAxes = Util.getReductionAxes(a.Shape, outShape);
                    if (reduceAxes.Length > 0)
                    {
                        res = res.sum(reduceAxes);
                    }
                    return(res.reshape(a.Shape));
                });
                g.gradient.Add("b", () =>
                {
                    var d          = add(square(a), square(b));
                    var res        = neg(dy.mul(a.div(d)));
                    var reduceAxes = Util.getReductionAxes(b.Shape, outShape);
                    if (reduceAxes.Length > 0)
                    {
                        res = res.sum(reduceAxes);
                    }
                    return(res.reshape(b.Shape));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.atan2(a, b));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("a", a);
            inputs.Add("b", b);
            return(e.runKernel(f, inputs, grad));
        }
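The gradient closures above implement the standard partials, d/da atan2(a, b) = b / (a^2 + b^2) and d/db atan2(a, b) = -a / (a^2 + b^2), reduced over any broadcast axes so each gradient matches its input's shape. A brief usage sketch (assumed):

        // Usage sketch: atan2(1, 1) = pi / 4 for every element.
        var a     = Ops.ones(new int[] { 2, 2 });
        var theta = a.atan2(Ops.scalar(1f));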
Example #20
        /// <summary>
        /// Computes the power of one `Tensor` to another. Supports broadcasting.
        ///
        /// Given a `Tensor` x and a `Tensor` y, this operation computes x^y for
        /// corresponding elements in x and y. The result's dtype will be the upcasted
        /// type of the `base` and `exp` dtypes.
        /// </summary>
        /// <param name="baset">The base `Tensor` to pow element-wise.</param>
        /// <param name="exp">The exponent `Tensor` to pow element-wise.</param>
        /// <returns>The element-wise power.</returns>
        public static Tensor pow(this Tensor baset, Tensor exp)
        {
            var outShape =
                Util.assertAndGetBroadcastShape(baset.Shape, exp.Shape);
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                var y = s[0];
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("baset", () =>
                {
                    var res        = dy.mul(exp.mul(y.div(baset)));
                    var reduceAxes =
                        Util.getReductionAxes(baset.Shape, outShape);
                    if (reduceAxes.Length > 0)
                    {
                        res = res.sum(reduceAxes);
                    }
                    return(res.reshape(baset.Shape));
                });
                g.gradient.Add("exp", () =>
                {
                    var res        = dy.mul(y.mul(baset.log()));
                    var reduceAxes = Util.getReductionAxes(exp.Shape, outShape);
                    if (reduceAxes.Length > 0)
                    {
                        res = res.sum(reduceAxes);
                    }
                    return(res.reshape(exp.Shape));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(saved(bk.Pow(baset, exp)));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("baset", baset);
            inputs.Add("exp", exp);
            return(e.runKernel(f, inputs, grad));
        }
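The gradients above reuse the saved output y = base^exp: d/d(base) = exp * base^(exp - 1) = exp * y / base, and d/d(exp) = y * ln(base). A brief usage sketch (assumed):

        // Usage sketch: squaring a tensor of 2s gives 4s.
        var b2      = Ops.ones(new int[] { 2 }).mul(Ops.scalar(2f));
        var squared = b2.pow(Ops.scalar(2f));   // each element is 4.0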
Example #21
        private static Tensor concat2Tensors(Tensor a, Tensor b, int axis)
        {
            var outShape = Util.computeOutShape(a.Shape, b.Shape, axis);

            var fs  = new ArraySegment <int>(a.Shape, axis, a.Shape.Length - axis);
            var fs2 = new ArraySegment <int>(b.Shape, axis, b.Shape.Length - axis);

            var a2D = a.as2D(-1, Util.SizeFromShape(fs.ToArray()));
            var b2D = b.as2D(-1, Util.SizeFromShape(fs2.ToArray()));

            var slices = Util.computeGradientSliceShapes(a2D.Shape, b2D.Shape);

            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("a", () => { return(dy.slice(slices.aBegin, slices.aSize)); });
                g.gradient.Add("b", () => { return(dy.slice(slices.bBegin, slices.bSize)); });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.concat(a2D, b2D));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("a", a2D);
            inputs.Add("b", b2D);
            var res = e.runKernel(f, inputs, grad);

            return(res.reshape(outShape));
        }
Example #22
        /// <summary>
        /// Computes the backprop of a max pool.
        /// </summary>
        /// <param name="dy">The dy error, of rank 4 or rank 3 of shape
        /// [batchSize, height, width, channels]. If rank 3, batch of 1 is assumed.</param>
        /// <param name="input">The original input image, of rank 4, of shape
        /// [batchSize, height, width, channels].</param>
        /// <param name="output ">The original output image, of rank 4, of shape
        /// [batchSize, outHeight, outWidth, channels].</param>
        /// <param name="filterSize">The filter size, a tuple [filterHeight, filterWidth].</param>
        /// <param name="strides">The strides of the pooling: [strideHeight, strideWidth].</param>
        /// <param name="pad">A string from: 'same', 'valid'. The type of padding algorithm used in the forward prop of the op.</param>
        /// <param name="dimRoundingMode">A string from: 'ceil', 'round', 'floor'. The
        /// rounding mode used when computing output dimensions if pad is a
        /// number. If none is provided, it will not round and error if the output
        /// is of fractional size.</param>
        /// <param name="padvalue"></param>
        /// <returns>The gradient with respect to the input of the max pool.</returns>
        private static Tensor maxPoolBackprop(Tensor dy, Tensor input, Tensor output, int[] filterSize,
                                              int[] strides, PadType pad, roundingMode dimRoundingMode, int?padvalue)
        {
            var convInfo = Util.computePool2DInfo(
                input.Shape, filterSize, strides, pad, dimRoundingMode, ConvDataFormat.channelsLast, padvalue);

            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.maxPoolBackprop(dy, input, output, convInfo));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("dy", dy);
            inputs.Add("input", input);
            var res = e.runKernel(f, inputs);

            return(res);
        }
Example #23
        /// <summary>
        ///  Computes the backprop of an avg pool.
        /// </summary>
        /// <param name="dy">The dy error, of rank 4 or rank 3 of shape
        ///  [batchSize, height, width, channels]. If rank 3, batch of 1 is assumed</param>
        /// <param name="input">The input image, of rank 4 or rank 3 of shape
        /// [batchSize, height, width, channels]. If rank 3, batch of 1 is assumed</param>
        /// <param name="filterSize">The filter size, a tuple [filterHeight, filterWidth].</param>
        /// <param name="strides">The strides of the pooling: [strideHeight, strideWidth].</param>
        /// <param name="pad"> A string from: 'same', 'valid'. The type of padding algorithm used in the forward prop of the op.</param>
        /// <param name="padvalue"></param>
        /// <returns>The gradient with respect to the input of the avg pool.</returns>
        private static Tensor avgPoolBackprop(Tensor dy, Tensor input, int[] filterSize,
                                              int[] strides, PadType pad, int?padvalue)
        {
            Tensor input4D      = null;
            Tensor dy4D         = null;
            var    reshapedTo4D = false;

            if (input.Rank == 3)
            {
                reshapedTo4D = true;
                input4D      = input.as4D(1, input.Shape[0], input.Shape[1], input.Shape[2]);
                dy4D         = dy.as4D(1, dy.Shape[0], dy.Shape[1], dy.Shape[2]);
            }
            else
            {
                input4D = input as Tensor;
                dy4D    = dy as Tensor;
            }


            var convInfo = Util.computePool2DInfo(
                input4D.Shape, filterSize, strides, pad, roundingMode.none, ConvDataFormat.channelsLast, padvalue);

            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.avgPoolBackprop(dy4D, input4D, convInfo));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("dy4D", dy4D);
            inputs.Add("input4D", input4D);
            var res = e.runKernel(f, inputs);

            if (reshapedTo4D)
            {
                return(res.as3D(res.Shape[1], res.Shape[2], res.Shape[3]));
            }
            return(res);
        }
Example #24
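        /// <summary>
        /// Runs a kernel's forward function and, when gradients are being
        /// recorded, registers a TapeNode whose gradient closure receives
        /// the tensors stashed via the save callback.
        /// </summary>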
        public Tensor runKernel(ForwardFunc forwardFunc,
                                Dictionary <string, Tensor> inputs, Func <Tensor, List <Tensor>, NamedGradientMap> grad = null)
        {
            Tensor                result;
            List <Tensor>         saved    = new List <Tensor>();
            Func <Tensor, Tensor> saveFunc = (Tensor x) =>
            {
                saved.Add(x);
                return(x);
            };
            var scopeName = this.activeScope.name;

            // Stop recording to a tape when running a kernel.
            this.customGradientDepth++;
            result = forwardFunc(this.backend, saveFunc);
            // Continue recording after the kernel is done.
            this.customGradientDepth--;
            if (this.shouldRecord())
            {
                var tapeNode = new TapeNode()
                {
                    id     = this.nextTapeNodeId++,
                    name   = scopeName,
                    inputs = inputs,
                    output = result
                };

                if (grad != null)
                {
                    tapeNode.gradient = (Tensor dy) =>
                    {
                        return(grad(dy, saved));
                    };
                }
                this.activeTape.Add(tapeNode);
            }


            return(result);
        }
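Every op in the examples above follows the same contract against this method. A hypothetical `identity` op (not part of the library) sketches the minimal wiring, assuming the `ForwardFunc` and `NamedGradientMap` shapes used throughout this file:

        // Hypothetical op showing the runKernel contract: the forward func
        // may wrap results in saved(...) to stash tensors, which the
        // gradient closure later receives as the list 's'.
        public static Tensor identity(this Tensor x)
        {
            Func<Tensor, List<Tensor>, NamedGradientMap> grad = (dy, s) =>
            {
                var g = new NamedGradientMap();
                g.gradient = new Dictionary<string, Func<Tensor>>();
                g.gradient.Add("x", () => dy);   // d(identity)/dx = 1
                return g;
            };
            ForwardFunc f = (bk, saved) => x.clone();
            var inputs = new Dictionary<string, Tensor>();
            inputs.Add("x", x);
            return ENV.engine.runKernel(f, inputs, grad);
        }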
Example #25
        static Tensor depthwiseConv2dDerFilter(this Tensor x, Tensor dy, int[] filterShape, int[] strides,
                                               PadType pad, roundingMode dimRoundingMode = roundingMode.none, Nullable <int> padValue = null)
        {
            Tensor x4D = null;

            if (x.Rank == 3)
            {
                x4D = x.as4D(1, x.Shape[0], x.Shape[1], x.Shape[2]);
            }
            else
            {
                x4D = x as Tensor;
            }
            Tensor dy4D = null;

            if (dy.Rank == 3)
            {
                dy4D = dy.as4D(1, dy.Shape[0], dy.Shape[1], dy.Shape[2]);
            }
            else
            {
                dy4D = dy as Tensor;
            }

            var dilations = 1;

            var convInfo = Util.computeConv2DInfo(
                x4D.Shape, filterShape, strides, new int[] { dilations, dilations }, pad, dimRoundingMode, false, ConvDataFormat.channelsLast, padValue);
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.depthwiseConv2DDerFilter(x4D, dy4D, convInfo));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x4D", x4D);
            inputs.Add("dy4D", dy4D);
            return(e.runKernel(f, inputs));
        }
Example #26
        /// <summary>
        /// Returns the indices of the maximum values along an `axis`.
        ///
        /// The result has the same shape as `input` with the dimension along `axis`
        /// removed.
        /// </summary>
        /// <param name="x">The input tensor.</param>
        /// <param name="axis">The dimension to reduce. Defaults to 0 (outer-most dimension).</param>
        /// <returns>The indices of the maximum values.</returns>
        public static Tensor argMax(this Tensor x, int[] axis = null)
        {
            var axes         = Util.parseAxisParam(axis, x.Shape);
            var permutedAxes = Util.getAxesPermutation(axes, x.Rank);

            if (permutedAxes != null)
            {
                x    = x.transpose(permutedAxes);
                axes = Util.getInnerMostAxes(axes.Length, x.Rank);
            }
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.ArgMax(x, axes));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs));
        }
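A brief usage sketch (assumed); with the all-ones stand-in every row ties, and typical backends resolve ties to the first index:

        // Usage sketch: indices of the max along axis 1 of a [2, 3] tensor.
        var logits = Ops.ones(new int[] { 2, 3 });
        var ids    = logits.argMax(new int[] { 1 });   // shape [2]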
Example #27
        /// <summary>
        ///  Computes floor of input `Tensor` element-wise: `floor(x)`.
        /// </summary>
        /// <param name="x">The input Tensor.</param>
        /// <returns>The element-wise floor of `x`.</returns>
        public static Tensor floor(this Tensor x)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    return(zerosLike(dy));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.floor(x));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs, grad));
        }
Example #28
        /// <summary>
        ///  Computes the reciprocal of `x` element-wise: `1 / x`
        /// </summary>
        /// <param name="x"> The input tensor.</param>
        /// <returns>The element-wise reciprocal of `x`.</returns>
        public static Tensor reciprocal(this Tensor x)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    return(dy.divStrict(x.square().neg()));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.reciprocal(x));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs, grad));
        }
Example #29
        /// <summary>
        ///  Computes cos of the input `Tensor` element-wise: `cos(x)`
        /// </summary>
        /// <param name="x">The input tensor.</param>
        /// <returns></returns>
        public static Tensor cos(this Tensor x)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    return(x.sin().neg().mulStrict(dy));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.cos(x));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs, grad));
        }
Example #30
        /// <summary>
        /// Computes step of the input `Tensor` element-wise: `x > 0 ? 1 : alpha * x`
        /// </summary>
        /// <param name="x"> The input tensor.</param>
        /// <param name="alpha">The gradient when input is negative.</param>
        /// <returns>The element-wise step of `x`.</returns>
        public static Tensor step(this Tensor x, float alpha = 0.0f)
        {
            Func <Tensor, List <Tensor>, NamedGradientMap> grad = (Tensor dy, List <Tensor> s) =>
            {
                NamedGradientMap g = new NamedGradientMap();
                g.gradient = new Dictionary <string, Func <Tensor> >();
                g.gradient.Add("x", () =>
                {
                    return(zerosLike(dy));
                });
                return(g);
            };
            Engine      e = ENV.engine;
            ForwardFunc f = (IBackend bk, Func <Tensor, Tensor> saved) =>
            {
                return(bk.step(x, alpha));
            };

            var inputs = new Dictionary <string, Tensor>();

            inputs.Add("x", x);
            return(e.runKernel(f, inputs, grad));
        }
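A brief usage sketch (assumed), following the summary's formula `x > 0 ? 1 : alpha * x`:

        // Usage sketch: positives squash to 1; alpha scales the negatives.
        var x  = Ops.ones(new int[] { 3 });
        var s1 = x.step();            // all 1.0, every element is positive
        var s2 = x.neg().step(0.1f);  // negative inputs become alpha * x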