Example #1
        static internal unsafe void Get8DParametersNoAlloc(this TensorShape shape, int[] parameters, int *parameters8D, int defaultValue)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            if (parameters.Length == TensorShape.MaxRank)
            {
                for (int i = 0; i < TensorShape.MaxRank; ++i)
                {
                    parameters8D[i] = parameters[i];
                }
            }
            else
            {
                Assert.AreEqual(4, parameters.Length);
                if (!shape.Is4D())
                {
                    Assert.IsTrue(false, $"4D Parameters {parameters} can't be used with a tensor of shape {shape} as it contains other dimensions, please use 8D parameters for this shape.");
                }
                parameters8D[0] = defaultValue;
                parameters8D[1] = defaultValue;
                parameters8D[2] = parameters[0];
                parameters8D[3] = defaultValue;
                parameters8D[4] = defaultValue;
                parameters8D[5] = parameters[1];
                parameters8D[6] = parameters[2];
                parameters8D[7] = parameters[3];
            }
        }
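A minimal usage sketch (hypothetical caller, assuming the 8D SRNTDHWC layout and a shape for which shape.Is4D() holds): the four parameters are spread into the N, H, W and C slots while every other axis receives defaultValue.

        static unsafe void Example_Get8DParameters(TensorShape shape)
        {
            int *strides8D = stackalloc int[TensorShape.MaxRank];
            // {n, h, w, c} = {1, 2, 2, 1} becomes {1, 1, 1, 1, 1, 2, 2, 1} with defaultValue = 1
            shape.Get8DParametersNoAlloc(new[] { 1, 2, 2, 1 }, strides8D, 1);
        }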
Example #2
        static internal unsafe int[] AdjustPadToPool(this TensorShape shape, int *pool, int[] stride, int[] pad)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            Assert.IsTrue(stride.Length > 0);
            int featureCount = stride.Length;

            Assert.IsTrue(featureCount <= TensorShape.DataFeatures.Length);

            // negative pad values mean auto_pad type is used
            if (pad[0] >= 0)
            {
                return(pad);
            }

            var type = (Layer.AutoPad)pad[0];

            if (type == Layer.AutoPad.SameUpper || type == Layer.AutoPad.SameLower)
            {
                // Based on ONNX (AveragePool & MaxPool)
                //        https://github.com/onnx/onnx/blob/master/docs/Operators.md
                // and TensorFlow docs:
                //         https://www.tensorflow.org/api_guides/python/nn#Notes_on_SAME_Convolution_Padding
                var adjustedPad = new int [featureCount * 2];
                for (var i = 0; i < featureCount; ++i)
                {
                    var featureModStride = shape[TensorShape.DataFeatures[i]] % stride[i];
                    if (featureModStride == 0)
                    {
                        featureModStride = stride[i];
                    }

                    var padAlongFeature = Math.Max(pool[i] - featureModStride, 0);
                    // Code above (based on TensorFlow docs) is equivalent to (based on ONNX docs):
                    // padAlongWidth = (Mathf.Ceil(shape.width/stride[0]) - 1) * stride[0] + pool[0] - shape.width;
                    // padAlongHeight = (Mathf.Ceil(shape.height/stride[1]) - 1) * stride[1] + pool[1] - shape.height;
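                    // Worked example with hypothetical values (feature size 10, stride 2, pool 3):
                    //   featureModStride = 10 % 2 = 0  ->  replaced by stride = 2
                    //   padAlongFeature  = max(3 - 2, 0) = 1, split below into featureSmall = 0 and featureLarge = 1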
                    var featureSmall = padAlongFeature / 2;
                    var featureLarge = padAlongFeature - featureSmall;
                    if (type == Layer.AutoPad.SameUpper)
                    {
                        adjustedPad[i] = featureSmall;
                        adjustedPad[i + featureCount] = featureLarge;
                    }
                    else
                    {
                        adjustedPad[i] = featureLarge;
                        adjustedPad[i + featureCount] = featureSmall;
                    }
                }
                return(adjustedPad);
            }
            else
            {
                throw new NotImplementedException("This padding type is not implemented yet!");
            }
        }
Example #3
        static internal bool IsNDHWC(this TensorShape shape)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            return(shape.sequenceLength == 1 &&
                   shape.numberOfDirections == 1 &&
                   shape.extraDimension == 1);
        }
Example #4
        /// <summary>
        /// Reduce TensorShape across specified `axis`
        /// </summary>
        /// <param name="shape">TensorShape</param>
        /// <param name="axis">axis</param>
        /// <returns>output shape</returns>
        static public TensorShape Reduce(this TensorShape shape, int axis)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            axis = shape.Axis(axis);
            var newShapeArray = shape;

            newShapeArray[axis] = 1;
            return(newShapeArray);
        }
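A brief usage sketch (hypothetical shape): reducing over the channel axis collapses that dimension to 1 and leaves the rest of the shape untouched.

        static TensorShape Example_Reduce()
        {
            var shape = new TensorShape(2, 4, 4, 16);  // N, H, W, C
            return shape.Reduce(TensorShape.C);        // 2, 4, 4, 1
        }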
Example #5
        static internal int[] AdjustPadToPool(this TensorShape shape, int[] pool, int[] stride, int[] pad)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            unsafe
            {
                fixed(int *pPool = pool)
                {
                    return(AdjustPadToPool(shape, pPool, stride, pad));
                }
            }
        }
Example #6
        /// <summary>
        /// Scale TensorShape by the `scale` factor
        /// </summary>
        /// <param name="shape">TensorShape</param>
        /// <param name="scale">scale</param>
        /// <returns>output shape</returns>
        static public TensorShape Scale(this TensorShape shape, TensorShape scale)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            var newShape = shape;

            for (var axis = 0; axis < TensorShape.MaxRank; axis++)
            {
                newShape[axis] *= scale[axis];
            }
            return(newShape);
        }
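A short usage sketch (hypothetical shapes): upsampling height and width by 2 is an elementwise multiplication of the two shapes.

        static TensorShape Example_Scale()
        {
            var shape = new TensorShape(1, 8, 8, 3);  // N, H, W, C
            var scale = new TensorShape(1, 2, 2, 1);  // per-axis factors
            return shape.Scale(scale);                // 1, 16, 16, 3
        }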
Example #7
        static internal TensorShape ApplyKernelInverse(this TensorShape shape, TensorShape kernel, int[] stride, int[] pad, int[] outputAdjustment)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            Assert.IsTrue(stride.Length > 0);
            Assert.IsTrue(stride.Length * 2 == pad.Length);
            Assert.IsTrue(stride.Length <= TensorShape.KernelSpatials.Length);
            Assert.IsTrue(stride.Length <= TensorShape.DataFeatures.Length);

            // Based on ONNX (ConvTranspose)
            //        https://github.com/onnx/onnx/blob/master/docs/Operators.md
            // and Theano "Convolution arithmetic tutorial"
            //        http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html#transposed-convolution-arithmetic
            //
            // Inverse of:
            //   output_size = (input_size + pad_left + pad_right - kernel_size) / stride + 1
            // Resulting in:
            //   output_size = (input_size - 1) * stride - (pad_left + pad_right) + kernel_size + output_adj
            //   output_adj = (input_size + (pad_left + pad_right) - kernel_size) % stride
            //
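            // Worked example with hypothetical values (input_size=4, kernel_size=3, stride=2, pad_left=pad_right=1):
            //   output_adj  = (4 + 2 - 3) % 2 = 1
            //   output_size = (4 - 1) * 2 - 2 + 3 + 1 = 8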
            if (outputAdjustment == null || outputAdjustment.Length == 0)
            {
                outputAdjustment = new int[stride.Length];
                for (var i = 0; i < stride.Length; ++i)
                {
                    var featureAxis = TensorShape.DataFeatures[i];
                    var kernelAxis  = TensorShape.KernelSpatials[i];
                    var padding     = pad[i] + pad[stride.Length + i];
                    outputAdjustment[i] = (shape[featureAxis] + padding - kernel[kernelAxis]) % stride[i];
                }
            }

            var newShape = shape;

            for (var i = 0; i < stride.Length; ++i)
            {
                var featureAxis = TensorShape.DataFeatures[i];
                var kernelAxis  = TensorShape.KernelSpatials[i];
                var padding     = pad[i] + pad[stride.Length + i];
                newShape[featureAxis] = (shape[featureAxis] - 1) * stride[i] - padding + kernel[kernelAxis] + outputAdjustment[i];
            }

            newShape[TensorShape.KernelOutChannel] = kernel.kernelCount;
            return(newShape);
        }
Example #8
        static internal unsafe TensorShape ApplyStridedSlice8DUnsafeNoAlloc(this TensorShape shape, int *starts, int *ends,
                                                                            int *stride)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            TensorShape sliced = shape;

            for (int i = 0; i < shape.rank; ++i)
            {
                // NOTE: begin=0, end=0, stride=1  <=  full range from the existing axis
                //       begin=0, end=X, stride=1  <=  full range from the existing axis, if X==last element on this axis
                //       begin=0, end=0, stride=0  <=  new axis OR shrink axis to a single 1st element
                //       begin=N, end=N, stride=0  <=              shrink axis to a single Nth element

                // take + 1 if si > shape[i]
                int ei = TensorExtensions.WrapIndex(ends[i], shape[i]);
                int si = TensorExtensions.WrapIndex(starts[i], shape[i]);


                // Barracuda convention (non-ONNX): t[0:0] => t[:]
                if (si == 0 && ei == 0)
                {
                    ei = shape[i];
                }

                if (stride[i] > 0)
                {
                    sliced[i] = (int)Math.Round((double)(Math.Min(ei, shape[i]) - Math.Min(si, shape[i] - 1)) / (double)(Mathf.Abs(stride[i])), MidpointRounding.AwayFromZero);
                }
                else if (stride[i] < 0)
                {
                    bool inclusive = ends[i] < -shape[i]; // edge case when ends[i] is negative with magnitude larger than shape[i]
                    sliced[i] = (int)Math.Round((double)(Math.Min(si, shape[i] - 1) - Math.Min(ei, shape[i]) + (inclusive ? 1 : 0)) / (double)(Mathf.Abs(stride[i])), MidpointRounding.AwayFromZero);
                }
                else
                {
                    // Assert.IsTrue(stride[i] != 0); // 0 strides are not allowed, but asserting here breaks legacy implementations
                    D.LogWarning("StridedSlice with a stride of 0 is not supported! Slicing this axis to size 1.");
                    sliced[i] = 1;
                }
            }

            return(sliced);
        }
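A small usage sketch (hypothetical values) of the slicing convention documented above: begin=0, end=0, stride=1 keeps an axis untouched, so only H is actually sliced here.

        static TensorShape Example_StridedSlice()
        {
            var shape = new TensorShape(1, 8, 8, 4);              // N, H, W, C
            return shape.ApplyStridedSlice(new[] { 0, 0, 0, 0 },  // starts
                                           new[] { 0, 4, 0, 0 },  // ends: take H[0:4]
                                           new[] { 1, 1, 1, 1 }); // expected: 1, 4, 8, 4
        }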
Example #9
        static internal int FirstNotIdentityFeatureDimensionIndex(this TensorShape shape)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            for (int dimIndex = TensorShape.DataFeature3; dimIndex < TensorShape.MaxRank; ++dimIndex)
            {
                if (shape[dimIndex] > 1)
                {
                    return(dimIndex);
                }
            }

            return(TensorShape.MaxRank);
        }
Example #10
        static internal TensorShape ApplyPool(this TensorShape shape, int[] pool, int[] stride, int[] pad,
                                              bool ceilMode = false)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            Assert.IsTrue(stride.Length == pool.Length);
            unsafe
            {
                fixed(int *pPool = pool)
                {
                    return(ApplyPool(shape, pPool, stride, pad, ceilMode));
                }
            }
        }
Example #11
        static internal TensorShape ApplyStridedSlice(this TensorShape shape, int[] starts, int[] ends, int[] stride)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            unsafe
            {
                int *starts8Dbuffer = stackalloc int[TensorShape.MaxRank];
                int *ends8Dbuffer   = stackalloc int[TensorShape.MaxRank];
                int *stride8Dbuffer = stackalloc int[TensorShape.MaxRank];
                Get8DParametersNoAlloc(shape, starts, starts8Dbuffer, 0);
                Get8DParametersNoAlloc(shape, ends, ends8Dbuffer, 1);
                Get8DParametersNoAlloc(shape, stride, stride8Dbuffer, 1);

                return(shape.ApplyStridedSlice8DUnsafeNoAlloc(starts8Dbuffer, ends8Dbuffer, stride8Dbuffer));
            }
        }
Example #12
        /// <summary>
        /// Calculate output shape for Gather operation
        /// </summary>
        /// <param name="shapes">input shapes</param>
        /// <param name="axis">axis</param>
        /// <returns>output shape</returns>
        static public TensorShape Gather(TensorShape[] shapes, int axis)
        {
            TensorShape shape = shapes[0];

            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            TensorShape indices = shapes[1];

            if (!indices.hasNamedDimensions)
            {
                indices = indices.AsNamed();
            }

            shape[axis] = indices.length;

            return(shape);
        }
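A short usage sketch (hypothetical shapes, assuming Gather is declared in the same TensorExtensions class referenced above): gathering five indices along the channel axis replaces that dimension with the number of gathered elements.

        static TensorShape Example_Gather()
        {
            var data    = new TensorShape(2, 4, 4, 16);  // N, H, W, C
            var indices = new TensorShape(1, 1, 1, 5);   // 5 indices in total
            return TensorExtensions.Gather(new[] { data, indices }, TensorShape.C);  // 2, 4, 4, 5
        }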
Example #13
        static internal unsafe TensorShape ApplyPool(this TensorShape shape, int *pool, int[] stride, int[] pad, bool ceilMode = false)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            Assert.IsTrue(stride.Length > 0);

            Assert.IsTrue(stride.Length * 2 == pad.Length);
            int featureCount = stride.Length;

            Assert.IsTrue(featureCount <= TensorShape.DataFeatures.Length);

            // Based on ONNX (AveragePool & MaxPool)
            //        https://github.com/onnx/onnx/blob/master/docs/Operators.md
            // Theano "Convolution arithmetic tutorial"
            //        http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html#quick-reference
            // and TensorFlow docs:
            //         https://www.tensorflow.org/api_guides/python/nn#Convolution
            //         https://www.tensorflow.org/api_guides/python/nn#Notes_on_SAME_Convolution_Padding
            //
            //   output_size = (input_size + pad_left + pad_right - kernel_size) / stride + 1
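            // Worked example with hypothetical values (input_size=32, pool=3, stride=2, pad_left=pad_right=1):
            //   floor mode: (32 + 2 - 3) / 2 + 1 = 16
            //   ceil mode:  (32 + 2 - 3 + 2 - 1) / 2 + 1 = 17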
            var newShape = shape;

            for (var i = 0; i < featureCount; ++i)
            {
                // C# automatically rounds down
                // https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/arithmetic-operators
                if (ceilMode)
                {
                    newShape[TensorShape.DataFeatures[i]] = (shape[TensorShape.DataFeatures[i]] + (pad[i] + pad[i + featureCount]) - pool[i] + stride[i] - 1) / stride[i] + 1;
                }
                else
                {
                    newShape[TensorShape.DataFeatures[i]] = (shape[TensorShape.DataFeatures[i]] + (pad[i] + pad[i + featureCount]) - pool[i]) / stride[i] + 1;
                }
            }
            return(newShape);
        }
Example #14
        static internal int[] AdjustPadToKernel(this TensorShape shape, TensorShape kernel, int[] stride, int[] pad)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            Assert.IsTrue(stride.Length == 2 || stride.Length == 3);
            unsafe
            {
                int *kernelDims = stackalloc int[stride.Length == 2 ? 2 : 3];
                kernelDims[0] = kernel.kernelWidth;
                kernelDims[1] = kernel.kernelHeight;

                if (stride.Length > 2)
                {
                    kernelDims[2] = kernel.kernelSpatialDepth;
                }

                return(AdjustPadToPool(shape, kernelDims, stride, pad));
            }
        }
Example #15
        static internal TensorShape Permute(this TensorShape shape, NativeArray <int> permutations)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            if (permutations.Length == 4)
            {
                permutations = Get8DPermutationsForNHWCPermutationsAndShape(shape, permutations);
            }

            var permutedShape = new int[TensorShape.MaxRank];

            for (var i = 0; i < permutations.Length; ++i)
            {
                permutedShape[i] = permutations[i] >= 0 ? shape[permutations[i]] : 1;
            }

            var output = new TensorShape(permutedShape);

            return(output);
        }
Example #16
        static internal TensorShape ApplyKernel(this TensorShape shape, TensorShape kernel, int[] stride, int[] pad)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            unsafe
            {
                Assert.IsTrue(stride.Length == 2 || stride.Length == 3);
                int *kernelDims = stackalloc int[stride.Length == 2 ? 2 : 3];
                kernelDims[0] = kernel.kernelWidth;
                kernelDims[1] = kernel.kernelHeight;
                if (stride.Length > 2)
                {
                    kernelDims[2] = kernel.kernelSpatialDepth;
                }

                var outShape = ApplyPool(shape, kernelDims, stride, pad);
                outShape[TensorShape.C] = kernel.kernelCount;
                return(outShape);
            }
        }
Example #17
        /// <summary>
        /// Calculate new shape after applying border to current TensorShape
        /// </summary>
        /// <param name="shape">TensorShape</param>
        /// <param name="border">border</param>
        /// <returns>new TensorShape</returns>
        static public TensorShape ApplyBorder(this TensorShape shape, int[] border)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            Assert.IsTrue(border.Length == 6 || border.Length == 8);
            if (border.Length == 6)
            {
                shape[TensorShape.H] += border[1] + border[4];
                shape[TensorShape.W] += border[0] + border[3];
                shape[TensorShape.C] += border[2] + border[5];
            }
            else if (border.Length == 8)
            {
                shape[TensorShape.D] += border[2] + border[6];
                shape[TensorShape.H] += border[1] + border[5];
                shape[TensorShape.W] += border[0] + border[4];
                shape[TensorShape.C] += border[3] + border[7];
            }

            return(shape);
        }
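A brief usage sketch (hypothetical values) of the 6-element border layout handled above: the first three entries pad the leading W, H and C edges and the last three pad the trailing edges.

        static TensorShape Example_ApplyBorder()
        {
            var shape = new TensorShape(1, 8, 8, 3);               // N, H, W, C
            return shape.ApplyBorder(new[] { 1, 1, 0, 1, 1, 0 });  // 1, 10, 10, 3
        }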
Example #18
        static internal int[] Get8DPermutationsForNCHWPermutationsAndShape(this TensorShape shape, int[] permutations)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            if (permutations.Length == TensorShape.MaxRank)
            {
                return(permutations);
            }

            Assert.AreEqual(4, permutations.Length);
            if (!shape.Is4D())
            {
                Assert.IsTrue(false, $"4D Permutation {permutations} can't be used with a tensor of shape {shape} as it contains other dimensions, please use an 8D permutation for this shape.");
            }
            int batchOldAxis    = Convert4DTo8DAxis(permutations[0]);
            int channelOldIndex = Convert4DTo8DAxis(permutations[1]);
            int heightOldIndex  = Convert4DTo8DAxis(permutations[2]);
            int widthOldIndex   = Convert4DTo8DAxis(permutations[3]);

            return(new int[] { 0, 1, batchOldAxis, 3, 4, channelOldIndex, heightOldIndex, widthOldIndex });
        }
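A small usage sketch (hypothetical input, and assuming Convert4DTo8DAxis maps the 4D axes 0, 1, 2, 3 to the 8D indices 2, 5, 6 and 7): the converted entries land in slots 2, 5, 6 and 7 while slots 0, 1, 3 and 4 stay fixed, so the identity 4D permutation widens to the identity 8D permutation.

        static int[] Example_Get8DPermutations(TensorShape shape)
        {
            // for a 4D shape, {0, 1, 2, 3} widens to {0, 1, 2, 3, 4, 5, 6, 7}
            return shape.Get8DPermutationsForNCHWPermutationsAndShape(new[] { 0, 1, 2, 3 });
        }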
Example #19
        static internal NativeArray <int> Get8DPermutationsForNCHWPermutationsAndShape(this TensorShape shape, NativeArray <int> inPermutations)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            if (inPermutations.Length == TensorShape.MaxRank)
            {
                return(inPermutations);
            }

            Assert.AreEqual(4, inPermutations.Length);
            if (!shape.Is4D())
            {
                Assert.IsTrue(false, $"4D Permutation {inPermutations.ToString()} can't be used with a tensor of shape {shape} as it contains other dimensions, please use an 8D permutation for this shape.");
            }
            int batchOldAxis    = Convert4DTo8DAxis(inPermutations[0]);
            int channelOldIndex = Convert4DTo8DAxis(inPermutations[1]);
            int heightOldIndex  = Convert4DTo8DAxis(inPermutations[2]);
            int widthOldIndex   = Convert4DTo8DAxis(inPermutations[3]);

            // the returned NativeArray uses Allocator.Temp and is only valid for a single frame
            NativeArray <int> outPermutations = new NativeArray <int>(8, Allocator.Temp);

            outPermutations[0] = 0;
            outPermutations[1] = 1;
            outPermutations[2] = batchOldAxis;
            outPermutations[3] = 3;
            outPermutations[4] = 4;
            outPermutations[5] = channelOldIndex;
            outPermutations[6] = heightOldIndex;
            outPermutations[7] = widthOldIndex;

            return(outPermutations);
        }
Example #20
        /// <summary>
        /// Scale TensorShape by the `scale` factor
        /// </summary>
        /// <param name="shape">TensorShape</param>
        /// <param name="scale">scale</param>
        /// <returns>output shape</returns>
        static public TensorShape Scale(this TensorShape shape, int[] scale)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            if (scale.Length == TensorShape.MaxRank)
            {
                for (var axis = 0; axis < TensorShape.MaxRank; axis++)
                {
                    shape[axis] *= scale[axis];
                }
            }
            else
            {
                Assert.AreEqual(4, scale.Length);
                shape[TensorShape.DataBatch] *= scale[0];
                shape[TensorShape.H] *= scale[1];
                shape[TensorShape.W] *= scale[2];
                shape[TensorShape.C] *= scale[3];
            }
            return(shape);
        }
Example #21
        /// <summary>
        /// Reshape TensorShape into new shape specified by `size`. At most one dimension of the new shape can be -1.
        /// See: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Reshape
        /// </summary>
        /// <param name="shape">TensorShape</param>
        /// <param name="size4Dor8D">new shape</param>
        /// <returns>output shape</returns>
        /// <exception cref="ArgumentException">more than one dimension is unspecified</exception>
        static public TensorShape Reshape(this TensorShape shape, int[] size4Dor8D)
        {
            if (!shape.hasNamedDimensions)
            {
                shape = shape.AsNamed();
            }

            unsafe
            {
                int *size          = stackalloc int[TensorShape.MaxRank];
                int *newShapeArray = stackalloc int[TensorShape.MaxRank];

                Get8DParametersNoAlloc(shape, size4Dor8D, size, 1);
                for (int d = 0; d < TensorShape.MaxRank; ++d)
                {
                    newShapeArray[d] = shape[d];
                }

                // From: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Reshape
                //
                // At most one dimension of the new shape can be -1.
                // In this case, the value is inferred from the size of the tensor and the remaining dimensions.
                //
                // A dimension could also be 0,
                // in which case the actual dimension value is unchanged (i.e. taken from the input tensor).

                var multipleOf   = 1;
                var unknownIndex = -1;
                for (int q = 0; q < TensorShape.MaxRank; ++q)
                {
                    if (size[q] > 0)
                    {
                        multipleOf      *= size[q];
                        newShapeArray[q] = size[q];
                    }
                    else if (size[q] == 0)
                    {
                        multipleOf *= newShapeArray[q];
                    }
                    else if (unknownIndex == -1)
                    {
                        unknownIndex = q;
                    }
                    else
                    {
                        throw new ArgumentException("Can only specify one unknown dimension");
                    }
                }

                if (unknownIndex == -1)
                {
                    // all dimensions are given
                    var newShape = new TensorShape(newShapeArray[0], newShapeArray[1], newShapeArray[2], newShapeArray[3],
                                                   newShapeArray[4], newShapeArray[5], newShapeArray[6], newShapeArray[7]);
                    if (shape.length != newShape.length)
                    {
                        throw new ArgumentException("Cannot reshape array of size " + shape.length +
                                                    " into shape " + newShape);
                    }
                    return(newShape);
                }

                var  solveForIndex = shape.length / multipleOf;
                bool remainderLeft = shape.length % multipleOf != 0;

                if (remainderLeft)
                {
                    throw new ArgumentException("Cannot reshape array of size " + shape.length +
                                                " into shape with multiple of " + multipleOf + " elements");
                }

                newShapeArray[unknownIndex] = solveForIndex;
                return(new TensorShape(newShapeArray[0], newShapeArray[1], newShapeArray[2], newShapeArray[3],
                                       newShapeArray[4], newShapeArray[5], newShapeArray[6], newShapeArray[7]));
            }
        }
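A short usage sketch (hypothetical values) of the 0 and -1 conventions documented above: 0 keeps the corresponding input dimension and -1 is inferred from the remaining element count.

        static TensorShape Example_Reshape()
        {
            var shape = new TensorShape(2, 4, 4, 16);      // 512 elements
            // 0 keeps the batch dimension (2), -1 is inferred as 512 / (2 * 2 * 16) = 8
            return shape.Reshape(new[] { 0, 2, -1, 16 });  // 2, 2, 8, 16
        }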