Example no. 1
            // Returns a new tensor whose dimensions are reordered so that output
            // dimension i has the size of input dimension dims[i] (PyTorch-style permute).
            public static Tensor permute(this Tensor x, params int[] dims)
            {
                // dims is expected to be a permutation of 0..x.__shape.Length - 1.
                var y_shape = new int[x.__shape.Length];

                for (int i = 0; i < x.__shape.Length; i++)
                {
                    y_shape[i] = x.__shape[dims[i]];
                }
                // The result participates in autograd only if x requires gradients
                // and gradient recording has not been disabled via no_grad.
                var y = new Tensor(y_shape, x.dtype, (!torch.autograd.grad_mode.no_grad.prev) && x.requires_grad);

                // Dispatch on the element type: MKL.Permute copies the data from x's
                // buffer into y's buffer according to the two shape/stride descriptors.
                switch (x.dtype)
                {
                case torch.float16:
                {
                    MKL.Permute(x.__half, x.__shape, x.__strides, dims, y.__half, y.__shape, y.__strides);
                    if (y.requires_grad)
                    {
                        // Backward pass: propagate the gradient accumulated in y.grad back
                        // into x.grad, then continue the chain through x's own backward.
                        y.__backward_fn = () =>
                        {
                            MKL.dPermute(x.grad.__half, x.__shape, x.__strides, dims, y.grad.__half, y.__shape, y.__strides);
                            if (x.__backward_fn != null)
                            {
                                x.__backward_fn();
                            }
                        };
                    }
                    break;
                }

                // The float32 and float64 cases mirror float16.
                case torch.float32:
                {
                    MKL.Permute(x.__float, x.__shape, x.__strides, dims, y.__float, y.__shape, y.__strides);
                    if (y.requires_grad)
                    {
                        y.__backward_fn = () =>
                        {
                            MKL.dPermute(x.grad.__float, x.__shape, x.__strides, dims, y.grad.__float, y.__shape, y.__strides);
                            if (x.__backward_fn != null)
                            {
                                x.__backward_fn();
                            }
                        };
                    }
                    break;
                }

                case torch.float64:
                {
                    MKL.Permute(x.__double, x.__shape, x.__strides, dims, y.__double, y.__shape, y.__strides);
                    if (y.requires_grad)
                    {
                        y.__backward_fn = () =>
                        {
                            MKL.dPermute(x.grad.__double, x.__shape, x.__strides, dims, y.grad.__double, y.__shape, y.__strides);
                            if (x.__backward_fn != null)
                            {
                                x.__backward_fn();
                            }
                        };
                    }
                    break;
                }

                // Integer and boolean tensors are not differentiable,
                // so the remaining dtypes attach no backward function.
                case torch.int8:
                {
                    MKL.Permute(x.__int8, x.__shape, x.__strides, dims, y.__int8, y.__shape, y.__strides);
                    break;
                }

                case torch.uint8:
                {
                    MKL.Permute(x.__uint8, x.__shape, x.__strides, dims, y.__uint8, y.__shape, y.__strides);
                    break;
                }

                case torch.int16:
                {
                    MKL.Permute(x.__int16, x.__shape, x.__strides, dims, y.__int16, y.__shape, y.__strides);
                    break;
                }

                case torch.int32:
                {
                    MKL.Permute(x.__int32, x.__shape, x.__strides, dims, y.__int32, y.__shape, y.__strides);
                    break;
                }

                case torch.int64:
                {
                    MKL.Permute(x.__int64, x.__shape, x.__strides, dims, y.__int64, y.__shape, y.__strides);
                    break;
                }

                case torch.@bool:
                {
                    MKL.Permute(x.__bool, x.__shape, x.__strides, dims, y.__bool, y.__shape, y.__strides);
                    break;
                }
                }
                return y;
            }
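
For reference, the element mapping that the listing delegates to MKL.Permute can be sketched in plain C#. This is a self-contained, hypothetical sketch of what the native routine is assumed to compute for contiguous row-major buffers; PermuteSketch, its Permute helper, and the stride computation below are illustrative and are not part of the library.

using System;

static class PermuteSketch
{
    // Rearranges a flat, row-major buffer so that output dimension k
    // takes the size (and data) of input dimension dims[k].
    public static float[] Permute(float[] src, int[] srcShape, int[] dims)
    {
        int n = srcShape.Length;
        // Output shape, computed exactly like y_shape in the listing above.
        var dstShape = new int[n];
        for (int k = 0; k < n; k++)
        {
            dstShape[k] = srcShape[dims[k]];
        }
        // Row-major strides of the source buffer.
        var srcStrides = new int[n];
        int stride = 1;
        for (int d = n - 1; d >= 0; d--)
        {
            srcStrides[d] = stride;
            stride *= srcShape[d];
        }
        var dst = new float[src.Length];
        var dstIndex = new int[n];
        for (int flat = 0; flat < dst.Length; flat++)
        {
            // Decompose the flat output index into a multi-index over dstShape.
            int rem = flat;
            for (int d = n - 1; d >= 0; d--)
            {
                dstIndex[d] = rem % dstShape[d];
                rem /= dstShape[d];
            }
            // Output index k addresses input dimension dims[k].
            int srcOffset = 0;
            for (int k = 0; k < n; k++)
            {
                srcOffset += dstIndex[k] * srcStrides[dims[k]];
            }
            dst[flat] = src[srcOffset];
        }
        return dst;
    }

    static void Main()
    {
        // A 2x3 matrix; permuting with dims = {1, 0} is a transpose to 3x2.
        var a = new float[] { 1, 2, 3, 4, 5, 6 };
        var t = Permute(a, new int[] { 2, 3 }, new int[] { 1, 0 });
        Console.WriteLine(string.Join(", ", t)); // prints 1, 4, 2, 5, 3, 6
    }
}

The backward pass in the listing would then correspond to scattering the output gradient back through the inverse of this mapping, which is presumably what MKL.dPermute does.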