Example #1
        static public ITensorData CreateFromTexture(Texture tex, TensorShape shape)
        {
            Assert.AreEqual(tex.width, shape.width);
            Assert.AreEqual(tex.height, shape.height);
            Assert.IsTrue(shape.channels < 4);

            // @TODO: implement proper GPU storage
            var data = new ArrayTensorData(shape);

            if (tex is Texture2D)
            {
                Texture2D tex2d  = tex as Texture2D;
                var       pixels = tex2d.GetPixels();
                for (int i = 0; i < data.array.Length && i < pixels.Length * shape.channels; ++i)
                {
                    data.array[i] = pixels[i / shape.channels][i % shape.channels];
                }
            }
            else
            {
                throw new NotImplementedException();
            }

            return(data);
        }
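A hedged usage sketch for Example #1: only the `CreateFromTexture` signature above is taken from the source; the holder class name `TextureConverter` and the shape values are assumptions made for illustration.

        // Sketch: turn an RGB Texture2D into tensor data via the method above.
        // `TextureConverter` is a hypothetical class holding the static method.
        static ITensorData TensorDataFromTexture(Texture2D tex)
        {
            // batch = 1, height/width taken from the texture, 3 channels (RGB);
            // the asserts above require the shape to match the texture exactly.
            var shape = new TensorShape(1, tex.height, tex.width, 3);
            return TextureConverter.CreateFromTexture(tex, shape);
        }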
Example #2
        public Tensor GetInputAsTensor(int idx = 0, int batchCount = -1, int fromBatch = 0)
        {
            if (rawTestSet != null)
            {
                throw new Exception("GetInputAsTensor is not supported for RAW test suites");
            }

            var shape         = GetInputShape(idx);
            var array         = GetInputData(idx);
            var maxBatchCount = array.Length / (shape[1] * shape[2] * shape[3]);

            fromBatch = Math.Min(fromBatch, maxBatchCount - 1);
            if (batchCount < 0)
            {
                batchCount = maxBatchCount - fromBatch;
            }

            // pad data with 0s, if test-set doesn't have enough batches:
            // 1) new ArrayTensorData() will initialize to 0
            // 2) Upload will copy as much data as test-set has into ArrayTensorData
            var tensorShape = new TensorShape(batchCount, shape[1], shape[2], shape[3]);
            var data        = new ArrayTensorData(tensorShape.length);

            data.Upload(array, fromBatch * tensorShape.flatWidth, Math.Min(batchCount, maxBatchCount - fromBatch) * tensorShape.flatWidth);

            var res = new Tensor(tensorShape, data);

            res.name = GetInputName(idx);

            return(res);
        }
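A hedged calling sketch for Example #2: `testSet` stands in for an instance of the (unnamed) test-set class that declares `GetInputAsTensor`, and the argument values are made up.

        // Sketch: take up to 4 batches of input 0, starting at batch 2.
        // If the test set holds fewer batches, the tail of the tensor stays zero-padded.
        Tensor input = testSet.GetInputAsTensor(idx: 0, batchCount: 4, fromBatch: 2);
        // ... feed `input` to a worker, then release its memory when done.
        input.Dispose();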
Example #3
        /// <summary>
        /// Create a Tensor with specified `shape`, an array of data `srcData` and an optional debug `name`.
        /// `srcData` must be of size `shape.length`.
        /// </summary>
        public Tensor(TensorShape shape, float[][] srcData, string name = "")
        {
            this.name  = name;
            this.shape = shape;
            var arrayTensorData = new ArrayTensorData(shape);

            for (var i = 0; i < Math.Min(flatHeight, srcData.Length); ++i)
            {
                var src       = srcData[i];
                var dstOffset = i * flatWidth;
                Array.Copy(src, 0, arrayTensorData.array, dstOffset, Math.Min(flatWidth, src.Length));
            }
            m_TensorOnDevice  = arrayTensorData;
            m_TensorAllocator = null;
            m_Cache           = null;
            m_CacheIsDirty    = false;
        }
Example #4
        /// <summary>
        /// Create a Tensor of shape `s`, an array of data `srcData` and an optional name `n`
        /// `srcData` should be of size `s.length`.
        /// </summary>
        public Tensor(TensorShape s, float[][] srcData, string n = "")
        {
            //;;UnityEngine.Debug.Log("Tensor::Tensor " + n + " " + s + " [][]-> " + srcData);
            name  = n;
            shape = s;
            var arrayTensorData = new ArrayTensorData(shape);

            for (var i = 0; i < Math.Min(flatHeight, srcData.Length); ++i)
            {
                var src       = srcData[i];
                var dstOffset = i * flatWidth;
                Array.Copy(src, 0, arrayTensorData.array, dstOffset, Math.Min(flatWidth, src.Length));
            }
            m_TensorOnDevice  = arrayTensorData;
            m_TensorAllocator = null;
            m_Cache           = null;
            m_CacheIsDirty    = false;
        }
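A hedged construction sketch covering both jagged-array constructors above (Examples #3 and #4): one inner array per batch row, ideally of length `shape.flatWidth`; shorter rows are left zero-padded by the `Math.Min` copy.

        // Sketch: a 2-batch tensor with 3 values per batch (flatHeight = 2, flatWidth = 3).
        var rows = new float[][]
        {
            new float[] { 1f, 2f, 3f },   // batch 0
            new float[] { 4f, 5f, 6f }    // batch 1
        };
        var t = new Tensor(new TensorShape(2, 1, 1, 3), rows, "jagged-example");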
Example #5
        /// <summary>
        /// Get input as `Tensor`
        /// </summary>
        /// <param name="idx">input index</param>
        /// <param name="batchCount">max batch count</param>
        /// <param name="fromBatch">start from batch</param>
        /// <returns>`Tensor`</returns>
        /// <exception cref="Exception">thrown if called on raw test set (only JSON test set is supported)</exception>
        public Tensor GetInputAsTensor(int idx = 0, int batchCount = -1, int fromBatch = 0)
        {
            if (rawTestSet != null)
            {
                throw new Exception("GetInputAsTensor is not supported for RAW test suites");
            }

            TensorShape shape = GetInputShape(idx);

            Assert.IsTrue(shape.sequenceLength == 1 && shape.numberOfDirections == 1);
            var array         = GetInputData(idx);
            var maxBatchCount = array.Length / shape.flatWidth;

            fromBatch = Math.Min(fromBatch, maxBatchCount - 1);
            if (batchCount < 0)
            {
                batchCount = maxBatchCount - fromBatch;
            }

            // pad data with 0s, if test-set doesn't have enough batches
            var shapeArray = shape.ToArray();

            shapeArray[TensorShape.DataBatch] = batchCount;
            var tensorShape             = new TensorShape(shapeArray);
            var managedBufferStartIndex = fromBatch * tensorShape.flatWidth;
            var count = Math.Min(batchCount, maxBatchCount - fromBatch) * tensorShape.flatWidth;

            float[] dataToUpload = new float[tensorShape.length];
            Array.Copy(array, managedBufferStartIndex, dataToUpload, 0, count);

            var data = new ArrayTensorData(tensorShape.length);

            data.Upload(dataToUpload, tensorShape, 0);

            var res = new Tensor(tensorShape, data);

            res.name = GetInputName(idx);
            res.name = res.name.EndsWith(":0") ? res.name.Remove(res.name.Length - 2) : res.name;

            return(res);
        }
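A worked example of the batch-slicing arithmetic in Example #5, using made-up numbers:

        // Suppose the stored input has maxBatchCount = 3 and flatWidth = 10,
        // and the caller asks for batchCount = 4 starting at fromBatch = 2:
        //   managedBufferStartIndex = 2 * 10 = 20      (skip the first two batches)
        //   count = Math.Min(4, 3 - 2) * 10 = 10       (only one stored batch remains)
        //   dataToUpload.Length = 4 * 10 = 40          (the other 30 floats stay 0)
        // The returned tensor therefore holds one real batch followed by three zero batches.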
Example #6
        /// <summary>
        /// Uses shared array
        /// </summary>
        /// <param name="sharedArray">shared array</param>
        public BurstTensorData(ArrayTensorData sharedArray) : base(sharedArray)
        {
        }
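A hedged sketch of sharing one backing array between `ArrayTensorData` and `BurstTensorData` via the constructor above; the shape values are made up, and the sharing behaviour is inferred from the `sharedArray` parameter name rather than stated in the excerpt.

        // Sketch: the constructor passes the existing ArrayTensorData to its base class,
        // which (as the parameter name suggests) shares the array instead of copying it,
        // so data written through `arrayData` should also be visible via `burstData`.
        var arrayData = new ArrayTensorData(new TensorShape(1, 4, 4, 3));
        var burstData = new BurstTensorData(arrayData);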