// Example #1 (score: 0)
 /// <summary>
 /// Allocate a Tensor for `shape` on top of `buffer`, routed to the proper sub-allocator.
 /// </summary>
 public virtual Tensor Alloc(TensorShape shape, ITensorData buffer)
 {
     // Persistent storage allocator when the current layer's result must be
     // kept around; temporary allocator otherwise.
     var allocator = layerRequiresStorage ? m_StorageAllocator : m_TemporaryAllocator;

     return allocator.Alloc(shape, buffer);
 }
 /// <summary>
 /// Remove every bookkeeping entry referencing `buffer`, then dispose the buffer itself.
 /// </summary>
 internal void DisposeAllocatedBuffer(ITensorData buffer)
 {
     // Drop all tracking entries that still point at this buffer, then release
     // the underlying memory exactly once.
     m_AllocatedBuffers.RemoveAll(entry => entry.buffer == buffer);
     buffer.Dispose();
 }
 /// <summary>
 /// Create a Tensor of shape `s` filled from the array `srcData`, with an optional name `n`.
 /// `srcData` should hold `s.length` values; only `min(length, srcData.Length)` elements are copied.
 /// </summary>
 public Tensor(TensorShape s, float[] srcData, string n = "")
 {
     name  = n;
     shape = s;

     // Back the tensor with CPU array storage and copy in as much of the
     // source data as both the shape and the array allow.
     var data = new ArrayTensorData(shape);
     data.Upload(srcData, 0, Math.Min(length, srcData.Length));

     m_TensorOnDevice  = data;
     m_TensorAllocator = null;
     m_Cache           = null;
     m_CacheIsDirty    = false;
 }
// Example #4 (score: 0)
        /// <summary>
        /// Increment the shared reference count for `buffer`. Null buffers are ignored.
        /// </summary>
        protected void AddRef(ITensorData buffer)
        {
            if (buffer == null)
            {
                return;
            }

            // A missing dictionary key means a current count of 0;
            // TryGetValue leaves refCount at 0 in that case.
            var refCount = 0;
            m_SharedBuffers.TryGetValue(buffer, out refCount);
            m_SharedBuffers[buffer] = refCount + 1;
        }
        /// <summary>
        /// Called from ITensorAllocator, puts Tensor in the ready-for-reuse state:
        /// detaches the device buffer (passing disposeUnpinned: false, so it is not disposed),
        /// clears the CPU cache and drops the allocator reference.
        /// Returns the detached buffer so the caller can recycle it.
        /// </summary>
        internal ITensorData Invalidate()
        {
            // Remember the buffer before detaching so it can be handed back to the caller.
            ITensorData unpinned = m_TensorOnDevice;

            PinToDevice(null, false);
            Assert.AreEqual(m_TensorOnDevice, null);
            m_Cache           = null;
            m_CacheIsDirty    = false;
            m_TensorOnDevice  = null;
            m_TensorAllocator = null;
            return(unpinned);
        }
        /// <summary>
        /// Create a Tensor from multiple textures; shape is [srcTextures.length, texture.height, texture.width, `channels=3`].
        /// All textures must be of the same size and dimension.
        /// </summary>
        public Tensor(UnityEngine.Texture[] srcTextures, int channels = 3, string n = "")
        {
            name = n;

            // The texture wrapper derives the final tensor shape from the texture array.
            var textureData = new TextureAsTensorData(srcTextures, channels);
            shape = textureData.shape;

            // The backing storage must be able to hold every element of the shape.
            Assert.IsTrue(textureData.GetMaxCount() >= length);

            m_TensorOnDevice  = textureData;
            m_TensorAllocator = null;
            m_Cache           = null;
            m_CacheIsDirty    = false;
        }
        /// <inheritdoc/>
        public virtual Tensor Alloc(TensorShape shape, ITensorData buffer, AllocScope scope, DataType dataType)
        {
            Profiler.BeginSample("Barracuda.SizeAllocator.Alloc");

            // Obtain a (possibly pooled) Tensor wrapping the supplied buffer,
            // then track it as busy and take a reference on its device buffer.
            var tensor = AllocTensorInternal(dataType, shape, buffer);
            tensor.name = "untitled";

            m_BusyTensors.Add(tensor, tensor.tensorOnDevice);
            AddRef(tensor.tensorOnDevice);

            Profiler.EndSample();
            return tensor;
        }
// Example #8 (score: 0)
        /// <summary>
        /// Rewire `tensor` from `oldBuffer` to `newBuffer`, transferring one reference count.
        /// </summary>
        public virtual void Cast(Tensor tensor, ITensorData newBuffer, ITensorData oldBuffer)
        {
            // Same underlying buffer: nothing to rewire.
            if (newBuffer == oldBuffer)
            {
                return;
            }

            Assert.AreEqual(tensor.allocator, this);
            Assert.IsTrue(m_BusyTensors.ContainsKey(tensor));

            // Point the busy-tensor bookkeeping at the new buffer, then swap the
            // reference counts: new buffer gains one, old buffer loses one.
            m_BusyTensors[tensor] = newBuffer;
            AddRef(newBuffer);
            DecRef(oldBuffer);
        }
        /// <summary>
        /// Associates tensor with the block of data residing on a device.
        /// Tensor values will be downloaded from the `source` upon the first access.
        /// `source` should contain initialized and valid data representing tensor values.
        /// See also `PrepareCacheForAccess()` to schedule download as soon as possible.
        /// </summary>
        public void AttachToDevice(ITensorData source)
        {
            // Already attached to this source with no pending cache writes: no work.
            bool alreadyAttached = m_TensorOnDevice == source && !m_CacheIsDirty;
            if (alreadyAttached)
            {
                return;
            }

            // Flush pending cache writes to the old buffer before switching over
            // (the previously attached buffer is disposed).
            UploadIfDirty();
            PinToDevice(source, disposeUnpinned: true);

            // If a cache exists, refresh it from the newly attached source.
            if (m_Cache != null)
            {
                PrepareCacheForAccess();
            }
        }
// Example #10 (score: 0)
        /// <summary>
        /// Allocate a Tensor for `shape` wrapping the given `buffer`, tracked as busy by this allocator.
        /// </summary>
        public virtual Tensor Alloc(TensorShape shape, ITensorData buffer)
        {
            Profiler.BeginSample("Barracuda.SizeAllocator.Alloc");

            // @TODO: reuse Tensor instances
            var tensor = new Tensor(shape, buffer, this)
            {
                name = "untitled"
            };

            // Track the tensor as busy and take a reference on its device buffer.
            m_BusyTensors.Add(tensor, tensor.tensorOnDevice);
            AddRef(tensor.tensorOnDevice);

            Profiler.EndSample();
            return tensor;
        }
        /// <summary>
        /// Allocate tensor on device if needed and download data to cache.
        /// See also `PrepareCacheForAccess()`.
        /// </summary>
        // @TODO: rename to PinToDevice(...)
        public void PinToDeviceAndDownloadFromIt(ITensorData onDevice)
        {
            // Already pinned to this buffer with a clean cache: nothing to do.
            bool alreadyPinned = m_TensorOnDevice == onDevice && !m_CacheIsDirty;
            if (alreadyPinned)
            {
                return;
            }

            // Push any pending cached writes first, then swap over to the
            // requested buffer (the previous one is disposed).
            UploadIfDirty();
            PinToDevice(onDevice, disposeUnpinned: true);

            // Refresh an existing cache from the new device buffer.
            if (m_Cache != null)
            {
                PrepareCacheForAccess();
            }
        }
        private bool m_Disposing = false; // guards against infinite recursion when UnpinAndDisposeTensor() is re-entered from Dispose()
        /// <summary>
        /// Detach the tensor from its device buffer and dispose the tensor itself.
        /// Returns the detached buffer; its ownership passes to the caller.
        /// </summary>
        public ITensorData UnpinAndDisposeTensor()
        {
            // NOTE: since this Tensor is going to be Disposed
            // there is no need to populate cache with data from tensorOnDevice
            // we can save on skipping PrepareCacheForAccess() call
            var detached = m_TensorOnDevice;

            PinToDevice(null, false);

            // m_Disposing breaks the Dispose() -> UnpinAndDisposeTensor() cycle.
            if (!m_Disposing)
            {
                Dispose();
            }
            return detached;
        }
        /// <summary>
        /// Attach `onDevice` as this tensor's backing buffer. A null target detaches.
        /// The previous buffer is disposed (or handed to the allocator) according to `disposeUnpinned`.
        /// </summary>
        private void PinToDevice(ITensorData onDevice, bool disposeUnpinned = true)
        {
            // Detaching (null) is always legal; otherwise the target must be big enough.
            Assert.IsTrue(onDevice == null || onDevice.GetMaxCount() >= length);

            if (m_TensorAllocator != null)
            {
                // The allocator owns buffer lifetime; let it rewire the references.
                m_TensorAllocator.Repin(this, onDevice, m_TensorOnDevice, disposeUnpinned);
            }
            else if (disposeUnpinned)
            {
                // No allocator: release the previous buffer ourselves when asked to.
                m_TensorOnDevice?.Dispose();
            }

            m_TensorOnDevice = onDevice;
        }
 /// <summary>
 /// Create a Tensor of shape `s` backed by the ComputeBuffer `srcBuffer`, which is assumed to
 /// already hold the tensor values, with an optional name `n`.
 /// `srcBuffer` must contain at least `s.length` elements with a stride of 4 bytes.
 /// </summary>
 public Tensor(TensorShape s, UnityEngine.ComputeBuffer srcBuffer, string n = "")
 {
     name  = n;
     shape = s;

     // Validate capacity and element stride before adopting the buffer.
     if (srcBuffer.count < s.length)
     {
         throw new ArgumentException($"Compute buffer `{name}` capacity is {srcBuffer.count} less than {s.length} required for shape {s}");
     }
     if (srcBuffer.stride != 4)
     {
         throw new ArgumentException($"Currently only compute buffers with stride of 4 are supported. Compute buffer `{name}` stride is {srcBuffer.stride} instead");
     }

     m_TensorOnDevice  = new ComputeTensorData(srcBuffer, shape, offset: 0, name, ComputeInfo.channelsOrder);
     m_TensorAllocator = null;
     m_Cache           = null;
     m_CacheIsDirty    = false;
 }
        /// <summary>
        /// Create a Tensor with specified `shape`, a jagged array `srcData` and an optional debug `name`.
        /// Each `srcData[i]` fills row `i` of the flattened tensor; rows and row elements beyond
        /// the tensor's flat dimensions are ignored.
        /// </summary>
        public Tensor(TensorShape shape, float[][] srcData, string name = "")
        {
            this.name  = name;
            this.shape = shape;

            var arrayTensorData = new ArrayTensorData(shape);

            // Copy row by row; clamp both the row count and the per-row length so
            // oversized source data cannot overflow the backing array.
            var rows = Math.Min(flatHeight, srcData.Length);
            for (var row = 0; row < rows; ++row)
            {
                var src = srcData[row];
                Array.Copy(src, 0, arrayTensorData.array, row * flatWidth, Math.Min(flatWidth, src.Length));
            }

            m_TensorOnDevice  = arrayTensorData;
            m_TensorAllocator = null;
            m_Cache           = null;
            m_CacheIsDirty    = false;
        }
        /// <summary>
        /// Cast a tensorData to this tensor, transferring ownership of on tensorData device memory to this tensor.
        /// </summary>
        // @TODO: remove in favor of renamed PinToDevice(...), currently only used in UnsafeArrayCPU
        public void CastOnDevice(ITensorData onDevice)
        {
            // Same buffer already attached: nothing to do.
            if (m_TensorOnDevice == onDevice)
            {
                return;
            }

            // Both the current and the new buffer must exist, and the new one
            // must be large enough for this tensor's data.
            Assert.IsNotNull(onDevice);
            Assert.IsNotNull(m_TensorOnDevice);
            Assert.IsTrue(onDevice.GetMaxCount() >= length);

            // Let the owning allocator (if any) update its bookkeeping first.
            m_TensorAllocator?.Cast(this, onDevice, m_TensorOnDevice);

            m_TensorOnDevice = onDevice;
        }
        /// <summary>
        /// Create a Tensor of shape `s` from a jagged array `srcData`, with an optional name `n`.
        /// Each `srcData[i]` fills row `i` of the flattened tensor; excess rows or row elements
        /// are ignored.
        /// </summary>
        public Tensor(TensorShape s, float[][] srcData, string n = "")
        {
            name  = n;
            shape = s;

            var cpuData = new ArrayTensorData(shape);

            // Fill the flat backing array one row at a time, clamping row count
            // and row width so out-of-range source values are skipped.
            var rowCount = Math.Min(flatHeight, srcData.Length);
            for (var row = 0; row < rowCount; ++row)
            {
                var src = srcData[row];
                Array.Copy(src, 0, cpuData.array, row * flatWidth, Math.Min(flatWidth, src.Length));
            }

            m_TensorOnDevice  = cpuData;
            m_TensorAllocator = null;
            m_Cache           = null;
            m_CacheIsDirty    = false;
        }
// Example #18 (score: 0)
        /// <summary>
        /// Register `buffer` as reusable for tensors of `shape`, keeping the
        /// shape-indexed free list consistent.
        /// </summary>
        protected void AdoptFreeBuffer(TensorShape shape, ITensorData buffer)
        {
            // code below automatically covers handles edge-case (2)
            // by adopting tensor's with the new ITensorData into m_FreeTensors/m_FreeTensorByShape
            var entry = new Entry {
                shape = shape, buffer = buffer
            };

            LinkedListNode <Entry> indexNode;
            if (m_FreeBufferByShape.TryGetValue(entry.shape, out indexNode))
            {
                // Other buffers of this shape exist: chain behind the indexed node.
                m_FreeBuffers.AddAfter(indexNode, entry);
            }
            else
            {
                // First buffer of this shape: append and index it for fast lookup.
                m_FreeBufferByShape.Add(entry.shape, m_FreeBuffers.AddLast(entry));
            }
        }
// Example #19 (score: 0)
        /// <summary>
        /// Dispose Tensor and associated memories.
        /// If an allocator owns this tensor, the allocator is asked to release it
        /// (and decide the buffer's fate); otherwise the device buffer is disposed directly.
        /// </summary>
        public virtual void Dispose()
        {
            // Set before releasing so UnpinAndDisposeTensor() (possibly invoked during
            // Release) does not call back into Dispose() — see m_Disposing's declaration.
            m_Disposing = true;
            if (m_TensorAllocator != null)
            {
                m_TensorAllocator.Release(this, true);
            }
            else if (m_TensorOnDevice != null)
            {
                //;;UnityEngine.D.Log("DISPOSE " + name + " " + shape + " @ " + m_TensorOnDevice.GetType().Name);
                m_TensorOnDevice.Dispose();
            }

            // Drop all references so the tensor is inert after disposal.
            m_Cache           = null;
            m_CacheIsDirty    = false;
            m_TensorOnDevice  = null;
            m_TensorAllocator = null;
            m_Disposing       = false;
        }
        /// <summary>
        /// Allocate tensor on device if needed and update data.
        /// By default cached copy of the data will be discarded when doing so, set `forceInvalidateCache` to false to keep the cache.
        /// </summary>
        // @TODO: rename to PinToUninitializedDevice(uninitializedDataOnDevice ...)
        public void PinToDeviceAndUploadToIt(ITensorData onDevice, bool forceInvalidateCache = true)
        {
            // Already on this buffer with nothing pending: no work to do.
            bool nothingToDo = m_TensorOnDevice == onDevice && !m_CacheIsDirty;
            if (nothingToDo)
            {
                return;
            }

            // Make sure the cache holds the current values, then switch buffers
            // (the previously pinned buffer is disposed).
            PrepareCacheForAccess();
            PinToDevice(onDevice, disposeUnpinned: true);

            // The fresh buffer is uninitialized, so the cache must be pushed up.
            m_CacheIsDirty = true;
            if (forceInvalidateCache)
            {
                UploadAndInvalidateCache();
            }
            else
            {
                UploadIfDirty();
            }
        }
        /// <summary>
        /// Hand out a Tensor for `shape`/`buffer`, recycling a pooled instance when available.
        /// </summary>
        internal Tensor AllocTensorInternal(DataType dataType, TensorShape shape, ITensorData buffer)
        {
            lock (m_AllocatedTensors)
            {
                var pooled = m_AllocatedTensors.Count;
                if (pooled == 0)
                {
                    // Pool empty: construct a brand-new instance.
                    return new Tensor(shape, buffer, this, dataType);
                }

                // Recycle the most recently returned instance and re-initialize it.
                var tensor = m_AllocatedTensors[pooled - 1];
                m_AllocatedTensors.RemoveAt(pooled - 1);
                tensor.Init(shape, buffer, this, dataType);
                return tensor;
            }
        }
        /// <summary>
        /// Upload tensor values to the device.
        /// This call associates tensor with the uninitialized block of data residing on a device.
        /// `destination` should be allocated on a target device. Previous contents of `destination` will be overwritten after this call.
        /// By default local cache will be discarded after this call, set `invalidateCacheAfterUpload` to false to keep the cache.
        /// </summary>
        public void UploadToDevice(ITensorData destination, bool invalidateCacheAfterUpload = true)
        {
            // Already attached with a clean cache: nothing to upload.
            bool nothingToDo = m_TensorOnDevice == destination && !m_CacheIsDirty;
            if (nothingToDo)
            {
                return;
            }

            // Pull current values into the cache, then re-pin to the destination
            // (the previously attached buffer is disposed).
            PrepareCacheForAccess();
            PinToDevice(destination, disposeUnpinned: true);

            // The destination starts uninitialized: mark the cache dirty and push it up.
            m_CacheIsDirty = true;
            if (invalidateCacheAfterUpload)
            {
                UploadAndInvalidateCache();
            }
            else
            {
                UploadIfDirty();
            }
        }
// Example #23 (score: 0)
        /// <summary>
        /// Decrement the shared reference count for `buffer`. When the count hits zero
        /// the buffer stops being tracked and `onLastRef` (if provided) decides its fate.
        /// Null buffers are ignored.
        /// </summary>
        protected void DecRef(ITensorData buffer, Action <ITensorData> onLastRef = null)
        {
            if (buffer == null)
            {
                return;
            }

            // Every DecRef must be paired with an earlier AddRef.
            Assert.IsTrue(m_SharedBuffers.ContainsKey(buffer));
            Assert.IsTrue(m_SharedBuffers[buffer] > 0);

            var remaining = m_SharedBuffers[buffer] - 1;
            if (remaining > 0)
            {
                // Still shared by other tensors: just record the new count.
                m_SharedBuffers[buffer] = remaining;
                return;
            }

            // Last reference gone: stop tracking and let the caller decide what
            // happens to the buffer (dispose it, adopt it as free, ...).
            m_SharedBuffers.Remove(buffer);
            onLastRef?.Invoke(buffer);
        }
// Example #24 (score: 0)
        /// <summary>
        /// Rewire `tensor` from `oldBuffer` to `newBuffer`. On the old buffer's last
        /// reference it is either disposed or recycled, per `disposeUnpinnedHint`.
        /// </summary>
        public virtual void Repin(Tensor tensor, ITensorData newBuffer, ITensorData oldBuffer, bool disposeUnpinnedHint)
        {
            // Same underlying buffer: nothing to rewire.
            if (newBuffer == oldBuffer)
            {
                return;
            }

            Assert.AreEqual(tensor.allocator, this);
            Assert.IsTrue(m_BusyTensors.ContainsKey(tensor));

            // Point the busy-tensor bookkeeping at the new buffer and take a
            // reference on it before releasing the old one.
            m_BusyTensors[tensor] = newBuffer;
            AddRef(newBuffer);

            // On the old buffer's last reference: dispose it outright, or recycle
            // it into the free-buffer pool, as hinted by the caller.
            var onLastRef = disposeUnpinnedHint ? disposeAllocatedBufferDelegate : adoptFreeBufferDelegate;
            DecRef(oldBuffer, onLastRef);
        }
        /// <inheritdoc/>
        /// <remarks>
        /// First tries to recycle an already-allocated free buffer that is large enough
        /// and of the matching data type; only when none fits is a fresh tensor (with a
        /// new buffer) allocated and counted toward the cleanup heuristic.
        /// </remarks>
        public virtual Tensor Alloc(TensorShape shape, AllocScope scope, DataType dataType)
        {
            Profiler.BeginSample("Barracuda.SizeAllocator.Alloc");
            var name = "untitled";

            // Linear scan for a free buffer able to hold shape.length elements.
            for (int i = 0; i < m_AllocatedBuffers.Count; ++i)
            {
                var entry = m_AllocatedBuffers[i];
                if (entry.size >= shape.length && entry.dataType == dataType && entry.free)
                {
                    // Write the cleared `free` flag back into the list
                    // (Entry appears to be a value type — TODO confirm).
                    entry.free            = false;
                    m_AllocatedBuffers[i] = entry;

                    ITensorData buffer = entry.tensorData;
                    buffer?.Reserve(shape.length);

                    var tensor = AllocTensorInternal(dataType, shape, buffer);
                    tensor.name = name;

                    m_BusyTensors.Add(tensor, tensor.tensorOnDevice);
                    AddRef(tensor.tensorOnDevice);

                    Profiler.EndSample();
                    return(tensor);
                }
            }

            // No reusable buffer found: account for a brand-new allocation.
            ++m_NumAllocatedBufferSinceCleanup;

            var newTensor = AllocTensorInternal(dataType, shape, null);

            newTensor.name = name;
            m_BusyTensors.Add(newTensor, newTensor.tensorOnDevice);
            AddRef(newTensor.tensorOnDevice);

            Profiler.EndSample();
            return(newTensor);
        }
// Example #26 (score: 0)
        /// <summary>
        /// Rewire `tensor` from `oldBuffer` to `newBuffer`. When the old buffer's last
        /// reference drops it is either disposed or adopted into the free pool,
        /// depending on `disposeUnpinnedHint`.
        /// </summary>
        public virtual void Repin(Tensor tensor, ITensorData newBuffer, ITensorData oldBuffer, bool disposeUnpinnedHint)
        {
            // Identical buffer: no bookkeeping changes needed.
            if (newBuffer == oldBuffer)
            {
                return;
            }

            Assert.AreEqual(tensor.allocator, this);
            Assert.IsTrue(m_BusyTensors.ContainsKey(tensor));

            // The tensor is now backed by `newBuffer`; reference it before
            // releasing the old buffer.
            m_BusyTensors[tensor] = newBuffer;
            AddRef(newBuffer);

            // The hint is fixed for this call, so branch once instead of inside
            // the last-reference callback.
            if (disposeUnpinnedHint)
            {
                DecRef(oldBuffer, freeBuffer => freeBuffer.Dispose());
            }
            else
            {
                DecRef(oldBuffer, freeBuffer => AdoptFreeBuffer(tensor.shape, freeBuffer));
            }
        }
 /// <summary>
 /// Create a Tensor from a shape `s`, a ITensorData `d` and a ITensorAllocator `a`.
 /// `s` is forwarded to `new TensorShape(s)` — presumably a 4-element array in [N,H,W,C]
 /// order; TODO confirm against TensorShape's int[] constructor.
 /// </summary>
 public Tensor(int[] s, ITensorData d, ITensorAllocator a) : this(new TensorShape(s), d, a)
 {
 }
 /// <summary>
 /// Create a Tensor from batch `b` and channel count `ch` (shape built via `new TensorShape(b, ch)`),
 /// a ITensorData `d` and an optional name `n`.
 /// `d` should hold b*ch values.
 /// </summary>
 public Tensor(int b, int ch, ITensorData d, string n = "") : this(new TensorShape(b, ch), d, n)
 {
 }
 /// <summary>
 /// Create a Tensor from batch `b` and channel count `ch` (shape built via `new TensorShape(b, ch)`),
 /// a ITensorData `d` and a ITensorAllocator `a`.
 /// `d` should hold b*ch values.
 /// </summary>
 public Tensor(int b, int ch, ITensorData d, ITensorAllocator a) : this(new TensorShape(b, ch), d, a)
 {
 }
// Example #30 (score: 0)
 /// <summary>
 /// Forwards the move request to the tensor's own allocator.
 /// NOTE(review): if `x.allocator` can ever be this same allocator instance, this call
 /// recurses indefinitely — confirm callers only pass tensors owned by a different allocator.
 /// </summary>
 public virtual void MoveToDevice(Tensor x, ITensorData newBuffer, ITensorData oldBuffer, bool disposeDetachedBufferHint)
 {
     x.allocator.MoveToDevice(x, newBuffer, oldBuffer, disposeDetachedBufferHint);
 }