/// <summary>
/// Constructs a wrapper over an externally owned chunk of pre-allocated native memory.
/// </summary>
/// <param name="memInfo">memory info that accurately describes the memory being wrapped</param>
/// <param name="shape">tensor shape of this buffer</param>
/// <param name="elementType">tensor element type</param>
/// <param name="pointer">the actual pointer to memory</param>
/// <param name="sizeInBytes">size of the allocation in bytes</param>
        public OrtExternalAllocation(OrtMemoryInfo memInfo, long[] shape, Tensors.TensorElementType elementType, IntPtr pointer, long sizeInBytes)
        {
            Type clrType;
            int elementWidth;

            // Resolve the managed type and per-element width so the buffer size can be validated.
            if (!TensorElementTypeConverter.GetTypeAndWidth(elementType, out clrType, out elementWidth))
            {
                throw new OnnxRuntimeException(ErrorCode.InvalidArgument,
                                               "Unable to query type information for data type: " + elementType.ToString());
            }

            // Variable-length string tensors cannot be described by a raw pointer + byte size.
            if (elementType == TensorElementType.String)
            {
                throw new OnnxRuntimeException(ErrorCode.InvalidArgument,
                                               "Strings are not supported by this API");
            }

            var elementCount = ArrayUtilities.GetSizeForShape(shape);
            var minimumBytes = elementCount * elementWidth;

            // The caller may hand us a larger buffer than required, but never a smaller one.
            if (minimumBytes > sizeInBytes)
            {
                throw new OnnxRuntimeException(ErrorCode.InvalidArgument,
                    $"Shape of {elementCount} elements requires a buffer of at least {minimumBytes} bytes. Provided: {sizeInBytes} bytes");
            }

            Info = memInfo;
            Shape = shape;
            ElementType = elementType;
            Pointer = pointer;
            Size = sizeInBytes;
        }
// Example #2
 /// <summary>
 /// Bind model output to a pre-allocated piece of native memory as an OrtValue Tensor
 /// with a given element type and shape. An instance of OrtMemoryAllocation
 /// owns the memory and should be alive for the time of execution. The size of the allocation
 /// can not be less than required by the Tensor of the given size.
 /// </summary>
 /// <param name="name">of the output</param>
 /// <param name="elementType">tensor element type</param>
 /// <param name="shape">tensor shape</param>
 /// <param name="allocation">allocated memory</param>
 public void BindOutput(string name, Tensors.TensorElementType elementType, long[] shape, OrtMemoryAllocation allocation)
 {
     // Route through the same shared helper as BindInput so binding logic lives
     // in one place; 'false' marks this binding as an output.
     BindOrtAllocation(name, elementType, shape, allocation, false);
 }
// Example #3
        /// <summary>
        /// Resolves the managed type and per-element byte width for a tensor element type.
        /// Throws when no metadata is available for the given element type.
        /// </summary>
        internal static void GetTypeAndWidth(Tensors.TensorElementType elemType, out Type type, out int width)
        {
            // Look up the type metadata; null means the element type is unknown.
            var typeInfo = TensorBase.GetElementTypeInfo(elemType);
            if (typeInfo == null)
            {
                throw new ArgumentException("Unable to get information for type: " + elemType.ToString());
            }

            type = typeInfo.TensorType;
            width = typeInfo.TypeSize;
        }
// Example #4
        /// <summary>
        /// Resolves the managed type and per-element byte width for a tensor element type.
        /// </summary>
        /// <param name="elemType">tensor element type to look up</param>
        /// <param name="type">resulting managed type, or null when the lookup fails</param>
        /// <param name="width">resulting element size in bytes, or 0 when the lookup fails</param>
        /// <returns>true when metadata for the element type is available, false otherwise</returns>
        internal static bool GetTypeAndWidth(Tensors.TensorElementType elemType, out Type type, out int width)
        {
            TensorElementTypeInfo result = TensorBase.GetElementTypeInfo(elemType);

            if (result != null)
            {
                type  = result.TensorType;
                width = result.TypeSize;
                return true;
            }

            // Unknown element type: report failure via the return value instead of throwing,
            // so callers (e.g. the OrtExternalAllocation constructor, which tests the result
            // with '!') can raise their own diagnostics. Returning bool is source-compatible
            // with existing callers that ignore the return value.
            type  = null;
            width = 0;
            return false;
        }
        /// <summary>
        /// Wraps a native OrtValue tensor handle: reads its element type, dimensions and
        /// element count, and exposes the tensor's data. For string tensors the UTF-8
        /// payload is copied out and decoded into a managed string array.
        /// </summary>
        /// <param name="onnxValueHandle">native handle of the OrtValue to wrap</param>
        /// <exception cref="NotSupportedException">
        /// if T does not match the tensor's element type, or the tensor has symbolic dimensions
        /// </exception>
        public NativeOnnxTensorMemory(IntPtr onnxValueHandle)
        {
            Type type  = null;
            int  width = 0;

            _onnxValueHandle = onnxValueHandle;
            IntPtr typeAndShape = IntPtr.Zero;

            // Query the native type-and-shape info; it is released in the finally block below.
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorTypeAndShape(onnxValueHandle, out typeAndShape));
            try
            {
                TensorElementType elemType;
                {
                    IntPtr el_type;
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorElementType(typeAndShape, out el_type));
                    elemType = (TensorElementType)el_type;
                }
                // Map the native element type to its managed Type and per-element byte width.
                TensorElementTypeConverter.GetTypeAndWidth(elemType, out type, out width);

                // The generic parameter must match the tensor's actual element type.
                if (typeof(T) != type)
                {
                    throw new NotSupportedException(nameof(NativeOnnxTensorMemory <T>) + " does not support T = " + nameof(T));
                }

                _elementType  = elemType;
                _elementWidth = width;
                UIntPtr dimension;
                long    count;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetDimensionsCount(typeAndShape, out dimension));
                {
                    IntPtr el_count;
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorShapeElementCount(typeAndShape, out el_count));  // count can be negative.
                    count = (long)el_count;
                }
                // A negative element count signals symbolic (unresolved) dimensions.
                if (count < 0)
                {
                    throw new NotSupportedException("Symbolic dimensions in the tensor is not supported");
                }

                long[] shape = new long[dimension.ToUInt64()];
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetDimensions(typeAndShape, shape, dimension));  //Note: shape must be alive during the call

                // Copy the native long dimensions into the managed int[] kept by this class.
                _elementCount = (int)count;
                _dimensions   = new int[dimension.ToUInt64()];
                for (ulong i = 0; i < dimension.ToUInt64(); i++)
                {
                    _dimensions[i] = (int)shape[i];
                }

                if (typeof(T) != typeof(string))
                {
                    // Primitive tensors: point directly at the mutable native data buffer (no copy).
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorMutableData(_onnxValueHandle, out _dataBufferPointer));
                }
                else
                {
                    // String tensors: fetch the packed UTF-8 payload plus per-element offsets,
                    // then decode each element into a managed string.
                    UIntPtr strLen;
                    var     offsets = new UIntPtr[_elementCount];
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetStringTensorDataLength(_onnxValueHandle, out strLen));
                    var dataBuffer = new byte[strLen.ToUInt64()];

                    // NOTE(review): _dataBufferPointer is taken from a pin that is released when
                    // these usings exit; confirm the pointer is not dereferenced afterwards (the
                    // managed copy in _dataBufferAsString is what the string path consumes).
                    using (var dataBufferHandle = new Memory <byte>(dataBuffer).Pin())
                        using (var offsetMemoryHandle = new Memory <UIntPtr>(offsets).Pin())
                        {
                            unsafe
                            {
                                _dataBufferPointer = (IntPtr)dataBufferHandle.Pointer;
                                NativeApiStatus.VerifySuccess(
                                    NativeMethods.OrtGetStringTensorContent(
                                        _onnxValueHandle, _dataBufferPointer, strLen,
                                        (IntPtr)offsetMemoryHandle.Pointer,
                                        (UIntPtr)_elementCount));
                            }
                            _dataBufferAsString = new string[_elementCount];

                            // Element i spans [offsets[i], offsets[i+1]); the last element runs to strLen.
                            for (var i = 0; i < offsets.Length; i++)
                            {
                                var length = (i == offsets.Length - 1)
                                ? strLen.ToUInt64() - offsets[i].ToUInt64()
                                : offsets[i + 1].ToUInt64() - offsets[i].ToUInt64();
                                // Onnx specifies strings always in UTF-8, no trailing null, no leading BOM
                                _dataBufferAsString[i] = Encoding.UTF8.GetString(dataBuffer, (int)offsets[i], (int)length);
                            }
                        }
                }
            }
            finally
            {
                // Always release the native type-and-shape info obtained above.
                NativeMethods.OrtReleaseTensorTypeAndShapeInfo(typeAndShape);
            }
        }
 /// <summary>
 /// Bind a piece of pre-allocated native memory as a OrtValue Tensor with a given shape
 /// to an input with a given name. The model will read the specified input from that memory
 /// possibly avoiding the need to copy between devices. OrtMemoryAllocation continues to own
 /// the chunk of native memory, and the allocation should be alive until the end of execution.
 /// </summary>
 /// <param name="name">of the input</param>
 /// <param name="elementType">Tensor element type</param>
 /// <param name="shape">tensor shape of the input</param>
 /// <param name="allocation">native memory allocation</param>
 public void BindInput(string name, Tensors.TensorElementType elementType, long[] shape, OrtMemoryAllocation allocation)
 {
     // 'true' marks this binding as an input (BindOutput passes false).
     BindOrtAllocation(name, elementType, shape, allocation, true);
 }