internal static DisposableNamedOnnxValue CreateFromOrtValue(string name, OrtValue ortValue, OrtAllocator allocator)
        {
            // Query the native side for the kind of value we were handed,
            // then dispatch to the matching converter.
            IntPtr valueTypePtr;
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValueType(ortValue.Handle, out valueTypePtr));
            var valueKind = (OnnxValueType)valueTypePtr;

            switch (valueKind)
            {
                case OnnxValueType.ONNX_TYPE_TENSOR:
                    return CreateTensorFromOnnxValue(name, ortValue);

                case OnnxValueType.ONNX_TYPE_SEQUENCE:
                    return DisposableNamedOnnxValueFromSequence(name, ortValue, allocator);

                case OnnxValueType.ONNX_TYPE_MAP:
                    return DisposableNamedOnnxValueFromNativeMap(name, ortValue, allocator);

                default:
                    throw new NotSupportedException("OnnxValueType : " + valueKind + " is not supported");
            }
        }
        /// <summary>
        /// This method will create an instance of DisposableNamedOnnxValue that will own ortSequenceValue
        /// and all disposable native objects that are elements of the sequence.
        /// </summary>
        /// <param name="name">name to associate with the resulting value</param>
        /// <param name="ortValueSequence">ortValueElement that has native sequence</param>
        /// <param name="allocator">allocator used when fetching individual sequence elements</param>
        /// <returns>DisposableNamedOnnxValue</returns>
        private static DisposableNamedOnnxValue DisposableNamedOnnxValueFromSequence(string name, OrtValue ortValueSequence, OrtAllocator allocator)
        {
            DisposableNamedOnnxValue result = null;
            IntPtr count;

            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValueCount(ortValueSequence.Handle, out count));
            var sequence = new DisposableList <DisposableNamedOnnxValue>(count.ToInt32());

            try
            {
                for (int i = 0; i < count.ToInt32(); i++)
                {
                    IntPtr nativeOnnxValueSeq;
                    // OrtGetValue hands us a fresh native value for element i that we must release.
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(ortValueSequence.Handle, i, allocator.Pointer, out nativeOnnxValueSeq));
                    using (var ortValueElement = new OrtValue(nativeOnnxValueSeq))
                    {
                        // Will take ownership or throw
                        sequence.Add(CreateFromOrtValue(string.Empty, ortValueElement, allocator));
                    }
                }
                // NativeOrtValueCollectionOwner will take ownership of ortValueSequence and will make sure sequence
                // is also disposed.
                var nativeCollectionManager = new NativeOrtValueCollectionOwner(ortValueSequence, sequence);
                result = new DisposableNamedOnnxValue(name, sequence, OnnxValueType.ONNX_TYPE_SEQUENCE, TensorElementType.DataTypeMax, nativeCollectionManager);
            }
            catch (Exception)
            {
                // Release any elements accumulated so far; the collection owner was never
                // created, so ortValueSequence itself remains the caller's responsibility.
                sequence.Dispose();
                throw;
            }
            return(result);
        }
 /// <summary>
 /// Captures a pinned managed buffer together with the OrtValue created on top of it,
 /// so both can be released together when this instance is disposed.
 /// </summary>
 private FixedBufferOnnxValue(MemoryHandle pinnedMemory, OrtValue ortValue, OnnxValueType onnxValueType, TensorElementType elementType)
 {
     ElementType = elementType;
     OnnxValueType = onnxValueType;
     Value = ortValue;
     PinnedMemory = pinnedMemory;
 }
        /// <summary>
        /// This is a factory method that creates a disposable instance of FixedBufferOnnxValue
        /// on top of a buffer. Internally, it will pin managed buffer and will create
        /// an OrtValue containing a tensor that will not own the memory.
        /// Such instance of FixedBufferOnnxValue can be used both as input and output in InferenceSession.Run()
        /// overload. As compared to CreateFromTensor(), this allows you to pass in buffers with custom data types
        /// that are blittable as defined in https://docs.microsoft.com/en-us/dotnet/framework/interop/blittable-and-non-blittable-types
        /// I.e. those that have the same binary representation as the original type. This includes all existing types
        /// but may also allow using custom types for Float16 and BFloat16 providing they have the same layout and size.
        /// The resulting instance must be disposed of to release pinned memory and deallocate native OrtValue
        /// See example below.
        /// </summary>
        /// <typeparam name="T">Blittable data type, compatible with supported types</typeparam>
        /// <param name="memoryInfo">memoryInfo. For managed buffers simply use OrtMemoryInfo.DefaultInstance</param>
        /// <param name="memory"></param>
        /// <param name="elementType">TensorElementType</param>
        /// <param name="shape">shape of the tensor to be created</param>
        /// <param name="bytesSize">size of the allocation in bytes</param>
        /// <returns>a disposable instance of FixedBufferOnnxValue</returns>
        /// <example>
        /// Here is an example of using a 3rd party library class for processing float16/bfloat16.
        /// Currently, to pass tensor data and create a tensor one must copy data to Float16/BFloat16 structures
        /// so DenseTensor can recognize it.
        ///
        /// If you are using a library that has a class Half and it is blittable, that is its managed in memory representation
        /// matches native one and its size is 16-bits, you can use the following conceptual example
        /// to feed/fetch data for inference using Half array. This allows you to avoid copying data from your Half[] to Float16[]
        ///
        /// \code{.cs}
        /// unsafe { Debug.Assert(sizeof(ushort) == sizeof(Half)); }
        /// Half[] input = new Half[] { 5646, 12345 };
        /// var input_shape = new long[] {input.Length};
        /// Half[] output = new Half[40]; // Whatever the expected len/shape is must match
        /// var output_shape = new long[] {output.Length};
        ///
        /// var memInfo = OrtMemoryInfo.DefaultInstance; // CPU
        ///
        /// using(var fixedBufferInput = FixedBufferOnnxvalue.CreateFromMemory<Half>(memInfo,
        ///                         input, TensorElementType.Float16, input_shape, input.Length * sizeof(ushort))
        /// using(var fixedBufferOutput = FixedBufferOnnxvalue.CreateFromMemory<Half>(memInfo,
        ///                               output, TensorElementType.Float16, output_shape, output.Length * sizeof(ushort))
        /// {
        ///    FixedBufferOnnxvalue[] inputValues = new FixedBufferOnnxvalue[]{fixedBufferInput};
        ///    FixedBufferOnnxvalue[] outputValues = new FixedBufferOnnxvalue[]{fixedBufferOutput};
        ///    session.Run(inputNames, inputValues, outputNames, outputValues);
        ///   // Output is now in output[]
        /// }
        /// \endcode
        /// </example>
        public static FixedBufferOnnxValue CreateFromMemory <T>(OrtMemoryInfo memoryInfo, Memory <T> memory,
                                                                TensorElementType elementType, long[] shape, long bytesSize)
        {
            // String tensors cannot be represented as a single contiguous buffer.
            if (elementType == TensorElementType.String)
            {
                throw new ArgumentException("String data type is not supported");
            }

            var pinnedMemory = memory.Pin();
            try
            {
                IntPtr dataPtr;
                unsafe
                {
                    dataPtr = (IntPtr)pinnedMemory.Pointer;
                }
                // The OrtValue wraps the caller's buffer; it does not own the memory.
                var ortValue = OrtValue.CreateTensorValueWithData(memoryInfo, elementType, shape, dataPtr, bytesSize);
                return new FixedBufferOnnxValue(pinnedMemory, ortValue, OnnxValueType.ONNX_TYPE_TENSOR, elementType);
            }
            catch (Exception)
            {
                // Unpin on any failure so the buffer is not left pinned forever.
                pinnedMemory.Dispose();
                throw;
            }
        }
        internal static DisposableNamedOnnxValue CreateFromOrtValue(string name, OrtValue ortValue)
        {
            // Convert first; only relinquish our native handle once conversion succeeded.
            var named = CreateFromOnnxValue(name, ortValue.Handle, OrtAllocator.DefaultInstance);
            ortValue.Disown();
            return named;
        }
// Example #6
        /// <summary>
        /// Creates a native string tensor OrtValue and fills it with zero-terminated
        /// UTF-8 copies of every element of the supplied Tensor&lt;string&gt;.
        /// </summary>
        /// <param name="tensor">managed string tensor; must not be null</param>
        /// <returns>an OrtValue that owns the native string tensor</returns>
        private static OrtValue CreateStringTensor(Tensor <string> tensor)
        {
            if (tensor == null)
            {
                throw new OnnxRuntimeException(ErrorCode.Fail, "Cast to Tensor<string> failed. BUG check!");
            }

            // (Removed a dead pre-pass that UTF-8-encoded every element just to compute
            // an unused total byte length.)
            long[] shape = new long[tensor.Dimensions.Length];
            for (int i = 0; i < tensor.Dimensions.Length; i++)
            {
                shape[i] = tensor.Dimensions[i];
            }

            // allocate the native tensor
            IntPtr valueHandle = IntPtr.Zero;

            NativeApiStatus.VerifySuccess(NativeMethods.OrtCreateTensorAsOrtValue(
                                              OrtAllocator.DefaultInstance.Pointer,
                                              shape,
                                              (UIntPtr)(shape.Length),
                                              TensorElementType.String,
                                              out valueHandle
                                              ));

            var ortValue = new OrtValue(valueHandle);

            try
            {
                // fill the native tensor, using GetValue(index) from the Tensor<string>
                var len           = tensor.Length;
                var nativeStrings = new IntPtr[len];
                using (var pinnedHandles = new DisposableList <PinnedGCHandle>((int)len))
                {
                    for (int i = 0; i < len; i++)
                    {
                        // Pin each UTF-8 byte array so its address stays valid for the native call.
                        var utf8str  = NativeOnnxValueHelper.StringToZeroTerminatedUtf8(tensor.GetValue(i));
                        var gcHandle = GCHandle.Alloc(utf8str, GCHandleType.Pinned);
                        nativeStrings[i] = gcHandle.AddrOfPinnedObject();
                        pinnedHandles.Add(new PinnedGCHandle(gcHandle));
                    }

                    using (var pinnedStrings = new PinnedGCHandle(GCHandle.Alloc(nativeStrings, GCHandleType.Pinned)))
                        NativeApiStatus.VerifySuccess(NativeMethods.OrtFillStringTensor(ortValue.Handle, nativeStrings, (UIntPtr)len));
                }
            }
            catch (Exception)
            {
                // Broadened from catch (OnnxRuntimeException): any failure in the fill
                // phase (e.g. GCHandle.Alloc or UTF-8 conversion) must also release the
                // native tensor, otherwise it leaks. Consistent with the other factory
                // methods in this file, which catch Exception.
                ortValue.Dispose();
                throw;
            }
            return(ortValue);
        }
 /// <summary>
 /// Add a pre-allocated initializer to a session. If a model contains an initializer with a name
 /// that is same as the name passed to this API call, ORT will use this initializer instance
 /// instead of deserializing one from the model file. This is useful when you want to share
 /// the same initializer across sessions.
 /// \param name name of the initializer
 /// \param val OrtValue containing the initializer. Lifetime of 'val' and the underlying initializer buffer must be
 /// managed by the user (created using the CreateTensorWithDataAsOrtValue API) and it must outlive the session object
 /// to which it is added.
 /// </summary>
 public void AddInitializer(string name, OrtValue ortValue)
 {
     // Pin the zero-terminated UTF-8 copy of the name for the duration of the native call.
     var utf8Name = NativeOnnxValueHelper.StringToZeroTerminatedUtf8(name);
     using (var pinnedName = new PinnedGCHandle(GCHandle.Alloc(utf8Name, GCHandleType.Pinned)))
     {
         NativeApiStatus.VerifySuccess(
             NativeMethods.OrtAddInitializer(handle, pinnedName.Pointer, ortValue.Handle));
     }
 }
 /// <summary>
 /// Bind model output to an OrtValue as Tensor with a given type and shape. An instance of OrtMemoryAllocaiton
 /// owns the memory and should be alive for the time of execution.The size of the allocation can not be less than required
 /// by the Tensor of the given size.
 /// </summary>
 /// <param name="name">of the output</param>
 /// <param name="elementType">tensor element type</param>
 /// <param name="shape">tensor shape</param>
 /// <param name="allocation">allocated memory</param>
 public void BindOutput(string name, Tensors.TensorElementType elementType, long[] shape, OrtMemoryAllocation allocation)
 {
     // Describe the caller-owned allocation as a tensor OrtValue and bind it
     // as an output (isInput: false). The temporary wrapper is disposed right
     // after binding, mirroring the original behavior.
     using (var ortValue = OrtValue.CreateTensorValueWithData(allocation.Info, elementType, shape,
                                                              allocation.Pointer, allocation.Size))
     {
         BindInputOrOutput(name, ortValue.Handle, false);
     }
 }
 /// <summary>
 /// Bind external allocation as input or output.
 /// The allocation is owned by the user code.
 /// </summary>
 /// <param name="name">name </param>
 /// <param name="allocation">non ort allocated memory</param>
 /// <param name="isInput">whether this is an input or output</param>
 private void BindExternalAllocation(string name, OrtExternalAllocation allocation, bool isInput)
 {
     // Wrap the user-owned external allocation in a temporary tensor OrtValue,
     // bind it under the given name, then dispose the wrapper immediately.
     using (var ortValue = OrtValue.CreateTensorValueWithData(allocation.Info,
                                                              allocation.ElementType,
                                                              allocation.Shape,
                                                              allocation.Pointer,
                                                              allocation.Size))
     {
         BindInputOrOutput(name, ortValue.Handle, isInput);
     }
 }
        /// <summary>
        /// Will extract keys and values from the map and create a DisposableNamedOnnxValue from it
        /// </summary>
        /// <param name="name">name of the output</param>
        /// <param name="ortValueMap">ortValue that represents a map.
        /// This function does not take ownership of the map as we copy all keys and values into a dictionary. We let the caller dispose of it</param>
        /// <param name="allocator">allocator used when extracting the key and value tensors</param>
        /// <returns>DisposableNamedOnnxValue</returns>
        private static DisposableNamedOnnxValue DisposableNamedOnnxValueFromNativeMap(string name, OrtValue ortValueMap, OrtAllocator allocator)
        {
            DisposableNamedOnnxValue result = null;

            // Map processing is currently not recursing. It is assumed to contain
            // only primitive types and strings tensors. No sequences or maps.
            // The data is being copied to a dictionary and all ortValues are being disposed.
            // not mapped for client consumption.
            using (var cleanUpList = new DisposableList <IDisposable>())
            {
                // Take possession of the map ortValueElement
                // Index 0 of a native map value is the keys tensor.
                IntPtr nativeOnnxValueMapKeys = IntPtr.Zero;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(ortValueMap.Handle, 0, allocator.Pointer, out nativeOnnxValueMapKeys));
                var ortValueKeys = new OrtValue(nativeOnnxValueMapKeys);
                cleanUpList.Add(ortValueKeys);

                // Index 1 is the values tensor.
                IntPtr nativeOnnxValueMapValues = IntPtr.Zero;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(ortValueMap.Handle, 1, allocator.Pointer, out nativeOnnxValueMapValues));
                var ortValueValues = new OrtValue(nativeOnnxValueMapValues);
                cleanUpList.Add(ortValueValues);

                // Inspect the element type of the keys tensor to pick the dictionary key type.
                IntPtr typeAndShape = IntPtr.Zero;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorTypeAndShape(nativeOnnxValueMapKeys, out typeAndShape));
                TensorElementType elemType = TensorElementType.DataTypeMax;
                try
                {
                    IntPtr el_type;
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorElementType(typeAndShape, out el_type));
                    elemType = (TensorElementType)el_type;
                }
                finally
                {
                    // The type/shape info is a native object and must always be released.
                    NativeMethods.OrtReleaseTensorTypeAndShapeInfo(typeAndShape);
                }

                // XXX: This code always assumes that the value type is float and makes no checks
                // similar to that of the key. Also Map type in general can also be another sequence or map,
                // not just a tensor
                switch (elemType)
                {
                case TensorElementType.Int64:
                    result = DisposableNamedOnnxValueFromNativeMapElements <Int64, float>(string.Empty, ortValueKeys, ortValueValues);
                    break;

                case TensorElementType.String:
                    result = DisposableNamedOnnxValueFromNativeMapElements <string, float>(string.Empty, ortValueKeys, ortValueValues);
                    break;

                default:
                    throw new NotSupportedException("Map of element type: " + elemType + " is not supported");
                }
            }
            return(result);
        }
// Example #11
        /// <summary>
        /// Releases the owned OrtValue. Idempotent: subsequent calls are no-ops.
        /// </summary>
        /// <param name="disposing">true when called from Dispose(); false from a finalizer</param>
        protected override void Dispose(bool disposing)
        {
            if (IsDisposed)
            {
                return;
            }

            // NOTE(review): _ortValue (a managed wrapper) is released regardless of
            // 'disposing'; the standard Dispose pattern touches managed objects only
            // when disposing == true — confirm no finalizer reaches this path.
            if (_ortValue != null)
            {
                _ortValue.Dispose();
                _ortValue = null;
            }
            IsDisposed = true;
        }
        /// <summary>
        /// Creates a <see cref="FixedBufferOnnxValue"/> object from the tensor and pins its underlying buffer.
        /// </summary>
        /// <typeparam name="T"></typeparam>
        /// <param name="value"></param>
        /// <returns></returns>
        public static FixedBufferOnnxValue CreateFromTensor <T>(Tensor <T> value)
        {
            MemoryHandle? memHandle;
            var ortValue = OrtValue.CreateFromTensorObject(value, out memHandle, out TensorElementType elementType);

            // When no managed memory was pinned (e.g. string tensors), store an
            // empty MemoryHandle instead.
            var pinned = memHandle ?? default(MemoryHandle);
            return new FixedBufferOnnxValue(pinned, ortValue, OnnxValueType.ONNX_TYPE_TENSOR, elementType);
        }
        /// <summary>
        /// Creates a <see cref="FixedBufferOnnxValue"/> object from the tensor and pins its underlying buffer.
        /// </summary>
        /// <typeparam name="T"></typeparam>
        /// <param name="value"></param>
        /// <returns>a disposable instance of FixedBufferOnnxValue</returns>
        public static FixedBufferOnnxValue CreateFromTensor <T>(Tensor <T> value)
        {
            var ortValue = OrtValue.CreateFromTensorObject(value, out MemoryHandle? memHandle, out TensorElementType elementType);

            // memHandle carries a value when CreateFromTensorObject() pinned managed
            // memory; that pin must eventually be released (disposed). This happens
            // for blittable element types but not for string tensors, where each
            // element gets its own allocation.
            if (memHandle.HasValue)
            {
                return new FixedBufferOnnxValue(memHandle.Value, ortValue, OnnxValueType.ONNX_TYPE_TENSOR, elementType);
            }
            return new FixedBufferOnnxValue(default(MemoryHandle), ortValue, OnnxValueType.ONNX_TYPE_TENSOR, elementType);
        }
// Example #14
        /// <summary>
        /// Standard Dispose pattern implementation. Releases the disposable list and
        /// the owned OrtValue when invoked from Dispose().
        /// </summary>
        /// <param name="disposing">true when called from Dispose(); false from a finalizer</param>
        protected virtual void Dispose(bool disposing)
        {
            if (_disposed)
            {
                return;
            }

            // dispose managed state (managed objects).
            if (disposing)
            {
                if (_disposables != null)
                {
                    _disposables.Dispose();
                    _disposables = null;
                }
                // _ortValueHolder can be null when no native memory is involved
                if (_ortValue != null)
                {
                    _ortValue.Dispose();
                    _ortValue = null;
                }
            }
            // Mark disposed on every path. Previously this was set only when
            // disposing == true, so a Dispose(false) call never recorded the
            // object as disposed — contrary to the standard pattern.
            _disposed = true;
        }
// Example #15
        private string[] _dataBufferAsString; // string tensor values copied into managed memory

        /// <summary>
        /// Constructs an instance and takes ownership of ortValue on success.
        /// Queries the tensor's element type and dimensions, validates them against T,
        /// and captures either a pointer to the native data (non-string tensors) or a
        /// managed copy of all strings (string tensors).
        /// </summary>
        /// <param name="ortValue">ortValue that is a Tensor</param>
        public NativeOnnxTensorMemory(OrtValue ortValue)
        {
            Type   type         = null;
            int    width        = 0;
            IntPtr typeAndShape = IntPtr.Zero;

            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorTypeAndShape(ortValue.Handle, out typeAndShape));
            try
            {
                TensorElementType elemType;
                {
                    IntPtr el_type;
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorElementType(typeAndShape, out el_type));
                    elemType = (TensorElementType)el_type;
                }

                // Map the native element type to its managed Type and byte width.
                if (!TensorElementTypeConverter.GetTypeAndWidth(elemType, out type, out width))
                {
                    throw new OnnxRuntimeException(ErrorCode.InvalidArgument,
                                                   "Unable to query type information for data type: " + elemType.ToString());
                }

                // The generic parameter must match the native element type exactly.
                if (typeof(T) != type)
                {
                    var message = String.Format("The NativeOnnxTensorMemory<T> type being instantiated for T = : {0} while supplied OrtValue contains T = {1}",
                                                typeof(T), type);
                    throw new OnnxRuntimeException(ErrorCode.InvalidArgument, message);
                }

                ElementType  = elemType;
                ElementWidth = width;
                UIntPtr dimension;
                long    count;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetDimensionsCount(typeAndShape, out dimension));
                {
                    IntPtr el_count;
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorShapeElementCount(typeAndShape, out el_count));  // count can be negative.
                    count = (long)el_count;
                }
                // A negative element count indicates symbolic (unresolved) dimensions.
                if (count < 0)
                {
                    throw new NotSupportedException("Symbolic dimensions in the tensor is not supported");
                }

                long[] shape = new long[dimension.ToUInt64()];
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetDimensions(typeAndShape, shape, dimension)); //Note: shape must be alive during the call

                Count      = (int)count;
                Dimensions = new int[dimension.ToUInt64()];
                for (ulong i = 0; i < dimension.ToUInt64(); i++)
                {
                    Dimensions[i] = (int)shape[i];
                }

                if (elemType != TensorElementType.String)
                {
                    // Non-string tensors: point directly at the native buffer, no copy.
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorMutableData(ortValue.Handle, out _dataBufferPointer));
                }
                else
                {
                    // String tensors: read the whole UTF-8 blob plus per-element offsets,
                    // then decode each element into a managed string array.
                    UIntPtr strLen;
                    var     offsets = new UIntPtr[Count];
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetStringTensorDataLength(ortValue.Handle, out strLen));
                    var dataBuffer = new byte[strLen.ToUInt64()];

                    using (var dataBufferHandle = new Memory <byte>(dataBuffer).Pin())
                        using (var offsetMemoryHandle = new Memory <UIntPtr>(offsets).Pin())
                        {
                            unsafe
                            {
                                _dataBufferPointer = (IntPtr)dataBufferHandle.Pointer;
                                NativeApiStatus.VerifySuccess(
                                    NativeMethods.OrtGetStringTensorContent(
                                        ortValue.Handle, _dataBufferPointer, strLen,
                                        (IntPtr)offsetMemoryHandle.Pointer,
                                        (UIntPtr)Count));
                            }
                            _dataBufferAsString = new string[Count];

                            for (var i = 0; i < offsets.Length; i++)
                            {
                                // Each element's length is the distance to the next offset
                                // (or to the end of the blob for the last element).
                                var length = (i == offsets.Length - 1)
                                ? strLen.ToUInt64() - offsets[i].ToUInt64()
                                : offsets[i + 1].ToUInt64() - offsets[i].ToUInt64();
                                // Onnx specifies strings always in UTF-8, no trailing null, no leading BOM
                                _dataBufferAsString[i] = Encoding.UTF8.GetString(dataBuffer, (int)offsets[i], (int)length);
                            }
                        }
                }
                // Transfer ownership
                _ortValue = new OrtValue(ortValue.Disown());
            }
            finally
            {
                NativeMethods.OrtReleaseTensorTypeAndShapeInfo(typeAndShape);
            }
        }
// Example #16
 /// <summary>
 /// Add a pre-allocated initializer to a session. If a model contains an initializer with a name
 /// that is same as the name passed to this API call, ORT will use this initializer instance
 /// instead of deserializing one from the model file. This is useful when you want to share
 /// the same initializer across sessions.
 /// \param name name of the initializer
 /// \param val OrtValue containing the initializer. Lifetime of 'val' and the underlying initializer buffer must be
 /// managed by the user (created using the CreateTensorWithDataAsOrtValue API) and it must outlive the session object
 /// to which it is added.
 /// </summary>
 public void AddInitializer(string name, OrtValue ort_value)
 {
     // NOTE(review): the name is passed to the native layer as a managed string,
     // unlike the overload elsewhere in this file that pins a zero-terminated
     // UTF-8 copy — confirm NativeMethods.OrtAddInitializer declares marshalling
     // for this string parameter.
     NativeApiStatus.VerifySuccess(NativeMethods.OrtAddInitializer(handle, name, ort_value.Handle));
 }
// Example #17
        /// <summary>
        /// This is a factory method creates a native Onnxruntime OrtValue containing a tensor.
        /// The method will attempt to pin managed memory so no copying occurs when data is passed down
        /// to native code.
        /// </summary>
        /// <param name="value">Tensor object</param>
        /// <param name="memoryHandle">For all tensor types but string tensors we endeavor to use managed memory
        ///  to avoid additional allocation and copy. This out parameter represents a chunk of pinned memory which will need
        ///  to be disposed when no longer needed. The lifespan of memoryHandle should eclipse the lifespan of the corresponding
        ///  OrtValue.
        /// </param>
        /// <param name="elementType">discovered tensor element type</param>
        /// <returns>An instance of OrtValue constructed on top of the object</returns>
        public static OrtValue CreateFromTensorObject(Object value, out MemoryHandle?memoryHandle,
                                                      out TensorElementType elementType)
        {
            // Check if this is a Tensor
            if (!(value is TensorBase))
            {
                throw new NotSupportedException("The inference value " + nameof(value) + " is not of a supported type");
            }

            var tensorBase = value as TensorBase;
            var typeInfo   = tensorBase.GetTypeInfo();

            if (typeInfo == null)
            {
                throw new OnnxRuntimeException(ErrorCode.RequirementNotRegistered, "BUG Check");
            }

            MemoryHandle?memHandle;
            OrtValue     ortValue         = null;
            int          dataBufferLength = 0;

            long[] shape = null;
            int    rank  = 0;

            TensorElementType elType = typeInfo.ElementType;
            var typeSize             = typeInfo.TypeSize;

            if (typeInfo.IsString)
            {
                // String tensors copy element data into native memory; nothing to pin.
                ortValue  = CreateStringTensor(value as Tensor <string>);
                memHandle = null;
            }
            else
            {
                // Pin the managed tensor buffer for the element type at hand; each case
                // differs only in the generic argument passed to PinAsTensor.
                switch (elType)
                {
                case TensorElementType.Float:
                    PinAsTensor(value as Tensor <float>, typeSize, out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                case TensorElementType.Double:
                    PinAsTensor(value as Tensor <double>, typeSize, out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                case TensorElementType.Int32:
                    PinAsTensor(value as Tensor <int>, typeSize, out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                case TensorElementType.UInt32:
                    PinAsTensor(value as Tensor <uint>, typeSize, out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                case TensorElementType.Int64:
                    PinAsTensor(value as Tensor <long>, typeSize, out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                case TensorElementType.UInt64:
                    PinAsTensor(value as Tensor <ulong>, typeSize, out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                case TensorElementType.Int16:
                    PinAsTensor(value as Tensor <short>, typeSize, out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                case TensorElementType.UInt16:
                    PinAsTensor(value as Tensor <ushort>, typeSize,
                                out memHandle, out dataBufferLength,
                                out shape, out rank);

                    break;

                case TensorElementType.UInt8:
                    PinAsTensor(value as Tensor <byte>, typeSize,
                                out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                case TensorElementType.Int8:
                    PinAsTensor(value as Tensor <sbyte>, typeSize,
                                out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                case TensorElementType.Bool:
                    PinAsTensor(value as Tensor <bool>, typeSize,
                                out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                case TensorElementType.Float16:
                    PinAsTensor(value as Tensor <Float16>, typeSize,
                                out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                case TensorElementType.BFloat16:
                    PinAsTensor(value as Tensor <BFloat16>, typeSize,
                                out memHandle, out dataBufferLength,
                                out shape, out rank);
                    break;

                default:
                    throw new NotSupportedException("Element type: " + elType + " is not of a supported type");
                }

                try
                {
                    Debug.Assert(memHandle.HasValue);
                    IntPtr dataBufferPointer = IntPtr.Zero;
                    unsafe
                    {
                        dataBufferPointer = (IntPtr)((MemoryHandle)memHandle).Pointer;
                    }

                    // Create an OrtValue over the pinned buffer; the native side does
                    // not own the memory, hence the pin must outlive the OrtValue.
                    IntPtr nativeValue;
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtCreateTensorWithDataAsOrtValue(
                                                      OrtMemoryInfo.DefaultInstance.Pointer,
                                                      dataBufferPointer,
                                                      (UIntPtr)(dataBufferLength),
                                                      shape,
                                                      (UIntPtr)rank,
                                                      elType,
                                                      out nativeValue));

                    ortValue = new OrtValue(nativeValue);
                }
                catch (Exception)
                {
                    // Unpin on failure so the buffer is not left pinned.
                    memHandle?.Dispose();
                    throw;
                }
            }
            memoryHandle = memHandle;
            elementType  = elType;
            return(ortValue);
        }
        /// <summary>
        /// Copies the keys and values of a native map into a managed Dictionary and returns it
        /// wrapped in a DisposableNamedOnnxValue that does not own or dispose any native memory.
        /// This method takes possession of ortValueTensorKeys and ortValueTensorValues and
        /// disposes of them. Disposing an OrtValue that has lost ownership is a no-op, so the
        /// caller remains responsible for disposing its arguments, in particular on exception.
        /// </summary>
        /// <typeparam name="K">key type</typeparam>
        /// <typeparam name="V">value type</typeparam>
        /// <param name="name">name of the output parameter</param>
        /// <param name="ortValueTensorKeys">tensor with map keys</param>
        /// <param name="ortValueTensorValues">tensor with map values</param>
        /// <returns>instance of DisposableNamedOnnxValue holding a managed Dictionary</returns>
        private static DisposableNamedOnnxValue DisposableNamedOnnxValueFromNativeMapElements <K, V>(string name,
                                                                                                     OrtValue ortValueTensorKeys, OrtValue ortValueTensorValues)
        {
            using (var valuesMemory = new NativeOnnxTensorMemory <V>(ortValueTensorValues))
            {
                var valuesTensor = new DenseTensor <V>(valuesMemory.Memory, valuesMemory.Dimensions);

                // String keys require a separate projection that copies UTF-8 bytes into
                // managed strings; all other key types read the native buffer directly.
                if (typeof(K) == typeof(string))
                {
                    using (var keysMemory = new NativeOnnxTensorMemory <string>(ortValueTensorKeys))
                    {
                        var keysTensor = new DenseTensor <string>(keysMemory.GetBytesAsStringMemory(), keysMemory.Dimensions);
                        var stringKeyedMap = new Dictionary <string, V>();
                        for (var i = 0; i < keysTensor.Length; i++)
                        {
                            stringKeyedMap.Add(keysTensor.GetValue(i), valuesTensor.GetValue(i));
                        }
                        // The dictionary is fully managed, so the result carries no native ownership.
                        return new DisposableNamedOnnxValue(name, stringKeyedMap, OnnxValueType.ONNX_TYPE_MAP, TensorElementType.DataTypeMax, null);
                    }
                }

                using (var keysMemory = new NativeOnnxTensorMemory <K>(ortValueTensorKeys))
                {
                    var keysTensor = new DenseTensor <K>(keysMemory.Memory, keysMemory.Dimensions);
                    var map = new Dictionary <K, V>();
                    for (var i = 0; i < keysTensor.Length; i++)
                    {
                        map.Add(keysTensor.GetValue(i), valuesTensor.GetValue(i));
                    }
                    return new DisposableNamedOnnxValue(name, map, OnnxValueType.ONNX_TYPE_MAP, TensorElementType.DataTypeMax, null);
                }
            }
        }
// Exemple #19
// 0
 /// <summary>
 /// Pin the underlying memory and create an instance of OrtValue
 /// based on the pinned managed memory. The caller is responsible for Disposing
 /// both the returned OrtValue and pinnedMemoryHandle.
 /// </summary>
 /// <param name="pinnedMemoryHandle">dispose only after the returned OrtValue is disposed</param>
 /// <returns>an instance of OrtValue. The lifespan of OrtValue must overlap pinnedMemoryHandle</returns>
 internal virtual OrtValue ToOrtValue(out MemoryHandle? pinnedMemoryHandle)
 {
     // The element type reported by CreateFromTensorObject is not needed here; discard it.
     return OrtValue.CreateFromTensorObject(_value, out pinnedMemoryHandle, out _);
 }
// Exemple #20
// 0
        // Backing store for string tensors: the native UTF-8 payload is decoded into
        // managed strings here, because .NET strings cannot alias native memory.
        private string[] _dataBufferAsString; // string tensor values copied into managed memory

        /// <summary>
        /// Constructs an instance and takes ownership of ortValue on success.
        /// Queries the native tensor's element type and shape, validates that T matches the
        /// native element type, and either captures a pointer to the native data buffer
        /// (non-string tensors) or copies all string elements into managed memory.
        /// </summary>
        /// <param name="ortValue">ortValue that is a Tensor</param>
        /// <exception cref="NotSupportedException">if T does not match the native element type,
        /// or the tensor has symbolic (negative) dimensions</exception>
        public NativeOnnxTensorMemory(OrtValue ortValue)
        {
            Type   type         = null;
            int    width        = 0;
            IntPtr typeAndShape = IntPtr.Zero;

            // The native type-and-shape info allocated here is released in the finally block.
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorTypeAndShape(ortValue.Handle, out typeAndShape));
            try
            {
                TensorElementType elemType;
                {
                    IntPtr el_type;
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorElementType(typeAndShape, out el_type));
                    elemType = (TensorElementType)el_type;
                }
                TensorElementTypeConverter.GetTypeAndWidth(elemType, out type, out width);

                // The managed generic argument must match the native element type exactly.
                if (typeof(T) != type)
                {
                    throw new NotSupportedException(nameof(NativeOnnxTensorMemory <T>) + " does not support T = " + nameof(T));
                }

                ElementType  = elemType;
                ElementWidth = width;
                UIntPtr dimension;
                long    count;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetDimensionsCount(typeAndShape, out dimension));
                {
                    IntPtr el_count;
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorShapeElementCount(typeAndShape, out el_count));  // count can be negative.
                    count = (long)el_count;
                }
                // A negative element count indicates symbolic (unresolved) dimensions.
                if (count < 0)
                {
                    throw new NotSupportedException("Symbolic dimensions in the tensor is not supported");
                }

                long[] shape = new long[dimension.ToUInt64()];
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetDimensions(typeAndShape, shape, dimension)); //Note: shape must be alive during the call

                // Narrow the native 64-bit dimensions to the int[] form used by DenseTensor.
                Count      = (int)count;
                Dimensions = new int[dimension.ToUInt64()];
                for (ulong i = 0; i < dimension.ToUInt64(); i++)
                {
                    Dimensions[i] = (int)shape[i];
                }

                if (typeof(T) != typeof(string))
                {
                    // Non-string tensors: keep a direct pointer into the native buffer.
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorMutableData(ortValue.Handle, out _dataBufferPointer));
                }
                else
                {
                    // String tensors: fetch the whole UTF-8 blob plus per-element offsets,
                    // then decode each element into a managed string.
                    UIntPtr strLen;
                    var     offsets = new UIntPtr[Count];
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetStringTensorDataLength(ortValue.Handle, out strLen));
                    var dataBuffer = new byte[strLen.ToUInt64()];

                    // Both managed arrays are pinned only for the duration of the native call.
                    using (var dataBufferHandle = new Memory <byte>(dataBuffer).Pin())
                        using (var offsetMemoryHandle = new Memory <UIntPtr>(offsets).Pin())
                        {
                            unsafe
                            {
                                // NOTE(review): _dataBufferPointer points into dataBuffer, which is
                                // unpinned once this using block ends; it appears the pointer is not
                                // meant to be dereferenced for string tensors afterwards — confirm.
                                _dataBufferPointer = (IntPtr)dataBufferHandle.Pointer;
                                NativeApiStatus.VerifySuccess(
                                    NativeMethods.OrtGetStringTensorContent(
                                        ortValue.Handle, _dataBufferPointer, strLen,
                                        (IntPtr)offsetMemoryHandle.Pointer,
                                        (UIntPtr)Count));
                            }
                            _dataBufferAsString = new string[Count];

                            // Element i spans [offsets[i], offsets[i+1]); the last element
                            // runs to the end of the blob.
                            for (var i = 0; i < offsets.Length; i++)
                            {
                                var length = (i == offsets.Length - 1)
                                ? strLen.ToUInt64() - offsets[i].ToUInt64()
                                : offsets[i + 1].ToUInt64() - offsets[i].ToUInt64();
                                // Onnx specifies strings always in UTF-8, no trailing null, no leading BOM
                                _dataBufferAsString[i] = Encoding.UTF8.GetString(dataBuffer, (int)offsets[i], (int)length);
                            }
                        }
                }
                // Transfer ownership, but only do so if the original OrtValue is already owned.
                if (ortValue.IsOwned)
                {
                    _ortValue = new OrtValue(ortValue.Disown());
                }
                else
                {
                    _ortValue = new OrtValue(ortValue.Handle, false);
                }
            }
            finally
            {
                // Always release the native type-and-shape info, including on exception.
                NativeMethods.OrtReleaseTensorTypeAndShapeInfo(typeAndShape);
            }
        }
 /// <summary>
 /// Creates a DisposableNamedOnnxValue that takes possession of the native tensor held
 /// by ortValueElement. On success the original argument loses ownership of the native
 /// handle; the caller is still responsible for disposing it on exception (disposing an
 /// OrtValue without ownership is a no-op and fine).
 /// </summary>
 /// <typeparam name="T">data type</typeparam>
 /// <param name="name">name of the output</param>
 /// <param name="ortValue">native tensor</param>
 /// <returns>DisposableNamedOnnxValue instance</returns>
 private static DisposableNamedOnnxValue DisposableNamedOnnxValueFromNativeTensor <T>(string name, OrtValue ortValue)
 {
     // String tensors need their UTF-8 payload copied into managed strings,
     // so they use a dedicated memory projection.
     if (typeof(T) == typeof(string))
     {
         var stringMemory = new NativeOnnxTensorMemory <string>(ortValue);
         try
         {
             var stringTensor = new DenseTensor <string>(stringMemory.GetBytesAsStringMemory(), stringMemory.Dimensions);
             return new DisposableNamedOnnxValue(name, stringTensor, OnnxValueType.ONNX_TYPE_TENSOR, stringMemory.ElementType, stringMemory);
         }
         catch (Exception)
         {
             // The wrapper now owns the native handle; release it before propagating.
             stringMemory.Dispose();
             throw;
         }
     }

     var tensorMemory = new NativeOnnxTensorMemory <T>(ortValue);
     try
     {
         var denseTensor = new DenseTensor <T>(tensorMemory.Memory, tensorMemory.Dimensions);
         return new DisposableNamedOnnxValue(name, denseTensor, OnnxValueType.ONNX_TYPE_TENSOR, tensorMemory.ElementType, tensorMemory);
     }
     catch (Exception)
     {
         tensorMemory.Dispose();
         throw;
     }
 }
// Exemple #22
// 0
 /// <summary>
 /// Takes ownership of the native handle held by ortValue together with the list of
 /// disposable values whose lifetime must be tied to it.
 /// </summary>
 /// <param name="ortValue">must currently own its native handle</param>
 /// <param name="disposables">collection released together with this owner</param>
 internal NativeOrtValueCollectionOwner(OrtValue ortValue, DisposableList <DisposableNamedOnnxValue> disposables)
 {
     // Ownership transfer is only meaningful when the source actually owns the handle.
     Debug.Assert(ortValue.IsOwned);
     _disposables = disposables;
     _ortValue    = new OrtValue(ortValue.Disown());
 }
 /// <summary>
 /// Convenience overload that creates a DisposableNamedOnnxValue from an OrtValue
 /// using the default OrtAllocator.
 /// </summary>
 /// <param name="name">name of the value</param>
 /// <param name="ortValue">underlying native value</param>
 /// <returns>DisposableNamedOnnxValue instance</returns>
 internal static DisposableNamedOnnxValue CreateFromOrtValue(string name, OrtValue ortValue)
 {
     return CreateFromOrtValue(name, ortValue, OrtAllocator.DefaultInstance);
 }
        /// <summary>
        /// Creates an instance of DisposableNamedOnnxValue from an OrtValue assumed to be a
        /// tensor, dispatching on its native element type; takes ownership of ortValue on
        /// success.
        /// </summary>
        /// <param name="name">name of the value</param>
        /// <param name="ortValue">underlying OrtValue</param>
        /// <returns>DisposableNamedOnnxValue wrapping a tensor of the matching managed type</returns>
        internal static DisposableNamedOnnxValue CreateTensorFromOnnxValue(string name, OrtValue ortValue)
        {
            /* Get Tensor element type */  //TODO: Assumed value is Tensor, need to support non-tensor types in future
            TensorElementType elemType = TensorElementType.DataTypeMax;

            // The native type-and-shape info must be released regardless of outcome.
            IntPtr typeAndShape = IntPtr.Zero;
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorTypeAndShape(ortValue.Handle, out typeAndShape));
            try
            {
                IntPtr el_type;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorElementType(typeAndShape, out el_type));
                elemType = (TensorElementType)el_type;
            }
            finally
            {
                NativeMethods.OrtReleaseTensorTypeAndShapeInfo(typeAndShape);
            }

            // Dispatch the native element type onto the matching managed tensor wrapper.
            switch (elemType)
            {
            case TensorElementType.Float:
                return DisposableNamedOnnxValueFromNativeTensor <float>(name, ortValue);

            case TensorElementType.Double:
                return DisposableNamedOnnxValueFromNativeTensor <double>(name, ortValue);

            case TensorElementType.Int16:
                return DisposableNamedOnnxValueFromNativeTensor <short>(name, ortValue);

            case TensorElementType.UInt16:
                return DisposableNamedOnnxValueFromNativeTensor <ushort>(name, ortValue);

            case TensorElementType.Int32:
                return DisposableNamedOnnxValueFromNativeTensor <int>(name, ortValue);

            case TensorElementType.UInt32:
                return DisposableNamedOnnxValueFromNativeTensor <uint>(name, ortValue);

            case TensorElementType.Int64:
                return DisposableNamedOnnxValueFromNativeTensor <long>(name, ortValue);

            case TensorElementType.UInt64:
                return DisposableNamedOnnxValueFromNativeTensor <ulong>(name, ortValue);

            case TensorElementType.UInt8:
                return DisposableNamedOnnxValueFromNativeTensor <byte>(name, ortValue);

            case TensorElementType.Int8:
                return DisposableNamedOnnxValueFromNativeTensor <sbyte>(name, ortValue);

            case TensorElementType.String:
                return DisposableNamedOnnxValueFromNativeTensor <string>(name, ortValue);

            case TensorElementType.Bool:
                return DisposableNamedOnnxValueFromNativeTensor <bool>(name, ortValue);

            case TensorElementType.Float16:
                return DisposableNamedOnnxValueFromNativeTensor <Float16>(name, ortValue);

            case TensorElementType.BFloat16:
                return DisposableNamedOnnxValueFromNativeTensor <BFloat16>(name, ortValue);

            default:
                throw new NotSupportedException("Tensor of element type: " + elemType + " is not supported");
            }
        }