        /// <summary>
        /// Creates an instance of DisposableNamedOnnxValue that takes ownership of ortValueSequence
        /// and of all disposable native objects that are elements of the sequence.
        /// </summary>
        /// <param name="name">name of the output</param>
        /// <param name="ortValueSequence">OrtValue that contains the native sequence</param>
        /// <param name="allocator">allocator to use</param>
        /// <returns>DisposableNamedOnnxValue</returns>
        private static DisposableNamedOnnxValue DisposableNamedOnnxValueFromSequence(string name, OrtValue ortValueSequence, OrtAllocator allocator)
        {
            DisposableNamedOnnxValue result = null;
            IntPtr count;

            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValueCount(ortValueSequence.Handle, out count));
            var sequence = new DisposableList <DisposableNamedOnnxValue>(count.ToInt32());

            try
            {
                for (int i = 0; i < count.ToInt32(); i++)
                {
                    IntPtr nativeOnnxValueSeq;
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(ortValueSequence.Handle, i, allocator.Pointer, out nativeOnnxValueSeq));
                    using (var ortValueElement = new OrtValue(nativeOnnxValueSeq))
                    {
                        // Will take ownership or throw
                        sequence.Add(CreateFromOrtValue(string.Empty, ortValueElement, allocator));
                    }
                }
                // NativeOrtValueCollectionOwner will take ownership of ortValueSequence and will make sure sequence
                // is also disposed.
                var nativeCollectionManager = new NativeOrtValueCollectionOwner(ortValueSequence, sequence);
                result = new DisposableNamedOnnxValue(name, sequence, OnnxValueType.ONNX_TYPE_SEQUENCE, TensorElementType.DataTypeMax, nativeCollectionManager);
            }
            catch (Exception)
            {
                sequence.Dispose();
                throw;
            }
            return(result);
        }
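        // Illustrative usage sketch: how a caller might consume a sequence output that was assembled by
        // DisposableNamedOnnxValueFromSequence (for example, the zipmap'ed probability output of a classifier).
        // The output name "output_probability" and the long/float key and value types are hypothetical and
        // depend on the model.
        private static void PrintSequenceOfMapsExample(InferenceSession session, IReadOnlyCollection<NamedOnnxValue> inputs)
        {
            using (var results = session.Run(inputs))
            {
                var probabilities = results.First(r => r.Name == "output_probability"); // hypothetical output name
                foreach (var element in probabilities.AsEnumerable<NamedOnnxValue>())
                {
                    // Each sequence element is assumed to be a map exposed as a managed dictionary.
                    foreach (var pair in element.AsDictionary<long, float>())
                    {
                        Console.WriteLine($"label {pair.Key}: {pair.Value}");
                    }
                }
            }
        }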
        /// <summary>
        ///  This method returns a collection of DisposableNamedOnnxValue, as in the other interfaces.
        ///  It queries the output names from the OrtIoBinding object and pairs them with the array of OrtValues
        ///  returned from OrtIoBinding.GetOutputValues().
        /// </summary>
        /// <param name="runOptions">RunOptions</param>
        /// <param name="ioBinding">OrtIoBinding instance with bindings</param>
        /// <param name="names">optional parameter. If you already know the names of the outputs you can save a native
        /// call to retrieve output names. They will be paired with the returned OrtValues and combined into DisposableNamedOnnxValues.
        /// Otherwise, the method will retrieve output names from the OrtIoBinding instance.
        /// It is an error to supply a different number of names than the number of outputs returned</param>
        /// <returns>A disposable collection of DisposableNamedOnnxValue, one element per output</returns>
        public IDisposableReadOnlyCollection <DisposableNamedOnnxValue> RunWithBindingAndNames(RunOptions runOptions, OrtIoBinding ioBinding, string[] names = null)
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtRunWithBinding(Handle, runOptions.Handle, ioBinding.Handle));
            using (var ortValues = ioBinding.GetOutputValues())
            {
                string[] outputNames = names;
                if (outputNames == null)
                {
                    outputNames = ioBinding.GetOutputNames();
                }

                if (outputNames.Length != ortValues.Count)
                {
                    throw new OnnxRuntimeException(ErrorCode.InvalidArgument,
                                                   "Number of specified names: " + outputNames.Length + " does not match the number of outputs: " +
                                                   ortValues.Count);
                }

                var result = new DisposableList <DisposableNamedOnnxValue>(outputNames.Length);
                try
                {
                    for (int i = 0; i < outputNames.Length; ++i)
                    {
                        var ortValue = ortValues.ElementAt(i);
                        result.Add(DisposableNamedOnnxValue.CreateTensorFromOnnxValue(outputNames[i], ortValue.Handle));
                        ortValue.Disown();
                    }
                }
                catch (Exception)
                {
                    result.Dispose();
                    throw;
                }
                return(result);
            }
        }
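        // Illustrative usage sketch for RunWithBindingAndNames, assuming a loaded session and a model with a
        // single input named "input" and a single output named "output" (both names are hypothetical).
        // The binding is obtained from the session and the output is left for ORT to allocate.
        private static void RunWithIoBindingExample(InferenceSession session, DenseTensor<float> inputTensor)
        {
            using (var runOptions = new RunOptions())
            using (var ioBinding = session.CreateIoBinding())
            using (var inputValue = FixedBufferOnnxValue.CreateFromTensor(inputTensor))
            {
                ioBinding.BindInput("input", inputValue);                               // hypothetical input name
                ioBinding.BindOutputToDevice("output", OrtMemoryInfo.DefaultInstance);  // let ORT allocate the output
                using (var results = session.RunWithBindingAndNames(runOptions, ioBinding))
                {
                    var output = results.First().AsTensor<float>();
                    Console.WriteLine($"first output value: {output.GetValue(0)}");
                }
            }
        }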
        internal static DisposableNamedOnnxValue CreateFromOrtValue(string name, OrtValue ortValue, OrtAllocator allocator)
        {
            DisposableNamedOnnxValue result = null;

            IntPtr valueType;

            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValueType(ortValue.Handle, out valueType));
            OnnxValueType onnxValueType = (OnnxValueType)valueType;

            switch (onnxValueType)
            {
            case OnnxValueType.ONNX_TYPE_TENSOR:
                result = CreateTensorFromOnnxValue(name, ortValue);
                break;

            case OnnxValueType.ONNX_TYPE_SEQUENCE:
                result = DisposableNamedOnnxValueFromSequence(name, ortValue, allocator);
                break;

            case OnnxValueType.ONNX_TYPE_MAP:
                result = DisposableNamedOnnxValueFromNativeMap(name, ortValue, allocator);
                break;

            default:
                throw new NotSupportedException("OnnxValueType : " + onnxValueType + " is not supported");
            }
            return(result);
        }
        public unsafe void Run(
            IReadOnlyCollection <NamedOnnxValue> inputs,
            IReadOnlyCollection <string> outputNames,
            RunOptions options,
            IList <DisposableNamedOnnxValue> result)
        {
            string[] outputNamesArray = GetOutputNames((IReadOnlyList <string>)outputNames);
            var      outputValueArray = stackalloc IntPtr[outputNamesArray.Length];

            Run(inputs, options, outputNamesArray, outputValueArray);

            try
            {
                for (uint i = 0; i < outputNamesArray.Length; i++)
                {
                    result.Add(DisposableNamedOnnxValue.CreateFromOnnxValue(outputNamesArray[i], outputValueArray[i]));
                }
            }
            catch (OnnxRuntimeException)
            {
                // clean up the individual output values if they are not null
                OrtReleaseValues(outputValueArray, outputNamesArray.Length);
                throw;
            }
        }
        /// <summary>
        /// Will extract keys and values from the map and create a DisposableNamedOnnxValue from it
        /// </summary>
        /// <param name="name">name of the output</param>
        /// <param name="ortValueMap">ortValue that represents a map.
        /// This function does not take ownership of the map as it we copy all keys an values into a dictionary. We let the caller dispose of it</param>
        /// <param name="allocator"></param>
        /// <returns>DisposableNamedOnnxValue</returns>
        private static DisposableNamedOnnxValue DisposableNamedOnnxValueFromNativeMap(string name, OrtValue ortValueMap, OrtAllocator allocator)
        {
            DisposableNamedOnnxValue result = null;

            // Map processing currently does not recurse. The map is assumed to contain
            // only tensors of primitive types and strings; no nested sequences or maps.
            // The data is copied into a dictionary and all intermediate OrtValues are disposed,
            // so no native values are exposed for client consumption.
            using (var cleanUpList = new DisposableList <IDisposable>())
            {
                // Take possession of the map ortValueElement
                IntPtr nativeOnnxValueMapKeys = IntPtr.Zero;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(ortValueMap.Handle, 0, allocator.Pointer, out nativeOnnxValueMapKeys));
                var ortValueKeys = new OrtValue(nativeOnnxValueMapKeys);
                cleanUpList.Add(ortValueKeys);

                IntPtr nativeOnnxValueMapValues = IntPtr.Zero;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(ortValueMap.Handle, 1, allocator.Pointer, out nativeOnnxValueMapValues));
                var ortValueValues = new OrtValue(nativeOnnxValueMapValues);
                cleanUpList.Add(ortValueValues);

                IntPtr typeAndShape = IntPtr.Zero;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorTypeAndShape(nativeOnnxValueMapKeys, out typeAndShape));
                TensorElementType elemType = TensorElementType.DataTypeMax;
                try
                {
                    IntPtr el_type;
                    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorElementType(typeAndShape, out el_type));
                    elemType = (TensorElementType)el_type;
                }
                finally
                {
                    NativeMethods.OrtReleaseTensorTypeAndShapeInfo(typeAndShape);
                }

                // XXX: This code always assumes that the value type is float and makes no checks
                // similar to those performed for the key type. Also, a map value can in general be
                // another sequence or map, not just a tensor.
                switch (elemType)
                {
                case TensorElementType.Int64:
                    result = DisposableNamedOnnxValueFromNativeMapElements <Int64, float>(string.Empty, ortValueKeys, ortValueValues);
                    break;

                case TensorElementType.String:
                    result = DisposableNamedOnnxValueFromNativeMapElements <string, float>(string.Empty, ortValueKeys, ortValueValues);
                    break;

                default:
                    throw new NotSupportedException("Map of element type: " + elemType + " is not supported");
                }
            }
            return(result);
        }
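        // Illustrative usage sketch: reading a map output handled by DisposableNamedOnnxValueFromNativeMap.
        // The Int64 key / float value types mirror the switch above; whether a given model produces such a
        // map is an assumption.
        private static void PrintMapOutputExample(DisposableNamedOnnxValue mapOutput)
        {
            // Keys and values were copied into a managed dictionary, so reading them does not touch native memory.
            foreach (var pair in mapOutput.AsDictionary<long, float>())
            {
                Console.WriteLine($"{pair.Key} -> {pair.Value}");
            }
        }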
        IDisposableReadOnlyCollection <DisposableNamedOnnxValue> CreateDisposableResult(List <OrtValue> ortValues,
                                                                                        IReadOnlyCollection <string> outputNames)
        {
            var result = new DisposableList <DisposableNamedOnnxValue>(outputNames.Count);

            try
            {
                for (int i = 0; i < ortValues.Count; i++)
                {
                    var ortValue = ortValues[i];
                    result.Add(DisposableNamedOnnxValue.CreateFromOrtValue(outputNames.ElementAt(i), ortValue));
                }
            }
            catch (OnnxRuntimeException)
            {
                result.Dispose();
                throw;
            }
            return(result);
        }
        /// <summary>
        /// Runs the loaded model for the given inputs, and fetches the specified outputs in <paramref name="outputNames"/>. Uses the given RunOptions for this run.
        /// </summary>
        /// <param name="inputNames">Specify a collection of string that indicates the input names. Should match <paramref name="inputValues"/>.</param>
        /// <param name="inputValues">Specify a collection of <see cref="FixedBufferOnnxValue"/> that indicates the input values.</param>
        /// <param name="outputNames">Specify a collection of string that indicates the output names to fetch.</param>
        /// <param name="options"></param>
        /// <returns>Output Tensors in a Collection of NamedOnnxValue. User must dispose the output.</returns>
        public IDisposableReadOnlyCollection <DisposableNamedOnnxValue> Run(
            IReadOnlyCollection <string> inputNames,
            IReadOnlyCollection <FixedBufferOnnxValue> inputValues,
            IReadOnlyCollection <string> outputNames,
            RunOptions options)
        {
            if (inputNames.Count != inputValues.Count)
            {
                throw new ArgumentException($"Length of {nameof(inputNames)} ({inputNames.Count}) must match that of {nameof(inputValues)} ({inputValues.Count}).");
            }

            // prepare inputs
            string[] inputNamesArray  = inputNames as string[] ?? inputNames.ToArray();
            IntPtr[] inputValuesArray = new IntPtr[inputNames.Count];
            int      inputIndex       = 0;

            foreach (var input in inputValues)
            {
                inputValuesArray[inputIndex] = input.Value;

                inputIndex++;
            }

            // prepare outputs
            string[] outputNamesArray  = outputNames as string[] ?? outputNames.ToArray();
            IntPtr[] outputValuesArray = new IntPtr[outputNames.Count];

            IntPtr status = NativeMethods.OrtRun(
                _nativeHandle,
                options.Handle,
                inputNamesArray,
                inputValuesArray,
                (UIntPtr)inputNames.Count,
                outputNamesArray,
                (UIntPtr)outputNames.Count,
                outputValuesArray                                 /* Empty array is passed in to receive output OrtValue pointers */
                );

            try
            {
                NativeApiStatus.VerifySuccess(status);
                var result = new DisposableList <DisposableNamedOnnxValue>(outputValuesArray.Length);
                for (int i = 0; i < outputValuesArray.Length; i++)
                {
                    result.Add(DisposableNamedOnnxValue.CreateFromOnnxValue(outputNamesArray[i], outputValuesArray[i]));
                }

                return(result);
            }
            catch (OnnxRuntimeException)
            {
                // clean up the individual output values if they are not null
                for (int i = 0; i < outputValuesArray.Length; i++)
                {
                    if (outputValuesArray[i] != IntPtr.Zero)
                    {
                        NativeMethods.OrtReleaseValue(outputValuesArray[i]);
                    }
                }
                throw;
            }
        }
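        // Illustrative usage sketch for the FixedBufferOnnxValue overload of Run above. The input/output
        // names "input" and "output" are hypothetical; the caller owns and disposes both the pinned input
        // value and the returned collection.
        private static void RunWithFixedBufferExample(InferenceSession session, DenseTensor<float> inputTensor)
        {
            using (var runOptions = new RunOptions())
            using (var inputValue = FixedBufferOnnxValue.CreateFromTensor(inputTensor))
            using (var results = session.Run(new[] { "input" }, new[] { inputValue }, new[] { "output" }, runOptions))
            {
                var output = results.First().AsTensor<float>();
                Console.WriteLine($"output length: {output.Length}");
            }
        }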
        /// <summary>
        /// Runs the loaded model for the given inputs, and fetches the specified outputs in <paramref name="outputNames"/>. Uses the given RunOptions for this run.
        /// </summary>
        /// <param name="inputs">Specify a collection of <see cref="NamedOnnxValue"/> that indicates the input values.</param>
        /// <param name="outputNames">Specify a collection of string that indicates the output names to fetch.</param>
        /// <param name="options"></param>
        /// <returns>Output Tensors in a Collection of NamedOnnxValue. User must dispose the output.</returns>
        public IDisposableReadOnlyCollection <DisposableNamedOnnxValue> Run(IReadOnlyCollection <NamedOnnxValue> inputs, IReadOnlyCollection <string> outputNames, RunOptions options)
        {
            // prepare inputs
            var inputNamesArray          = new string[inputs.Count];
            var inputValuesArray         = new IntPtr[inputs.Count];
            var pinnedInputBufferHandles = new System.Buffers.MemoryHandle[inputs.Count];
            var disposeInputs            = new bool[inputs.Count];

            int inputIndex = 0;

            foreach (var input in inputs)
            {
                inputNamesArray[inputIndex] = input.Name;

                // create a Tensor from the input if feasible, else throw a NotSupportedException for now
                input.ToNativeOnnxValue(
                    out inputValuesArray[inputIndex],
                    out pinnedInputBufferHandles[inputIndex],
                    out disposeInputs[inputIndex]);

                inputIndex++;
            }

            // prepare outputs
            string[] outputNamesArray  = outputNames as string[] ?? outputNames.ToArray();
            IntPtr[] outputValuesArray = new IntPtr[outputNames.Count];

            IntPtr status = NativeMethods.OrtRun(
                _nativeHandle,
                options.Handle,
                inputNamesArray,
                inputValuesArray,
                (UIntPtr)inputs.Count,
                outputNamesArray,
                (UIntPtr)outputNames.Count,
                outputValuesArray                                 /* Empty array is passed in to receive output OrtValue pointers */
                );

            try
            {
                NativeApiStatus.VerifySuccess(status);
                var result = new DisposableList <DisposableNamedOnnxValue>(outputValuesArray.Length);
                for (int i = 0; i < outputValuesArray.Length; i++)
                {
                    result.Add(DisposableNamedOnnxValue.CreateFromOnnxValue(outputNamesArray[i], outputValuesArray[i]));
                }

                return(result);
            }
            catch (OnnxRuntimeException)
            {
                // clean up the individual output values if they are not null
                for (int i = 0; i < outputValuesArray.Length; i++)
                {
                    if (outputValuesArray[i] != IntPtr.Zero)
                    {
                        NativeMethods.OrtReleaseValue(outputValuesArray[i]);
                    }
                }
                throw;
            }
            finally
            {
                for (int i = 0; i < inputs.Count; i++)
                {
                    if (disposeInputs[i])
                    {
                        NativeMethods.OrtReleaseValue(inputValuesArray[i]); // For elementary type Tensors, this should not release the buffer, but should delete the native tensor object.
                                                                            // For string tensors, this releases the native memory allocated for the tensor, including the buffer
                        pinnedInputBufferHandles[i].Dispose();
                    }
                }
            }
        }
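        // Illustrative usage sketch for the NamedOnnxValue overload of Run above, assuming a model with one
        // float input named "input" of shape [1, 4] and one output named "output" (all hypothetical).
        private static void RunWithNamedOnnxValueExample(InferenceSession session)
        {
            var inputTensor = new DenseTensor<float>(new float[] { 1f, 2f, 3f, 4f }, new[] { 1, 4 });
            var inputs = new List<NamedOnnxValue>
            {
                NamedOnnxValue.CreateFromTensor("input", inputTensor)
            };

            using (var runOptions = new RunOptions())
            using (var results = session.Run(inputs, new[] { "output" }, runOptions))
            {
                foreach (var r in results)
                {
                    Console.WriteLine($"{r.Name}: first value = {r.AsTensor<float>().GetValue(0)}");
                }
            }
        }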
        /// <summary>
        /// Creates an instance of DisposableNamedOnnxValue and takes ownership of ortValue
        /// on success.
        /// </summary>
        /// <param name="name">name of the value</param>
        /// <param name="ortValue">underlying OrtValue</param>
        /// <returns>DisposableNamedOnnxValue</returns>
        internal static DisposableNamedOnnxValue CreateTensorFromOnnxValue(string name, OrtValue ortValue)
        {
            DisposableNamedOnnxValue result = null;

            /* Get Tensor element type */  //TODO: Assumed value is Tensor, need to support non-tensor types in future
            IntPtr typeAndShape = IntPtr.Zero;

            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorTypeAndShape(ortValue.Handle, out typeAndShape));
            TensorElementType elemType = TensorElementType.DataTypeMax;

            try
            {
                IntPtr el_type;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorElementType(typeAndShape, out el_type));
                elemType = (TensorElementType)el_type;
            }
            finally
            {
                NativeMethods.OrtReleaseTensorTypeAndShapeInfo(typeAndShape);
            }

            switch (elemType)
            {
            case TensorElementType.Float:
                result = DisposableNamedOnnxValueFromNativeTensor <float>(name, ortValue);
                break;

            case TensorElementType.Double:
                result = DisposableNamedOnnxValueFromNativeTensor <double>(name, ortValue);
                break;

            case TensorElementType.Int16:
                result = DisposableNamedOnnxValueFromNativeTensor <short>(name, ortValue);
                break;

            case TensorElementType.UInt16:
                result = DisposableNamedOnnxValueFromNativeTensor <ushort>(name, ortValue);
                break;

            case TensorElementType.Int32:
                result = DisposableNamedOnnxValueFromNativeTensor <int>(name, ortValue);
                break;

            case TensorElementType.UInt32:
                result = DisposableNamedOnnxValueFromNativeTensor <uint>(name, ortValue);
                break;

            case TensorElementType.Int64:
                result = DisposableNamedOnnxValueFromNativeTensor <long>(name, ortValue);
                break;

            case TensorElementType.UInt64:
                result = DisposableNamedOnnxValueFromNativeTensor <ulong>(name, ortValue);
                break;

            case TensorElementType.UInt8:
                result = DisposableNamedOnnxValueFromNativeTensor <byte>(name, ortValue);
                break;

            case TensorElementType.Int8:
                result = DisposableNamedOnnxValueFromNativeTensor <sbyte>(name, ortValue);
                break;

            case TensorElementType.String:
                result = DisposableNamedOnnxValueFromNativeTensor <string>(name, ortValue);
                break;

            case TensorElementType.Bool:
                result = DisposableNamedOnnxValueFromNativeTensor <bool>(name, ortValue);
                break;

            case TensorElementType.Float16:
                result = DisposableNamedOnnxValueFromNativeTensor <Float16>(name, ortValue);
                break;

            case TensorElementType.BFloat16:
                result = DisposableNamedOnnxValueFromNativeTensor <BFloat16>(name, ortValue);
                break;

            default:
                throw new NotSupportedException("Tensor of element type: " + elemType + " is not supported");
            }

            return(result);
        }
        /// <summary>
        /// Runs the loaded model for the given inputs, and fetches the specified outputs in <paramref name="outputNames"/>.
        /// </summary>
        /// <param name="inputs"></param>
        /// <param name="outputNames"></param>
        /// <param name="options"></param>
        /// <returns>Output Tensors in a Collection of NamedOnnxValue</returns>
        //TODO: kept internal until RunOptions is made public
        internal IDisposableReadOnlyCollection <DisposableNamedOnnxValue> Run(IReadOnlyCollection <NamedOnnxValue> inputs, IReadOnlyCollection <string> outputNames, RunOptions options)
        {
            var inputNames          = new string[inputs.Count];
            var inputTensors        = new IntPtr[inputs.Count];
            var pinnedBufferHandles = new System.Buffers.MemoryHandle[inputs.Count];

            int inputIndex = 0;

            foreach (var input in inputs)
            {
                inputNames[inputIndex] = input.Name;

                // create a Tensor from the input if feasible, else throw a NotSupportedException for now
                input.ToNativeOnnxValue(out inputTensors[inputIndex], out pinnedBufferHandles[inputIndex]);

                inputIndex++;
            }

            string[] outputNamesArray = outputNames.ToArray();
            IntPtr[] outputValueArray = new IntPtr[outputNames.Count];

            IntPtr status = NativeMethods.OrtRun(
                this._nativeHandle,
                IntPtr.Zero,                                  // TODO: use Run options when Run options creation API is available
                                                              // Passing null uses the default run options in the C-api
                inputNames,
                inputTensors,
                (UIntPtr)(inputTensors.Length),
                outputNamesArray,
                (UIntPtr)outputNames.Count,
                outputValueArray                                 /* An array of output value pointers. Array must be allocated by the caller */
                );

            try
            {
                NativeApiStatus.VerifySuccess(status);
                var result = new DisposableList <DisposableNamedOnnxValue>();
                for (uint i = 0; i < outputValueArray.Length; i++)
                {
                    result.Add(DisposableNamedOnnxValue.CreateFromOnnxValue(outputNamesArray[i], outputValueArray[i]));
                }

                return(result);
            }
            catch (OnnxRuntimeException)
            {
                // clean up the individual output values if they are not null
                for (uint i = 0; i < outputValueArray.Length; i++)
                {
                    if (outputValueArray[i] != IntPtr.Zero)
                    {
                        NativeMethods.OrtReleaseValue(outputValueArray[i]);
                    }
                }
                throw;
            }
            finally
            {
                // always unpin the input buffers, and delete the native Onnx value objects
                for (int i = 0; i < inputs.Count; i++)
                {
                    NativeMethods.OrtReleaseValue(inputTensors[i]); // this should not release the buffer, but should delete the native tensor object
                    pinnedBufferHandles[i].Dispose();
                }
            }
        }
        /// <summary>
        /// Runs the loaded model for the given inputs, and fetches the specified outputs in <paramref name="outputNames"/>. Uses the given RunOptions for this run.
        /// </summary>
        /// <param name="inputs"></param>
        /// <param name="outputNames"></param>
        /// <param name="options"></param>
        /// <returns>Output Tensors in a Collection of NamedOnnxValue. User must dispose the output.</returns>
        public IDisposableReadOnlyCollection <DisposableNamedOnnxValue> Run(IReadOnlyCollection <NamedOnnxValue> inputs, IReadOnlyCollection <string> outputNames, RunOptions options)
        {
            var inputNames          = new string[inputs.Count];
            var inputTensors        = new IntPtr[inputs.Count];
            var pinnedBufferHandles = new System.Buffers.MemoryHandle[inputs.Count];

            int inputIndex = 0;

            foreach (var input in inputs)
            {
                inputNames[inputIndex] = input.Name;

                // create a Tensor from the input if feasible, else throw a NotSupportedException for now
                input.ToNativeOnnxValue(out inputTensors[inputIndex],
                                        out pinnedBufferHandles[inputIndex]);

                inputIndex++;
            }

            string[] outputNamesArray = outputNames.ToArray();
            IntPtr[] outputValueArray = new IntPtr[outputNames.Count];

            IntPtr status = NativeMethods.OrtRun(
                this._nativeHandle,
                options.Handle,
                inputNames,
                inputTensors,
                (UIntPtr)(inputTensors.Length),
                outputNamesArray,
                (UIntPtr)outputNames.Count,
                outputValueArray                                 /* An array of output value pointers. Array must be allocated by the caller */
                );

            try
            {
                NativeApiStatus.VerifySuccess(status);
                var result = new DisposableList <DisposableNamedOnnxValue>();
                for (uint i = 0; i < outputValueArray.Length; i++)
                {
                    result.Add(DisposableNamedOnnxValue.CreateFromOnnxValue(outputNamesArray[i], outputValueArray[i]));
                }

                return(result);
            }
            catch (OnnxRuntimeException)
            {
                // clean up the individual output values if they are not null
                for (uint i = 0; i < outputValueArray.Length; i++)
                {
                    if (outputValueArray[i] != IntPtr.Zero)
                    {
                        NativeMethods.OrtReleaseValue(outputValueArray[i]);
                    }
                }
                throw;
            }
            finally
            {
                inputIndex = 0;
                foreach (var input in inputs)
                {
                    // For NamedOnnxValue, always unpin the input buffers, and delete the native Onnx value objects
                    // For DisposableNamedOnnxValue, the user needs to do this by invoking Dispose
                    if (input.GetType() == typeof(NamedOnnxValue))
                    {
                        NativeMethods.OrtReleaseValue(inputTensors[inputIndex]); // For elementary type Tensors, this should not release the buffer, but should delete the native tensor object.
                                                                                 // For string tensors, this releases the native memory allocated for the tensor, including the buffer
                        pinnedBufferHandles[inputIndex].Dispose();
                    }

                    inputIndex++;
                }
            }
        }