        /// <summary>
        /// Runs the loaded model on the given inputs and fetches the outputs named in <paramref name="outputNames"/>.
        /// </summary>
        /// <param name="inputs">Named input values (typically tensors) to feed to the model.</param>
        /// <param name="outputNames">Names of the model outputs to fetch.</param>
        /// <param name="options">Run options for this inference call.</param>
        /// <returns>The fetched outputs as a read-only collection of <see cref="NamedOnnxValue"/>.</returns>
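        /// <example>
        /// Illustrative sketch only; <c>session</c>, the tensor shape, and the input/output names are
        /// placeholders, and <c>runOptions</c> assumes the creation API noted in the TODO below.
        /// <code>
        /// var tensor = new DenseTensor&lt;float&gt;(new[] { 1, 3, 224, 224 });
        /// var inputs = new[] { NamedOnnxValue.CreateFromTensor("data_0", tensor) };
        /// var outputs = session.Run(inputs, new[] { "softmaxout_1" }, runOptions);
        /// foreach (var output in outputs)
        /// {
        ///     Console.WriteLine(output.Name);
        /// }
        /// </code>
        /// </example>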
        //TODO: kept internal until RunOptions is made public
        internal IReadOnlyCollection<NamedOnnxValue> Run(IReadOnlyCollection<NamedOnnxValue> inputs, IReadOnlyCollection<string> outputNames, RunOptions options)
        {
            var inputNames          = new string[inputs.Count];
            var inputTensors        = new IntPtr[inputs.Count];
            var pinnedBufferHandles = new System.Buffers.MemoryHandle[inputs.Count];
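            // Each input's managed buffer is pinned while its native tensor is created so the GC cannot move
            // the memory during the native call; the handles are disposed (unpinned) in the finally block.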

            int offset = 0;

            foreach (var input in inputs)
            {
                inputNames[offset] = input.Name;

                // Create a native ONNX value from the input if feasible; otherwise this throws a NotSupportedException for now
                input.ToNativeOnnxValue(out inputTensors[offset], out pinnedBufferHandles[offset]);

                offset++;
            }

            string[] outputNamesArray = outputNames.ToArray();
            IntPtr[] outputValueArray = new IntPtr[outputNames.Count];
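            // The native call fills outputValueArray with pointers to newly created native ONNX values;
            // this method owns them and must either wrap each one as a NamedOnnxValue or release it.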

            IntPtr status = NativeMethods.ONNXRuntimeRunInference(
                this._nativeHandle,
                IntPtr.Zero,                                  // TODO: use Run options when Run options creation API is available
                                                              // Passing null uses the default run options in the C-api
                inputNames,
                inputTensors,
                (ulong)(inputTensors.Length),                    /* TODO: size_t, make it portable for x86 and arm */
                outputNamesArray,
                (ulong)outputNames.Count,                        /* TODO: size_t, make it portable for x86 and arm */
                outputValueArray                                 /* An array of output value pointers. Array must be allocated by the caller */
                );

            try
            {
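                // Throws OnnxRuntimeException if the native call returned an error status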
                NativeApiStatus.VerifySuccess(status);
                var result = new List<NamedOnnxValue>();
                for (int i = 0; i < outputValueArray.Length; i++)
                {
                    result.Add(NamedOnnxValue.CreateFromOnnxValue(outputNamesArray[i], outputValueArray[i]));
                }

                return result;
            }
            catch (OnnxRuntimeException)
            {
                // Release any output values that were already allocated before the failure
                for (int i = 0; i < outputValueArray.Length; i++)
                {
                    if (outputValueArray[i] != IntPtr.Zero)
                    {
                        NativeMethods.ReleaseONNXValue(outputValueArray[i]);
                    }
                }
                throw;  // rethrow without resetting the stack trace
            }
            finally
            {
                // Always unpin the input buffers and release the native input value objects
                for (int i = 0; i < inputs.Count; i++)
                {
                    NativeMethods.ReleaseONNXValue(inputTensors[i]); // releases the native tensor object but not the pinned managed buffer it wraps
                    pinnedBufferHandles[i].Dispose();
                }
            }
        }