/// <summary>
/// Will extract keys and values from the map and create a DisposableNamedOnnxValue from it
/// </summary>
/// <param name="name">name of the output</param>
/// <param name="ortValueMap">ortValue that represents a map.
/// This function does not take ownership of the map as we copy all keys and values into a dictionary. We let the caller dispose of it</param>
/// <param name="allocator">allocator used to extract the key/value tensors from the map</param>
/// <returns>DisposableNamedOnnxValue</returns>
private static DisposableNamedOnnxValue DisposableNamedOnnxValueFromNativeMap(string name, OrtValue ortValueMap, OrtAllocator allocator)
{
    DisposableNamedOnnxValue result = null;
    // Map processing is currently not recursing. It is assumed to contain
    // only primitive types and string tensors. No sequences or maps.
    // The data is being copied to a dictionary and all ortValues are being disposed,
    // not mapped for client consumption.
    using (var cleanUpList = new DisposableList<IDisposable>())
    {
        // Fetch the keys tensor (element 0 of the map). We own the returned handle,
        // so it is wrapped in an OrtValue and queued for disposal.
        IntPtr nativeOnnxValueMapKeys = IntPtr.Zero;
        NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(ortValueMap.Handle, 0, allocator.Pointer, out nativeOnnxValueMapKeys));
        var ortValueKeys = new OrtValue(nativeOnnxValueMapKeys);
        cleanUpList.Add(ortValueKeys);

        // Fetch the values tensor (element 1 of the map), same ownership rules.
        IntPtr nativeOnnxValueMapValues = IntPtr.Zero;
        NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(ortValueMap.Handle, 1, allocator.Pointer, out nativeOnnxValueMapValues));
        var ortValueValues = new OrtValue(nativeOnnxValueMapValues);
        cleanUpList.Add(ortValueValues);

        // Determine the element type of the keys tensor to pick the dictionary key type.
        IntPtr typeAndShape = IntPtr.Zero;
        NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorTypeAndShape(nativeOnnxValueMapKeys, out typeAndShape));
        TensorElementType elemType = TensorElementType.DataTypeMax;
        try
        {
            IntPtr el_type;
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorElementType(typeAndShape, out el_type));
            elemType = (TensorElementType)el_type;
        }
        finally
        {
            // Type/shape info handle must always be released.
            NativeMethods.OrtReleaseTensorTypeAndShapeInfo(typeAndShape);
        }

        // XXX: This code always assumes that the value type is float and makes no checks
        // similar to that of the key. Also a Map value in general can also be another sequence
        // or map, not just a tensor.
        switch (elemType)
        {
            case TensorElementType.Int64:
                // BUGFIX: previously passed string.Empty here, which discarded the
                // caller-supplied output name. Pass 'name' through so the result is
                // named consistently with the other value-creation paths.
                result = DisposableNamedOnnxValueFromNativeMapElements<Int64, float>(name, ortValueKeys, ortValueValues);
                break;
            case TensorElementType.String:
                result = DisposableNamedOnnxValueFromNativeMapElements<string, float>(name, ortValueKeys, ortValueValues);
                break;
            default:
                throw new NotSupportedException("Map of element type: " + elemType + " is not supported");
        }
    }
    return result;
}
/// <summary>
/// This method will create an instance of DisposableNamedOnnxValue that will own ortSequenceValue
/// and all disposable native objects that are elements of the sequence
/// </summary>
/// <param name="name">name to assign to the resulting value</param>
/// <param name="ortValueSequence">ortValueElement that has native sequence</param>
/// <param name="allocator">used allocator</param>
/// <returns>DisposableNamedOnnxValue wrapping the whole sequence</returns>
private static DisposableNamedOnnxValue DisposableNamedOnnxValueFromSequence(string name, OrtValue ortValueSequence, OrtAllocator allocator)
{
    DisposableNamedOnnxValue result = null;
    IntPtr count;
    // Query how many elements the native sequence holds.
    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValueCount(ortValueSequence.Handle, out count));
    var sequence = new DisposableList<DisposableNamedOnnxValue>(count.ToInt32());
    try
    {
        for (int i = 0; i < count.ToInt32(); i++)
        {
            // OrtGetValue hands us a native element handle that we own; the OrtValue
            // wrapper below guarantees it is released once the element has been copied
            // (or an exception is thrown).
            IntPtr nativeOnnxValueSeq;
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(ortValueSequence.Handle, i, allocator.Pointer, out nativeOnnxValueSeq));
            using (var ortValueElement = new OrtValue(nativeOnnxValueSeq))
            {
                // Will take ownership or throw
                sequence.Add(CreateFromOrtValue(string.Empty, ortValueElement, allocator));
            }
        }
        // NativeOrtValueCollectionOwner will take ownership of ortValueSequence and will make sure sequence
        // is also disposed.
        var nativeCollectionManager = new NativeOrtValueCollectionOwner(ortValueSequence, sequence);
        result = new DisposableNamedOnnxValue(name, sequence, OnnxValueType.ONNX_TYPE_SEQUENCE, TensorElementType.DataTypeMax, nativeCollectionManager);
    }
    catch (Exception)
    {
        // On any failure, dispose the partially built element list; ortValueSequence
        // itself remains owned by the caller in this path.
        sequence.Dispose();
        throw;
    }
    return (result);
}
/// <summary>
/// Creates a DisposableNamedOnnxValue from a native OrtValue handle, dispatching on the
/// runtime ONNX value type (tensor, sequence or map). Takes ownership of nativeOnnxValue
/// on the tensor path; sequence/map elements are extracted via the supplied allocator.
/// </summary>
/// <param name="name">name to assign to the resulting value</param>
/// <param name="nativeOnnxValue">native OrtValue handle</param>
/// <param name="allocator">allocator used when extracting sequence/map elements</param>
internal static DisposableNamedOnnxValue CreateFromOnnxValue(string name, IntPtr nativeOnnxValue, OrtAllocator allocator)
{
    IntPtr valueType;
    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValueType(nativeOnnxValue, out valueType));
    OnnxValueType onnxValueType = (OnnxValueType)valueType;
    switch (onnxValueType)
    {
        case OnnxValueType.ONNX_TYPE_TENSOR:
            return (CreateTensorFromOnnxValue(name, nativeOnnxValue));

        case OnnxValueType.ONNX_TYPE_SEQUENCE:
            IntPtr count = IntPtr.Zero;
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValueCount(nativeOnnxValue, out count));
            var sequence = new DisposableList<DisposableNamedOnnxValue>(count.ToInt32());
            // NOTE(review): if CreateFromOnnxValue throws mid-loop, 'sequence' and the
            // already-extracted element handles are not disposed here — potential native
            // leak on the error path; verify against the owning callers.
            for (int i = 0; i < count.ToInt32(); i++)
            {
                IntPtr nativeOnnxValueSeq;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(nativeOnnxValue, i, allocator.Pointer, out nativeOnnxValueSeq));
                // Recurse: each element may itself be a tensor, sequence or map.
                sequence.Add(CreateFromOnnxValue(string.Empty, nativeOnnxValueSeq, allocator));
            }
            return (new DisposableNamedOnnxValue(name, sequence, OnnxValueType.ONNX_TYPE_SEQUENCE, TensorElementType.DataTypeMax, null));

        case OnnxValueType.ONNX_TYPE_MAP:
            // Element 0 = keys tensor, element 1 = values tensor.
            IntPtr nativeOnnxValueMapKeys = IntPtr.Zero;
            IntPtr nativeOnnxValueMapValues = IntPtr.Zero;
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(nativeOnnxValue, 0, allocator.Pointer, out nativeOnnxValueMapKeys));
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(nativeOnnxValue, 1, allocator.Pointer, out nativeOnnxValueMapValues));
            // Determine the key tensor's element type to select the map's key type.
            IntPtr typeAndShape = IntPtr.Zero;
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorTypeAndShape(nativeOnnxValueMapKeys, out typeAndShape));
            TensorElementType elemType = TensorElementType.DataTypeMax;
            try
            {
                IntPtr el_type;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorElementType(typeAndShape, out el_type));
                elemType = (TensorElementType)el_type;
            }
            finally
            {
                // Type/shape info handle must always be released.
                NativeMethods.OrtReleaseTensorTypeAndShapeInfo(typeAndShape);
            }
            // NOTE(review): ownership of nativeOnnxValueMapKeys/Values on the throwing
            // paths below is unclear from this file — presumably
            // DisposableNamedOnnxValueFromNativeMap takes ownership on success; confirm
            // whether the NotSupportedException path leaks them.
            switch (elemType)
            {
                case TensorElementType.Int64:
                    return (DisposableNamedOnnxValueFromNativeMap<Int64, float>(string.Empty, nativeOnnxValueMapKeys, nativeOnnxValueMapValues));
                case TensorElementType.String:
                    return (DisposableNamedOnnxValueFromNativeMap<string, float>(string.Empty, nativeOnnxValueMapKeys, nativeOnnxValueMapValues));
                default:
                    throw new NotSupportedException("Map of element type: " + elemType + " is not supported");
            }

        default:
            throw new NotSupportedException("OnnxValueType : " + onnxValueType + " is not supported");
    }
}
/// <summary>
/// Invokes OrtRun with pre-marshalled name/value handle arrays and wraps every
/// returned native output handle in an OrtValue. The resulting list is registered
/// on the caller's cleanup list so it is disposed with the rest of the run state.
/// </summary>
/// <param name="options">run options whose native handle is passed to OrtRun</param>
/// <param name="inputNames">pinned UTF-8 input name pointers</param>
/// <param name="inputValues">native input OrtValue handles</param>
/// <param name="outputNames">pinned UTF-8 output name pointers</param>
/// <param name="cleanupList">receives the output list for scoped disposal</param>
/// <returns>disposable list of output OrtValues</returns>
private DisposableList<OrtValue> RunImpl(RunOptions options, IntPtr[] inputNames, IntPtr[] inputValues, IntPtr[] outputNames, DisposableList<IDisposable> cleanupList)
{
    var outputs = new DisposableList<OrtValue>(outputNames.Length);
    cleanupList.Add(outputs);

    // The native side fills this array with one OrtValue pointer per requested output.
    var outputHandles = new IntPtr[outputNames.Length];
    NativeApiStatus.VerifySuccess(NativeMethods.OrtRun(
                                      _nativeHandle,
                                      options.Handle,
                                      inputNames,
                                      inputValues,
                                      (UIntPtr)inputNames.Length,
                                      outputNames,
                                      (UIntPtr)outputNames.Length,
                                      outputHandles));

    for (int i = 0; i < outputHandles.Length; ++i)
    {
        outputs.Add(new OrtValue(outputHandles[i]));
    }

    return outputs;
}
/// <summary>
/// This fetches bound outputs after running the model with RunWithBinding()
/// </summary>
/// <returns>IDisposableReadOnlyCollection of OrtValue; empty list when nothing is bound</returns>
public IDisposableReadOnlyCollection<OrtValue> GetOutputValues()
{
    IntPtr ortValues = IntPtr.Zero;
    UIntPtr count = UIntPtr.Zero;
    var allocator = OrtAllocator.DefaultInstance;
    // Native call returns an allocator-owned array of OrtValue pointers.
    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetBoundOutputValues(handle, allocator.Pointer, out ortValues, out count));
    if (count.Equals(UIntPtr.Zero))
    {
        return new DisposableList<OrtValue>();
    }

    // The pointer array itself must be freed via the allocator; OrtMemoryAllocation does
    // that when this scope exits. The individual OrtValues are owned by the returned list.
    using (var ortValuesAllocation = new OrtMemoryAllocation(allocator, ortValues, 0))
    {
        int outputCount = (int)count;
        var ortList = new DisposableList<OrtValue>(outputCount);
        try
        {
            for (int i = 0; i < outputCount; ++i)
            {
                IntPtr ortValue = Marshal.ReadIntPtr(ortValues, IntPtr.Size * i);
                ortList.Add(new OrtValue(ortValue));
            }
        }
        catch (Exception)
        {
            ortList.Dispose();
            throw; // BUGFIX: was 'throw e;', which resets the original stack trace
        }
        return ortList;
    }
}
/// <summary>
/// This method returns a collection of DisposableNamedOnnxValue as in other interfaces.
/// Queries names from the OrtIoBinding object and pairs them with the array of OrtValues returned
/// from OrtIoBinding.GetOutputValues()
/// </summary>
/// <param name="runOptions">RunOptions</param>
/// <param name="ioBinding">OrtIoBinding instance with bindings</param>
/// <param name="names">optional parameter. If you already know the names of the outputs you can save a native
/// call to retrieve output names. They will be paired with the returned OrtValues and combined into DisposableNamedOnnxValues.
/// Otherwise, the method will retrieve output names from the OrtIoBinding instance.
/// It is an error if you supply a different number of names than the returned outputs</param>
public IDisposableReadOnlyCollection<DisposableNamedOnnxValue> RunWithBindingAndNames(RunOptions runOptions, OrtIoBinding ioBinding, string[] names = null)
{
    NativeApiStatus.VerifySuccess(NativeMethods.OrtRunWithBinding(Handle, runOptions.Handle, ioBinding.Handle));
    using (var ortValues = ioBinding.GetOutputValues())
    {
        string[] outputNames = names;
        if (outputNames == null)
        {
            outputNames = ioBinding.GetOutputNames();
        }

        if (outputNames.Length != ortValues.Count)
        {
            // BUGFIX: the message previously used names.Length, which throws
            // NullReferenceException when names == null (names fetched from the binding).
            throw new OnnxRuntimeException(ErrorCode.InvalidArgument,
                "Number of specified names: " + outputNames.Length + " does not match the output number: " + ortValues.Count);
        }

        var result = new DisposableList<DisposableNamedOnnxValue>(outputNames.Length);
        try
        {
            for (int i = 0; i < outputNames.Length; ++i)
            {
                var ortValue = ortValues.ElementAt(i);
                // CreateTensorFromOnnxValue takes ownership of the native handle,
                // so Disown() prevents a double release when ortValues is disposed.
                result.Add(DisposableNamedOnnxValue.CreateTensorFromOnnxValue(outputNames[i], ortValue.Handle));
                ortValue.Disown();
            }
        }
        catch (Exception)
        {
            result.Dispose();
            throw; // BUGFIX: was 'throw e;', which resets the original stack trace
        }
        return result;
    }
}
/// <summary>
/// Creates a native string tensor OrtValue and fills it from the managed Tensor of string.
/// NOTE: a previous pre-pass summing UTF-8 byte counts was dead code (its result was never
/// used) and has been removed.
/// </summary>
/// <param name="tensor">managed string tensor; must not be null</param>
/// <returns>OrtValue owning the filled native string tensor</returns>
/// <exception cref="OnnxRuntimeException">when tensor is null or a native call fails</exception>
private static OrtValue CreateStringTensor(Tensor<string> tensor)
{
    if (tensor == null)
    {
        throw new OnnxRuntimeException(ErrorCode.Fail, "Cast to Tensor<string> failed. BUG check!");
    }

    // Convert dimensions to the long[] shape the native API expects.
    long[] shape = new long[tensor.Dimensions.Length];
    for (int i = 0; i < tensor.Dimensions.Length; i++)
    {
        shape[i] = tensor.Dimensions[i];
    }

    // Allocate the native tensor (string slots are filled separately below).
    IntPtr valueHandle = IntPtr.Zero;
    NativeApiStatus.VerifySuccess(NativeMethods.OrtCreateTensorAsOrtValue(
                                      OrtAllocator.DefaultInstance.Pointer,
                                      shape,
                                      (UIntPtr)(shape.Length),
                                      TensorElementType.String,
                                      out valueHandle
                                      ));

    var ortValue = new OrtValue(valueHandle);
    try
    {
        // Fill the native tensor, using GetValue(index) from the Tensor<string>.
        var len = tensor.Length;
        var nativeStrings = new IntPtr[len];
        using (var pinnedHandles = new DisposableList<PinnedGCHandle>((int)len))
        {
            // Pin every zero-terminated UTF-8 copy so the native side reads stable pointers.
            for (int i = 0; i < len; i++)
            {
                var utf8str = NativeOnnxValueHelper.StringToZeroTerminatedUtf8(tensor.GetValue(i));
                var gcHandle = GCHandle.Alloc(utf8str, GCHandleType.Pinned);
                nativeStrings[i] = gcHandle.AddrOfPinnedObject();
                pinnedHandles.Add(new PinnedGCHandle(gcHandle));
            }

            // Pin the pointer array itself for the duration of the native call.
            using (var pinnedStrings = new PinnedGCHandle(GCHandle.Alloc(nativeStrings, GCHandleType.Pinned)))
                NativeApiStatus.VerifySuccess(NativeMethods.OrtFillStringTensor(ortValue.Handle, nativeStrings, (UIntPtr)len));
        }
    }
    catch (Exception)
    {
        // BUGFIX: broadened from OnnxRuntimeException — any failure (e.g. an allocation
        // failure while pinning) must release the native tensor or it leaks.
        ortValue.Dispose();
        throw;
    }
    return ortValue;
}
/// <summary>
/// This function obtains ortValues for NamedOnnxValue.
/// The problem with NamedOnnxValue is that it does not contain any Onnx (OrtValue),
/// so calling ToOrtValue creates a new instance of OrtValue that needs to be disposed.
/// The deriving object DisposableNamedOnnxValue actually contains and owns OrtValue and returns it.
/// </summary>
/// <param name="values">input values to convert</param>
/// <param name="cleanupList">receives every created OrtValue and pin handle for scoped disposal</param>
/// <returns>array of native OrtValue handles, parallel to values</returns>
private IntPtr[] GetOrtValuesHandles(IReadOnlyCollection<NamedOnnxValue> values, DisposableList<IDisposable> cleanupList)
{
    IntPtr[] result = new IntPtr[values.Count];
    int inputIndex = 0;
    // PERF FIX: enumerate directly. The original indexed loop called ElementAt(i),
    // which is O(i) on non-indexable collections, making the whole loop O(n^2).
    foreach (var input in values)
    {
        MemoryHandle? memHandle;
        var ortValue = input.ToOrtValue(out memHandle);
        if (memHandle.HasValue)
        {
            // The pin must outlive the native run; dispose it with the rest of the run state.
            cleanupList.Add(memHandle);
        }
        cleanupList.Add(ortValue);
        result[inputIndex] = ortValue.Handle;
        ++inputIndex;
    }
    return result;
}
/// <summary>
/// Run helper: converts each item's name to a pinned, zero-terminated UTF-8 buffer.
/// </summary>
/// <param name="inputs">items whose names to convert to zero-terminated UTF-8 and pin</param>
/// <param name="extractor">extracts the name string from each item</param>
/// <param name="cleanupList">list to add pinned memory to for later disposal</param>
/// <returns>array of pinned native string pointers, parallel to inputs</returns>
private IntPtr[] ConvertNamesToUtf8<T>(IReadOnlyCollection<T> inputs, NameExtractor<T> extractor, DisposableList<IDisposable> cleanupList)
{
    var result = new IntPtr[inputs.Count];
    int i = 0;
    // PERF FIX: enumerate directly. The original indexed loop called ElementAt(i),
    // which is O(i) on non-indexable collections, making the whole loop O(n^2).
    foreach (var item in inputs)
    {
        var name = extractor(item);
        var utf8Name = NativeOnnxValueHelper.StringToZeroTerminatedUtf8(name);
        // Pin the buffer so the native side can hold the pointer until cleanup.
        var pinnedHandle = new PinnedGCHandle(GCHandle.Alloc(utf8Name, GCHandleType.Pinned));
        result[i++] = pinnedHandle.Pointer;
        cleanupList.Add(pinnedHandle);
    }
    return result;
}
/// <summary>
/// Pairs each OrtValue with its output name and wraps it in a DisposableNamedOnnxValue.
/// On failure, disposes everything created so far before rethrowing.
/// </summary>
/// <param name="ortValues">output values, parallel to outputNames</param>
/// <param name="outputNames">names for the outputs</param>
/// <returns>disposable collection of named values; caller must dispose</returns>
IDisposableReadOnlyCollection<DisposableNamedOnnxValue> CreateDisposableResult(List<OrtValue> ortValues, IReadOnlyCollection<string> outputNames)
{
    var result = new DisposableList<DisposableNamedOnnxValue>(outputNames.Count);
    try
    {
        // PERF FIX: walk the names with an enumerator. The original called
        // outputNames.ElementAt(i) per iteration — O(n^2) on non-indexable collections.
        using (var nameIt = outputNames.GetEnumerator())
        {
            for (int i = 0; i < ortValues.Count; i++)
            {
                nameIt.MoveNext();
                result.Add(DisposableNamedOnnxValue.CreateFromOrtValue(nameIt.Current, ortValues[i]));
            }
        }
    }
    catch (Exception)
    {
        // BUGFIX: broadened from OnnxRuntimeException so 'result' cannot leak when
        // CreateFromOrtValue throws some other exception type; 'throw;' (was 'throw e;')
        // preserves the original stack trace.
        result.Dispose();
        throw;
    }
    return result;
}
/// <summary>
/// Runs the loaded model for the given inputs, and fetches the specified outputs in <paramref name="outputNames"/>. Uses the given RunOptions for this run.
/// </summary>
/// <param name="inputNames">Specify a collection of string that indicates the input names. Should match <paramref name="inputValues"/>.</param>
/// <param name="inputValues">Specify a collection of <see cref="FixedBufferOnnxValue"/> that indicates the input values.</param>
/// <param name="outputNames">Specify a collection of string that indicates the output names to fetch.</param>
/// <param name="options">run options for this run</param>
/// <returns>Output Tensors in a Collection of NamedOnnxValue. User must dispose the output.</returns>
public IDisposableReadOnlyCollection<DisposableNamedOnnxValue> Run(
    IReadOnlyCollection<string> inputNames,
    IReadOnlyCollection<FixedBufferOnnxValue> inputValues,
    IReadOnlyCollection<string> outputNames,
    RunOptions options)
{
    if (inputNames.Count != inputValues.Count)
    {
        throw new ArgumentException($"Length of {nameof(inputNames)} ({inputNames.Count}) must match that of {nameof(inputValues)} ({inputValues.Count}).");
    }

    // prepare inputs: FixedBufferOnnxValue already owns its native OrtValue, so we
    // only collect the handles here — no per-run allocation or cleanup needed.
    string[] inputNamesArray = inputNames as string[] ?? inputNames.ToArray();
    IntPtr[] inputValuesArray = new IntPtr[inputNames.Count];
    int inputIndex = 0;
    foreach (var input in inputValues)
    {
        inputValuesArray[inputIndex] = input.Value;
        inputIndex++;
    }

    // prepare outputs
    string[] outputNamesArray = outputNames as string[] ?? outputNames.ToArray();
    IntPtr[] outputValuesArray = new IntPtr[outputNames.Count];

    IntPtr status = NativeMethods.OrtRun(
                        _nativeHandle,
                        options.Handle,
                        inputNamesArray,
                        inputValuesArray,
                        (UIntPtr)inputNames.Count,
                        outputNamesArray,
                        (UIntPtr)outputNames.Count,
                        outputValuesArray /* Empty array is passed in to receive output OrtValue pointers */
                        );

    try
    {
        NativeApiStatus.VerifySuccess(status);
        var result = new DisposableList<DisposableNamedOnnxValue>(outputValuesArray.Length);
        for (int i = 0; i < outputValuesArray.Length; i++)
        {
            // CreateFromOnnxValue takes ownership of each native output handle.
            result.Add(DisposableNamedOnnxValue.CreateFromOnnxValue(outputNamesArray[i], outputValuesArray[i]));
        }
        return result;
    }
    catch (OnnxRuntimeException)
    {
        // clean up the individual output values if not null
        for (int i = 0; i < outputValuesArray.Length; i++)
        {
            if (outputValuesArray[i] != IntPtr.Zero)
            {
                NativeMethods.OrtReleaseValue(outputValuesArray[i]);
            }
        }
        throw; // BUGFIX: was 'throw e;', which resets the original stack trace
    }
}
/// <summary>
/// Runs the loaded model for the given inputs, and fetches the specified outputs in <paramref name="outputNames"/>. Uses the given RunOptions for this run.
/// </summary>
/// <param name="inputs">Specify a collection of <see cref="NamedOnnxValue"/> that indicates the input values.</param>
/// <param name="outputNames">Specify a collection of string that indicates the output names to fetch.</param>
/// <param name="options">run options for this run</param>
/// <returns>Output Tensors in a Collection of NamedOnnxValue. User must dispose the output.</returns>
public IDisposableReadOnlyCollection<DisposableNamedOnnxValue> Run(IReadOnlyCollection<NamedOnnxValue> inputs, IReadOnlyCollection<string> outputNames, RunOptions options)
{
    // prepare inputs
    var inputNamesArray = new string[inputs.Count];
    var inputValuesArray = new IntPtr[inputs.Count];
    var pinnedInputBufferHandles = new System.Buffers.MemoryHandle[inputs.Count];
    // tracks which native input values this method created and must release in 'finally'
    var disposeInputs = new bool[inputs.Count];

    int inputIndex = 0;
    foreach (var input in inputs)
    {
        inputNamesArray[inputIndex] = input.Name;

        // create Tensor from the input if feasible, else throw notsupported exception for now
        input.ToNativeOnnxValue(
            out inputValuesArray[inputIndex],
            out pinnedInputBufferHandles[inputIndex],
            out disposeInputs[inputIndex]);

        inputIndex++;
    }

    // prepare outputs
    string[] outputNamesArray = outputNames as string[] ?? outputNames.ToArray();
    IntPtr[] outputValuesArray = new IntPtr[outputNames.Count];

    IntPtr status = NativeMethods.OrtRun(
                        _nativeHandle,
                        options.Handle,
                        inputNamesArray,
                        inputValuesArray,
                        (UIntPtr)inputs.Count,
                        outputNamesArray,
                        (UIntPtr)outputNames.Count,
                        outputValuesArray /* Empty array is passed in to receive output OrtValue pointers */
                        );

    try
    {
        NativeApiStatus.VerifySuccess(status);
        var result = new DisposableList<DisposableNamedOnnxValue>(outputValuesArray.Length);
        for (int i = 0; i < outputValuesArray.Length; i++)
        {
            // CreateFromOnnxValue takes ownership of each native output handle.
            result.Add(DisposableNamedOnnxValue.CreateFromOnnxValue(outputNamesArray[i], outputValuesArray[i]));
        }
        return result;
    }
    catch (OnnxRuntimeException)
    {
        // clean up the individual output values if not null
        for (int i = 0; i < outputValuesArray.Length; i++)
        {
            if (outputValuesArray[i] != IntPtr.Zero)
            {
                NativeMethods.OrtReleaseValue(outputValuesArray[i]);
            }
        }
        throw; // BUGFIX: was 'throw e;', which resets the original stack trace
    }
    finally
    {
        for (int i = 0; i < inputs.Count; i++)
        {
            if (disposeInputs[i])
            {
                NativeMethods.OrtReleaseValue(inputValuesArray[i]); // For elementary type Tensors, this should not release the buffer, but should delete the native tensor object.
                                                                    // For string tensors, this releases the native memory allocated for the tensor, including the buffer
                pinnedInputBufferHandles[i].Dispose();
            }
        }
    }
}
/// <summary>
/// Runs the loaded model for the given inputs, and fetches the specified outputs in <paramref name="outputNames"/>.
/// </summary>
/// <param name="inputs">input values; a native OrtValue is created per input and released after the run</param>
/// <param name="outputNames">output names to fetch</param>
/// <param name="options">currently unused — the default run options are passed to the native API</param>
/// <returns>Output Tensors in a Collection of NamedOnnxValue</returns>
//TODO: kept internal until RunOptions is made public
internal IDisposableReadOnlyCollection<DisposableNamedOnnxValue> Run(IReadOnlyCollection<NamedOnnxValue> inputs, IReadOnlyCollection<string> outputNames, RunOptions options)
{
    var inputNames = new string[inputs.Count];
    var inputTensors = new IntPtr[inputs.Count];
    var pinnedBufferHandles = new System.Buffers.MemoryHandle[inputs.Count];

    int inputIndex = 0;
    foreach (var input in inputs)
    {
        inputNames[inputIndex] = input.Name;

        // create Tensor from the input if feasible, else throw notsupported exception for now
        input.ToNativeOnnxValue(out inputTensors[inputIndex], out pinnedBufferHandles[inputIndex]);

        inputIndex++;
    }

    string[] outputNamesArray = outputNames.ToArray();
    IntPtr[] outputValueArray = new IntPtr[outputNames.Count];

    IntPtr status = NativeMethods.OrtRun(
                        this._nativeHandle,
                        IntPtr.Zero,  // TODO: use Run options when Run options creation API is available
                                      // Passing null uses the default run options in the C-api
                        inputNames,
                        inputTensors,
                        (UIntPtr)(inputTensors.Length),
                        outputNamesArray,
                        (UIntPtr)outputNames.Count,
                        outputValueArray /* An array of output value pointers. Array must be allocated by the caller */
                        );

    try
    {
        NativeApiStatus.VerifySuccess(status);
        // presize to avoid re-allocations while adding outputs
        var result = new DisposableList<DisposableNamedOnnxValue>(outputValueArray.Length);
        for (int i = 0; i < outputValueArray.Length; i++)
        {
            // CreateFromOnnxValue takes ownership of each native output handle.
            result.Add(DisposableNamedOnnxValue.CreateFromOnnxValue(outputNamesArray[i], outputValueArray[i]));
        }
        return result;
    }
    catch (OnnxRuntimeException)
    {
        // clean up the individual output values if not null
        for (int i = 0; i < outputValueArray.Length; i++)
        {
            if (outputValueArray[i] != IntPtr.Zero)
            {
                NativeMethods.OrtReleaseValue(outputValueArray[i]);
            }
        }
        throw; // BUGFIX: was 'throw e;', which resets the original stack trace
    }
    finally
    {
        // always unpin the input buffers, and delete the native Onnx value objects
        for (int i = 0; i < inputs.Count; i++)
        {
            NativeMethods.OrtReleaseValue(inputTensors[i]); // this should not release the buffer, but should delete the native tensor object
            pinnedBufferHandles[i].Dispose();
        }
    }
}
/// <summary>
/// Runs the loaded model for the given inputs, and fetches the specified outputs in <paramref name="outputNames"/>. Uses the given RunOptions for this run.
/// </summary>
/// <param name="inputs">input values; for plain NamedOnnxValue the native value is created and released here</param>
/// <param name="outputNames">output names to fetch</param>
/// <param name="options">run options for this run</param>
/// <returns>Output Tensors in a Collection of NamedOnnxValue. User must dispose the output.</returns>
public IDisposableReadOnlyCollection<DisposableNamedOnnxValue> Run(IReadOnlyCollection<NamedOnnxValue> inputs, IReadOnlyCollection<string> outputNames, RunOptions options)
{
    var inputNames = new string[inputs.Count];
    var inputTensors = new IntPtr[inputs.Count];
    var pinnedBufferHandles = new System.Buffers.MemoryHandle[inputs.Count];

    int inputIndex = 0;
    foreach (var input in inputs)
    {
        inputNames[inputIndex] = input.Name;

        // create Tensor from the input if feasible, else throw notsupported exception for now
        input.ToNativeOnnxValue(out inputTensors[inputIndex], out pinnedBufferHandles[inputIndex]);

        inputIndex++;
    }

    string[] outputNamesArray = outputNames.ToArray();
    IntPtr[] outputValueArray = new IntPtr[outputNames.Count];

    IntPtr status = NativeMethods.OrtRun(
                        this._nativeHandle,
                        options.Handle,
                        inputNames,
                        inputTensors,
                        (UIntPtr)(inputTensors.Length),
                        outputNamesArray,
                        (UIntPtr)outputNames.Count,
                        outputValueArray /* An array of output value pointers. Array must be allocated by the caller */
                        );

    try
    {
        NativeApiStatus.VerifySuccess(status);
        var result = new DisposableList<DisposableNamedOnnxValue>();
        for (int i = 0; i < outputValueArray.Length; i++)
        {
            // CreateFromOnnxValue takes ownership of each native output handle.
            result.Add(DisposableNamedOnnxValue.CreateFromOnnxValue(outputNamesArray[i], outputValueArray[i]));
        }
        return result;
    }
    catch (OnnxRuntimeException)
    {
        // clean up the individual output values if not null
        for (int i = 0; i < outputValueArray.Length; i++)
        {
            if (outputValueArray[i] != IntPtr.Zero)
            {
                NativeMethods.OrtReleaseValue(outputValueArray[i]);
            }
        }
        throw; // BUGFIX: was 'throw e;', which resets the original stack trace
    }
    finally
    {
        inputIndex = 0;
        foreach (var input in inputs)
        {
            // For NamedOnnxValue, always unpin the input buffers, and delete the native Onnx value objects
            // For DisposableNamedOnnxValue, the user needs to do this by invoking Dispose
            if (input.GetType() == typeof(NamedOnnxValue))
            {
                NativeMethods.OrtReleaseValue(inputTensors[inputIndex]); // For elementary type Tensors, this should not release the buffer, but should delete the native tensor object.
                                                                         // For string tensors, this releases the native memory allocated for the tensor, including the buffer
                pinnedBufferHandles[inputIndex].Dispose();
            }

            inputIndex++;
        }
    }
}