private FixedBufferOnnxValue(MemoryHandle pinnedMemory, OrtValue ortValue, OnnxValueType onnxValueType, TensorElementType elementType)
{
    PinnedMemory = pinnedMemory;
    Value = ortValue;
    OnnxValueType = onnxValueType;
    ElementType = elementType;
}
/// <summary>
/// Ctor
/// </summary>
/// <param name="name">Name of the output value</param>
/// <param name="value">Managed object created to represent the output value, such as DenseTensor&lt;T&gt;,
/// List or Dictionary
/// </param>
/// <param name="onnxValueType">Use this to decide which accessor to call to fetch data: AsTensor(), AsDictionary()
/// or AsEnumerable()</param>
/// <param name="elementType">Tensor element type if the value type is a Tensor</param>
/// <param name="ortValueHolder">Object that holds native resources.
/// Typically, this is an output OrtValue that holds the native memory the Tensor is mapped onto, but it may also be
/// other things that need to be disposed by this instance, depending on how IOrtValueOwner is implemented.</param>
private DisposableNamedOnnxValue(string name, Object value, OnnxValueType onnxValueType,
    TensorElementType elementType, IOrtValueOwner ortValueHolder)
    : base(name, value)
{
    _ortValueHolder = ortValueHolder;
    ValueType = onnxValueType;
    ElementType = elementType;
}
internal NodeMetadata(OnnxValueType onnxValueType, int[] dimensions, string[] symbolicDimensions, Type type)
{
    _onnxValueType = onnxValueType;
    _dimensions = dimensions;
    _symbolicDimensions = symbolicDimensions;
    _type = type;
}
internal static DisposableNamedOnnxValue CreateFromOrtValue(string name, OrtValue ortValue, OrtAllocator allocator)
{
    DisposableNamedOnnxValue result = null;
    IntPtr valueType;
    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValueType(ortValue.Handle, out valueType));
    OnnxValueType onnxValueType = (OnnxValueType)valueType;
    switch (onnxValueType)
    {
        case OnnxValueType.ONNX_TYPE_TENSOR:
            result = CreateTensorFromOnnxValue(name, ortValue);
            break;
        case OnnxValueType.ONNX_TYPE_SEQUENCE:
            result = DisposableNamedOnnxValueFromSequence(name, ortValue, allocator);
            break;
        case OnnxValueType.ONNX_TYPE_MAP:
            result = DisposableNamedOnnxValueFromNativeMap(name, ortValue, allocator);
            break;
        default:
            throw new NotSupportedException("OnnxValueType : " + onnxValueType + " is not supported");
    }
    return result;
}
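// Illustrative sketch only (not part of the original file): a typical caller never invokes the
// factory above directly; it receives the resulting DisposableNamedOnnxValue instances from
// InferenceSession.Run() and dispatches on ValueType to pick the matching accessor. 'session',
// 'inputs' and the AsTensor<float>() element type are assumptions for this sketch.
//
//   using (var results = session.Run(inputs))
//   {
//       foreach (DisposableNamedOnnxValue r in results)
//       {
//           if (r.ValueType == OnnxValueType.ONNX_TYPE_TENSOR)
//           {
//               var t = r.AsTensor<float>(); // backed by native memory until 'results' is disposed
//           }
//       }
//   }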
private DisposableNamedOnnxValue(string name, Object value, OnnxValueType onnxValueType,
    TensorElementType elementType, NativeMemoryHandler nativeMemoryManager)
    : base(name, value)
{
    _onnxValueType = onnxValueType;
    _elementType = elementType;
    _nativeMemoryManager = nativeMemoryManager;
}
internal static DisposableNamedOnnxValue CreateFromOnnxValue(string name, IntPtr nativeOnnxValue, OrtAllocator allocator)
{
    IntPtr valueType;
    NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValueType(nativeOnnxValue, out valueType));
    OnnxValueType onnxValueType = (OnnxValueType)valueType;
    switch (onnxValueType)
    {
        case OnnxValueType.ONNX_TYPE_TENSOR:
            return CreateTensorFromOnnxValue(name, nativeOnnxValue);

        case OnnxValueType.ONNX_TYPE_SEQUENCE:
            IntPtr count = IntPtr.Zero;
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValueCount(nativeOnnxValue, out count));
            var sequence = new DisposableList<DisposableNamedOnnxValue>(count.ToInt32());
            for (int i = 0; i < count.ToInt32(); i++)
            {
                IntPtr nativeOnnxValueSeq;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(nativeOnnxValue, i, allocator.Pointer, out nativeOnnxValueSeq));
                sequence.Add(CreateFromOnnxValue(string.Empty, nativeOnnxValueSeq, allocator));
            }
            return new DisposableNamedOnnxValue(name, sequence, OnnxValueType.ONNX_TYPE_SEQUENCE, TensorElementType.DataTypeMax, null);

        case OnnxValueType.ONNX_TYPE_MAP:
            IntPtr nativeOnnxValueMapKeys = IntPtr.Zero;
            IntPtr nativeOnnxValueMapValues = IntPtr.Zero;
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(nativeOnnxValue, 0, allocator.Pointer, out nativeOnnxValueMapKeys));
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetValue(nativeOnnxValue, 1, allocator.Pointer, out nativeOnnxValueMapValues));

            IntPtr typeAndShape = IntPtr.Zero;
            NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorTypeAndShape(nativeOnnxValueMapKeys, out typeAndShape));
            TensorElementType elemType = TensorElementType.DataTypeMax;
            try
            {
                IntPtr el_type;
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorElementType(typeAndShape, out el_type));
                elemType = (TensorElementType)el_type;
            }
            finally
            {
                NativeMethods.OrtReleaseTensorTypeAndShapeInfo(typeAndShape);
            }

            switch (elemType)
            {
                case TensorElementType.Int64:
                    return DisposableNamedOnnxValueFromNativeMap<Int64, float>(string.Empty, nativeOnnxValueMapKeys, nativeOnnxValueMapValues);
                case TensorElementType.String:
                    return DisposableNamedOnnxValueFromNativeMap<string, float>(string.Empty, nativeOnnxValueMapKeys, nativeOnnxValueMapValues);
                default:
                    throw new NotSupportedException("Map of element type: " + elemType + " is not supported");
            }

        default:
            throw new NotSupportedException("OnnxValueType : " + onnxValueType + " is not supported");
    }
}
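// Sketch (an assumption, not from the source): consuming a map output produced by the
// ONNX_TYPE_MAP branch above. AsDictionary pairs the keys tensor with the values tensor;
// 'mapResult' is a hypothetical DisposableNamedOnnxValue obtained from a Run() call, and the
// <long, float> type arguments match the Int64-keyed map case handled above.
//
//   var scores = mapResult.AsDictionary<long, float>();
//   foreach (var kv in scores)
//   {
//       Console.WriteLine($"label {kv.Key}: {kv.Value}");
//   }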
/// <summary>
/// Attempts to pin the buffer and create a native OnnxValue out of it; the pinned MemoryHandle is passed to the output.
/// In that case, the pinned handle must be kept alive until the native OnnxValue is no longer used, and then disposed.
/// If the buffer cannot be pinned, the OnnxValue is created from a copy of the data, and the output pinnedMemoryHandle
/// contains a default value in that case.
/// Attempts to infer the type of the value while creating the OnnxValue.
/// </summary>
/// <param name="value"></param>
/// <param name="onnxValue"></param>
/// <param name="pinnedMemoryHandle"></param>
/// <param name="onnxValueType"></param>
/// <param name="elementType"></param>
internal static void CreateNativeOnnxValue(Object value, out IntPtr onnxValue, out MemoryHandle pinnedMemoryHandle,
    out OnnxValueType onnxValueType, out TensorElementType elementType)
{
    // try to cast value to Tensor<T>
    elementType = TensorElementType.DataTypeMax; // invalid
    IntPtr dataBufferPointer = IntPtr.Zero;
    int dataBufferLength = 0;
    ReadOnlySpan<int> shape = null;
    int rank = 0;
    onnxValue = IntPtr.Zero;
    if (!(value is Tensor<string>))
    {
        if (TryPinAsTensor<float>(value, out pinnedMemoryHandle, out dataBufferPointer, out dataBufferLength, out shape, out rank, out elementType)) { }
        else if (TryPinAsTensor<double>(value, out pinnedMemoryHandle, out dataBufferPointer, out dataBufferLength, out shape, out rank, out elementType)) { }
        else if (TryPinAsTensor<int>(value, out pinnedMemoryHandle, out dataBufferPointer, out dataBufferLength, out shape, out rank, out elementType)) { }
        else if (TryPinAsTensor<uint>(value, out pinnedMemoryHandle, out dataBufferPointer, out dataBufferLength, out shape, out rank, out elementType)) { }
        else if (TryPinAsTensor<long>(value, out pinnedMemoryHandle, out dataBufferPointer, out dataBufferLength, out shape, out rank, out elementType)) { }
        else if (TryPinAsTensor<ulong>(value, out pinnedMemoryHandle, out dataBufferPointer, out dataBufferLength, out shape, out rank, out elementType)) { }
        else if (TryPinAsTensor<short>(value, out pinnedMemoryHandle, out dataBufferPointer, out dataBufferLength, out shape, out rank, out elementType)) { }
        else if (TryPinAsTensor<ushort>(value, out pinnedMemoryHandle, out dataBufferPointer, out dataBufferLength, out shape, out rank, out elementType)) { }
        else if (TryPinAsTensor<byte>(value, out pinnedMemoryHandle, out dataBufferPointer, out dataBufferLength, out shape, out rank, out elementType)) { }
        else if (TryPinAsTensor<sbyte>(value, out pinnedMemoryHandle, out dataBufferPointer, out dataBufferLength, out shape, out rank, out elementType)) { }
        else if (TryPinAsTensor<bool>(value, out pinnedMemoryHandle, out dataBufferPointer, out dataBufferLength, out shape, out rank, out elementType)) { }
        // TODO: add other types
        else
        {
            // nothing to clean up here, since no memory has been pinned
            throw new NotSupportedException("The inference value " + nameof(value) + " is not of a supported type");
        }

        Debug.Assert(dataBufferPointer != IntPtr.Zero, "dataBufferPointer must be non-null after obtaining the pinned buffer");
        onnxValueType = OnnxValueType.ONNX_TYPE_TENSOR; // set onnx value type to tensor

        // copy the shape to a long[] to match the native int64_t[]
        long[] longShape = new long[rank];
        for (int i = 0; i < rank; i++)
        {
            longShape[i] = shape[i];
        }

        IntPtr status = NativeMethods.OrtCreateTensorWithDataAsOrtValue(
            NativeMemoryInfo.DefaultInstance.Handle,
            dataBufferPointer,
            (UIntPtr)dataBufferLength,
            longShape,
            (UIntPtr)rank,
            elementType,
            out onnxValue);
        try
        {
            NativeApiStatus.VerifySuccess(status);
        }
        catch (OnnxRuntimeException)
        {
            // unpin the buffer before propagating the failure; rethrow with 'throw' to preserve the stack trace
            pinnedMemoryHandle.Dispose();
            throw;
        }
    }
    // special case for string Tensor, data needs to be copied to the native buffer
    else
    {
        // calculate native tensor length (sum of string lengths in utf-8)
        var tensorValue = value as Tensor<string>;
        int totalLength = 0;
        for (int i = 0; i < tensorValue.Length; i++)
        {
            totalLength += Encoding.UTF8.GetByteCount(tensorValue.GetValue(i));
        }

        long[] longShape = new long[tensorValue.Dimensions.Length];
        for (int i = 0; i < tensorValue.Dimensions.Length; i++)
        {
            longShape[i] = tensorValue.Dimensions[i];
        }

        // allocate the native tensor
        IntPtr nativeTensor = IntPtr.Zero;
        try
        {
            NativeApiStatus.VerifySuccess(NativeMethods.OrtCreateTensorAsOrtValue(
                NativeMemoryAllocator.DefaultInstance.Handle,
                longShape,
                (UIntPtr)longShape.Length,
                TensorElementType.String,
                out nativeTensor));

            // fill the native tensor, using GetValue(index) from the Tensor<string>
            var len = tensorValue.Length;
            var stringsInTensor = new IntPtr[len];
            var pinnedHandles = new GCHandle[len + 1];
            pinnedHandles[len] = GCHandle.Alloc(stringsInTensor, GCHandleType.Pinned);
            try
            {
                for (int i = 0; i < len; i++)
                {
                    var utf8str = Encoding.UTF8.GetBytes(tensorValue.GetValue(i) + "\0");
                    pinnedHandles[i] = GCHandle.Alloc(utf8str, GCHandleType.Pinned);
                    stringsInTensor[i] = pinnedHandles[i].AddrOfPinnedObject();
                }
                NativeApiStatus.VerifySuccess(NativeMethods.OrtFillStringTensor(nativeTensor, stringsInTensor, (UIntPtr)len));
            }
            finally
            {
                foreach (var handle in pinnedHandles)
                {
                    if (handle.IsAllocated)
                    {
                        handle.Free();
                    }
                }
            }
        }
        catch (OnnxRuntimeException)
        {
            // release the native tensor if it was allocated, then always rethrow
            // so the failure is not silently swallowed
            if (nativeTensor != IntPtr.Zero)
            {
                NativeMethods.OrtReleaseValue(nativeTensor);
            }
            throw;
        }

        onnxValue = nativeTensor; // set the output
        pinnedMemoryHandle = default; // dummy value for the output
        onnxValueType = OnnxValueType.ONNX_TYPE_TENSOR; // set onnx value type to tensor
        elementType = TensorElementType.String; // set tensor element type to string
    }
}
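// Lifetime sketch (illustrative, not in the original file): the pinned handle returned by
// CreateNativeOnnxValue must outlive every native use of 'ortValue'. 'input' is a hypothetical
// DenseTensor<float>; NativeMethods.OrtReleaseValue is the same release entry point used above.
//
//   var input = new DenseTensor<float>(new[] { 1, 3 });
//   CreateNativeOnnxValue(input, out IntPtr ortValue, out MemoryHandle pin,
//                         out OnnxValueType vt, out TensorElementType et);
//   try
//   {
//       // ... pass ortValue to a native Run call ...
//   }
//   finally
//   {
//       NativeMethods.OrtReleaseValue(ortValue); // release the native value first
//       pin.Dispose();                           // then unpin the managed buffer
//   }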
internal NodeMetadata(OnnxValueType onnxValueType, int[] dimensions, Type type)
{
    _onnxValueType = onnxValueType;
    _dimensions = dimensions;
    _type = type;
}
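// Sketch (an assumption for illustration): NodeMetadata instances are normally read from a
// session's metadata dictionaries rather than constructed directly; 'session' is hypothetical.
//
//   foreach (var kv in session.InputMetadata)
//   {
//       Console.WriteLine($"{kv.Key}: dims [{string.Join(",", kv.Value.Dimensions)}]");
//   }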