Example #1
0
        /// <summary>
        /// Wraps the payload of <paramref name="namedValue"/> as a <see cref="Tensors.ITensor"/>.
        /// </summary>
        /// <param name="namedValue">The ONNX named value whose tensor payload is to be wrapped.</param>
        /// <returns>An <see cref="Tensors.ITensor"/> view over the underlying dense tensor.</returns>
        /// <exception cref="NotSupportedException">Thrown when the payload's element type is not one of the supported primitive types.</exception>
        public static Tensors.ITensor WrapAsInterface(this ONNX.NamedOnnxValue namedValue)
        {
            Tensors.ITensor tensor;

            // Probe each supported element type in turn; the first successful
            // wrap short-circuits the chain. Probe order matches the original
            // (floating point, char/bool, then integers by width).
            if (_TryWrap <Single>(namedValue, out tensor) ||
                _TryWrap <Double>(namedValue, out tensor) ||
                _TryWrap <Char>(namedValue, out tensor) ||
                _TryWrap <Boolean>(namedValue, out tensor) ||
                _TryWrap <Byte>(namedValue, out tensor) ||
                _TryWrap <SByte>(namedValue, out tensor) ||
                _TryWrap <UInt16>(namedValue, out tensor) ||
                _TryWrap <Int16>(namedValue, out tensor) ||
                _TryWrap <UInt32>(namedValue, out tensor) ||
                _TryWrap <Int32>(namedValue, out tensor) ||
                _TryWrap <UInt64>(namedValue, out tensor) ||
                _TryWrap <Int64>(namedValue, out tensor))
            {
                return tensor;
            }

            throw new NotSupportedException();
        }
Example #2
0
        /// <summary>
        /// Attempts to deep-clone <paramref name="src"/> into <paramref name="dst"/>.
        /// </summary>
        /// <param name="src">The ONNX named value to clone.</param>
        /// <param name="dst">On success, a new value with a copied tensor buffer; otherwise null.</param>
        /// <returns>True when the payload's element type is supported and the clone succeeded.</returns>
        public static bool TryClone(this ONNX.NamedOnnxValue src, out ONNX.NamedOnnxValue dst)
        {
            // Probe each supported element type in turn; the first successful
            // clone short-circuits the chain. Probe order matches the original.
            if (_TryClone <Single>(src, out dst) ||
                _TryClone <Double>(src, out dst) ||
                _TryClone <Boolean>(src, out dst) ||
                _TryClone <Char>(src, out dst) ||
                _TryClone <Byte>(src, out dst) ||
                _TryClone <SByte>(src, out dst) ||
                _TryClone <Int16>(src, out dst) ||
                _TryClone <UInt16>(src, out dst) ||
                _TryClone <Int32>(src, out dst) ||
                _TryClone <UInt32>(src, out dst) ||
                _TryClone <Int64>(src, out dst) ||
                _TryClone <UInt64>(src, out dst))
            {
                return true;
            }

            dst = null;
            return false;
        }
Example #3
0
        /// <summary>
        /// Attempts to wrap the payload of <paramref name="namedValue"/> as an
        /// <see cref="Tensors.ITensor"/>, succeeding only when the payload is a
        /// <see cref="ONNX.Tensors.DenseTensor{T}"/> of exactly element type <typeparamref name="T"/>.
        /// </summary>
        /// <param name="namedValue">The ONNX named value to inspect.</param>
        /// <param name="tensor">On success, the wrapping tensor; otherwise null.</param>
        /// <returns>True when the payload matched and was wrapped.</returns>
        private static bool _TryWrap <T>(this ONNX.NamedOnnxValue namedValue, out Tensors.ITensor tensor)
            where T : unmanaged
        {
            var dense = namedValue.Value as ONNX.Tensors.DenseTensor <T>;

            tensor = dense == null
                ? null
                : new OnnxDenseTensor <T>(dense, namedValue.Name);

            return tensor != null;
        }
Example #4
0
        /// <summary>
        /// Attempts to deep-clone <paramref name="src"/>, succeeding only when its payload is a
        /// <see cref="ONNX.Tensors.DenseTensor{T}"/> of exactly element type <typeparamref name="TValue"/>.
        /// </summary>
        /// <param name="src">The ONNX named value to clone.</param>
        /// <param name="dst">On success, a new value (same name) over a freshly copied buffer; otherwise null.</param>
        /// <returns>True when the payload matched and was cloned.</returns>
        private static bool _TryClone <TValue>(this ONNX.NamedOnnxValue src, out ONNX.NamedOnnxValue dst)
            where TValue : unmanaged
        {
            dst = null;

            if (!(src.Value is ONNX.Tensors.DenseTensor <TValue> source)) return false;

            // Allocate a same-shaped tensor and copy the raw buffer across.
            var copy = new ONNX.Tensors.DenseTensor <TValue>(source.Dimensions);
            source.Buffer.CopyTo(copy.Buffer);

            dst = ONNX.NamedOnnxValue.CreateFromTensor(src.Name, copy);
            return true;
        }
Example #5
0
        /// <summary>
        /// Returns the dense tensor held by <paramref name="namedValue"/> when it already has
        /// element type <typeparamref name="T"/> and shape <paramref name="dims"/>; otherwise
        /// allocates a new tensor of that shape and rebinds <paramref name="namedValue"/> to it.
        /// </summary>
        /// <param name="namedValue">The value to read, and to replace when shape or type differ.</param>
        /// <param name="dims">The required tensor dimensions.</param>
        /// <returns>A dense tensor of type <typeparamref name="T"/> with shape <paramref name="dims"/>.</returns>
        private ONNX.Tensors.DenseTensor <T> _UpdateTensor <T>(ref ONNX.NamedOnnxValue namedValue, params int[] dims) where T : unmanaged
        {
            // Fast path: reuse the existing tensor when its shape already matches.
            if (namedValue.Value is ONNX.Tensors.DenseTensor <T> existing &&
                existing.Dimensions.SequenceEqual(dims))
            {
                return existing;
            }

            var fresh = new ONNX.Tensors.DenseTensor <T>(dims);

            // Rebind the caller's named value to the newly allocated tensor.
            namedValue = ONNX.NamedOnnxValue.CreateFromTensor(namedValue.Name, fresh);

            return fresh;
        }
Example #6
0
        /// <summary>
        /// Deep-clones <paramref name="src"/>.
        /// </summary>
        /// <param name="src">The ONNX named value to clone; may be null.</param>
        /// <returns>
        /// Null when <paramref name="src"/> is null; a same-named value with a null tensor when the
        /// payload is null; otherwise a full clone with a copied buffer.
        /// </returns>
        /// <exception cref="InvalidOperationException">Thrown when the payload's element type is not supported for cloning.</exception>
        public static ONNX.NamedOnnxValue Clone(this ONNX.NamedOnnxValue src)
        {
            if (src == null) return null;

            // An empty payload clones to a same-named value holding a null tensor.
            if (src.Value == null) return ONNX.NamedOnnxValue.CreateFromTensor <Byte>(src.Name, null);

            if (!src.TryClone(out ONNX.NamedOnnxValue dst))
            {
                throw new InvalidOperationException($"Can't clone tensor {src.Value}");
            }

            return dst;
        }
Example #7
0
        /// <summary>
        /// Runs the loaded model for the given inputs, and fetches the specified outputs in <paramref name="outputNames"/>.
        /// </summary>
        /// <param name="inputs"></param>
        /// <param name="outputNames"></param>
        /// <param name="options"></param>
        /// <returns>Output Tensors in a Dictionary</returns>
        //TODO: kept internal until RunOptions is made public
        /// <summary>
        /// Runs the loaded model for the given inputs, and fetches the specified outputs in <paramref name="outputNames"/>.
        /// </summary>
        /// <param name="inputs">Named input values; each is converted to a native ORT value and pinned for the call.</param>
        /// <param name="outputNames">Names of the outputs to fetch.</param>
        /// <param name="options">Run options (currently unused; default native run options are applied).</param>
        /// <returns>Output Tensors in a Dictionary</returns>
        //TODO: kept internal until RunOptions is made public
        internal IReadOnlyCollection <NamedOnnxValue> Run(IReadOnlyCollection <NamedOnnxValue> inputs, IReadOnlyCollection <string> outputNames, RunOptions options)
        {
            var inputNames          = new string[inputs.Count];
            var inputTensors        = new IntPtr[inputs.Count];
            var pinnedBufferHandles = new System.Buffers.MemoryHandle[inputs.Count];

            int offset = 0;

            foreach (var input in inputs)
            {
                inputNames[offset] = input.Name;

                // create Tensor from the input if feasible, else throw notsupported exception for now
                input.ToNativeOnnxValue(out inputTensors[offset], out pinnedBufferHandles[offset]);

                offset++;
            }

            string[] outputNamesArray = outputNames.ToArray();
            IntPtr[] outputValueArray = new IntPtr[outputNames.Count];

            IntPtr status = NativeMethods.OrtRunInference(
                this._nativeHandle,
                IntPtr.Zero,                                  // TODO: use Run options when Run options creation API is available
                                                              // Passing null uses the default run options in the C-api
                inputNames,
                inputTensors,
                (ulong)(inputTensors.Length),                    /* TODO: size_t, make it portable for x86 arm */
                outputNamesArray,
                (ulong)outputNames.Count,                        /* TODO: size_t, make it portable for x86 and arm */
                outputValueArray                                 /* An array of output value pointers. Array must be allocated by the caller */
                );

            try
            {
                NativeApiStatus.VerifySuccess(status);
                var result = new List <NamedOnnxValue>();
                for (int i = 0; i < outputValueArray.Length; i++)
                {
                    result.Add(NamedOnnxValue.CreateFromOnnxValue(outputNamesArray[i], outputValueArray[i]));
                }

                return(result);
            }
            catch (OnnxRuntimeException)
            {
                //clean up the individual output tensors if it is not null;
                for (int i = 0; i < outputValueArray.Length; i++)
                {
                    if (outputValueArray[i] != IntPtr.Zero)
                    {
                        NativeMethods.OrtReleaseValue(outputValueArray[i]);
                    }
                }
                // BUGFIX: was `throw e;`, which resets the stack trace; `throw;` preserves it.
                throw;
            }
            finally
            {
                // always unpin the input buffers, and delete the native Onnx value objects
                for (int i = 0; i < inputs.Count; i++)
                {
                    NativeMethods.OrtReleaseValue(inputTensors[i]); // this should not release the buffer, but should delete the native tensor object
                    pinnedBufferHandles[i].Dispose();
                }
            }
        }
Example #8
0
        /// <summary>
        /// Creates a managed <see cref="NamedOnnxValue"/> from a native ORT value handle by
        /// querying the native tensor's element type and dispatching to the matching generic wrapper.
        /// </summary>
        /// <param name="name">The name to assign to the resulting value.</param>
        /// <param name="nativeOnnxValue">Native handle to the ORT value (assumed to be a tensor).</param>
        /// <returns>A managed named value wrapping the native tensor.</returns>
        /// <exception cref="NotSupportedException">Thrown when the element type has no managed mapping.</exception>
        internal static NamedOnnxValue CreateFromOnnxValue(string name, IntPtr nativeOnnxValue)
        {
            /* Get Tensor element type */  //TODO: Assumed value is Tensor, need to support non-tensor types in future
            IntPtr            typeAndShape = IntPtr.Zero;
            TensorElementType elemType     = TensorElementType.DataTypeMax;

            try
            {
                NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorShapeAndType(nativeOnnxValue, out typeAndShape));
                elemType = NativeMethods.OrtGetTensorElementType(typeAndShape);
            }
            finally
            {
                // Release the native type-and-shape descriptor even when the query throws.
                if (typeAndShape != IntPtr.Zero)
                {
                    NativeMethods.OrtReleaseObject(typeAndShape);
                }
            }

            // Dispatch on element type; each case returns directly.
            switch (elemType)
            {
            case TensorElementType.Float:
                return NameOnnxValueFromNativeTensor <float>(name, nativeOnnxValue);

            case TensorElementType.Double:
                return NameOnnxValueFromNativeTensor <double>(name, nativeOnnxValue);

            case TensorElementType.Int16:
                return NameOnnxValueFromNativeTensor <short>(name, nativeOnnxValue);

            case TensorElementType.UInt16:
                return NameOnnxValueFromNativeTensor <ushort>(name, nativeOnnxValue);

            case TensorElementType.Int32:
                return NameOnnxValueFromNativeTensor <int>(name, nativeOnnxValue);

            case TensorElementType.UInt32:
                return NameOnnxValueFromNativeTensor <uint>(name, nativeOnnxValue);

            case TensorElementType.Int64:
                return NameOnnxValueFromNativeTensor <long>(name, nativeOnnxValue);

            case TensorElementType.UInt64:
                return NameOnnxValueFromNativeTensor <ulong>(name, nativeOnnxValue);

            case TensorElementType.UInt8:
                return NameOnnxValueFromNativeTensor <byte>(name, nativeOnnxValue);

            default:
                throw new NotSupportedException("Tensor of element type: " + elemType + " is not supported");
            }
        }