Example 1
        /// <summary>
        /// Constructs OnnxModel object from file.
        /// </summary>
        /// <param name="modelFile">Model file path.</param>
        /// <param name="gpuDeviceId">GPU device ID to execute on. Null for CPU.</param>
        /// <param name="fallbackToCpu">If true, resumes CPU execution quitely upon GPU error.</param>
        public OnnxModel(string modelFile, int? gpuDeviceId = null, bool fallbackToCpu = false)
        {
            _modelFile = modelFile;

            if (gpuDeviceId != null)
            {
                try
                {
                    _session = new InferenceSession(modelFile,
                                                    SessionOptions.MakeSessionOptionWithCudaProvider(gpuDeviceId.Value));
                }
                catch (OnnxRuntimeException)
                {
                    if (fallbackToCpu)
                    {
                        _session = new InferenceSession(modelFile);
                    }
                    else
                    {
                        // If called from OnnxTransform, this is caught and rethrown.
                        throw;
                    }
                }
            }
            else
            {
                _session = new InferenceSession(modelFile);
            }

            ModelInfo   = new OnnxModelInfo(GetInputsInfo(), GetOutputsInfo());
            InputNames  = ModelInfo.InputsInfo.Select(i => i.Name).ToList();
            OutputNames = ModelInfo.OutputsInfo.Select(i => i.Name).ToList();
        }
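A minimal usage sketch for the constructor above (the file name and device id are hypothetical):

        // Hypothetical call: try CUDA device 0, and quietly fall back to the
        // CPU if CUDA initialization throws.
        var model = new OnnxModel("model.onnx", gpuDeviceId: 0, fallbackToCpu: true);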
Example 2
        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="onnxFileName"></param>
        /// <param name="gpuID"></param>
        /// <param name="batchSize"></param>
        public NetExecutorONNXRuntime(string onnxFileName, int gpuID)
        {
            //     if (gpuID < 0 || gpuID > 16) throw new Exception($"Invalid GPU ID { gpuID}");

#if CUDA
            if (gpuID == -999) // CPU. TO DO: clean this up
            {
                Session = new InferenceSession(onnxFileName);
            }
            else if (gpuID == -1)
            {
                Session = new InferenceSession(onnxFileName, SessionOptions.MakeSessionOptionWithCudaProvider());
            }
            else
            {
#if NOT
//Yields error: Unable to find an entry point named 'OrtSessionOptionsAppendExecutionProvider_Tensorrt' in DLL 'onnxruntime'.

                SessionOptions options = new SessionOptions();
                options.AppendExecutionProvider_Tensorrt(gpuID);
//        options.AppendExecutionProvider_CUDA();
                Session = new InferenceSession(onnxFileName, options);
#endif
                Session = new InferenceSession(onnxFileName, SessionOptions.MakeSessionOptionWithCudaProvider(gpuID));
            }
#else
            Session = new InferenceSession(onnxFileName);
#endif

            GPUID = gpuID;
        }
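The disabled #if NOT block fails because the plain CUDA build of the native onnxruntime library does not export the TensorRT entry point. A sketch of that path against a TensorRT-enabled native build (an assumption, not verified here):

            // Sketch, assuming a TensorRT-enabled onnxruntime native package:
            // register TensorRT first, with CUDA as the fallback provider.
            SessionOptions trtOptions = new SessionOptions();
            trtOptions.AppendExecutionProvider_Tensorrt(gpuID);
            trtOptions.AppendExecutionProvider_CUDA(gpuID);
            Session = new InferenceSession(onnxFileName, trtOptions);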
Example 3
        private void TestCUDAProviderOptions()
        {
            string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "squeezenet.onnx");

            using (var cleanUp = new DisposableListTest<IDisposable>())
            {
                var cudaProviderOptions = new OrtCUDAProviderOptions();
                cleanUp.Add(cudaProviderOptions);

                var providerOptionsDict = new Dictionary<string, string>();
                providerOptionsDict["device_id"]                    = "0";
                providerOptionsDict["gpu_mem_limit"]                = "20971520";
                providerOptionsDict["arena_extend_strategy"]        = "kSameAsRequested";
                providerOptionsDict["cudnn_conv_algo_search"]       = "DEFAULT";
                providerOptionsDict["do_copy_in_default_stream"]    = "1";
                providerOptionsDict["cudnn_conv_use_max_workspace"] = "1";
                providerOptionsDict["cudnn_conv1d_pad_to_nc1d"]     = "1";
                cudaProviderOptions.UpdateOptions(providerOptionsDict);

                var resultProviderOptionsDict = new Dictionary<string, string>();
                ProviderOptionsValueHelper.StringToDict(cudaProviderOptions.GetOptions(), resultProviderOptionsDict);

                // test provider options configuration
                string value;
                value = resultProviderOptionsDict["device_id"];
                Assert.Equal("0", value);
                value = resultProviderOptionsDict["gpu_mem_limit"];
                Assert.Equal("20971520", value);
                value = resultProviderOptionsDict["arena_extend_strategy"];
                Assert.Equal("kSameAsRequested", value);
                value = resultProviderOptionsDict["cudnn_conv_algo_search"];
                Assert.Equal("DEFAULT", value);
                value = resultProviderOptionsDict["do_copy_in_default_stream"];
                Assert.Equal("1", value);
                value = resultProviderOptionsDict["cudnn_conv_use_max_workspace"];
                Assert.Equal("1", value);
                value = resultProviderOptionsDict["cudnn_conv1d_pad_to_nc1d"];
                Assert.Equal("1", value);

                // test correctness of provider options
                SessionOptions options = SessionOptions.MakeSessionOptionWithCudaProvider(cudaProviderOptions);
                cleanUp.Add(options);

                var session = new InferenceSession(modelPath, options);
                cleanUp.Add(session);

                var     inputMeta = session.InputMetadata;
                var     container = new List<NamedOnnxValue>();
                float[] inputData = TestDataLoader.LoadTensorFromFile(@"bench.in"); // this is the data for only one input tensor for this model
                foreach (var name in inputMeta.Keys)
                {
                    Assert.Equal(typeof(float), inputMeta[name].ElementType);
                    Assert.True(inputMeta[name].IsTensor);
                    var tensor = new DenseTensor<float>(inputData, inputMeta[name].Dimensions);
                    container.Add(NamedOnnxValue.CreateFromTensor<float>(name, tensor));
                }

                session.Run(container);
            }
        }
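Note that the test above discards the value returned by session.Run. A sketch of consuming and disposing it (the result collection wraps native memory; System.Linq assumed):

                // Sketch: read back the first output, then dispose the native results.
                using (var results = session.Run(container))
                {
                    var output = results.First().AsTensor<float>();
                    Assert.NotNull(output);
                }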
Example 4
        static void Main()
        {
            // Session options
            Console.WriteLine($"FaceONNX: GPU Perfomance tests with CUDA provider\n");
            using var bitmap = new Bitmap(@"..\..\..\images\brad.jpg");
            var options = SessionOptions.MakeSessionOptionWithCudaProvider(gpuId);

            // FPS tests
            FaceDetectorFPSTest(options, bitmap);
            FaceDetectorLightFPSTest(options, bitmap);
        }
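gpuId is a field declared elsewhere in the original program. A self-contained variant of the options line, with device 0 assumed and the options disposed when the method exits:

            // Self-contained variant (device id 0 is an assumption; SessionOptions
            // is IDisposable, so `using` releases it deterministically).
            using var options = SessionOptions.MakeSessionOptionWithCudaProvider(0);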
Example 5
        static Tuple<InferenceSession, float[], DenseTensor<float>, float[]> OpenSessionSqueezeNet(int? cudaDeviceId = null)
        {
            string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "squeezenet.onnx");
            var    session   = (cudaDeviceId.HasValue)
                ? new InferenceSession(modelPath, SessionOptions.MakeSessionOptionWithCudaProvider(cudaDeviceId.Value))
                : new InferenceSession(modelPath);

            float[] inputData      = LoadTensorFromFile(@"bench.in");
            float[] expectedOutput = LoadTensorFromFile(@"bench.expected_out");
            var     inputMeta      = session.InputMetadata;
            var     tensor         = new DenseTensor<float>(inputData, inputMeta["data_0"].Dimensions);

            return new Tuple<InferenceSession, float[], DenseTensor<float>, float[]>(session, inputData, tensor, expectedOutput);
        }
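A hypothetical caller for the helper above, running the benchmark tensor through the CUDA session (pass null instead of 0 for CPU; System.Linq assumed):

        var opened = OpenSessionSqueezeNet(0);
        using (var session = opened.Item1)
        {
            var inputs = new List<NamedOnnxValue>
            {
                NamedOnnxValue.CreateFromTensor("data_0", opened.Item3)
            };
            using var results = session.Run(inputs);
            float[] actual = results.First().AsEnumerable<float>().ToArray();
            // Compare `actual` against opened.Item4 (the expected output) here.
        }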
Example 6
        public static void SettingSession(string model_path)
        {
            options.GraphOptimizationLevel = GraphOptimizationLevel.ORT_ENABLE_BASIC;

            try
            {
                session = new InferenceSession(model_path, SessionOptions.MakeSessionOptionWithCudaProvider(0));
            }
            catch (Exception e)
            {
                session = new InferenceSession(model_path, options);
            }
            inputName = session.InputMetadata.Keys.ToList()[0];
        }
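Two details in the snippet above are worth flagging: the GPU branch ignores the configured options, and catch (Exception) also swallows failures unrelated to CUDA. A narrower sketch:

            // Sketch: apply the optimization level on the GPU path too, and fall
            // back to CPU only when ONNX Runtime itself reports the failure.
            try
            {
                var gpuOptions = SessionOptions.MakeSessionOptionWithCudaProvider(0);
                gpuOptions.GraphOptimizationLevel = GraphOptimizationLevel.ORT_ENABLE_BASIC;
                session = new InferenceSession(model_path, gpuOptions);
            }
            catch (OnnxRuntimeException)
            {
                session = new InferenceSession(model_path, options);
            }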
Example 7
        public ORTWrapper(string modelPath, DNNMode mode)
        {
            // Optional : Create session options and set the graph optimization level for the session
            SessionOptions options = new SessionOptions();

            //options.GraphOptimizationLevel = GraphOptimizationLevel.ORT_ENABLE_EXTENDED;
            cfg = new Yolov3BaseConfig();

            this.mode = mode;
            switch (mode)
            {
            case DNNMode.LT:
            case DNNMode.Frame:
                session1 = new InferenceSession(modelPath, SessionOptions.MakeSessionOptionWithCudaProvider(0));
                break;

            case DNNMode.CC:
                session2 = new InferenceSession(modelPath, SessionOptions.MakeSessionOptionWithCudaProvider(0));
                break;
            }
        }
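Since every branch builds an identical CUDA session (and the options object declared at the top is never used), the provider options can be created once and shared; a condensed sketch:

            // Sketch: one CUDA options object serves all modes; only the target
            // field differs between the DNNMode branches.
            var cudaOptions = SessionOptions.MakeSessionOptionWithCudaProvider(0);
            switch (mode)
            {
            case DNNMode.LT:
            case DNNMode.Frame:
                session1 = new InferenceSession(modelPath, cudaOptions);
                break;

            case DNNMode.CC:
                session2 = new InferenceSession(modelPath, cudaOptions);
                break;
            }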
Example 8
        /// <summary>
        /// Constructs OnnxModel object from file.
        /// </summary>
        /// <param name="modelFile">Model file path.</param>
        /// <param name="gpuDeviceId">GPU device ID to execute on. Null for CPU.</param>
        /// <param name="fallbackToCpu">If true, resumes CPU execution quietly upon GPU error.</param>
        /// <param name="ownModelFile">If true, the <paramref name="modelFile"/> will be deleted when <see cref="OnnxModel"/> is
        /// no longer needed.</param>
        /// <param name="shapeDictionary"></param>
        public OnnxModel(string modelFile, int? gpuDeviceId = null, bool fallbackToCpu = false,
                         bool ownModelFile = false, IDictionary<string, int[]> shapeDictionary = null)
        {
            // If we don't own the model file, _disposed should be false to prevent deleting user's file.
            _disposed = false;

            if (gpuDeviceId != null)
            {
                try
                {
                    _session = new InferenceSession(modelFile,
                                                    SessionOptions.MakeSessionOptionWithCudaProvider(gpuDeviceId.Value));
                }
                catch (OnnxRuntimeException)
                {
                    if (fallbackToCpu)
                    {
                        _session = new InferenceSession(modelFile);
                    }
                    else
                    {
                        // If called from OnnxTransform, is caught and rethrown
                        throw;
                    }
                }
            }
            else
            {
                _session = new InferenceSession(modelFile);
            }

            try
            {
                // Load the ONNX model file and parse its input and output schema. The reason for doing so is that
                // ONNXRuntime doesn't expose full type information via its C# APIs.
                var model = new OnnxCSharpToProtoWrapper.ModelProto();
                // If we own the model file set the DeleteOnClose flag so it is always deleted.
                if (ownModelFile)
                {
                    ModelStream = new FileStream(modelFile, FileMode.Open, FileAccess.Read, FileShare.Read, 4096, FileOptions.DeleteOnClose);
                }
                else
                {
                    ModelStream = new FileStream(modelFile, FileMode.Open, FileAccess.Read);
                }

                // The CodedInputStream closes its underlying stream automatically, and our main stream needs to stay open, so we create a separate one here.
                using (var modelStream = new FileStream(modelFile, FileMode.Open, FileAccess.Read, FileShare.Delete | FileShare.Read))
                    using (var codedStream = Google.Protobuf.CodedInputStream.CreateWithLimits(modelStream, Int32.MaxValue, 100))
                        model = OnnxCSharpToProtoWrapper.ModelProto.Parser.ParseFrom(codedStream);

                // Parse actual input and output types stored in the loaded ONNX model to get their DataViewType's.
                var inputTypePool = new Dictionary<string, DataViewType>();
                foreach (var valueInfo in model.Graph.Input)
                {
                    inputTypePool[valueInfo.Name] = OnnxTypeParser.GetDataViewType(valueInfo.Type);
                }

                var initializerTypePool = new Dictionary<string, DataViewType>();
                foreach (var valueInfo in model.Graph.Initializer)
                {
                    initializerTypePool[valueInfo.Name] = OnnxTypeParser.GetScalarDataViewType(valueInfo.DataType);
                }

                var outputTypePool = new Dictionary<string, DataViewType>();
                // Build casters which map NamedOnnxValue to .NET objects.
                var casterPool = new Dictionary<string, Func<NamedOnnxValue, object>>();
                foreach (var valueInfo in model.Graph.Output)
                {
                    outputTypePool[valueInfo.Name] = OnnxTypeParser.GetDataViewType(valueInfo.Type);
                    casterPool[valueInfo.Name]     = OnnxTypeParser.GetDataViewValueCasterAndResultedType(valueInfo.Type, out Type actualType);
                }

                var inputInfos  = GetOnnxVariablesFromMetadata(_session.InputMetadata, shapeDictionary, inputTypePool, null);
                var outputInfos = GetOnnxVariablesFromMetadata(_session.OutputMetadata, shapeDictionary, outputTypePool, casterPool);
                var overrideableInitializers = GetOnnxVariablesFromMetadata(_session.OverridableInitializerMetadata, shapeDictionary, inputTypePool, null);

                // Create a view to the used ONNX model from ONNXRuntime's perspective.
                ModelInfo = new OnnxModelInfo(inputInfos, outputInfos, overrideableInitializers);

                Graph = model.Graph;
            }
            catch
            {
                _session.Dispose();
                _session = null;
                throw;
            }
        }
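A hypothetical call into this constructor that pins a symbolic batch dimension through shapeDictionary (the input name "data" and the shape are assumptions):

        // Hypothetical usage: fix the dynamic batch dimension of input "data".
        var shapes = new Dictionary<string, int[]> { { "data", new[] { 1, 3, 224, 224 } } };
        var model  = new OnnxModel("model.onnx", gpuDeviceId: 0, fallbackToCpu: true,
                                   ownModelFile: false, shapeDictionary: shapes);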
Example 9
        static void Main(string[] args)
        {
            if (args.Length < 2)
            {
                System.Console.WriteLine("Not enough arguments given, use input image output image");
                return;
            }
            // When using the CPU / MKLDNN provider, uncomment the next line instead:
            // var options = new SessionOptions();
            // When using the CUDA/GPU provider, keep the following line:
            var options = SessionOptions.MakeSessionOptionWithCudaProvider();

            options.GraphOptimizationLevel = GraphOptimizationLevel.ORT_ENABLE_ALL;
            options.InterOpNumThreads      = 8;
            options.IntraOpNumThreads      = 8;
            String           onnxfile = "YOUR_MUT1NY_DETECTOR_MODEL.onnx";
            InferenceSession session  = null;

            scales[0]             = 32.0f;
            scales[1]             = 16.0f;
            scales[2]             = 8.0f;
            anchorLevels[0, 0, 0] = 116.0f;
            anchorLevels[0, 0, 1] = 90.0f;
            anchorLevels[0, 1, 0] = 156.0f;
            anchorLevels[0, 1, 1] = 198.0f;
            anchorLevels[0, 2, 0] = 373.0f;
            anchorLevels[0, 2, 1] = 326.0f;

            anchorLevels[1, 0, 0] = 30.0f;
            anchorLevels[1, 0, 1] = 61.0f;
            anchorLevels[1, 1, 0] = 62.0f;
            anchorLevels[1, 1, 1] = 45.0f;
            anchorLevels[1, 2, 0] = 59.0f;
            anchorLevels[1, 2, 1] = 119.0f;

            anchorLevels[2, 0, 0] = 10.0f;
            anchorLevels[2, 0, 1] = 13.0f;
            anchorLevels[2, 1, 0] = 16.0f;
            anchorLevels[2, 1, 1] = 30.0f;
            anchorLevels[2, 2, 0] = 33.0f;
            anchorLevels[2, 2, 1] = 23.0f;
            String inputFilename  = args[0];
            String outputFilename = args[1];

            try
            {
                session = new InferenceSession(onnxfile, options);
                var   inputMeta = session.InputMetadata;
                int[] inputDim  = new int[4];
                float scaleFactor;
                int   offsetX, offsetY;
                foreach (var name in inputMeta.Keys)
                {
                    var dim = inputMeta[name].Dimensions;
                    for (int n = 0; n < dim.Length; n++)
                    {
                        inputDim[n] = dim[n];
                    }
                }
                Stopwatch totalSw;
                Stopwatch processInSw;
                Stopwatch processOutSw;
                Stopwatch executeSw;
                long      totalTime       = 0;
                long      totalProcessIn  = 0;
                long      totalProcessOut = 0;
                long      totalExecute    = 0;
                for (int runs = 0; runs < (BENCHMARKMODE ? NUMRUNS : 1); runs++)
                {
                    totalSw     = Stopwatch.StartNew();
                    processInSw = Stopwatch.StartNew();
                    var testData = CreateInputTensorFromImage(inputFilename, inputDim, out scaleFactor, out offsetX, out offsetY);
                    processInSw.Stop();
                    var container = new List<NamedOnnxValue>();

                    foreach (var name in inputMeta.Keys)
                    {
                        var tensor = new DenseTensor<float>(testData, inputMeta[name].Dimensions);
                        container.Add(NamedOnnxValue.CreateFromTensor<float>(name, tensor));
                    }
                    executeSw = Stopwatch.StartNew();
                    using (var results = session.Run(container))
                    {
                        executeSw.Stop();
                        int       numResults = results.Count;
                        int       levelNr    = 0;
                        ArrayList dets       = new ArrayList();
                        processOutSw = Stopwatch.StartNew();
                        foreach (var r in results)
                        {
                            var resultTensor    = r.AsTensor <float>();
                            var resultDimension = resultTensor.Dimensions;
                            var resultArray     = resultTensor.ToArray();
                            ProcessOutput(levelNr, resultDimension, resultArray, dets);
                            levelNr++;
                        }
                        System.Console.WriteLine("# Dets = " + dets.Count);
                        processOutSw.Stop();
                        dets.Sort();
                        ArrayList finalRects = BuildFinalOutput(dets, 1.0f / scaleFactor, offsetX, offsetY);
                        System.Console.WriteLine("Final # detected Rects = " + finalRects.Count);
                        totalSw.Stop();
                        Console.WriteLine("Prepocessing took " + processInSw.ElapsedMilliseconds);
                        Console.WriteLine("Execution of DNN took " + executeSw.ElapsedMilliseconds);
                        Console.WriteLine("Postprocessing took " + processOutSw.ElapsedMilliseconds);
                        Console.WriteLine("Total processing took " + totalSw.ElapsedMilliseconds);
                        if (runs > WARMUPRUNS)
                        {
                            totalTime       += totalSw.ElapsedMilliseconds;
                            totalExecute    += executeSw.ElapsedMilliseconds;
                            totalProcessIn  += processInSw.ElapsedMilliseconds;
                            totalProcessOut += processOutSw.ElapsedMilliseconds;
                        }
                        if (!BENCHMARKMODE)
                        {
                            WriteOutputDet(inputFilename, outputFilename, finalRects);
                        }
                        results.Dispose();
                        container.Clear();
                    }
                }
                float avgTotalTime      = (float)totalTime / (float)(NUMRUNS - WARMUPRUNS);
                float avgExecuteTime    = (float)totalExecute / (float)(NUMRUNS - WARMUPRUNS);
                float avgProcessInTime  = (float)totalProcessIn / (float)(NUMRUNS - WARMUPRUNS);
                float avgProcessOutTime = (float)totalProcessOut / (float)(NUMRUNS - WARMUPRUNS);
                Console.WriteLine("Avg time of preprocess: " + avgProcessInTime);
                Console.WriteLine("Avg time of xecution of DNN: " + avgExecuteTime);
                Console.WriteLine("Avg time of postprocess: " + avgProcessOutTime);
                Console.WriteLine("Avg time of total: " + avgTotalTime);
            }
            catch (Exception e)
            {
                System.Console.WriteLine("Could not load ONNX model, because " + e.ToString());
                return;
            }
            System.Console.WriteLine("Done");
        }
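One reason the benchmark above excludes the first WARMUPRUNS iterations: the first Run on a CUDA session pays one-time costs (provider initialization, cuDNN algorithm selection, arena growth). A minimal warm-up sketch:

                // Sketch: run once and discard the result so that subsequent
                // timings exclude one-time CUDA initialization costs.
                using (var warmup = session.Run(container)) { }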
Example 10
        /// <summary>
        /// Constructs OnnxModel object from file.
        /// </summary>
        /// <param name="modelFile">Model file path.</param>
        /// <param name="gpuDeviceId">GPU device ID to execute on. Null for CPU.</param>
        /// <param name="fallbackToCpu">If true, resumes CPU execution quitely upon GPU error.</param>
        /// <param name="ownModelFile">If true, the <paramref name="modelFile"/> will be deleted when <see cref="OnnxModel"/> is
        /// no longer needed.</param>
        public OnnxModel(string modelFile, int? gpuDeviceId = null, bool fallbackToCpu = false, bool ownModelFile = false)
        {
            ModelFile = modelFile;
            // If we don't own the model file, _disposed should be false to prevent deleting user's file.
            _ownModelFile = ownModelFile;
            _disposed     = false;

            if (gpuDeviceId != null)
            {
                try
                {
                    _session = new InferenceSession(modelFile,
                                                    SessionOptions.MakeSessionOptionWithCudaProvider(gpuDeviceId.Value));
                }
                catch (OnnxRuntimeException)
                {
                    if (fallbackToCpu)
                    {
                        _session = new InferenceSession(modelFile);
                    }
                    else
                    {
                        // If called from OnnxTransform, this is caught and rethrown.
                        throw;
                    }
                }
            }
            else
            {
                _session = new InferenceSession(modelFile);
            }

            // Load the ONNX model file and parse its input and output schema. The reason for doing so is that
            // ONNXRuntime doesn't expose full type information via its C# APIs.
            var model = new OnnxCSharpToProtoWrapper.ModelProto();

            using (var modelStream = File.OpenRead(modelFile))
                model = OnnxCSharpToProtoWrapper.ModelProto.Parser.ParseFrom(modelStream);

            // Parse actual input and output types stored in the loaded ONNX model to get their DataViewType's.
            var inputTypePool = new Dictionary<string, DataViewType>();

            foreach (var valueInfo in model.Graph.Input)
            {
                inputTypePool[valueInfo.Name] = OnnxTypeParser.GetDataViewType(valueInfo.Type);
            }
            var outputTypePool = new Dictionary<string, DataViewType>();

            // Build casters which map NamedOnnxValue to .NET objects.
            var casterPool = new Dictionary<string, Func<NamedOnnxValue, object>>();

            foreach (var valueInfo in model.Graph.Output)
            {
                outputTypePool[valueInfo.Name] = OnnxTypeParser.GetDataViewType(valueInfo.Type);
                casterPool[valueInfo.Name]     = OnnxTypeParser.GetDataViewValueCasterAndResultedType(valueInfo.Type, out Type actualType);
            }

            var onnxRuntimeInputInfos = new List<OnnxVariableInfo>();

            foreach (var pair in _session.InputMetadata)
            {
                var name         = pair.Key;
                var meta         = pair.Value;
                var dataViewType = inputTypePool[name];
                var info         = new OnnxVariableInfo(name, meta.Dimensions.ToList(), meta.ElementType, dataViewType, null);
                onnxRuntimeInputInfos.Add(info);
            }

            var onnxRuntimeOutputInfos = new List<OnnxVariableInfo>();

            foreach (var pair in _session.OutputMetadata)
            {
                var name         = pair.Key;
                var meta         = pair.Value;
                var dataViewType = outputTypePool[name];
                var caster       = casterPool[name];
                var info         = new OnnxVariableInfo(name, meta.Dimensions.ToList(), meta.ElementType, dataViewType, caster);
                onnxRuntimeOutputInfos.Add(info);
            }

            ModelInfo = new OnnxModelInfo(onnxRuntimeInputInfos, onnxRuntimeOutputInfos);
        }
Example 11
        /// <summary>
        /// Constructs OnnxModel object from file.
        /// </summary>
        /// <param name="modelFile">Model file path.</param>
        /// <param name="gpuDeviceId">GPU device ID to execute on. Null for CPU.</param>
        /// <param name="fallbackToCpu">If true, resumes CPU execution quitely upon GPU error.</param>
        /// <param name="ownModelFile">If true, the <paramref name="modelFile"/> will be deleted when <see cref="OnnxModel"/> is
        /// no longer needed.</param>
        /// <param name="shapeDictionary"></param>
        public OnnxModel(string modelFile, int? gpuDeviceId = null, bool fallbackToCpu = false,
                         bool ownModelFile = false, IDictionary<string, int[]> shapeDictionary = null)
        {
            ModelFile = modelFile;
            // If we don't own the model file, _disposed should be false to prevent deleting user's file.
            _ownModelFile = ownModelFile;
            _disposed     = false;

            if (gpuDeviceId != null)
            {
                try
                {
                    _session = new InferenceSession(modelFile,
                                                    SessionOptions.MakeSessionOptionWithCudaProvider(gpuDeviceId.Value));
                }
                catch (OnnxRuntimeException)
                {
                    if (fallbackToCpu)
                    {
                        _session = new InferenceSession(modelFile);
                    }
                    else
                    {
                        // If called from OnnxTransform, this is caught and rethrown.
                        throw;
                    }
                }
            }
            else
            {
                _session = new InferenceSession(modelFile);
            }

            // Load the ONNX model file and parse its input and output schema. The reason for doing so is that
            // ONNXRuntime doesn't expose full type information via its C# APIs.
            var model = new OnnxCSharpToProtoWrapper.ModelProto();

            using (var modelStream = File.OpenRead(modelFile))
                using (var codedStream = Google.Protobuf.CodedInputStream.CreateWithLimits(modelStream, Int32.MaxValue, 10))
                    model = OnnxCSharpToProtoWrapper.ModelProto.Parser.ParseFrom(codedStream);

            // Parse actual input and output types stored in the loaded ONNX model to get their DataViewType's.
            var inputTypePool = new Dictionary<string, DataViewType>();

            foreach (var valueInfo in model.Graph.Input)
            {
                inputTypePool[valueInfo.Name] = OnnxTypeParser.GetDataViewType(valueInfo.Type);
            }
            var outputTypePool = new Dictionary<string, DataViewType>();

            // Build casters which map NamedOnnxValue to .NET objects.
            var casterPool = new Dictionary<string, Func<NamedOnnxValue, object>>();

            foreach (var valueInfo in model.Graph.Output)
            {
                outputTypePool[valueInfo.Name] = OnnxTypeParser.GetDataViewType(valueInfo.Type);
                casterPool[valueInfo.Name]     = OnnxTypeParser.GetDataViewValueCasterAndResultedType(valueInfo.Type, out Type actualType);
            }

            var onnxRuntimeInputInfos = new List<OnnxVariableInfo>();

            // Collect input information for this ONNX model from ONNXRuntime's perspective.
            foreach (var pair in _session.InputMetadata)
            {
                var name         = pair.Key;
                var meta         = pair.Value;
                var dataViewType = inputTypePool[name];

                OnnxVariableInfo info = null;
                if (shapeDictionary != null && shapeDictionary.ContainsKey(name))
                {
                    // If the user provides a shape for a specific tensor, it overwrites the shape loaded from
                    // the ONNX model file and the DataViewVectorType deduced from it.

                    if (!CheckOnnxShapeCompatibility(shapeDictionary[name].ToList(), meta.Dimensions.ToList()))
                    {
                        throw Contracts.ExceptParamValue(shapeDictionary[name], nameof(shapeDictionary),
                                                         "The specified shape " + string.Join(",", shapeDictionary[name]) +
                                                         " is not compatible with the shape " + string.Join(",", meta.Dimensions) +
                                                         " loaded from the ONNX model file. Only unknown dimension can replace or " +
                                                         "be replaced by another dimension.");
                    }

                    if (dataViewType is VectorDataViewType vectorType)
                    {
                        if (shapeDictionary[name].All(value => value > 0))
                        {
                            dataViewType = new VectorDataViewType(vectorType.ItemType, shapeDictionary[name]);
                        }
                        else
                        {
                            dataViewType = new VectorDataViewType(vectorType.ItemType);
                        }
                    }

                    info = new OnnxVariableInfo(name, shapeDictionary[name].ToList(), meta.ElementType, dataViewType, null);
                }
                else
                {
                    // No user-specified shape is found, so the shape loaded from ONNX model file is used.
                    info = new OnnxVariableInfo(name, meta.Dimensions.ToList(), meta.ElementType, dataViewType, null);
                }
                onnxRuntimeInputInfos.Add(info);
            }

            var onnxRuntimeOutputInfos = new List<OnnxVariableInfo>();

            // Collect output information for this ONNX model from ONNXRuntime's perspective.
            foreach (var pair in _session.OutputMetadata)
            {
                var name         = pair.Key;
                var meta         = pair.Value;
                var dataViewType = outputTypePool[name];
                var caster       = casterPool[name];

                OnnxVariableInfo info = null;
                if (shapeDictionary != null && shapeDictionary.ContainsKey(name))
                {
                    // If the user provides a shape for a specific tensor, it overwrites the shape loaded from
                    // the ONNX model file.

                    if (!CheckOnnxShapeCompatibility(shapeDictionary[name].ToList(), meta.Dimensions.ToList()))
                    {
                        throw Contracts.ExceptParamValue(shapeDictionary[name], nameof(shapeDictionary),
                                                         "The specified shape " + string.Join(",", shapeDictionary[name]) +
                                                         " is not compatible with the shape " + string.Join(",", meta.Dimensions) +
                                                         " loaded from the ONNX model file. Only unknown dimension can replace or " +
                                                         "be replaced by another dimension.");
                    }

                    if (dataViewType is VectorDataViewType vectorType)
                    {
                        if (shapeDictionary[name].All(value => value > 0))
                        {
                            dataViewType = new VectorDataViewType(vectorType.ItemType, shapeDictionary[name]);
                        }
                        else
                        {
                            dataViewType = new VectorDataViewType(vectorType.ItemType);
                        }
                    }

                    info = new OnnxVariableInfo(name, shapeDictionary[name].ToList(), meta.ElementType, dataViewType, caster);
                }
                else
                {
                    // No user-specified shape is found, so the shape loaded from ONNX model file is used.
                    info = new OnnxVariableInfo(name, meta.Dimensions.ToList(), meta.ElementType, dataViewType, caster);
                }

                onnxRuntimeOutputInfos.Add(info);
            }

            // Create a view to the used ONNX model from ONNXRuntime's perspective.
            ModelInfo = new OnnxModelInfo(onnxRuntimeInputInfos, onnxRuntimeOutputInfos);
        }
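A worked illustration of the compatibility rule enforced above, where only unknown dimensions may be substituted (shapes are hypothetical):

            // Model input declared as [-1, 3, 224, 224] (unknown batch size):
            //   shapeDictionary["data"] = { 1, 3, 224, 224 }  -> accepted, pins the batch.
            // Model input declared as [1, 3, 224, 224] (all dimensions known):
            //   shapeDictionary["data"] = { 2, 3, 224, 224 }  -> rejected; a known
            //   dimension cannot be replaced by a different value.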