Example #1
        private void TestCUDAProviderOptions()
        {
            string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "squeezenet.onnx");

            using (var cleanUp = new DisposableListTest<IDisposable>())
            {
                var cudaProviderOptions = new OrtCUDAProviderOptions();
                cleanUp.Add(cudaProviderOptions);

                var providerOptionsDict = new Dictionary<string, string>();
                providerOptionsDict["device_id"]                    = "0";
                providerOptionsDict["gpu_mem_limit"]                = "20971520";
                providerOptionsDict["arena_extend_strategy"]        = "kSameAsRequested";
                providerOptionsDict["cudnn_conv_algo_search"]       = "DEFAULT";
                providerOptionsDict["do_copy_in_default_stream"]    = "1";
                providerOptionsDict["cudnn_conv_use_max_workspace"] = "1";
                providerOptionsDict["cudnn_conv1d_pad_to_nc1d"]     = "1";
                cudaProviderOptions.UpdateOptions(providerOptionsDict);

                var resultProviderOptionsDict = new Dictionary<string, string>();
                ProviderOptionsValueHelper.StringToDict(cudaProviderOptions.GetOptions(), resultProviderOptionsDict);

                // test provider options configuration
                string value;
                value = resultProviderOptionsDict["device_id"];
                Assert.Equal("0", value);
                value = resultProviderOptionsDict["gpu_mem_limit"];
                Assert.Equal("20971520", value);
                value = resultProviderOptionsDict["arena_extend_strategy"];
                Assert.Equal("kSameAsRequested", value);
                value = resultProviderOptionsDict["cudnn_conv_algo_search"];
                Assert.Equal("DEFAULT", value);
                value = resultProviderOptionsDict["do_copy_in_default_stream"];
                Assert.Equal("1", value);
                value = resultProviderOptionsDict["cudnn_conv_use_max_workspace"];
                Assert.Equal("1", value);
                value = resultProviderOptionsDict["cudnn_conv1d_pad_to_nc1d"];
                Assert.Equal("1", value);

                // test correctness of provider options
                SessionOptions options = SessionOptions.MakeSessionOptionWithCudaProvider(cudaProviderOptions);
                cleanUp.Add(options);

                var session = new InferenceSession(modelPath, options);
                cleanUp.Add(session);

                var     inputMeta = session.InputMetadata;
                var     container = new List<NamedOnnxValue>();
                float[] inputData = TestDataLoader.LoadTensorFromFile(@"bench.in"); // the data for this model's single input tensor
                foreach (var name in inputMeta.Keys)
                {
                    Assert.Equal(typeof(float), inputMeta[name].ElementType);
                    Assert.True(inputMeta[name].IsTensor);
                    var tensor = new DenseTensor<float>(inputData, inputMeta[name].Dimensions);
                    container.Add(NamedOnnxValue.CreateFromTensor<float>(name, tensor));
                }

                session.Run(container);
            }
        }
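A minimal sketch of the same CUDA setup outside the test harness, using only APIs that appear above (OrtCUDAProviderOptions.UpdateOptions and SessionOptions.MakeSessionOptionWithCudaProvider); the model path and option values are illustrative:

        using System.Collections.Generic;
        using Microsoft.ML.OnnxRuntime;

        // Configure the CUDA execution provider and open a session with it.
        using (var cudaOptions = new OrtCUDAProviderOptions())
        {
            cudaOptions.UpdateOptions(new Dictionary<string, string>
            {
                ["device_id"]     = "0",
                ["gpu_mem_limit"] = "20971520", // arena limit in bytes
            });

            using (var sessionOptions = SessionOptions.MakeSessionOptionWithCudaProvider(cudaOptions))
            using (var session = new InferenceSession("squeezenet.onnx", sessionOptions))
            {
                // The session now runs on the CUDA device; feed it inputs as in the test above.
            }
        }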
Example #2
        private void CanRunInferenceOnAModelWithTensorRT()
        {
            string modelPath = Path.Combine(Directory.GetCurrentDirectory(), "squeezenet.onnx");

            using (var cleanUp = new DisposableListTest<IDisposable>())
            {
                SessionOptions options = SessionOptions.MakeSessionOptionWithTensorrtProvider(0);
                cleanUp.Add(options);

                var session = new InferenceSession(modelPath, options);
                cleanUp.Add(session);

                var     inputMeta = session.InputMetadata;
                var     container = new List<NamedOnnxValue>();
                float[] inputData = TestDataLoader.LoadTensorFromFile(@"bench.in"); // the data for this model's single input tensor
                foreach (var name in inputMeta.Keys)
                {
                    Assert.Equal(typeof(float), inputMeta[name].ElementType);
                    Assert.True(inputMeta[name].IsTensor);
                    var tensor = new DenseTensor<float>(inputData, inputMeta[name].Dimensions);
                    container.Add(NamedOnnxValue.CreateFromTensor<float>(name, tensor));
                }

                using (var results = session.Run(container))
                {
                    ValidateRunResults(results);
                }
            }
        }
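ValidateRunResults is a test helper whose body is not shown here. A hypothetical stand-in that prints each named output instead of asserting against reference data, built only from members these examples already use (Name and AsTensor<float>()):

        using System;
        using Microsoft.ML.OnnxRuntime;

        // Hypothetical replacement for ValidateRunResults: dump each output tensor's
        // name and element count rather than comparing against expected values.
        static void PrintRunResults(IDisposableReadOnlyCollection<DisposableNamedOnnxValue> results)
        {
            foreach (var result in results)
            {
                var scores = result.AsTensor<float>();
                Console.WriteLine($"{result.Name}: {scores.Length} values");
            }
        }

It drops into the example unchanged: using (var results = session.Run(container)) { PrintRunResults(results); }.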
Example #3
        private void TestTensorRTProviderOptions()
        {
            string modelPath            = Path.Combine(Directory.GetCurrentDirectory(), "squeezenet.onnx");
            string calTablePath         = "squeezenet_calibration.flatbuffers";
            string enginePath           = "./";
            string engineDecryptLibPath = "engine_decryp";

            using (var cleanUp = new DisposableListTest<IDisposable>())
            {
                var trtProviderOptions = new OrtTensorRTProviderOptions();
                cleanUp.Add(trtProviderOptions);

                var providerOptionsDict = new Dictionary<string, string>();
                providerOptionsDict["device_id"]       = "0";
                providerOptionsDict["trt_fp16_enable"] = "1";
                providerOptionsDict["trt_int8_enable"] = "1";
                providerOptionsDict["trt_int8_calibration_table_name"] = calTablePath;
                providerOptionsDict["trt_engine_cache_enable"]         = "1";
                providerOptionsDict["trt_engine_cache_path"]           = enginePath;
                providerOptionsDict["trt_engine_decryption_enable"]    = "0";
                providerOptionsDict["trt_engine_decryption_lib_path"]  = engineDecrptLibPath;
                trtProviderOptions.UpdateOptions(providerOptionsDict);

                var resultProviderOptionsDict = new Dictionary<string, string>();
                ProviderOptionsValueHelper.StringToDict(trtProviderOptions.GetOptions(), resultProviderOptionsDict);

                // test provider options configuration
                string value;
                value = resultProviderOptionsDict["device_id"];
                Assert.Equal("0", value);
                value = resultProviderOptionsDict["trt_fp16_enable"];
                Assert.Equal("1", value);
                value = resultProviderOptionsDict["trt_int8_enable"];
                Assert.Equal("1", value);
                value = resultProviderOptionsDict["trt_int8_calibration_table_name"];
                Assert.Equal(calTablePath, value);
                value = resultProviderOptionsDict["trt_engine_cache_enable"];
                Assert.Equal("1", value);
                value = resultProviderOptionsDict["trt_engine_cache_path"];
                Assert.Equal(enginePath, value);
                value = resultProviderOptionsDict["trt_engine_decryption_enable"];
                Assert.Equal("0", value);
                value = resultProviderOptionsDict["trt_engine_decryption_lib_path"];
                Assert.Equal(engineDecryptLibPath, value);

                // test correctness of provider options
                SessionOptions options = SessionOptions.MakeSessionOptionWithTensorrtProvider(trtProviderOptions);
                cleanUp.Add(options);

                var session = new InferenceSession(modelPath, options);
                cleanUp.Add(session);

                var     inputMeta = session.InputMetadata;
                var     container = new List<NamedOnnxValue>();
                float[] inputData = TestDataLoader.LoadTensorFromFile(@"bench.in"); // the data for this model's single input tensor
                foreach (var name in inputMeta.Keys)
                {
                    Assert.Equal(typeof(float), inputMeta[name].ElementType);
                    Assert.True(inputMeta[name].IsTensor);
                    var tensor = new DenseTensor<float>(inputData, inputMeta[name].Dimensions);
                    container.Add(NamedOnnxValue.CreateFromTensor<float>(name, tensor));
                }

                session.Run(container);
            }
        }
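A pared-down sketch that keeps only the engine-cache settings from the options above, assuming the process may write compiled engines to the current directory:

        using System.Collections.Generic;
        using Microsoft.ML.OnnxRuntime;

        using (var trtOptions = new OrtTensorRTProviderOptions())
        {
            trtOptions.UpdateOptions(new Dictionary<string, string>
            {
                ["device_id"]               = "0",
                ["trt_engine_cache_enable"] = "1",  // reuse compiled engines across runs
                ["trt_engine_cache_path"]   = "./",
            });

            using (var sessionOptions = SessionOptions.MakeSessionOptionWithTensorrtProvider(trtOptions))
            using (var session = new InferenceSession("squeezenet.onnx", sessionOptions))
            {
                // The first run builds and caches the TensorRT engine; later runs load it from disk.
            }
        }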
Example #4

        public void TestIOBindingWithOrtAllocation()
        {
            var inputName  = "data_0";
            var outputName = "softmaxout_1";
            var allocator  = OrtAllocator.DefaultInstance;

            // From the model
            using (var dispList = new DisposableListTest<IDisposable>())
            {
                var tuple       = OpenSessionSqueezeNet();
                var session     = tuple.Item1;
                var inputData   = tuple.Item2;
                var inputTensor = tuple.Item3;
                var outputData  = tuple.Item4;
                dispList.Add(session);
                var runOptions = new RunOptions();
                dispList.Add(runOptions);

                var inputMeta    = session.InputMetadata;
                var outputMeta   = session.OutputMetadata;
                var outputTensor = new DenseTensor<float>(outputData, outputMeta[outputName].Dimensions);

                var ioBinding = session.CreateIoBinding();
                dispList.Add(ioBinding);

                var ortAllocationInput = allocator.Allocate((uint)inputData.Length * sizeof(float));
                dispList.Add(ortAllocationInput);
                var inputShape = Array.ConvertAll<int, long>(inputMeta[inputName].Dimensions, d => d);
                var shapeSize  = ArrayUtilities.GetSizeForShape(inputShape);
                Assert.Equal(shapeSize, inputData.Length);
                PopulateNativeBufferFloat(ortAllocationInput, inputData);

                // Create an external allocation for testing OrtExternalAllocation
                var    cpuMemInfo  = OrtMemoryInfo.DefaultInstance;
                var    sizeInBytes = shapeSize * sizeof(float);
                IntPtr allocPtr    = Marshal.AllocHGlobal((int)sizeInBytes);
                dispList.Add(new OrtSafeMemoryHandle(allocPtr));
                PopulateNativeBuffer(allocPtr, inputData);

                var ortAllocationOutput = allocator.Allocate((uint)outputData.Length * sizeof(float));
                dispList.Add(ortAllocationOutput);

                var outputShape = Array.ConvertAll<int, long>(outputMeta[outputName].Dimensions, i => i);

                // Test 1. Bind the output to OrtAllocated buffer
                using (FixedBufferOnnxValue fixedInputBuffer = FixedBufferOnnxValue.CreateFromTensor(inputTensor))
                {
                    ioBinding.BindInput(inputName, fixedInputBuffer);
                    ioBinding.BindOutput(outputName, Tensors.TensorElementType.Float, outputShape, ortAllocationOutput);
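                    // Ensure any pending copies into bound input buffers have completed
                    // before the run (effectively a no-op for CPU-resident buffers).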
                    ioBinding.SynchronizeBoundInputs();
                    using (var outputs = session.RunWithBindingAndNames(runOptions, ioBinding))
                    {
                        ioBinding.SynchronizeBoundOutputs();
                        Assert.Equal(1, outputs.Count);
                        var output = outputs.ElementAt(0);
                        Assert.Equal(outputName, output.Name);
                        var tensor = output.AsTensor<float>();
                        Assert.True(tensor.IsFixedSize);
                        Assert.Equal(outputData, tensor.ToArray<float>(), new FloatComparer());
                    }
                }

                // Test 2. Bind the input to memory allocation and output to a fixedBuffer
                {
                    ioBinding.BindInput(inputName, Tensors.TensorElementType.Float, inputShape, ortAllocationInput);
                    ioBinding.BindOutput(outputName, Tensors.TensorElementType.Float, outputShape, ortAllocationOutput);
                    ioBinding.SynchronizeBoundInputs();
                    using (var outputs = session.RunWithBindingAndNames(runOptions, ioBinding))
                    {
                        ioBinding.SynchronizeBoundOutputs();
                        Assert.Equal(1, outputs.Count);
                        var output = outputs.ElementAt(0);
                        Assert.Equal(outputName, output.Name);
                        var tensor = output.AsTensor<float>();
                        Assert.True(tensor.IsFixedSize);
                        Assert.Equal(outputData, tensor.ToArray<float>(), new FloatComparer());
                    }
                }
                // 3. Test external allocation
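                // OrtExternalAllocation wraps caller-owned memory: ORT reads the buffer
                // in place and does not take ownership of the pointer.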
                {
                    var externalInputAllocation = new OrtExternalAllocation(cpuMemInfo, inputShape,
                                                                            Tensors.TensorElementType.Float, allocPtr, sizeInBytes);

                    ioBinding.BindInput(inputName, externalInputAllocation);
                    ioBinding.BindOutput(outputName, Tensors.TensorElementType.Float, outputShape, ortAllocationOutput);
                    ioBinding.SynchronizeBoundInputs();
                    using (var outputs = session.RunWithBindingAndNames(runOptions, ioBinding))
                    {
                        ioBinding.SynchronizeBoundOutputs();
                        Assert.Equal(1, outputs.Count);
                        var output = outputs.ElementAt(0);
                        Assert.Equal(outputName, output.Name);
                        var tensor = output.AsTensor<float>();
                        Assert.True(tensor.IsFixedSize);
                        Assert.Equal(outputData, tensor.ToArray<float>(), new FloatComparer());
                    }
                }
                // 4. Some negative tests for external allocation
                {
                    // Small buffer size
                    Action smallBuffer = delegate()
                    {
                        new OrtExternalAllocation(cpuMemInfo, inputShape,
                                                  Tensors.TensorElementType.Float, allocPtr, sizeInBytes - 10);
                    };

                    Assert.Throws<OnnxRuntimeException>(smallBuffer);

                    Action stringType = delegate()
                    {
                        new OrtExternalAllocation(cpuMemInfo, inputShape,
                                                  Tensors.TensorElementType.String, allocPtr, sizeInBytes);
                    };

                    Assert.Throws<OnnxRuntimeException>(stringType);
                }
            }
        }
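PopulateNativeBuffer and PopulateNativeBufferFloat are test helpers whose bodies are not shown. A plausible sketch, assuming OrtMemoryAllocation exposes its base address through a Pointer property:

        using System;
        using System.Runtime.InteropServices;
        using Microsoft.ML.OnnxRuntime;

        static void PopulateNativeBuffer(IntPtr buffer, float[] data)
        {
            // Copy the managed floats into the unmanaged buffer.
            Marshal.Copy(data, 0, buffer, data.Length);
        }

        static void PopulateNativeBufferFloat(OrtMemoryAllocation allocation, float[] data)
        {
            PopulateNativeBuffer(allocation.Pointer, data);
        }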
Example #5
        public void TestIOBindingWithOrtAllocation()
        {
            var inputName  = "data_0";
            var outputName = "softmaxout_1";
            var allocator  = OrtAllocator.DefaultInstance;

            // From the model
            using (var dispList = new DisposableListTest<IDisposable>())
            {
                var tuple       = OpenSessionSqueezeNet();
                var session     = tuple.Item1;
                var inputData   = tuple.Item2;
                var inputTensor = tuple.Item3;
                var outputData  = tuple.Item4;
                dispList.Add(session);
                var runOptions = new RunOptions();
                dispList.Add(runOptions);

                var inputMeta    = session.InputMetadata;
                var outputMeta   = session.OutputMetadata;
                var outputTensor = new DenseTensor<float>(outputData, outputMeta[outputName].Dimensions);

                var ioBinding = session.CreateIoBinding();
                dispList.Add(ioBinding);

                var ortAllocationInput = allocator.Allocate((uint)inputData.Length * sizeof(float));
                dispList.Add(ortAllocationInput);
                var inputShape = Array.ConvertAll<int, long>(inputMeta[inputName].Dimensions, d => d);
                PopulateNativeBufferFloat(ortAllocationInput, inputData);

                var ortAllocationOutput = allocator.Allocate((uint)outputData.Length * sizeof(float));
                dispList.Add(ortAllocationOutput);

                var outputShape = Array.ConvertAll<int, long>(outputMeta[outputName].Dimensions, i => i);

                // Test 1. Bind the output to OrtAllocated buffer
                using (FixedBufferOnnxValue fixedInputBuffer = FixedBufferOnnxValue.CreateFromTensor(inputTensor))
                {
                    ioBinding.BindInput(inputName, fixedInputBuffer);
                    ioBinding.BindOutput(outputName, Tensors.TensorElementType.Float, outputShape, ortAllocationOutput);
                    using (var outputs = session.RunWithBindingAndNames(runOptions, ioBinding))
                    {
                        Assert.Equal(1, outputs.Count);
                        var output = outputs.ElementAt(0);
                        Assert.Equal(outputName, output.Name);
                        var tensor = output.AsTensor<float>();
                        Assert.True(tensor.IsFixedSize);
                        Assert.Equal(outputData, tensor.ToArray<float>(), new FloatComparer());
                    }
                }

                // Test 2. Bind the input to memory allocation and output to a fixedBuffer
                {
                    ioBinding.BindInput(inputName, Tensors.TensorElementType.Float, inputShape, ortAllocationInput);
                    ioBinding.BindOutput(outputName, Tensors.TensorElementType.Float, outputShape, ortAllocationOutput);
                    using (var outputs = session.RunWithBindingAndNames(runOptions, ioBinding))
                    {
                        Assert.Equal(1, outputs.Count);
                        var output = outputs.ElementAt(0);
                        Assert.Equal(outputName, output.Name);
                        var tensor = output.AsTensor<float>();
                        Assert.True(tensor.IsFixedSize);
                        Assert.Equal(outputData, tensor.ToArray<float>(), new FloatComparer());
                    }
                }
            }
        }
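FloatComparer is also not defined in these snippets; the element-wise asserts need a tolerant comparer because GPU results rarely match CPU reference data bit-for-bit. A minimal sketch, with the tolerance value being an assumption:

        using System;
        using System.Collections.Generic;

        internal class FloatComparer : IEqualityComparer<float>
        {
            private const float Tolerance = 1e-5f; // assumed; tune to the model's accuracy

            public bool Equals(float x, float y) => Math.Abs(x - y) <= Tolerance;

            // Comparison-only usage: a constant hash is valid, if degenerate.
            public int GetHashCode(float value) => 0;
        }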