Example #1
        /// <summary>
        /// Append a TensorRT EP instance (based on specified configuration) to the SessionOptions instance.
        /// Use only if you have the onnxruntime package specific to this Execution Provider.
        /// </summary>
        /// <param name="trtProviderOptions">TensorRT EP provider options</param>
        public void AppendExecutionProvider_Tensorrt(OrtTensorRTProviderOptions trtProviderOptions)
        {
#if __MOBILE__
            throw new NotSupportedException("The TensorRT Execution Provider is not supported in this build");
#else
            NativeApiStatus.VerifySuccess(NativeMethods.SessionOptionsAppendExecutionProvider_TensorRT_V2(handle, trtProviderOptions.Handle));
#endif
        }
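For context, a minimal usage sketch of the method above follows. It is not part of the original source: it assumes OrtTensorRTProviderOptions has a parameterless constructor and is disposable, and "model.onnx" is a placeholder path.

        using Microsoft.ML.OnnxRuntime;

        // Usage sketch (assumptions: OrtTensorRTProviderOptions has a parameterless
        // constructor and is disposable; "model.onnx" is a placeholder path).
        using (var trtProviderOptions = new OrtTensorRTProviderOptions())
        using (var options = new SessionOptions())
        {
            // Register the TensorRT EP on an existing SessionOptions instance.
            options.AppendExecutionProvider_Tensorrt(trtProviderOptions);

            using (var session = new InferenceSession("model.onnx", options))
            {
                // TensorRT-compatible subgraphs run on the TensorRT EP; anything
                // it cannot handle falls back to the other registered providers.
            }
        }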
Example #2
        /// <summary>
        /// A helper method to construct a SessionOptions object for TensorRT execution provider.
        /// Use only if CUDA/TensorRT are installed and you have the onnxruntime package specific to this Execution Provider.
        /// </summary>
        /// <param name="trtProviderOptions">TensorRT EP provider options</param>
        /// <returns>A SessionOptions object configured for execution with the specified provider options</returns>
        public static SessionOptions MakeSessionOptionWithTensorrtProvider(OrtTensorRTProviderOptions trtProviderOptions)
        {
            CheckTensorrtExecutionProviderDLLs();
            SessionOptions options = new SessionOptions();

            try
            {
                // Make sure that CUDA EP uses the same device id as TensorRT EP.
                options.AppendExecutionProvider_Tensorrt(trtProviderOptions);
                options.AppendExecutionProvider_CUDA(trtProviderOptions.GetDeviceId());
                return options;
            }
            catch (Exception)
            {
                options.Dispose();
                throw;
            }
        }
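A hypothetical caller of the helper above, sketched under the assumption that OrtTensorRTProviderOptions exposes a parameterless constructor and an UpdateOptions(Dictionary<string, string>) method; the option keys and the model path are illustrative only.

        using System.Collections.Generic;
        using Microsoft.ML.OnnxRuntime;

        using (var trtProviderOptions = new OrtTensorRTProviderOptions())
        {
            // Illustrative TensorRT EP settings; UpdateOptions and the exact key
            // names here are assumptions, not taken from the example above.
            trtProviderOptions.UpdateOptions(new Dictionary<string, string>
            {
                { "device_id", "0" },
                { "trt_fp16_enable", "true" }
            });

            using (var options = SessionOptions.MakeSessionOptionWithTensorrtProvider(trtProviderOptions))
            using (var session = new InferenceSession("model.onnx", options))
            {
                // The helper appends the CUDA EP with the same device id as the
                // TensorRT EP, so both providers target the same GPU.
            }
        }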
Example #3
        /// <summary>
        /// A helper method to construct a SessionOptions object for TensorRT execution provider.
        /// Use only if CUDA/TensorRT are installed and you have the onnxruntime package specific to this Execution Provider.
        /// </summary>
        /// <param name="trtProviderOptions">TensorRT EP provider options</param>
        /// <returns>A SessionOptions object configured for execution with the specified provider options</returns>
        public static SessionOptions MakeSessionOptionWithTensorrtProvider(OrtTensorRTProviderOptions trtProviderOptions)
        {
            CheckTensorrtExecutionProviderDLLs();
            SessionOptions options = new SessionOptions();

            try
            {
                // Make sure that CUDA EP uses the same device id as TensorRT EP.
                int deviceId = trtProviderOptions.GetDeviceId();

                NativeApiStatus.VerifySuccess(NativeMethods.SessionOptionsAppendExecutionProvider_TensorRT(options.Handle, trtProviderOptions.Handle));
                NativeApiStatus.VerifySuccess(NativeMethods.OrtSessionOptionsAppendExecutionProvider_CUDA(options.Handle, deviceId));
                NativeApiStatus.VerifySuccess(NativeMethods.OrtSessionOptionsAppendExecutionProvider_CPU(options.Handle, 1));
                return options;
            }
            catch (Exception)
            {
                options.Dispose();
                // Rethrow with "throw" rather than "throw e" to preserve the original stack trace.
                throw;
            }
        }
Example #4
 /// <summary>
 /// Append a TensorRT EP instance (based on specified configuration) to the SessionOptions instance.
 /// Use only if you have the onnxruntime package specific to this Execution Provider.
 /// </summary>
 /// <param name="trtProviderOptions">TensorRT EP provider options</param>
 public void AppendExecutionProvider_Tensorrt(OrtTensorRTProviderOptions trtProviderOptions)
 {
     NativeApiStatus.VerifySuccess(NativeMethods.SessionOptionsAppendExecutionProvider_TensorRT(handle, trtProviderOptions.Handle));
 }