public void TestSimpleCase(bool useOptionsCtor)
{
    // Path to the ONNX model under test.
    var modelFile = "squeezenet/00000001/model.onnx";

    // Two identical rows of valid input data for the model's "data_0" column.
    var sampleVector = GetSampleArrayData();
    var dataView = ML.Data.LoadFromEnumerable(
        new TestData[]
        {
            new TestData() { data_0 = sampleVector },
            new TestData() { data_0 = sampleVector }
        });

    // Deliberately invalid inputs: wrong column name, wrong element type,
    // and wrong vector length, respectively.
    var wrongNameRows = new List<TestDataXY> { new TestDataXY() { A = new float[InputSize] } };
    var wrongTypeRows = new List<TestDataDifferentType> { new TestDataDifferentType() { data_0 = new string[InputSize] } };
    var wrongSizeRows = new List<TestDataSize> { new TestDataSize() { data_0 = new float[2] } };

    var options = new OnnxOptions()
    {
        OutputColumns = new[] { "softmaxout_1" },
        InputColumns = new[] { "data_0" },
        ModelFile = modelFile,
        GpuDeviceId = _gpuDeviceId,
        FallbackToCpu = _fallbackToCpu,
        InterOpNumThreads = 1,
        IntraOpNumThreads = 1
    };

    // Build the estimator either from the options object or from the expanded
    // argument list, depending on the test parameter — both paths must behave
    // the same.
    var pipe = useOptionsCtor
        ? ML.Transforms.ApplyOnnxModel(options)
        : ML.Transforms.ApplyOnnxModel(
            options.OutputColumns,
            options.InputColumns,
            modelFile,
            gpuDeviceId: _gpuDeviceId,
            fallbackToCpu: _fallbackToCpu);

    var invalidDataWrongNames = ML.Data.LoadFromEnumerable(wrongNameRows);
    var invalidDataWrongTypes = ML.Data.LoadFromEnumerable(wrongTypeRows);
    var invalidDataWrongVectorSize = ML.Data.LoadFromEnumerable(wrongSizeRows);

    // Valid data must round-trip; mismatched names/types must be rejected.
    TestEstimatorCore(pipe, dataView, invalidInput: invalidDataWrongNames);
    TestEstimatorCore(pipe, dataView, invalidInput: invalidDataWrongTypes);

    // Schema propagation alone tolerates the wrong vector size...
    pipe.GetOutputSchema(SchemaShape.Create(invalidDataWrongVectorSize.Schema));

    // ...but actually fitting against it must throw one of the two
    // accepted exception types; reaching the assert is a failure.
    try
    {
        var onnxTransformer = pipe.Fit(invalidDataWrongVectorSize);
        (onnxTransformer as IDisposable)?.Dispose();
        Assert.False(true);
    }
    catch (ArgumentOutOfRangeException) { }
    catch (InvalidOperationException) { }
}
/// <summary>
/// Create a <see cref="OnnxScoringEstimator"/> using the specified <see cref="OnnxOptions"/>.
/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
/// and how to run it on a GPU.
/// </summary>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="options">Options for the <see cref="OnnxScoringEstimator"/>.</param>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog, OnnxOptions options)
    => new OnnxScoringEstimator(
        CatalogUtils.GetEnvironment(catalog),
        options.OutputColumns,
        options.InputColumns,
        options.ModelFile,
        options.GpuDeviceId,
        options.FallbackToCpu,
        options.ShapeDictionary,
        options.RecursionLimit,
        options.InterOpNumThreads,
        options.IntraOpNumThreads);
/// <summary>
/// Create a <see cref="OnnxScoringEstimator"/> using the specified <see cref="OnnxOptions"/>.
/// Please refer to <see cref="OnnxScoringEstimator"/> to learn more about the necessary dependencies,
/// and how to run it on a GPU.
/// </summary>
/// <remarks>
/// If the options.GpuDeviceId value is <see langword="null" /> the <see cref="P:MLContext.GpuDeviceId"/>
/// value will be used if it is not <see langword="null" />.
/// </remarks>
/// <param name="catalog">The transform's catalog.</param>
/// <param name="options">Options for the <see cref="OnnxScoringEstimator"/>.</param>
public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog, OnnxOptions options)
{
    // Resolve the effective environment and GPU settings first: when the
    // options leave GpuDeviceId unset, the context-level value is consulted.
    var (env, gpuDeviceIdToUse, fallbackToCpuToUse) =
        GetGpuDeviceId(catalog, options.GpuDeviceId, options.FallbackToCpu);

    return new OnnxScoringEstimator(
        env,
        options.OutputColumns,
        options.InputColumns,
        options.ModelFile,
        gpuDeviceIdToUse,
        fallbackToCpuToUse,
        options.ShapeDictionary,
        options.RecursionLimit,
        options.InterOpNumThreads,
        options.IntraOpNumThreads);
}