public Interpreter(byte[] modelData, Options options)
        {
            if (modelData == null)
            {
                throw new ArgumentNullException(nameof(modelData));
            }

            // Pin the buffer so the native library gets a stable pointer.
            // NOTE(review): on the success path the handle is intentionally never
            // freed — the TF Lite C API requires model_data to outlive the
            // TfLiteModel, and this pin is the only thing keeping the array alive
            // and immovable. TODO: store the handle in a field and free it during
            // Dispose instead of leaking it for the process lifetime.
            GCHandle modelDataHandle = GCHandle.Alloc(modelData, GCHandleType.Pinned);
            IntPtr   modelDataPtr    = modelDataHandle.AddrOfPinnedObject();

            model = TfLiteModelCreate(modelDataPtr, modelData.Length);
            if (model == IntPtr.Zero)
            {
                // BUG FIX: no model was created, so nothing needs the pin —
                // free the handle instead of leaking it on this failure path.
                modelDataHandle.Free();
                throw new Exception("Failed to create TensorFlowLite Model");
            }

            // Only build a native options object when the caller supplied a
            // non-default Options struct; otherwise pass IntPtr.Zero below.
            if (!options.Equals(default(Options)))
            {
                this.options = TfLiteInterpreterOptionsCreate();
                if (options.threads > 1)
                {
                    TfLiteInterpreterOptionsSetNumThreads(this.options, options.threads);
                }
                if (options.gpuDelegate != null)
                {
                    TfLiteInterpreterOptionsAddDelegate(this.options, options.gpuDelegate.Delegate);
                    // Keep a reference so Dispose can tear the delegate down later.
                    gpuDelegate = options.gpuDelegate;
                }
            }

            interpreter = TfLiteInterpreterCreate(model, this.options);
            if (interpreter == IntPtr.Zero)
            {
                // BUG FIX: previously the native model and options leaked here —
                // the exception prevents the caller from ever invoking Dispose
                // on a half-constructed instance, so clean up before throwing.
                if (this.options != IntPtr.Zero)
                {
                    TfLiteInterpreterOptionsDelete(this.options);
                    this.options = IntPtr.Zero;
                }
                TfLiteModelDelete(model);
                model = IntPtr.Zero;
                throw new Exception("Failed to create TensorFlowLite Interpreter");
            }
        }
 /// <summary>
 /// Releases the native interpreter, model and options handles and disposes
 /// the GPU delegate, if one was attached. Safe to call more than once:
 /// every field is reset so repeated calls find nothing left to free.
 /// </summary>
 public void Dispose()
 {
     // Tear down in reverse order of acquisition: interpreter first,
     // then the model and options it was built from.
     if (interpreter != IntPtr.Zero)
     {
         TfLiteInterpreterDelete(interpreter);
         interpreter = IntPtr.Zero;
     }

     if (model != IntPtr.Zero)
     {
         TfLiteModelDelete(model);
         model = IntPtr.Zero;
     }

     if (options != IntPtr.Zero)
     {
         TfLiteInterpreterOptionsDelete(options);
         options = IntPtr.Zero;
     }

     gpuDelegate?.Dispose();
     gpuDelegate = null;
 }
// Beispiel #3 ("Example #3") — artifact from the code-sample site this file was
// scraped from; the stray "0" below it was presumably a vote/rating counter, not code.
 /// <summary>
 /// Applies <paramref name="gpuDelegate"/> to the interpreter's graph by
 /// forwarding the raw native handles to TfLiteInterpreterModifyGraphWithDelegate.
 /// </summary>
 /// <param name="interpreter">Interpreter whose graph is modified.</param>
 /// <param name="gpuDelegate">Delegate whose native handle is attached.</param>
 /// <returns>The native call's <see cref="Status"/> result.</returns>
 public static Status ModifyGraphWithDelegate(this Interpreter interpreter, IGpuDelegate gpuDelegate)
     => TfLiteInterpreterModifyGraphWithDelegate(interpreter.InterpreterPointer, gpuDelegate.Delegate);