/// <summary>Initializes a <c>Classifier</c>: loads the model, configures the interpreter for the requested device, and allocates the input buffer.</summary>
        protected ClassifierBase(Activity activity, Device device, int numThreads)
        {
            // Pixel buffer for exactly one input frame.
            intValues = new int[getImageSizeX() * getImageSizeY()];

            // Memory-map the .tflite model from the app package.
            tfliteModel = loadModelFile(activity);

            // Hardware acceleration: NNAPI is opt-in, CPU needs no extra
            // configuration, and the GPU delegate is not wired up yet.
            if (device == Device.NNAPI)
            {
                tfliteOptions.SetUseNNAPI(true);
            }
            else if (device == Device.GPU)
            {
                // TODO: add GPU delegate support once the
                // Xamarin.TensorFlow.Lite GpuDelegate binding is available
                // (gpuDelegate = new GpuDelegate(); tfliteOptions.AddDelegate(gpuDelegate);).
            }

            tfliteOptions.SetNumThreads(numThreads);
            tflite = new Interpreter(tfliteModel, tfliteOptions);
            labels = loadLabelList(activity);

            // Direct buffer sized batch * W * H * channels * bytes-per-channel,
            // in native byte order as the TFLite native runtime expects.
            var inputByteCount =
                DIM_BATCH_SIZE
                * getImageSizeX()
                * getImageSizeY()
                * DIM_PIXEL_SIZE
                * getNumBytesPerChannel();
            imgData = ByteBuffer.AllocateDirect(inputByteCount);
            imgData.Order(ByteOrder.NativeOrder());
            //LOGGER.d("Created a Tensorflow Lite Image Classifier.");
        }
        /// <summary>
        /// Runs the TFLite model against the supplied photo bytes and returns one
        /// <see cref="Prediction"/> per label, also raising <c>PredictionCompleted</c>.
        /// </summary>
        /// <param name="bytes">Encoded photo bytes; resized to the model's input shape.</param>
        /// <param name="numThreads">Interpreter thread count (default 1); callers (e.g. the UI) may raise it.</param>
        /// <returns>Predictions in label-file order, one confidence score per label.</returns>
        /// <exception cref="ArgumentException">Thrown when <paramref name="bytes"/> is null or empty.</exception>
        /// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="numThreads"/> is less than 1.</exception>
        public async Task<IEnumerable<Prediction>> ClassifyAsync(byte[] bytes, int numThreads = 1)
        {
            if (bytes == null || bytes.Length == 0)
            {
                throw new ArgumentException("Image bytes must be provided.", nameof(bytes));
            }
            if (numThreads < 1)
            {
                throw new ArgumentOutOfRangeException(nameof(numThreads), numThreads, "At least one interpreter thread is required.");
            }

            var mappedByteBuffer = GetModelAsMappedByteBuffer();

            System.Console.WriteLine($"Running Tensorflow interpreter");
            System.Console.WriteLine($"Tensorflow runtime version {TensorFlowLite.RuntimeVersion()}");
            System.Console.WriteLine($"Tensorflow schema version {TensorFlowLite.SchemaVersion()}");

            var interpreterOptions = new Interpreter.Options();
            interpreterOptions.SetNumThreads(numThreads);
            //TODO: Look into use of GPU delegate vs NNAPI
            // https://developer.android.com/ndk/guides/neuralnetworks
            interpreterOptions.SetUseNNAPI(true);
            interpreterOptions.SetAllowFp16PrecisionForFp32(true);

            // Interpreter wraps a native TFLite handle; dispose it when done so
            // repeated classifications do not leak native memory.
            using (var interpreter = new Interpreter(mappedByteBuffer, interpreterOptions))
            {
                // Input tensor shape is [batch, width, height, channels]; only
                // width/height are needed to scale the photo.
                var tensor = interpreter.GetInputTensor(0);
                var shape = tensor.Shape();
                var width = shape[1];
                var height = shape[2];

                var labels = await LoadLabelsAsync(LabelsFileName);
                var byteBuffer = GetPhotoAsByteBuffer(bytes, width, height);

                // Output: a single row of per-label confidence scores, marshalled
                // as a Java float[][] for the binding.
                var outputLocations = new[] { new float[labels.Count] };
                var outputs = Java.Lang.Object.FromArray(outputLocations);

                interpreter.Run(byteBuffer, outputs);

                var classificationResult = outputs.ToArray<float[]>();

                var result = new List<Prediction>(labels.Count);
                for (var i = 0; i < labels.Count; i++)
                {
                    result.Add(new Prediction(labels[i], classificationResult[0][i]));
                }

                //TODO: Consider using this event or MediatR to return results to view model
                //https://blog.duijzer.com/posts/mvvmcross_with_mediatr/
                PredictionCompleted?.Invoke(this, new PredictionCompletedEventArgs(result));

                return result;
            }
        }