Example #1
        /*
         * Process an image and identify what is in it. When done, the results are
         * returned so the caller can pass them to OnPhotoRecognitionReady(Collection).
         *
         * @param image Bitmap containing the image to be classified. The image can be
         *              of any size, but preprocessing might occur to resize it to the
         *              format expected by the classification process, which can be
         *              time- and power-consuming.
         */
        List<Recognition> DoRecognize(Bitmap image)
        {
            // One confidence value is produced per label
            var count = mLabels.Count;

            // Allocate buffer for image pixels: 4 bytes (sizeof(float)) per channel,
            // since this variant feeds a float model.
            int[]      intValues = new int[TF_INPUT_IMAGE_WIDTH * TF_INPUT_IMAGE_HEIGHT];
            ByteBuffer imgData   = ByteBuffer.AllocateDirect(
                4 * DIM_BATCH_SIZE * TF_INPUT_IMAGE_WIDTH * TF_INPUT_IMAGE_HEIGHT * DIM_PIXEL_SIZE);

            imgData.Order(ByteOrder.NativeOrder());

            // Read image data into buffer formatted for the TensorFlow model
            TensorFlowHelper.ConvertBitmapToByteBuffer(image, intValues, imgData);

            // Run inference on the network with the image bytes in imgData as input,
            // storing the results in the confidence array (one float per label).
            float[][] confidence = new float[1][];
            confidence[0] = new float[count];

            // Wrap the managed array in a Java object so the interpreter can fill it
            var conf = Java.Lang.Object.FromArray<float[]>(confidence);

            mTensorFlowLite.Run(imgData, conf);

            // Copy the results back into the managed array
            confidence = conf.ToArray<float[]>();

            // Get the results with the highest confidence and map them to their labels
            List<Recognition> results = TensorFlowHelper.GetBestResults(confidence[0], mLabels);

            return results;
        }
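
The examples above rely on TensorFlowHelper.ConvertBitmapToByteBuffer to pack the bitmap into the model's input buffer, but that helper is not shown. The following is a minimal sketch of what it could look like for the float model in Example #1; the scaling step, the IMAGE_MEAN/IMAGE_STD normalization constants, and the R, G, B channel order are assumptions, not the project's actual helper code.

        static void ConvertBitmapToByteBuffer(Bitmap bitmap, int[] intValues, ByteBuffer imgData)
        {
            // Assumed normalization constants; use whatever the model was trained with.
            const float IMAGE_MEAN = 128.0f;
            const float IMAGE_STD  = 128.0f;

            // Scale the bitmap to the input size the model expects.
            Bitmap scaled = Bitmap.CreateScaledBitmap(
                bitmap, TF_INPUT_IMAGE_WIDTH, TF_INPUT_IMAGE_HEIGHT, true);

            // Copy the ARGB pixels into the reusable int buffer.
            scaled.GetPixels(intValues, 0, scaled.Width, 0, 0, scaled.Width, scaled.Height);

            imgData.Rewind();
            foreach (int pixel in intValues)
            {
                // Write each channel as a normalized float in R, G, B order.
                imgData.PutFloat((((pixel >> 16) & 0xFF) - IMAGE_MEAN) / IMAGE_STD);
                imgData.PutFloat((((pixel >> 8) & 0xFF) - IMAGE_MEAN) / IMAGE_STD);
                imgData.PutFloat(((pixel & 0xFF) - IMAGE_MEAN) / IMAGE_STD);
            }
        }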
Example #2
        /*
         * Process an image and identify what is in it. When done, the method
         * OnPhotoRecognitionReady(Collection) is called with the results of the
         * image recognition process.
         *
         * @param image Bitmap containing the image to be classified. The image can be
         *              of any size, but preprocessing might occur to resize it to the
         *              format expected by the classification process, which can be
         *              time- and power-consuming.
         */
        void DoRecognize(Bitmap image)
        {
            // One confidence value is produced per label
            var count = mLabels.Count;

            // Allocate buffer for image pixels: one byte per channel, since this
            // variant feeds a quantized model (no sizeof(float) factor is needed).
            int[] intValues = new int[TF_INPUT_IMAGE_WIDTH * TF_INPUT_IMAGE_HEIGHT];
            ByteBuffer imgData = ByteBuffer.AllocateDirect(
                DIM_BATCH_SIZE * TF_INPUT_IMAGE_WIDTH * TF_INPUT_IMAGE_HEIGHT * DIM_PIXEL_SIZE);

            imgData.Order(ByteOrder.NativeOrder());

            // Read image data into buffer formatted for the TensorFlow model
            TensorFlowHelper.ConvertBitmapToByteBuffer(image, intValues, imgData);

            // Run inference on the network with the image bytes in imgData as input,
            // storing the results in the confidence array. The quantized model outputs
            // one unsigned byte per label (a float model would use float[][] instead,
            // as in Example #1).
            byte[][] confidence = new byte[1][];
            confidence[0] = new byte[count];

            // Wrap the managed array in a Java object so the interpreter can fill it
            var conf = Java.Lang.Object.FromArray<byte[]>(confidence);

            mTensorFlowLite.Run(imgData, conf);

            // Copy the results back into the managed array
            confidence = conf.ToArray<byte[]>();

            // Get the results with the highest confidence and map them to their labels
            List<Recognition> results = TensorFlowHelper.GetBestResults(confidence[0], mLabels);

            // Report the results with the highest confidence
            OnPhotoRecognitionReady(results);
        }
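
TensorFlowHelper.GetBestResults is also left to the helper class. A rough sketch for the quantized output in Example #2 is shown below; the Recognition constructor signature, its Confidence property, the three-result cut-off, and the 1/255 dequantization factor are all assumptions, and the sorting requires a using directive for System.Linq.

        static List<Recognition> GetBestResults(byte[] confidence, IList<string> labels)
        {
            const int MaxBestResults = 3;   // assumed cut-off

            var results = new List<Recognition>(confidence.Length);
            for (int i = 0; i < confidence.Length; i++)
            {
                // A quantized model reports each score as an unsigned byte (0..255).
                float score = confidence[i] / 255.0f;
                results.Add(new Recognition(i.ToString(), labels[i], score));
            }

            // Keep only the labels with the highest confidence.
            return results
                   .OrderByDescending(r => r.Confidence)
                   .Take(MaxBestResults)
                   .ToList();
        }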
Example #3
 /*
  * Initialize the classifier that will be used to process images.
  */
 void InitClassifier()
 {
     try
     {
         // The second constructor argument sets the number of threads the
         // interpreter uses for inference.
         mTensorFlowLite = new Interpreter(TensorFlowHelper.LoadModelFile(this, MODEL_FILE), 2);
         mLabels         = TensorFlowHelper.ReadLabels(this, LABELS_FILE);
     }
     catch (IOException e)
     {
         Log.Warn(TAG, "Unable to initialize TensorFlow Lite.", e);
     }
 }
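
TensorFlowHelper.LoadModelFile is likewise not shown. Assuming the .tflite model ships as an Android asset, a sketch along the lines of Google's TensorFlow Lite samples could look like the following (usings for Android.Content, Android.Content.Res, Java.IO, Java.Nio, and Java.Nio.Channels are assumed); treat it as an illustration rather than the project's actual helper.

 static MappedByteBuffer LoadModelFile(Context context, string modelFile)
 {
     using (AssetFileDescriptor fileDescriptor = context.Assets.OpenFd(modelFile))
     using (FileInputStream inputStream = new FileInputStream(fileDescriptor.FileDescriptor))
     {
         // Memory-map the model's byte range inside the packaged assets.
         FileChannel fileChannel = inputStream.Channel;
         long startOffset = fileDescriptor.StartOffset;
         long declaredLength = fileDescriptor.DeclaredLength;
         return fileChannel.Map(FileChannel.MapMode.ReadOnly, startOffset, declaredLength);
     }
 }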