Code example #1
0
File: NeuralNetworkService.cs  Project: FLAMESpl/biai
        /// <summary>
        /// Normalizes the raw input values and runs the neural network prediction,
        /// logging progress via <c>predictingLogger</c> and signaling the outcome
        /// through the <c>PredictionCompleted</c> event.
        /// </summary>
        /// <param name="inputs">Raw input values, one per network input. Prediction
        /// is aborted (with <c>ProcessResult.Failure</c>) if any entry is null.</param>
        public void Predict(object[] inputs)
        {
            predictingLogger.Message($"{BELT}{DateTime.Now.TimeOfDay}{BELT}");
            predictingLogger.Message("Normalizing inputs.");

            double[] normalizedInputs = new double[inputs.Length];
            for (int i = 0; i < inputs.Length; i++)
            {
                if (inputs[i] == null)
                {
                    // A null entry cannot be normalized; abort and notify listeners.
                    predictingLogger.Message("Aborted. Value set contains null.");
                    PredictionCompleted?.Invoke(this, ProcessResult.Failure);
                    return;
                }

                // NOTE(review): assumes initializers[i].TryGetValue succeeds for every
                // non-null input — a miss would throw on .Value; confirm inputs are
                // validated upstream.
                normalizedInputs[i] = initializers[i].TryGetValue(inputs[i]).Value;
            }

            predictingLogger.Message("Starting prediction.");
            var result = network.Predict(normalizedInputs);

            predictingLogger.Message($"Finished with result {FormatOutput(result)}.");
            PredictionCompleted?.Invoke(this, ProcessResult.Success);
        }
Code example #2
0
        /// <summary>
        /// Runs the bundled TensorFlow Lite model against the supplied photo bytes and
        /// returns one <c>Prediction</c> per label, raising <c>PredictionCompleted</c>
        /// with the results before returning.
        /// </summary>
        /// <param name="bytes">Raw photo bytes to classify.</param>
        /// <returns>The per-label predictions, in label-file order.</returns>
        public async Task<IEnumerable<Prediction>> ClassifyAsync(byte[] bytes)
        {
            var mappedByteBuffer = GetModelAsMappedByteBuffer();

            System.Console.WriteLine($"Running Tensorflow interpreter");
            System.Console.WriteLine($"Tensorflow runtime version {TensorFlowLite.RuntimeVersion()}");
            System.Console.WriteLine($"Tensorflow schema version {TensorFlowLite.SchemaVersion()}");

            var interpreterOptions = new Interpreter.Options();
            //TODO: Pass from UI?
            var numThreads = 1;
            interpreterOptions.SetNumThreads(numThreads);
            //TODO: Look into use of GPU delegate vs NNAPI
            // https://developer.android.com/ndk/guides/neuralnetworks
            interpreterOptions.SetUseNNAPI(true);
            interpreterOptions.SetAllowFp16PrecisionForFp32(true);

            // Dispose the interpreter deterministically: it is a Java.Lang.Object
            // holding native TFLite resources, which otherwise leak until the
            // Java peer happens to be finalized.
            using (var interpreter = new Interpreter(mappedByteBuffer, interpreterOptions))
            {
                // NOTE(review): TFLite image inputs are typically [1, height, width,
                // channels]; shape[1]/shape[2] may be swapped here — harmless for
                // square models, but confirm against the model's input tensor.
                var shape = interpreter.GetInputTensor(0).Shape();
                var width = shape[1];
                var height = shape[2];

                var labels = await LoadLabelsAsync(LabelsFileName);
                var byteBuffer = GetPhotoAsByteBuffer(bytes, width, height);

                // One output row of confidence scores, one score per label.
                var outputLocations = new[] { new float[labels.Count] };
                var outputs = Java.Lang.Object.FromArray(outputLocations);

                interpreter.Run(byteBuffer, outputs);

                var classificationResult = outputs.ToArray<float[]>();

                // Presized: exactly one prediction per label.
                var result = new List<Prediction>(labels.Count);
                for (var i = 0; i < labels.Count; i++)
                {
                    result.Add(new Prediction(labels[i], classificationResult[0][i]));
                }

                //TODO: Consider using this event or MediatR to return results to view model
                //https://blog.duijzer.com/posts/mvvmcross_with_mediatr/
                PredictionCompleted?.Invoke(this, new PredictionCompletedEventArgs(result));

                return result;
            }
        }