        public static void Inference<TResult>(this IInferenceContext<PointerBitmap, TResult> context, TResult result, InferenceInput<MemoryBitmap> image, RECT? imageRect = null)
            where TResult : class
        {
            if (image.Content.PixelFormat == Pixel.BGR24.Format)
            {
                context.Inference(result, image.Content.OfType<Pixel.BGR24>(), imageRect);
                return;
            }

            // the input is not BGR24, so convert it to a temporary BGR24 bitmap first
            MemoryBitmap tmp = default;

            image.Content.AsSpanBitmap().CopyTo(ref tmp, Pixel.BGR24.Format);

            // pin the converted bitmap and run the inference against its pointer
            tmp.AsSpanBitmap().PinReadablePointer
            (
                ptrBmp => context.Inference(result, (image.CaptureDevice, image.CaptureTime, ptrBmp), imageRect)
            );
        }
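
        // Usage sketch (hypothetical caller; variable names are assumptions, and the tuple
        // argument assumes InferenceInput<MemoryBitmap> converts implicitly from a
        // (device, time, bitmap) tuple, as the pinned call above does for PointerBitmap):
        //
        //   var results = new DetectedObject.Collection(new SizeF(frame.Width, frame.Height));
        //   context.Inference(results, (captureDevice, captureTime, frame));
        //
        // Note that non-BGR24 frames are converted on every call; hot paths should
        // prefer supplying BGR24 frames directly to avoid the per-frame copy.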
        public void SetFrameSize(InferenceInput<PointerBitmap> input)
        {
            SetFrameSize(input.Content.Width, input.Content.Height);
        }
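
        /// <summary>
        /// Runs a two-tier detection pass: when no objects are tracked, a broad detector
        /// scans the frame to seed new tracks; a narrow detector then refines each track
        /// inside a window predicted from its previous detections.
        /// </summary>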
        public void Inference(DetectedObject.Collection result, InferenceInput<PointerBitmap> input, Rectangle? inputWindow = null)
        {
            // if we don't have any tracked faces,
            // or the tracked faces have very low confidence,
            // try to find new ones:

            // result.Clear();

            // seed this frame's statistics with a baseline zero score
            _Stats_AddNarrowScore(input.CaptureTime, Score.Zero);
            _Stats_AddBroadScore(input.CaptureTime, Score.Zero);

            if (_BroadTracked.Count == 0)
            {
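                // no objects are currently tracked; run the broad detector
                // over the input window (or the full frame) to seed new tracks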
                var seeds = new DetectedObject.Collection(new SizeF(input.Content.Width, input.Content.Height));
                _BroadDetector.Inference(seeds, input, inputWindow);

                // keep only detections that match the object filter and have a valid score
                var tracked = seeds.Objects
                              .Where(item => item.Name == _ObjectFilter)
                              .Where(item => item.Score.IsValid)
                              .Select(item => new DetectedFrame(item.Rect, _BroadOutputScale, input.CaptureTime, item.Score))
                              .ToList();

                // collapse overlapping detections so each object is seeded only once
                DetectedFrame.RemoveOverlapping(tracked);

                _BroadTracked.Clear();
                _BroadTracked.AddRange(tracked.Select(item => new _TrackedObject(item)));

                // report each newly seeded track and record its broad score
                foreach (var o in _BroadTracked.ToList())
                {
                    var r = o.GetDetectionWindow(1);
                    result.AddObject(r, o._DetectionT1.Value.Score, "Detected");

                    _Stats_AddBroadScore(input.CaptureTime, o._DetectionT1.Value.Score);
                }
            }

            foreach (var o in _BroadTracked.ToList())
            {
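                // predict where the object should appear next from its last two detections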
                var broadRect = Rectangle.Round(_NarrowDetector.GetNextDetectionWindow(o._DetectionT1.Value, o._DetectionT0));

                // run the narrow detector restricted to the predicted window
                _NarrowTracked.Clear();
                _NarrowTracked.SetFrameSize(input);
                _NarrowDetector.Inference(_NarrowTracked, input, broadRect);

                // take the first narrow detection that matches the filter and has a valid score
                var item = _NarrowTracked
                           .Objects
                           .Where(obj => obj.Name == _ObjectFilter)
                           .Where(obj => obj.Score.IsValid)
                           .FirstOrDefault();

                if (item.Name == null || item.Area < 16) // tracking lost: no match, or the match is too small
                {
                    _BroadTracked.Remove(o);
                }
                else
                {
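                    // report the search window and merge this track's narrow results into the output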
                    result.Add(_NarrowTracked, result.AddObject(broadRect, item.Score, "Window"));

                    // update the broad tracking window for this face
                    o.AddDetection(new DetectedFrame(item.Rect, input.CaptureTime, item.Score));
                }

                // record this frame's narrow score for the track
                _Stats_AddNarrowScore(input.CaptureTime, item.Score);
            }
        }