Example #1
 private void ColorFrameReader_FrameArrived_FrameRenderer(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
 {
     _frameRenderer.ProcessFrame(sender.TryAcquireLatestFrame());
 }
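Note that TryAcquireLatestFrame can return null (for example when no new frame is available by the time the handler runs); whether ProcessFrame tolerates a null argument depends on the FrameRenderer helper. A slightly more defensive variant of the same handler, sketched under that assumption:

 private void ColorFrameReader_FrameArrived_FrameRenderer(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
 {
     // TryAcquireLatestFrame may return null; only forward real frames.
     var frame = sender.TryAcquireLatestFrame();
     if (frame != null)
     {
         _frameRenderer.ProcessFrame(frame);
     }
 }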
Example #2
        private void FrameReader_FrameArrived(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
        {
            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try {
                var frame = sender.TryAcquireLatestFrame();
                if (frame != null)
                {
                    this.frames[frame.SourceKind] = frame;
                }

                if (this.frames[MediaFrameSourceKind.Color] != null && this.frames[MediaFrameSourceKind.Depth] != null)
                {
                    var colorDesc = this.frames[MediaFrameSourceKind.Color].VideoMediaFrame.SoftwareBitmap.LockBuffer(BitmapBufferAccessMode.Read).GetPlaneDescription(0);
                    var depthDesc = this.frames[MediaFrameSourceKind.Depth].VideoMediaFrame.SoftwareBitmap.LockBuffer(BitmapBufferAccessMode.Read).GetPlaneDescription(0);

                    // get points in 3d space
                    DepthCorrelatedCoordinateMapper coordinateMapper = this.frames[MediaFrameSourceKind.Depth].VideoMediaFrame.DepthMediaFrame.TryCreateCoordinateMapper(
                        this.frames[MediaFrameSourceKind.Color].VideoMediaFrame.CameraIntrinsics, this.frames[MediaFrameSourceKind.Color].CoordinateSystem);

                    // get color information
                    var    bitmap     = SoftwareBitmap.Convert(this.frames[MediaFrameSourceKind.Color].VideoMediaFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Ignore);
                    byte[] colorBytes = new byte[bitmap.PixelWidth * bitmap.PixelHeight * 4];
                    bitmap.CopyToBuffer(colorBytes.AsBuffer());

                    Vector3[] depthPoints = new Vector3[this.mapPoints.Length];
                    coordinateMapper.UnprojectPoints(this.mapPoints, this.frames[MediaFrameSourceKind.Color].CoordinateSystem, depthPoints);

                    // resize image 1920x1080 -> 480x270 and apply depth filter
                    byte[] resizedGrayColorBytes = new byte[240 * 135];
                    int    row = 0;
                    int    at  = 0;
                    for (int idx = 0; idx < resizedGrayColorBytes.Length; ++idx)
                    {
                        var depth = depthPoints[idx];

                        // get depth bound for pixel idx
                        float depthBound = 10.0f;
                        if (float.IsNaN(depth.X))
                        {
                            depthBound = 0.0f;
                        }
                        else
                        {
                            for (int i = 0; i < this.depthMap.Count; ++i)
                            {
                                if (this.depthMap[i].xMin < depth.X && depth.X < this.depthMap[i].xMax)
                                {
                                    depthBound = this.depthMap[i].F(depth.X);
                                    break;
                                }
                            }
                        }

                        // get color of pixel idx
                        // topLeft : at; topRight : at + strideX - 4;
                        // bottomLeft : at + rgbWidth * (strideY - 1); bottomRight : at + rgbWidth * (strideY - 1) + strideX - 4;
                        float bgr = 255;
                        if (depth.Z < depthBound)
                        {
                            bgr =
                                (Convert.ToInt16(colorBytes[at] + colorBytes[at + 28] + colorBytes[at + 53760] + colorBytes[at + 53788]
                                                 + colorBytes[at + 1] + colorBytes[at + 29] + colorBytes[at + 53761] + colorBytes[at + 53789]
                                                 + colorBytes[at + 2] + colorBytes[at + 30] + colorBytes[at + 53762] + colorBytes[at + 53790])) * 0.0833333f;
                        }
                        resizedGrayColorBytes[idx] = BitConverter.GetBytes(Convert.ToInt32(bgr))[0];

                        // iterate
                        at += 32;
                        if (at - row * 7680 > 7648)   // at - row * rgbWidth > rgbWidth - strideX
                        {
                            row += 8;
                            at   = row * 7680;
                        }
                    }
                    var faces = this.cascadeClassifier.DetectMultiScale(resizedGrayColorBytes, 240, 135);

                    // debug image (optional)
                    //byte[] dbg = new byte[240 * 135 + 4];
                    //Buffer.BlockCopy(BitConverter.GetBytes(240), 0, dbg, 0, 2);
                    //Buffer.BlockCopy(BitConverter.GetBytes(135), 0, dbg, 2, 2);
                    //Buffer.BlockCopy(resizedGrayColorBytes, 0, dbg, 4, 32400);
                    //this.client.Publish("/kinect/face/debug", dbg);

                    // reset face found status
                    foreach (var log in this.faceLog)
                    {
                        log.SetFoundFalse();
                    }

                    // create byte array from each face
                    uint          totalSize     = 0;
                    List <byte[]> faceBytesList = new List <byte[]>();
                    at = 1;
                    int numDetectFaces = faces[0];
                    for (int j = 0; j < numDetectFaces; ++j)
                    {
                        // parse result from C++
                        uint xj    = Convert.ToUInt32(faces[at++]) * 8;
                        uint yj    = Convert.ToUInt32(faces[at++]) * 8;
                        uint width = Convert.ToUInt32(faces[at++]) * 8;
                        // result is head + shoulder -> multiply 0.6 to get head only region
                        uint height = Convert.ToUInt32(Convert.ToInt32(faces[at++]) * 8 * 0.6);
                        // center crop image
                        xj   += Convert.ToUInt32(width * 0.2);
                        width = Convert.ToUInt32(width * 0.6);

                        uint   size      = width * height * 3 + 20;
                        byte[] faceBytes = new byte[size];
                        totalSize += size;

                        // get face 3d position
                        var centerPoint    = new Point(xj + Convert.ToUInt32(width * 0.5), yj + Convert.ToUInt32(height * 0.5));
                        var positionVector = coordinateMapper.UnprojectPoint(centerPoint, this.frames[MediaFrameSourceKind.Color].CoordinateSystem);

                        // get likely face id from face position
                        int   likelyEnum = -1;
                        float likeliness = float.MaxValue;
                        for (int i = 0; i < this.faceLog.Count; ++i)
                        {
                            if (this.faceLog[i].isTracked && !this.faceLog[i].foundInThisFrame)
                            {
                                var dist = Vector3.Distance(positionVector, this.faceLog[i].facePosition);
                                if (dist < 0.3 && dist < likeliness)   // it is unlikely for a face to jump 30cm between frames
                                {
                                    likelyEnum = i;
                                    likeliness = dist;
                                }
                            }
                        }
                        if (likelyEnum < 0) // if no likely face was found
                        {
                            for (int i = 0; i < this.faceLog.Count; ++i)
                            {
                                if (!this.faceLog[i].isTracked)   // a new track is registered (will switch to isTracked when called Update)
                                {
                                    likelyEnum = i;
                                    break;
                                }
                            }
                        }
                        if (likelyEnum < 0) // trackable number of faces already occupied, cannot track new face
                        {
                            continue;       // id will be free once existing track is lost
                        }
                        this.faceLog[likelyEnum].Update(positionVector);

                        // first 4 bytes are the face image size (2 bytes width, 2 bytes height)
                        Array.Copy(BitConverter.GetBytes(width), 0, faceBytes, 0, 2);
                        Array.Copy(BitConverter.GetBytes(height), 0, faceBytes, 2, 2);

                        // next 12 bytes are the 3D position of the face
                        var position = new float[] { positionVector.X, positionVector.Y, positionVector.Z };
                        Buffer.BlockCopy(position, 0, faceBytes, 4, 12);

                        // next 4 bytes are the face id
                        Buffer.BlockCopy(BitConverter.GetBytes(this.faceLog[likelyEnum].id), 0, faceBytes, 16, 4);

                        // copy rgb image
                        for (int y = 0; y < height; ++y)
                        {
                            var srcIdx  = Convert.ToInt32(((y + yj) * colorDesc.Width + xj) * 4);
                            var destIdx = Convert.ToInt32((y * width) * 3 + 20);
                            for (int x = 0; x < width; ++x)
                            {
                                Buffer.BlockCopy(colorBytes, srcIdx + x * 4, faceBytes, destIdx + x * 3, 3);
                            }
                        }

                        faceBytesList.Add(faceBytes);
                    }

                    // for faces that were not found in current frame, release track state
                    foreach (var log in this.faceLog)
                    {
                        if (log.isTracked && !log.foundInThisFrame)
                        {
                            ++log.lostTrackCount;
                            // get fps; note that the clock is always running while frames are being captured
                            int fps = Convert.ToInt32(kinectFrameCount / this.appClock.Elapsed.TotalSeconds);
                            if (log.lostTrackCount > 2 * fps)   // lost for two seconds
                            {
                                log.Free(this.nextReservedFaceId);
                                ++this.nextReservedFaceId;
                            }
                        }
                    }

                    // concatenate byte arrays to send (post-processed because totalSize is not known until all faces are parsed)
                    int    head  = 1; // the first byte is the number of faces
                    byte[] bytes = new byte[totalSize + 1];
                    Array.Copy(BitConverter.GetBytes(faceBytesList.Count), 0, bytes, 0, head);
                    foreach (byte[] faceByte in faceBytesList)
                    {
                        Array.Copy(faceByte, 0, bytes, head, faceByte.Length);
                        head += faceByte.Length;
                    }
                    this.client.Publish("/kinect/detected/face", bytes);

                    ++this.kinectFrameCount;

                    bitmap.Dispose();
                    coordinateMapper.Dispose();
                    this.frames[MediaFrameSourceKind.Color].Dispose();
                    this.frames[MediaFrameSourceKind.Depth].Dispose();
                    this.frames[MediaFrameSourceKind.Color] = null;
                    this.frames[MediaFrameSourceKind.Depth] = null;
                }
            } catch (Exception ex) {
                System.Diagnostics.Debug.WriteLine("FrameReader_FrameArrived failed: " + ex.Message);
            } finally {
                frameProcessingSemaphore.Release();
            }
        }
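The comments above define the wire format published on /kinect/detected/face: a single leading byte holds the face count, and each face contributes 2 bytes of width, 2 bytes of height, 12 bytes (three little-endian floats) of 3D position, 4 bytes of face id, and width * height * 3 BGR pixel bytes. A minimal subscriber-side decoder could look like the sketch below (the DetectedFace type and ParseFaceMessage name are hypothetical, and System plus System.Collections.Generic are assumed to be imported):

public sealed class DetectedFace
{
    public ushort Width;
    public ushort Height;
    public float  X, Y, Z;
    public int    Id;
    public byte[] BgrPixels;
}

public static List<DetectedFace> ParseFaceMessage(byte[] bytes)
{
    var faces = new List<DetectedFace>();
    int count = bytes[0];   // first byte: number of faces
    int at    = 1;
    for (int i = 0; i < count; ++i)
    {
        var face = new DetectedFace
        {
            Width  = BitConverter.ToUInt16(bytes, at),
            Height = BitConverter.ToUInt16(bytes, at + 2),
            X      = BitConverter.ToSingle(bytes, at + 4),
            Y      = BitConverter.ToSingle(bytes, at + 8),
            Z      = BitConverter.ToSingle(bytes, at + 12),
            Id     = BitConverter.ToInt32(bytes, at + 16)
        };
        int pixels = face.Width * face.Height * 3;
        face.BgrPixels = new byte[pixels];
        Buffer.BlockCopy(bytes, at + 20, face.BgrPixels, 0, pixels);
        faces.Add(face);
        at += 20 + pixels;
    }
    return faces;
}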
Example #3
        private async void ActionButton2_Click(object sender, RoutedEventArgs e)
        {
            var frameSourceGroups = await MediaFrameSourceGroup.FindAllAsync();


            // Select the first source group that exposes a color source

            var selectedGroupObjects = frameSourceGroups.Select(group =>
                                                                new
            {
                sourceGroup     = group,
                colorSourceInfo = group.SourceInfos.FirstOrDefault((sourceInfo) =>
                {
                    return(sourceInfo.SourceKind == MediaFrameSourceKind.Color);
                })
            }).Where(t => t.colorSourceInfo != null)
                                       .FirstOrDefault();

            MediaFrameSourceGroup selectedGroup   = selectedGroupObjects?.sourceGroup;
            MediaFrameSourceInfo  colorSourceInfo = selectedGroupObjects?.colorSourceInfo;

            if (selectedGroup == null)
            {
                return;
            }

            mediaCapture = new MediaCapture();

            var settings = new MediaCaptureInitializationSettings()
            {
                SourceGroup          = selectedGroup,
                SharingMode          = MediaCaptureSharingMode.ExclusiveControl,
                MemoryPreference     = MediaCaptureMemoryPreference.Cpu,
                StreamingCaptureMode = StreamingCaptureMode.Video
            };

            try
            {
                await mediaCapture.InitializeAsync(settings);
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine("MediaCapture initialization failed: " + ex.Message);
                return;
            }

            var colorFrameSource = mediaCapture.FrameSources[colorSourceInfo.Id];
            var preferredFormat  = colorFrameSource.SupportedFormats.Where(format =>
            {
                return(format.VideoFormat.Width == 1920);
            }).FirstOrDefault();


            if (preferredFormat == null)
            {
                // Our desired format is not supported
                return;
            }
            await colorFrameSource.SetFormatAsync(preferredFormat);


            mediaFrameReader = await mediaCapture.CreateFrameReaderAsync(colorFrameSource, MediaEncodingSubtypes.Argb32);

            mediaFrameReader.FrameArrived += ColorFrameReader_FrameArrived_FrameRenderer;

            _frameRenderer = new FrameRenderer(imageElement);


            await mediaFrameReader.StartAsync();
        }
        private FrameGrabber(MediaCapture mediaCapture = null, MediaFrameSource mediaFrameSource = null, MediaFrameReader mediaFrameReader = null)
        {
            this.mediaCapture     = mediaCapture;     // captures audio, video, and images from the camera
            this.mediaFrameSource = mediaFrameSource; // source of camera frames (the color camera in this case)
            this.mediaFrameReader = mediaFrameReader; // provides access to frames from the MediaFrameSource and notifies when a new frame arrives


            if (this.mediaFrameReader != null)
            {
                this.mediaFrameReader.FrameArrived += MediaFrameReader_FrameArrived;
            }
        }
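Because the FrameGrabber constructor is private, the async setup presumably happens in a static factory that builds the MediaCapture, frame source, and reader before calling it. A sketch of such a factory, under the assumption that the class exposes one (the CreateAsync name and the selection logic are assumptions, not the original API):

        public static async Task<FrameGrabber> CreateAsync()
        {
            var mediaCapture = new MediaCapture();
            await mediaCapture.InitializeAsync(new MediaCaptureInitializationSettings
            {
                MemoryPreference     = MediaCaptureMemoryPreference.Cpu,
                StreamingCaptureMode = StreamingCaptureMode.Video
            });

            // Pick the first color frame source; a real implementation would be more selective.
            var mediaFrameSource = mediaCapture.FrameSources.Values
                                   .FirstOrDefault(s => s.Info.SourceKind == MediaFrameSourceKind.Color);
            if (mediaFrameSource == null)
            {
                return new FrameGrabber();
            }

            var mediaFrameReader = await mediaCapture.CreateFrameReaderAsync(mediaFrameSource, MediaEncodingSubtypes.Argb32);
            await mediaFrameReader.StartAsync();

            return new FrameGrabber(mediaCapture, mediaFrameSource, mediaFrameReader);
        }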
Example #5
        private void FrameReader_FrameArrived(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
        {
            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try {
                var frame = sender.TryAcquireLatestFrame();
                if (frame != null)
                {
                    this.frames[frame.SourceKind] = frame;
                }

                if (this.frames[MediaFrameSourceKind.Color] != null && this.frames[MediaFrameSourceKind.Depth] != null)
                {
                    var colorDesc = this.frames[MediaFrameSourceKind.Color].VideoMediaFrame.SoftwareBitmap.LockBuffer(BitmapBufferAccessMode.Read).GetPlaneDescription(0);
                    var depthDesc = this.frames[MediaFrameSourceKind.Depth].VideoMediaFrame.SoftwareBitmap.LockBuffer(BitmapBufferAccessMode.Read).GetPlaneDescription(0);

                    // get points in 3d space
                    DepthCorrelatedCoordinateMapper coordinateMapper = this.frames[MediaFrameSourceKind.Depth].VideoMediaFrame.DepthMediaFrame.TryCreateCoordinateMapper(
                        this.frames[MediaFrameSourceKind.Color].VideoMediaFrame.CameraIntrinsics, this.frames[MediaFrameSourceKind.Color].CoordinateSystem);

                    // get color information
                    var    bitmap     = SoftwareBitmap.Convert(this.frames[MediaFrameSourceKind.Color].VideoMediaFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Ignore);
                    byte[] colorBytes = new byte[bitmap.PixelWidth * bitmap.PixelHeight * 4];
                    bitmap.CopyToBuffer(colorBytes.AsBuffer());

                    Vector3[] depthPoints = new Vector3[this.mapPoints.Length];
                    coordinateMapper.UnprojectPoints(this.mapPoints, this.frames[MediaFrameSourceKind.Color].CoordinateSystem, depthPoints);

                    // resize image 1920x1080 -> 480x270 and apply depth filter
                    byte[] resizedGrayColorBytes = new byte[240 * 135];
                    int    row = 0;
                    int    at  = 0;
                    for (int idx = 0; idx < resizedGrayColorBytes.Length; ++idx)
                    {
                        var depth = depthPoints[idx];

                        // get depth bound for pixel idx
                        float depthBound = 10.0f;
                        if (float.IsNaN(depth.X))
                        {
                            depthBound = 0.0f;
                        }
                        else
                        {
                            for (int i = 0; i < this.depthMap.Count; ++i)
                            {
                                if (this.depthMap[i].xMin < depth.X && depth.X < this.depthMap[i].xMax)
                                {
                                    depthBound = this.depthMap[i].F(depth.X);
                                    break;
                                }
                            }
                        }

                        // get color of pixel idx
                        // topLeft : at; topRight : at + strideX - 4;
                        // bottomLeft : at + rgbWidth * (strideY - 1); bottomRight : at + rgbWidth * (strideY - 1) + strideX - 4;
                        float bgr = 255;
                        if (depth.Z < depthBound)
                        {
                            bgr =
                                (Convert.ToInt16(colorBytes[at] + colorBytes[at + 28] + colorBytes[at + 53760] + colorBytes[at + 53788]
                                                 + colorBytes[at + 1] + colorBytes[at + 29] + colorBytes[at + 53761] + colorBytes[at + 53789]
                                                 + colorBytes[at + 2] + colorBytes[at + 30] + colorBytes[at + 53762] + colorBytes[at + 53790])) * 0.0833333f;
                        }
                        resizedGrayColorBytes[idx] = BitConverter.GetBytes(Convert.ToInt32(bgr))[0];

                        // iterate
                        at += 32;
                        if (at - row * 7680 > 7648)   // at - row * rgbWidth > rgbWidth - strideX
                        {
                            row += 8;
                            at   = row * 7680;
                        }
                    }
                    var faces = this.cascadeClassifier.DetectMultiScale(resizedGrayColorBytes, 240, 135);

                    // debug image (optional)
                    //byte[] dbg = new byte[240 * 135 + 4];
                    //Buffer.BlockCopy(BitConverter.GetBytes(240), 0, dbg, 0, 2);
                    //Buffer.BlockCopy(BitConverter.GetBytes(135), 0, dbg, 2, 2);
                    //Buffer.BlockCopy(resizedGrayColorBytes, 0, dbg, 4, 32400);
                    //this.client.Publish("/kinect/face/debug", dbg);

#if TRACKING_ON
                    // reset face found status
                    foreach (var log in this.faceLog)
                    {
                        log.SetFoundFalse();
                    }

                    // parse data from cascadeClassifier
                    List <Vector3> facePositionList = new List <Vector3>();
                    List <Tuple <uint, uint, uint, uint> > faceBoundsList = new List <Tuple <uint, uint, uint, uint> >();
                    at = 1;
                    int numDetectFaces = faces[0];
                    for (int j = 0; j < numDetectFaces; ++j)
                    {
                        // parse result from C++
                        uint xj    = Convert.ToUInt32(faces[at++]) * 8;
                        uint yj    = Convert.ToUInt32(faces[at++]) * 8;
                        uint width = Convert.ToUInt32(faces[at++]) * 8;
                        // result is head + shoulder -> multiply 0.6 to get head only region
                        uint height = Convert.ToUInt32(Convert.ToInt32(faces[at++]) * 8 * 0.6);
                        // center crop image
                        xj   += Convert.ToUInt32(width * 0.2);
                        width = Convert.ToUInt32(width * 0.6);

                        // get face 3d position
                        var centerPoint    = new Point(xj + Convert.ToUInt32(width * 0.5), yj + Convert.ToUInt32(height * 0.5));
                        var positionVector = coordinateMapper.UnprojectPoint(centerPoint, this.frames[MediaFrameSourceKind.Color].CoordinateSystem);

                        facePositionList.Add(positionVector);
                        faceBoundsList.Add(new Tuple <uint, uint, uint, uint>(xj, yj, width, height));
                    }

                    // find face in current frame that matches log (only one face is linked to each log)
                    int[]   faceCorrespondingLog    = Enumerable.Repeat(-1, facePositionList.Count).ToArray();
                    float[] faceCorrespondenceScore = Enumerable.Repeat(float.MaxValue, facePositionList.Count).ToArray();
                    for (int i = 0; i < this.faceLog.Count; ++i)
                    {
                        if (!faceLog[i].isTracked)
                        {
                            continue;                        // log is currently not used
                        }
                        int   likelyEnum = -1;
                        float likeliness = float.MaxValue;
                        for (int j = 0; j < facePositionList.Count; ++j)
                        {
                            var dist = Vector3.Distance(facePositionList[j], this.faceLog[i].facePosition);
                            if (dist < 0.3 && dist < likeliness)   // it is unlikely for a face to jump 30cm between frames
                            {
                                likelyEnum = j;
                                likeliness = dist;
                            }
                        }

                        if (likelyEnum >= 0 && likeliness < faceCorrespondenceScore[likelyEnum])
                        {
                            if (faceCorrespondingLog[likelyEnum] >= 0)
                            {
                                this.faceLog[faceCorrespondingLog[likelyEnum]].foundInThisFrame = false; // last log was not right match, undo match
                            }
                            faceCorrespondingLog[likelyEnum] = i;
                            this.faceLog[i].foundInThisFrame = true; // this log is now matched with face
                        }
                    }

                    // check faceCorrespondingLog and create byte array from each face
                    uint          totalSize     = 0;
                    List <byte[]> faceBytesList = new List <byte[]>();
                    for (int j = 0; j < faceCorrespondingLog.Length; ++j)
                    {
                        int     likelyEnum     = -1;
                        Vector3 positionVector = facePositionList[j];
                        if (faceCorrespondingLog[j] < 0)   // corresponding log was not yet found
                        // find likely face log from logs
                        {
                            float likeliness = float.MaxValue;
                            for (int i = 0; i < this.faceLog.Count; ++i)
                            {
                                if (this.faceLog[i].isTracked)
                                {
                                    var dist = Vector3.Distance(positionVector, this.faceLog[i].facePosition);
                                    if (dist < 0.3 && dist < likeliness)   // it is unlikely for a face to jump 30cm between frames
                                    {
                                        likelyEnum = i;
                                        likeliness = dist;
                                    }
                                }
                            }
                            if (likelyEnum >= 0 && this.faceLog[likelyEnum].foundInThisFrame)
                            {
                                continue;       // detected face was somehow a duplicate of an existing region, ignore
                            }
                            if (likelyEnum < 0) // if no likely face was found
                            {
                                for (int i = 0; i < this.faceLog.Count; ++i)
                                {
                                    if (!this.faceLog[i].isTracked)   // a new track is registered (will switch to isTracked when called Update)
                                    {
                                        likelyEnum = i;
                                        break;
                                    }
                                }
                            }
                            if (likelyEnum < 0) // trackable number of faces already occupied, cannot track new face
                            {
                                continue;       // id will be free once existing track is lost
                            }
                        }
                        else     // corresponding log is already found
                        {
                            likelyEnum = faceCorrespondingLog[j];
                        }

                        this.faceLog[likelyEnum].Update(positionVector);

                        uint xj     = faceBoundsList[j].Item1;
                        uint yj     = faceBoundsList[j].Item2;
                        uint width  = faceBoundsList[j].Item3;
                        uint height = faceBoundsList[j].Item4;

                        uint   size      = width * height * 3 + 20;
                        byte[] faceBytes = new byte[size];
                        totalSize += size;

                        // first 4 bytes are the face image size (2 bytes width, 2 bytes height)
                        Array.Copy(BitConverter.GetBytes(width), 0, faceBytes, 0, 2);
                        Array.Copy(BitConverter.GetBytes(height), 0, faceBytes, 2, 2);

                        // next 12 bytes are the 3D position of the face
                        var position = new float[] { positionVector.X, positionVector.Y, positionVector.Z };
                        Buffer.BlockCopy(position, 0, faceBytes, 4, 12);

                        // next 4 bytes are the face id
                        Buffer.BlockCopy(BitConverter.GetBytes(this.faceLog[likelyEnum].id), 0, faceBytes, 16, 4);

                        // copy rgb image
                        for (int y = 0; y < height; ++y)
                        {
                            var srcIdx  = Convert.ToInt32(((y + yj) * colorDesc.Width + xj) * 4);
                            var destIdx = Convert.ToInt32((y * width) * 3 + 20);
                            for (int x = 0; x < width; ++x)
                            {
                                Buffer.BlockCopy(colorBytes, srcIdx + x * 4, faceBytes, destIdx + x * 3, 3);
                            }
                        }

                        faceBytesList.Add(faceBytes);
                    }

                    // for faces that were not found in current frame, release track state
                    foreach (var log in this.faceLog)
                    {
                        if (log.isTracked && !log.foundInThisFrame)
                        {
                            ++log.lostTrackCount;
                            // get fps; note that the clock is always running while frames are being captured
                            int fps = Convert.ToInt32(kinectFrameCount / this.appClock.Elapsed.TotalSeconds);
                            if (log.lostTrackCount > 10 * fps)   // lost for ten seconds
                            {
                                log.Free(this.nextReservedFaceId);
                                ++this.nextReservedFaceId;
                            }
                        }
                    }
#else
                    // parse data from cascadeClassifier
                    uint          totalSize     = 0;
                    List <byte[]> faceBytesList = new List <byte[]>();
                    at = 1;
                    int numDetectFaces = faces[0];
                    for (int j = 0; j < numDetectFaces; ++j)
                    {
#if VALID_FACE_CHECK
                        // parse result from C++
                        uint xjRaw    = Convert.ToUInt32(faces[at++]);
                        uint yjRaw    = Convert.ToUInt32(faces[at++]);
                        uint widthRaw = Convert.ToUInt32(faces[at++]);
                        // result is head + shoulder -> multiply 0.6 to get head only region
                        uint heightRaw = Convert.ToUInt32(Convert.ToInt32(faces[at++]) * 0.6);
                        // center crop image
                        xjRaw   += Convert.ToUInt32(widthRaw * 0.2);
                        widthRaw = Convert.ToUInt32(widthRaw * 0.6);

                        uint xj     = xjRaw * 8;
                        uint yj     = yjRaw * 8;
                        uint width  = widthRaw * 8;
                        uint height = heightRaw * 8;
#else
                        // parse result from C++
                        uint xj    = Convert.ToUInt32(faces[at++]) * 8;
                        uint yj    = Convert.ToUInt32(faces[at++]) * 8;
                        uint width = Convert.ToUInt32(faces[at++]) * 8;
                        // result is head + shoulder -> multiply 0.6 to get head only region
                        uint height = Convert.ToUInt32(Convert.ToInt32(faces[at++]) * 8 * 0.6);
                        // center crop image
                        xj   += Convert.ToUInt32(width * 0.2);
                        width = Convert.ToUInt32(width * 0.6);
#endif
                        // get face 3d position
                        var centerPoint    = new Point(xj + Convert.ToUInt32(width * 0.5), yj + Convert.ToUInt32(height * 0.5));
                        var positionVector = coordinateMapper.UnprojectPoint(centerPoint, this.frames[MediaFrameSourceKind.Color].CoordinateSystem);
#if VALID_FACE_CHECK
                        // find y height
                        uint  yjMax       = yjRaw + heightRaw;
                        uint  xjMax       = xjRaw + widthRaw;
                        float threshold_z = positionVector.Z + 0.25f;
                        float minimumY    = float.MaxValue;
                        float maximumY    = float.MinValue;
                        for (uint y = yjRaw; y < yjMax; ++y)
                        {
                            int   rowPoints = 0;
                            float averageY  = 0.0f;
                            uint  point     = xjRaw + y * 240;
                            var   pxy       = depthPoints[point];
                            for (uint x = xjRaw; x < xjMax; ++x)
                            {
                                if (!float.IsNaN(pxy.Y) && !float.IsInfinity(pxy.Y) && (pxy.Z < threshold_z))
                                {
                                    averageY += pxy.Y;
                                    ++rowPoints;
                                }
                                pxy = depthPoints[++point];
                            }
                            if (rowPoints != 0)
                            {
                                averageY /= rowPoints;
                                if (averageY < minimumY)
                                {
                                    minimumY = averageY;
                                }
                                if (averageY > maximumY)
                                {
                                    maximumY = averageY;
                                }
                            }
                        }

                        if ((maximumY - minimumY) > 0.35 || (maximumY - minimumY) < 0.1) // unlikely a face
                        {
                            continue;
                        }
#endif
                        uint   size      = width * height * 3 + 20;
                        byte[] faceBytes = new byte[size];
                        totalSize += size;

                        // first 4 bytes are the face image size (2 bytes width, 2 bytes height)
                        Array.Copy(BitConverter.GetBytes(width), 0, faceBytes, 0, 2);
                        Array.Copy(BitConverter.GetBytes(height), 0, faceBytes, 2, 2);

                        // next 12 bytes are the 3D position of the face
                        var position = new float[] { positionVector.X, positionVector.Y, positionVector.Z };
                        Buffer.BlockCopy(position, 0, faceBytes, 4, 12);

                        // next 4 bytes are the face id (dummy)
                        Buffer.BlockCopy(BitConverter.GetBytes(j), 0, faceBytes, 16, 4);

                        // copy rgb image
                        for (int y = 0; y < height; ++y)
                        {
                            var srcIdx  = Convert.ToInt32(((y + yj) * colorDesc.Width + xj) * 4);
                            var destIdx = Convert.ToInt32((y * width) * 3 + 20);
                            for (int x = 0; x < width; ++x)
                            {
                                Buffer.BlockCopy(colorBytes, srcIdx + x * 4, faceBytes, destIdx + x * 3, 3);
                            }
                        }

                        faceBytesList.Add(faceBytes);
                    }
#endif
                    // concatenate byte arrays to send (post-processed because totalSize is not known until all faces are parsed)
                    int    head  = 1; // the first byte is the number of faces
                    byte[] bytes = new byte[totalSize + 1];
                    Array.Copy(BitConverter.GetBytes(faceBytesList.Count), 0, bytes, 0, head);
                    foreach (byte[] faceByte in faceBytesList)
                    {
                        Array.Copy(faceByte, 0, bytes, head, faceByte.Length);
                        head += faceByte.Length;
                    }
                    this.client.Publish("/kinect/detected/face", bytes);

                    ++this.kinectFrameCount;

                    bitmap.Dispose();
                    coordinateMapper.Dispose();
                    this.frames[MediaFrameSourceKind.Color].Dispose();
                    this.frames[MediaFrameSourceKind.Depth].Dispose();
                    this.frames[MediaFrameSourceKind.Color] = null;
                    this.frames[MediaFrameSourceKind.Depth] = null;
                }
            } catch (Exception ex) {
                System.Diagnostics.Debug.WriteLine("FrameReader_FrameArrived failed: " + ex.Message);
            } finally {
                frameProcessingSemaphore.Release();
            }
        }
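This variant gates the tracking path and the extra face validation behind conditional compilation. For the #if TRACKING_ON and #if VALID_FACE_CHECK branches to be active, the symbols must be defined before any other code in the file, or project-wide; a sketch:

// At the very top of the source file, before any using directives:
#define TRACKING_ON
#define VALID_FACE_CHECK

// ...or project-wide in the .csproj (assumed property group):
// <PropertyGroup>
//   <DefineConstants>$(DefineConstants);TRACKING_ON;VALID_FACE_CHECK</DefineConstants>
// </PropertyGroup>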
Example #6
        public async Task Initialize(VideoSetting videoSetting)
        {
            await DispatcherHelper.RunAndAwaitAsync(CoreApplication.MainView.CoreWindow.Dispatcher, CoreDispatcherPriority.Normal, async() =>
//            await CoreApplication.MainView.CoreWindow.Dispatcher.RunAsyncRunAndAwaitAsync(CoreDispatcherPriority.Normal, async () =>
            {
                _threadsCount   = videoSetting.UsedThreads;
                _stoppedThreads = videoSetting.UsedThreads;

                _lastFrameAdded.Start();

                _imageQuality         = new BitmapPropertySet();
                var imageQualityValue = new BitmapTypedValue(videoSetting.VideoQuality, Windows.Foundation.PropertyType.Single);
                _imageQuality.Add("ImageQuality", imageQualityValue);

                _mediaCapture = new MediaCapture();

                var frameSourceGroups = await MediaFrameSourceGroup.FindAllAsync();

                var settings = new MediaCaptureInitializationSettings()
                {
                    SharingMode = MediaCaptureSharingMode.ExclusiveControl,

                    //With CPU the results always contain SoftwareBitmaps; with GPU
                    //they would prefer D3DSurface
                    MemoryPreference = MediaCaptureMemoryPreference.Cpu,

                    //Capture only video, no audio
                    StreamingCaptureMode = StreamingCaptureMode.Video
                };

                await _mediaCapture.InitializeAsync(settings);

                var mediaFrameSource      = _mediaCapture.FrameSources.First().Value;
                var videoDeviceController = mediaFrameSource.Controller.VideoDeviceController;

                videoDeviceController.DesiredOptimization = Windows.Media.Devices.MediaCaptureOptimization.Quality;
                videoDeviceController.PrimaryUse          = Windows.Media.Devices.CaptureUse.Video;

                //Set exposure (auto light adjustment)
                if (_mediaCapture.VideoDeviceController.Exposure.Capabilities.Supported &&
                    _mediaCapture.VideoDeviceController.Exposure.Capabilities.AutoModeSupported)
                {
                    _mediaCapture.VideoDeviceController.Exposure.TrySetAuto(true);
                }

                var videoResolutionWidthHeight = VideoResolutionWidthHeight.Get(videoSetting.VideoResolution);
                var videoSubType = VideoSubtypeHelper.Get(videoSetting.VideoSubtype);

                //Set resolution, frame rate and video subtype
                var videoFormat = mediaFrameSource.SupportedFormats.Where(sf => sf.VideoFormat.Width == videoResolutionWidthHeight.Width &&
                                                                          sf.VideoFormat.Height == videoResolutionWidthHeight.Height &&
                                                                          sf.Subtype == videoSubType)
                                  .OrderByDescending(m => m.FrameRate.Numerator / m.FrameRate.Denominator)
                                  .First();

                await mediaFrameSource.SetFormatAsync(videoFormat);

                _mediaFrameReader = await _mediaCapture.CreateFrameReaderAsync(mediaFrameSource);
                await _mediaFrameReader.StartAsync();
            });
        }
        /// <summary>
        /// Stops reading from the frame reader, disposes of the reader and updates the button state.
        /// </summary>
        private async Task StopReaderAsync()
        {
            _streaming = false;

            if (_reader != null)
            {
                await _reader.StopAsync();
                _reader.FrameArrived -= Reader_FrameArrived;
                _reader.Dispose();
                _reader = null;

                _logger.Log("Reader stopped.");
            }

            await UpdateButtonStateAsync();
        }
        public async Task Initialize()
        {
            await CoreApplication.MainView.CoreWindow.Dispatcher.RunAndAwaitAsync(CoreDispatcherPriority.Normal, async() =>
            {
                _imageQuality         = new BitmapPropertySet();
                var imageQualityValue = new BitmapTypedValue(IMAGE_QUALITY_PERCENT, Windows.Foundation.PropertyType.Single);
                _imageQuality.Add("ImageQuality", imageQualityValue);

                _mediaCapture = new MediaCapture();

                var frameSourceGroups = await MediaFrameSourceGroup.FindAllAsync();

                var settings = new MediaCaptureInitializationSettings()
                {
                    SharingMode = MediaCaptureSharingMode.ExclusiveControl,

                    //With CPU the results always contain SoftwareBitmaps; with GPU
                    //they would prefer D3DSurface
                    MemoryPreference = MediaCaptureMemoryPreference.Cpu,

                    //Capture only video, no audio
                    StreamingCaptureMode = StreamingCaptureMode.Video
                };

                await _mediaCapture.InitializeAsync(settings);

                var mediaFrameSource      = _mediaCapture.FrameSources.First().Value;
                var videoDeviceController = mediaFrameSource.Controller.VideoDeviceController;

                videoDeviceController.DesiredOptimization = Windows.Media.Devices.MediaCaptureOptimization.Quality;
                videoDeviceController.PrimaryUse          = Windows.Media.Devices.CaptureUse.Video;

                //Set backlight compensation to min (otherwise there are problems with strong light sources)
                if (videoDeviceController.BacklightCompensation.Capabilities.Supported)
                {
                    videoDeviceController.BacklightCompensation.TrySetValue(videoDeviceController.BacklightCompensation.Capabilities.Min);
                }

                //Set exposure (auto light adjustment)
                if (_mediaCapture.VideoDeviceController.Exposure.Capabilities.Supported &&
                    _mediaCapture.VideoDeviceController.Exposure.Capabilities.AutoModeSupported)
                {
                    _mediaCapture.VideoDeviceController.Exposure.TrySetAuto(true);
                }

                //Set resolution, frame rate and video subtype
                var videoFormat = mediaFrameSource.SupportedFormats.First(sf => sf.VideoFormat.Width == VIDEO_WIDTH &&
                                                                          sf.VideoFormat.Height == VIDEO_HEIGHT &&
                                                                          sf.Subtype == VIDEO_SUBTYP);

                await mediaFrameSource.SetFormatAsync(videoFormat);

                _mediaFrameReader = await _mediaCapture.CreateFrameReaderAsync(mediaFrameSource);

                //If a debugger is attached you can't get frames from the camera, because the BitmapEncoder
                //has a bug and does not dispose correctly. This results in a System.OutOfMemoryException
                if (!Debugger.IsAttached)
                {
                    _mediaFrameReader.FrameArrived += FrameArrived;

                    await _mediaFrameReader.StartAsync();
                }
            });
        }
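_imageQuality is populated here but used outside this excerpt; typically the FrameArrived handler passes it to a BitmapEncoder to compress each SoftwareBitmap into a JPEG for streaming. A minimal sketch of that step, assuming CPU frames and the property set created in Initialize (the EncodeToJpegAsync helper name is an assumption):

        private async Task<byte[]> EncodeToJpegAsync(SoftwareBitmap bitmap)
        {
            using (var stream = new InMemoryRandomAccessStream())
            {
                // _imageQuality carries the "ImageQuality" value configured in Initialize().
                var encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream, _imageQuality);
                encoder.SetSoftwareBitmap(bitmap);
                await encoder.FlushAsync();

                // Read the encoded bytes back out of the stream.
                stream.Seek(0);
                var bytes = new byte[stream.Size];
                await stream.ReadAsync(bytes.AsBuffer(), (uint)stream.Size, InputStreamOptions.None);
                return bytes;
            }
        }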
Example #9
 internal BodyFrameReader(Sensor sensor, MediaFrameReader bodyReader)
 {
     Sensor      = sensor;
     _bodyReader = bodyReader;
 }
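A sketch of how a reader wrapper like this is typically driven: subscribe to FrameArrived and start the underlying MediaFrameReader (the OpenAsync and OnFrameArrived members are assumptions about the surrounding class, not shown in the snippet above):

 internal async Task OpenAsync()
 {
     _bodyReader.FrameArrived += OnFrameArrived;
     await _bodyReader.StartAsync();
 }

 private void OnFrameArrived(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
 {
     using (MediaFrameReference frame = sender.TryAcquireLatestFrame())
     {
         if (frame == null)
         {
             return;
         }
         // Body data would be extracted from the frame here.
     }
 }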
Example #10
    /// <summary>
    /// The Task to asynchronously initialize MediaCapture in UWP. The HoloLens camera will be
    /// configured to preview video at the resolution and frame rate selected by videoParameter
    /// (896x504 at 30 fps by default); the pixel format is NV12. A MediaFrameReader will be
    /// initialized and the OnFrameArrived callback registered for each video frame. Note that
    /// this task does not start the video preview, but configures its running behavior. It
    /// should be executed when the ARUWPController status is ARUWP_STATUS_CLEAN, and will
    /// change the status to ARUWP_STATUS_VIDEO_INITIALIZED if no error occurs. [internal use]
    /// </summary>
    /// <returns>Whether video pipeline is successfully initialized</returns>
    public async Task <bool> InitializeMediaCaptureAsyncTask()
    {
        if (controller.status != ARUWP.ARUWP_STATUS_CLEAN)
        {
            Debug.Log(TAG + ": InitializeMediaCaptureAsyncTask() unsupported status");
            return(false);
        }

        if (mediaCapture != null)
        {
            Debug.Log(TAG + ": InitializeMediaCaptureAsyncTask() fails because mediaCapture is not null");
            return(false);
        }

        var allGroups = await MediaFrameSourceGroup.FindAllAsync();

        foreach (var group in allGroups)
        {
            Debug.Log(group.DisplayName + ", " + group.Id);
        }

        if (allGroups.Count <= 0)
        {
            Debug.Log(TAG + ": InitializeMediaCaptureAsyncTask() fails because there is no MediaFrameSourceGroup");
            return(false);
        }

        Config.SourceSelectionDictionary.TryGetValue(Config.Mono, out isPublishing);
        if (isPublishing)
        {
            rosConnector         = controller.rosConnector;
            publisher            = new RosSharp.RosBridgeClient.NonMono.Publisher <RosSharp.RosBridgeClient.Messages.Sensor.Image>(ref rosConnector, "/hololens/" + Config.Mono);
            FrameReadyToPublish += PublishFrame;
        }

        // Initialize mediacapture with the source group.
        mediaCapture = new MediaCapture();
        var settings = new MediaCaptureInitializationSettings
        {
            SourceGroup = allGroups[0],
            // This media capture can share streaming with other apps.
            SharingMode = MediaCaptureSharingMode.SharedReadOnly,
            // Only stream video and don't initialize audio capture devices.
            StreamingCaptureMode = StreamingCaptureMode.Video,
            // Set to CPU to ensure frames always contain CPU SoftwareBitmap images
            // instead of preferring GPU D3DSurface images.
            MemoryPreference = MediaCaptureMemoryPreference.Cpu
        };

        await mediaCapture.InitializeAsync(settings);

        Debug.Log(TAG + ": MediaCapture is successfully initialized in shared mode.");

        try
        {
            int   targetVideoWidth, targetVideoHeight;
            float targetVideoFrameRate;
            switch (videoParameter)
            {
            case VideoParameter.Param1280x720x15:
                targetVideoWidth     = 1280;
                targetVideoHeight    = 720;
                targetVideoFrameRate = 15.0f;
                break;

            case VideoParameter.Param1280x720x30:
                targetVideoWidth     = 1280;
                targetVideoHeight    = 720;
                targetVideoFrameRate = 30.0f;
                break;

            case VideoParameter.Param1344x756x15:
                targetVideoWidth     = 1344;
                targetVideoHeight    = 756;
                targetVideoFrameRate = 15.0f;
                break;

            case VideoParameter.Param1344x756x30:
                targetVideoWidth     = 1344;
                targetVideoHeight    = 756;
                targetVideoFrameRate = 30.0f;
                break;

            case VideoParameter.Param896x504x15:
                targetVideoWidth     = 896;
                targetVideoHeight    = 504;
                targetVideoFrameRate = 15.0f;
                break;

            case VideoParameter.Param896x504x30:
                targetVideoWidth     = 896;
                targetVideoHeight    = 504;
                targetVideoFrameRate = 30.0f;
                break;

            default:
                targetVideoWidth     = 896;
                targetVideoHeight    = 504;
                targetVideoFrameRate = 30.0f;
                break;
            }
            var mediaFrameSourceVideoPreview  = mediaCapture.FrameSources.Values.Single(x => x.Info.MediaStreamType == MediaStreamType.VideoPreview);
            MediaFrameFormat targetResFormat  = null;
            float            framerateDiffMin = 60f;
            foreach (var f in mediaFrameSourceVideoPreview.SupportedFormats.OrderBy(x => x.VideoFormat.Width * x.VideoFormat.Height))
            {
                if (f.VideoFormat.Width == targetVideoWidth && f.VideoFormat.Height == targetVideoHeight)
                {
                    if (targetResFormat == null)
                    {
                        targetResFormat  = f;
                        framerateDiffMin = Mathf.Abs(f.FrameRate.Numerator / f.FrameRate.Denominator - targetVideoFrameRate);
                    }
                    else if (Mathf.Abs(f.FrameRate.Numerator / f.FrameRate.Denominator - targetVideoFrameRate) < framerateDiffMin)
                    {
                        targetResFormat  = f;
                        framerateDiffMin = Mathf.Abs(f.FrameRate.Numerator / f.FrameRate.Denominator - targetVideoFrameRate);
                    }
                }
            }
            if (targetResFormat == null)
            {
                Debug.Log(TAG + ": Unable to choose the selected format, falling back to the smallest supported format");
                targetResFormat = mediaFrameSourceVideoPreview.SupportedFormats.OrderBy(x => x.VideoFormat.Width * x.VideoFormat.Height).FirstOrDefault();
            }

            await mediaFrameSourceVideoPreview.SetFormatAsync(targetResFormat);

            frameReader = await mediaCapture.CreateFrameReaderAsync(mediaFrameSourceVideoPreview, targetResFormat.Subtype);

            frameReader.FrameArrived += OnFrameArrived;
            controller.frameWidth     = Convert.ToInt32(targetResFormat.VideoFormat.Width);
            controller.frameHeight    = Convert.ToInt32(targetResFormat.VideoFormat.Height);
            videoBufferSize           = controller.frameWidth * controller.frameHeight * 4;
            Debug.Log(TAG + ": FrameReader is successfully initialized, " + controller.frameWidth + "x" + controller.frameHeight +
                      ", Framerate: " + targetResFormat.FrameRate.Numerator + "/" + targetResFormat.FrameRate.Denominator);
        }
        catch (Exception e)
        {
            Debug.Log(TAG + ": FrameReader is not initialized");
            Debug.Log(TAG + ": Exception: " + e);
            return(false);
        }

        if (isPublishing)
        {
            double fps;
            if (Config.FrameRateDictionary.TryGetValue(Config.Mono, out fps))
            {
                publishPeriod = 1 / fps;
                var bytesPerPixel   = (int)HololensRobotController.WindowsSensors.CameraHandler.GetBytesPerPixel(MediaFrameSourceKind.Color);
                int pixelBufferSize = (int)(controller.frameHeight * controller.frameWidth * bytesPerPixel);
                publishRowSize = controller.frameWidth * bytesPerPixel;
                publishBuffer  = new byte[pixelBufferSize];
            }
            else
            {
                isPublishing = false;
            }
        }


        controller.status = ARUWP.ARUWP_STATUS_VIDEO_INITIALIZED;
        signalInitDone    = true;
        Debug.Log(TAG + ": InitializeMediaCaptureAsyncTask() is successful");
        return(true);
    }
Example #11
    /// <summary>
    /// The callback triggered when a new video preview frame arrives. In this function, the
    /// video frame is saved for the Unity UI if videoPreview is enabled, the tracking task is
    /// triggered, and the video FPS is recorded. [internal use]
    /// </summary>
    /// <param name="sender">MediaFrameReader object</param>
    /// <param name="args">arguments not used here</param>
    private void OnFrameArrived(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
    {
        ARUWPUtils.VideoTick();
        using (var frame = sender.TryAcquireLatestFrame())
        {
            if (frame != null)
            {
                float[] cameraToWorldMatrixAsFloat;
                if (TryGetCameraToWorldMatrix(frame, out cameraToWorldMatrixAsFloat) == false)
                {
                    return;
                }

                Matrix4x4 latestLocatableCameraToWorld = ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);

                var originalSoftwareBitmap = frame.VideoMediaFrame.SoftwareBitmap;
                var softwareBitmap         = SoftwareBitmap.Convert(originalSoftwareBitmap, BitmapPixelFormat.Rgba8, BitmapAlphaMode.Ignore);
                originalSoftwareBitmap?.Dispose();
                if (videoPreview)
                {
                    Interlocked.Exchange(ref _bitmap, softwareBitmap);
                }
                controller.ProcessFrameAsync(SoftwareBitmap.Copy(softwareBitmap), latestLocatableCameraToWorld);
                signalTrackingUpdated = true;

                if (isPublishing)
                {
                    TimeSpan sampledCurrentTime  = HololensRobotController.Utilities.Timer.SampleCurrentStopwatch();
                    double   elapsedTotalSeconds = HololensRobotController.Utilities.Timer.GetElapsedTimeInSeconds(sampledCurrentTime);
                    if (elapsedTotalSeconds >= nextPublishTime)
                    {
                        SoftwareBitmap publishedBitmap;
                        if (Config.convertColorToGrayscale)
                        {
                            publishedBitmap = SoftwareBitmap.Convert(softwareBitmap, BitmapPixelFormat.Gray8);
                            originalSoftwareBitmap?.Dispose();
                        }
                        else
                        {
                            publishedBitmap = softwareBitmap;
                        }

                        IBuffer ibuffer = publishBuffer.AsBuffer();
                        publishedBitmap.CopyToBuffer(ibuffer);
                        OnFrameReadyToPublish(
                            new HololensRobotController.WindowsSensors.FrameEventArgs(
                                (uint)controller.frameHeight,
                                (uint)controller.frameWidth,
                                HololensRobotController.WindowsSensors.CameraHandler.GetCameraSourceEncoding(MediaFrameSourceKind.Color),
                                (uint)publishRowSize,
                                publishBuffer,
                                sampledCurrentTime + HololensRobotController.Utilities.Timer.GetOffsetUTC()));

                        nextPublishTime = nextPublishTime + publishPeriod;
                    }
                }
            }
        }
    }
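TryGetCameraToWorldMatrix and ConvertFloatArrayToMatrix4x4 are defined elsewhere in the project; the latter just unpacks a 16-element float array into Unity's Matrix4x4. A plausible sketch (the column-major element order is an assumption and must match whatever produced the array):

    private static Matrix4x4 ConvertFloatArrayToMatrix4x4(float[] m)
    {
        // Assumes column-major packing: m[0..3] is the first column, and so on.
        var matrix = new Matrix4x4();
        for (int col = 0; col < 4; ++col)
        {
            matrix.SetColumn(col, new Vector4(m[col * 4], m[col * 4 + 1], m[col * 4 + 2], m[col * 4 + 3]));
        }
        return matrix;
    }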
Example #12
        /// <summary>
        /// Event handler for video frames from the local video capture device.
        /// </summary>
        private async void FrameArrivedHandler(MediaFrameReader sender, MediaFrameArrivedEventArgs e)
        {
            if (!_isClosed)
            {
                if (!_videoFormatManager.SelectedFormat.IsEmpty() && (OnVideoSourceEncodedSample != null || OnVideoSourceRawSample != null))
                {
                    using (var mediaFrameReference = sender.TryAcquireLatestFrame())
                    {
                        var videoMediaFrame = mediaFrameReference?.VideoMediaFrame;
                        var softwareBitmap  = videoMediaFrame?.SoftwareBitmap;

                        if (softwareBitmap == null && videoMediaFrame != null)
                        {
                            var videoFrame = videoMediaFrame.GetVideoFrame();
                            softwareBitmap = await SoftwareBitmap.CreateCopyFromSurfaceAsync(videoFrame.Direct3DSurface);
                        }

                        if (softwareBitmap != null)
                        {
                            int width  = softwareBitmap.PixelWidth;
                            int height = softwareBitmap.PixelHeight;

                            if (softwareBitmap.BitmapPixelFormat != BitmapPixelFormat.Nv12)
                            {
                                softwareBitmap = SoftwareBitmap.Convert(softwareBitmap, BitmapPixelFormat.Nv12, BitmapAlphaMode.Ignore);
                            }

                            // Swap the processed frame to _backBuffer and dispose of the unused image.
                            softwareBitmap = Interlocked.Exchange(ref _backBuffer, softwareBitmap);

                            using (BitmapBuffer buffer = _backBuffer.LockBuffer(BitmapBufferAccessMode.Read))
                            {
                                using (var reference = buffer.CreateReference())
                                {
                                    unsafe
                                    {
                                        byte *dataInBytes;
                                        uint  capacity;
                                        ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacity);
                                        byte[] nv12Buffer = new byte[capacity];
                                        Marshal.Copy((IntPtr)dataInBytes, nv12Buffer, 0, (int)capacity);

                                        if (OnVideoSourceEncodedSample != null)
                                        {
                                            lock (_videoEncoder)
                                            {
                                                var encodedBuffer = _videoEncoder.EncodeVideo(width, height, nv12Buffer, EncoderInputFormat, _videoFormatManager.SelectedFormat.Codec);

                                                if (encodedBuffer != null)
                                                {
                                                    uint fps           = (_fpsDenominator > 0 && _fpsNumerator > 0) ? _fpsNumerator / _fpsDenominator : DEFAULT_FRAMES_PER_SECOND;
                                                    uint durationRtpTS = VIDEO_SAMPLING_RATE / fps;
                                                    OnVideoSourceEncodedSample.Invoke(durationRtpTS, encodedBuffer);
                                                }

                                                if (_forceKeyFrame)
                                                {
                                                    _forceKeyFrame = false;
                                                }
                                            }
                                        }

                                        if (OnVideoSourceRawSample != null)
                                        {
                                            uint frameSpacing = 0;
                                            if (_lastFrameAt != DateTime.MinValue)
                                            {
                                                frameSpacing = Convert.ToUInt32(DateTime.Now.Subtract(_lastFrameAt).TotalMilliseconds);
                                            }

                                            var bgrBuffer = PixelConverter.NV12toBGR(nv12Buffer, width, height, width * 3);

                                            OnVideoSourceRawSample(frameSpacing, width, height, bgrBuffer, VideoPixelFormatsEnum.Bgr);
                                        }
                                    }
                                }
                            }

                            _backBuffer?.Dispose();
                            softwareBitmap?.Dispose();
                        }

                        _lastFrameAt = DateTime.Now;
                    }
                }
            }
        }
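
The unsafe GetBuffer call in the handler above relies on the WinRT IMemoryBufferByteAccess COM interface, which is not part of the .NET metadata and has to be declared in the project (and compiled with unsafe code enabled). A minimal declaration using the interface's documented IID looks like this:

        // Requires: using System.Runtime.InteropServices;
        // Minimal declaration of the WinRT IMemoryBufferByteAccess interface used above to
        // obtain a raw pointer to a BitmapBuffer's pixel data.
        [ComImport]
        [Guid("5B0D3235-4DBA-4D44-865E-8F1D0E4FD04D")]
        [InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
        unsafe interface IMemoryBufferByteAccess
        {
            // Returns a raw pointer to the buffer's bytes and its length.
            void GetBuffer(out byte* buffer, out uint capacity);
        }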
Example #13
0
        /// <summary>
        /// Attempts to initialise the local video capture device.
        /// </summary>
        /// <param name="width">The frame width to attempt to initialise the video capture device with. Set as 0 to use default.</param>
        /// <param name="height">The frame height to attempt to initialise the video capture device with. Set as 0 to use default.</param>
        /// <param name="fps">The frame rate, in frames per second, to attempt to initialise the video capture device with.
        /// Set as 0 to use default.</param>
        private async Task <bool> InitialiseDevice(uint width, uint height, uint fps)
        {
            var mediaCaptureSettings = new MediaCaptureInitializationSettings()
            {
                StreamingCaptureMode = StreamingCaptureMode.Video,
                SharingMode          = MediaCaptureSharingMode.ExclusiveControl,
                MediaCategory        = MediaCategory.Communications
            };

            if (_videoDeviceID != null)
            {
                var vidCapDevices = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture).AsTask().ConfigureAwait(false);

                var vidDevice = vidCapDevices.FirstOrDefault(x => x.Id == _videoDeviceID || x.Name == _videoDeviceID);

                if (vidDevice == null)
                {
                    logger.LogWarning($"Could not find video capture device for specified ID {_videoDeviceID}, using default device.");
                }
                else
                {
                    logger.LogInformation($"Video capture device {vidDevice.Name} selected.");
                    mediaCaptureSettings.VideoDeviceId = vidDevice.Id;
                }
            }

            await _mediaCapture.InitializeAsync(mediaCaptureSettings).AsTask().ConfigureAwait(false);

            MediaFrameSourceInfo colorSourceInfo = null;

            foreach (var srcInfo in _mediaCapture.FrameSources)
            {
                if (srcInfo.Value.Info.MediaStreamType == MediaStreamType.VideoRecord &&
                    srcInfo.Value.Info.SourceKind == MediaFrameSourceKind.Color)
                {
                    colorSourceInfo = srcInfo.Value.Info;
                    break;
                }
            }

            var colorFrameSource = _mediaCapture.FrameSources[colorSourceInfo.Id];

            var preferredFormat = colorFrameSource.SupportedFormats.Where(format =>
            {
                return(format.VideoFormat.Width >= _width &&
                       format.VideoFormat.Height >= _height &&
                       (format.FrameRate.Numerator / format.FrameRate.Denominator) >= fps &&
                       format.Subtype == MF_NV12_PIXEL_FORMAT);
            }).FirstOrDefault();

            if (preferredFormat == null)
            {
                // Try again without the pixel format.
                preferredFormat = colorFrameSource.SupportedFormats.Where(format =>
                {
                    return(format.VideoFormat.Width >= _width &&
                           format.VideoFormat.Height >= _height &&
                           (format.FrameRate.Numerator / format.FrameRate.Denominator) >= fps);
                }).FirstOrDefault();
            }

            if (preferredFormat == null)
            {
                // Still can't get what we want. Log a warning message and take the default.
                logger.LogWarning($"The video capture device did not support the requested format (or better) {_width}x{_height} {fps}fps. Using default mode.");

                preferredFormat = colorFrameSource.SupportedFormats.FirstOrDefault();
            }

            if (preferredFormat == null)
            {
                throw new ApplicationException("The video capture device does not support a compatible video format for the requested parameters.");
            }

            await colorFrameSource.SetFormatAsync(preferredFormat).AsTask().ConfigureAwait(false);

            _mediaFrameReader = await _mediaCapture.CreateFrameReaderAsync(colorFrameSource).AsTask().ConfigureAwait(false);

            _mediaFrameReader.AcquisitionMode = MediaFrameReaderAcquisitionMode.Realtime;

            // Frame source and format have now been successfully set.
            _width          = preferredFormat.VideoFormat.Width;
            _height         = preferredFormat.VideoFormat.Height;
            _fpsNumerator   = preferredFormat.FrameRate.Numerator;
            _fpsDenominator = preferredFormat.FrameRate.Denominator;

            //double fpsSelected = _fpsNumerator / _fpsDenominator;
            //string pixFmt = preferredFormat.Subtype == MF_I420_PIXEL_FORMAT ? "I420" : preferredFormat.Subtype;
            //string deviceName = colorFrameSource.Info.DeviceInformation.Name;
            //logger.LogInformation($"Video capture device {deviceName} successfully initialised: {_width}x{_height} {fpsSelected:0.##}fps pixel format {pixFmt}.");

            PrintFrameSourceInfo(colorFrameSource);

            _mediaFrameReader.FrameArrived += FrameArrivedHandler;

            return(true);
        }
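
InitialiseDevice filters formats against MF_NV12_PIXEL_FORMAT (and the commented-out logging mentions MF_I420_PIXEL_FORMAT), but those constants are not part of this snippet. Assuming they hold the FourCC subtype strings that MediaFrameFormat.Subtype reports, a plausible definition is:

        // Assumed constants (not shown in the snippet): MediaFrameFormat.Subtype reports the
        // pixel format as a FourCC string such as "NV12" or "I420".
        private const string MF_NV12_PIXEL_FORMAT = "NV12";
        private const string MF_I420_PIXEL_FORMAT = "I420";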
        public static async Task <FrameGrabber> CreateAsync()
        {
            MediaCapture          mediaCapture       = null;
            MediaFrameReader      mediaFrameReader   = null;
            MediaFrameSourceGroup selectedGroup      = null;
            MediaFrameSourceInfo  selectedSourceInfo = null;

            var groups = await MediaFrameSourceGroup.FindAllAsync();

            foreach (MediaFrameSourceGroup sourceGroup in groups)
            {
                // there should be only one color source for the HoloLens
                foreach (MediaFrameSourceInfo sourceInfo in sourceGroup.SourceInfos)
                {
                    if (sourceInfo.SourceKind == MediaFrameSourceKind.Color)
                    {
                        selectedSourceInfo = sourceInfo;
                        break;
                    }
                }
                if (selectedSourceInfo != null)
                {
                    selectedGroup = sourceGroup;
                    break;
                }
            }

            // define the type of MediaCapture we want (initialize MediaCapture to capture video from a color camera on the CPU)
            var settings = new MediaCaptureInitializationSettings
            {
                SourceGroup          = selectedGroup,
                SharingMode          = MediaCaptureSharingMode.SharedReadOnly,
                StreamingCaptureMode = StreamingCaptureMode.Video,
                MemoryPreference     = MediaCaptureMemoryPreference.Cpu,
            };

            mediaCapture = new MediaCapture();

            try
            {
                await mediaCapture.InitializeAsync(settings);
            }
            catch (Exception e)
            {
                Debug.WriteLine($"Can't initialize MediaCapture {e.ToString()}");
                return(new FrameGrabber());
            }

            // if initialization is successful, obtain MediaFrameSource and create MediaFrameReader
            MediaFrameSource selectedSource = mediaCapture.FrameSources[selectedSourceInfo.Id];

            mediaFrameReader = await mediaCapture.CreateFrameReaderAsync(selectedSource);

            // ensure MediaFrameReader is successfully created to instantiate Grabber instance
            MediaFrameReaderStartStatus status = await mediaFrameReader.StartAsync();

            if (status == MediaFrameReaderStartStatus.Success)
            {
                return(new FrameGrabber(mediaCapture, selectedSource, mediaFrameReader));
            }
            else
            {
                return(new FrameGrabber());
            }
        }
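
CreateAsync returns an empty FrameGrabber when initialization fails and a populated one when the reader starts successfully; the constructors themselves are not shown here. A minimal sketch consistent with those two call sites (the field names are assumptions) is:

        // Sketch of the two FrameGrabber constructors implied by CreateAsync; field names are assumed.
        private readonly MediaCapture mediaCapture;
        private readonly MediaFrameSource mediaFrameSource;
        private readonly MediaFrameReader mediaFrameReader;

        // Used when initialization fails: the instance holds no capture resources.
        public FrameGrabber() { }

        // Used on success: keeps the capture pipeline objects alive so frames can be read later.
        public FrameGrabber(MediaCapture mediaCapture, MediaFrameSource mediaFrameSource, MediaFrameReader mediaFrameReader)
        {
            this.mediaCapture     = mediaCapture;
            this.mediaFrameSource = mediaFrameSource;
            this.mediaFrameReader = mediaFrameReader;
        }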
Example #15
0
        private async void ActionButton_Click(object sender, RoutedEventArgs e)
        {
            // <SnippetImageElementSource>
            imageElement.Source = new SoftwareBitmapSource();
            // </SnippetImageElementSource>

            // <SnippetFindAllAsync>
            var frameSourceGroups = await MediaFrameSourceGroup.FindAllAsync();

            // </SnippetFindAllAsync>

            // Color, infrared, and depth


            // <SnippetSelectColor>
            var selectedGroupObjects = frameSourceGroups.Select(group => new
            {
                sourceGroup     = group,
                colorSourceInfo = group.SourceInfos.FirstOrDefault((sourceInfo) =>
                {
                    // On XBox/Kinect, omit the MediaStreamType and EnclosureLocation tests
                    return(sourceInfo.MediaStreamType == MediaStreamType.VideoPreview &&
                           sourceInfo.SourceKind == MediaFrameSourceKind.Color &&
                           sourceInfo.DeviceInformation?.EnclosureLocation?.Panel == Windows.Devices.Enumeration.Panel.Front);
                })
            }).Where(t => t.colorSourceInfo != null)
              .FirstOrDefault();

            MediaFrameSourceGroup selectedGroup   = selectedGroupObjects?.sourceGroup;
            MediaFrameSourceInfo  colorSourceInfo = selectedGroupObjects?.colorSourceInfo;

            if (selectedGroup == null)
            {
                return;
            }
            // </SnippetSelectColor>

            // <SnippetInitMediaCapture>
            mediaCapture = new MediaCapture();

            var settings = new MediaCaptureInitializationSettings()
            {
                SourceGroup          = selectedGroup,
                SharingMode          = MediaCaptureSharingMode.ExclusiveControl,
                MemoryPreference     = MediaCaptureMemoryPreference.Cpu,
                StreamingCaptureMode = StreamingCaptureMode.Video
            };

            try
            {
                await mediaCapture.InitializeAsync(settings);
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine("MediaCapture initialization failed: " + ex.Message);
                return;
            }
            // </SnippetInitMediaCapture>


            var colorFrameSource = mediaCapture.FrameSources[colorSourceInfo.Id];
            var preferredFormat  = colorFrameSource.SupportedFormats.Where(format =>
            {
                return(format.VideoFormat.Width == 1920);
            }).FirstOrDefault();

            if (preferredFormat == null)
            {
                // Our desired format is not supported
                return;
            }
            await colorFrameSource.SetFormatAsync(preferredFormat);

            // <SnippetCreateFrameReader>
            mediaFrameReader = await mediaCapture.CreateFrameReaderAsync(colorFrameSource, MediaEncodingSubtypes.Argb32);

            mediaFrameReader.FrameArrived += ColorFrameReader_FrameArrived;
            await mediaFrameReader.StartAsync();

            // </SnippetCreateFrameReader>
        }
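
The snippet registers ColorFrameReader_FrameArrived but does not include it. A minimal sketch of such a handler, assuming the goal is simply to display each frame in imageElement (a production handler would also throttle with a back buffer so frames cannot queue up on the dispatcher), is:

        private void ColorFrameReader_FrameArrived(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
        {
            using (var mediaFrameReference = sender.TryAcquireLatestFrame())
            {
                var frameBitmap = mediaFrameReference?.VideoMediaFrame?.SoftwareBitmap;
                if (frameBitmap == null)
                {
                    return;
                }

                // SoftwareBitmapSource only accepts BGRA8 with premultiplied (or no) alpha;
                // Convert also produces a copy that outlives the frame reference.
                var displayBitmap = SoftwareBitmap.Convert(frameBitmap, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);

                // Update the Image element on the UI thread.
                _ = imageElement.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, async () =>
                {
                    var source = (SoftwareBitmapSource)imageElement.Source;
                    await source.SetBitmapAsync(displayBitmap);
                    displayBitmap.Dispose();
                });
            }
        }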
Example #16
0
        /// <summary>
        /// Initialize camera pipeline resources and register a callback for when new VideoFrames become available.
        /// </summary>
        /// <returns></returns>
        private async Task InitializeAsync()
        {
            // Initialize MediaCapture with default settings in video-only streaming mode.
            // We first try to acquire exclusive sharing mode and, if that fails, we attempt again in shared mode
            // so that multiple instances can access the camera concurrently.
            m_mediaCapture = new MediaCapture();
            var mediaCaptureInitializationSettings = new MediaCaptureInitializationSettings()
            {
                StreamingCaptureMode = StreamingCaptureMode.Video,
                SharingMode          = m_sharingMode
            };

            // Register a callback in case MediaCapture fails. This can happen for example if another app is using the camera and we can't get ExclusiveControl
            m_mediaCapture.Failed += MediaCapture_Failed;

            await m_mediaCapture.InitializeAsync(mediaCaptureInitializationSettings);

            // Get the list of available frame sources and iterate through them to find a video preview or
            // a video record source with color images (and not IR, depth or other types)
            var selectedFrameSource = m_mediaCapture.FrameSources.FirstOrDefault(source => source.Value.Info.MediaStreamType == MediaStreamType.VideoPreview &&
                                                                                 source.Value.Info.SourceKind == MediaFrameSourceKind.Color).Value;

            if (selectedFrameSource == null)
            {
                selectedFrameSource = m_mediaCapture.FrameSources.FirstOrDefault(source => source.Value.Info.MediaStreamType == MediaStreamType.VideoRecord &&
                                                                                 source.Value.Info.SourceKind == MediaFrameSourceKind.Color).Value;
            }
            if (selectedFrameSource == null)
            {
                throw new Exception("No valid video frame sources were found with source type color.");
            }

            Console.WriteLine($"{selectedFrameSource.Info.DeviceInformation?.Name} | MediaStreamType: {selectedFrameSource.Info.MediaStreamType} MediaFrameSourceKind: {selectedFrameSource.Info.SourceKind}");

            // If initializing in ExclusiveControl mode, attempt to use a 15fps+ BGRA8 format natively from the camera.
            // If not, just use whatever format is already set.
            MediaFrameFormat selectedFormat = selectedFrameSource.CurrentFormat;

            if (m_sharingMode == MediaCaptureSharingMode.ExclusiveControl)
            {
                var mediaFrameFormats = selectedFrameSource.SupportedFormats.OrderByDescending((format) => format.VideoFormat.Width * format.VideoFormat.Height);
                selectedFormat = mediaFrameFormats.Where(
                    format => format.FrameRate.Numerator / format.FrameRate.Denominator >= 15 && // fps
                    string.Compare(format.Subtype, MediaEncodingSubtypes.Bgra8, true) == 0).FirstOrDefault();

                // If not possible, then try to use other supported format at 15fps+
                if (selectedFormat == null)
                {
                    selectedFormat = mediaFrameFormats.Where(
                        format => format.FrameRate.Numerator / format.FrameRate.Denominator >= 15 && // fps
                        (string.Compare(format.Subtype, MediaEncodingSubtypes.Nv12, true) == 0 ||
                         string.Compare(format.Subtype, MediaEncodingSubtypes.Yuy2, true) == 0 ||
                         string.Compare(format.Subtype, MediaEncodingSubtypes.Rgb32, true) == 0)).FirstOrDefault();
                }
                if (selectedFormat == null)
                {
                    throw (new Exception("No suitable media format found on the selected source"));
                }
                await selectedFrameSource.SetFormatAsync(selectedFormat);

                selectedFormat = selectedFrameSource.CurrentFormat;
                Console.WriteLine($"Attempting to set camera source to {selectedFormat.Subtype} : " +
                                  $"{selectedFormat.VideoFormat.Width}x{selectedFormat.VideoFormat.Height}" +
                                  $"@{selectedFormat.FrameRate.Numerator / selectedFormat.FrameRate.Denominator}fps");
            }

            Console.WriteLine($"Frame source format: {selectedFormat.Subtype} : " +
                              $"{selectedFormat.VideoFormat.Width}x{selectedFormat.VideoFormat.Height}" +
                              $"@{selectedFormat.FrameRate.Numerator / selectedFormat.FrameRate.Denominator}fps");

            m_frameReader = await m_mediaCapture.CreateFrameReaderAsync(selectedFrameSource);

            m_frameReader.FrameArrived   += FrameArrivedHandler;
            m_frameReader.AcquisitionMode = MediaFrameReaderAcquisitionMode.Realtime;
            await m_frameReader.StartAsync();
        }
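
The MediaCapture_Failed handler registered above is not shown. A minimal sketch, assuming it only needs to report the failure (for example when another app holds ExclusiveControl), is:

        // Sketch of the failure callback: MediaCaptureFailedEventArgs carries the error code
        // and a message describing why the capture pipeline stopped.
        private void MediaCapture_Failed(MediaCapture sender, MediaCaptureFailedEventArgs errorEventArgs)
        {
            Console.WriteLine($"MediaCapture failed: 0x{errorEventArgs.Code:X8} - {errorEventArgs.Message}");
        }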
Example #17
0
        /// <summary>
        /// Start the video stream. This just prepares the stream for capture, and doesn't start collecting frames
        /// </summary>
        /// <param name="streamDesc">The description of the stream to start.</param>
        public async void Start(StreamDescription streamDesc)
        {
#if CAN_USE_UWP_TYPES
            lock (stateLock)
            {
                if (State != CameraState.Initialized)
                {
                    throw new InvalidOperationException("Start cannot be called until the camera is in the Initialized state");
                }

                State = CameraState.Starting;
            }

            Resolution = streamDesc.Resolution;
            CameraType = streamDesc.CameraType;

            StreamDescriptionInternal desc = streamDesc as StreamDescriptionInternal;

            MediaCaptureInitializationSettings initSettings = new MediaCaptureInitializationSettings()
            {
                SourceGroup          = desc.FrameSourceGroup,
                SharingMode          = MediaCaptureSharingMode.ExclusiveControl,
                MemoryPreference     = MediaCaptureMemoryPreference.Cpu,
                StreamingCaptureMode = StreamingCaptureMode.Video
            };

            // initialize the media device
            mediaCapture = new MediaCapture();

            try
            {
                await mediaCapture.InitializeAsync(initSettings);
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine($"MediaCapture initialization failed: {ex.Message}");
                mediaCapture.Dispose();
                mediaCapture = null;
            }

            if (mediaCapture != null)
            {
                // get access to the video device controller for property settings
                videoDeviceController = mediaCapture.VideoDeviceController;

                // choose media source
                MediaFrameSource frameSource     = mediaCapture.FrameSources[desc.FrameSourceInfo.Id];
                MediaFrameFormat preferredFormat = null;

                foreach (MediaFrameFormat format in frameSource.SupportedFormats)
                {
                    if (format.VideoFormat.Width == desc.Resolution.Width && format.VideoFormat.Height == desc.Resolution.Height && Math.Abs((double)format.FrameRate.Numerator / (double)format.FrameRate.Denominator - desc.Resolution.Framerate) < epsilon)
                    {
                        preferredFormat = format;
                        break;
                    }
                }

                if (preferredFormat != null && preferredFormat != frameSource.CurrentFormat)
                {
                    await frameSource.SetFormatAsync(preferredFormat);
                }
                else
                {
                    System.Diagnostics.Debug.WriteLine($"failed to set desired frame format");
                }

                // set up the frame reader to capture frame data
                frameReader = await mediaCapture.CreateFrameReaderAsync(frameSource);

                frameReader.FrameArrived += OnMediaFrameArrived;
                await frameReader.StartAsync();

                lock (stateLock)
                {
                    State = CameraState.Ready;
                    OnCameraStarted?.Invoke(this, true);
                }
            }
            else
            {
                lock (stateLock)
                {
                    // drop back to initialized when the camera doesn't initialize
                    State = CameraState.Initialized;
                    OnCameraStarted?.Invoke(this, false);
                }
            }
#else
            await Task.CompletedTask;
#endif
        }
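
Start subscribes OnMediaFrameArrived to the reader, but the handler is outside this snippet. A minimal sketch, assuming it only needs to pull the latest frame and hand its bitmap to a consumer (the hand-off is a placeholder, not the original implementation), is:

#if CAN_USE_UWP_TYPES
        private void OnMediaFrameArrived(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
        {
            // TryAcquireLatestFrame returns null when no new frame is available or the reader has stopped.
            using (MediaFrameReference frame = sender.TryAcquireLatestFrame())
            {
                SoftwareBitmap bitmap = frame?.VideoMediaFrame?.SoftwareBitmap;
                if (bitmap != null)
                {
                    // Hand the frame off to whatever consumes camera data here.
                }
            }
        }
#endif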
Example #18
0
 internal BodyFrameReader(Sensor sensor, MediaFrameReader bodyReader)
 {
     Sensor      = sensor;
     _bodyReader = bodyReader;
     _bodyReader.FrameArrived += BodyFrameReader_FrameArrived;
 }
        /// <summary>
        /// Creates a frame reader from the current frame source and registers to handle its frame events.
        /// </summary>
        private async Task CreateReaderAsync()
        {
            await InitializeCaptureAsync();

            UpdateFrameSource();

            if (_source != null)
            {
                _reader = await _mediaCapture.CreateFrameReaderAsync(_source);
                _reader.FrameArrived += Reader_FrameArrived;
                
                _logger.Log($"Reader created on source: {_source.Info.Id}");
            }
        }
Example #20
0
 internal void Dispose()
 {
     _bodyReader?.Dispose();
     _bodyReader = null;
 }
 /// <summary>
 /// Handles the frame arrived event by converting the frame to a displayable
 /// format and rendering it to the screen.
 /// </summary>
 private void Reader_FrameArrived(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
 {
     // TryAcquireLatestFrame will return the latest frame that has not yet been acquired.
     // This can return null if there is no such frame, or if the reader is not in the
     // "Started" state. The latter can occur if a FrameArrived event was in flight
     // when the reader was stopped.
     using (var frame = sender.TryAcquireLatestFrame())
     {
         _frameRenderer.ProcessFrame(frame);
     }
 }
Example #22
0
        public async Task StartAsync(string Name, bool UseGpu = false)
        {
            var frameSourceGroups = await AsAsync(MediaFrameSourceGroup.FindAllAsync());

            var selectedGroup = frameSourceGroups.Where(x => x.DisplayName.Contains(Name)).FirstOrDefault();

            if (null == selectedGroup)
            {
                throw new ApplicationException($"Unable to find frame source named {Name}");
            }

            var colorSourceInfo = selectedGroup.SourceInfos
                                  .Where(x => x.MediaStreamType == MediaStreamType.VideoRecord && x.SourceKind == MediaFrameSourceKind.Color)
                                  .FirstOrDefault();

            if (null == colorSourceInfo)
            {
                throw new ApplicationException($"Unable to find color video recording source on {Name}");
            }

            mediaCapture = new MediaCapture();

            if (null == mediaCapture)
            {
                throw new ApplicationException($"Unable to create new mediacapture");
            }

            var settings = new MediaCaptureInitializationSettings()
            {
                SourceGroup          = selectedGroup,
                SharingMode          = MediaCaptureSharingMode.ExclusiveControl,
                MemoryPreference     = UseGpu ? MediaCaptureMemoryPreference.Auto : MediaCaptureMemoryPreference.Cpu,
                StreamingCaptureMode = StreamingCaptureMode.Video
            };

            try
            {
                await AsAsync(mediaCapture.InitializeAsync(settings));
            }
            catch (Exception ex)
            {
                throw new ApplicationException("MediaCapture initialization failed: " + ex.Message, ex);
            }

            var colorFrameSource = mediaCapture.FrameSources[colorSourceInfo.Id];

            var preferredFormat = colorFrameSource.SupportedFormats.Where(format => format.VideoFormat.Width >= 1080 && format.Subtype == "NV12").FirstOrDefault();

            if (null == preferredFormat)
            {
                throw new ApplicationException("Our desired format is not supported");
            }

            await AsAsync(colorFrameSource.SetFormatAsync(preferredFormat));

            mediaFrameReader = await AsAsync(mediaCapture.CreateFrameReaderAsync(colorFrameSource, MediaEncodingSubtypes.Argb32));

            if (null == mediaFrameReader)
            {
                throw new ApplicationException($"Unable to create new mediaframereader");
            }

            evtFrame = new EventWaitHandle(false, EventResetMode.ManualReset);
            mediaFrameReader.FrameArrived += (s, a) => evtFrame.Set();
            await AsAsync(mediaFrameReader.StartAsync());

            Log.WriteLineVerbose("FrameReader Started");
        }
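
Because StartAsync only sets evtFrame from the FrameArrived event, callers are expected to wait on the handle and pull frames from mediaFrameReader themselves. A hypothetical consumer loop (not part of the original sample) could look like this:

        // Hypothetical consumer: wait for the FrameArrived signal, reset it, then pull the
        // most recent ARGB32 frame from the reader and process it in place.
        public void PumpFrames(System.Threading.CancellationToken token)
        {
            while (!token.IsCancellationRequested)
            {
                if (!evtFrame.WaitOne(100))
                {
                    continue;
                }
                evtFrame.Reset();

                using (var frameReference = mediaFrameReader.TryAcquireLatestFrame())
                {
                    var bitmap = frameReference?.VideoMediaFrame?.SoftwareBitmap;
                    if (bitmap != null)
                    {
                        // Process the frame here (e.g. copy pixels or run inference).
                    }
                }
            }
        }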