Example #1
        public override bool TryGetCameraImage(ref CameraImage cameraImage)
        {
            ARTextureHandles handles = nativeInterface.GetARVideoTextureHandles();

            if (handles.textureY == System.IntPtr.Zero || handles.textureCbCr == System.IntPtr.Zero)
            {
                return false;
            }

            if (!m_TexturesInitialized)
            {
                return false;
            }

            m_CurrentFrameIndex = (m_CurrentFrameIndex + 1) % 2;

            nativeInterface.SetCapturePixelData(true,
                                                PinByteArray(ref m_PinnedYArray, YByteArrayForFrame(m_CurrentFrameIndex)),
                                                PinByteArray(ref m_PinnedUVArray, UVByteArrayForFrame(m_CurrentFrameIndex)));

            cameraImage.y      = YByteArrayForFrame(1 - m_CurrentFrameIndex);
            cameraImage.uv     = UVByteArrayForFrame(1 - m_CurrentFrameIndex);
            cameraImage.width  = m_CameraWidth;
            cameraImage.height = m_CameraHeight;
            return true;
        }
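The method above uses a ping-pong scheme: the native plugin captures the current frame into one pinned buffer pair while the previous frame's pair is returned to the caller. The helpers are not shown; a minimal sketch of what they could look like, assuming two full-resolution Y buffers and System.Runtime.InteropServices.GCHandle pinning (all names beyond those used above are hypothetical):

        // Hypothetical sketch of the assumed buffer helpers, not the original code.
        private byte[][] m_YArrays = new byte[2][];

        private byte[] YByteArrayForFrame(int frame)
        {
            // Lazily allocate one full-resolution luminance buffer per slot.
            if (m_YArrays[frame] == null)
            {
                m_YArrays[frame] = new byte[m_CameraWidth * m_CameraHeight];
            }
            return m_YArrays[frame];
        }

        private IntPtr PinByteArray(ref GCHandle handle, byte[] array)
        {
            // Re-pin the managed array so the native side can write into it
            // without the garbage collector moving it.
            if (handle.IsAllocated)
            {
                handle.Free();
            }
            handle = GCHandle.Alloc(array, GCHandleType.Pinned);
            return handle.AddrOfPinnedObject();
        }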
Example #2
        private void backgroundWorker_DoWork(object sender, DoWorkEventArgs e)
        {
            while (!backgroundWorker.CancellationPending)
            {
                cam.Update();

                CameraImage camImg = cam.CalcSelectedChannel();
                if (null == camImg)
                {
                    // Ignore errors. However, this might be a hint that something is wrong in your application.
                    continue;
                }

                Bitmap bmp = camImg.ToBitmap();
                if (saveSnapshot)
                {
                    string snapName     = "MetriCam 2 Snapshot.png";
                    string snapFilename = Path.Combine(Path.GetTempPath(), snapName);
                    bmp.Save(snapFilename);
                    MessageBox.Show(string.Format("Snapshot saved as '{0}'.", snapFilename), "Snapshot saved", MessageBoxButtons.OK, MessageBoxIcon.Asterisk);
                    saveSnapshot = false;
                }
                this.BeginInvokeEx(f => pictureBox.Image = bmp);
            }
            DisconnectCamera();
            isBgwFinished.Set();
        }
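BeginInvokeEx is not a standard WinForms member; it is presumably an extension method that marshals the picture box update onto the UI thread. A plausible sketch under that assumption (hypothetical, not the actual MetriCam helper):

 // Hypothetical sketch of a BeginInvokeEx-style extension method.
 public static class ControlExtensions
 {
     public static void BeginInvokeEx<T>(this T control, Action<T> action)
         where T : System.Windows.Forms.Control
     {
         // Guard against updating a control whose handle is gone.
         if (control.IsHandleCreated && !control.IsDisposed)
         {
             control.BeginInvoke(action, control);
         }
     }
 }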
Example #3
 // Update is called once per frame
 void Update()
 {
     if (save)
     {
         CameraImage cameraImage = new CameraImage();
         if (GetCameraImage(ref cameraImage))
         {
             try
             {
                 Texture2D texUVchannels = new Texture2D(cameraImage.width, cameraImage.height, TextureFormat.RG16, false, false);
                 texUVchannels.LoadRawTextureData(cameraImage.uv);
                 texUVchannels.Apply();
                 var bytes = texUVchannels.EncodeToJPG();
                 Destroy(texUVchannels);
                 System.IO.File.WriteAllBytes(filePath + "/image.jpg", bytes);
                 save = false;
             }
             catch (SystemException e)
             {
                 StreamWriter s = new StreamWriter(filePath + "/Log.txt", true);
                 s.Write(e);
                 s.Close();
             }
         }
     }
 }
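The Update method above encodes only the interleaved UV plane. Saving the luminance plane works the same way with a single-channel texture; a sketch assuming cameraImage.y holds one byte per pixel (the file name is illustrative):

     // Hypothetical companion: save the Y (luminance) plane as a grayscale JPG.
     // TextureFormat.R8 is one byte per pixel, matching the Y buffer layout.
     Texture2D texY = new Texture2D(cameraImage.width, cameraImage.height, TextureFormat.R8, false, false);
     texY.LoadRawTextureData(cameraImage.y);
     texY.Apply();
     System.IO.File.WriteAllBytes(filePath + "/image_y.jpg", texY.EncodeToJPG());
     Destroy(texY);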
Example #4
        internal BitmapVideoPlayer(AviTools aviTools, bool useEmbeddedVideo, string bitmapFilesLocation, int playbackBufferSize)
        {
            this.aviTools           = aviTools;
            this.cameraImage        = new CameraImage();
            this.playbackBufferSize = playbackBufferSize;

            integration = 1;

            if (!imagesBuffered)
            {
                UIThreadCaller.Invoke((frm, prms) => BufferVideoInternal(frm, prms), useEmbeddedVideo, bitmapFilesLocation);

                imagesBuffered = true;
            }

            if (errorBitmap != null)
            {
                width  = errorBitmap.Width;
                height = errorBitmap.Height;
            }
            else if (allImagesPixels.Count > 0)
            {
                width  = bufferedImageWidth;
                height = bufferedImageHeight;
            }
            else
            {
                width  = 0;
                height = 0;
            }
        }
Example #5
        internal VideoCamera(AviTools aviTools)
        {
            this.aviTools = aviTools;
            cameraImage   = new CameraImage();

            ReloadSimulatorSettings();

            isConnected = false;
        }
Example #6
        public void TestCameraImageDefault()
        {
            var imageInfo = new CameraImage("Camera Name", "http://cameraurl");
            var builder   = new CameraInfoReplyActivityBuilder(imageInfo, cameraData.Object);
            var activity  = ActivityTestUtils.CreateActivity();
            var userData  = new Mock <IUserData>();

            var reply = builder.BuildReplyActivity(activity, userData.Object);

            reply.Text.Should().Contain("![camera](" + "http://cameraurl");
            reply.Text.Should().Contain(CameraInfoReplyActivityBuilder.ViewInBrowserPrompt);
            reply.Text.Should().Contain("Camera Name");
        }
Example #7
        private void ParallelUpdate(int index, CameraImage cameraImage, DataModels.Camera localCamera)
        {
            if (cameraImage.CameraID != localCamera.ID)
            {
                return;
            }

            SKBitmap bitmap = SKBitmap.Decode(cameraImage.ImageData).Resize(new SKImageInfo(
                                                                                Constants.URHO_TEXTURE_SIZE, Constants.URHO_TEXTURE_SIZE),
                                                                            SKBitmapResizeMethod.Lanczos3);

            if (localCamera.Screen != null)
            {
                localCamera.Screen.ImageData = bitmap.Bytes;
            }
        }
Example #8
        public void Detect(CameraImage cameraImage)
        {
            var partial = cameraConverter.ToImage(cameraImage);
            var sourceBitmap = converter.ToBitmap(partial);

            var blur = new Blur();
            blur.ApplyInPlace(sourceBitmap);

            var points = harris.ProcessImage(sourceBitmap);
            var featurePoints = points.Select(t => new CornerFeaturePoint(t)).Cast<IFeaturePoint>().ToList();
            if (featurePoints.Count > 0 && oldPoints.Count > 0)
            {
                try
                {
                    var matches = matcher.Match(featurePoints, oldPoints);

                    using (var g = Graphics.FromImage(sourceBitmap))
                    {
                        for (var i = 0; i < matches[0].Length; i++)
                        {
                            g.DrawRectangle(Pens.Blue, matches[0][i].X, matches[0][i].Y, 3, 3);
                            g.DrawRectangle(Pens.Red, matches[1][i].X, matches[1][i].Y, 3, 3);
                            g.DrawLine(Pens.Red, matches[0][i].X + 1, matches[0][i].Y + 1, matches[1][i].X + 1,
                                matches[1][i].Y + 1);
                        }
                    }

                    var resultImage = imageFactory.Create(sourceBitmap);
                    Image = cameraConverter.Convert(resultImage);

                    oldPoints.Clear();
                    oldPoints.AddRange(featurePoints.AsReadOnly());
                }
                catch (Exception)
                {
                    // Matching can fail on degenerate point sets; ignore and fall through.
                }
                finally
                {
                    sourceBitmap.Dispose();
                }
            }
            else
            {
                oldPoints.Clear();
                oldPoints.AddRange(featurePoints.AsReadOnly());
            }
        }
Example #9
        public override bool TryGetCameraImage(ref CameraImage cameraImage)
        {
            if (Frame.TrackingState != TrackingState.Tracking)
            {
                return false;
            }

            if (Frame.CameraImage.Texture == null || Frame.CameraImage.Texture.GetNativeTexturePtr() == IntPtr.Zero)
            {
                return false;
            }

            //This is a GL texture ID
            int textureId  = Frame.CameraImage.Texture.GetNativeTexturePtr().ToInt32();
            int bufferSize = 0;
            //Ask the native plugin to start reading the image of the current frame,
            //and return the image read from the previous frame
            IntPtr bufferPtr = TextureReader_submitAndAcquire(textureId, k_ARCoreTextureWidth, k_ARCoreTextureHeight, ref bufferSize);

            //I think this is needed because of this bug
            //https://github.com/google-ar/arcore-unity-sdk/issues/66
            GL.InvalidateState();

            if (bufferPtr == IntPtr.Zero || bufferSize == 0)
            {
                return false;
            }

            if (pixelBuffer == null || pixelBuffer.Length != bufferSize)
            {
                pixelBuffer = new byte[bufferSize];
            }

            //Copy buffer
            Marshal.Copy(bufferPtr, pixelBuffer, 0, bufferSize);

            //Convert to YUV data
            PixelBuffertoYUV2(pixelBuffer, k_ARCoreTextureWidth, k_ARCoreTextureHeight,
                              k_ImageFormatType, ref cameraImage.y, ref cameraImage.uv);

            cameraImage.width  = k_ARCoreTextureWidth;
            cameraImage.height = k_ARCoreTextureHeight;

            return true;
        }
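TextureReader_submitAndAcquire lives in a native plugin (the texture reader from the ARCore computer-vision sample). Its extern declaration is not shown above; it presumably looks roughly like this (library name and exact signature are assumptions):

            // Hypothetical P/Invoke declaration assumed by the snippet above;
            // requires System.Runtime.InteropServices.
            [DllImport("arcore_camera_utility")]
            private static extern IntPtr TextureReader_submitAndAcquire(
                int textureId, int textureWidth, int textureHeight, ref int bufferSize);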
Example #10
        public override bool TryGetCameraImage(ref CameraImage cameraImage)
        {
            //If the remote device is not sending video, we can assume that the current cached frame is out of date.
            if (!m_SendVideo)
            {
                return false;
            }
            //We only return a cached frame if it has all its components
            if (m_CameraImage.height == 0 || m_CameraImage.width == 0 || m_CameraImage.y == null || m_CameraImage.uv == null)
            {
                return false;
            }

            cameraImage.width  = m_CameraImage.width;
            cameraImage.height = m_CameraImage.height;
            cameraImage.y      = m_CameraImage.y;
            cameraImage.uv     = m_CameraImage.uv;

            return true;
        }
Example #11
        public void TestCameraImageTeams()
        {
            var imageInfo = new CameraImage("Camera Name", "http://cameraurl");
            var builder   = new CameraInfoReplyActivityBuilder(imageInfo, cameraData.Object);
            var activity  = ActivityTestUtils.CreateActivity();

            activity.ChannelId = "teams";
            var userData = new Mock <IUserData>();

            var reply = builder.BuildReplyActivity(activity, userData.Object);

            reply.Type.Should().Be("message");
            reply.Attachments.Count.Should().Be(1);
            var heroCard = reply.Attachments.First().Content as HeroCard;

            heroCard.Buttons.Count.Should().Be(1);
            heroCard.Title.Should().Be("Camera Name");
            heroCard.Buttons[0].Value.Should().Be("http://cameraurl");
            heroCard.Buttons[0].Type.Should().Be("openUrl");
            heroCard.Buttons[0].Title.Should().Be(CameraInfoReplyActivityBuilder.CardViewPrompt);
        }
Example #12
        public BitmapVideoPlayer(AviTools aviTools, bool useEmbeddedVideo, string bitmapFilesLocation, int playbackBufferSize)
        {
            this.aviTools           = aviTools;
            this.cameraImage        = new CameraImage();
            this.playbackBufferSize = playbackBufferSize;

            integration = 1;

            if (!imagesBuffered)
            {
                if (useEmbeddedVideo)
                {
                    BufferEmbeddedOccultationVideo();
                }
                else
                {
                    BufferVideoFrames(bitmapFilesLocation);
                }

                imagesBuffered = true;
            }

            if (errorBitmap != null)
            {
                width  = errorBitmap.Width;
                height = errorBitmap.Height;
            }
            else if (allImagesPixels.Count > 0)
            {
                width  = bufferedImageWidth;
                height = bufferedImageHeight;
            }
            else
            {
                width  = 0;
                height = 0;
            }
        }
Example #13
 private void OnNewDisparityMap(object sender, CameraImage cameraImage)
 {
     disparityMap.Write(cameraImage);
 }
Example #14
 private void OnNewDisparityMap(object sender, CameraImage cameraImage)
 {
     processing = false;
 }
Example #15
        /// <summary>
        /// Selects every channel and tries to get an image.
        /// </summary>
        /// <returns>true on success, else false</returns>
        private bool TestCalcChannels()
        {
            Camera cam = GetNewCamera();

            channels = GetChannel(cam);
            bool res = false;

            if (channels == null || channels.Length == 0)
            {
                AddError("Camera of type \"" + camType.ToString() + "\" has no channels.");
                return false;
            }

            try { cam.Connect(); } catch { AddError("Connect failed."); return false; }

            /*
             * Disable all active channels first. This is needed because some cameras
             * cannot have certain channels enabled at the same time.
             * This way only one channel is active at a time.
             */
            foreach (ChannelRegistry.ChannelDescriptor activeChannel in cam.ActiveChannels.ToArray())
            {
                try
                {
                    cam.DeactivateChannel(activeChannel.Name);
                }
                catch (Exception ex)
                {
                    AddError("Deactivate channel \"" + activeChannel.Name + "\" failed: " + ex.Message);
                    goto err;
                }
            }

            // iterate through channels
            foreach (string channel in channels)
            {
                CameraImage img = null;
                try
                {
                    if (!cam.IsChannelActive(channel))
                    {
                        cam.ActivateChannel(channel);
                    }
                }
                catch (Exception ex)
                {
                    AddError("Activate channel \"" + channel + "\" failed: " + ex.Message);
                    goto err;
                }
                cam.SelectChannel(channel);
                try
                {
                    cam.Update();
                }
                catch (Exception ex)
                {
                    AddError("Update failed: " + ex.Message);
                    goto err;
                }
                try
                {
                    img = cam.CalcSelectedChannel();
                }
                catch (Exception ex)
                {
                    AddError("CalcChannel failed: " + ex.Message);
                    goto err;
                }
                if (img == null)
                {
                    AddError("Resulting image for channel \"" + channel + "\" is NULL.");
                    goto err;
                }
                if (img.FrameNumber == -1)
                {
                    AddError("Framenumber was not updated.");
                    goto err;
                }
                if (img.TimeStamp == 0)
                {
                    AddError("Timestamp was not updated.");
                    goto err;
                }
                try
                {
                    cam.DeactivateChannel(channel);
                }
                catch (Exception ex)
                {
                    AddError("Deactivate channel \"" + channel + "\" failed: " + ex.Message);
                    goto err;
                }
            }

            res = true;
err:
            try { cam.Disconnect(); } catch { AddError("Disconnect failed."); res = false; }
            return res;
        }
Example #16
 public override bool TryGetCameraImage(ref CameraImage cameraImage)
 {
     return false;
 }
Example #17
 public void SetImage(CameraImage cameraImage)
 {
     Image = converter.ToImage(cameraImage);
 }
Example #18
 //////////////////////////////////////////////////////////////////////////
 /// <summary>
 /// Generates an unpacked 2-D image from a 1-D image.
 /// </summary>
 /// <param name="image">The camera image to be unpacked.</param>
 /// <returns>An unpacked 2-D image based on the CameraImage source.</returns>
 public static ushort[,] UnpackImage(CameraImage image)
 {
     return UnpackImage(image.PackedImage, image.ImageWidth, image.ImageHeight);
 }
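The array-based overload it delegates to is not shown; a sketch of what it could look like, assuming PackedImage is a row-major ushort buffer:

 // Hypothetical sketch: copy a row-major 1-D buffer into a [height, width] 2-D array.
 public static ushort[,] UnpackImage(ushort[] packed, int width, int height)
 {
     ushort[,] unpacked = new ushort[height, width];
     for (int y = 0; y < height; y++)
     {
         for (int x = 0; x < width; x++)
         {
             unpacked[y, x] = packed[y * width + x];
         }
     }
     return unpacked;
 }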
Example #19
 private void OnWrite(CameraImage image)
 {
     CameraMovementDetector.ProcessImage(image);
 }
Example #20
    public bool GetCameraImage(ref CameraImage cameraImage)
    {
        //Set LuminanceOnly to true if you need a Y component only (black and white)
        const bool LuminanceOnly = false;

        using (var imageBytes = Frame.CameraImage.AcquireCameraImageBytes())
        {
            if (!imageBytes.IsAvailable)
            {
                //No image is available for this frame.
                return false;
            }

            cameraImage.width  = imageBytes.Width;
            cameraImage.height = imageBytes.Height;

            if (imageBytes.YRowStride != imageBytes.Width)
            {
                //Y should be 1 byte per pixel. Not doing anything otherwise.
                return false;
            }

            //We expect 1 byte per pixel for Y
            int bufferSize = imageBytes.Width * imageBytes.Height;
            if (cameraImage.y == null || cameraImage.y.Length != bufferSize)
            {
                cameraImage.y = new byte[bufferSize];
            }

            //Y plane is copied as is.
            Marshal.Copy(imageBytes.Y, cameraImage.y, 0, bufferSize);


            if (LuminanceOnly || imageBytes.UVRowStride != imageBytes.Width || imageBytes.UVPixelStride != 2)
            {
                //Weird values. Y is probably enough.
                cameraImage.uv = null;
                return true;
            }

            //We expect 2 bytes per pixel, interleaved U/V, with 2x2 subsampling
            bufferSize = imageBytes.Width * imageBytes.Height / 2;
            if (cameraImage.uv == null || cameraImage.uv.Length != bufferSize)
            {
                cameraImage.uv = new byte[bufferSize];
            }

            //Because the U and V planes are returned separately, while the remote expects
            //interleaved U/V (same as ARKit), we merge the buffers ourselves
            unsafe
            {
                fixed(byte *uvPtr = cameraImage.uv)
                {
                    byte *UV = uvPtr;

                    byte *U = (byte *)imageBytes.U.ToPointer();
                    byte *V = (byte *)imageBytes.V.ToPointer();

                    for (int i = 0; i < bufferSize; i += 2)
                    {
                        *UV++ = *U;
                        *UV++ = *V;

                        U += imageBytes.UVPixelStride;
                        V += imageBytes.UVPixelStride;
                    }
                }
            }
            return true;
        }
    }
Example #21
 public void ProcessLeftImage(CameraImage cameraImage)
 {
     leftCameraImage = cameraImage;
     Process();
 }
Example #22
 ///<exclude/>
 public bool Equals(CameraImage other)
 {
     if (ReferenceEquals(null, other)) return false;
     if (ReferenceEquals(this, other)) return true;
     return other._Tm.Equals(_Tm) && other._Width.Equals(_Width) && other._Height.Equals(_Height) &&
            other._Bpp.Equals(_Bpp) && other._Format == _Format && other._FDiv.Equals(_FDiv) &&
            other._Pixels.SequenceEqual(_Pixels);
 }
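A type that overrides Equals this way would normally also override GetHashCode over the same fields. A sketch (hypothetical; the pixel buffer is left out of the hash for cost):

 // Hypothetical companion to the Equals above.
 public override int GetHashCode()
 {
     unchecked
     {
         int hash = _Tm.GetHashCode();
         hash = (hash * 397) ^ _Width.GetHashCode();
         hash = (hash * 397) ^ _Height.GetHashCode();
         hash = (hash * 397) ^ _Bpp.GetHashCode();
         hash = (hash * 397) ^ _Format.GetHashCode();
         hash = (hash * 397) ^ _FDiv.GetHashCode();
         return hash;
     }
 }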
Example #23
        public override string Solve(string input, bool part2)
        {
            foreach (var image in GetGroupedLines(input))
            {
                images.Add(new CameraImage(GetLines(image)));
            }
            dimension = (int)Math.Floor(Math.Sqrt(images.Count));

            Render(false);
            Console.WriteLine();

            List <(int, int, char)> borders = new List <(int, int, char)>();

            foreach (CameraImage image in images)
            {
                for (int i = 0; i < 2; ++i)
                {   //Get all borders for every image. Since right and bottom are basically read reversed, we don't need to flip and can just do a 180.
                    //Since the value for, e.g., a border at the left would be the same for that border at the top, we can skip that rotation step.
                    image.RotateRight();
                    image.RotateRight();
                    borders.Add((image.ID, image.TopBorderNr, 'U'));
                    borders.Add((image.ID, image.BottomBorderNr, 'D'));
                    borders.Add((image.ID, image.LeftBorderNr, 'L'));
                    borders.Add((image.ID, image.RightBorderNr, 'R'));
                }
            }

            //Group the images according to how they share a border
            Lookup <int, (int, int, char)> tileGroups = (Lookup <int, (int, int, char)>)borders.ToLookup(k => k.Item2, v => v);
            //Separate these groups for outer and inner connections
            //(outer connection being the image border that connects to no other image and is therefore part of the outer border of the final image)
            var borderConnections = tileGroups.Where(x => x.Count() == 1);
            var innerConnections  = tileGroups.Where(x => x.Count() == 2);
            //All borders must come in pairs; there can't be a border value that matches more than 2 images. At least I would panic a bit in that case.
            var undetermined = tileGroups.Where(x => x.Count() > 2);

            if (undetermined.Count() > 0)
            {
                throw new InvalidOperationException("Some borders couldn't be assigned !!");
            }


            //reorganize the inner connections, so that we don't have duplicates (bidirectional) anymore
            List <(int, int, int, string, int, string)> uniqueInnerConnections = new List <(int, int, int, string, int, string)>();

            foreach (var connection in innerConnections)
            {
                (int id1, int border1, char side1) = connection.ElementAt(0);
                (int id2, int border2, char side2) = connection.ElementAt(1);

                var detected = uniqueInnerConnections.Where(x =>
                                                            (id1 == x.Item1 || id1 == x.Item2) &&
                                                            (id2 == x.Item1 || id2 == x.Item2)
                                                            ).ToList();

                string sideName = side1.ToString() + side2.ToString();

                if (detected.Count() == 0)
                {
                    uniqueInnerConnections.Add((id1, id2, connection.Key, sideName, 0, ""));
                }
                else
                {
                    (int dId1, int dId2, int dBorder1, string dSide1, int dBorder2, string dSide2) = detected[0];
                    uniqueInnerConnections[uniqueInnerConnections.IndexOf(detected[0])]            =
                        (dId1, dId2, dBorder1, dSide1, connection.Key, sideName);
                }
            }

            //reorganize the outer borders, so that all possible border values are assigned to the same image
            Dictionary <int, List <(int, char)> > uniqueBorderConnections = new Dictionary <int, List <(int, char)> >();

            foreach (var connection in borderConnections)
            {
                (int id1, int border1, char side) = connection.ElementAt(0);

                if (!uniqueBorderConnections.ContainsKey(id1))
                {
                    uniqueBorderConnections.Add(id1, new List <(int, char)>()
                    {
                        (connection.Key, side)
                    });
                }
                else
                {
                    List <(int, char)> dBorders = uniqueBorderConnections[id1];
                    if (dBorders.Count >= 1 && dBorders.Count <= 4)
                    {
                        dBorders.Add((connection.Key, side));
                    }
                }
            }



            if (part2)
            {
                ArrangeImage(uniqueBorderConnections, uniqueInnerConnections);
                //Create an image from the rendered Map
                CameraImage map = new CameraImage(GetLines("Tile 2020:\r\n" + Render(true)));
                Console.WriteLine();
                //Create an image from the monster string
                CameraImage monster       = new CameraImage(GetLines("Tile 2020:\r\n..................#.\r\n#....##....##....###\r\n.#..#..#..#..#..#..."));
                int         monsterWidth  = monster.TopBorder.Count;
                int         monsterHeight = monster.Image.Count;

                //Set up trying to find the monsters
                int  currMonsterCnt = 0;
                int  rotationCount  = 0;
                bool flipped        = false;
                Console.SetCursorPosition(0, 0);
                while (currMonsterCnt == 0)
                {
                    currMonsterCnt = 0;
                    //Going over every line in the map except the last three, because our monster is 3 rows tall
                    for (int y = 0; y <= map.Image.Count - monsterHeight; ++y)
                    {   //Moving along this line leaving enough space for the width of our monster.
                        for (int x = 0; x <= map.Image[0].Count - monsterWidth; ++x)
                        {
                            //go over the lines of the monster
                            bool isMonster = true;
                            for (int monsterY = 0; monsterY < monsterHeight; ++monsterY)
                            {
                                //Get the area of the current line of the map and the monster as number (bitmask)
                                long mappedValue = Bitwise.GetBitMaskLong(map.Image[y + monsterY].GetRange(x, monsterWidth));
                                long monsterRow  = Bitwise.GetBitMaskLong(monster.Image[monsterY]);
                                //if the map contains the monster, ANDing the two bitmasks should output a perfect replica of the monster
                                if ((mappedValue & monsterRow) != monsterRow)
                                {//otherwise we don't have a complete monster
                                    isMonster = false;
                                    break;
                                }
                            }
                            if (isMonster)
                            {   //When we got a full monster, increase the counter and censor the area the monster is located in.
                                //I was too lazy to work out the pixels that are part of the monster, so we just draw over the whole area the monster takes up (monsterWidth x monsterHeight).
                                for (int monsterY = 0; monsterY < monsterHeight; ++monsterY)
                                {
                                    Console.SetCursorPosition(x, y + monsterY);
                                    //Mark the area with a letter to differentiate monsters, especially those that overlap.
                                    Console.WriteLine("".PadLeft(monsterWidth, (char)(0x41 + currMonsterCnt)));
                                }
                                ++currMonsterCnt;
                            }
                        }
                    }


                    if (currMonsterCnt == 0)
                    {//if we didn't find a single monster, rotate the image by 90 degrees
                        map.RotateRight();
                        Render(true);
                        if (++rotationCount >= 4)
                        {//we did a full 360. Perhaps we need to flip
                            if (flipped)
                            {
                                if (rotationCount >= 5)//we flipped one more time to be sure. This image couldn't be properly aligned. That shouldn't happen :(
                                {
                                    throw new InvalidOperationException("LÖÖP");
                                }
                                else
                                {
                                    continue;
                                }
                            }
                            map.Flip();
                            rotationCount = 0;
                            flipped       = true;
                        }
                    }
                }

                Console.CursorTop = map.Image.Count + 1;
                int roughness = map.ToString().Where(x => x == '#').Count();
                roughness -= monster.ToString().Where(x => x == '#').Count() * currMonsterCnt;
                return("Monsters Found: " + currMonsterCnt + "\r\nRoughness: " + roughness);
            }
            else
            {//part 1 only needs the product of the IDs of the corner tiles
                long product = 1;
                foreach (var connection in uniqueBorderConnections.Where(x => x.Value.Count == 4))
                {
                    product *= connection.Key;
                }
                return("Product of corner tile-IDs: " + product);
            }
        }
Example #24
 private void ProcessLeftCamera(CameraImage image)
 {
     leftCameraOut.Write(image);
     StereoImaging.ProcessLeftImage(image);
 }
Example #25
 private void ProcessRightCamera(CameraImage image)
 {
     rightCameraOut.Write(image);
     StereoImaging.ProcessRightImage(image);
 }
Example #26
 private void OnWrite(CameraImage image)
 {
     Detector.Detect(image);
 }
Example #27
 private void FrameReady(object sender, FrameReadyEventArgs e)
 {
     CameraImage.UpdateImage(e.Image);
 }
Example #28
        public override bool TryGetCameraImage(ref CameraImage cameraImage)
        {
            ARCoreNative.NativeImage nativeImage = new ARCoreNative.NativeImage();
            if (ARCoreNative.Device.TryAcquireLatestImageBuffer(ref nativeImage))
            {
                cameraImage.width  = (int)nativeImage.width;
                cameraImage.height = (int)nativeImage.height;

                var planeInfos = nativeImage.planeInfos;

                // The Y plane is always the first one.
                var    yOffset     = planeInfos[0].offset;
                var    numYBytes   = planeInfos[0].size;
                IntPtr yPlaneStart = new IntPtr(nativeImage.planeData.ToInt64() + yOffset);

                if (cameraImage.y == null || cameraImage.y.Length != numYBytes)
                {
                    cameraImage.y = new byte[numYBytes];
                }

                Marshal.Copy(yPlaneStart, cameraImage.y, 0, (int)numYBytes);

                // UV planes are not deterministic, but we want all the data in one go
                // so the offset will be the min of the two planes.
                int uvOffset = Mathf.Min(
                    (int)nativeImage.planeInfos[1].offset,
                    (int)nativeImage.planeInfos[2].offset);

                // Find the end of the uv plane data
                int uvDataEnd = 0;
                for (int i = 1; i < planeInfos.Count; ++i)
                {
                    uvDataEnd = Mathf.Max(uvDataEnd, (int)planeInfos[i].offset + planeInfos[i].size);
                }

                // Finally, compute the number of bytes by subtracting the end from the beginning
                var    numUVBytes   = uvDataEnd - uvOffset;
                IntPtr uvPlaneStart = new IntPtr(nativeImage.planeData.ToInt64() + uvOffset);

                if (cameraImage.uv == null || cameraImage.uv.Length != numUVBytes)
                {
                    cameraImage.uv = new byte[numUVBytes];
                }

                Marshal.Copy(uvPlaneStart, cameraImage.uv, 0, (int)numUVBytes);

                ARCoreNative.Device.ReleaseImageBuffer(nativeImage);

                // The data is usually provided as VU rather than UV,
                // so we need to swap the bytes.
                // There's no way to know this currently, but it's always
                // been this way on every device so far.
                for (int i = 1; i < numUVBytes; i += 2)
                {
                    var b = cameraImage.uv[i - 1];
                    cameraImage.uv[i - 1] = cameraImage.uv[i];
                    cameraImage.uv[i]     = b;
                }

                return true;
            }

            return false;
        }
Example #29
 public abstract bool TryGetCameraImage(ref CameraImage cameraImage);
Example #30
        public void ProcessImage(CameraImage cameraImage)
        {
            var bitmap = converter.ToBitmap(cameraImage);
            var greyImage = new Image<Gray, byte>(bitmap);

            var cornerPoints = cornersDetector.Detect(greyImage, configuration.InnerCornersPerChessboardCols,
                configuration.InnerCornersPerChessboardRows);

            if (cornerPoints.Size > 1)
            {
                Vectors = vectorsCalculator.Calculate(cornerPoints, greyImage.Size);
                var colorImage = MarkChessboard(bitmap, cornerPoints);
                Image = converter.Convert(colorImage.ToBitmap());
            }
            else
            {
                Image = converter.Convert(bitmap);
            }
        }
Example #31
        public ImageSource detectRetroreflectiveBlob(int width, int height, byte[] pixelData)
        {
            //Create color and depth images to process
            Image <Bgr, short> openCvImg      = new Image <Bgr, short>(CameraImage.DEPTH_IMAGE_WIDTH, CameraImage.DEPTH_IMAGE_HEIGHT, new Bgr(0, 0, 0));
            Image <Bgr, short> openCvDepthImg = new Image <Bgr, short>(CameraImage.DEPTH_IMAGE_WIDTH, CameraImage.DEPTH_IMAGE_HEIGHT, new Bgr(0, 0, 0));

            int stride = width * ((PixelFormats.Bgr32.BitsPerPixel) / 8);
            //Create a BitmapSource at 96 DPI from the raw 32-bit BGR pixel data
            BitmapSource sBitmap = System.Windows.Media.Imaging.BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixelData, stride);

            openCvImg.Bitmap = CameraImage.BmsToBm(sBitmap);

            // copy this image as the debug image on which will be drawn
            var gray_image = openCvImg.Convert <Gray, byte>();

            gray_image._GammaCorrect(0.3);

            var greyThreshImg = gray_image.ThresholdBinary(new Gray(220), new Gray(255));

            //greyThreshImg = greyThreshImg.Dilate(5);

            Emgu.CV.Cvb.CvBlobs        resultingImgBlobs = new Emgu.CV.Cvb.CvBlobs();
            Emgu.CV.Cvb.CvBlobDetector bDetect           = new Emgu.CV.Cvb.CvBlobDetector();
            var nBlobs = bDetect.Detect(greyThreshImg, resultingImgBlobs);

            int _blobSizeThreshold = 1;
            var rgb = new Rgb(255, 0, 0);
            var depthFrameReader = _kSensor.DepthFrameSource.OpenReader();
            var depthFrame       = depthFrameReader.AcquireLatestFrame();

            ushort[] depthData = new ushort[width * height];

            depthFrame.CopyFrameDataToArray(depthData);

            List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> > detectedBlobs = new List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> >();

            if (nBlobs > 0)
            {
                var blobImg = greyThreshImg;

                foreach (Emgu.CV.Cvb.CvBlob targetBlob in resultingImgBlobs.Values)
                {
                    if (targetBlob.Area > _blobSizeThreshold)
                    {
                        blobImg.Draw(targetBlob.BoundingBox, new Gray(255), 1);
                        float centroidX = targetBlob.Centroid.X;
                        float centroidY = targetBlob.Centroid.Y;

                        DepthSpacePoint dsp = new DepthSpacePoint();
                        dsp.X = targetBlob.Centroid.X; //targetBlob.BoundingBox.X;
                        dsp.Y = targetBlob.Centroid.Y; //targetBlob.BoundingBox.Y;
                        int depth       = (int)(width * dsp.Y + dsp.X);
                        var mappedPoint = _kSensor.CoordinateMapper.MapDepthPointToCameraSpace(dsp, depthData[depth]);
                        detectedBlobs.Add(new KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint>(targetBlob, mappedPoint));
                    }
                }
            }

            depthFrame.Dispose();
            //return BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixelData, stride);
            //return detectedBlobs;
            return CameraImage.BmToBms(greyThreshImg.Bitmap);
        }
Example #32
 private void OnWrite(CameraImage image)
 {
     ImageProvider.SetImage(image);
 }
Example #33
        public KeyValuePair <BitmapSource, List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> > > processBlobs(DepthFrameReader depthFrameReader, InfraredFrame frame, double blobSizeThreshold)
        {
            int width  = frame.FrameDescription.Width;
            int height = frame.FrameDescription.Height;

            ushort[] imageDataArray = new ushort[width * height];
            frame.CopyFrameDataToArray(imageDataArray);


            byte[] pixelData = new byte[width * height * (PixelFormats.Bgr32.BitsPerPixel + 7) / 8];

            int colorIndex = 0;

            for (int i = 0; i < imageDataArray.Length; i++)
            {
                ushort d = (ushort)(imageDataArray[i] >> 8);
                byte   b = (byte)d;
                int    x = colorIndex;
                pixelData[colorIndex++] = b;
                pixelData[colorIndex++] = b;
                pixelData[colorIndex++] = b;
                pixelData[colorIndex++] = 255;
            }

            Image <Bgr, short> openCvImg      = new Image <Bgr, short>(CameraImage.DEPTH_IMAGE_WIDTH, CameraImage.DEPTH_IMAGE_HEIGHT, new Bgr(0, 0, 0));
            Image <Bgr, short> openCvDepthImg = new Image <Bgr, short>(CameraImage.DEPTH_IMAGE_WIDTH, CameraImage.DEPTH_IMAGE_HEIGHT, new Bgr(0, 0, 0));

            int stride = width * ((PixelFormats.Bgr32.BitsPerPixel) / 8);
            //Create a BitmapSource at 96 DPI from the raw 32-bit BGR pixel data
            BitmapSource sBitmap = System.Windows.Media.Imaging.BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixelData, stride);

            openCvImg.Bitmap = CameraImage.BmsToBm(sBitmap);

            // copy this image as the debug image on which will be drawn
            var gray_image = openCvImg.Convert <Gray, byte>();

            gray_image._GammaCorrect(0.3);

            var greyThreshImg = gray_image.ThresholdBinary(new Gray(220), new Gray(255));

            greyThreshImg = greyThreshImg.Dilate(5);

            var rgb = new Rgb(255, 0, 0);
            //var depthFrameReader = _kSensor.DepthFrameSource.OpenReader();
            var depthFrame = depthFrameReader.AcquireLatestFrame();

            if (depthFrame == null)
            {
                return new KeyValuePair <BitmapSource, List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> > >(null, null);
            }
            ushort[] depthData = new ushort[width * height];

            depthFrame.CopyFrameDataToArray(depthData);
            depthFrame.Dispose();

            Emgu.CV.Cvb.CvBlobs        resultingImgBlobs = new Emgu.CV.Cvb.CvBlobs();
            Emgu.CV.Cvb.CvBlobDetector bDetect           = new Emgu.CV.Cvb.CvBlobDetector();
            var nBlobs = bDetect.Detect(greyThreshImg, resultingImgBlobs);


            List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> > mappedPoints = new List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> >();

            if (nBlobs > 0)
            {
                var             blobImg = greyThreshImg;
                DepthSpacePoint dsp     = new DepthSpacePoint();
                foreach (Emgu.CV.Cvb.CvBlob targetBlob in resultingImgBlobs.Values)
                {
                    if (targetBlob.Area > blobSizeThreshold)
                    {
                        blobImg.Draw(targetBlob.BoundingBox, new Gray(255), 1);
                        dsp.X = targetBlob.Centroid.X;
                        dsp.Y = targetBlob.Centroid.Y;
                        int depth       = (int)this.blobDetector.getDepth((int)dsp.X, (int)dsp.Y, width, depthData);//(Math.Floor(width * dsp.Y + dsp.X));
                        var mappedPoint = _kSensor.CoordinateMapper.MapDepthPointToCameraSpace(dsp, depthData[depth]);
                        if (!float.IsInfinity(mappedPoint.X) && !float.IsInfinity(mappedPoint.Y) && !float.IsInfinity(mappedPoint.Z))
                        {
                            mappedPoints.Add(new KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint>(targetBlob, mappedPoint));
                        }
                    }
                }
            }


            //return BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixelData, stride);
            var bitmap = BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixelData, stride);

            return new KeyValuePair <BitmapSource, List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> > >(bitmap, mappedPoints);
        }
Example #34
 public void ProcessRightImage(CameraImage cameraImage)
 {
     rightCameraImage = cameraImage;
     Process();
 }
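ProcessRightImage mirrors ProcessLeftImage from Example #21: each caches its frame and calls Process(). A plausible sketch of that gate, assuming stereo processing should only run once both frames of a pair have arrived (ComputeDisparity is a hypothetical stand-in):

 // Hypothetical sketch of the Process() gate implied by the two methods.
 private void Process()
 {
     if (leftCameraImage == null || rightCameraImage == null)
     {
         return; // wait until both frames of the stereo pair have arrived
     }
     ComputeDisparity(leftCameraImage, rightCameraImage); // hypothetical worker
     leftCameraImage  = null;
     rightCameraImage = null;
 }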