/// <summary>
/// A little worker that will move along the outside of a blob and mark points.
/// </summary>
/// <param name="p_buffer">Pixel buffer being traced (4 bytes per pixel, BGRA).</param>
/// <param name="p_point">Starting Position</param>
/// <param name="p_width">Width of the image it's tracing</param>
/// <param name="p_dir">Initial facing of the tracer.</param>
public ContourTracer(ref byte[] p_buffer, Point p_point, int p_width, Direction p_dir)
{
    m_buffer = p_buffer;
    m_width = p_width;

    // Start and current state begin identical; the trace is complete when
    // they coincide again (same position AND same facing).
    m_startPoint = p_point;
    m_curPoint = p_point;
    m_startFace = p_dir;
    m_curFace = p_dir;
    m_curOffset = ImageProcess.getOffset(p_point.X, p_point.Y, p_width, 4);

    // Byte offset of a one-pixel step in each direction (4 bytes per pixel).
    m_dirValues = new int[6];
    m_dirValues[(int)Direction.UP] = -p_width * 4;
    m_dirValues[(int)Direction.RIGHT] = 4;
    m_dirValues[(int)Direction.DOWN] = p_width * 4;
    m_dirValues[(int)Direction.LEFT] = -4;

    // Sentinel slots so indexing with (facing - 1) and (facing + 1) wraps
    // around without any modulo arithmetic.
    m_dirValues[0] = m_dirValues[(int)Direction.LEFT];
    m_dirValues[5] = m_dirValues[(int)Direction.UP];
}
/// <summary>
/// Performs the checks and movements required for one contour step.
/// The check is based on facing as follows:
/// |_1_|_2_|_3_|
///     |_^_|
/// The caret represents facing; the three points ahead are checked in sequence
/// (counterclockwise first, then straight, then clockwise). If one contains a
/// foreground pixel, the tracer moves there and updates its facing; otherwise
/// it turns clockwise in place and retries (up to 3 turns via recursion).
/// Note: state is carried in fields (m_curOffset, m_curFace, m_curPoint) and
/// the statement order below is load-bearing — offsets are read before they
/// are advanced, and facing changes before the recursive retry.
/// </summary>
/// <param name="p_next">Receives the new current point when the tracer moves.</param>
/// <param name="p_iterations">Recursion depth — number of clockwise turns tried so far this step.</param>
/// <returns>true for continue or false meaning to stop</returns>
private bool increment(ref Point p_next, int p_iterations = 0)
{
    // Three failed clockwise turns means every neighbor was checked: isolated pixel, stop.
    if (p_iterations == 3)
    {
        return(false);
    }
    else
    {
        //If the point in its direction and counterclockwise from the current position is a valid pixel
        if (m_buffer[m_curOffset + m_dirValues[(int)m_curFace] + m_dirValues[(int)m_curFace - 1]] != 0)
        {
            // Move diagonally (ahead + counterclockwise) and rotate facing left.
            // (m_curFace - 1) relies on the sentinel slot at m_dirValues[0].
            m_curOffset += m_dirValues[(int)m_curFace] + m_dirValues[(int)m_curFace - 1];
            changeFacing(-1); //Face left of current facing
            //I will copy this section of code because i don't want to have to run it up to 3 times each pixel per increment
            m_curPoint = ImageProcess.getPoint(m_curOffset, m_width, 4);
            p_next = m_curPoint;
        }
        else if (m_buffer[m_curOffset + m_dirValues[(int)m_curFace]] != 0)
        {
            // Straight ahead is foreground: step forward, facing unchanged.
            m_curOffset += m_dirValues[(int)m_curFace];
            m_curPoint = ImageProcess.getPoint(m_curOffset, m_width, 4);
            p_next = m_curPoint;
        }
        else if (m_buffer[m_curOffset + m_dirValues[(int)m_curFace] + m_dirValues[(int)m_curFace + 1]] != 0)
        {
            // Move diagonally (ahead + clockwise), facing unchanged.
            // (m_curFace + 1) relies on the sentinel slot at m_dirValues[5].
            m_curOffset += m_dirValues[(int)m_curFace] + m_dirValues[(int)m_curFace + 1];
            m_curPoint = ImageProcess.getPoint(m_curOffset, m_width, 4);
            p_next = m_curPoint;
        }
        else
        {
            //Turn clockwise and try again
            changeFacing(1);
            if (!m_curPoint.Equals(m_startPoint) || m_curFace != m_startFace)
            {
                return(increment(ref p_next, ++p_iterations));
            }
            // else: back at the start in the start facing — fall through to the
            // final return, which evaluates to false here.
        }
        //The end condition is when the tracer is on the starting position and facing the same direction
        // cp == sp ^ cf == sf, perform de'morgans to get
        // cp != sp | cf != sf
        return(!m_curPoint.Equals(m_startPoint) || m_curFace != m_startFace);
    }
}
/// <summary>
/// This updates the buffer to be representative of the datapoints:
/// the listed points become white (B, G, R = 255), everything else black,
/// and the alpha channel is fully opaque throughout.
/// </summary>
/// <param name="p_dataPoints">Points to mark in the image.</param>
/// <param name="p_data">Bitmap metadata; only its Width is read.</param>
/// <param name="p_buffer">BGRA pixel buffer rewritten in place.</param>
public static void updateBuffer(List<Point> p_dataPoints, ref BitmapData p_data, ref byte[] p_buffer)
{
    // Compose the replacement image off to the side, then copy it over in one shot.
    byte[] scratch = new byte[p_buffer.Length];

    // Paint each data point white: B, G, R channels at the point's offset.
    foreach (Point point in p_dataPoints)
    {
        int pixel = ImageProcess.getOffset(point.X, point.Y, p_data.Width, 4);
        scratch[pixel] = 255;
        scratch[pixel + 1] = 255;
        scratch[pixel + 2] = 255;
    }

    // Make the whole image opaque: alpha is every 4th byte, starting at index 3.
    for (int alpha = 3; alpha < p_buffer.Length; alpha += 4)
    {
        scratch[alpha] = 255;
    }

    Buffer.BlockCopy(scratch, 0, p_buffer, 0, p_buffer.Length);
}
/// <summary>
/// Lazily creates the shared frame processor on first call (discovering the first
/// color video capture device), then starts pulling camera frames and running
/// <paramref name="processMethod"/> over them. If no camera can be found the
/// method returns without processing.
/// </summary>
/// <param name="resultCallback">Invoked with processing results.</param>
/// <param name="preResultCallback">Invoked before results are delivered.</param>
/// <param name="timeout">Optional limit on how long frame processing may run; null means no limit.</param>
/// <param name="processMethod">Image-processing routine applied to the frames.</param>
/// <param name="repeat">Whether frame processing should repeat.</param>
/// <remarks>
/// NOTE(review): this is async void, so exceptions thrown here cannot be observed
/// by callers and may crash the process. Consider changing the return type to
/// Task if no caller depends on the void signature.
/// Fix applied: the dead local `object[] result` (assigned from
/// frameProcessor.Result but never used, and left unassigned when setup failed)
/// has been removed; read frameProcessor.Result directly if it is ever needed.
/// </remarks>
public static async void StartImageProcessing(
    Action<object[]> resultCallback,
    Action preResultCallback,
    TimeSpan? timeout,
    ImageProcess processMethod,
    bool repeat)
{
    // First call only: discover a camera and build the shared frame processor.
    if (frameProcessor == null)
    {
        var mediaFrameSourceFinder = new MediaFrameSourceFinder();
        var populated = await mediaFrameSourceFinder.PopulateAsync(
            MediaFrameSourceFinder.ColorVideoPreviewFilter,
            MediaFrameSourceFinder.FirstOrDefault);

        if (populated)
        {
            // We'll take the first video capture device.
            var videoCaptureDevice = await CaptureDeviceFinder.FindFirstOrDefaultAsync();

            if (videoCaptureDevice != null)
            {
                // Make a processor which will pull frames from the camera and run
                frameProcessor = new CaptureFrameProcessor(
                    mediaFrameSourceFinder,
                    videoCaptureDevice,
                    MediaEncodingSubtypes.Bgra8);

                // Remember to ask for auto-focus on the video capture device.
                frameProcessor.SetVideoDeviceControllerInitialiser(
                    vd => vd.Focus.TrySetAuto(true));
            }
        }
    }

    // Camera setup may have failed above; only process when a processor exists.
    if (frameProcessor != null)
    {
        isRunning = true;
        await frameProcessor.ProcessFramesAsync(
            timeout, processMethod, repeat, resultCallback, preResultCallback);
    }
}