public override IObservable<IplImage> Process(IObservable<DeviceEvents> source)
{
    // Streams color frames from the device as 8-bit, 3-channel images.
    // Subscribes to the ColorFrame event and copies each frame's native buffer
    // into a freshly allocated image so the result outlives the frame memory.
    return source.SelectMany(device => Observable.FromEvent<FrameCallback, Frame>(
        handler => device.ColorFrame += handler,
        handler => device.ColorFrame -= handler))
        .Select(frame =>
        {
            var size = new Size(frame.Width, frame.Height);
            var image = new IplImage(size, IplDepth.U8, 3);
            // Temporary header over the native frame buffer; dispose it as soon
            // as the pixel data has been copied out (it was previously leaked).
            using (var frameHeader = new Mat(size, Depth.U8, 3, frame.FrameData, frame.Stride))
            {
                CV.Copy(frameHeader, image);
            }
            return image;
        });
}
public override void Show(object value)
{
    // Displays the incoming image. When mashup overlays are active the input
    // is copied into a reusable buffer so overlays can draw on top of it
    // without mutating the source image.
    var frame = (IplImage)value;
    if (Mashups.Count == 0)
    {
        VisualizerImage = frame;
        return;
    }

    VisualizerImage = IplImageHelper.EnsureImageFormat(
        VisualizerImage,
        frame.Size,
        frame.Depth,
        frame.Channels);
    CV.Copy(frame, VisualizerImage);
}
protected override void Write(BinaryWriter writer, IplImage input)
{
    // Serializes the raw pixel data of the image to the binary stream with no
    // row padding (rows are packed at width * channels * bytesPerElement).
    // The low byte of the IplDepth enum value encodes the bits per element.
    var step = input.Width * input.Channels * ((int)input.Depth & 0xFF) / 8;
    var data = new byte[step * input.Height];
    var dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
    try
    {
        // Header image over the pinned managed buffer; previously leaked, now
        // disposed before the pin is released.
        using (var dataHeader = new IplImage(input.Size, input.Depth, input.Channels, IntPtr.Zero))
        {
            dataHeader.SetData(dataHandle.AddrOfPinnedObject(), step);
            CV.Copy(input, dataHeader);
        }
    }
    finally
    {
        dataHandle.Free();
    }
    writer.Write(data);
}
public void CopyImageWithChannelOfInterest_ResultMatContainsSelectedChannelValues()
{
    // Copying with ChannelOfInterest = 2 should extract only that channel
    // into a single-channel destination image.
    using (var image = new IplImage(new Size(3, 3), IplDepth.F32, 3))
    using (var mask = new IplImage(new Size(3, 3), IplDepth.F32, 1))
    {
        image.SetZero();
        image[1, 1] = new Scalar(0, 1, 0, 0);

        image.ChannelOfInterest = 2;
        CV.Copy(image, mask);
        image.ChannelOfInterest = 0;

        var expected = image[1, 1].Val1;
        var actual = mask[1, 1].Val0;
        Assert.AreEqual(expected, actual);
    }
}
public override IObservable<Mat> Process(IObservable<Mat> source)
{
    // Re-references each buffer by subtracting a per-column reference signal:
    // either the average of all rows, a single selected row, or the average
    // of a chosen subset of rows (Channels holds the selected row indices).
    return(source.Select(input =>
    {
        var channels = Channels;
        var output = new Mat(input.Size, input.Depth, input.Channels);
        var reference = new Mat(1, input.Cols, input.Depth, input.Channels);
        if (channels == null || channels.Length == 0)
        {
            // No selection: average across all rows. Integer inputs are
            // reduced into an F32 scratch buffer first, then converted back.
            if (input.Depth != Depth.F32)
            {
                var temp = new Mat(reference.Rows, reference.Cols, Depth.F32, reference.Channels);
                CV.Reduce(input, temp, 0, ReduceOperation.Avg);
                CV.Convert(temp, reference);
            }
            else
            {
                CV.Reduce(input, reference, 0, ReduceOperation.Avg);
            }
        }
        else if (channels.Length == 1)
        {
            // Single selected row is used directly as the reference.
            CV.Copy(input.GetRow(channels[0]), reference);
        }
        else
        {
            // Accumulate the selected rows and divide by the count.
            // NOTE: sum ALIASES reference when the input is already F32; the
            // final ConvertScale then scales in place. Do not dispose sum here.
            var sum = input.Depth != Depth.F32 ? new Mat(reference.Rows, reference.Cols, Depth.F32, reference.Channels) : reference;
            sum.SetZero();
            for (int i = 0; i < channels.Length; i++)
            {
                using (var referenceChannel = input.GetRow(channels[i]))
                {
                    CV.Add(sum, referenceChannel, sum);
                }
            }
            CV.ConvertScale(sum, reference, 1f / channels.Length);
        }
        // Tile the reference row to the full buffer height and subtract it.
        CV.Repeat(reference, output);
        CV.Sub(input, output, output);
        return output;
    }));
}
public override void Show(object value)
{
    // Renders a keypoint collection over a BGR copy of its source image.
    var keyPoints = (KeyPointCollection)value;
    var source = keyPoints.Image;
    var canvas = new IplImage(source.Size, IplDepth.U8, 3);

    // Promote grayscale sources to color so keypoints can be drawn in color.
    if (source.Channels == 1)
    {
        CV.CvtColor(source, canvas, ColorConversion.Gray2Bgr);
    }
    else
    {
        CV.Copy(source, canvas);
    }

    Draw(canvas, keyPoints);
    base.Show(canvas);
}
public IObservable<int[]> Process(IObservable<Mat> source)
{
    // Decodes a two-channel quadrature (A/B) signal into a cumulative step
    // count per sample. Assumes the buffer holds channel A in row 0 and
    // channel B in row 1, so after the row-major copy samples[i] is A and
    // samples[value.Cols + i] is B -- TODO confirm channel layout upstream.
    return(Observable.Defer(() =>
    {
        // Decoder state persists across buffers for the whole subscription.
        var steps = 0;
        var prevA = false;
        var prevB = false;
        var first = true;
        return source.Select(value =>
        {
            var threshold = VoltageThreshold;
            var samples = new double[value.Rows * value.Cols];
            var result = new int[value.Cols];
            // Copy the buffer into a flat managed array for fast indexing.
            using (var sampleHeader = Mat.CreateMatHeader(samples, value.Rows, value.Cols, Depth.F64, 1))
            {
                CV.Copy(value, sampleHeader);
            }
            for (int i = 0; i < value.Cols; i++)
            {
                // Digitize both channels against the voltage threshold.
                var a = samples[i] > threshold;
                var b = samples[value.Cols + i] > threshold;
                if (!first)
                {
                    // Standard quadrature decoding: direction is given by the
                    // state of the other channel on each rising edge.
                    if (a & !prevA) // rising A
                    {
                        steps += b ? -Step : Step;
                    }
                    else if (b & !prevB) // rising B
                    {
                        steps += a ? Step : -Step;
                    }
                }
                else
                {
                    // First sample ever: no previous state to compare against.
                    first = false;
                }
                prevA = a;
                prevB = b;
                result[i] = steps;
            }
            return result;
        });
    }));
}
public int Update(Mat source, int index)
{
    // Copies as many columns as fit from source (starting at index) into the
    // internal sample window at the current write offset. Returns the number
    // of columns copied, or zero when the window is unallocated or full.
    if (samples == null)
    {
        return 0;
    }

    var available = Math.Min(source.Cols - index, samples.Cols - offset);
    if (available <= 0)
    {
        return 0;
    }

    using (var sourceView = source.GetSubRect(new Rect(index, 0, available, source.Rows)))
    using (var windowView = samples.GetSubRect(new Rect(offset, 0, available, samples.Rows)))
    {
        CV.Copy(sourceView, windowView);
    }
    offset += available;
    return available;
}
public override IObservable<Mat> Process(IObservable<Mat> source)
{
    // Skips the first Count samples (columns) of the sequence. Because samples
    // arrive packed in fixed-size buffers, the skip point generally falls in
    // the middle of a buffer; outputs are then spliced together from the tail
    // of the previous input and the head of the current input.
    return(Observable.Defer(() =>
    {
        var skipSamples = Count;
        var previous = default(Mat);
        return source.SkipWhile(xs =>
        {
            // Consume whole buffers until the skip count is exhausted,
            // remembering the last skipped buffer for splicing.
            var skip = skipSamples > 0;
            if (skip)
            {
                skipSamples -= xs.Cols;
                previous = xs;
            }
            return skip;
        }).Select(input =>
        {
            // skipSamples is now <= 0; bufferOffset is the column within the
            // previous buffer where the first unskipped sample sits.
            var bufferOffset = (input.Cols + skipSamples) % input.Cols;
            if (bufferOffset > 0)
            {
                var buffer = new Mat(input.Size, input.Depth, input.Channels);
                var previousDataLength = buffer.Cols - bufferOffset;
                var currentDataLength = buffer.Cols - previousDataLength;
                // Splice: [tail of previous | head of current] -> one buffer.
                using (var previousBuffer = buffer.GetSubRect(new Rect(0, 0, previousDataLength, buffer.Rows)))
                using (var previousInput = previous.GetSubRect(new Rect(bufferOffset, 0, previousDataLength, buffer.Rows)))
                using (var currentBuffer = buffer.GetSubRect(new Rect(previousDataLength, 0, currentDataLength, buffer.Rows)))
                using (var currentInput = input.GetSubRect(new Rect(0, 0, currentDataLength, buffer.Rows)))
                {
                    CV.Copy(previousInput, previousBuffer);
                    CV.Copy(currentInput, currentBuffer);
                }
                previous = input;
                return buffer;
            }
            else
            {
                // Skip count was an exact multiple of the buffer size, so
                // buffers pass through unchanged.
                return input;
            }
        });
    }));
}
static IplImage GetColorCopy(IplImage image)
{
    // Produces an 8-bit BGR copy of the input: 16-bit images are rescaled
    // down to 8-bit and grayscale images are converted to color.
    var source = image;
    if (source.Depth != IplDepth.U8)
    {
        var rescaled = new IplImage(source.Size, IplDepth.U8, source.Channels);
        CV.ConvertScale(source, rescaled, (double)byte.MaxValue / ushort.MaxValue);
        source = rescaled;
    }

    var output = new IplImage(source.Size, IplDepth.U8, 3);
    if (source.Channels == 1)
    {
        CV.CvtColor(source, output, ColorConversion.Gray2Bgr);
    }
    else
    {
        CV.Copy(source, output);
    }
    return output;
}
public override void Show(object value)
{
    // Renders detected markers over the most recent input image. When
    // showThreshold is set, the thresholded image used for detection is shown
    // instead of the raw input.
    if (input != null)
    {
        var markerFrame = (MarkerFrame)value;
        var image = new IplImage(input.Size, input.Depth, 3);
        if (showThreshold)
        {
            var threshold = new IplImage(input.Size, input.Depth, 1);
            var grayscale = input;
            if (grayscale.Channels > 1)
            {
                // Reuse the threshold buffer to hold the grayscale conversion.
                CV.CvtColor(input, threshold, ColorConversion.Bgr2Gray);
                grayscale = threshold;
            }
            // Re-run the detector's thresholding so the display matches what
            // marker detection actually saw.
            imageThreshold.Threshold(detectMarkers.ThresholdMethod, grayscale, threshold, detectMarkers.Param1, detectMarkers.Param2);
            CV.CvtColor(threshold, image, ColorConversion.Gray2Bgr);
        }
        else if (input.Channels == 1)
        {
            CV.CvtColor(input, image, ColorConversion.Gray2Bgr);
        }
        else
        {
            CV.Copy(input, image);
        }
        foreach (var marker in markerFrame.DetectedMarkers)
        {
            marker.Draw(image, Scalar.Rgb(0, 0, 255), 2, true);
            if (markerFrame.CameraParameters != null)
            {
                // With calibration available, also draw the 3D cube overlay.
                DrawingUtils.Draw3dCube(image, marker, markerFrame.CameraParameters);
            }
        }
        base.Show(image);
    }
}
public override IObservable<IplImage> Process(IObservable<IplImage> source)
{
    // Emits the running (incremental) average of all images seen so far in
    // the subscription: mean += (input - mean) / n, then a copy of the mean
    // is published as the output.
    return Observable.Defer(() =>
    {
        var sampleCount = 0;
        IplImage runningMean = null;
        return source.Select(input =>
        {
            if (runningMean == null)
            {
                // Lazily allocate state from the first frame's format.
                runningMean = new IplImage(input.Size, input.Depth, input.Channels);
                runningMean.SetZero();
            }

            var output = new IplImage(input.Size, input.Depth, input.Channels);
            CV.Sub(input, runningMean, output);                  // delta = input - mean
            CV.ConvertScale(output, output, 1f / ++sampleCount, 0); // delta /= n
            CV.Add(runningMean, output, runningMean);            // mean += delta
            CV.Copy(runningMean, output);                        // publish a snapshot
            return output;
        });
    });
}
private void OnFrameReceived(Frame frame)
{
    // Vimba frame callback: copies complete frames into a managed image and
    // publishes them downstream; incomplete frames are simply requeued.
    if (VmbFrameStatusType.VmbFrameStatusComplete == frame.ReceiveStatus)
    {
        IplImage output;
        unsafe
        {
            fixed(byte *p = frame.Buffer)
            {
                // Header over the native frame buffer. Assumes an 8-bit mono
                // pixel format -- TODO confirm against the configured camera
                // pixel format.
                IplImage bitmapHeader = new IplImage(new Size((int)frame.Width, (int)frame.Height), IplDepth.U8, 1, (IntPtr)p);
                output = new IplImage(bitmapHeader.Size, bitmapHeader.Depth, bitmapHeader.Channels);
                CV.Copy(bitmapHeader, output);
            }
        }
        // Return the frame to the driver before notifying observers.
        camera.QueueFrame(frame);
        global_observer.OnNext(new VimbaDataFrame(output, frame.Timestamp, frame.FrameID));
    }
    else
    {
        camera.QueueFrame(frame);
    }
}
Mat MatMap(Tuple<Mat, Mat> source)
{
    // Reorders rows (MapDimension == 0) or columns of source.Item1 according
    // to the index map in source.Item2: map entry i selects the source
    // row/column copied into output row/column i.
    var output = new Mat(source.Item1.Size, source.Item1.Depth, source.Item1.Channels);
    // Flatten the map to a single row so entries can be indexed linearly.
    // The row/column/reshape headers are native resources and were previously
    // leaked on every iteration; they are now disposed deterministically.
    using (var map = source.Item2.Reshape(0, 1))
    {
        if (MapDimension == 0)
        {
            for (int i = 0; i < map.Cols; i++)
            {
                using (var sourceRow = source.Item1.GetRow((int)map[i].Val0))
                using (var outputRow = output.GetRow(i))
                {
                    CV.Copy(sourceRow, outputRow);
                }
            }
        }
        else
        {
            for (int i = 0; i < map.Cols; i++)
            {
                using (var sourceCol = source.Item1.GetCol((int)map[i].Val0))
                using (var outputCol = output.GetCol(i))
                {
                    CV.Copy(sourceCol, outputCol);
                }
            }
        }
    }
    return output;
}
Mat MatMap(Mat source)
{
    // Reorders rows (MapDimension == Dimension.ROWS) or columns of source
    // according to the index map in the Map property: map entry i selects the
    // source row/column copied into output row/column i.
    var output = new Mat(source.Size, source.Depth, source.Channels);
    // Flatten the map to a single row so entries can be indexed linearly.
    // The row/column/reshape headers are native resources and were previously
    // leaked on every iteration; they are now disposed deterministically.
    using (var map = Map.Reshape(0, 1))
    {
        if (MapDimension == Dimension.ROWS)
        {
            for (int i = 0; i < map.Cols; i++)
            {
                using (var sourceRow = source.GetRow((int)map[i].Val0))
                using (var outputRow = output.GetRow(i))
                {
                    CV.Copy(sourceRow, outputRow);
                }
            }
        }
        else
        {
            for (int i = 0; i < map.Cols; i++)
            {
                using (var sourceCol = source.GetCol((int)map[i].Val0))
                using (var outputCol = output.GetCol(i))
                {
                    CV.Copy(sourceCol, outputCol);
                }
            }
        }
    }
    return output;
}
public override void Show(object value)
{
    // Overlays a single-channel mask image on top of the underlying
    // visualizer image; non-zero mask pixels select where the overlay copies.
    var image = (IplImage)value;
    var visualizerImage = visualizer.VisualizerImage;
    if (visualizerImage == null || image == null)
    {
        return;
    }

    // Only single-channel images are treated as overlay masks.
    if (image.Channels != 1)
    {
        return;
    }

    var overlay = image;
    if (visualizerImage.Channels == 3)
    {
        // Target is color: convert the mask to BGR before overlaying.
        color = IplImageHelper.EnsureImageFormat(color, visualizerImage.Size, visualizerImage.Depth, visualizerImage.Channels);
        CV.CvtColor(image, color, ColorConversion.Gray2Bgr);
        overlay = color;
    }
    CV.Copy(overlay, visualizerImage, image);
}
public NeuropixDataFrame(ElectrodePacket[] packets, float bufferCapacity)
{
    // Unpacks a batch of electrode packets into channel-major matrices.
    // Each packet contributes SampleCount columns to the trigger, sync,
    // counter and AP matrices and a single column to the LFP matrix; the
    // counter and AP blocks are transposed from the packet's sample-major
    // layout. Layout inferred from the copy geometry below -- TODO confirm
    // against the packet format specification.
    var sampleCount = SampleCount * packets.Length;
    var startTrigger = new Mat(1, sampleCount, Depth.U8, 1);
    var synchronization = new Mat(1, sampleCount, Depth.U16, 1);
    var counters = new Mat(SampleCount + 1, sampleCount, Depth.S32, 1);
    var lfpData = new Mat(ChannelCount, packets.Length, Depth.F32, 1);
    var apData = new Mat(ChannelCount, sampleCount, Depth.F32, 1);
    // Reusable headers that are repointed at each packet's arrays in turn.
    using (var startTriggerHeader = new Mat(1, SampleCount, Depth.U8, 1, IntPtr.Zero))
    using (var synchronizationHeader = new Mat(1, SampleCount, Depth.U16, 1, IntPtr.Zero))
    using (var countersHeader = new Mat(SampleCount, SampleCount + 1, Depth.S32, 1, IntPtr.Zero))
    using (var lfpDataHeader = new Mat(ChannelCount, 1, Depth.F32, 1, IntPtr.Zero))
    using (var apDataHeader = new Mat(SampleCount, ChannelCount, Depth.F32, 1, IntPtr.Zero))
    {
        for (int i = 0; i < packets.Length; i++)
        {
            startTriggerHeader.SetData(packets[i].StartTrigger, Mat.AutoStep);
            synchronizationHeader.SetData(packets[i].Synchronization, Mat.AutoStep);
            countersHeader.SetData(packets[i].Counters, Mat.AutoStep);
            lfpDataHeader.SetData(packets[i].LfpData, Mat.AutoStep);
            apDataHeader.SetData(packets[i].ApData, Mat.AutoStep);
            // Copy/transpose each packet block into its slot of the output.
            CV.Copy(startTriggerHeader, startTrigger.GetSubRect(new Rect(i * SampleCount, 0, startTriggerHeader.Cols, startTriggerHeader.Rows)));
            CV.Copy(synchronizationHeader, synchronization.GetSubRect(new Rect(i * SampleCount, 0, synchronizationHeader.Cols, synchronizationHeader.Rows)));
            CV.Transpose(countersHeader, counters.GetSubRect(new Rect(i * SampleCount, 0, countersHeader.Rows, countersHeader.Cols)));
            CV.Copy(lfpDataHeader, lfpData.GetSubRect(new Rect(i, 0, lfpDataHeader.Cols, lfpDataHeader.Rows)));
            CV.Transpose(apDataHeader, apData.GetSubRect(new Rect(i * SampleCount, 0, apDataHeader.Rows, apDataHeader.Cols)));
        }
    }
    StartTrigger = startTrigger;
    Synchronization = synchronization;
    Counters = counters;
    LfpData = lfpData;
    ApData = apData;
    BufferCapacity = bufferCapacity;
}
// Mat case
public override IObservable<Mat> Process(IObservable<Mat> source)
{
    // Streams each input matrix to the DAQ device row by row, prefixing every
    // row with an encoded row index (i + 16384) in the first column.
    // TODO: what happens if more than one frame needs to be processed before this finishes?
    return(source.Do(
        input =>
        {
            // Sanity check: input must fit within the configured rows x cols grid.
            if (rows < input.Rows || cols < input.Cols)
            {
                throw new IndexOutOfRangeException();
            }
            // Data to send (row indicator along with input exposure pattern).
            // NOTE(review): the matrix is not zeroed, so cells outside the
            // copied sub-rectangle are written out uninitialized -- confirm
            // this is intended when cols > input.Cols + 1 or rows > input.Rows.
            var data = new Mat(rows, cols, Depth.S32, 1); //S32
            var sub_data = data.GetSubRect(new Rect(1, 0, input.Cols, input.Rows));
            // Convert element type if needed
            var convertDepth = input.Depth != Depth.S32; //S32
            if (convertDepth)
            {
                CV.Convert(input, sub_data);
            }
            else
            {
                CV.Copy(input, sub_data);
            }
            // Write out matrix, row by row, with the first element being an encoded row number
            for (int i = 0; i < rows; i++)
            {
                var row = data.GetRow(i);
                row[0] = new Scalar(i + 16384, 0, 0, 0);
                // 4 bytes per S32 element.
                oni_ref.DAQ.Write((uint)DeviceIndex.SelectedIndex, row.Data, 4 * (data.Cols));
            }
        }));
}
public override IObservable<RegionActivityCollection> Process(IObservable<IplImage> source)
{
    // Measures pixel activity inside each polygonal region of interest using
    // the configured reduce operation. The region mask and cached bounding
    // rectangles are only rebuilt when the Regions property changes.
    return(Observable.Defer(() =>
    {
        var roi = default(IplImage);
        var mask = default(IplImage);
        var currentRegions = default(Point[][]);
        var boundingRegions = default(Rect[]);
        return source.Select(input =>
        {
            var operation = Operation;
            var output = new RegionActivityCollection();
            mask = IplImageHelper.EnsureImageFormat(mask, input.Size, IplDepth.U8, 1);
            if (operation != ReduceOperation.Sum)
            {
                roi = null;
            }
            else
            {
                // Sum needs a masked copy of the input so pixels outside the
                // regions contribute zero to the total.
                roi = IplImageHelper.EnsureImageFormat(roi, input.Size, input.Depth, input.Channels);
            }
            if (Regions != currentRegions)
            {
                // Region set changed: rasterize the polygons into the mask
                // and cache each polygon's bounding rectangle.
                currentRegions = Regions;
                if (currentRegions != null)
                {
                    mask.SetZero();
                    CV.FillPoly(mask, currentRegions, Scalar.All(255));
                    boundingRegions = currentRegions.Select(polygon =>
                    {
                        // Pack the polygon vertices into a 2-channel S32 row
                        // so BoundingRect can consume them.
                        var points = polygon.SelectMany(point => new[] { point.X, point.Y }).ToArray();
                        using (var mat = new Mat(1, polygon.Length, Depth.S32, 2))
                        {
                            Marshal.Copy(points, 0, mat.Data, points.Length);
                            return CV.BoundingRect(mat);
                        }
                    }).ToArray();
                }
            }
            if (currentRegions != null)
            {
                var activeMask = mask;
                if (roi != null)
                {
                    roi.SetZero();
                    CV.Copy(input, roi, mask);
                    activeMask = roi;
                }
                var activation = ActivationFunction(operation);
                for (int i = 0; i < boundingRegions.Length; i++)
                {
                    // Evaluate the activation only within each region's
                    // bounding box for efficiency.
                    var rect = boundingRegions[i];
                    var polygon = currentRegions[i];
                    using (var region = input.GetSubRect(rect))
                    using (var regionMask = activeMask.GetSubRect(rect))
                    {
                        output.Add(new RegionActivity { Roi = polygon, Rect = rect, Activity = activation(region, regionMask) });
                    }
                }
            }
            return output;
        });
    }));
}
public static void DrawConnectedComponent(IplImage image, ConnectedComponent component, Point2f offset)
{
    // Draws a connected component onto the target image: its patch (masked by
    // its contour when available), contour outline, major/minor axes, and
    // centroid marker. Components with non-positive area are ignored.
    if (component.Area <= 0)
    {
        return;
    }
    var centroid = component.Centroid + offset;
    var orientation = component.Orientation;
    var minorAxisOrientation = orientation + Math.PI / 2.0;
    var halfMajorAxis = component.MajorAxisLength * 0.5;
    var halfMinorAxis = component.MinorAxisLength * 0.5;
    // Axis endpoints projected from the centroid along each orientation.
    var major1 = new Point((int)(centroid.X + halfMajorAxis * Math.Cos(orientation)), (int)(centroid.Y + halfMajorAxis * Math.Sin(orientation)));
    var major2 = new Point((int)(centroid.X - halfMajorAxis * Math.Cos(orientation)), (int)(centroid.Y - halfMajorAxis * Math.Sin(orientation)));
    var minor1 = new Point((int)(centroid.X + halfMinorAxis * Math.Cos(minorAxisOrientation)), (int)(centroid.Y + halfMinorAxis * Math.Sin(minorAxisOrientation)));
    var minor2 = new Point((int)(centroid.X - halfMinorAxis * Math.Cos(minorAxisOrientation)), (int)(centroid.Y - halfMinorAxis * Math.Sin(minorAxisOrientation)));
    if (component.Patch != null)
    {
        var target = image;
        var patch = component.Patch;
        // A single-channel patch doubles as its own copy mask; multi-channel
        // patches have no mask unless a contour mask is built below.
        var mask = patch.Channels == 1 ? patch : null;
        try
        {
            if (component.Contour != null)
            {
                // Rasterize the contour into a mask aligned with the patch.
                var rect = component.Contour.Rect;
                mask = new IplImage(patch.Size, patch.Depth, 1);
                mask.SetZero();
                CV.DrawContours(mask, component.Contour, Scalar.All(255), Scalar.All(0), 0, -1, LineFlags.Connected8, new Point(-rect.X, -rect.Y));
                if (image.Width != rect.Width || image.Height != rect.Height)
                {
                    target = image.GetSubRect(component.Contour.Rect);
                }
            }
            if (patch.Channels != target.Channels)
            {
                // Convert the patch to match the target's channel count.
                var conversion = patch.Channels > image.Channels ? ColorConversion.Bgr2Gray : ColorConversion.Gray2Bgr;
                patch = new IplImage(patch.Size, patch.Depth, image.Channels);
                CV.CvtColor(component.Patch, patch, conversion);
            }
            CV.Copy(patch, target, mask);
        }
        finally
        {
            if (patch != component.Patch)
            {
                patch.Dispose();
            }
            // BUGFIX: mask is null when the patch is multi-channel and there
            // is no contour; the previous code dereferenced it unconditionally
            // and threw NullReferenceException on that path.
            if (mask != null && mask != component.Patch)
            {
                mask.Dispose();
            }
            if (target != image)
            {
                target.Dispose();
            }
        }
    }
    else if (component.Contour != null)
    {
        // No patch: fill the contour directly on the target image.
        CV.DrawContours(image, component.Contour, Scalar.All(255), Scalar.All(0), 0, -1, LineFlags.Connected8, new Point(offset));
    }
    if (component.Contour != null)
    {
        // Outline the contour on top of whatever was drawn above.
        CV.DrawContours(image, component.Contour, Scalar.Rgb(255, 0, 0), Scalar.Rgb(0, 0, 255), 0, 1, LineFlags.Connected8, new Point(offset));
    }
    CV.Line(image, major1, major2, Scalar.Rgb(0, 0, 255));
    CV.Line(image, minor1, minor2, Scalar.Rgb(255, 0, 0));
    CV.Circle(image, new Point(centroid), 2, Scalar.Rgb(255, 0, 0), -1);
}
public override IObservable<Mat> Process(IObservable<Mat> source)
{
    // 1D FIR filtering along the sample (column) axis with overlap carried
    // across buffers, so the convolution is continuous at buffer boundaries.
    // All intermediate buffers are rebuilt when the kernel or the input
    // buffer size changes.
    return(Observable.Defer(() =>
    {
        Mat kernel = null;
        Mat overlap = null;       // input widened by (kernel - 1) history samples
        Mat overlapInput = null;  // view of overlap where each new input is copied
        Mat overlapEnd = null;    // trailing samples carried to the next buffer
        Mat overlapStart = null;  // leading history region receiving the carry
        Mat overlapFilter = null; // filtered copy of the overlap buffer
        Rect overlapOutput = default(Rect);
        float[] currentKernel = null;
        return source.Select(input =>
        {
            if (Kernel != currentKernel || currentKernel != null && (input.Rows != overlapOutput.Height || input.Cols != overlapOutput.Width))
            {
                currentKernel = Kernel;
                if (currentKernel == null || currentKernel.Length == 0)
                {
                    kernel = null;
                }
                else
                {
                    kernel = new Mat(1, currentKernel.Length, Depth.F32, 1);
                    Marshal.Copy(currentKernel, 0, kernel.Data, currentKernel.Length);
                    var anchor = Anchor;
                    if (anchor == -1)
                    {
                        // Default anchor: center of the kernel.
                        anchor = kernel.Cols / 2;
                    }
                    overlap = new Mat(input.Rows, input.Cols + kernel.Cols - 1, input.Depth, input.Channels);
                    overlapInput = overlap.GetSubRect(new Rect(kernel.Cols - 1, 0, input.Cols, input.Rows));
                    overlapFilter = new Mat(overlap.Rows, overlap.Cols, overlap.Depth, overlap.Channels);
                    if (kernel.Cols > 1)
                    {
                        overlapEnd = overlap.GetSubRect(new Rect(overlap.Cols - kernel.Cols + 1, 0, kernel.Cols - 1, input.Rows));
                        overlapStart = overlap.GetSubRect(new Rect(0, 0, kernel.Cols - 1, input.Rows));
                    }
                    overlapOutput = new Rect(anchor, 0, input.Cols, input.Rows);
                    // Seed the history region by reflecting the first buffer.
                    CV.CopyMakeBorder(input, overlap, new Point(kernel.Cols - 1, 0), IplBorder.Reflect);
                }
            }
            if (kernel == null)
            {
                // Empty kernel: pass buffers through unchanged.
                return input;
            }
            else
            {
                CV.Copy(input, overlapInput);
                CV.Filter2D(overlap, overlapFilter, kernel, new Point(Anchor, -1));
                if (overlapEnd != null)
                {
                    // Carry the trailing samples into the history region for
                    // the next buffer.
                    CV.Copy(overlapEnd, overlapStart);
                }
                return overlapFilter.GetSubRect(overlapOutput).Clone();
            }
        });
    }));
}
public override IObservable<Mat> Process(IObservable<Mat> source)
{
    // Downsamples the sample stream by `factor` and repacks the results into
    // buffers of BufferLength columns. Strategies: low-pass then decimate,
    // reduce-based (Sum/Avg/Max/Min) with a carry across input buffers,
    // block decimation via nearest-neighbor resize, dithered (random-phase)
    // decimation, and plain single-point decimation.
    return(Observable.Create<Mat>(observer =>
    {
        var carry = 0;         // samples still needed to finish the current reduced column
        var index = 0;         // next output column within the accumulation buffer
        var offset = 0;        // read position within the current input buffer
        var lottery = 0;       // random phase used by dithering
        var scaleFactor = 0.0; // 1/factor when averaging, otherwise 0
        var currentFactor = 0;
        var buffer = default(Mat);
        var carryBuffer = default(Mat);
        var downsampling = Downsampling;
        var random = downsampling == DownsamplingMethod.Dithering ? new Random() : null;
        var reduceOp = (ReduceOperation)(downsampling - DownsamplingMethod.Sum);
        if (reduceOp == ReduceOperation.Avg)
        {
            // Average is implemented as a running sum plus a final scale.
            reduceOp = ReduceOperation.Sum;
        }
        var downsample = downsampling == DownsamplingMethod.LowPass ? filter.Process(source) : source;
        return downsample.Subscribe(input =>
        {
            try
            {
                var bufferLength = BufferLength;
                if (bufferLength == 0)
                {
                    bufferLength = input.Cols;
                }
                if (buffer == null || buffer.Rows != input.Rows || currentFactor != factor)
                {
                    // (Re)initialize state when the input shape or the
                    // decimation factor changes.
                    index = 0;
                    currentFactor = factor;
                    if (downsampling >= DownsamplingMethod.Sum)
                    {
                        carry = currentFactor;
                        carryBuffer = new Mat(input.Rows, 1, input.Depth, input.Channels);
                        if (downsampling == DownsamplingMethod.Avg)
                        {
                            scaleFactor = 1.0 / currentFactor;
                        }
                        else
                        {
                            scaleFactor = 0;
                        }
                    }
                    else if (random != null)
                    {
                        lottery = random.Next(currentFactor);
                        offset = lottery;
                    }
                    else
                    {
                        offset = 0;
                    }
                    buffer = CreateBuffer(bufferLength, input);
                }
                while (offset < input.Cols)
                {
                    // Process decimation data on this buffer
                    Rect outputRect;
                    if (downsampling > DownsamplingMethod.LowPass)
                    {
                        outputRect = new Rect(index, 0, 1, input.Rows);
                    }
                    else
                    {
                        // Copy as many whole decimation periods as fit in both
                        // the remaining input and the remaining buffer space.
                        var samples = input.Cols - offset;
                        var whole = samples / currentFactor;
                        outputRect = new Rect(index, 0, Math.Min(buffer.Cols - index, whole), input.Rows);
                    }
                    if (downsampling >= DownsamplingMethod.Sum)
                    {
                        // Reduce decimate
                        var inputSamples = Math.Min(input.Cols - offset, carry);
                        var inputRect = new Rect(offset, 0, inputSamples, input.Rows);
                        using (var inputBuffer = input.GetSubRect(inputRect))
                        using (var outputBuffer = buffer.GetCol(index))
                        {
                            if (carry < currentFactor)
                            {
                                // Continuing a column started in a previous
                                // input buffer: reduce into the carry buffer
                                // and merge with the partial result.
                                CV.Reduce(inputBuffer, carryBuffer, 1, reduceOp);
                                switch (reduceOp)
                                {
                                    case ReduceOperation.Sum: CV.Add(outputBuffer, carryBuffer, outputBuffer); break;
                                    case ReduceOperation.Max: CV.Max(outputBuffer, carryBuffer, outputBuffer); break;
                                    case ReduceOperation.Min: CV.Min(outputBuffer, carryBuffer, outputBuffer); break;
                                }
                            }
                            else
                            {
                                CV.Reduce(inputBuffer, outputBuffer, 1, reduceOp);
                            }
                            offset += inputRect.Width;
                            carry -= inputSamples;
                            if (carry <= 0)
                            {
                                // Column complete: advance and, for averages,
                                // apply the final 1/factor scale.
                                index++;
                                carry = currentFactor;
                                if (scaleFactor > 0)
                                {
                                    CV.ConvertScale(outputBuffer, outputBuffer, scaleFactor);
                                }
                            }
                        }
                    }
                    else if (outputRect.Width > 1)
                    {
                        // Block decimate
                        var inputRect = new Rect(offset, 0, outputRect.Width * currentFactor, input.Rows);
                        using (var inputBuffer = input.GetSubRect(inputRect))
                        using (var outputBuffer = buffer.GetSubRect(outputRect))
                        {
                            CV.Resize(inputBuffer, outputBuffer, SubPixelInterpolation.NearestNeighbor);
                        }
                        index += outputRect.Width;
                        offset += inputRect.Width;
                    }
                    else
                    {
                        // Decimate single time point
                        using (var inputBuffer = input.GetCol(offset))
                        using (var outputBuffer = buffer.GetCol(index))
                        {
                            CV.Copy(inputBuffer, outputBuffer);
                        }
                        index++;
                        if (random != null)
                        {
                            // Dithering: advance one full period but draw a
                            // fresh random phase within it.
                            offset += currentFactor - lottery;
                            lottery = random.Next(currentFactor);
                            offset += lottery;
                        }
                        else
                        {
                            offset += currentFactor;
                        }
                    }
                    if (index >= buffer.Cols)
                    {
                        // Accumulation buffer full: emit and start a new one.
                        index = 0;
                        observer.OnNext(buffer);
                        buffer = CreateBuffer(bufferLength, input);
                    }
                }
                // Carry the residual read phase into the next input buffer.
                offset -= input.Cols;
            }
            catch (Exception ex)
            {
                observer.OnError(ex);
            }
        },
        observer.OnError,
        () =>
        {
            // Emit pending buffer
            if (index > 0)
            {
                observer.OnNext(buffer.GetCols(0, index));
            }
            buffer = null;
            observer.OnCompleted();
        });
    }));
}
public FlyCapture()
{
    // Streams frames from a FlyCapture camera as a reconnectable, ref-counted
    // observable. The subscription powers the camera on, configures frame
    // rate, exposure/shutter/gain and embedded-info registers, captures until
    // cancellation, and powers the camera off on teardown.
    ColorProcessing = ColorProcessingAlgorithm.Default;
    source = Observable.Create<FlyCaptureDataFrame>((observer, cancellationToken) =>
    {
        return(Task.Factory.StartNew(() =>
        {
            // Only one subscription may drive the camera at a time.
            lock (captureLock)
            {
                ManagedCamera camera;
                using (var manager = new ManagedBusManager())
                {
                    var guid = manager.GetCameraFromIndex((uint)Index);
                    camera = new ManagedCamera();
                    camera.Connect(guid);

                    // Power on the camera
                    const uint CameraPower = 0x610;
                    const uint CameraPowerValue = 0x80000000;
                    camera.WriteRegister(CameraPower, CameraPowerValue);

                    // Wait for camera to complete power-up
                    const Int32 MillisecondsToSleep = 100;
                    uint cameraPowerValueRead = 0;
                    do
                    {
                        Thread.Sleep(MillisecondsToSleep);
                        cameraPowerValueRead = camera.ReadRegister(CameraPower);
                    }while ((cameraPowerValueRead & CameraPowerValue) == 0);
                }
                var capture = 0;
                try
                {
                    // Set frame rate
                    var prop = new CameraProperty(PropertyType.FrameRate);
                    prop.absControl = true;
                    prop.absValue = FramesPerSecond;
                    prop.autoManualMode = false;
                    prop.onOff = true;
                    camera.SetProperty(prop);

                    // Enable/disable blackfly pull up
                    const uint pullUp = 0x19D0;
                    if (EnableBlackflyOutputVoltage)
                    {
                        camera.WriteRegister(pullUp, 0x10000001);
                    }
                    else
                    {
                        camera.WriteRegister(pullUp, 0x10000000);
                    }

                    // Acquisition parameters
                    var colorProcessing = ColorProcessing;
                    var autoExposure = !AutoExposure; // Horrible hack to trigger update initially
                    var shutter = Shutter;
                    var gain = Gain;

                    // Configure embedded info
                    const uint embeddedInfo = 0x12F8;
                    uint embeddedInfoState = camera.ReadRegister(embeddedInfo);
                    // Bit 6: embedded frame counter; bit 0: embedded timestamp.
                    if (EnableEmbeddedFrameCounter)
                    {
                        embeddedInfoState |= (uint)1 << 6;
                    }
                    else
                    {
                        embeddedInfoState &= ~((uint)1 << 6);
                    }
                    if (EnableEmbeddedFrameTimeStamp)
                    {
                        embeddedInfoState |= (uint)1 << 0;
                    }
                    else
                    {
                        embeddedInfoState &= ~((uint)1 << 0);
                    }
                    camera.WriteRegister(embeddedInfo, embeddedInfoState);

                    using (var image = new ManagedImage())
                    using (var notification = cancellationToken.Register(() =>
                    {
                        // Cancellation stops capture; the capture flag lets
                        // the retrieve loop tell cancellation from failure.
                        Interlocked.Exchange(ref capture, 0);
                        camera.StopCapture();
                    }))
                    {
                        camera.StartCapture();
                        Interlocked.Exchange(ref capture, 1);
                        while (!cancellationToken.IsCancellationRequested)
                        {
                            IplImage output;
                            BayerTileFormat bayerTileFormat;
                            if (autoExposure != AutoExposure && AutoExposure)
                            {
                                // Switched to auto exposure: put exposure,
                                // shutter and gain under automatic control.
                                prop = new CameraProperty(PropertyType.AutoExposure);
                                prop.autoManualMode = true;
                                prop.onOff = true;
                                camera.SetProperty(prop);
                                autoExposure = AutoExposure;

                                // Shutter
                                prop = new CameraProperty(PropertyType.Shutter);
                                prop.absControl = true;
                                prop.autoManualMode = true;
                                prop.onOff = true;
                                camera.SetProperty(prop);

                                // Gain
                                prop = new CameraProperty(PropertyType.Gain);
                                prop.absControl = true;
                                prop.autoManualMode = true;
                                prop.onOff = true;
                                camera.SetProperty(prop);
                                autoExposure = AutoExposure;
                            }
                            else if (autoExposure != AutoExposure && !AutoExposure)
                            {
                                // Switched to manual: force the shutter/gain
                                // update branches below to run.
                                shutter = -0.1f; // Hack
                                gain = -0.1f;
                                autoExposure = AutoExposure;
                            }
                            if (shutter != Shutter && !AutoExposure)
                            {
                                // Figure out max shutter time given current frame rate
                                var info = camera.GetPropertyInfo(PropertyType.Shutter);
                                var delta = info.absMax - info.absMin;
                                prop = new CameraProperty(PropertyType.Shutter);
                                prop.absControl = true;
                                prop.absValue = Shutter * delta + info.absMin;
                                prop.autoManualMode = false;
                                prop.onOff = true;
                                camera.SetProperty(prop);
                                shutter = Shutter;
                            }
                            if (gain != Gain && !AutoExposure)
                            {
                                // Scale normalized gain into the device range.
                                // NOTE(review): range is queried from the
                                // Shutter property, not Gain -- confirm intended.
                                var info = camera.GetPropertyInfo(PropertyType.Shutter);
                                var delta = info.absMax - info.absMin;
                                prop = new CameraProperty(PropertyType.Gain);
                                prop.absControl = true;
                                prop.absValue = Gain * delta + info.absMin;;
                                prop.autoManualMode = false;
                                prop.onOff = true;
                                camera.SetProperty(prop);
                                gain = Gain;
                            }
                            try
                            {
                                camera.RetrieveBuffer(image);
                            }
                            catch (FC2Exception ex)
                            {
                                if (capture == 0)
                                {
                                    // Capture was stopped by cancellation.
                                    break;
                                }
                                else if (IgnoreImageConsistencyError && ex.CauseType == ErrorType.ImageConsistencyError)
                                {
                                    continue;
                                }
                                else
                                {
                                    throw;
                                }
                            }
                            if (image.pixelFormat == PixelFormat.PixelFormatMono8 ||
                                image.pixelFormat == PixelFormat.PixelFormatMono16 ||
                                (image.pixelFormat == PixelFormat.PixelFormatRaw8 &&
                                 (image.bayerTileFormat == BayerTileFormat.None ||
                                  colorProcessing == ColorProcessingAlgorithm.NoColorProcessing)))
                            {
                                // Mono/raw path: copy the buffer out unchanged.
                                unsafe
                                {
                                    bayerTileFormat = image.bayerTileFormat;
                                    var depth = image.pixelFormat == PixelFormat.PixelFormatMono16 ? IplDepth.U16 : IplDepth.U8;
                                    var bitmapHeader = new IplImage(new Size((int)image.cols, (int)image.rows), depth, 1, new IntPtr(image.data));
                                    output = new IplImage(bitmapHeader.Size, bitmapHeader.Depth, bitmapHeader.Channels);
                                    CV.Copy(bitmapHeader, output);
                                }
                            }
                            else
                            {
                                // Color path: demosaic directly into the output.
                                unsafe
                                {
                                    bayerTileFormat = BayerTileFormat.None;
                                    output = new IplImage(new Size((int)image.cols, (int)image.rows), IplDepth.U8, 3);
                                    using (var convertedImage = new ManagedImage(
                                        (uint)output.Height,
                                        (uint)output.Width,
                                        (uint)output.WidthStep,
                                        (byte *)output.ImageData.ToPointer(),
                                        (uint)(output.WidthStep * output.Height),
                                        PixelFormat.PixelFormatBgr))
                                    {
                                        convertedImage.colorProcessingAlgorithm = colorProcessing;
                                        image.Convert(PixelFormat.PixelFormatBgr, convertedImage);
                                    }
                                }
                            }
                            observer.OnNext(new FlyCaptureDataFrame(output, image.imageMetadata, bayerTileFormat));
                        }
                    }
                }
                finally
                {
                    // Power off the camera
                    const uint CameraPower = 0x610;
                    const uint CameraPowerValue = 0x00000000;
                    camera.WriteRegister(CameraPower, CameraPowerValue);
                    if (capture != 0)
                    {
                        camera.StopCapture();
                    }
                    camera.Disconnect();
                    camera.Dispose();
                }
            }
        }, cancellationToken, TaskCreationOptions.LongRunning, TaskScheduler.Default));
    })
    .PublishReconnectable()
    .RefCount();
}
public override IObservable<IplImage> Process(IObservable<IplImage> source)
{
    // Applies the polygonal region selection to each image. Depending on
    // MaskType the result is either the binary region mask itself
    // (Binary/BinaryInv) or the input with pixels outside/inside the regions
    // replaced by FillValue (ToZero/ToZeroInv), optionally cropped to the
    // regions' bounding box when cropOutput is set.
    return(Observable.Defer(() =>
    {
        var mask = default(IplImage);
        var boundingBox = default(Rect);
        var currentRegions = default(Point[][]);
        return source.Select(input =>
        {
            if (Regions != currentRegions)
            {
                // Region set changed: rasterize the polygons into a fresh
                // mask and recompute their joint bounding box.
                currentRegions = Regions;
                boundingBox = default(Rect);
                if (currentRegions != null)
                {
                    mask = new IplImage(input.Size, IplDepth.U8, 1);
                    mask.SetZero();
                    // Flatten every vertex of every polygon to x,y pairs.
                    var points = currentRegions
                        .SelectMany(region => region)
                        .SelectMany(point => new[] { point.X, point.Y })
                        .ToArray();
                    if (points.Length > 0)
                    {
                        using (var mat = new Mat(1, points.Length / 2, Depth.S32, 2))
                        {
                            Marshal.Copy(points, 0, mat.Data, points.Length);
                            boundingBox = CV.BoundingRect(mat);
                            boundingBox = ClipRectangle(boundingBox, input.Size);
                        }
                        CV.FillPoly(mask, currentRegions, Scalar.All(255));
                        if (cropOutput)
                        {
                            mask = mask.GetSubRect(boundingBox);
                        }
                    }
                }
                else
                {
                    mask = null;
                }
            }
            var selectionType = MaskType;
            if (selectionType <= ThresholdTypes.BinaryInv)
            {
                // Mask-output modes: return the mask (or its inverse).
                var size = mask != null ? mask.Size : input.Size;
                var output = new IplImage(size, IplDepth.U8, 1);
                switch (selectionType)
                {
                    case ThresholdTypes.Binary:
                        if (mask == null)
                        {
                            output.SetZero();
                        }
                        else
                        {
                            CV.Copy(mask, output);
                        }
                        break;
                    case ThresholdTypes.BinaryInv:
                        if (mask == null)
                        {
                            output.Set(Scalar.All(255));
                        }
                        else
                        {
                            CV.Not(mask, output);
                        }
                        break;
                    default: throw new InvalidOperationException("Selection operation is not supported.");
                }
                return output;
            }
            if (currentRegions != null && boundingBox.Width > 0 && boundingBox.Height > 0)
            {
                var output = new IplImage(mask.Size, input.Depth, input.Channels);
                var inputRoi = cropOutput ? input.GetSubRect(boundingBox) : input;
                try
                {
                    switch (selectionType)
                    {
                        case ThresholdTypes.ToZeroInv:
                            // Keep the outside; fill the selected regions.
                            var fillRoi = cropOutput ? inputRoi : input;
                            CV.Copy(fillRoi, output);
                            output.Set(FillValue, mask);
                            break;
                        case ThresholdTypes.ToZero:
                            // Fill everywhere; copy back only the regions.
                            output.Set(FillValue);
                            CV.Copy(inputRoi, output, mask);
                            break;
                        default: throw new InvalidOperationException("Selection operation is not supported.");
                    }
                }
                finally
                {
                    if (inputRoi != input)
                    {
                        inputRoi.Close();
                    }
                }
                return output;
            }
            // No regions (or degenerate bounding box): pass through unchanged.
            return input;
        });
    }));
}
public FlyCapture()
{
    // Streams frames from a FlyCapture camera with a configurable grab mode
    // and buffer count. Simpler variant: no power-up or exposure register
    // management; supports 16-bit raw/Bayer formats.
    NumBuffers = 10;
    GrabMode = GrabMode.BufferFrames;
    ColorProcessing = ColorProcessingAlgorithm.Default;
    source = Observable.Create<FlyCaptureDataFrame>((observer, cancellationToken) =>
    {
        return(Task.Factory.StartNew(() =>
        {
            // Only one subscription may drive the camera at a time.
            lock (captureLock)
            {
                ManagedCamera camera;
                using (var manager = new ManagedBusManager())
                {
                    var guid = manager.GetCameraFromIndex((uint)Index);
                    camera = new ManagedCamera();
                    camera.Connect(guid);
                }
                var capture = 0;
                var numBuffers = NumBuffers;
                var config = camera.GetConfiguration();
                config.grabMode = GrabMode;
                config.numBuffers = (uint)NumBuffers;
                config.highPerformanceRetrieveBuffer = true;
                camera.SetConfiguration(config);
                try
                {
                    var colorProcessing = ColorProcessing;
                    using (var image = new ManagedImage())
                    using (var notification = cancellationToken.Register(() =>
                    {
                        // Cancellation stops capture; the capture flag lets
                        // the retrieve loop tell cancellation from failure.
                        Interlocked.Exchange(ref capture, 0);
                        camera.StopCapture();
                    }))
                    {
                        camera.StartCapture();
                        Interlocked.Exchange(ref capture, 1);
                        while (!cancellationToken.IsCancellationRequested)
                        {
                            IplImage output;
                            BayerTileFormat bayerTileFormat;
                            try
                            {
                                camera.RetrieveBuffer(image);
                            }
                            catch (FC2Exception)
                            {
                                if (capture == 0)
                                {
                                    // Capture was stopped by cancellation.
                                    break;
                                }
                                else
                                {
                                    throw;
                                }
                            }
                            var raw16 = image.pixelFormat == PixelFormat.PixelFormatRaw16;
                            if (image.pixelFormat == PixelFormat.PixelFormatMono8 ||
                                image.pixelFormat == PixelFormat.PixelFormatMono16 ||
                                ((image.pixelFormat == PixelFormat.PixelFormatRaw8 || raw16) &&
                                 (image.bayerTileFormat == BayerTileFormat.None ||
                                  colorProcessing == ColorProcessingAlgorithm.NoColorProcessing)))
                            {
                                // Mono/raw path: copy the buffer out unchanged.
                                unsafe
                                {
                                    bayerTileFormat = image.bayerTileFormat;
                                    var depth = image.pixelFormat == PixelFormat.PixelFormatMono16 || raw16 ? IplDepth.U16 : IplDepth.U8;
                                    var bitmapHeader = new IplImage(new Size((int)image.cols, (int)image.rows), depth, 1, new IntPtr(image.data));
                                    output = new IplImage(bitmapHeader.Size, bitmapHeader.Depth, bitmapHeader.Channels);
                                    CV.Copy(bitmapHeader, output);
                                }
                            }
                            else
                            {
                                // Color path: demosaic directly into the output
                                // at the matching bit depth.
                                unsafe
                                {
                                    bayerTileFormat = BayerTileFormat.None;
                                    var depth = raw16 ? IplDepth.U16 : IplDepth.U8;
                                    var format = raw16 ? PixelFormat.PixelFormatBgr16 : PixelFormat.PixelFormatBgr;
                                    output = new IplImage(new Size((int)image.cols, (int)image.rows), depth, 3);
                                    using (var convertedImage = new ManagedImage(
                                        (uint)output.Height,
                                        (uint)output.Width,
                                        (uint)output.WidthStep,
                                        (byte *)output.ImageData.ToPointer(),
                                        (uint)(output.WidthStep * output.Height),
                                        format))
                                    {
                                        convertedImage.colorProcessingAlgorithm = colorProcessing;
                                        image.Convert(format, convertedImage);
                                    }
                                }
                            }
                            observer.OnNext(new FlyCaptureDataFrame(output, image.imageMetadata, bayerTileFormat));
                        }
                    }
                }
                finally
                {
                    if (capture != 0)
                    {
                        camera.StopCapture();
                    }
                    camera.Disconnect();
                    camera.Dispose();
                }
            }
        }, cancellationToken, TaskCreationOptions.LongRunning, TaskScheduler.Default));
    })
    .PublishReconnectable()
    .RefCount();
}
private Tuple<bool, Mat> Process(IEnumerable<Position3D> positions)
{
    // Fits a principal axis to a cloud of 3D position estimates via SVD and
    // returns its orientation as a quaternion. Item1 is false when too few
    // fresh samples remain to attempt the fit.
    // Materialize the sequence once: the incoming positions may be a deferred
    // LINQ query, and the previous code enumerated it repeatedly (Max,
    // Count() twice, then foreach), which is wasteful and can yield
    // inconsistent snapshots of a live source.
    var samples = positions.ToList();

    // If appropriate, filter to find all position measures within the time window
    if (Window > 0)
    {
        // NB: this step deals with the fact that sometimes occlusions will mean skipped
        // frames from some receivers. We only want to compare estimated positions that
        // occurred close in time. Maybe we want to do this with a reactive operator though...?
        var latest = samples.Max(pos => pos.Time);
        samples = samples.Where(pos => latest - pos.Time < Window).ToList();

        // If there are not enough good positions remaining, fail predicate
        if (samples.Count < 3)
        {
            return new Tuple<bool, Mat>(false, null);
        }
    }

    // Pack each position vector into a column of the data matrix.
    Mat data = new Mat(3, samples.Count, Depth.F64, 1);
    var j = 0;
    foreach (var p in samples)
    {
        CV.Copy(p.Matrix, data.GetCol(j++));
    }

    // Shift the data to 0 mean
    Mat row_mean = new Mat(3, 1, Depth.F64, 1);
    CV.Reduce(data, row_mean, 1, ReduceOperation.Avg);
    data = data - row_mean;

    // SVD
    // See https://www.ltu.se/cms_fs/1.51590!/svd-fitting.pdf
    Mat S = new Mat(3, 1, Depth.F64, 1);
    Mat U = new Mat(3, 3, Depth.F64, 1);
    CV.SVD(data, S, U);

    // Get Quaternion
    // See https://math.stackexchange.com/questions/2889712/how-to-calculate-quaternions-from-principal-axes-of-an-ellipsoid
    // Rotation angle from the trace of U.
    var theta = Math.Acos(0.5 * (CV.Trace(U).Val0 - 1));

    // Rotation axis: off-diagonals of the antisymmetric part U - U^T.
    Mat Ut = new Mat(3, 3, Depth.F64, 1);
    CV.Transpose(U, Ut);
    CV.Sub(U, Ut, U);
    var ax = U[1, 2].Val0;
    var ay = U[2, 0].Val0;
    var az = U[0, 1].Val0;

    // To quaternion
    Mat quat = new Mat(4, 1, Depth.F64, 1);
    var s = Math.Sin(theta / 2);
    quat[0] = new Scalar(ax * s);
    quat[1] = new Scalar(ay * s);
    quat[2] = new Scalar(az * s);
    quat[3] = new Scalar(Math.Cos(theta / 2));
    return new Tuple<bool, Mat>(true, quat);
}
// Builds a converter delegate that turns a Spinnaker IManagedImage into an
// OpenCV IplImage for the given pixel format and color-processing algorithm.
// Two paths: a direct memory-copy path for formats that already match an
// IplImage layout (or when demosaicing is disabled), and a Spinnaker-side
// conversion path for packed-mono and Bayer formats.
// Throws InvalidOperationException for formats that cannot be converted.
static Func <IManagedImage, IplImage> GetConverter(PixelFormatEnums pixelFormat, ColorProcessingAlgorithm colorProcessing)
{
    int outputChannels;
    IplDepth outputDepth;
    // Direct-copy path: mono formats below BayerGR8, interleaved BGR8, or raw
    // Bayer data when no color processing is requested (kept as 1-channel).
    // NOTE(review): these range comparisons depend on the exact ordinal layout
    // of PixelFormatEnums in the Spinnaker SDK — confirm against the SDK header.
    if (pixelFormat < PixelFormatEnums.BayerGR8 || pixelFormat == PixelFormatEnums.BGR8 ||
        pixelFormat <= PixelFormatEnums.BayerBG16 && colorProcessing == ColorProcessingAlgorithm.NoColorProcessing)
    {
        if (pixelFormat == PixelFormatEnums.BGR8)
        {
            outputChannels = 3;
            outputDepth = IplDepth.U8;
        }
        else
        {
            outputChannels = 1;
            // Derive the bit depth from the enum ordinal.
            // NOTE(review): the (depthFactor - 3) / 4 remapping presumably
            // collapses the Bayer enum block back onto the mono depth scale —
            // TODO confirm against the SDK's enum numbering.
            var depthFactor = (int)pixelFormat;
            if (pixelFormat > PixelFormatEnums.Mono16)
            {
                depthFactor = (depthFactor - 3) / 4;
            }
            outputDepth = (IplDepth)(8 * (depthFactor + 1));
        }
        return(image =>
        {
            var width = (int)image.Width;
            var height = (int)image.Height;
            // Wrap the camera buffer in a zero-copy IplImage header, then copy
            // into a freshly allocated image that owns its memory, so the
            // result outlives the camera buffer.
            using (var bitmapHeader = new IplImage(new Size(width, height), outputDepth, outputChannels, image.DataPtr))
            {
                var output = new IplImage(bitmapHeader.Size, outputDepth, outputChannels);
                CV.Copy(bitmapHeader, output);
                return output;
            }
        });
    }

    // Conversion path: pick the target format Spinnaker should convert into.
    PixelFormatEnums outputFormat;
    if (pixelFormat == PixelFormatEnums.Mono12p || pixelFormat == PixelFormatEnums.Mono12Packed)
    {
        // Packed 12-bit mono is unpacked to 16-bit mono.
        outputFormat = PixelFormatEnums.Mono16;
        outputDepth = IplDepth.U16;
        outputChannels = 1;
    }
    else if (pixelFormat >= PixelFormatEnums.BayerGR8 && pixelFormat <= PixelFormatEnums.BayerBG16)
    {
        // Bayer mosaics are demosaiced to 8-bit BGR.
        outputFormat = PixelFormatEnums.BGR8;
        outputDepth = IplDepth.U8;
        outputChannels = 3;
    }
    else
    {
        throw new InvalidOperationException(string.Format("Unable to convert pixel format {0}.", pixelFormat));
    }
    return(image =>
    {
        var width = (int)image.Width;
        var height = (int)image.Height;
        // Allocate the destination IplImage and let Spinnaker convert directly
        // into its pixel buffer via a ManagedImage wrapper (no extra copy).
        var output = new IplImage(new Size(width, height), outputDepth, outputChannels);
        unsafe
        {
            using (var destination = new ManagedImage((uint)width, (uint)height, 0, 0, outputFormat, output.ImageData.ToPointer()))
            {
                image.Convert(destination, outputFormat, (SpinnakerNET.ColorProcessingAlgorithm)colorProcessing);
                return output;
            }
        }
    });
}
// For each depth frame from the device, deprojects every valid depth pixel to
// a 3D point, maps it into the color camera's image to obtain a texture
// coordinate, and emits the resulting vertex list as a pindex x 5 F32 Mat
// (TexVertex = 3D position + 2D tex coord, 5 floats per row).
public override IObservable <Mat> Process(IObservable <DeviceEvents> source)
{
    return(source.SelectMany(evts =>
    {
        var device = evts.Device;
        Extrinsics depthToColor;
        Intrinsics colorIntrinsics, depthIntrinsics;
        // NOTE(review): colorStream is created but never subscribed or used
        // below — FromEvent is lazy so this is inert; confirm it is not a
        // leftover and remove it if so.
        var colorStream = Observable.FromEvent <FrameCallback, Frame>(handler => evts.ColorFrame += handler, handler => evts.ColorFrame -= handler);
        var depthStream = Observable.FromEvent <FrameCallback, Frame>(handler => evts.DepthFrame += handler, handler => evts.DepthFrame -= handler);
        // Query calibration once per device connection.
        device.GetStreamIntrinsics(Stream.Color, out colorIntrinsics);
        device.GetStreamIntrinsics(Stream.Depth, out depthIntrinsics);
        device.GetExtrinsics(Stream.Depth, Stream.Color, out depthToColor);
        // Used to normalize projected color pixels into [0,1] texture coords.
        var pixelScale = new Vector2(colorIntrinsics.Width, colorIntrinsics.Height);
        var depthScale = device.DepthScale;
        // Per-subscription scratch state, lazily sized from the first frame and
        // reused for every subsequent frame to avoid per-frame allocation.
        int depthBufferWidth = 0;
        ushort[] depthBuffer = null;
        TexVertex[] depthPoints = null;
        var depthBufferStream = depthStream.Select(frame =>
        {
            if (depthBuffer == null)
            {
                // First frame: size the buffers from the frame dimensions.
                // NOTE(review): assumes frame dimensions never change for the
                // lifetime of the subscription — confirm with the device API.
                depthBufferWidth = frame.Width;
                depthBuffer = new ushort[frame.Width * frame.Height];
                depthPoints = new TexVertex[depthBuffer.Length];
            }
            // Wrap the native frame data (with its stride) and copy it into the
            // managed buffer so it survives past the frame callback.
            var depthFrameHeader = new Mat(frame.Height, frame.Width, Depth.U16, 1, frame.FrameData, frame.Stride);
            using (var bufferHeader = Mat.CreateMatHeader(depthBuffer, depthFrameHeader.Rows, depthFrameHeader.Cols, depthFrameHeader.Depth, depthFrameHeader.Channels))
            {
                CV.Copy(depthFrameHeader, bufferHeader);
            }
            return depthBuffer;
        });
        return depthBufferStream.Select(depth =>
        {
            // Number of valid (non-zero depth) vertices written so far.
            int pindex = 0;
            for (int i = 0; i < depthBuffer.Length; i++)
            {
                Vector2 depthPixel, colorPixel;
                // Recover the 2D pixel coordinates from the flat index
                // (integer division/modulo by the row width).
                depthPixel.X = i % depthBufferWidth;
                depthPixel.Y = i / depthBufferWidth;
                // Convert raw depth units to metric using the device scale;
                // zero depth marks an invalid pixel and is skipped.
                var depthValue = depth[i] * depthScale;
                if (depthValue == 0)
                {
                    continue;
                }
                Vector3 depthPoint, colorPoint;
                // depth pixel -> 3D point -> color camera frame -> color pixel.
                Intrinsics.DeprojectPoint(ref depthPixel, ref depthIntrinsics, depthValue, out depthPoint);
                Extrinsics.TransformPoint(ref depthPoint, ref depthToColor, out colorPoint);
                Intrinsics.ProjectPoint(ref colorPoint, ref colorIntrinsics, out colorPixel);

                // Normalize the projected pixel to texture coordinates and
                // store the vertex (written in place in the reused array).
                Vector2.Divide(ref colorPixel, ref pixelScale, out depthPoints[pindex].TexCoord);
                depthPoints[pindex].Position = depthPoint;
                pindex++;
            }
            // Emit only the valid vertices: pindex rows of 5 F32 values each.
            return Mat.FromArray(depthPoints, pindex, 5, Depth.F32, 1);
        });
    }));
}