private static Task<ApplyResult> ApplyExtract_DoIt_Task(Convolution2D imageConv, FeatureRecognizer_Extract extract, FeatureRecognizer_Extract_Sub sub)
{
    return Task.Run(() =>
    {
        VectorInt totalReduce = sub.Extract.GetReduction();

        Convolution2D finalImage = imageConv;
        if (extract.PreFilter != null)
        {
            finalImage = Convolutions.Convolute(imageConv, extract.PreFilter);      //TODO: The final worker method shouldn't do this if too small.  I'm just doing it to show the user something
            totalReduce += extract.PreFilter.GetReduction();
        }

        if (imageConv.Width <= totalReduce.X || imageConv.Height <= totalReduce.Y)
        {
            // Too small, exit early
            return new ApplyResult(finalImage, null, null);
        }

        // Apply convolutions
        Convolution2D filtered = Convolutions.Convolute(finalImage, sub.Extract);

        // Look at the brightest spots, and see if they are matches
        var matches = AnalyzeBrightSpots(filtered, sub.Results);

        return new ApplyResult(finalImage, filtered, matches);
    });
}
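// ApplyResult isn't defined in this snippet.  A minimal sketch of the shape this method
// and ApplyExtract_DoIt assume (field names match the usage below; the match element
// type is a guess, the real definition lives elsewhere in this class):
//
//  private class ApplyResult
//  {
//      public ApplyResult(Convolution2D imageConv, Convolution2D filtered, object[] matches)
//      {
//          ImageConv = imageConv;      // the image after the optional prefilter
//          Filtered = filtered;        // null when the image was too small to convolute
//          Matches = matches;          // null when skipped, empty when nothing matched
//      }
//      public readonly Convolution2D ImageConv;
//      public readonly Convolution2D Filtered;
//      public readonly object[] Matches;
//  }
//
// GetReduction is also assumed: a "valid" convolution with a WxH kernel shrinks the
// image by (W - 1, H - 1) pixels, which is why the prefilter's and the extract's
// reductions are summed before comparing against the image size.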
private void BuildChangedExtract(FeatureRecognizer_Extract origExtract, Convolution2D newConv)
{
    var image = _images.FirstOrDefault(o => o.UniqueID == origExtract.ImageID);
    if (image == null)
    {
        throw new ApplicationException("Couldn't find the image that the original extract references");
    }

    // Image convolution
    Convolution2D imageConv = ((BitmapCustomCachedBytes)UtilityWPF.ConvertToColorArray(image.Bitmap, false, Colors.Transparent)).ToConvolution();

    if (origExtract.PreFilter != null)
    {
        imageConv = Convolutions.Convolute(imageConv, origExtract.PreFilter);
    }

    //NOTE: The original extract contains multiple sizes (reductions of the largest one).  But since this convolution
    //is derived from the largest extract, it would be difficult to reduce.  So only the highest resolution image is kept
    FeatureRecognizer_Extract_Sub resultPatch = BuildExtractResult(imageConv, newConv);

    FinishBuildingExtract(origExtract.PreFilter, new[] { resultPatch }, origExtract.ImageID);
}
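// The bitmap -> Convolution2D cast chain above is repeated verbatim in ApplyExtract_DoIt
// below.  A small helper could consolidate it -- a sketch under the same API assumptions,
// not existing code (the bool and color arguments are just carried through from the call
// sites as-is):
//
//  private static Convolution2D GetImageConvolution(BitmapSource bitmap)
//  {
//      return ((BitmapCustomCachedBytes)UtilityWPF.ConvertToColorArray(bitmap, false, Colors.Transparent)).ToConvolution();
//  }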
private static async void ApplyExtract_DoIt(Grid grid, FeatureRecognizer_Image image, FeatureRecognizer_Extract extract, FeatureRecognizer_Extract_Sub sub, ConvolutionResultNegPosColoring edgeColor, ContextMenu contextMenu)
{
    // Source image
    BitmapSource bitmap;
    if (sub.InputWidth == image.Bitmap.PixelWidth && sub.InputHeight == image.Bitmap.PixelHeight)
    {
        bitmap = image.Bitmap;
    }
    else
    {
        bitmap = UtilityWPF.ResizeImage(image.Bitmap, sub.InputWidth, sub.InputHeight);
    }

    Convolution2D imageConv = ((BitmapCustomCachedBytes)UtilityWPF.ConvertToColorArray(bitmap, false, Colors.Transparent)).ToConvolution();

    // Convolute, look for matches
    var results = await ApplyExtract_DoIt_Task(imageConv, extract, sub);

    #region Show results

    // Left Image
    ApplyExtract_Draw_LeftImage(grid, results.ImageConv, extract.PreFilter, edgeColor);

    // Right Image
    if (results.Filtered != null)
    {
        ApplyExtract_Draw_RightImage(grid, results.Filtered, edgeColor);
    }

    // Matches
    if (results.Matches != null && results.Matches.Length > 0)
    {
        ApplyExtract_Draw_Matches(grid, results.Matches, results.Filtered.Size, sub, contextMenu);
    }

    #endregion
}
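// NOTE: async void can't be awaited, so ApplyExtract fires these off without observing
// completion or exceptions (errors surface on the dispatcher instead of at the caller).
// That's acceptable for this fire-and-forget UI update, but if failures ever need to be
// collected, a Task-returning signature is the usual fix (sketch only; the foreach in
// ApplyExtract would need to hold onto the returned tasks):
//
//  private static async Task ApplyExtract_DoIt(Grid grid, FeatureRecognizer_Image image, ...)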
private void ApplyExtract(FeatureRecognizer_Image image, FeatureRecognizer_Extract extract)
{
    pnlExtractResults.Children.Clear();

    ConvolutionResultNegPosColoring edgeColor = (ConvolutionResultNegPosColoring)cboEdgeColors.SelectedValue;

    foreach (var sub in extract.Extracts)
    {
        // Create a panel that will hold the result.  It's a 3x3 grid: star sized columns
        // for the left/right images with a 5 pixel spacer column between, and auto sized
        // rows with a 5 pixel spacer row
        Grid grid = new Grid();
        if (pnlExtractResults.Children.Count > 0)
        {
            grid.Margin = new Thickness(0, 10, 0, 0);
        }

        grid.ColumnDefinitions.Add(new ColumnDefinition() { Width = new GridLength(1, GridUnitType.Star) });
        grid.ColumnDefinitions.Add(new ColumnDefinition() { Width = new GridLength(5, GridUnitType.Pixel) });
        grid.ColumnDefinitions.Add(new ColumnDefinition() { Width = new GridLength(1, GridUnitType.Star) });

        grid.RowDefinitions.Add(new RowDefinition() { Height = new GridLength(1, GridUnitType.Auto) });
        grid.RowDefinitions.Add(new RowDefinition() { Height = new GridLength(5, GridUnitType.Pixel) });
        grid.RowDefinitions.Add(new RowDefinition() { Height = new GridLength(1, GridUnitType.Auto) });

        pnlExtractResults.Children.Add(grid);

        // Do the work in another thread
        ApplyExtract_DoIt(grid, image, extract, sub, edgeColor, _extractResultContextMenu);
    }
}
private void EditConvolution(FeatureRecognizer_Extract extract)
{
    ImageFilterPainter viewer = new ImageFilterPainter();

    viewer.Closed += Child_Closed;
    viewer.SaveRequested += Painter_SaveRequested;

    _childWindows.Add(viewer);

    viewer.Tag = extract;

    viewer.EditKernel((Convolution2D)extract.Extracts[0].Extract);      // give it the highest resolution one

    viewer.Show();
}
private void AddExtract(FeatureRecognizer_Extract extract)
{
    panelExtracts.Children.Add(extract.Control);
    _extracts.Add(extract);
}
private void FinishBuildingExtract(ConvolutionBase2D filter, FeatureRecognizer_Extract_Sub[] subs, string imageID)
{
    string uniqueID = Guid.NewGuid().ToString();

    // Determine filename
    string filename = "extract - " + uniqueID + ".xml";
    string fullFilename = System.IO.Path.Combine(_workingFolder, filename);

    // Build it
    FeatureRecognizer_Extract extract = new FeatureRecognizer_Extract()
    {
        Extracts = subs,
        PreFilter = filter,
        Control = Convolutions.GetThumbnail(subs[0].Extract, THUMBSIZE_EXTRACT, _extractContextMenu),
        ImageID = imageID,
        UniqueID = uniqueID,
        Filename = filename,
    };

    // Store the prefilter as serializable DNA (a single kernel and a set of kernels have different DNA shapes).
    // The is operator returns false for null, so no explicit null check is needed
    if (extract.PreFilter is Convolution2D)
    {
        extract.PreFilterDNA_Single = ((Convolution2D)extract.PreFilter).ToDNA();
    }
    else if (extract.PreFilter is ConvolutionSet2D)
    {
        extract.PreFilterDNA_Set = ((ConvolutionSet2D)extract.PreFilter).ToDNA();
    }

    // Save to the working folder
    UtilityCore.SerializeToFile(fullFilename, extract);

    AddExtract(extract);

    // Update the session file
    SaveSession_SessionFile(_workingFolder);
}
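// subs[0] is dereferenced for the thumbnail, and both known callers treat index 0 as the
// highest resolution kernel (see EditConvolution and the NOTE in BuildChangedExtract).
// A guard would make that contract explicit if new callers are ever added (sketch):
//
//  if (subs == null || subs.Length == 0)
//      throw new ArgumentException("subs must contain at least one extract", "subs");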