/// <summary>
/// Validates that the file is a readable image, then registers it in the local image list.
/// </summary>
/// <param name="filename">Path to the image file (must be loadable as a bitmap).</param>
/// <param name="category">Category/tag to associate with the image.</param>
/// <remarks>
/// The caller is expected to have a catch block that handles non-image files — loading
/// the bitmap here forces the failure now rather than bombing later.
/// </remarks>
private void AddImage(string filename, string category)
{
    // Try to read as a bitmap.  Throws for non-image files (see remarks)
    BitmapSource bitmap = new BitmapImage(new Uri(filename));

    // Build entry
    FeatureRecognizer_Image entry = new FeatureRecognizer_Image()
    {
        Category = category,
        UniqueID = Guid.NewGuid().ToString(),
        Filename = filename,
    };

    // Store it
    _images.Add(entry);
}
/// <summary>
/// Builds the training vector for an image: resize so that the post-convolution result is
/// IMAGESIZE square, convolute with the kernel, take the absolute value, then invert.
/// </summary>
/// <param name="image">Image record whose file gets loaded from disk.</param>
/// <param name="kernel">Convolution kernel; must reduce width and height equally.</param>
/// <returns>The inverted convolution values (black on white looks better).</returns>
/// <exception cref="ApplicationException">Thrown when the kernel's reduction isn't square.</exception>
private static double[] GetTrainingImage(FeatureRecognizer_Image image, ConvolutionBase2D kernel)
{
    // Enlarge the initial image by the kernel's reduction so that after convolution, it is the desired size
    VectorInt reduction = kernel.GetReduction();
    if (reduction.X != reduction.Y)
    {
        throw new ApplicationException(string.Format("Kernel should be square: {0}x{1}", reduction.X, reduction.Y));
    }

    int enlargedSize = IMAGESIZE + reduction.X;

    BitmapSource bitmap = UtilityWPF.ResizeImage(new BitmapImage(new Uri(image.Filename)), enlargedSize, true);

    Convolution2D conv = UtilityWPF.ConvertToConvolution(bitmap, 1d);
    if (conv.Width != conv.Height)
    {
        // Width or height is already the desired size; this just enlarges the other to make it square
        conv = Convolutions.ExtendBorders(conv, enlargedSize, enlargedSize);
    }

    conv = Convolutions.Abs(Convolutions.Convolute(conv, kernel));

    // It looks better when it's black on white
    return conv.Values.
        Select(o => 1d - o).
        ToArray();
}
/// <summary>
/// Stores the image along with its original and normalized weight vectors.
/// </summary>
public ImageInput(FeatureRecognizer_Image image, double[] value_orig, double[] value_normalized)
{
    Image = image;
    Weights_Orig = value_orig;
    Weights = value_normalized;
}
/// <summary>
/// Grayscale pipeline: resize to 80, convolute with the kernel, max-pool down to
/// size x size, and return the absolute values.
/// </summary>
private static double[] GetValuesFromImage_ConvMaxpool_Gray(FeatureRecognizer_Image image, ConvolutionBase2D kernel, int size, double scaleValue)
{
    const int INITIALSIZE = 80;

    BitmapSource resized = UtilityWPF.ResizeImage(new BitmapImage(new Uri(image.Filename)), INITIALSIZE, true);

    Convolution2D conv = UtilityWPF.ConvertToConvolution(resized, scaleValue);
    if (conv.Width != conv.Height)
    {
        // One dimension is already INITIALSIZE; this just enlarges the other to make it square
        conv = Convolutions.ExtendBorders(conv, INITIALSIZE, INITIALSIZE);
    }

    conv = Convolutions.Convolute(conv, kernel);
    conv = Convolutions.MaxPool(conv, size, size);

    return Convolutions.Abs(conv).Values;
}
/// <summary>
/// Grayscale pipeline with no convolution: just resize to size x size and return the
/// raw grayscale values.
/// </summary>
private static double[] GetValuesFromImage_Resize_Gray(FeatureRecognizer_Image image, int size, double scaleValue)
{
    BitmapSource resized = UtilityWPF.ResizeImage(new BitmapImage(new Uri(image.Filename)), size, true);

    Convolution2D conv = UtilityWPF.ConvertToConvolution(resized, scaleValue);
    if (conv.Width != conv.Height)
    {
        // One dimension is already the desired size; this just enlarges the other to make it square
        conv = Convolutions.ExtendBorders(conv, size, size);
    }

    return conv.Values;
}
/// <summary>
/// Color pipeline: resize to 80, split into R/G/B planes, then convolute + max-pool + abs
/// each plane and merge the three results into one vector.
/// </summary>
private static double[] GetValuesFromImage_ConvMaxpool_Color(FeatureRecognizer_Image image, ConvolutionBase2D kernel, int size, double scaleValue)
{
    const int INITIALSIZE = 80;

    BitmapSource resized = UtilityWPF.ResizeImage(new BitmapImage(new Uri(image.Filename)), INITIALSIZE, true);

    var rgb = UtilityWPF.ConvertToConvolution_RGB(resized, scaleValue);

    Convolution2D[] planes = new[] { rgb.Item1, rgb.Item2, rgb.Item3 };
    Convolution2D[] processed = new Convolution2D[planes.Length];

    for (int cntr = 0; cntr < planes.Length; cntr++)
    {
        Convolution2D conv = planes[cntr];

        if (conv.Width != conv.Height)
        {
            // One dimension is already INITIALSIZE; this just enlarges the other to make it square
            conv = Convolutions.ExtendBorders(conv, INITIALSIZE, INITIALSIZE);
        }

        conv = Convolutions.Convolute(conv, kernel);
        conv = Convolutions.MaxPool(conv, size, size);

        processed[cntr] = Convolutions.Abs(conv);
    }

    return MergeConvs(processed[0], processed[1], processed[2]);
}
/// <summary>
/// Shrinks the image to a 1x3 strip, then pulls one requested color component out of each
/// of the three pixels (top, middle, bottom).
/// </summary>
/// <returns>Three values, each normalized to 0..1.</returns>
/// <exception cref="ApplicationException">Thrown when the resize doesn't yield exactly three pixels, or a component is unrecognized.</exception>
private static double[] GetValuesFromImage_3Pixel(FeatureRecognizer_Image image, SimpleColorComponent top, SimpleColorComponent middle, SimpleColorComponent bottom)
{
    BitmapSource bitmap = UtilityWPF.ResizeImage(new BitmapImage(new Uri(image.Filename)), 1, 3);

    byte[][] colors = ((BitmapCustomCachedBytes)UtilityWPF.ConvertToColorArray(bitmap, false, Colors.Black)).
        GetColors_Byte();

    if (colors.Length != 3)
    {
        throw new ApplicationException("Expected exactly three pixels: " + colors.Length.ToString());
    }

    SimpleColorComponent[] components = new[] { top, middle, bottom };

    double[] retVal = new double[3];

    for (int cntr = 0; cntr < 3; cntr++)
    {
        retVal[cntr] = GetValuesFromImage_3Pixel_Component(colors[cntr], components[cntr]);
    }

    return retVal;
}

/// <summary>
/// Pulls a single normalized component (0..1) out of one AARRGGBB pixel (color[0] is alpha).
/// </summary>
private static double GetValuesFromImage_3Pixel_Component(byte[] color, SimpleColorComponent component)
{
    switch (component)
    {
        case SimpleColorComponent.R:
            return color[1] / 255d;     // color[0] is alpha

        case SimpleColorComponent.G:
            return color[2] / 255d;

        case SimpleColorComponent.B:
            return color[3] / 255d;

        // The three HSV cases all need the same conversion, so it's done in one place
        case SimpleColorComponent.H:
            return UtilityWPF.RGBtoHSV(Color.FromArgb(color[0], color[1], color[2], color[3])).H / 360d;

        case SimpleColorComponent.S:
            return UtilityWPF.RGBtoHSV(Color.FromArgb(color[0], color[1], color[2], color[3])).S / 100d;

        case SimpleColorComponent.V:
            return UtilityWPF.RGBtoHSV(Color.FromArgb(color[0], color[1], color[2], color[3])).V / 100d;

        default:
            throw new ApplicationException("Unknown SimpleColorComponent: " + component.ToString());
    }
}
/// <summary>
/// Color pipeline with no convolution: resize to size x size, split into R/G/B planes,
/// and merge them into one vector.
/// </summary>
private static double[] GetValuesFromImage_Resize_Color(FeatureRecognizer_Image image, int size, double scaleValue)
{
    BitmapSource resized = UtilityWPF.ResizeImage(new BitmapImage(new Uri(image.Filename)), size, true);

    var rgb = UtilityWPF.ConvertToConvolution_RGB(resized, scaleValue);

    Convolution2D red = rgb.Item1;
    Convolution2D green = rgb.Item2;
    Convolution2D blue = rgb.Item3;

    if (red.Width != red.Height)
    {
        // One dimension is already the desired size; this just enlarges the other so each plane is square
        red = Convolutions.ExtendBorders(red, size, size);
        green = Convolutions.ExtendBorders(green, size, size);
        blue = Convolutions.ExtendBorders(blue, size, size);
    }

    return MergeConvs(red, green, blue);
}
/// <summary>
/// Collapses the whole image into a single pixel, then returns that pixel's three color
/// components in the requested scheme (each normalized to 0..1).
/// </summary>
/// <exception cref="ApplicationException">Thrown when the resize doesn't yield exactly one pixel, or the scheme is unrecognized.</exception>
private static double[] GetValuesFromImage_1Pixel(FeatureRecognizer_Image image, SimpleColorScheme scheme)
{
    BitmapSource bitmap = UtilityWPF.ResizeImage(new BitmapImage(new Uri(image.Filename)), 1, 1);

    byte[][] colors = ((BitmapCustomCachedBytes)UtilityWPF.ConvertToColorArray(bitmap, false, Colors.Black)).
        GetColors_Byte();

    if (colors.Length != 1)
    {
        throw new ApplicationException("Expected exactly one pixel: " + colors.Length.ToString());
    }

    byte[] pixel = colors[0];       // pixel[0] is alpha, then R, G, B

    switch (scheme)
    {
        case SimpleColorScheme.RGB:
            return new[]
            {
                pixel[1] / 255d,
                pixel[2] / 255d,
                pixel[3] / 255d,
            };

        case SimpleColorScheme.HSV:
            ColorHSV hsv = UtilityWPF.RGBtoHSV(Color.FromArgb(pixel[0], pixel[1], pixel[2], pixel[3]));
            return new[]
            {
                hsv.H / 360d,
                hsv.S / 100d,
                hsv.V / 100d,
            };

        default:
            throw new ApplicationException("Unknown SimpleColorScheme: " + scheme.ToString());
    }
}
/// <summary>
/// Adds the image to the backing list, places its control under the matching category node
/// in the treeview (creating the node alphabetically if needed), and rebuilds the tag combobox.
/// </summary>
internal static void AddImage(FeatureRecognizer_Image image, List<FeatureRecognizer_Image> images, TreeView treeview, ComboBox combobox)
{
    images.Add(image);

    // Look for an existing node whose header matches the image's category.  The headers seen
    // along the way are remembered so a new node can be inserted alphabetically (the list is
    // only consulted when the scan ran to completion without a match, so it's complete then)
    TreeViewItem categoryNode = null;
    List<string> headers = new List<string>();

    foreach (TreeViewItem node in treeview.Items)
    {
        string header = (string)node.Header;
        headers.Add(header);

        if (header.Equals(image.Category, StringComparison.OrdinalIgnoreCase))
        {
            categoryNode = node;
            break;
        }
    }

    if (categoryNode == null)
    {
        // Create a new node, stored alphabetically
        categoryNode = new TreeViewItem() { Header = image.Category };
        treeview.Items.Insert(UtilityCore.GetInsertIndex(headers, image.Category), categoryNode);
    }

    // Add the image control to this category node
    categoryNode.Items.Add(image.ImageControl);
    categoryNode.IsExpanded = true;

    // Rebuild the combobox's items from the distinct categories, preserving whatever text was typed in
    string currentText = combobox.Text;
    combobox.Items.Clear();

    foreach (string category in images.Select(o => o.Category).Distinct().OrderBy(o => o))
    {
        combobox.Items.Add(category);
    }

    combobox.Text = currentText;
}
/// <summary>
/// Click handler: prompts for one or more image files, squares each one to IMAGESIZE,
/// saves a PNG copy in the working folder, registers it, then resaves the session file.
/// </summary>
private void btnAddImage_Click(object sender, RoutedEventArgs e)
{
    try
    {
        string category = cboImageLabel.Text;
        if (string.IsNullOrWhiteSpace(category))
        {
            MessageBox.Show("Please give this image a name", this.Title, MessageBoxButton.OK, MessageBoxImage.Warning);
            return;
        }

        OpenFileDialog dialog = new OpenFileDialog()
        {
            Multiselect = true,
            Title = "Please select an image",
        };

        bool? result = dialog.ShowDialog();
        if (result == null || !result.Value)
        {
            return;
        }

        foreach (string selectedFile in dialog.FileNames)
        {
            // Make sure that it's a valid file (throws for non-images)
            BitmapSource bitmap = new BitmapImage(new Uri(selectedFile));

            // Resize (for this tester, just force it to be square)
            bitmap = UtilityWPF.ResizeImage(bitmap, IMAGESIZE, IMAGESIZE);

            string uniqueID = Guid.NewGuid().ToString();

            // Copy to the working folder
            string filename = category + " - " + uniqueID + ".png";
            string fullFilename = System.IO.Path.Combine(_workingFolder, filename);
            UtilityWPF.SaveBitmapPNG(bitmap, fullFilename);

            // Build entry
            FeatureRecognizer_Image entry = new FeatureRecognizer_Image()
            {
                Category = category,
                UniqueID = uniqueID,
                Filename = filename,
                ImageControl = GetTreeviewImageCtrl(bitmap),
                Bitmap = bitmap,
            };

            // Store it
            AddImage(entry, _images, treeImages, cboImageLabel);
        }

        // Update the session file
        SaveSession_SessionFile(_workingFolder);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.ToString(), this.Title, MessageBoxButton.OK, MessageBoxImage.Error);
    }
}
/// <summary>
/// Resizes the image to the extract's input size, runs the convolution/matching work in a
/// background task, then draws the left image, filtered image, and any matches into the grid.
/// </summary>
/// <returns>A task that completes once the results have been drawn.</returns>
/// <remarks>
/// NOTE(review): This was "private async static void", which makes exceptions unobservable
/// and the method impossible to await.  Returning Task is backward compatible — existing
/// fire-and-forget callers still compile unchanged, but the task can now be awaited.
/// </remarks>
private static async Task ApplyExtract_DoIt(Grid grid, FeatureRecognizer_Image image, FeatureRecognizer_Extract extract, FeatureRecognizer_Extract_Sub sub, ConvolutionResultNegPosColoring edgeColor, ContextMenu contextMenu)
{
    // Source image (only resize when the stored bitmap isn't already the right size)
    BitmapSource bitmap;
    if (sub.InputWidth == image.Bitmap.PixelWidth && sub.InputHeight == image.Bitmap.PixelHeight)
    {
        bitmap = image.Bitmap;
    }
    else
    {
        bitmap = UtilityWPF.ResizeImage(image.Bitmap, sub.InputWidth, sub.InputHeight);
    }

    Convolution2D imageConv = ((BitmapCustomCachedBytes)UtilityWPF.ConvertToColorArray(bitmap, false, Colors.Transparent)).ToConvolution();

    // Convolute, look for matches
    var results = await ApplyExtract_DoIt_Task(imageConv, extract, sub);

    #region Show results

    // Left Image
    ApplyExtract_Draw_LeftImage(grid, results.ImageConv, extract.PreFilter, edgeColor);

    // Right Image
    if (results.Filtered != null)
    {
        ApplyExtract_Draw_RightImage(grid, results.Filtered, edgeColor);
    }

    // Matches
    if (results.Matches != null && results.Matches.Length > 0)
    {
        // NOTE(review): assumes Filtered is non-null whenever there are matches — verify upstream
        ApplyExtract_Draw_Matches(grid, results.Matches, results.Filtered.Size, sub, contextMenu);
    }

    #endregion
}
/// <summary>
/// Clears the results panel, then kicks off one async extract/draw pass per sub-extract,
/// each rendered into its own 3x3 grid (content / 5px spacer / content in both directions).
/// </summary>
private void ApplyExtract(FeatureRecognizer_Image image, FeatureRecognizer_Extract extract)
{
    pnlExtractResults.Children.Clear();

    ConvolutionResultNegPosColoring edgeColor = (ConvolutionResultNegPosColoring)cboEdgeColors.SelectedValue;

    // GridLength is a struct, so these values can be shared across definitions
    GridLength star = new GridLength(1, GridUnitType.Star);
    GridLength spacer = new GridLength(5, GridUnitType.Pixel);
    GridLength auto = new GridLength(1, GridUnitType.Auto);

    foreach (var sub in extract.Extracts)
    {
        // Create a panel that will hold the result
        Grid grid = new Grid();

        // Put a gap above every result after the first
        if (pnlExtractResults.Children.Count > 0)
        {
            grid.Margin = new Thickness(0, 10, 0, 0);
        }

        grid.ColumnDefinitions.Add(new ColumnDefinition() { Width = star });
        grid.ColumnDefinitions.Add(new ColumnDefinition() { Width = spacer });
        grid.ColumnDefinitions.Add(new ColumnDefinition() { Width = star });

        grid.RowDefinitions.Add(new RowDefinition() { Height = auto });
        grid.RowDefinitions.Add(new RowDefinition() { Height = spacer });
        grid.RowDefinitions.Add(new RowDefinition() { Height = auto });

        pnlExtractResults.Children.Add(grid);

        // Do the work in another thread
        ApplyExtract_DoIt(grid, image, extract, sub, edgeColor, _extractResultContextMenu);
    }
}