/// <summary>
/// Loads the image file, enlarges it by the kernel's reduction so that the convolved result
/// comes out at IMAGESIZE, runs the convolution, and returns the inverted grayscale values.
/// </summary>
/// <param name="image">Source image record; its Filename is loaded from disk.</param>
/// <param name="kernel">Kernel to apply; its reduction must be square (X == Y).</param>
/// <returns>Flattened pixel values, inverted (1 - value) so edges render black on white.</returns>
/// <exception cref="ArgumentException">Thrown when the kernel's reduction is not square.</exception>
private static double[] GetTrainingImage(FeatureRecognizer_Image image, ConvolutionBase2D kernel)
{
    // Enlarge the initial image by the kernel's reduction so that after convolution, it is the desired size
    VectorInt reduction = kernel.GetReduction();
    if (reduction.X != reduction.Y)
    {
        // Bad argument, not an application failure — FDG says never throw ApplicationException
        throw new ArgumentException(string.Format("Kernel should be square: {0}x{1}", reduction.X, reduction.Y), "kernel");
    }

    BitmapSource bitmap = new BitmapImage(new Uri(image.Filename));
    bitmap = UtilityWPF.ResizeImage(bitmap, IMAGESIZE + reduction.X, true);

    Convolution2D retVal = UtilityWPF.ConvertToConvolution(bitmap, 1d);
    if (retVal.Width != retVal.Height)
    {
        //NOTE: width or height is already the desired size, this will just enlarge the other to make it square
        retVal = Convolutions.ExtendBorders(retVal, IMAGESIZE + reduction.X, IMAGESIZE + reduction.X);
    }

    retVal = Convolutions.Convolute(retVal, kernel);
    retVal = Convolutions.Abs(retVal);

    // It looks better when it's black on white
    double[] inverted = retVal.Values.
        Select(o => 1d - o).
        ToArray();

    return inverted;
}
/// <summary>
/// Prepares a convolution for vector extraction: pads it to a square, optionally normalizes,
/// optionally applies the instruction's kernel, then max-pools down to the target size and
/// takes absolute values.
/// </summary>
/// <param name="conv">The convolution to transform.</param>
/// <param name="instr">Options controlling normalization, kernel, and final size.</param>
/// <returns>A square convolution of instr.ToSize x instr.ToSize with non-negative values.</returns>
private static Convolution2D Convolute(Convolution2D conv, ToVectorInstructions instr)
{
    Convolution2D result = conv;

    // Pad the shorter dimension so the convolution is square before any processing
    if (result.Width != result.Height)
    {
        int size = Math.Max(result.Width, result.Height);
        result = Convolutions.ExtendBorders(result, size, size);
    }

    if (instr.ShouldNormalize)
    {
        result = Convolutions.Normalize(result);
    }

    if (instr.Convolution != null)
    {
        result = Convolutions.Convolute(result, instr.Convolution);
    }

    // Shrink to the requested size, then fold negatives into magnitudes
    result = Convolutions.MaxPool(result, instr.ToSize, instr.ToSize);
    return Convolutions.Abs(result);
}
/// <summary>
/// Applies the given kernel to the original image (converted to grays) and shows the result
/// in modifiedImage, colored per the edge-color combo selection.
/// </summary>
/// <param name="kernel">Either a single Convolution2D (gain/iterations from the sliders are applied) or a ConvolutionSet2D.</param>
/// <exception cref="ArgumentException">Thrown when the kernel is neither a Convolution2D nor a ConvolutionSet2D.</exception>
private void ApplyFilter(ConvolutionBase2D kernel)
{
    // Convert the original image to grayscale
    Convolution2D image = GetOriginalImageGrays();
    if (image == null)
    {
        // The original image is empty
        return;
    }

    Convolution2D filtered;

    // Pattern matching avoids the double type test (is-check followed by a cast)
    if (kernel is Convolution2D kernelSingle)
    {
        #region Single

        // This window builds kernels without gain or iterations, so make a clone with those tacked on
        Convolution2D kernelFinal = new Convolution2D(
            kernelSingle.Values,
            kernelSingle.Width,
            kernelSingle.Height,
            kernelSingle.IsNegPos,
            trkGain.Value,
            Convert.ToInt32(trkIterations.Value),
            chkExpandBorder.IsChecked.Value);

        filtered = Convolutions.Convolute(image, kernelFinal);

        if (chkSubtract.IsChecked.Value)
        {
            filtered = Convolutions.Subtract(image, filtered);
        }

        #endregion
    }
    else if (kernel is ConvolutionSet2D kernelSet)
    {
        #region Set

        filtered = Convolutions.Convolute(image, kernelSet);

        #endregion
    }
    else
    {
        throw new ArgumentException("Unknown type of kernel: " + kernel.GetType().ToString());
    }

    // Show Filtered
    modifiedImage.Source = Convolutions.GetBitmap(filtered, (ConvolutionResultNegPosColoring)cboEdgeColors.SelectedValue);
}