public HopfieldNetwork()
{
    InitializeComponent();

    _trainKernel = Convolutions.GetEdgeSet_GuassianThenEdge(5);

    _initialized = true;
}
public ToVectorInstructions(int[] fromSizes, int toSize, ConvolutionBase2D convolution = null, bool shouldNormalize = false, bool isColor2D = false)
{
    this.FromSizes = fromSizes;
    this.ToSize = toSize;
    this.Convolution = convolution;
    this.ShouldNormalize = shouldNormalize;
    this.IsColor2D = isColor2D;
}
public static Convolution2D Convolute(Convolution2D image, ConvolutionBase2D kernel, string description = "")
{
    if (kernel is Convolution2D)
    {
        return Convolute_Single(image, (Convolution2D)kernel, description);
    }
    else if (kernel is ConvolutionSet2D)
    {
        return Convolute_Set(image, (ConvolutionSet2D)kernel, description);
    }
    else
    {
        throw new ApplicationException("Unexpected type of kernel: " + kernel.GetType().ToString());
    }
}
public static string GetToolTip(ConvolutionBase2D conv, ConvolutionToolTipType typeSet, ConvolutionToolTipType typeSingle)
{
    if (conv is ConvolutionSet2D)
    {
        return GetToolTip((ConvolutionSet2D)conv, typeSet);
    }
    else if (conv is Convolution2D)
    {
        return GetToolTip((Convolution2D)conv, typeSingle);
    }
    else
    {
        return "";
    }
}
public DragDataObject(int index, Border control, ConvolutionBase2D kernel)
{
    this.Index = index;
    this.Control = control;
    this.Kernel = kernel;
}
public BrainRGBRecognizer(EditorOptions options, ItemOptions itemOptions, BrainRGBRecognizerDNA dna, IContainer energyTanks)
    : base(options, dna, itemOptions.Brain_Damage.HitpointMin, itemOptions.Brain_Damage.HitpointSlope, itemOptions.Brain_Damage.Damage)
{
    _itemOptions = itemOptions;
    _energyTanks = energyTanks;

    this.Design = new BrainRGBRecognizerDesign(options, true);
    this.Design.SetDNA(dna);

    _dnaExtra = dna.Extra ?? BrainRGBRecognizerDNAExtra.GetDefaultDNA();

    _isColor = _dnaExtra.IsColor;
    _finalResolution = _dnaExtra.FinalResolution;

    if (_dnaExtra.UseEdgeDetect)
    {
        _convolution = Convolutions.GetEdgeSet_Sobel();
    }
    else
    {
        _convolution = null;
    }

    _somDiscardDupes = _dnaExtra.ShouldSOMDiscardDupes;
    _somIsColor = _isColor;

    _shortTermMemory = new ShortTermMemory<double[]>(itemOptions.ShortTermMemory_MillisecondsBetween, itemOptions.ShortTermMemory_Size);

    //TODO: Get params from itemOptions
    _nonLifeEventSnapshots = new NonLifeEventSnapshots<double[]>();
    //_nonLifeEventSnapshots = new NonLifeEventSnapshots<double[]>(.25, .6, 2);        // faster times for debugging

    GetMass(out _mass, out _volume, out _radius, out _scaleActual, dna, itemOptions);
}
private static RecognitionResults RecognizeImage(double[] input, long inputToken, int width, int height, int finalResolution, TrainedRecognizer[] recognizers, LifeEventToVector lifeEvents, ConvolutionBase2D convolution, bool isColor)
{
    if (recognizers == null)
    {
        return new RecognitionResults(inputToken, new double[lifeEvents.Types.Length]);
    }

    double[] normalized = NormalizeInput(input, width, height, finalResolution, convolution, isColor);

    foreach (var recognizer in recognizers)
    {
        double[] output = recognizer.Network.Compute(normalized);

        //TODO: Analyze outputs of all the recognizers to come up with a final result.  Can't just take the average -- if they all
        //agree, that's great.  But disagreement should have a zero output (or at least a very weak output)
        return new RecognitionResults(inputToken, output);
    }

    throw new ApplicationException("finish this");
}
public static Border GetThumbnail(ConvolutionBase2D conv, int thumbSize, ContextMenu contextMenu, ConvolutionToolTipType typeSet = ConvolutionToolTipType.None, ConvolutionToolTipType typeSingle = ConvolutionToolTipType.Size)
{
    if (conv is Convolution2D)
    {
        return GetThumbnail_Single((Convolution2D)conv, thumbSize, contextMenu, typeSingle);
    }
    else if (conv is ConvolutionSet2D)
    {
        return GetThumbnail_Set((ConvolutionSet2D)conv, thumbSize, contextMenu, typeSet);
    }
    else
    {
        throw new ArgumentException("Unknown type of kernel: " + conv.GetType().ToString());
    }
}
/// <summary>
/// This returns some primitive filters that make a good pool to choose from when you want a random primitive filter
/// </summary>
public static Tuple<ConvolutionPrimitiveType, ConvolutionBase2D[]>[] GetPrimitiveConvolutions(ConvolutionPrimitiveType[] filter = null)
{
    var retVal = new List<Tuple<ConvolutionPrimitiveType, ConvolutionBase2D[]>>();

    List<ConvolutionBase2D> convolutions = new List<ConvolutionBase2D>();

    #region Gaussian

    if (filter == null || filter.Any(o => o == ConvolutionPrimitiveType.Gaussian))
    {
        convolutions.Clear();

        foreach (int size in new[] { 3, 7 })
        {
            convolutions.Add(GetGaussian(size, 1));
            convolutions.Add(GetGaussian(size, 2));
        }

        retVal.Add(Tuple.Create(ConvolutionPrimitiveType.Gaussian, convolutions.ToArray()));
    }

    #endregion
    #region Laplacian

    if (filter == null || filter.Any(o => o == ConvolutionPrimitiveType.Laplacian))
    {
        convolutions.Clear();

        foreach (int gain in new[] { 1, 2 })
        {
            convolutions.Add(GetEdge_Laplacian(true, gain));
            convolutions.Add(GetEdge_Laplacian(false, gain));
        }

        retVal.Add(Tuple.Create(ConvolutionPrimitiveType.Laplacian, convolutions.ToArray()));
    }

    #endregion
    #region Gaussian Subtract

    if (filter == null || filter.Any(o => o == ConvolutionPrimitiveType.Gaussian_Subtract))
    {
        convolutions.Clear();

        convolutions.Add(new ConvolutionSet2D(new[] { GetGaussian(3, 1) }, SetOperationType.Subtract));

        retVal.Add(Tuple.Create(ConvolutionPrimitiveType.Gaussian_Subtract, convolutions.ToArray()));
    }

    #endregion
    #region Individual Sobel

    if (filter == null || filter.Any(o => o == ConvolutionPrimitiveType.Individual_Sobel))
    {
        convolutions.Clear();

        Convolution2D sobelVert = GetEdge_Sobel(true);
        Convolution2D sobelHorz = GetEdge_Sobel(false);
        Convolution2D sobel45 = Rotate_45(sobelVert, true);
        Convolution2D sobel135 = Rotate_45(sobelHorz, true);

        convolutions.Add(sobelVert);
        convolutions.Add(sobelHorz);
        convolutions.Add(sobel45);
        convolutions.Add(sobel135);
        convolutions.Add(Invert(sobelVert));
        convolutions.Add(Invert(sobelHorz));
        convolutions.Add(Invert(sobel45));
        convolutions.Add(Invert(sobel135));

        retVal.Add(Tuple.Create(ConvolutionPrimitiveType.Individual_Sobel, convolutions.ToArray()));
    }

    #endregion
    #region MaxAbs Sobel

    if (filter == null || filter.Any(o => o == ConvolutionPrimitiveType.MaxAbs_Sobel))
    {
        convolutions.Clear();

        foreach (int gain in new[] { 1, 2 })
        {
            convolutions.Add(GetEdgeSet_Sobel(gain));
        }

        retVal.Add(Tuple.Create(ConvolutionPrimitiveType.MaxAbs_Sobel, convolutions.ToArray()));
    }

    #endregion
    #region Gaussian then edge

    if (filter == null || filter.Any(o => o == ConvolutionPrimitiveType.Gaussian_Then_Edge))
    {
        convolutions.Clear();

        foreach (int size in new[] { 3, 5, 7 })
        {
            ConvolutionSet2D maxSobel = GetEdgeSet_Sobel();

            ConvolutionBase2D[] convs = new ConvolutionBase2D[]
            {
                GetGaussian(size),
                maxSobel,
            };

            convolutions.Add(new ConvolutionSet2D(convs, SetOperationType.Standard));
        }

        retVal.Add(Tuple.Create(ConvolutionPrimitiveType.Gaussian_Then_Edge, convolutions.ToArray()));
    }

    #endregion

    return retVal.ToArray();
}
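// Usage sketch (illustration only, not part of the original source): one way the pools returned by
// GetPrimitiveConvolutions could be used to pick a random primitive kernel.  The method name and the
// Random parameter are assumptions made for this example.
private static ConvolutionBase2D GetRandomPrimitive(Random rand, ConvolutionPrimitiveType[] filter = null)
{
    // Each tuple is (primitive type, all kernels of that type)
    var pools = GetPrimitiveConvolutions(filter);

    // Pick a type, then a kernel within that type
    var pool = pools[rand.Next(pools.Length)];
    return pool.Item2[rand.Next(pool.Item2.Length)];
}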
private static double[] GetValuesFromImage_ConvMaxpool_Gray(FeatureRecognizer_Image image, ConvolutionBase2D kernel, int size, double scaleValue)
{
    const int INITIALSIZE = 80;

    BitmapSource bitmap = new BitmapImage(new Uri(image.Filename));
    bitmap = UtilityWPF.ResizeImage(bitmap, INITIALSIZE, true);

    Convolution2D retVal = UtilityWPF.ConvertToConvolution(bitmap, scaleValue);
    if (retVal.Width != retVal.Height)
    {
        retVal = Convolutions.ExtendBorders(retVal, INITIALSIZE, INITIALSIZE);      //NOTE: width or height is already the desired size, this will just enlarge the other to make it square
    }

    retVal = Convolutions.Convolute(retVal, kernel);
    retVal = Convolutions.MaxPool(retVal, size, size);
    retVal = Convolutions.Abs(retVal);

    return retVal.Values;
}
private static double[] GetValuesFromImage_ConvMaxpool_Color(FeatureRecognizer_Image image, ConvolutionBase2D kernel, int size, double scaleValue)
{
    const int INITIALSIZE = 80;

    BitmapSource bitmap = new BitmapImage(new Uri(image.Filename));
    bitmap = UtilityWPF.ResizeImage(bitmap, INITIALSIZE, true);

    var convs = UtilityWPF.ConvertToConvolution_RGB(bitmap, scaleValue);

    var final = new[] { convs.Item1, convs.Item2, convs.Item3 }.
        Select(o =>
        {
            Convolution2D retVal = o;

            if (retVal.Width != retVal.Height)
            {
                retVal = Convolutions.ExtendBorders(retVal, INITIALSIZE, INITIALSIZE);      //NOTE: width or height is already the desired size, this will just enlarge the other to make it square
            }

            retVal = Convolutions.Convolute(retVal, kernel);
            retVal = Convolutions.MaxPool(retVal, size, size);
            return Convolutions.Abs(retVal);
        }).
        ToArray();

    return MergeConvs(final[0], final[1], final[2]);
}
private void InsertKernel(ConvolutionBase2D kernel, int index = -1)
{
    Border border = Convolutions.GetThumbnail(kernel, 80, _kernelContextMenu);

    if (index < 0)
    {
        panel.Children.Add(border);
        _kernels.Add(kernel);
    }
    else
    {
        panel.Children.Insert(index, border);
        _kernels.Insert(index, kernel);
    }
}
private static ReducedExtract[] GetExtractSizes(BitmapSource bitmap, ConvolutionBase2D filter, RectInt rect, int minExtractSize = 10)
{
    List<ReducedExtract> retVal = new List<ReducedExtract>();

    VectorInt filterReduce = filter == null ? new VectorInt(0, 0) : filter.GetReduction();

    Rect percents = new Rect()
    {
        X = rect.X.ToDouble() / bitmap.PixelWidth.ToDouble(),
        Y = rect.Y.ToDouble() / bitmap.PixelHeight.ToDouble(),
        Width = rect.Width.ToDouble() / bitmap.PixelWidth.ToDouble(),
        Height = rect.Height.ToDouble() / bitmap.PixelHeight.ToDouble(),
    };

    double percent = 1d;

    while (true)
    {
        VectorInt imageSize = new VectorInt()
        {
            X = (bitmap.PixelWidth * percent).ToInt_Round(),
            Y = (bitmap.PixelHeight * percent).ToInt_Round(),
        };

        VectorInt postSize = imageSize - filterReduce;

        RectInt newRect = new RectInt()
        {
            X = (percents.X * postSize.X).ToInt_Round(),
            Y = (percents.Y * postSize.Y).ToInt_Round(),
            Width = (percents.Width * postSize.X).ToInt_Round(),
            Height = (percents.Height * postSize.Y).ToInt_Round(),
        };

        if (newRect.Width < minExtractSize || newRect.Height < minExtractSize)
        {
            break;
        }

        retVal.Add(new ReducedExtract()
        {
            Percent = percent,
            ImageSize = imageSize,
            Extract = newRect,
        });

        percent *= .75;
    }

    return retVal.ToArray();
}
private static void ApplyExtract_Draw_LeftImage(Grid grid, Convolution2D imageConv, ConvolutionBase2D preFilter, ConvolutionResultNegPosColoring edgeColor)
{
    string tooltip = string.Format("{0}x{1}", imageConv.Width, imageConv.Height);

    if (preFilter != null)
    {
        tooltip = preFilter.Description + "\r\n" + tooltip;
    }

    Image image = new Image()
    {
        Source = Convolutions.GetBitmap(imageConv, edgeColor),
        HorizontalAlignment = HorizontalAlignment.Stretch,
        VerticalAlignment = VerticalAlignment.Stretch,
        ToolTip = tooltip,
    };

    Grid.SetColumn(image, 0);
    Grid.SetRow(image, 0);

    grid.Children.Add(image);
}
private void FinishBuildingExtract(ConvolutionBase2D filter, FeatureRecognizer_Extract_Sub[] subs, string imageID)
{
    string uniqueID = Guid.NewGuid().ToString();

    // Determine filename
    string filename = "extract - " + uniqueID + ".xml";
    string fullFilename = System.IO.Path.Combine(_workingFolder, filename);

    // Add it
    FeatureRecognizer_Extract extract = new FeatureRecognizer_Extract()
    {
        Extracts = subs,
        PreFilter = filter,
        Control = Convolutions.GetThumbnail(subs[0].Extract, THUMBSIZE_EXTRACT, _extractContextMenu),
        ImageID = imageID,
        UniqueID = uniqueID,
        Filename = filename,
    };

    if (extract.PreFilter != null && extract.PreFilter is Convolution2D)
    {
        extract.PreFilterDNA_Single = ((Convolution2D)extract.PreFilter).ToDNA();
    }
    else if (extract.PreFilter != null && extract.PreFilter is ConvolutionSet2D)
    {
        extract.PreFilterDNA_Set = ((ConvolutionSet2D)extract.PreFilter).ToDNA();
    }

    // Copy to the working folder
    UtilityCore.SerializeToFile(fullFilename, extract);

    AddExtract(extract);

    // Update the session file
    SaveSession_SessionFile(_workingFolder);
}
public ConvolutionSet2D(ConvolutionBase2D[] convolutions, SetOperationType operationType, string description = "")
{
    foreach (object child in convolutions)
    {
        if (!(child is Convolution2D) && !(child is ConvolutionSet2D))
        {
            throw new ArgumentException("Object passed in must be Convolution2D or ConvolutionSet2D: " + child.GetType().ToString());
        }
    }

    this.Convolutions = convolutions;
    this.OperationType = operationType;
    _description = description;
}
private static double[] GetTrainingImage(FeatureRecognizer_Image image, ConvolutionBase2D kernel)
{
    // Enlarge the initial image by the kernel's reduction so that after convolution, it is the desired size
    VectorInt reduction = kernel.GetReduction();
    if (reduction.X != reduction.Y)
    {
        throw new ApplicationException(string.Format("Kernel should be square: {0}x{1}", reduction.X, reduction.Y));
    }

    BitmapSource bitmap = new BitmapImage(new Uri(image.Filename));
    bitmap = UtilityWPF.ResizeImage(bitmap, IMAGESIZE + reduction.X, true);

    Convolution2D retVal = UtilityWPF.ConvertToConvolution(bitmap, 1d);
    if (retVal.Width != retVal.Height)
    {
        retVal = Convolutions.ExtendBorders(retVal, IMAGESIZE + reduction.X, IMAGESIZE + reduction.X);      //NOTE: width or height is already the desired size, this will just enlarge the other to make it square
    }

    retVal = Convolutions.Convolute(retVal, kernel);
    retVal = Convolutions.Abs(retVal);

    // It looks better when it's black on white
    double[] inverted = retVal.Values.
        Select(o => 1d - o).
        ToArray();

    return inverted;
}
public static ConvolutionSet2D GetEdgeSet_GuassianThenEdge(int guassianSize = 3, double guassianStandardDeviationMultiplier = 1d, double edgeGain = 1d)
{
    ConvolutionBase2D[] convs = new ConvolutionBase2D[]
    {
        GetGaussian(guassianSize, guassianStandardDeviationMultiplier),
        GetEdgeSet_Sobel(edgeGain),
    };

    string guassianDescription = string.Format("gaussian {0}", guassianSize);
    if (!guassianStandardDeviationMultiplier.IsNearValue(1))
    {
        guassianDescription += " x" + guassianStandardDeviationMultiplier.ToStringSignificantDigits(1);
    }

    string edgeDescription = "edge";
    if (!edgeGain.IsNearValue(1))
    {
        edgeDescription += string.Format(" [gain={0}]", edgeGain.ToStringSignificantDigits(1));
    }

    string description = string.Format("{0} then {1}", guassianDescription, edgeDescription);

    return new ConvolutionSet2D(convs, SetOperationType.Standard, description);
}
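// Usage sketch (illustration only, not part of the original source): apply the blur-then-edge set to an
// image.  The method name is an assumption; ConvertToConvolution, GetEdgeSet_GuassianThenEdge and
// Convolute are the calls shown elsewhere in this listing.
private static Convolution2D GetBlurredEdges(BitmapSource bitmap)
{
    Convolution2D imageConv = UtilityWPF.ConvertToConvolution(bitmap, 1d);

    ConvolutionSet2D blurThenEdge = Convolutions.GetEdgeSet_GuassianThenEdge(5);

    return Convolutions.Convolute(imageConv, blurThenEdge);
}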
private void AddKernel(ConvolutionBase2D kernel)
{
    Border border = Convolutions.GetThumbnail(kernel, 40, null);

    // Store them
    panelKernels.Children.Add(border);
    _kernels.Add(kernel);
}
/// <summary>
/// This runs the input through NormalizeInput, and returns an object with those results
/// </summary>
private static TrainerInput GetTrainingInput(TrainerInput raw, ConvolutionBase2D convolution, bool isColor, int finalResolution)
{
    var importantEvents = raw.ImportantEvents.
        Select(o =>
        {
            double[] normalized = NormalizeInput(o.Item2, raw.Width, raw.Height, finalResolution, convolution, isColor);
            return Tuple.Create(o.Item1, normalized);
        }).
        ToArray();

    var unimportantEvents = raw.UnimportantEvents.
        Select(o => NormalizeInput(o, raw.Width, raw.Height, finalResolution, convolution, isColor)).
        ToArray();

    var awayPoints = GetAwayPoints(importantEvents.Select(o => o.Item2).ToArray());

    //VectorInt reduction;
    //if (convolution == null)
    //{
    //    reduction = new VectorInt(0, 0);
    //}
    //else
    //{
    //    reduction = convolution.GetReduction();
    //}

    return new TrainerInput()
    {
        //Width = raw.Width - reduction.X,
        //Height = raw.Height - reduction.Y,
        Width = finalResolution,
        Height = finalResolution,
        ImportantEvents = importantEvents,
        UnimportantEvents = unimportantEvents.Concat(awayPoints).ToArray(),
        IsColor = isColor,
    };
}
private void AddKernel(ConvolutionBase2D kernel)
{
    Border border = Convolutions.GetThumbnail(kernel, 40, _kernelContextMenu);

    //if (!string.IsNullOrEmpty(tooltipHeader))
    //{
    //    // For simple (not composite) kernels, it's the image that gets the tooltip.  So if this is one of those, add to the tooltip
    //    if (border.Child is Image)
    //    {
    //        string existingTooltip = ((Image)border.Child).ToolTip as string;

    //        if (!string.IsNullOrEmpty(existingTooltip))
    //        {
    //            ((Image)border.Child).ToolTip = tooltipHeader + "\r\n" + existingTooltip;
    //        }
    //        else
    //        {
    //            border.ToolTip = tooltipHeader;
    //        }
    //    }
    //    else
    //    {
    //        border.ToolTip = tooltipHeader;
    //    }
    //}

    // Store them
    panelKernels.Children.Add(border);
    _kernels.Add(kernel);
}
/// <summary>
/// The values are from 0 to 255, and need to be 0 to 1
/// </summary>
private static double[] NormalizeInput(double[] input, int width, int height, int finalResolution, ConvolutionBase2D convolution, bool isColor)
{
    // This part is now done earlier on
    //double[] retVal = input.
    //    Select(o => o / 255d).
    //    ToArray();
    double[] retVal = input;

    if (convolution != null)
    {
        if (isColor)
        {
            // Each pixel in input is R,G,B (so 3 values per pixel).  Each of the 3 colors needs to be run through the
            // convolution independently.  Then put them back into triples to feed the neural net
            //
            // Note that it's ok that they are jumbled up when going to the neural net.  Weights are assigned randomly
            // before starting training, so it could be all Rs, then all Gs, then Bs, and it wouldn't make any difference.
            //
            // But it is important for the convolutions to work with pure 2D images, because the convolutions are
            // essentially sliding a rectangle across another rectangle, and taking dot products (so rgb triples would
            // cause the convolution result to be nonsense)

            // Split into 3 arrays
            var split = SplitColor_conv(input, width, height);

            // Convolute independently
            Convolution2D r = Convolutions.Convolute(split.Item1, convolution);
            Convolution2D g = Convolutions.Convolute(split.Item2, convolution);
            Convolution2D b = Convolutions.Convolute(split.Item3, convolution);

            r = Convolutions.MaxPool(r, finalResolution, finalResolution);
            g = Convolutions.MaxPool(g, finalResolution, finalResolution);
            b = Convolutions.MaxPool(b, finalResolution, finalResolution);

            // Put back into one large array (but smaller than the original)
            retVal = MergeColor(r, g, b);
        }
        else
        {
            Convolution2D convoluted = Convolutions.Convolute(new Convolution2D(input, width, height, false), convolution);
            convoluted = Convolutions.MaxPool(convoluted, finalResolution, finalResolution);
            retVal = convoluted.Values;
        }
    }

    return retVal;
}
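// Sanity-check sketch (illustration only, not part of the original source): when a convolution is supplied,
// NormalizeInput splits color input into per-channel planes, convolutes each, then max-pools down to
// finalResolution, so the array lengths follow directly.  AssertNormalizedSizes is a hypothetical helper name.
private static void AssertNormalizedSizes(double[] input, double[] normalized, int width, int height, int finalResolution, bool isColor)
{
    int channels = isColor ? 3 : 1;     // R,G,B triples vs one gray value per pixel

    if (input.Length != width * height * channels)
    {
        throw new ApplicationException("Expected one value per pixel per channel going in");
    }

    if (normalized.Length != finalResolution * finalResolution * channels)
    {
        throw new ApplicationException("Expected one value per pooled cell per channel coming out");
    }
}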
private void ApplyFilter(ConvolutionBase2D kernel)
{
    // Convert the original image to grayscale
    Convolution2D image = GetOriginalImageGrays();
    if (image == null)
    {
        // The original image is empty
        return;
    }

    Convolution2D filtered = null;

    if (kernel is Convolution2D)
    {
        #region Single

        Convolution2D kernelSingle = (Convolution2D)kernel;

        // This window builds kernels without gain or iterations, so make a clone with those tacked on
        Convolution2D kernelFinal = new Convolution2D(
            kernelSingle.Values,
            kernelSingle.Width,
            kernelSingle.Height,
            kernelSingle.IsNegPos,
            trkGain.Value,
            Convert.ToInt32(trkIterations.Value),
            chkExpandBorder.IsChecked.Value);

        filtered = Convolutions.Convolute(image, kernelFinal);

        if (chkSubtract.IsChecked.Value)
        {
            filtered = Convolutions.Subtract(image, filtered);
        }

        #endregion
    }
    else if (kernel is ConvolutionSet2D)
    {
        #region Set

        ConvolutionSet2D kernelSet = (ConvolutionSet2D)kernel;

        filtered = Convolutions.Convolute(image, kernelSet);

        #endregion
    }
    else
    {
        throw new ArgumentException("Unknown type of kernel: " + kernel.GetType().ToString());
    }

    // Show Filtered
    modifiedImage.Source = Convolutions.GetBitmap(filtered, (ConvolutionResultNegPosColoring)cboEdgeColors.SelectedValue);
}
private static TrainedRecognizer Train(TrainerInput input, ConvolutionBase2D convolution, bool isColor, int finalResolution)
{
    if (input == null || input.ImportantEvents == null || input.ImportantEvents.Length == 0)
    {
        return null;
    }

    TrainerInput normalized = GetTrainingInput(input, convolution, isColor, finalResolution);

    List<double[]> inputs = new List<double[]>();
    List<double[]> outputs = new List<double[]>();

    // Important Events
    inputs.AddRange(normalized.ImportantEvents.Select(o => o.Item2));
    outputs.AddRange(normalized.ImportantEvents.Select(o => o.Item1.Vector));

    // Unimportant Events (sensor input that should output zeros)
    if (normalized.UnimportantEvents != null && normalized.UnimportantEvents.Length > 0)
    {
        int outputVectorLength = outputs[0].Length;

        inputs.AddRange(normalized.UnimportantEvents);

        outputs.AddRange(Enumerable.Range(0, normalized.UnimportantEvents.Length).
            Select(o => new double[outputVectorLength]));
    }

    //NOTE: If there is an exception, the network couldn't be trained
    BasicNetwork network = null;
    try
    {
        //network = UtilityEncog.GetTrainedNetwork(inputs.ToArray(), outputs.ToArray(), UtilityEncog.ERROR, 15, 45).NetworkOrNull;
        network = UtilityEncog.GetTrainedNetwork(inputs.ToArray(), outputs.ToArray(), UtilityEncog.ERROR, 5, 15).NetworkOrNull;
    }
    catch (Exception) { }

    if (network == null)
    {
        return null;
    }

    return new TrainedRecognizer()
    {
        Network = network,
        InputRaw = input,
        InputNormalized = normalized,
    };
}
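// Usage sketch (illustration only, not part of the original source): train a recognizer from the latest
// trainer input, then classify a new snapshot with it.  The method name and parameter plumbing are
// assumptions; Train and RecognizeImage are the methods shown in this listing.
private static RecognitionResults TrainThenRecognize(TrainerInput trainerInput, double[] snapshot, long snapshotToken, int width, int height, int finalResolution, LifeEventToVector lifeEvents, ConvolutionBase2D convolution, bool isColor)
{
    TrainedRecognizer recognizer = Train(trainerInput, convolution, isColor, finalResolution);

    // RecognizeImage returns all zeros when there are no trained recognizers yet
    TrainedRecognizer[] recognizers = recognizer == null ? null : new[] { recognizer };

    return RecognizeImage(snapshot, snapshotToken, width, height, finalResolution, recognizers, lifeEvents, convolution, isColor);
}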
/// <param name="itemDimensions"> /// The width, height, depth, etc of each item /// </param> /// <param name="convolution"> /// This gives a chance to run an edge detect, or some other convolution /// TODO: Support convolutions that can handle arbitrary dimensions /// </param> /// <param name="shouldNormalize"> /// Forces all inputs to go between 0 and 1. /// NOTE: Only do this if the inputs come from varied sources /// </param> /// <param name="dupeDistance"> /// If two items are closer together than this, they will be treated like they are the same. /// NOTE: If all the inputs are 0 to 1 (or even -1 to 1), then the default value should be fine. But if the /// inputs have larger values (like 0 to 255), you would want a larger min value /// </param> public SOMList(int[] itemDimensions, ConvolutionBase2D convolution = null, bool shouldNormalize = false, bool discardDupes = true, double dupeDistance = .001, bool isColor2D = false) { _instructions = new ToVectorInstructions(itemDimensions, GetResize(itemDimensions), convolution, shouldNormalize, isColor2D); _dupeDistSquared = dupeDistance * dupeDistance; }