Example #1
0
        /// <summary>
        /// Asks the neural network to classify the image currently shown in
        /// <c>pictureBox2</c> and displays the recognized feature (description,
        /// name, reference image) — or a fallback message when nothing matched.
        /// </summary>
        private void Guess()
        {
            int featureId = nn.Guess(pictureBox2.Image, true);
            EntityNetwork.Feature match = nn.GetFeature(featureId);

            // No feature registered for this id — show the fallback and clear the preview.
            if (match == null)
            {
                label2.Text       = "I can't recognize";
                pictureBox4.Image = null;
                return;
            }

            label2.Text       = $"{match.description} ({match.name})";
            pictureBox4.Image = match.image;
        }
Example #2
0
        /// <summary>
        /// The main worker, which extracts the features from the original image, tries to find out from the neural network what this feature is,
        /// and if the neural network recognizes it, it takes the entities from the feature and adds it to the `insertionNode`.
        /// Entities from the feature are placed in the center of the window under test.
        /// Scanning is either a deterministic left-to-right/top-to-bottom "zigzag"
        /// sweep (current mode) or a random-window probe.
        /// </summary>
        /// <param name="sender">The <see cref="BackgroundWorker"/> that raised the event.</param>
        /// <param name="e">Work arguments; <see cref="DoWorkEventArgs.Cancel"/> is set when the worker is cancelled.</param>
        private void backgroundWorkerML_DoWork(object sender, DoWorkEventArgs e)
        {
            while (!backgroundWorkerML.CancellationPending)
            {
                // Pick the next sub-image (window) of the network's fixed window size.

                Rectangle rect = new Rectangle();

                rect.Width  = nn.GetWindowSize();
                rect.Height = nn.GetWindowSize();

                // NOTE(review): hard-coded to the zigzag sweep; the random branch
                // below is currently dead code kept for experimentation.
                bool zigzag = true;

                if (zigzag)
                {
                    rect.X = windowsPos.X;
                    rect.Y = windowsPos.Y;

                    // Advance one pixel at a time; wrap to the next row at the right edge.
                    windowsPos.X += 1;
                    if (windowsPos.X >= (ML_sourceBitmap.Width - rect.Width))
                    {
                        windowsPos.X  = 0;
                        windowsPos.Y += 1;

                        if (windowsPos.Y >= (ML_sourceBitmap.Height - rect.Height))
                        {
                            Console.WriteLine("Zigzag scan complete");
                            return;
                        }
                    }
                }
                else
                {
                    // Random.Next's upper bound is exclusive, so +1 makes the last
                    // fully-in-bounds window position reachable.
                    rect.X = rnd.Next(0, ML_sourceBitmap.Width - rect.Width + 1);
                    rect.Y = rnd.Next(0, ML_sourceBitmap.Height - rect.Height + 1);
                }

                // Clone() allocates a new Bitmap (a GDI handle); dispose it every
                // iteration or the worker leaks handles while scanning.
                using (Bitmap subImage = ML_sourceBitmap.Clone(rect, ML_sourceBitmap.PixelFormat))
                {
                    // Ask the neural network what it is

                    int id = nn.Guess(subImage, false);

                    EntityNetwork.Feature feature = nn.GetFeature(id);

                    if (feature != null && feature.entities != null)
                    {
                        // The feature was detected: deserialize its entity list and
                        // center the entities in the sub-image window.

                        XmlSerializer ser = new XmlSerializer(typeof(List<Entity>));

                        using (StringReader textReader = new StringReader(feature.entities))
                        {
                            PointF        center   = entityBox1.ImageToLambda(rect.X + rect.Width / 2, rect.Y + rect.Height / 2);
                            List<Entity>  entities = (List<Entity>)ser.Deserialize(textReader);
                            EntityAligner.CenterFeatureEntities(center, entities);
                            entityBox1.root.Children.AddRange(entities);

                            Console.WriteLine("Found " + feature.name);

                            //entityBox1.Invalidate();
                        }
                    }
                }
            }

            // Report cooperative cancellation so RunWorkerCompleted sees Cancelled == true
            // (standard BackgroundWorker pattern).
            e.Cancel = true;
        }