Example #1
 public override void Setup(IEvolutionState state, IParameter paramBase)
 {
     confMat                = new double[10, 10];
     imageTransformer       = new ImageTransformer(0, 0, "", "");
     NumOfImagesPerCategory = 100;
     SuperpixelSize         = 8;
     NumOfClasses           = 10;
     NumOfFeatures          = 64 * 64 / (SuperpixelSize * SuperpixelSize);
     CategoryDirs           = Directory.EnumerateDirectories(@"F:\Gesty\problem2\grayscale").ToArray();
     currentImage           = new Image <Gray, Byte> [4];
     originalImage          = new Image <Gray, Byte> [4];
     for (int i = 0; i < 4; i++)
     {
         currentImage[i]  = new Image <Gray, Byte>(64, 64);
         originalImage[i] = new Image <Gray, Byte>(64, 64);
     }
     // Parameter = new SVMParameter();
     //Parameter.Type = SVMType.C_SVC;
     //Parameter.Kernel = SVMKernelType.POLY;
     base.Setup(state, paramBase);
     Input = (ProblemData)state.Parameters.GetInstanceForParameterEq(
         paramBase.Push(P_DATA), null, typeof(ProblemData));
     Input.Setup(state, paramBase.Push(P_DATA));
     // imageTransformer.RescaleAndSaveImages();
     // imageList = Directory.GetFiles(@"F:\Gesty\rescaled");
 }
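With the values above, NumOfFeatures works out to 64 * 64 / (8 * 8) = 64, i.e. one feature per 8x8 superpixel of a 64x64 image, which matches the 64 feature columns written per image at the end of Example #38.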
Example #2
 private void SaveImagesAndExcelProcess(object sender, DoWorkEventArgs e)
 {
     try
     {
         var worker    = sender as BackgroundWorker;
         var arguments = e.Argument as object[];
         var lines     = arguments[0] as List <FileStatusLine>;
         var formatter = new ImageTransformer(lines, arguments[1].ToString());
         if (worker.CancellationPending)
         {
             return;
         }
         formatter.Run(worker, (bool)arguments[2], (bool)arguments[3], arguments[4].ToString(),
                       Int32.Parse(arguments[5].ToString()), Int32.Parse(arguments[6].ToString()));
         if (worker.CancellationPending)
         {
             return;
         }
         SaveDataToExcel(worker, arguments[7] as IList <ExcelRowDataModel>, lines, arguments[8].ToString());
     }
     catch (Exception ex)
     {
         MessageBox.Show(ex.ToString(), "Error", MessageBoxButton.OK, MessageBoxImage.Error);
     }
 }
Example #3
        public static async Task <byte[]> ResizeImageTizen(byte[] imageData, float width, float height)
        {
            using (JpegDecoder jpegDecoder = new JpegDecoder())
            {
                Size newImageSize = new Size((int)width, (int)height);
                IEnumerable <BitmapFrame> image = await jpegDecoder.DecodeAsync(imageData);

                Size   oldImageSize = image.First().Size;
                byte[] rawImageData = image.First().Buffer;
                using (MediaPacket mediaPacket = MediaPacket.Create(new VideoMediaFormat(MediaFormatVideoMimeType.Rgba, oldImageSize)))
                {
                    mediaPacket.VideoPlanes[0].Buffer.CopyFrom(rawImageData, 0, rawImageData.Length);
                    using (ImageTransformer imageTransformer = new ImageTransformer())
                    {
                        using (MediaPacket newMediaPacket = await imageTransformer.TransformAsync(mediaPacket, new ResizeTransform(newImageSize)))
                        {
                            IMediaBuffer buffer          = newMediaPacket.VideoPlanes[0].Buffer;
                            byte[]       newRawImageData = new byte[buffer.Length];
                            buffer.CopyTo(newRawImageData, 0, buffer.Length);
                            using (var jpegEncoder = new JpegEncoder())
                            {
                                jpegEncoder.Quality = 100;
                                jpegEncoder.SetResolution(newImageSize);
                                using (MemoryStream ms = new MemoryStream())
                                {
                                    await jpegEncoder.EncodeAsync(newRawImageData, ms);

                                    return(ms.ToArray());
                                }
                            }
                        }
                    }
                }
            }
        }
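A minimal call-site sketch for the helper above; the file names and the 256x256 target size are illustrative assumptions, not part of the original code:

        // Hypothetical usage inside an async method: shrink a JPEG to a 256x256 thumbnail.
        byte[] jpegBytes = File.ReadAllBytes("photo.jpg");              // assumed input file
        byte[] thumbnail = await ResizeImageTizen(jpegBytes, 256, 256); // helper defined above
        File.WriteAllBytes("thumbnail.jpg", thumbnail);                 // assumed output path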
Example #4
        public void WhenCalling_ApplyTransforms_WithNoGdiImageInSuppliedImageArg_Should_ThrowArgumentException()
        {
            var sut = new ImageTransformer();

            sut.Invoking(it => it.ApplyTransforms(new Image("01_04_2019_001103"), "100x100"))
            .Should().ThrowExactly <ArgumentException>();
        }
Example #5
        public override void Eval(IEvolutionState state, int thread, GPData input, ADFStack stack, GPIndividual individual, IProblem problem)
        {
            var p = (FeatureExtractionProblem2)problem;

            Children[0].Eval(state, thread, input, stack, individual, problem);
            ImageTransformer.MaxPooling4(p.currentImage[thread]);
        }
Example #6
        public void WhenCalling_ApplyTransforms_WithNullImage_Should_ThrowArgumentNullException()
        {
            var sut = new ImageTransformer();

            sut.Invoking(it => it.ApplyTransforms(null, "100x100"))
            .Should().ThrowExactly <ArgumentNullException>();
        }
Example #7
        public void WhenCalling_ApplyTransforms_WithImageType_Should_ReturnImageWithCorrectImageFormat(string imageTypeParameter, string expectedImageFormatName)
        {
            var sut       = new ImageTransformer();
            var testImage = new Image("01_04_2019_001103", TestHelpers.GetTestImage());
            var result    = sut.ApplyTransforms(testImage, imageType: imageTypeParameter);

            result.ToGdiImage().RawFormat.ToString().Should().BeEquivalentTo(expectedImageFormatName);
        }
Example #8
        public void WhenCalling_ApplyTransforms_WithNoTransforms_Should_ReturnOriginalImage()
        {
            var sut       = new ImageTransformer();
            var testImage = new Image("01_04_2019_001103", TestHelpers.GetTestImage());
            var result    = sut.ApplyTransforms(testImage);

            result.Name.Should().BeEquivalentTo("01_04_2019_001103");
            TestHelpers.CompareImages(result.ToGdiImage(), TestHelpers.GetTestImage()).Should().Be(CompareResult.Same);
        }
Example #9
        public void WhenCalling_ApplyTransforms_WithBackgroundColour_Should_ReturnImageWithCorrectBackgroundColour(string backgroundColourParameter, string expectedBackgroundColour)
        {
            var sut       = new ImageTransformer();
            var testImage = new Image("01_04_2019_001103", TestHelpers.GetTestImage(makeTransparent: true));
            var result    = sut.ApplyTransforms(testImage, backgroundColour: backgroundColourParameter);

            var expectedGdiImage = TestHelpers.GetTestImage(GDI.Color.FromName(expectedBackgroundColour));

            TestHelpers.CompareImages(result.ToGdiImage(), expectedGdiImage).Should().Be(CompareResult.Same);
        }
Example #10
        public void WhenCalling_ApplyTransforms_WithResolution_Should_ReturnImageWithCorrectResolution(string resolutionParameter, int expectedWidth, int expectedHeight)
        {
            var sut       = new ImageTransformer();
            var testImage = new Image("01_04_2019_001103", TestHelpers.GetTestImage());
            var result    = sut.ApplyTransforms(testImage, resolutionParameter);

            result.ToGdiImage().Width.Should().Be(expectedWidth);
            result.ToGdiImage().Height.Should().Be(expectedHeight);
            result.ToGdiImage().RawFormat.Should().Be(TestHelpers.GetTestImage().RawFormat);
        }
Example #11
        public void WhenCalling_ApplyTransforms_WithWatermarkText_Should_ReturnImageWithWatermark()
        {
            var sut       = new ImageTransformer();
            var testImage = new Image("01_04_2019_001103", TestHelpers.GetTestImage(width: 1000, height: 600));
            var result    = sut.ApplyTransforms(testImage, watermarkText: "Some watermark text");

            var expectedGdiImage = TestHelpers.GetTestImage(width: 1000, height: 600, watermark: "Some watermark text");

            TestHelpers.CompareImages(result.ToGdiImage(), expectedGdiImage).Should().Be(CompareResult.Same);
        }
Example #12
        public MarkdownTransformer()
        {
            var core       = new CoreTransformer();
            var code       = new CodeTransformer(core);
            var acclaim    = new AcclaimTransformer(code);
            var image      = new ImageTransformer(acclaim);
            var gist       = new GistTransformer(image);
            var roiArticle = new ROIArticleTransformer(gist);

            _innerTransformer = roiArticle;
        }
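The constructor above wires the transformers into a decorator chain (core -> code -> acclaim -> image -> gist -> ROI article), so a single call to the outermost transformer runs every stage in order. A hedged sketch of how the chain might be invoked, assuming it exposes the same TransformMarkdown(meta, markdown) method that ImageTransformer has in Example #19:

            // Hypothetical invocation; meta (a JObject) and markdown (a string) are
            // as in Example #19, and TransformMarkdown on the chain is an assumption.
            var transformer = new MarkdownTransformer();
            var result = transformer.TransformMarkdown(meta, markdown);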
Example #13
        static void Main(string[] args) // usage transformerTest.exe PathToImage flip/Rotation x y width height
        {
            string path        = args[0];
            string paramString = args[1] + ' ' + args[2];

            Transformer.Model.TransformationParametrs transformParametrs = Transformer.Model.TransformationParametrs.Parse(paramString);
            byte[] image = File.ReadAllBytes(path);

            ImageTransformer transformer = new ImageTransformer();

            byte[] result = transformer.Transform(image, transformParametrs);
            File.WriteAllBytes("out.png", result);
        }
Example #14
        public async Task <IActionResult> ClassifyBase64(string base64image)
        {
            if (!ModelState.IsValid || base64image == null)
            {
                return(BadRequest("Bad request because of invalid model state or null parameter"));
            }
            byte[] imageData = await ImageTransformer.Base64ToByteArray(base64image);

            if (imageData == null)
            {
                return(BadRequest("Bad request because of an invalid base64 image"));
            }
            return(Classify(imageData));
        }
Example #15
        public void UpdateImage(Imagine image)
        {
            if (image == null)
            {
                return;
            }
            if (_applicationContext.Imagines.FirstOrDefault(i => i.ImagineId == image.ImagineId) == null)
            {
                return;
            }
            var clonedImage = image;

            ImageTransformer.MergeImageEntities(clonedImage, image);
            _applicationContext.SaveChanges();
        }
Example #16
        public void TransformImages(IEnumerable <Image> images)
        {
            if (images == null)
            {
                return;
            }

            foreach (var image in images)
            {
                if (image.BinData == null)
                {
                    continue;
                }

                ImageTransformer.ProcessImage(image);
            }
        }
Example #17
 public GNImageAligner(ImageTransformer transformer, ImageTransformer.Parameters initialParameters, object template0, double[] roiPts, object target0, GNImageAligner.Settings settings)
 {
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(template0, __typeref (GNImageAligner), "com.googlecode.javacv.cpp.opencv_core$IplImage");
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(target0, __typeref (GNImageAligner), "com.googlecode.javacv.cpp.opencv_core$IplImage");
   base.\u002Ector();
   this.residualUpdateNeeded = true;
   this.lastLinePosition = 0;
   this.trials = 0;
   this.setSettings((ImageAligner.Settings) settings);
   initialParameters.size();
   throw new NoClassDefFoundError("com.googlecode.javacv.cpp.opencv_core$IplImage");
 }
Example #18
        public void Evaluate(IEvolutionState state, Individual ind, int subpop, int threadnum)
        {
            if (!ind.Evaluated)
            {
                int imageIndex;
                var SVMTrainData = new StreamWriter(@"F:\Gesty\features\traindata" + threadnum + ".txt");
                var SVMTestData  = new StreamWriter(@"F:\Gesty\features\testdata" + threadnum + ".txt");
                for (imageIndex = 0; imageIndex < imageList.Length; imageIndex++)
                {
                    var image = new Image <Gray, Byte>(imageList[imageIndex]);
                    var line  = new StringBuilder();
                    image.CopyTo(currentImage[threadnum]);
                    image.CopyTo(originalImage[threadnum]);
                    image.Dispose();
                    ((GPIndividual)ind).Trees[0].Child.Eval(state, threadnum, Input, Stack, ((GPIndividual)ind), this);
                    //int[] features = imageTransformer.GetSuperpixelFeatures(currentImage[threadnum]);
                    int[] features = ImageTransformer.GetSquareSuperpixelFeatures(currentImage[threadnum], 25);

                    line.Append((imageIndex / 213) + 1 + " ");
                    for (int i = 1; i <= features.Length; i++)
                    {
                        line.Append(i + ":" + features[i - 1] + " ");
                    }
                    var lineString = line.ToString().Trim();
                    if (imageIndex % 2 == 0)
                    {
                        SVMTrainData.WriteLine(lineString);
                    }
                    else
                    {
                        SVMTestData.WriteLine(lineString);
                    }
                }
                SVMTrainData.Close();
                SVMTestData.Close();
                var      problem     = SVMProblemHelper.Load(@"F:\Gesty\features\traindata" + threadnum + ".txt");
                var      testProblem = SVMProblemHelper.Load(@"F:\Gesty\features\testdata" + threadnum + ".txt");
                var      model       = problem.Train(Parameter);
                double[] target      = testProblem.Predict(model);
                double   accuracy    = testProblem.EvaluateClassificationProblem(target);
                var      f           = ((KozaFitness)ind.Fitness);
                f.SetStandardizedFitness(state, (float)(100 - accuracy));
                ind.Evaluated = true;
            }
        }
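Each line written here follows the LIBSVM text format that SVMProblemHelper.Load expects: a class label followed by space-separated index:value feature pairs, for example 3 1:17 2:0 3:42. Even-indexed images go to the training file and odd-indexed images to the test file, giving a 50/50 split.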
Example #19
        public void Correctly_Ignore_Image_With_Class_With_Responsive()
        {
            var meta = JObject.Parse(
                @"{
                            ""url"": ""/rfc-weekly-17th-October-2016"",
                            ""published"": ""2016-10-17"",
                            ""modified"": ""2016-10-17"",
                            ""title"": ""RFC Weekly - 17th October 2016"",
                            ""enabled"":  ""true""
                        }");
            var markdown = "<img class=\"img-responsive\"\\>";

            var uat = new ImageTransformer();

            var result = uat.TransformMarkdown(meta, markdown);

            Assert.Contains(markdown, result);
        }
Example #20
 public GNImageAligner(ImageTransformer transformer, ImageTransformer.Parameters initialParameters, object template0, double[] roiPts, object target0)
 {
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(template0, __typeref (GNImageAligner), "com.googlecode.javacv.cpp.opencv_core$IplImage");
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(target0, __typeref (GNImageAligner), "com.googlecode.javacv.cpp.opencv_core$IplImage");
   // ISSUE: explicit constructor call
   this.\u002Ector(transformer, initialParameters, template0, roiPts, target0, new GNImageAligner.Settings());
 }
Example #22
Example #21
        private byte[] TransformImage(TransformationsViewModel viewModel)
        {
            var transformations = LoadImageTransformations(viewModel);

            Bitmap bmp = null;

            using (MemoryStream stm = new MemoryStream())
            {
                viewModel.File.CopyTo(stm);

                bmp = ImageTransformer.Apply(stm, transformations);

                // clear memory stream
                stm.Position = 0;
                stm.SetLength(0);

                bmp.Save(stm, System.Drawing.Imaging.ImageFormat.Png);
                return(stm.ToArray());
            }
        }
 public override void Setup(IEvolutionState state, IParameter paramBase)
 {
     imageTransformer = new ImageTransformer(200, 200, @"F:\Gesty\bin", @"F:\Gesty\rescaled");
     currentImage     = new Image <Gray, Byte> [4];
     originalImage    = new Image <Gray, Byte> [4];
     for (int i = 0; i < 4; i++)
     {
         currentImage[i]  = new Image <Gray, Byte>(200, 200);
         originalImage[i] = new Image <Gray, Byte>(200, 200);
     }
     Parameter        = new SVMParameter();
     Parameter.Type   = SVMType.C_SVC;
     Parameter.Kernel = SVMKernelType.POLY;
     base.Setup(state, paramBase);
     Input = (ProblemData)state.Parameters.GetInstanceForParameterEq(
         paramBase.Push(P_DATA), null, typeof(ProblemData));
     Input.Setup(state, paramBase.Push(P_DATA));
     // imageTransformer.RescaleAndSaveImages();
     // imageList = Directory.GetFiles(@"F:\Gesty\rescaled");
     imageTransformer.RescaleAndRotate();
     imageList = Directory.GetFiles(@"F:\Gesty\rotated");
 }
Example #23
        public async Task <IActionResult> SaveUserBase64Contribution(string base64image, string predictedLabel)
        {
            if (!ModelState.IsValid || base64image == null)
            {
                return(BadRequest("Bad request because of invalid model state or null parameter"));
            }
            byte[] imageData = await ImageTransformer.Base64ToByteArray(base64image);

            if (imageData == null)
            {
                return(BadRequest("Bad request because of an invalid base64 image"));
            }
            string filename = await Transformer.SaveBytesToFile(imageData);

            if (filename == null)
            {
                return(BadRequest("The file which you uploaded can't save in server"));
            }
            Sentiment user = new Sentiment(filename, predictedLabel, DateTime.Now);

            sentimentService.Insert(user);
            return(Ok(user));
        }
Example #24
 public override void compose(ImageTransformer.Parameters p1, bool inverse1, ImageTransformer.Parameters p2, bool inverse2)
 {
   base.compose(p1, inverse1, p2, inverse2);
   this.composeGainBias(p1, inverse1, p2, inverse2);
 }
Example #25
        private void ApplyButton_Click(object sender, EventArgs e)
        {
            if (_file == null)
            {
                return;
            }


            List <IImageTransformation> transformations = new List <IImageTransformation>(4);

            if (this.AngleNumericUpDown.Value != 0)
            {
                transformations.Add(
                    new RotationImageTransformation((double)this.AngleNumericUpDown.Value));
            }

            if (this.StretchHorizNumericUpDown.Value != 0 || this.StretchVertNumericUpDown.Value != 0)
            {
                transformations.Add(
                    new StretchImageTransformation(
                        (double)this.StretchHorizNumericUpDown.Value / 100,
                        (double)this.StretchVertNumericUpDown.Value / 100));
            }

            if (this.FlipHorizontalCheckBox.Checked || this.FlipVerticalCheckBox.Checked)
            {
                transformations.Add(
                    new FlipImageTransformation(this.FlipHorizontalCheckBox.Checked, this.FlipVerticalCheckBox.Checked));
            }

            if (new decimal[]
            {
                this.DensityAlphaNumericUpDown.Value,
                this.DensityRedNumericUpDown.Value,
                this.DensityGreenNumericUpDown.Value,
                this.DensityBlueNumericUpDown.Value
            }.Any(a => a != 100))
            {
                transformations.Add(
                    new DensityImageTransformation(
                        (double)this.DensityAlphaNumericUpDown.Value / 100,
                        (double)this.DensityRedNumericUpDown.Value / 100,
                        (double)this.DensityGreenNumericUpDown.Value / 100,
                        (double)this.DensityBlueNumericUpDown.Value / 100
                        ));
            }

            DisposePreviousImage();
            StartStopwatch();


            if (transformations.Any())
            {
                var bmp = ImageTransformer.Apply(_file, transformations.ToArray());

                this.ImagePictureBox.Image = bmp;
            }
            else
            {
                LoadImageFile();
            }

            StopStopwatch();
        }
Example #26
 public virtual void setPriorParameters(ImageTransformer.Parameters priorParameters)
 {
   this.priorParameters.set(priorParameters);
 }
Example #27
 public virtual void setParameters(ImageTransformer.Parameters parameters)
 {
   this.parameters.set(parameters);
   this.subspaceParameters = parameters.getSubspace();
   if (this.subspaceParameters != null && this.settings.subspaceAlpha != 0.0)
   {
     for (int index = 0; index < this.tempSubspaceParameters.Length; ++index)
       this.tempSubspaceParameters[index] = (double[]) this.subspaceParameters.Clone();
   }
   this.residualUpdateNeeded = true;
 }
Example #28
 public virtual void transform(object srcImage, object dstImage, object roi, int pyramidLevel, ImageTransformer.Parameters parameters, bool inverse)
 {
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(srcImage, __typeref (ProCamTransformer), "com.googlecode.javacv.cpp.opencv_core$IplImage");
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(dstImage, __typeref (ProCamTransformer), "com.googlecode.javacv.cpp.opencv_core$IplImage");
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(roi, __typeref (ProCamTransformer), "com.googlecode.javacv.cpp.opencv_core$CvRect");
   if (inverse)
   {
     Throwable.__\u003CsuppressFillInStackTrace\u003E();
     throw new UnsupportedOperationException("Inverse transform not supported.");
   }
   else
   {
     ProCamTransformer.Parameters parameters1 = (ProCamTransformer.Parameters) parameters;
     parameters1.getSurfaceParameters();
     parameters1.getProjectorParameters();
     if (ProCamTransformer.Parameters.access\u0024000(parameters1) == null || ((System.Array) ProCamTransformer.Parameters.access\u0024000(parameters1)).Length <= pyramidLevel)
       throw new NoClassDefFoundError("com.googlecode.javacv.cpp.opencv_core$IplImage");
     else
       throw new NoClassDefFoundError("[Lcom.googlecode.javacv.cpp.opencv_core$IplImage;");
   }
 }
Example #29
 public virtual void transform(object srcPts, object dstPts, ImageTransformer.Parameters parameters, bool inverse)
 {
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(srcPts, __typeref (ProjectiveTransformer), "com.googlecode.javacv.cpp.opencv_core$CvMat");
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(dstPts, __typeref (ProjectiveTransformer), "com.googlecode.javacv.cpp.opencv_core$CvMat");
   ProjectiveTransformer.Parameters parameters1 = (ProjectiveTransformer.Parameters) parameters;
   object dst;
   if (inverse)
   {
     // ISSUE: type reference
     dst = ByteCodeHelper.DynamicCast(ProjectiveTransformer.H3x3.get(), __typeref (ProjectiveTransformer), "com.googlecode.javacv.cpp.opencv_core$CvMat");
     opencv_core.cvInvert(parameters1.getH(), dst);
   }
   else
     dst = parameters1.getH();
   opencv_core.cvPerspectiveTransform(srcPts, dstPts, dst);
 }
Example #30
 /// <summary>
 /// Transform the input byte with the image transformer
 /// </summary>
 /// <param name="input"></param>
 /// <param name="transformer"></param>
 /// <returns></returns>
 public static byte[] Transform(this byte[] input, ImageTransformer transformer)
 {
     return(transformer.Transform(input));
 }
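A usage sketch for the extension method above; the input file and the parameterless ImageTransformer constructor (as used in Example #31) are assumptions for illustration:

     // Hypothetical call site: read image bytes and transform them fluently.
     byte[] input  = File.ReadAllBytes("photo.jpg");          // assumed source of bytes
     byte[] output = input.Transform(new ImageTransformer()); // extension method above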
Example #31
        private void ProcessFullRequest(HttpResponse response)
        {
            using (MemoryStream memoryStream = new MemoryStream()) {
                ImageTransformer imageTransformer = new ImageTransformer();
                imageTransformer.GraphicsQuality = ImageSettings.GraphicsQuality;
                imageTransformer.Copyright       = ImageSettings.Copyright;
                imageTransformer.CopyrightSize   = ImageSettings.CopyrightSize;
                imageTransformer.MaxWidth        = ImageSettings.MaxWidth;
                imageTransformer.MaxHeight       = ImageSettings.MaxHeight;
                imageTransformer.Grayscale       = ImageSettings.Grayscale;
                imageTransformer.Negative        = ImageSettings.Negative;
                imageTransformer.Sepia           = ImageSettings.Sepia;
                imageTransformer.Clip            = ImageSettings.Clip;
                imageTransformer.Quantize        = ImageSettings.Quantize;
                imageTransformer.MaintainPalette = ImageSettings.MaintainPalette;
                imageTransformer.Brightness      = ImageSettings.Brightness;
                imageTransformer.Contrast        = ImageSettings.Contrast;
                imageTransformer.Opacity         = ImageSettings.Opacity;

                string customTransform = ImageSettings.CustomTransform;
                if (!string.IsNullOrEmpty(customTransform))
                {
                    string         customTransformClassName = config.GetImageTransformClassName(customTransform);
                    ImageTransform customTransformClass     = ImageTransformFactory.Create(customTransformClassName);

                    ICustomDataConsumer customTransFormClassAsCustomDataConsumer = customTransformClass as ICustomDataConsumer;
                    if (customTransFormClassAsCustomDataConsumer != null)
                    {
                        customTransFormClassAsCustomDataConsumer.SetCustomData(ImageSettings.CustomData);
                    }

                    imageTransformer.CustomTransforms.Add(customTransformClass);
                }

                ImageRetriever.EnsureImage();
                using (Image originalImage = ImageRetriever.GetImage())
                    using (Image resizedBitmap = imageTransformer.Transform(originalImage)) {
                        long        outputQuality = ImageSettings.OutputQuality;
                        ImageFormat imageFormat   = ImageMetadata.ImageFormat;

                        resizedBitmap.SaveToMemoryStream(memoryStream, imageFormat, outputQuality);
                    }

                memoryStream.Capacity = (int)memoryStream.Position;



                if (memoryStream.Capacity > 0)
                {
                    if (ImageSettings.ServerCacheTimeout > 0)
                    {
                        ImageCacheBroker.AddImageBytes(memoryStream.ToArray());
                    }

                    response.ContentType = ImageMetadata.ContentType;
                    response.AppendHeader("Content-Disposition", string.Concat("inline; filename=\"", ImageMetadata.SaveName, "\""));
                    response.AppendHeader("Content-Length", memoryStream.Capacity.ToString(CultureInfo.InvariantCulture));
                    response.Cache.SetCacheability(HttpCacheability.Public);
                    response.Cache.SetAllowResponseInBrowserHistory(true);
                    response.Cache.SetLastModified(ImageMetadata.LastModifiedDate);
                    response.Cache.SetValidUntilExpires(true);
                    response.Cache.SetExpires(DateTime.UtcNow.AddMinutes(ImageSettings.ClientCacheTimeout));

                    memoryStream.WriteTo(response.OutputStream);
                }
            }
        }
Example #32
        static void Main(string[] args)
        {
            CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;

            ECActivator.AddSourceAssemblies(new[] { Assembly.GetAssembly(typeof(IEvolutionState)), Assembly.GetAssembly(typeof(FeatureExtractionProblem2)) });
            IEvolutionState state = Evolve.Initialize(Evolve.LoadParameterDatabase(new[] { "-file", @"Parameters\problem2.params" }), 11);

            state.Run(EvolutionState.C_STARTED_FRESH);
            var problem = (FeatureExtractionProblem2)state.Evaluator.p_problem;
            // var treeLog1 = state.Output.AddLog(@"F:\Gesty\logs\gvtree_gen1.txt");
            //var treeLog2 = state.Output.AddLog(@"F:\Gesty\logs\gvtree_gen10.txt");
            //var ecjGraph = state.Output.AddLog(@"F:\Gesty\logs\ecjGraph.txt");
            //var bestOfRun = ((SimpleStatistics)state.Statistics).BestOfRun[0];
            //((GPIndividual)bestOfRun).Trees[0].PrintTree(state, ecjGraph);
            //((GPIndividual)bestOfRun).Trees[0].PrintStyle = GPTree.PRINT_STYLE_DOT;
            //((GPIndividual)bestOfRun).Trees[0].PrintTreeForHumans(state, treeLog);

            var input      = problem.Input;
            var stack      = problem.Stack;
            var stats      = (SimpleStatistics)state.Statistics;
            var bestOfRun  = (GPIndividual)stats.BestOfRun[0];
            var treeReader = new StreamReader(@"F:\Gesty\logs\ecjGraph.txt");

            bestOfRun.Trees[0].ReadTree(state, treeReader);
            bestOfRun.Evaluated = false;


            var imageList = new string[] {
                @"F:\Gesty\problem2\grayscale\A\a\color_0_0002.png",
                @"F:\Gesty\problem2\grayscale\A\b\color_1_0002.png",
                @"F:\Gesty\problem2\grayscale\A\c\color_2_0002.png",
                @"F:\Gesty\problem2\grayscale\A\f\color_5_0002.png",
                @"F:\Gesty\problem2\grayscale\A\h\color_7_0002.png"
            };
            int i = 0;

            foreach (string image in imageList)
            {
                var dir     = @"F:\Gesty\testy\examples";
                var tempImg = new Image <Gray, Byte>(image);
                tempImg.CopyTo(problem.currentImage[0]);
                tempImg.CopyTo(problem.originalImage[0]);
                bestOfRun.Trees[0].Child.Eval(state, 0, input, stack, bestOfRun, problem);
                tempImg.Save(dir + @"\" + i + ".png");
                problem.currentImage[0].Save(dir + @"\" + i + "_trans.png");
                ImageTransformer.GetSquareSuperpixelImages(problem.currentImage[0], dir, i.ToString(), 8);
                i++;
            }
            problem.Evaluate(state, bestOfRun, 0, 0);
            Console.WriteLine(bestOfRun.Fitness);

            /*
             * var confMatFile = new StreamWriter(@"F:\Gesty\testy\confmatBest.csv");
             * for (int x = 0; x < 10; x++)
             * {
             *  var line = new StringBuilder();
             *  for (int j = 0; j < 10; j++)
             *  {
             *      line.Append(problem.confMat[x, j].ToString() + ';');
             *  }
             *  confMatFile.WriteLine(line.ToString().Trim(';'));
             * }
             * confMatFile.Close();
             */

            // Console.WriteLine(bestOfRun.Fitness);

            /*
             * var tempImg = new Image<Gray, Byte>(@"F:\Gesty\problem2\grayscale\A\a\color_0_0002.png");
             * tempImg.CopyTo(problem.currentImage[0]);
             * tempImg.CopyTo(problem.originalImage[0]);
             * ((GPIndividual)bestOfRun).Trees[0].Child.Eval(state, 0, input, stack, bestOfRun, problem);
             * problem.currentImage[0].Save(@"F:\Gesty\testy\transformed.png");
             */

            /*
             * var gesty = new string[8] { "piesc", "dlon", "1p", "2p", "3p", "4p", "5p", "kciuk" };
             * var imageIndex = 0;
             * foreach (string gest in gesty)
             * {
             *  var dir = @"F:\Gesty\superpixel\200x200\" + gest;
             *
             *  // original
             *  var tempImg = new Image<Gray, Byte>(problem.imageList[imageIndex]);
             *  tempImg.CopyTo(problem.currentImage[0]);
             *  tempImg.CopyTo(problem.originalImage[0]);
             *  tempImg.Dispose();
             *  bestOfRun.Trees[0].Child.Eval(state, 0, input, stack, bestOfRun, problem);
             *  problem.imageTransformer.GetSuperpixelImages(problem.currentImage[0], dir, "oryginalny");
             *  imageIndex += 101;
             *
             *  // rotated
             *  var tempImg2 = new Image<Gray, Byte>(problem.imageList[imageIndex]);
             *  tempImg2.CopyTo(problem.currentImage[0]);
             *  tempImg2.CopyTo(problem.originalImage[0]);
             *  tempImg2.Dispose();
             *  bestOfRun.Trees[0].Child.Eval(state, 0, input, stack, bestOfRun, problem);
             *  problem.imageTransformer.GetSuperpixelImages(problem.currentImage[0], dir, "obrocony");
             *  imageIndex += 101;
             *
             *  // noisy
             *  var tempImg3 = new Image<Gray, Byte>(problem.imageList[imageIndex]);
             *  tempImg3.CopyTo(problem.currentImage[0]);
             *  tempImg3.CopyTo(problem.originalImage[0]);
             *  tempImg3.Dispose();
             *  bestOfRun.Trees[0].Child.Eval(state, 0, input, stack, bestOfRun, problem);
             *  problem.imageTransformer.GetSuperpixelImages(problem.currentImage[0], dir, "zaklocony");
             *  imageIndex += 11;
             * }
             */
            Console.ReadKey();
        }
Example #33
 public virtual void composeGainBias(ImageTransformer.Parameters p1, bool inverse1, ImageTransformer.Parameters p2, bool inverse2)
 {
   if (!ProjectiveGainBiasTransformer.Parameters.\u0024assertionsDisabled && (inverse1 || inverse2))
   {
     Throwable.__\u003CsuppressFillInStackTrace\u003E();
     throw new AssertionError();
   }
   else
   {
     ProjectiveGainBiasTransformer.Parameters parameters1 = (ProjectiveGainBiasTransformer.Parameters) p1;
     ProjectiveGainBiasTransformer.Parameters parameters2 = (ProjectiveGainBiasTransformer.Parameters) p2;
     object a1 = parameters1.getA();
     object src = parameters1.getB();
     object a2 = parameters2.getA();
     object b = parameters2.getB();
     if (this.b != null)
     {
       if (parameters1.fakeIdentity && ProjectiveGainBiasTransformer.access\u0024200(this.this\u00240) != null)
       {
         // ISSUE: type reference
         object obj1 = ByteCodeHelper.DynamicCast(ProjectiveGainBiasTransformer.access\u0024300().get(), __typeref (ProjectiveGainBiasTransformer.Parameters), "com.googlecode.javacv.cpp.opencv_core$CvMat");
         opencv_core.cvMatMul(ProjectiveGainBiasTransformer.access\u0024200(this.this\u00240), src, obj1);
         src = obj1;
       }
       if (a2 == null && b == null)
         opencv_core.cvCopy(src, this.b);
       else if (src == null)
         opencv_core.cvCopy(b, this.b);
       else if (b == null)
         opencv_core.cvMatMul(a2, src, this.b);
       else
         opencv_core.cvGEMM(a2, src, 1.0, b, 1.0, this.b, 0);
     }
     if (this.A != null)
     {
       if (a1 == null)
         opencv_core.cvCopy(a2, this.A);
       else if (a2 == null)
         opencv_core.cvCopy(a1, this.A);
       else
         opencv_core.cvMatMul(a2, a1, this.A);
     }
     switch (ProjectiveGainBiasTransformer.access\u0024000(this.this\u00240))
     {
       case 0:
         if (!ProjectiveGainBiasTransformer.Parameters.\u0024assertionsDisabled && this.A != null)
         {
           Throwable.__\u003CsuppressFillInStackTrace\u003E();
           throw new AssertionError();
         }
         else
           break;
       case 1:
       case 3:
       case 9:
         throw new NoClassDefFoundError("com.googlecode.javacv.cpp.opencv_core$CvMat");
       default:
         if (!ProjectiveGainBiasTransformer.Parameters.\u0024assertionsDisabled)
         {
           Throwable.__\u003CsuppressFillInStackTrace\u003E();
           throw new AssertionError();
         }
         else
           break;
     }
     switch (ProjectiveGainBiasTransformer.access\u0024100(this.this\u00240))
     {
       case 0:
         if (ProjectiveGainBiasTransformer.Parameters.\u0024assertionsDisabled || this.b == null)
           break;
         Throwable.__\u003CsuppressFillInStackTrace\u003E();
         throw new AssertionError();
       case 1:
       case 3:
         throw new NoClassDefFoundError("com.googlecode.javacv.cpp.opencv_core$CvMat");
       default:
         if (ProjectiveGainBiasTransformer.Parameters.\u0024assertionsDisabled)
           break;
         Throwable.__\u003CsuppressFillInStackTrace\u003E();
         throw new AssertionError();
     }
   }
 }
Example #34
 public override void transform(ImageTransformer.Data[] data, object roi, ImageTransformer.Parameters[] parameters, bool[] inverses)
 {
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(roi, __typeref (ProjectiveGainBiasTransformer), "com.googlecode.javacv.cpp.opencv_core$CvRect");
   if (!ProjectiveGainBiasTransformer.\u0024assertionsDisabled && data.Length != parameters.Length)
   {
     Throwable.__\u003CsuppressFillInStackTrace\u003E();
     throw new AssertionError();
   }
   else
   {
      int num1 = 1;
      for (int index = 0; index < data.Length; ++index)
      {
        ImageTransformer.Data data1 = data[index];
        if (data1.srcImg != null)
        {
          if ((data1.transImg != null || data1.dstImg != null) && (data1.subImg == null && data1.srcDotImg == null) && data1.dstDstDot == null)
          {
            object dstImg = data1.transImg != null ? data1.transImg : data1.dstImg;
            bool inverse = inverses != null && inverses[index];
            this.transform(data1.srcImg, dstImg, roi, data1.pyramidLevel, parameters[index], inverse);
            this.transformGainBias(dstImg, dstImg, roi, data1.pyramidLevel, parameters[index], inverse);
          }
          else
            num1 = 0;
        }
      }
      if (num1 != 0)
        return;
      ProjectiveGainBiasTransformer\u00241Cache transformer1Cache = data[0].cache as ProjectiveGainBiasTransformer\u00241Cache;
      if (transformer1Cache == null || transformer1Cache.length != data.Length)
        data[0].cache = (object) (transformer1Cache = new ProjectiveGainBiasTransformer\u00241Cache(this, data.Length));
      object obj = transformer1Cache.kernelData;
      throw new NoClassDefFoundError("com.googlecode.javacv.cpp.cvkernels$KernelData");
   }
 }
Example #35
 public virtual void transformGainBias(object srcImage, object dstImage, object roi, int pyramidLevel, ImageTransformer.Parameters parameters, bool inverse)
 {
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(srcImage, __typeref (ProjectiveGainBiasTransformer), "com.googlecode.javacv.cpp.opencv_core$IplImage");
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(dstImage, __typeref (ProjectiveGainBiasTransformer), "com.googlecode.javacv.cpp.opencv_core$IplImage");
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(roi, __typeref (ProjectiveGainBiasTransformer), "com.googlecode.javacv.cpp.opencv_core$CvRect");
    ProjectiveGainBiasTransformer.Parameters parameters1 = (ProjectiveGainBiasTransformer.Parameters) parameters;
    if (Arrays.equals(parameters1.getGainBiasParameters(), parameters1.getIdentityGainBiasParameters()) && (this.X == null || parameters1.fakeIdentity) || this.X == null && this.numGains == 0 && this.numBiases == 0)
    {
      if (srcImage == dstImage)
        return;
      opencv_core.cvCopy(srcImage, dstImage);
    }
    else
    {
      // ISSUE: type reference
      object X2 = ByteCodeHelper.DynamicCast(ProjectiveGainBiasTransformer.X24x4.get(), __typeref (ProjectiveGainBiasTransformer), "com.googlecode.javacv.cpp.opencv_core$CvMat");
      this.prepareTransform(X2, pyramidLevel, parameters1, inverse);
      throw new NoClassDefFoundError("com.googlecode.javacv.cpp.opencv_core$CvMat");
    }
 }
Example #36
 /// <summary>
 /// Transform the image bytes using a transformer
 /// </summary>
 /// <param name="transformer"></param>
 /// <returns></returns>
 public VirtualImage Transform(ImageTransformer transformer)
 {
     Content = transformer.Transform(Content);
     return(this);
 }
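Because Transform returns the same VirtualImage instance, calls compose fluently; the two transformer instances below are hypothetical stand-ins for configured ImageTransformer objects:

     // Hypothetical fluent usage of the method above.
     image.Transform(resizeTransformer)
          .Transform(watermarkTransformer);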
Example #37
 public virtual void transform(object srcImage, object dstImage, object roi, int pyramidLevel, ImageTransformer.Parameters parameters, bool inverse)
 {
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(srcImage, __typeref (ProjectiveTransformer), "com.googlecode.javacv.cpp.opencv_core$IplImage");
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(dstImage, __typeref (ProjectiveTransformer), "com.googlecode.javacv.cpp.opencv_core$IplImage");
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(roi, __typeref (ProjectiveTransformer), "com.googlecode.javacv.cpp.opencv_core$CvRect");
   ProjectiveTransformer.Parameters parameters1 = (ProjectiveTransformer.Parameters) parameters;
   if (this.K2 != null && this.invK1 != null && (this.R != null && this.t != null) && parameters1.fakeIdentity)
   {
     if (srcImage == dstImage)
       return;
     opencv_core.cvCopy(srcImage, dstImage);
   }
   else
   {
     // ISSUE: type reference
     object H = ByteCodeHelper.DynamicCast(ProjectiveTransformer.H3x3.get(), __typeref (ProjectiveTransformer), "com.googlecode.javacv.cpp.opencv_core$CvMat");
     this.prepareHomography(H, pyramidLevel, parameters1, true);
     if (roi != null)
       throw new NoClassDefFoundError("com.googlecode.javacv.cpp.opencv_core$CvRect");
     else
       throw new NoClassDefFoundError("com.googlecode.javacv.cpp.opencv_core$IplImage");
   }
 }
Example #38
        public void Evaluate(IEvolutionState state, Individual ind, int subpop, int threadnum)
        {
            if (!ind.Evaluated)
            {
                int counter  = 0;
                var features = new int[5, NumOfImagesPerCategory * NumOfClasses, NumOfFeatures];
                var labels   = new int[5, NumOfImagesPerCategory * NumOfClasses];

                for (int i = 0; i < 5; i++)
                {
                    counter = 0;
                    var categoryDir     = CategoryDirs[i];
                    var subcategoryDirs = Directory.EnumerateDirectories(categoryDir).ToArray();
                    for (int j = 0; j < NumOfClasses; j++)
                    {
                        var subcategoryDir = subcategoryDirs[j];
                        var images         = Directory.GetFiles(subcategoryDir);
                        for (int k = 0; k < NumOfImagesPerCategory; k++)
                        {
                            var tempImage = new Image <Gray, Byte>(images[k]);

                            tempImage.CopyTo(currentImage[threadnum]);
                            tempImage.CopyTo(originalImage[threadnum]);
                            tempImage.Dispose();

                            ((GPIndividual)ind).Trees[0].Child.Eval(state, threadnum, Input, Stack, ((GPIndividual)ind), this);

                            int[] imageFeatures = ImageTransformer.GetSquareSuperpixelFeatures(currentImage[threadnum], SuperpixelSize);
                            for (int x = 0; x < imageFeatures.Length; x++)
                            {
                                features[i, counter, x] = imageFeatures[x];
                                labels[i, counter]      = j + 1;
                            }
                            counter++;
                        }
                    }
                }

                /*
                 * var trainDataFile = new StreamWriter(@"F:\Gesty\problem2\features\traindata" + threadnum + ".txt");
                 * var testDataFile = new StreamWriter(@"F:\Gesty\problem2\features\testdata" + threadnum + ".txt");
                 *
                 * for(int i=0; i<4; i++)
                 * {
                 *  for(int j=0; j<1000; j++)
                 *  {
                 *      var line = new StringBuilder();
                 *      line.Append(labels[i, j].ToString() + " ");
                 *      for (int k=0; k<NumOfFeatures; k++)
                 *      {
                 *          line.Append((k + 1).ToString() + ":" + features[i, j, k].ToString() + " ");
                 *      }
                 *      trainDataFile.WriteLine(line.ToString().Trim());
                 *  }
                 * }
                 * for (int j = 0; j < 1000; j++)
                 * {
                 *  var line = new StringBuilder();
                 *  line.Append(labels[4, j].ToString() + " ");
                 *  for (int k = 0; k < NumOfFeatures; k++)
                 *  {
                 *      line.Append((k + 1).ToString() + ":" + features[4, j, k].ToString() + " ");
                 *  }
                 *  testDataFile.WriteLine(line.ToString().Trim());
                 * }
                 * trainDataFile.Close();
                 * testDataFile.Close();
                 */
                var    confMatI = new double[10, 10];
                double accuracy = 0;
                for (int x = 0; x < 5; x++)
                {
                    var trainData    = new Matrix <float>(NumOfImagesPerCategory * NumOfClasses * 4, NumOfFeatures);
                    var trainClasses = new Matrix <int>(NumOfImagesPerCategory * NumOfClasses * 4, 1);
                    var testData     = new Matrix <float>(NumOfImagesPerCategory * NumOfClasses, NumOfFeatures);
                    var testClasses  = new Matrix <int>(NumOfImagesPerCategory * NumOfClasses, 1);
                    //trainData


                    int imageCount = 0;
                    for (int i = 0; i < 5; i++)
                    {
                        if (i != x)
                        {
                            for (int j = 0; j < NumOfImagesPerCategory * NumOfClasses; j++)
                            {
                                for (int k = 0; k < NumOfFeatures; k++)
                                {
                                    trainData[imageCount, k]    = features[i, j, k];
                                    trainClasses[imageCount, 0] = labels[i, j];
                                }
                                imageCount++;
                            }
                        }
                        else
                        {
                            for (int j = 0; j < NumOfImagesPerCategory * NumOfClasses; j++)
                            {
                                for (int k = 0; k < NumOfFeatures; k++)
                                {
                                    testData[j, k]    = features[i, j, k];
                                    testClasses[j, 0] = labels[i, j];
                                }
                            }
                        }
                    }


                    Emgu.CV.ML.SVM model = new Emgu.CV.ML.SVM();


                    var predictions = new Matrix <float>(NumOfImagesPerCategory * NumOfClasses, 1);
                    var trainData2  = new TrainData(trainData, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, trainClasses);
                    model.Type = Emgu.CV.ML.SVM.SvmType.CSvc;
                    model.SetKernel(Emgu.CV.ML.SVM.SvmKernelType.Poly);
                    model.TermCriteria = new MCvTermCriteria(10000, 0.001);
                    model.Degree       = 3;
                    model.Gamma        = 0.001;
                    model.Coef0        = 0;
                    model.C            = 1000;
                    model.Nu           = 0.5;
                    model.P            = 0.1;
                    model.Train(trainData2);
                    model.Predict(testData, predictions);
                    // var predictionsArray = (float[,])predictions.GetData();
                    int correctPredictions = 0;
                    for (int i = 0; i < predictions.Rows; i++)
                    {
                        if ((int)predictions[i, 0] == testClasses[i, 0])
                        {
                            correctPredictions++;
                        }
                        var predictedLabel = (int)predictions[i, 0];
                        var trueLabel      = testClasses[i, 0];
                        confMatI[predictedLabel - 1, trueLabel - 1]++;
                    }
                    for (int i = 0; i < 10; i++)
                    {
                        for (int j = 0; j < 10; j++)
                        {
                            confMat[i, j] = (confMatI[i, j] / 500) * 100;
                        }
                    }
                    if (correctPredictions > 0)
                    {
                        accuracy += 100 * ((double)correctPredictions / (double)predictions.Rows);
                    }
                }

                /*
                 * for(int i=0; i<NumOfImagesPerCategory*NumOfClasses*4; i++)
                 * {
                 *  for(int j=0; j<NumOfFeatures; j++)
                 *  {
                 *      //trainData[i, j] = ((trainData[i, j] - 0) / (255 - 0)) * (1 + 1) - 1;
                 *  }
                 *  trainClasses[i, 0] = ((trainClasses[i, 0] - 1) / (NumOfClasses - 1)) * (1 + 1) - 1;
                 * }
                 */

                //testData

                /*
                 * for (int j = 0; j < NumOfImagesPerCategory * NumOfClasses; j++)
                 * {
                 *  var line = new StringBuilder();
                 *  line.Append(labels[4, j] + " ");
                 *  for (int k = 0; k < NumOfFeatures; k++)
                 *  {
                 *      line.Append(k + 1 + ":" + features[4, j, k] + " ");
                 *  }
                 *  testData.WriteLine(line.ToString().Trim());
                 * }
                 *
                 * trainData.Close();
                 * testData.Close();
                 */

                //predictions.Dispose();

                /*
                 * var netData = new SharpLearning.Containers.Matrices.F64Matrix(NumOfImagesPerCategory * NumOfClasses * 4, NumOfFeatures);
                 * var netTargets = new double[NumOfImagesPerCategory * NumOfClasses * 4];
                 * int imageCount = 0;
                 * for (int i = 0; i < 4; i++)
                 * {
                 *  for (int j = 0; j < NumOfImagesPerCategory * NumOfClasses; j++)
                 *  {
                 *      for (int k = 0; k < NumOfFeatures; k++)
                 *      {
                 *          netData[imageCount, k] = features[i, j, k];
                 *          netTargets[imageCount] = labels[i, j];
                 *      }
                 *      imageCount++;
                 *  }
                 * }
                 */
                /*
                 * var CVNeuralNet = new Emgu.CV.ML.ANN_MLP();
                 *
                 * CVNeuralNet.TermCriteria = new MCvTermCriteria(10000, 0.001);
                 * var layerSizes = new Matrix<int>(new int[4] { NumOfFeatures, NumOfFeatures * 10, NumOfFeatures*5, 1 });
                 * CVNeuralNet.SetLayerSizes(layerSizes);
                 * CVNeuralNet.SetTrainMethod(ANN_MLP.AnnMlpTrainMethod.Rprop);
                 * CVNeuralNet.SetActivationFunction(ANN_MLP.AnnMlpActivationFunction.SigmoidSym);
                 * CVNeuralNet.BackpropMomentumScale = 0.01;
                 * CVNeuralNet.BackpropWeightScale = 0.2;
                 * var trainData2 = new TrainData(trainData, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, trainClasses);
                 * var predictions = new Matrix<float>(NumOfImagesPerCategory * NumOfClasses*4, 1);
                 * CVNeuralNet.Train(trainData2);
                 * CVNeuralNet.Predict(trainData, predictions);
                 */
                /*
                 * var net = new NeuralNet();
                 * net.Add(new InputLayer(NumOfFeatures));
                 * net.Add(new DropoutLayer(0.5));
                 * net.Add(new DenseLayer(NumOfFeatures * 4, SharpLearning.Neural.Activations.Activation.Sigmoid));
                 * net.Add(new DenseLayer(NumOfFeatures * 4, SharpLearning.Neural.Activations.Activation.Sigmoid));
                 * net.Add(new DropoutLayer(0.5));
                 * net.Add(new SoftMaxLayer(NumOfClasses));
                 * var learner = new ClassificationNeuralNetLearner(net, new SquareLoss());
                 * var model = learner.Learn(netData, netTargets);
                 * var predictions = model.Predict(netData);
                 * int correctPredictions = 0;
                 * for (int i = 0; i < predictions.Length; i++)
                 * {
                 *  if ((int)predictions[i] == netTargets[i])
                 *      correctPredictions++;
                 * }
                 * if (correctPredictions > 0)
                 *  accuracy = 100 * ((double)correctPredictions / (double)predictions.Length);
                 */
                /*
                 * var problem = SVMProblemHelper.Load(@"F:\Gesty\problem2\features\traindata" + threadnum + ".txt");
                 * var testProblem = SVMProblemHelper.Load(@"F:\Gesty\problem2\features\testdata" + threadnum + ".txt");
                 * var model = problem.Train(Parameter);
                 * double[] target = testProblem.Predict(model);
                 * double accuracy = testProblem.EvaluateClassificationProblem(target);
                 */
                var f = (KozaFitness)ind.Fitness;
                f.SetStandardizedFitness(state, (float)(100 - (accuracy / 5)));
                ind.Evaluated = true;

                // Dump labels and transformed features for offline inspection;
                // 1000 matches NumOfImagesPerCategory * NumOfClasses and 64 matches
                // NumOfFeatures for the 8x8 superpixel configuration.
                using (var transFeatures = new StreamWriter(@"F:\Gesty\testy\transFeatures.csv"))
                {
                    for (int i1 = 0; i1 < 1000; i1++)
                    {
                        for (int i2 = 0; i2 < 5; i2++)
                        {
                            var line = new StringBuilder();
                            line.Append(labels[i2, i1].ToString() + ',');
                            for (int i3 = 0; i3 < 64; i3++)
                            {
                                line.Append(features[i2, i1, i3].ToString() + ',');
                            }
                            transFeatures.WriteLine(line.ToString().Trim(','));
                        }
                    }
                }
            }
        }
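The fitness assignment above converts a higher-is-better accuracy into Koza's lower-is-better standardized fitness. A minimal sketch of that mapping, assuming accuracy is accumulated across the five feature sets (so it ranges 0..500); the helper name is hypothetical:

// Hypothetical helper (not part of the original project): normalizes an
// accuracy total accumulated over `numberOfSets` sets to a percentage and
// converts it to standardized fitness, where 0 marks a perfect individual.
static float ToStandardizedFitness(double accumulatedAccuracy, int numberOfSets = 5)
{
    double meanAccuracy = accumulatedAccuracy / numberOfSets; // 0..100
    return (float)(100.0 - meanAccuracy);                     // 0 = perfect
}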
 // Decompiled (IKVM) delegation: casts both point matrices to opencv_core$CvMat,
 // unwraps the nested gain/bias parameters, and forwards to the surface transformer.
 public virtual void transform(object srcPts, object dstPts, ImageTransformer.Parameters parameters, bool inverse)
 {
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(srcPts, __typeref (ProCamTransformer), "com.googlecode.javacv.cpp.opencv_core$CvMat");
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(dstPts, __typeref (ProCamTransformer), "com.googlecode.javacv.cpp.opencv_core$CvMat");
   ProjectiveGainBiasTransformer.Parameters innerParameters =
     ProCamTransformer.Parameters.access\u0024100((ProCamTransformer.Parameters) parameters);
   this.surfaceTransformer.transform(srcPts, dstPts, (ImageTransformer.Parameters) innerParameters, inverse);
 }
Beispiel #40
0
        public static PatternMatchingTransformationResult Execute(
            EImageBW8 eImage,
            float max_AngleOffset,
            float max_XTranslateOffset,
            float max_YTranslateOffset,
            int maxNumberOfTrial,
            EuresysDoublePatternMatcher eMatcher,
            PatternMatcherParameters patternMatcherParameters,
            EROIBW8 matcherROI,
            ELineGauge gauge1,
            ELineGauge gauge2,
            ELineGauge gauge3,
            ELineGauge gauge4,
            double filterTolerance,
            double fiducialOffset,
            WaferOrientation fiducialOrientation,
            bool isIncludeFiducialTolerance,
            Func <ELineGauge,
                  ELineGauge,
                  ELineGauge,
                  ELineGauge,
                  EImageBW8,
                  double,
                  double,
                  WaferOrientation,
                  bool,
                  PointF> FindCenter,
            PointF TeacherMidPoint)
        {
            List <string> messages    = new List <string>();
            float         thetaOffset = -1;
            float         xOffset     = -1;
            float         yOffset     = -1;

            bool isPass = false;
            bool isThetaOffsetWithinTolerance = false;
            bool isXOffsetWithinTolerance     = false;
            bool isYOffsetWithinTolerance     = false;

            int xTranslationCount     = -1;
            int yTranslationCount     = -1;
            int thetaTranslationCount = -1;

            PointF calibratedCenterPoint = new PointF(
                patternMatcherParameters.WaferCenterXPos,
                patternMatcherParameters.WaferCenterYPos);
            PointF currentWaferCenterPoint            = new PointF(-1, -1);
            EuresysDoubleMatcherResults matchedResult = null;

            messages.Add("Maximum NumberOfTrial is " + maxNumberOfTrial);
            messages.Add("Accepted AngleOffset is below " + max_AngleOffset);
            messages.Add("Accepted X Offset is below " + max_XTranslateOffset);
            messages.Add("Accepted Y Offset is below " + max_YTranslateOffset);

            #region X Offset

            for (int i = 0; i <= maxNumberOfTrial; i++)
            {
                // Find X Offset
                currentWaferCenterPoint = FindCenter.Invoke(
                    gauge1,
                    gauge2,
                    gauge3,
                    gauge4,
                    eImage,
                    filterTolerance,
                    fiducialOffset,
                    fiducialOrientation,
                    isIncludeFiducialTolerance);

                xOffset = calibratedCenterPoint.X - currentWaferCenterPoint.X;

                if (Math.Abs(xOffset) < max_XTranslateOffset)
                {
                    isXOffsetWithinTolerance = true;
                    xTranslationCount        = i;
                    messages.Add("XOffset within tolerance");
                    messages.Add("Number of X tranlation performed = " + xTranslationCount);
                    break;
                }

                else
                {
                    isXOffsetWithinTolerance = false;
                    messages.Add(string.Format("XOffset: {0} out of tolerance", xOffset));
                    eImage = ImageTransformer.TranslateImage_X(eImage, xOffset);
                    messages.Add("Image X Translated by " + xOffset);
                }

                if (i == maxNumberOfTrial)
                {
                    xTranslationCount = i;
                    messages.Add("Maximum number of trials for XOffset reached");
                }
            }

            #endregion X Offset

            #region Y Offset

            for (int i = 0; i <= maxNumberOfTrial; i++)
            {
                // Find Y Offset
                currentWaferCenterPoint = FindCenter.Invoke(
                    gauge1,
                    gauge2,
                    gauge3,
                    gauge4,
                    eImage,
                    filterTolerance,
                    fiducialOffset,
                    fiducialOrientation,
                    isIncludeFiducialTolerance);
                yOffset = calibratedCenterPoint.Y - currentWaferCenterPoint.Y;

                if (Math.Abs(yOffset) < max_YTranslateOffset)
                {
                    isYOffsetWithinTolerance = true;
                    yTranslationCount        = i;
                    messages.Add("YOffset within tolerance");
                    messages.Add("Number of Y tranlation performed = " + yTranslationCount);
                    break;
                }

                else
                {
                    isYOffsetWithinTolerance = false;
                    messages.Add(string.Format("YOffset: {0} out of tolerance", yOffset));
                    eImage = ImageTransformer.TranslateImage_Y(eImage, yOffset);
                    messages.Add("Image Y Translated by " + yOffset);
                }

                if (i == maxNumberOfTrial)
                {
                    yTranslationCount = i;
                    messages.Add("Maximum number of trials for YOffset reached");
                }
            }

            #endregion Y Offset

            #region Theta Offset

            if (isXOffsetWithinTolerance && isYOffsetWithinTolerance)
            {
                for (int i = 0; i <= maxNumberOfTrial; i++)
                {
                    currentWaferCenterPoint = FindCenter.Invoke(
                        gauge1,
                        gauge2,
                        gauge3,
                        gauge4,
                        eImage,
                        filterTolerance,
                        fiducialOffset,
                        fiducialOrientation,
                        isIncludeFiducialTolerance);

                    EuresysEROIHelper.AttachROI(eImage, matcherROI);
                    eMatcher.MatchPatterns(matcherROI);


                    WaferOrientation patternOrientation = FindWaferOrientation(currentWaferCenterPoint, TeacherMidPoint);


                    matchedResult = PatternMatcherParameterHelper.CreateDoublePatternMatcherResult(
                        eMatcher,
                        matcherROI,
                        patternMatcherParameters.OriginalXPos_pattern1,
                        patternMatcherParameters.OriginalYPos_pattern1,
                        currentWaferCenterPoint,
                        patternMatcherParameters.DefaultAngleOffset,
                        patternOrientation);


                    thetaOffset = matchedResult.AngleBetweenResult;
                    if (thetaOffset > 180)
                    {
                        throw new ArgumentOutOfRangeException(nameof(thetaOffset), "theta offset must not be greater than 180");
                    }
                    else if (thetaOffset < -180)
                    {
                        throw new ArgumentOutOfRangeException(nameof(thetaOffset), "theta offset must not be less than -180");
                    }

                    if (Math.Abs(thetaOffset) < max_AngleOffset)
                    {
                        isThetaOffsetWithinTolerance = true;
                        thetaTranslationCount        = i;
                        messages.Add("Theta Offset within tolerance");
                        messages.Add("Number of Theta tranlation performed = " + thetaTranslationCount);
                        break;
                    }
                    else
                    {
                        isThetaOffsetWithinTolerance = false;
                        messages.Add(string.Format("Theta Offset: {0} out of tolerance", thetaOffset));
                        eImage = ImageTransformer.RotateImage(eImage, thetaOffset, calibratedCenterPoint);
                        messages.Add("Image Theta Rotated by " + thetaOffset + " around calibrated Center Point");
                    }

                    if (i == maxNumberOfTrial)
                    {
                        thetaTranslationCount = i;
                        messages.Add("Maximum number of trials for Theta Offset reached");
                    }
                }
            }

            else
            {
                if (!isXOffsetWithinTolerance)
                {
                    messages.Add("Theta offset correction skipped due to X Offset out of tolerance");
                }

                if (!isYOffsetWithinTolerance)
                {
                    messages.Add("Theta offset correction skipped due to X Offset out of tolerance");
                }
            }

            #endregion Theta Offset

            #region Final Result

            messages.Add("Final Angle is " + Math.Round(thetaOffset, 4));
            messages.Add("Final X Offset is " + Math.Round(xOffset, 4));
            messages.Add("Final Y Offset is " + Math.Round(yOffset, 4));

            if (isThetaOffsetWithinTolerance &&
                isXOffsetWithinTolerance &&
                isYOffsetWithinTolerance)
            {
                isPass = true;
                messages.Add("Result is Pass");
            }

            else
            {
                isPass = false;
                messages.Add("Result is False");
            }

            return(new PatternMatchingTransformationResult()
            {
                MatchedResult = matchedResult,
                eImageAfterTransformation = eImage,
                FinalAngleOffset = thetaOffset,
                FinalXOffset = xOffset,
                FinalYOffset = yOffset,
                IsPass = isPass,
                IsThetaOffsetWithinTolerance = isThetaOffsetWithinTolerance,
                IsXOffsetWithinTolerance = isXOffsetWithinTolerance,
                IsYOffsetWithinTolerance = isYOffsetWithinTolerance,
                XTranlastionCount = xTranslationCount, // property name typo comes from the result type itself
                YTranslationCount = yTranslationCount,
                ThetaTranslationCount = thetaTranslationCount,
                FinalWaferCenter = currentWaferCenterPoint,
                Messages = messages,
            });

            #endregion Final Result
        }
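A call-site sketch for Execute; every value, instance, and the FindWaferCenter method below are illustrative assumptions rather than values taken from the surrounding project:

// Hypothetical usage (all tolerances and instances are assumptions):
PatternMatchingTransformationResult result = Execute(
    eImage,
    0.5f,                    // max_AngleOffset, degrees
    2.0f,                    // max_XTranslateOffset, pixels
    2.0f,                    // max_YTranslateOffset, pixels
    5,                       // maxNumberOfTrial
    eMatcher,
    patternMatcherParameters,
    matcherROI,
    gauge1, gauge2, gauge3, gauge4,
    0.1,                     // filterTolerance
    0.0,                     // fiducialOffset
    fiducialOrientation,
    false,                   // isIncludeFiducialTolerance
    FindWaferCenter,         // hypothetical method matching the FindCenter signature
    new PointF(512, 512));   // TeacherMidPoint

foreach (string message in result.Messages)
    Console.WriteLine(message);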
Beispiel #41
0
        public static PatternMatchingTransformationResult ExecuteWithoutRotation(
            EImageBW8 eImage,
            float max_XTranslateOffset,
            float max_YTranslateOffset,
            int maxNumberOfTrial,
            PatternMatcherParameters patternMatcherParameters,
            ELineGauge gauge1,
            ELineGauge gauge2,
            ELineGauge gauge3,
            ELineGauge gauge4,
            double filterTolerance,
            double fiducialOffset,
            WaferOrientation fiducialOrientation,
            bool isIncludeFiducialTolerance,
            Func <ELineGauge,
                  ELineGauge,
                  ELineGauge,
                  ELineGauge,
                  EImageBW8,
                  double,
                  double,
                  WaferOrientation,
                  bool,
                  PointF> FindCenter)
        {
            List <string> messages    = new List <string>();
            float         thetaOffset = -1;
            float         xOffset     = -1;
            float         yOffset     = -1;

            bool isPass = false;
            bool isXOffsetWithinTolerance = false;
            bool isYOffsetWithinTolerance = false;

            int xTranslationCount = -1;
            int yTranslationCount = -1;

            PointF calibratedCenterPoint = new PointF(
                patternMatcherParameters.WaferCenterXPos,
                patternMatcherParameters.WaferCenterYPos);
            PointF currentWaferCenterPoint            = new PointF(-1, -1);
            EuresysDoubleMatcherResults matchedResult = null;

            messages.Add("Maximum NumberOfTrial is " + maxNumberOfTrial);
            messages.Add("Accepted X Offset is below " + max_XTranslateOffset);
            messages.Add("Accepted Y Offset is below " + max_YTranslateOffset);

            #region X Offset

            for (int i = 0; i <= maxNumberOfTrial; i++)
            {
                // Find X Offset
                currentWaferCenterPoint = FindCenter.Invoke(
                    gauge1,
                    gauge2,
                    gauge3,
                    gauge4,
                    eImage,
                    filterTolerance,
                    fiducialOffset,
                    fiducialOrientation,
                    isIncludeFiducialTolerance);

                xOffset = calibratedCenterPoint.X - currentWaferCenterPoint.X;

                if (Math.Abs(xOffset) < max_XTranslateOffset)
                {
                    isXOffsetWithinTolerance = true;
                    xTranslationCount        = i;
                    messages.Add("XOffset within tolerance");
                    messages.Add("Number of X tranlation performed = " + xTranslationCount);
                    break;
                }

                else
                {
                    isXOffsetWithinTolerance = false;
                    messages.Add(string.Format("XOffset: {0} out of tolerance", xOffset));
                    eImage = ImageTransformer.TranslateImage_X(eImage, xOffset);
                    messages.Add("Image X Translated by " + xOffset);
                }

                if (i == maxNumberOfTrial)
                {
                    xTranslationCount = i;
                    messages.Add("Maximum number of trials for XOffset reached");
                }
            }

            #endregion X Offset

            #region Y Offset

            for (int i = 0; i <= maxNumberOfTrial; i++)
            {
                // Find Y Offset
                currentWaferCenterPoint = FindCenter.Invoke(
                    gauge1,
                    gauge2,
                    gauge3,
                    gauge4,
                    eImage,
                    filterTolerance,
                    fiducialOffset,
                    fiducialOrientation,
                    isIncludeFiducialTolerance);
                yOffset = calibratedCenterPoint.Y - currentWaferCenterPoint.Y;

                if (Math.Abs(yOffset) < max_YTranslateOffset)
                {
                    isYOffsetWithinTolerance = true;
                    yTranslationCount        = i;
                    messages.Add("YOffset within tolerance");
                    messages.Add("Number of Y tranlation performed = " + yTranslationCount);
                    break;
                }

                else
                {
                    isYOffsetWithinTolerance = false;
                    messages.Add(string.Format("YOffset: {0} out of tolerance", yOffset));
                    eImage = ImageTransformer.TranslateImage_Y(eImage, yOffset);
                    messages.Add("Image Y Translated by " + yOffset);
                }

                if (i == maxNumberOfTrial)
                {
                    yTranslationCount = i;
                    messages.Add("Maximum number of trials for YOffset reached");
                }
            }

            #endregion Y Offset

            #region Theta Offset

            thetaOffset = 0;
            messages.Add("Theta Offset is skipped");

            #endregion Theta Offset

            #region Final Result

            messages.Add("Final Angle is " + Math.Round(thetaOffset, 4));
            messages.Add("Final X Offset is " + Math.Round(xOffset, 4));
            messages.Add("Final Y Offset is " + Math.Round(yOffset, 4));

            if (isXOffsetWithinTolerance &&
                isYOffsetWithinTolerance)
            {
                isPass = true;
                messages.Add("Result is Pass");
            }

            else
            {
                isPass = false;
                messages.Add("Result is False");
            }

            return(new PatternMatchingTransformationResult()
            {
                MatchedResult = matchedResult,
                eImageAfterTransformation = eImage,
                FinalAngleOffset = thetaOffset,
                FinalXOffset = xOffset,
                FinalYOffset = yOffset,
                IsPass = isPass,
                IsThetaOffsetWithinTolerance = true,
                IsXOffsetWithinTolerance = isXOffsetWithinTolerance,
                IsYOffsetWithinTolerance = isYOffsetWithinTolerance,
                XTranlastionCount = xTranslationCount,
                YTranslationCount = yTranslationCount,
                ThetaTranslationCount = 0,
                FinalWaferCenter = currentWaferCenterPoint,
                Messages = messages,
            });

            #endregion Final Result
        }
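Both overloads take the same nine-parameter FindCenter delegate. A stub that only illustrates the expected signature; the body is a placeholder, not the project's real gauge-based center computation:

// Hypothetical stub showing the FindCenter signature:
Func<ELineGauge, ELineGauge, ELineGauge, ELineGauge,
     EImageBW8, double, double, WaferOrientation, bool, PointF> findCenter =
    (g1, g2, g3, g4, image, filterTolerance, fiducialOffset, orientation, includeTolerance) =>
        new PointF(0, 0); // placeholder: a real implementation derives the wafer center from the four line gauges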
Beispiel #42
0
 /// <summary>
 /// Transforms all input virtual images with the given image transformer in parallel.
 /// </summary>
 /// <param name="inputs">Images to transform.</param>
 /// <param name="transformer">Transformer applied to each image.</param>
 /// <returns>The transformed images; output order is not guaranteed (see the note below).</returns>
 public static VirtualImage[] Transform(this VirtualImage[] inputs, ImageTransformer transformer)
 {
     return(inputs.AsParallel().Select(s => s.Transform(transformer)).ToArray());
 }
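Note that AsParallel() does not guarantee output order, so the returned array may be permuted relative to inputs; chaining AsOrdered() restores positional correspondence at a small cost. A usage sketch (the loader and factory are assumptions):

// Hypothetical usage:
VirtualImage[] inputs = LoadVirtualImages();             // assumed loader
ImageTransformer transformer = CreateTransformer();      // assumed factory
VirtualImage[] outputs = inputs.Transform(transformer);  // order not guaranteed

// Order-preserving variant, if outputs[i] must correspond to inputs[i]:
VirtualImage[] ordered = inputs.AsParallel().AsOrdered()
                               .Select(s => s.Transform(transformer))
                               .ToArray();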
 // Decompiled (IKVM) batch transform: entries that only need a plain
 // source-to-destination warp are delegated to the single-image transform;
 // anything carrying sub-images or dot images falls through to a kernel path
 // whose cvkernels$KernelData dependency could not be resolved, so it throws.
 public virtual void transform(ImageTransformer.Data[] data, object roi, ImageTransformer.Parameters[] parameters, bool[] inverses)
 {
   // ISSUE: type reference
   ByteCodeHelper.DynamicCast(roi, __typeref (ProjectiveTransformer), "com.googlecode.javacv.cpp.opencv_core$CvRect");
   if (!ProjectiveTransformer.\u0024assertionsDisabled && data.Length != parameters.Length)
   {
     Throwable.__\u003CsuppressFillInStackTrace\u003E();
     throw new AssertionError();
   }
   bool allHandled = true;
   for (int index = 0; index < data.Length; ++index)
   {
     ImageTransformer.Data entry = data[index];
     if (entry.srcImg == null)
       continue;
     if ((entry.transImg != null || entry.dstImg != null) &&
         entry.subImg == null && entry.srcDotImg == null && entry.dstDstDot == null)
     {
       object dstImage = entry.transImg != null ? entry.transImg : entry.dstImg;
       bool inverse = inverses != null && inverses[index];
       this.transform(entry.srcImg, dstImage, roi, entry.pyramidLevel, parameters[index], inverse);
     }
     else
     {
       allHandled = false;
     }
   }
   if (allHandled)
     return;
   // Kernel fallback: (re)build the shared cache, then fail because the
   // cvkernels helper type is unavailable in this decompilation.
   ProjectiveTransformer\u00241Cache transformer1Cache = data[0].cache as ProjectiveTransformer\u00241Cache;
   if (transformer1Cache == null || transformer1Cache.length != data.Length)
     data[0].cache = (object) (transformer1Cache = new ProjectiveTransformer\u00241Cache(this, data.Length));
   throw new NoClassDefFoundError("com.googlecode.javacv.cpp.cvkernels$KernelData");
 }
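Stripped of the decompiler scaffolding, the batch method above follows a simple dispatch shape: delegate per entry when a plain source-to-destination warp suffices, and fall back to a batched path otherwise. A minimal sketch of that shape with placeholder types (none of these names come from the original library):

// Hypothetical, simplified dispatch sketch:
interface IWarp
{
    void Apply(object src, object dst, object roi, int pyramidLevel, bool inverse);
}

static void TransformAll((object src, object dst, int pyramidLevel)[] entries,
                         object roi, bool[] inverses, IWarp warp)
{
    for (int i = 0; i < entries.Length; i++)
    {
        // Per-entry delegation; a null inverses array means "forward" for all entries.
        bool inverse = inverses != null && inverses[i];
        warp.Apply(entries[i].src, entries[i].dst, roi, entries[i].pyramidLevel, inverse);
    }
}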